import numpy as np
import pandas as pd
import tensorflow as tf
from PrognosAIs.IO import utils
class LabelLoader:
def __init__(
self,
label_file: str,
filter_missing: bool = False,
missing_value: int = -1,
make_one_hot: bool = False,
new_root_path: str = None,
) -> None:
"""
        Create a label loader that can load the image paths and labels
        from a text file, to be used for a data generator
Args:
label_file: The label file from which to read the labels
filter_missing: Whether missing values should be masked when generating one hot labels and class weights
missing_value: If filter_missing is True, this value is used to mask
make_one_hot: Whether labels should be transformed to one hot labels
new_root_path: If you want to move the files, this will be the new root path
"""
self.label_file = label_file
self.label_data = pd.read_csv(label_file, sep="\t", header=0, index_col=0)
self._original_label_data = self.label_data.copy(deep=True)
self.filter_missing = filter_missing
self.missing_value = missing_value
self.make_one_hot = make_one_hot
self.one_hot_encoded = False
self.new_root_path = new_root_path
self.total_weight_sum = 1.0
if self.new_root_path is not None:
self.replace_root_path()
if self.make_one_hot:
self.encode_labels_one_hot()
return
def get_labels(self) -> list:
"""Get all labels of all samples
Args:
None
Returns:
labels: List of labels
"""
labels = np.squeeze(self.label_data.values)
if isinstance(labels, np.ndarray) and labels.size > 1:
labels = labels.tolist()
elif isinstance(labels, np.ndarray):
            # If there is only one element, np.squeeze has reduced the array
            # to a scalar, so wrap it to still return a list instead of a bare value
labels = [labels.tolist()]
else:
labels = [labels]
return labels
def get_samples(self) -> list:
"""Get all labels of all samples
Args:
None
Returns:
samples: List of samples
"""
return self.label_data.index.to_list()
def get_data(self) -> dict:
"""Get all data from the label file
Args:
None
Returns:
data: Dictionary mapping each sample to each label
"""
return self.label_data.to_dict(orient="index")
def get_label_from_sample(self, sample: str) -> dict:
"""Get label from a sample
Args:
sample: The sample from which to get the label
Returns:
label: Label of the sample
"""
return self.label_data.loc[sample].to_dict()
def get_label_categories(self) -> list:
"""Get categories of labels
Args:
None
Returns:
label_categories: Category names
"""
return self.label_data.columns.to_numpy(copy=True).tolist()
def get_labels_from_category(self, category_name: str) -> list:
"""Get labels of a specific category/class
Args:
category_name: Name of the category/class to get
Returns:
list: Labels of the category
"""
return self.label_data[category_name].to_numpy(copy=True).tolist()
def get_original_labels_from_category(self, category_name: str) -> list:
"""Get original labels of a specific category/class
Args:
category_name: Name of the category/class to get
Returns:
list: Original labels of the category
"""
return self._original_label_data[category_name].to_numpy(copy=True).tolist()
def get_label_category_type(self, category_name: str) -> type:
"""Get the type of a label of a specific category/class
Args:
category_name: Name of the category/class to get type of
Returns:
type: Type of the labels of the category
"""
category_label_type = type(self.label_data[category_name][0])
return category_label_type
def get_original_label_category_type(self, category_name: str) -> type:
"""Get the original type of a label of a specific category/class
Args:
category_name: Name of the category/class to get type of
Returns:
type: Type of the labels of the category
"""
category_label_type = type(self._original_label_data[category_name][0])
return category_label_type
def encode_labels_one_hot(self) -> None:
"""Encode sample labels as one hot
Args:
None
Returns:
None
"""
if self.one_hot_encoded:
return
label_categories = self.get_label_categories()
for i_label_category in label_categories:
category_type = self.get_label_category_type(i_label_category)
if np.issubdtype(category_type, np.integer):
category_labels = self.get_labels_from_category(i_label_category)
category_labels = np.asarray(category_labels)
labels_min = np.amin(category_labels)
if self.filter_missing and labels_min == self.missing_value:
labels_min = np.amin(category_labels[category_labels != self.missing_value])
N_labels = np.amax(category_labels) - labels_min + 1
category_labels -= labels_min
one_hot_labels = tf.one_hot(category_labels, N_labels, dtype=tf.int8).numpy()
                # One hot rows of missing labels are all zeros; set them back to the missing value
missing_label_index = np.sum(one_hot_labels, axis=-1)
one_hot_labels[missing_label_index == 0] = self.missing_value
self.label_data[i_label_category] = one_hot_labels.tolist()
self.one_hot_encoded = True
return
def replace_root_path(self) -> None:
"""Replace the root path of the sample files in case they have been
moved to a different a different directory.
Args:
new_root_path: Path in which the files are now located
Returns:
None
"""
if self.new_root_path is not None:
samples = self.get_samples()
new_root_path = utils.normalize_path(self.new_root_path)
for i_i_sample, i_sample in enumerate(samples):
old_root_path = utils.get_file_path(i_sample)
samples[i_i_sample] = i_sample.replace(old_root_path, new_root_path)
self.label_data.index = samples
label_categories = self.get_label_categories()
for i_label_category in label_categories:
if self.get_label_category_type(i_label_category) is str:
category_labels = self.get_labels_from_category(i_label_category)
for i_i_label, i_label in enumerate(category_labels):
old_root_path = utils.get_file_path(i_label)
category_labels[i_i_label] = i_label.replace(old_root_path, new_root_path)
self.label_data[i_label_category] = category_labels
return
def get_class_weights(self, json_serializable=False) -> dict:
""" Get class weights for unbalanced labels
Args:
None
Returns:
Scaled_weights: the weights for each class of each label category, scaled
such that the total weights*number of samples of each class
approximates the total number of samples
"""
out_scaled_weights = {}
for i_label_category in self.get_label_categories():
category_type = self.get_original_label_category_type(i_label_category)
if np.issubdtype(category_type, np.integer):
category_labels = self.get_original_labels_from_category(i_label_category)
category_labels = np.asarray(category_labels)
if self.filter_missing:
category_labels = category_labels[category_labels != self.missing_value]
if self.make_one_hot:
category_labels = category_labels - np.amin(category_labels)
classes, counts = np.unique(category_labels, return_counts=True)
N_samples = len(category_labels)
weights = N_samples / (counts * len(classes))
if json_serializable:
classes = [str(i_class) for i_class in classes]
# Dictionary for each category, with dictionary of weight for each class in that category
# This allows for easy input with tensorflow
out_scaled_weights[i_label_category] = dict(zip(classes, weights))
else:
out_scaled_weights[i_label_category] = None
return out_scaled_weights
def get_number_of_classes_from_category(self, category_name: str) -> int:
""" Get number of classes for a label category
Args:
category_name: Category to get number of classes for
Returns:
number_of_classes: The number of classes for the category
"""
category_type = self.get_original_label_category_type(category_name)
        if np.issubdtype(category_type, np.integer):
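# -----------------------------------------------------------------------------
# Editor's usage sketch (not part of PrognosAIs): a minimal example of driving
# LabelLoader with a small tab-separated label file. The file contents, sample
# paths and the "Label" column name are hypothetical.
def _label_loader_example():
    import tempfile

    content = (
        "Sample\tLabel\n"
        "/data/subject_1.nii.gz\t0\n"
        "/data/subject_2.nii.gz\t1\n"
        "/data/subject_3.nii.gz\t-1\n"
    )
    with tempfile.NamedTemporaryFile("w", suffix=".tsv", delete=False) as handle:
        handle.write(content)
        label_file = handle.name

    loader = LabelLoader(label_file, filter_missing=True, missing_value=-1, make_one_hot=True)
    print(loader.get_samples())        # sample paths read from the first column
    print(loader.get_labels())         # one hot labels; the missing row is masked with -1
    print(loader.get_class_weights())  # per-category class weights from the original labels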
# coding: utf8
"""
Ensemble de fonctions pour manipuler une instance du problème "Le jardinier et
les taupes", dans le cas où l'objectif est :
- qu'aucune taupe ne puisse pénétrer dans le jardin ;
- que le nombre de pièges soit minimal.
"""
import os
import itertools
import numpy as np
import pulp
def _dimcheck(grid, threshold):
"""
Indique si la grille dont `grid` est la complémentaire est admissible
dans sa dernière dimension.
Paramètres :
------------
- grid : tableau numpy
Tableau contenant des 0 (espace occupé) et des 1 (espace libre).
- threshold : entier positif
Nombre d'espaces libres adjacents à partir duquel la grille est
considérée comme non admissible.
Exemples :
----------
# On crée une grille avec deux espaces libres adjacents :
>>> grid = np.array([0, 1, 1, 0, 0])
>>> _dimcheck(grid, 2)
False
>>> _dimcheck(grid, 3)
True
"""
dsize = grid.shape[-1]
if threshold > dsize:
return True
elif threshold < 0:
raise ValueError("threshold must be positive.")
check = 0
for start in range(threshold):
check += grid[..., start:(dsize - threshold + 1 + start)]
if np.any(check >= threshold):
return False
else:
return True
def admissible(grid, threshold):
"""
Indique si la grille `grid` est admissible. Une grille est admissible si
elle ne contient jamais plus de `threshold` - 1 espaces libres adjacents.
Paramètres :
------------
- grid : tableau numpy
Tableau contenant des 0 (espace libre) et des 1 (espace occupé).
- threshold : entier positif
Nombre d'espaces libres adjacents à partir duquel la grille est
considérée comme non admissible.
Exemples :
----------
>>> grid = np.array([0, 1, 1, 0, 0, 1]).reshape((2, 3))
>>> admissible(grid, 2)
False
>>> admissible(grid, 3)
True
"""
    # The computation method is brute force.
    comp = np.where(grid, 0, 1)  # Work on the complement of grid.
res = True
for _ in range(comp.ndim):
res = (res and _dimcheck(comp, threshold))
if res is False:
break
        # Circular permutation of the axes:
comp = comp.transpose(comp.ndim - 1, *range(comp.ndim - 1))
return res
def score(grid, threshold):
"""
Calcule le score associé à la grille `grid`. Plus le score est faible,
meilleur il est ; si la grille n'est pas admissible, renvoie un score
égal à l'infini.
Paramètres :
------------
- grid : tableau numpy
Tableau contenant des 0 (espace libre) et des 1 (espace occupé).
- threshold : entier positif
Nombre d'espaces libres adjacents à partir duquel la grille est
considérée comme non admissible.
"""
if admissible(grid, threshold):
return grid.sum()
else:
return np.inf
def generate(shape, npoints):
"""
Génère une grille ayant la forme `shape` et contenant `npoints` pièges.
Paramètres :
------------
- shape : entier positif, tuple d'entiers positifs
Dimensions de la grille.
- npoints : entier positif
Nombre de pièges imposés à placer aléatoirement dans la grille.
"""
size = np.product(shape)
if size <= 0:
raise ValueError("the shape %s should contain positive values only."\
% str(shape))
points = np.random.choice(np.arange(size), npoints, replace=False)
    grid = np.zeros(size, dtype=np.int)
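# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original module): checking a hand-built
# 3x3 grid with two traps against the admissibility test and score above.
def _moles_example():
    grid = np.array([[0, 1, 0],
                     [0, 0, 0],
                     [0, 1, 0]])
    # With threshold=3, no row or column may contain 3 adjacent free cells (0s).
    print(admissible(grid, 3))  # False: the middle row has three adjacent free cells
    print(score(grid, 3))       # inf, because the grid is not admissible
    print(score(grid, 4))       # 2: admissible, so the score is the number of traps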
# Script for Da-Tacos cover song identification from Feature Fused Matrices
#Importing
import librosa
import numpy as np
import scipy
from scipy.spatial.distance import pdist, squareform
from scipy.interpolate import interp2d
from scipy.sparse.csgraph import laplacian
from scipy.spatial.distance import directed_hausdorff
from scipy.cluster import hierarchy
from scipy.linalg import eigh
from scipy.ndimage import median_filter
from sklearn.metrics import average_precision_score
from sklearn.preprocessing import normalize
import cv2
from sklearn import metrics
import dill
import sys
import glob
import os
import random
import json
import deepdish as dd
#change matplotlib backend to save rendered plots correctly on linux
import matplotlib as mpl
mpl.use('Agg')
from matplotlib import pyplot as plt
# #--supress warnings--#
# import warnings
# warnings.filterwarnings("ignore")
#---Load metadata---#
with open('/home/ismir/Documents/ISMIR/Datasets/da-tacos/da-tacos_benchmark_subset_metadata.json') as f:
benchmark_metadata = json.load(f)
#---Segmentation parameters---#
rs_size = 128
kmin = 8
kmax = 12
#---Counters---#
count = 0
W_count=0
P_count = 0
#---Loading limits---#
min_covers = 5 #load works for which there are at least min_covers performances
max_covers = 5 #stop loading performances if over max_covers per work
max_works = 15
#---Storage---#
all_sets = []
#all_shapeDNAs = []
all_WP = []
y = []
#for all Works
for W in benchmark_metadata.keys():
if len(benchmark_metadata[W].keys()) >= min_covers: #if it contains at least 5 covers
P_count = 0
#for all performances
for P in benchmark_metadata[W].keys():
P_count += 1
#Computations
try:
SSM = dd.io.load("/home/ismir/Documents/ISMIR/Datasets/da-tacosSSMs/StructureLaplacian_datacos_crema_" + P + ".h5")['WFused']
except:
print("Couldn't load " + P + ".")
continue
N = dd.io.load("/home/ismir/Documents/ISMIR/Datasets/da-tacosSSMs/StructureLaplacian_datacos_crema_" + P + ".h5")['N']
#Construct square matrix from flattened upper triangle
A = np.zeros((N,N))
iN = np.triu_indices(N) #return indices for upper-triangle of (N,N) matrix
for i in range(len(SSM)):
A[iN[0][i]][iN[1][i]] = SSM[i]
B = np.transpose(A)
square_SSM = A+B
#Resample
SSM_ds = cv2.resize(square_SSM, (rs_size,rs_size))
#Compute the Laplacian
L = laplacian(SSM_ds, normed=True)
#Laplacian eigenvalues and eigenvectors
evals, evecs = eigh(L)
# #Shape DNA
# shapeDNA = evals[:30]
# all_shapeDNAs.append(shapeDNA)
#Hierarchical structure
evecs = median_filter(evecs, size=(9, 1))
Cnorm = np.cumsum(evecs**2, axis=1)**0.5
# #temporary replacement for bug
# a_min_value = 3.6934424e-08
# Cnorm[Cnorm == 0.0] = a_min_value
# if (np.isnan(np.sum(Cnorm))):
# print("WOOOOOAH")
dist_set = []
for k in range(kmin, kmax):
X = evecs[:, :k] / Cnorm[:, k-1:k]
distance = squareform(pdist(X, metric='euclidean'))
dist_set.append(distance)
all_sets.append(dist_set)
y.append(W)
#append W and P
all_WP.append([W, P])
#plt.matshow()
#plt.colorbar()
#plt.show()
if (P_count >=max_covers):
break
W_count +=1
sys.stdout.write("\rLoading %i works." % W_count)
sys.stdout.flush()
if (W_count >= max_works):
break
all_sets = np.asarray(all_sets)
file_no = all_sets.shape[0]
# all_shapeDNAs = np.asarray(all_shapeDNAs)
print("\nLoaded Da-TACOS SMMs.")
print("Data shape:", all_sets.shape)
#------------#
#-Formatting-#
#------------#
all_flat = [] #kmin-kmin sets each with a flattened matrix
all_merged = [] #single concatenated vector with all flattened matrices
all_shingled2 = [] #shingle adjacent pairs of flat approximations
all_shingled3 = [] #shingle adjacent triples of flat approximations
#traverse songs
for f in range(file_no):
#formatting
flat_approximations = []
merged_approximations = np.empty((0))
for j in range(kmax-kmin):
flat_approximations.append(all_sets[f][j].flatten())
merged_approximations = np.concatenate((merged_approximations, flat_approximations[j]))
all_flat.append(np.asarray(flat_approximations))
all_merged.append(merged_approximations)
#shingling per 2
shingled = []
for j in range(kmax-kmin-1):
#shingled.append(np.array([all_flat[f][j],all_flat[f][j+1]]))
shingled.append(np.concatenate((all_flat[f][j],all_flat[f][j+1]), axis=None))
all_shingled2.append(np.asarray(shingled))
#shingling per 3
shingled = []
for j in range(kmax-kmin-2):
#shingled.append(np.array([all_flat[f][j],all_flat[f][j+1],all_flat[f][j+2]]))
shingled.append(np.concatenate((all_flat[f][j],all_flat[f][j+1],all_flat[f][j+2]), axis=None))
all_shingled3.append(np.asarray(shingled))
#progress
sys.stdout.write("\rFormatted %i/%s approximation sets." % ((f+1), str(file_no)))
sys.stdout.flush()
print('')
all_flat = np.asarray(all_flat)
all_merged = np.asarray(all_merged)
all_shingled2 = np.asarray(all_shingled2)
all_shingled3 = np.asarray(all_shingled3)
#----------------------#
#-Covers vs Non-covers-#
#----------------------#
#True if cover, False if non-cover
covers = np.zeros((len(all_WP), len(all_WP)), dtype=np.bool_)
for i in range(len(all_WP)):
for j in range(len(all_WP)):
if (all_WP[i][0] == all_WP[j][0]):
covers[i][j] = True
else:
covers[i][j] = False
#-----------#
#-Distances-#
#-----------#
fig_dir = '/home/ismir/Documents/ISMIR/figures/datacos/'
#---L1---#
L1_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
for j in range(max_covers*max_works):
L1_distances[i][j] = np.linalg.norm(all_merged[i*max_covers]-all_merged[j], ord=1)
# #Histogram
# L1_distances_covers = []
# L1_distances_noncovers = []
# for i in range(file_no):
# for j in range(file_no):
# if covers[i][j]:
# if (L1_distances[i][j] != 0):
# L1_distances_covers.append(L1_distances[i][j])
# else:
# L1_distances_noncovers.append(L1_distances[i][j])
# plt.figure()
# plt.hist(L1_distances_covers, bins=200, alpha=0.5, label='Covers', density=1)
# plt.hist(L1_distances_noncovers, bins=200, alpha=0.5, label='Non-covers', density=1)
# plt.title("Histogram of L1 distances between cover and non-cover pairs")
# plt.legend(loc='upper right')
# plt.savefig(fig_dir+'Histogram-L1norm.png')
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
d = L1_distances[i]
d = np.argsort(d)
hits = []
cvrs = []
for j in range(max_covers-1):
cvrs.append((i*max_covers)+j+1)
for c in range(len(cvrs)): #traverse covers
hits.append(np.where(d==cvrs[c])[0][0])
hit_positions.append(min(hits))
cvrs.insert(0,cvrs[0]-1)
all_cvrs.append(cvrs)
L1_average_hit = np.mean(hit_positions)
print('L1 mean position of first hit:', L1_average_hit)
#Mean Average Precision
for i in range(max_works):
#get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
d = 1-(L1_distances[i]/np.linalg.norm(L1_distances[i]))
cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
for c in all_cvrs[i]:
cr[c] = 1
mAP = 0
for j in range(max_works):
mAP += average_precision_score(cr, d)
mAP = mAP/float(max_works)
print('L1 mean average precision:', mAP)
#---Frobenius norm---#
fro_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
for j in range(max_covers*max_works):
fro_distances[i][j] = np.linalg.norm(all_merged[i*max_covers]-all_merged[j])
# #Histogram
# fro_distances_covers = []
# fro_distances_noncovers = []
# for i in range(file_no):
# for j in range(file_no):
# if covers[i][j]:
# if (fro_distances[i][j] != 0):
# fro_distances_covers.append(fro_distances[i][j])
# else:
# fro_distances_noncovers.append(fro_distances[i][j])
# plt.figure()
# plt.hist(fro_distances_covers, bins=200, alpha=0.5, label='Covers', density=1)
# plt.hist(fro_distances_noncovers, bins=200, alpha=0.5, label='Non-covers', density=1)
# plt.title("Histogram of fro distances between cover and non-cover pairs")
# plt.legend(loc='upper right')
# plt.savefig(fig_dir+'Histogram-fronorm.png')
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
d = fro_distances[i]
d = np.argsort(d)
hits = []
cvrs = []
for j in range(max_covers-1):
cvrs.append((i*max_covers)+j+1)
for c in range(len(cvrs)): #traverse covers
hits.append(np.where(d==cvrs[c])[0][0])
hit_positions.append(min(hits))
cvrs.insert(0,cvrs[0]-1)
all_cvrs.append(cvrs)
fro_average_hit = np.mean(hit_positions)
print('fro mean position of first hit:', fro_average_hit)
#Mean Average Precision
for i in range(max_works):
#get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
d = 1-(fro_distances[i]/np.linalg.norm(fro_distances[i]))
cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
for c in all_cvrs[i]:
cr[c] = 1
mAP = 0
for j in range(max_works):
mAP += average_precision_score(cr, d)
mAP = mAP/float(max_works)
print('fro mean average precision:', mAP)
#---Sub-sequence Dynamic Time Warping Cost---#
dtw_cost = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
for j in range(max_covers*max_works):
costs = []
for k in range(kmax-kmin):
costs.append(librosa.sequence.dtw(all_sets[i*max_covers][k], all_sets[j][k], subseq=False, metric='euclidean')[0][rs_size-1,rs_size-1])
dtw_cost[i][j] = sum(costs)/len(costs)
# dtw_cost_covers = []
# dtw_cost_noncovers = []
# for i in range(file_no):
# for j in range(file_no):
# if covers[i][j]:
# if (dtw_cost[i][j] != 0):
# dtw_cost_covers.append(dtw_cost[i][j])
# else:
# dtw_cost_noncovers.append(dtw_cost[i][j])
# plt.figure()
# plt.hist(dtw_cost_covers, bins=200, alpha=0.5, label='Covers', density=1)
# plt.hist(dtw_cost_noncovers, bins=200, alpha=0.5, label='Non-covers', density=1)
# plt.title("Histogram of subsequence DTW cost between cover and non-cover pairs")
# plt.legend(loc='upper right')
# plt.savefig(fig_dir+'Histogram-dtw.png')
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
d = dtw_cost[i]
d = np.argsort(d)
hits = []
cvrs = []
for j in range(max_covers-1):
cvrs.append((i*max_covers)+j+1)
for c in range(len(cvrs)): #traverse covers
hits.append(np.where(d==cvrs[c])[0][0])
hit_positions.append(min(hits))
cvrs.insert(0,cvrs[0]-1)
all_cvrs.append(cvrs)
dtw_average_hit = np.mean(hit_positions)
print('dtw mean position of first hit:', dtw_average_hit)
#Mean Average Precision
for i in range(max_works):
#get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
d = 1-(dtw_cost[i]/np.linalg.norm(dtw_cost[i]))
cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
for c in all_cvrs[i]:
cr[c] = 1
mAP = 0
for j in range(max_works):
mAP += average_precision_score(cr, d)
mAP = mAP/float(max_works)
print('dtw mean average precision:', mAP)
#---Directed Hausdorff distance---#
hausdorff_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
for j in range(max_covers*max_works):
hausdorff_distances[i][j] = (directed_hausdorff(all_flat[i*max_covers], all_flat[j]))[0]
# hausdorff_distances_covers = []
# hausdorff_distances_noncovers = []
# for i in range(file_no):
# for j in range(file_no):
# if covers[i][j]:
# if (hausdorff_distances[i][j] != 0):
# hausdorff_distances_covers.append(hausdorff_distances[i][j])
# else:
# hausdorff_distances_noncovers.append(hausdorff_distances[i][j])
# plt.figure()
# plt.hist(hausdorff_distances_covers, bins=200, alpha=0.5, label='Covers', density=1)
# plt.hist(hausdorff_distances_noncovers, bins=200, alpha=0.5, label='Non-covers', density=1)
# plt.title("Histogram of Hausdorff distances between cover and non-cover pairs")
# plt.legend(loc='upper right')
# plt.savefig(fig_dir+'Histogram-hau.png')
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
d = hausdorff_distances[i]
d = np.argsort(d)
hits = []
cvrs = []
for j in range(max_covers-1):
cvrs.append((i*max_covers)+j+1)
for c in range(len(cvrs)): #traverse covers
hits.append(np.where(d==cvrs[c])[0][0])
hit_positions.append(min(hits))
cvrs.insert(0,cvrs[0]-1)
all_cvrs.append(cvrs)
hau_average_hit = np.mean(hit_positions)
print('hau mean position of first hit:', hau_average_hit)
#Mean Average Precision
for i in range(max_works):
#get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
d = 1-(hausdorff_distances[i]/np.linalg.norm(hausdorff_distances[i]))
cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
for c in all_cvrs[i]:
cr[c] = 1
mAP = 0
for j in range(max_works):
mAP += average_precision_score(cr, d)
mAP = mAP/float(max_works)
print('hau mean average precision:', mAP)
#---Minimum distance across all pairs---#
min_distances = np.zeros((max_works, max_works*max_covers))
for i in range(max_works):
for j in range(max_covers*max_works):
dists = []
for n in range(kmax-kmin):
for m in range(kmax-kmin):
dists.append(np.linalg.norm(all_sets[i*max_covers][n]-all_sets[j][m]))
min_distances[i][j] = min(dists)
# min_distances_covers = []
# min_distances_noncovers = []
# for i in range(file_no):
# for j in range(file_no):
# if covers[i][j]:
# if (min_distances[i][j] != 0):
# min_distances_covers.append(min_distances[i][j])
# else:
# min_distances_noncovers.append(min_distances[i][j])
# plt.figure()
# plt.hist(min_distances_covers, bins=200, alpha=0.5, label='Covers', density=1)
# plt.hist(min_distances_noncovers, bins=200, alpha=0.5, label='Non-covers', density=1)
# plt.title("Histogram of min pair distances between cover and non-cover pairs")
# plt.legend(loc='upper right')
# plt.savefig(fig_dir+'Histogram-pair.png')
#Mean position of first hit
all_cvrs = []
hit_positions = []
for i in range(max_works):
d = min_distances[i]
d = np.argsort(d)
hits = []
cvrs = []
for j in range(max_covers-1):
cvrs.append((i*max_covers)+j+1)
for c in range(len(cvrs)): #traverse covers
hits.append(np.where(d==cvrs[c])[0][0])
hit_positions.append(min(hits))
cvrs.insert(0,cvrs[0]-1)
all_cvrs.append(cvrs)
pair_average_hit = np.mean(hit_positions)
print('pair mean position of first hit:', pair_average_hit)
#Mean Average Precision
for i in range(max_works):
#get all distances to selected song, normalize [0,1], convert to similarity metric, not dissimilarity
d = 1-(min_distances[i]/np.linalg.norm(min_distances[i]))
    cr = np.zeros((max_works*max_covers)) #get all cover relationships to selected song
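# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original script): the first-hit and mean
# average precision blocks above repeat the same logic for every distance
# matrix; they could be folded into a single helper such as this one.
def evaluate_distance_matrix(distances, max_works, max_covers):
    """Return (mean position of first hit, mean average precision) for a
    (max_works, max_works*max_covers) matrix of query-to-performance distances."""
    hit_positions = []
    ap_scores = []
    for i in range(max_works):
        order = np.argsort(distances[i])
        cover_ids = [i * max_covers + j for j in range(max_covers)]
        # position of the first true cover, excluding the query itself
        hits = [np.where(order == c)[0][0] for c in cover_ids[1:]]
        hit_positions.append(min(hits))
        relevance = np.zeros(max_works * max_covers)
        relevance[cover_ids] = 1
        # normalize distances and convert to a similarity score
        similarity = 1 - distances[i] / np.linalg.norm(distances[i])
        ap_scores.append(average_precision_score(relevance, similarity))
    return np.mean(hit_positions), np.mean(ap_scores)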
import numpy as np
from scipy.stats import t
class Variable_selection():
def __init__(self,model,input_data,target_data):
self.model = model
self.input_data = input_data
self.target_data = target_data
def Sum_of_SQ(self,model, X, Y):
yhat = model.predict(X)
SSR = sum((np.mean(Y) - yhat) ** 2)
SSE = sum((Y - yhat) ** 2)
df_ssr = np.shape(X)[1]
df_sse = np.shape(X)[0] - np.shape(X)[1]
return SSR, SSE, df_ssr, df_sse
def T_statistics(self,model, X, Y):
params = np.append(model.intercept_, model.coef_)
predictions = model.predict(X)
newX = np.append(np.ones((len(X), 1)), X, axis=1)
MSE = (sum((Y - predictions) ** 2)) / (len(newX) - len(newX[0]))
var_b = MSE * (np.linalg.inv(np.dot(newX.T, newX)).diagonal())
sd_b = np.sqrt(var_b)
ts_b = params / sd_b
p_values = [2 * (1 - t.cdf(np.abs(i), (len(newX) - 1))) for i in ts_b]
return ts_b, p_values
def F_statistics(self,model, X, Y):
SSR, SSE, df_ssr, df_sse = self.Sum_of_SQ(model, X, Y)
F = (SSR / df_ssr) / (SSE / df_sse)
return F
def R_sq(self,model, X, Y):
model_trained = model.fit(X,Y)
yhat = model_trained.predict(X)
SSR = np.sum((np.mean(Y) - yhat) ** 2)
SSE = np.sum((Y - yhat) ** 2)
SST = SSR + SSE
r_sq = 1 - (float(SSE)) / SST
adj_r_sq = 1 - (1 - r_sq) * (len(Y) - 1) / (len(Y) - X.shape[1] - 1)
return r_sq, adj_r_sq
# selection cells
def forward_cell(self, model, candidate_var, X, Y):
initial_var = [n for n in range(0, np.shape(X)[1])]
possible_list = np.delete(initial_var, candidate_var, 0).tolist()
F_list = []
# For all variables are selected
if len(possible_list) == 0:
tmpX = np.take(X, candidate_var, axis=1)
model.fit(tmpX, Y)
_, p_selected = self.T_statistics(model, tmpX, Y)
return candidate_var, p_selected
else:
for i in range(len(possible_list)):
tmp_variable = candidate_var + possible_list[i:i + 1]
tmp_input = np.take(X, tmp_variable, axis=1)
model.fit(tmp_input, Y)
F = self.F_statistics(model, tmp_input, Y)
F_list.append(F)
selected_idx = np.argmax(F_list)
result = candidate_var + [possible_list[selected_idx]]
# for stopping
model.fit(np.take(X, result, axis=1), Y)
_, p_selected = self.T_statistics(model, np.take(X, result, axis=1), Y)
return result, p_selected[1:]
def backward_cell(self,model, candidate_var, X, Y):
SSR_list = []
for i in range(len(candidate_var)):
tmp_var = candidate_var[:i] + candidate_var[i + 1:]
tmp_input = np.take(X, tmp_var, axis=1)
model.fit(tmp_input, Y)
SSR, SSE, _, _ = self.Sum_of_SQ(model, tmp_input, Y)
SSR_list.append(SSR)
        selected_idx = np.argmax(SSR_list)
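# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original class): a forward selection loop
# built on Variable_selection.forward_cell, using scikit-learn's
# LinearRegression on synthetic data. The 0.05 p-value cut-off is illustrative.
def _forward_selection_example():
    from sklearn.linear_model import LinearRegression

    rng = np.random.RandomState(0)
    X = rng.randn(100, 5)
    y = 2.0 * X[:, 0] - 1.5 * X[:, 3] + 0.1 * rng.randn(100)

    model = LinearRegression()
    selector = Variable_selection(model, X, y)

    selected = []
    while len(selected) < X.shape[1]:
        candidate, p_values = selector.forward_cell(model, selected, X, y)
        # stop once the most recently added variable is no longer significant
        if p_values[-1] > 0.05:
            break
        selected = candidate
    print("selected variables:", selected)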
import numpy as np
import torch
def normalize(x):
s = torch.sum(x)
if torch.abs(s) < 1e-10:
return torch.ones_like(x) / x.shape[0]
else:
return x / s
def MPE(X, U, qr=True, objective=None):
"""Minimal polynomial extrapolation
:param X: k x n matrix, sequence elements
:param U: n x k matrix, differences
:param qr: use QR factorization
"""
n, k = U.shape
c = torch.ones(k, device=U.device, dtype=U.dtype)
A = U[:, :-1]
b = -U[:, [-1]]
if qr:
Q, R = torch.linalg.qr(A, mode="reduced")
c[:-1] = torch.triangular_solve(Q.T @ b, R, upper=True).solution.flatten()
else:
M = A.T @ A
c[:-1] = torch.solve(A.T @ b, M).solution.flatten()
gamma = normalize(c)
return gamma @ X
def RRE(X, U, qr=True, objective=None):
"""Reduced rank extrapolation
:param X: k x n matrix, sequence elements
:param U: n x k matrix, differences
:param qr: use QR factorization
"""
n, k = U.shape
b = torch.ones((k, 1), device=U.device, dtype=U.dtype)
if qr:
Q, R = torch.linalg.qr(U, mode="r")
y = torch.triangular_solve(b, R.T, upper=False).solution
c = torch.triangular_solve(y, R, upper=True).solution
else:
M = U.T @ U
c = torch.solve(b, M).solution
gamma = normalize(c)
return (gamma.T @ X).flatten()
def regularized_RRE(X, U, lambda_, objective=None):
"""Regularized nonlinear acceleration
:param X: k x n matrix, sequence elements
:param U: n x k matrix, differences
:param lambda_: regularization constant
"""
n, k = U.shape
M = U.T @ U
M = M / torch.linalg.norm(M, 2)
I = torch.eye(k, device=U.device, dtype=U.dtype)
b = torch.ones((k, 1), device=U.device, dtype=U.dtype)
c = torch.solve(b, M + lambda_ * I).solution
gamma = normalize(c)
return (gamma.T @ X).flatten()
def MMPE(X, U, objective=None):
"""Modified MPE
:param X: k x n matrix, sequence elements
:param U: n x k matrix, differences
"""
n, k = U.shape
c = torch.ones(k, device=U.device, dtype=U.dtype)
c[:-1] = torch.solve(-U[:k - 1, [-1]], U[:k - 1, :-1]).solution.flatten()
gamma = normalize(c)
return gamma @ X
def TEA_solve(X, U, q=None, objective=None):
"""Topological Shanks transformation using matrix inverse
:param X: k+1 x n matrix, sequence elements
:param U: n x 2k matrix, differences
:param q: vector used for scalar product, by default a vector of ones
"""
n, k2 = U.shape
k = k2 // 2
if q is None:
q = torch.ones(n, device=U.device, dtype=U.dtype)
A = torch.zeros((k, k + 1), device=U.device, dtype=U.dtype)
for i in range(k):
A[i, :] = q[None, :] @ U[:, i:i + k + 1]
c = torch.ones(k + 1, device=U.device, dtype=U.dtype)
c[:-1] = torch.solve(-A[:, [-1]], A[:, :-1]).solution.flatten()
gamma = normalize(c)
return gamma @ X
def inv(x):
return x / torch.sum(x ** 2, 1, keepdim=True)
def vector_epsilon_v1(X, k, U=None, objective=None):
"""Vector epsilon algorithm using the Moore–Penrose generalized inverse
:param X: 2k+1 x n matrix; sequence elements
:param k: value of k for the algorithm
"""
e0 = torch.zeros((X.shape[0] + 1, X.shape[1]), device=X.device, dtype=X.dtype)
e1 = X
e2 = None
for _ in range(2 * k):
e2 = e0[1:-1] + inv(e1[1:] - e1[:-1])
e0 = e1
e1 = e2
return e2.flatten()
def vector_epsilon_v2(X, k, U=None, objective=None, q=None):
"""Topological epsilon algorithm
:param X: 2k+1 x n matrix; sequence elements
:param k: value of k for the algorithm
:param q: a vector of size n, used in the scalar product
"""
n, m = X.shape
e_odd = torch.zeros((n + 1, m), device=X.device, dtype=X.dtype)
e_even = X.clone()
if q is None:
q = torch.ones(m, device=X.device, dtype=X.dtype)
for i in range(k):
for j in range(n - 2 * i - 1):
e_odd[j] = e_odd[j + 1] + q / (q @ (e_even[j + 1] - e_even[j]))
for j in range(n - 2 * i - 2):
e_even[j] = e_even[j + 1] + (e_even[j + 1] - e_even[j]) \
/ ((e_odd[j + 1] - e_odd[j]) @ (e_even[j + 1] - e_even[j]))
return e_even[:n - 2 * k].flatten()
def topological_vector_epsilon(X: torch.Tensor, k, U=None, objective=None, q=None):
"""Simplified topological epsilon algorithm
:param X: 2k+1 x n matrix; sequence elements
:param k: value of k for the algorithm
:param q: a vector of size n, used in the scalar product
"""
if q is None:
q = torch.ones(X.shape[1], device=X.device, dtype=X.dtype)
e = X.clone()
eps1 = torch.zeros((X.shape[0] + 1, 1), device=X.device)
eps2 = X @ q[:, None]
for i in range(k):
# scalar update for 2k+1
eps1 = eps1[1:-1] + 1. / (eps2[1:] - eps2[:-1])
# vector update
e = e[1:-1] + (e[2:] - e[1:-1]) / ((eps2[2:] - eps2[1:-1]) * (eps1[1:] - eps1[:-1]))
# scalar update for 2k+2
eps2 = eps2[1:-1] + 1. / (eps1[1:] - eps1[:-1])
return e.flatten()
def RNA(X, U, objective, lambda_range, linesearch=True, norm=True):
"""Adaptive regularized nonlinear acceleration
:param X: k x n matrix, sequence elements
:param U: n x k matrix, differences
:param objective: objective function to be minimized
:param lambda_range: range of tested values of the regularization parameter
:param linesearch: if True, linesearch is used to improve the solution
:param norm: if True, matrix U^TU is normalized
"""
n, k = U.shape
solutions = []
M = U.T @ U
if norm:
M /= torch.linalg.norm(M, 2)
I = torch.eye(k, device=U.device, dtype=U.dtype)
b = torch.ones((k, 1), device=U.device, dtype=U.dtype)
for lambda_ in np.geomspace(lambda_range[0], lambda_range[1], k):
c = torch.solve(b, M + lambda_ * I).solution
gamma = normalize(c.T)
solutions.append((gamma @ X).flatten())
values = [objective(x).item() for x in solutions]
idx = np.argmin(values)
solution = solutions[idx]
if linesearch:
t = 1
x0 = X[0]
ft = objective(x0 + t * (solution - x0)).item()
f2t = objective(x0 + 2 * t * (solution - x0)).item()
while f2t < ft:
t *= 2
ft = f2t
f2t = objective(x0 + 2 * t * (solution - x0)).item()
return x0 + t * (solution - x0)
else:
return solution
def RNA_cholesky(X, U, objective, lambda_range, linesearch=True, norm=True):
"""Adaptive regularized nonlinear acceleration using Cholesky decomposition"""
n, k = U.shape
solutions = []
b = torch.ones((k, 1), device=U.device, dtype=U.dtype)
if norm:
U_norm = torch.linalg.norm(U, 2).item()
else:
U_norm = 1.
# \|U^T U\| = \|U\|^2
for lambda_ in np.geomspace(lambda_range[0], lambda_range[1], k) * U_norm ** 2:
L = torch.zeros((k, k), device=U.device, dtype=U.dtype)
L[0, 0] = torch.sqrt(U[:, 0] @ U[:, 0] + lambda_)
for i in range(1, k):
a = torch.triangular_solve(U[:, :i].T @ U[:, [i]], L[:i, :i], upper=False).solution
d = torch.sqrt(U[:, i] @ U[:, i] + lambda_ - a.T @ a).item()
assert d != 0, f"L will be singular; lambda={lambda_}, i={i}"
L[i, :i] = a.ravel()
L[i, i] = d
L /= U_norm
y = torch.triangular_solve(b, L, upper=False).solution
c = torch.triangular_solve(y, L.T, upper=True).solution
gamma = normalize(c.T)
solutions.append((gamma @ X).flatten())
values = [objective(x).item() for x in solutions]
idx = np.argmin(values)
solution = solutions[idx]
if linesearch:
t = 1
x0 = X[0]
ft = objective(x0 + t * (solution - x0)).item()
f2t = objective(x0 + 2 * t * (solution - x0)).item()
while f2t < ft:
t *= 2
ft = f2t
f2t = objective(x0 + 2 * t * (solution - x0)).item()
return x0 + t * (solution - x0)
else:
return solution
def mixing_RNA(X, Y, lambda_, beta, objective=None):
"""Online RNA from 'Online Regularized Nonlinear Acceleration'"""
X = X.T
Y = Y.T
n, k = X.shape
U = X - Y
M = U.T @ U
M = M / torch.linalg.norm(M, 2)
I = torch.eye(k, device=U.device, dtype=U.dtype)
b = torch.ones((k, 1), device=U.device, dtype=U.dtype)
c = torch.solve(b, M + lambda_ * I).solution
gamma = normalize(c)
return ((Y - beta * U) @ gamma).flatten()
def optimal_RNA(X, Y, lambda_, alpha, beta, objective, f_xi=None):
"""Algorithm 3 from 'Online Regularized Nonlinear Acceleration'"""
y_extr = mixing_RNA(X, Y, lambda_, 0)
z = (y_extr + beta * X[-1]) / (1. + beta)
if f_xi is None:
f_xi = objective(X[-1]).item()
if objective(z).item() < f_xi - 0.5 * alpha * f_xi ** 2:
return y_extr
else:
return (1. + beta) * X[-1] - beta * X[-2]
def difference_matrix(X):
k = len(X) - 1
    U = torch.empty((X[0].shape[0], k), dtype=X[0].dtype, device=X[0].device)
for i in range(k):
U[:, i] = X[i + 1] - X[i]
return U
def absmax(x, axis=None):
idx = np.argmax(np.abs(x), axis=axis)
if axis is None:
return x.ravel()[idx]
else:
idx = np.expand_dims(idx, axis=axis)
return np.take_along_axis(x, idx, axis=axis)
def safe_div(a, b):
return np.divide(a, b, out=np.zeros_like(a), where=b != 0)
def levin_remainder(x, type, vector):
N = x.shape[0]
dx = np.diff(x, axis=0)
if type == "t":
r = dx
elif type == "u":
if vector:
r = np.arange(1, N) * absmax(dx, 1).ravel()
else:
r = np.arange(1, N)[:, None] * dx
elif type == "v":
r = safe_div(dx[:-1] * dx[1:], dx[1:] - dx[:-1])
else:
raise RuntimeError("Invalid type")
if vector and type != "u":
r = absmax(r, 1).ravel()
return r
def h_algorithm(xt, k, type="t", U=None, objective=None):
""" Vector E-algorithm
:param xt: k+2 x n matrix for type = "t", "u"; k+3 x n matrix for type = "v"; sequence elements
:param k: value of k for the algorithm
:param type: remainder estimate, either "t", "u" or "v"
"""
x = xt.cpu().numpy()
r = levin_remainder(x, type, True)
N = min(x.shape[0], r.shape[0])
h = x[:N]
g = r[:N, None] / (np.arange(N)[:, None] + 1) ** np.arange(k)[None, :]
for i in range(k): # i -> k
h = h[:-1] - g[:-1, i, None] * np.diff(h, axis=0) / np.diff(g[:, [i]], axis=0)
if i < k - 1:
g = g[:-1] - g[:-1, [i]] * np.diff(g, axis=0) / np.diff(g[:, [i]], axis=0)
return torch.tensor(h.ravel(), dtype=xt.dtype, device=xt.device)
def levin_transform(xt, k, type="t", U=None, objective=None):
""" Vector transform using the recursive algorithm
:param xt: k+2 x n matrix for type = "t", "u"; k+3 x n matrix for type = "v"; sequence elements
:param k: value of k for the algorithm
:param type: remainder estimate, either "t", "u" or "v"
"""
def step(s, i):
n = s.shape[0]
c = np.arange(1, n) # n + \beta
if i > 0:
c = c * (c + i) ** (i - 1) / (c + i + 1) ** i
else:
c = np.ones_like(c)
return s[1:] - c[:, None] * s[:-1]
x = xt.cpu().numpy()
r = levin_remainder(x, type, True)
N = min(x.shape[0], r.shape[0])
num = x[:N] / r[:N, None]
denum = 1 / r[:N, None]
for i in range(k):
num = step(num, i)
denum = step(denum, i)
h = num / denum
return torch.tensor(h.ravel(), dtype=xt.dtype, device=xt.device)
def e_algorithm(xt, k, type="t", U=None, objective=None):
""" Scalar E-algorithm performed separately for each input dimension
:param xt: k+2 x n matrix for type = "t", "u"; k+3 x n matrix for type = "v"; sequence elements
:param k: value of k for the algorithm
:param type: remainder estimate, either "t", "u" or "v"
"""
x = xt.cpu().numpy()
r = levin_remainder(x, type, False)
N = min(x.shape[0], r.shape[0])
e = x[:N]
pow_ = (np.arange(N)[None, :] + 1) ** np.arange(k)[:, None]
g = r[None, :N, :] / pow_[:, :, None]
for i in range(k):
e = e[:-1] - safe_div(g[i, :-1] * np.diff(e, axis=0), np.diff(g[i], axis=0))
if i < k - 1:
            g = g[:, :-1] - safe_div(g[i, :-1] * np.diff(g, axis=1), np.diff(g[i], axis=0))
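# -----------------------------------------------------------------------------
# Editor's sketch (not part of the original module): accelerating a slowly
# converging linear fixed-point iteration x <- A x + b with RRE. The 30x30
# system and the number of stored iterates are arbitrary illustrative choices.
def _rre_example():
    torch.manual_seed(0)
    n, k = 30, 8
    A = 0.9 * torch.eye(n) + 0.01 * torch.randn(n, n)
    b = torch.randn(n)
    x_star = torch.linalg.solve(torch.eye(n) - A, b)  # exact fixed point

    iterates = [torch.zeros(n)]
    for _ in range(k):
        iterates.append(A @ iterates[-1] + b)

    U = difference_matrix(iterates)  # (n, k) matrix of consecutive differences
    X = torch.stack(iterates[:-1])   # (k, n) matrix of sequence elements
    x_rre = RRE(X, U)
    print("last iterate error:", torch.norm(iterates[-1] - x_star).item())
    print("RRE estimate error:", torch.norm(x_rre - x_star).item())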
# Copyright (c) 2021-2022, NVIDIA CORPORATION. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import re
import numpy as np
import pytest
from nvflare.apis.dxo import DXO, DataKind, MetaKey, from_shareable
from nvflare.apis.fl_constant import ReservedKey
from nvflare.apis.fl_context import FLContext
from nvflare.apis.shareable import Shareable
from nvflare.app_common.aggregators.intime_accumulate_model_aggregator import InTimeAccumulateWeightedAggregator
from nvflare.app_common.app_constant import AppConstants
class TestInTimeAccumulateWeightedAggregator:
@pytest.mark.parametrize(
"exclude_vars,aggregation_weights,expected_data_kind,error,error_msg",
[
(
2.0,
None,
DataKind.WEIGHT_DIFF,
ValueError,
f"exclude_vars = 2.0 should be a regex string but got {type(2.0)}.",
),
(
{"dxo1": 3.0, "dxo2": ""},
None,
{"dxo1": DataKind.WEIGHT_DIFF, "dxo2": DataKind.WEIGHT_DIFF},
ValueError,
f"exclude_vars[dxo1] = 3.0 should be a regex string but got {type(3.0)}.",
),
(None, None, DataKind.ANALYTIC, ValueError, "expected_data_kind = ANALYTIC is not WEIGHT_DIFF or WEIGHTS"),
(
None,
None,
{"dxo1": DataKind.WEIGHT_DIFF, "dxo2": DataKind.ANALYTIC},
ValueError,
"expected_data_kind[dxo2] = ANALYTIC is not WEIGHT_DIFF or WEIGHTS",
),
(
None,
{"dxo1": {"client_0": 1.0, "client_1": 2.0}},
{"dxo1": DataKind.WEIGHT_DIFF, "dxo2": DataKind.WEIGHT_DIFF},
ValueError,
"A dict of dict aggregation_weights should specify aggregation_weights "
"for every key in expected_data_kind. But missed these keys: ['dxo2']",
),
(
{"dxo2": ""},
None,
{"dxo1": DataKind.WEIGHT_DIFF, "dxo2": DataKind.WEIGHT_DIFF},
ValueError,
"A dict exclude_vars should specify exclude_vars for every key in expected_data_kind. "
"But missed these keys: ['dxo1']",
),
],
)
def test_invalid_create(self, exclude_vars, aggregation_weights, expected_data_kind, error, error_msg):
with pytest.raises(error, match=re.escape(error_msg)):
_ = InTimeAccumulateWeightedAggregator(
exclude_vars=exclude_vars,
aggregation_weights=aggregation_weights,
expected_data_kind=expected_data_kind,
)
@pytest.mark.parametrize(
"exclude_vars,aggregation_weights,expected_data_kind,expected_object",
[
(
None,
None,
DataKind.WEIGHTS,
InTimeAccumulateWeightedAggregator(
exclude_vars=None, aggregation_weights=None, expected_data_kind=DataKind.WEIGHTS
),
),
(
"hello",
None,
{"dxo1": DataKind.WEIGHTS, "dxo2": DataKind.WEIGHT_DIFF},
InTimeAccumulateWeightedAggregator(
exclude_vars={"dxo1": "hello", "dxo2": "hello"},
aggregation_weights=None,
expected_data_kind={"dxo1": DataKind.WEIGHTS, "dxo2": DataKind.WEIGHT_DIFF},
),
),
(
None,
{"client_0": 1.0, "client_1": 2.0},
{"dxo1": DataKind.WEIGHTS, "dxo2": DataKind.WEIGHT_DIFF},
InTimeAccumulateWeightedAggregator(
exclude_vars=None,
aggregation_weights={
"dxo1": {"client_0": 1.0, "client_1": 2.0},
"dxo2": {"client_0": 1.0, "client_1": 2.0},
},
expected_data_kind={"dxo1": DataKind.WEIGHTS, "dxo2": DataKind.WEIGHT_DIFF},
),
),
],
)
def test_create(self, exclude_vars, aggregation_weights, expected_data_kind, expected_object):
result = InTimeAccumulateWeightedAggregator(
exclude_vars=exclude_vars, aggregation_weights=aggregation_weights, expected_data_kind=expected_data_kind
)
assert result.exclude_vars == expected_object.exclude_vars
assert result.aggregation_weights == expected_object.aggregation_weights
assert result.expected_data_kind == expected_object.expected_data_kind
@pytest.mark.parametrize("current_round,contribution_round,expected", [(1, 1, True), (2, 1, False)])
def test_accept(self, current_round, contribution_round, expected):
aggregation_weights = {f"client_{i}": random.random() for i in range(2)}
agg = InTimeAccumulateWeightedAggregator(aggregation_weights=aggregation_weights)
client_name = "client_0"
iter_number = 1
weights = np.random.random(4)
fl_ctx = FLContext()
s = Shareable()
s.set_peer_props({ReservedKey.IDENTITY_NAME: client_name})
s.set_header(AppConstants.CONTRIBUTION_ROUND, contribution_round)
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, current_round)
dxo = DXO(
DataKind.WEIGHT_DIFF,
data={"var1": weights},
meta={
MetaKey.NUM_STEPS_CURRENT_ROUND: iter_number,
},
)
assert agg.accept(dxo.update_shareable(s), fl_ctx) == expected
@pytest.mark.parametrize(
"received,expected",
[
(
{"client1": {"weight": 0.5, "iter_number": 1, "aggr_data": {"var1": np.array([2.0, 3.0, 1.1, 0.1])}}},
{"var1": np.array([2.0, 3.0, 1.1, 0.1])},
),
(
{"client1": {"weight": 1.0, "iter_number": 1, "aggr_data": {"var1": np.array([2.0, 3.0, 1.1, 0.1])}}},
{"var1": np.array([2.0, 3.0, 1.1, 0.1])},
),
(
{
"client1": {"weight": 0.5, "iter_number": 1, "aggr_data": {"var1": np.array([2.0, 3.0, 1.1, 0.1])}},
"client2": {"weight": 1.0, "iter_number": 1, "aggr_data": {"var1": np.array([1.0, 1.0, 2.1, 0.5])}},
},
{
"var1": np.array(
[
(0.5 * 2.0 + 1.0 * 1.0) / (0.5 + 1),
(0.5 * 3.0 + 1.0 * 1.0) / (0.5 + 1),
(0.5 * 1.1 + 1.0 * 2.1) / (0.5 + 1),
(0.5 * 0.1 + 1.0 * 0.5) / (0.5 + 1),
]
)
},
),
(
{
"client1": {"weight": 1.0, "iter_number": 2, "aggr_data": {"var1": np.array([2.0, 3.0, 1.1, 0.1])}},
"client2": {"weight": 1.0, "iter_number": 4, "aggr_data": {"var1": np.array([1.0, 1.0, 2.1, 0.5])}},
},
{
"var1": np.array(
[
(2 * 2.0 + 4 * 1.0) / (2 + 4),
(2 * 3.0 + 4 * 1.0) / (2 + 4),
(2 * 1.1 + 4 * 2.1) / (2 + 4),
(2 * 0.1 + 4 * 0.5) / (2 + 4),
]
)
},
),
],
)
def test_aggregate(self, received, expected):
aggregation_weights = {k: v["weight"] for k, v in received.items()}
agg = InTimeAccumulateWeightedAggregator(aggregation_weights=aggregation_weights)
fl_ctx = FLContext()
fl_ctx.set_prop(AppConstants.CURRENT_ROUND, 0)
for k, v in received.items():
dxo = DXO(
DataKind.WEIGHT_DIFF,
data=v["aggr_data"],
meta={
MetaKey.NUM_STEPS_CURRENT_ROUND: v["iter_number"],
},
)
s = Shareable()
s.set_peer_props({ReservedKey.IDENTITY_NAME: k})
s.set_header(AppConstants.CONTRIBUTION_ROUND, 0)
agg.accept(dxo.update_shareable(s), fl_ctx)
result = agg.aggregate(fl_ctx)
        np.testing.assert_allclose(result["DXO"]["data"]["var1"], expected["var1"])
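# -----------------------------------------------------------------------------
# Editor's sketch (mirrors the expected values above, not the aggregator's
# internals): each client's contribution is weighted by
# aggregation_weight * NUM_STEPS_CURRENT_ROUND before averaging.
def _expected_weighted_average(contributions):
    """contributions: list of (aggregation_weight, num_steps, ndarray) tuples."""
    total = np.zeros_like(contributions[0][2], dtype=np.float64)
    norm = 0.0
    for weight, steps, data in contributions:
        total += weight * steps * data
        norm += weight * steps
    return total / norm

# For the last parametrized case of test_aggregate:
# _expected_weighted_average([(1.0, 2, np.array([2.0, 3.0, 1.1, 0.1])),
#                             (1.0, 4, np.array([1.0, 1.0, 2.1, 0.5]))])
# gives approximately [1.333, 1.667, 1.767, 0.367], matching the expected "var1".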
''' Code partially copied from python_speech_features package
'''
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from . import audio_utils as sigproc
import os
import numpy as np
import logging
from scipy import signal
from scipy.fftpack import dct
import librosa
class Feature(object):
""" Base class for features calculation
All children class must implement __str__ and _call function.
# Arguments
fs: sampling frequency of audio signal. If the audio has not this fs,
it will be resampled
eps
"""
def __init__(self, fs=16e3, eps=1e-8, stride=1, num_context=0,
mean_norm=True, var_norm=True):
self.fs = fs
self.eps = eps
self.mean_norm = mean_norm
self.var_norm = var_norm
self.stride = stride
self.num_context = num_context
self._logger = logging.getLogger('%s.%s' % (__name__,
self.__class__.__name__))
def __call__(self, audio):
""" This method load the audio and do the transformation of signal
# Inputs
audio:
if audio is a string and the file exists, the wave file will
be loaded and resampled (if necessary) to fs
if audio is a ndarray or list and is not empty, it will make
the transformation without any resampling
# Exception
TypeError if audio were not recognized
"""
        if isinstance(audio, str) and os.path.isfile(audio):
            audio, current_fs = librosa.core.load(audio, sr=None)
            audio = librosa.core.resample(audio, current_fs, self.fs)
audio = librosa.core.resample(audio, current_fs, self.fs)
feats = self._call(audio)
elif type(audio) in (np.ndarray, list) and len(audio) > 1:
feats = self._call(audio)
else:
TypeError("audio type is not support")
return self._standarize(self._postprocessing(feats))
def _call(self, data):
raise NotImplementedError("__call__ must be overrided")
def _standarize(self, feats):
if self.mean_norm:
feats -= np.mean(feats, axis=0, keepdims=True)
if self.var_norm:
feats /= (np.std(feats, axis=0, keepdims=True) + self.eps)
return feats
def _postprocessing(self, feats):
# Code adapted from
# https://github.com/mozilla/DeepSpeech/blob/master/util/audio.py
# We only keep every second feature (BiRNN stride = 2)
feats = feats[::self.stride]
if self.num_context == 0:
return feats
num_feats = feats.shape[1]
train_inputs = np.array([], np.float32)
train_inputs.resize((feats.shape[0],
num_feats + 2*num_feats*self.num_context))
# Prepare pre-fix post fix context
# (TODO: Fill empty_mfcc with MCFF of silence)
empty_mfcc = np.array([])
empty_mfcc.resize((num_feats))
# Prepare train_inputs with past and future contexts
time_slices = range(train_inputs.shape[0])
context_past_min = time_slices[0] + self.num_context
context_future_max = time_slices[-1] - self.num_context
for time_slice in time_slices:
# Reminder: array[start:stop:step]
# slices from indice |start| up to |stop| (not included), every
# |step|
# Pick up to self.num_context time slices in the past, and complete
# with empty
# mfcc features
need_empty_past = max(0, (context_past_min - time_slice))
empty_source_past = list(empty_mfcc for empty_slots
in range(need_empty_past))
data_source_past = feats[max(0, time_slice -
self.num_context):time_slice]
assert(len(empty_source_past) +
len(data_source_past) == self.num_context)
# Pick up to self.num_context time slices in the future, and
# complete with empty
# mfcc features
need_empty_future = max(0, (time_slice - context_future_max))
empty_source_future = list(empty_mfcc
for empty_slots in
range(need_empty_future))
data_source_future = feats[time_slice + 1:time_slice +
self.num_context + 1]
assert(len(empty_source_future) +
len(data_source_future) == self.num_context)
if need_empty_past:
past = np.concatenate((empty_source_past, data_source_past))
else:
past = data_source_past
if need_empty_future:
future = np.concatenate((data_source_future,
empty_source_future))
else:
future = data_source_future
past = np.reshape(past, self.num_context*num_feats)
now = feats[time_slice]
future = np.reshape(future, self.num_context*num_feats)
train_inputs[time_slice] = np.concatenate((past, now, future))
assert(len(train_inputs[time_slice])
== num_feats + 2*num_feats*self.num_context)
self._num_feats = num_feats + 2*num_feats*self.num_context
return train_inputs
def __str__(self):
raise NotImplementedError("__str__ must be overrided")
@property
def num_feats(self):
return self._num_feats
class FBank(Feature):
"""Compute Mel-filterbank energy features from an audio signal.
# Arguments
win_len: the length of the analysis window in seconds.
Default is 0.025s (25 milliseconds)
win_step: the step between successive windows in seconds.
Default is 0.01s (10 milliseconds)
num_filt: the number of filters in the filterbank, default 40.
nfft: the FFT size. Default is 512.
low_freq: lowest band edge of mel filters in Hz.
Default is 20.
high_freq: highest band edge of mel filters in Hz.
Default is 7800
pre_emph: apply preemphasis filter with preemph as coefficient.
0 is no filter. Default is 0.97.
win_func: the analysis window to apply to each frame.
By default hamming window is applied.
"""
def __init__(self, win_len=0.025, win_step=0.01,
num_filt=40, nfft=512, low_freq=20, high_freq=7800,
pre_emph=0.97, win_fun=signal.hamming, **kwargs):
super(FBank, self).__init__(**kwargs)
if high_freq > self.fs / 2:
raise ValueError("high_freq must be less or equal than fs/2")
self.win_len = win_len
self.win_step = win_step
self.num_filt = num_filt
self.nfft = nfft
self.low_freq = low_freq
self.high_freq = high_freq or self.fs / 2
self.pre_emph = pre_emph
self.win_fun = win_fun
self._filterbanks = self._get_filterbanks()
self._num_feats = self.num_filt
@property
def mel_points(self):
return np.linspace(self._low_mel, self._high_mel, self.num_filt + 2)
@property
def low_freq(self):
return self._low_freq
@low_freq.setter
def low_freq(self, value):
self._low_mel = self._hz2mel(value)
self._low_freq = value
@property
def high_freq(self):
return self._high_freq
@high_freq.setter
def high_freq(self, value):
self._high_mel = self._hz2mel(value)
self._high_freq = value
def _call(self, signal):
"""Compute Mel-filterbank energy features from an audio signal.
:param signal: the audio signal from which to compute features. Should
be an N*1 array
Returns:
2 values. The first is a numpy array of size (NUMFRAMES by nfilt)
containing features. Each row holds 1 feature vector. The
second return value is the energy in each frame (total energy,
unwindowed)
"""
signal = sigproc.preemphasis(signal, self.pre_emph)
frames = sigproc.framesig(signal,
self.win_len * self.fs,
self.win_step * self.fs,
self.win_fun)
pspec = sigproc.powspec(frames, self.nfft)
# this stores the total energy in each frame
energy = np.sum(pspec, 1)
# if energy is zero, we get problems with log
        energy = np.where(energy == 0, np.finfo(float).eps, energy)
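# -----------------------------------------------------------------------------
# Editor's sketch (assumption): the _hz2mel helper referenced by the low_freq /
# high_freq setters above is not shown in this excerpt; the standard HTK-style
# mel conversions used by python_speech_features look like this.
def _hz2mel_standard(hz):
    return 2595.0 * np.log10(1.0 + hz / 700.0)

def _mel2hz_standard(mel):
    return 700.0 * (10.0 ** (mel / 2595.0) - 1.0)

# Example: the num_filt + 2 = 42 mel points that FBank.mel_points spans for the
# defaults low_freq=20, high_freq=7800, num_filt=40, converted back to Hz:
# hz_points = _mel2hz_standard(np.linspace(_hz2mel_standard(20.0),
#                                          _hz2mel_standard(7800.0), 40 + 2))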
import sys, os, glob
import argparse
import time
import random
from copy import copy, deepcopy
from termcolor import colored, cprint
import numpy as np
from sklearn.metrics import precision_recall_fscore_support
from sklearn.metrics import roc_auc_score
from sklearn.metrics import precision_recall_curve
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.autograd import Variable
import torch.utils.data as data
from torch.utils.tensorboard import SummaryWriter
sys.path.append('../')
from msda_src.model_utils import get_model_class, get_critic_class
from msda_src.model_utils.domain_critic import ClassificationD, MMD, CoralD, WassersteinD
from msda_src.utils.io import AmazonDataset, AmazonDomainDataset
from msda_src.utils.io import say
from msda_src.utils.op import softmax
from dataset import ProcessedCNNInputDataset, OAGDomainDataset
from models.cnn import CNNMatchModel
from sklearn.metrics import confusion_matrix
from sklearn.manifold import TSNE
from utils import settings
import warnings
warnings.filterwarnings("ignore")
argparser = argparse.ArgumentParser(description="Learning to Adapt from Multi-Source Domains")
argparser.add_argument("--cuda", action="store_true")
argparser.add_argument("--train", type=str, default="author,paper,aff",
help="multi-source domains for training, separated with (,)")
argparser.add_argument("--test", type=str, default="venue",
help="target domain for testing")
argparser.add_argument("--eval_only", action="store_true")
argparser.add_argument("--critic", type=str, default="mmd")
argparser.add_argument("--batch_size", type=int, default=32)
argparser.add_argument("--batch_size_d", type=int, default=32)
argparser.add_argument("--max_epoch", type=int, default=500)
argparser.add_argument("--lr", type=float, default=1e-4)
argparser.add_argument("--lr_d", type=float, default=1e-4)
argparser.add_argument("--lambda_critic", type=float, default=0)
argparser.add_argument("--lambda_gp", type=float, default=0)
argparser.add_argument("--lambda_moe", type=float, default=1)
argparser.add_argument("--lambda_mtl", type=float, default=0.3)
argparser.add_argument("--lambda_all", type=float, default=0)
argparser.add_argument("--lambda_dst", type=float, default=0)
argparser.add_argument("--m_rank", type=int, default=10)
argparser.add_argument("--lambda_entropy", type=float, default=0.0)
argparser.add_argument("--load_model", type=str)
argparser.add_argument("--save_model", type=str)
argparser.add_argument("--metric", type=str, default="biaffine",
help="mahalanobis: mahalanobis distance; biaffine: biaffine distance")
argparser.add_argument('--no-cuda', action='store_true', default=False, help='Disables CUDA training.')
argparser.add_argument('--matrix-size1', type=int, default=7, help='Matrix size 1.')
argparser.add_argument('--matrix-size2', type=int, default=4, help='Matrix size 2.')
argparser.add_argument('--mat1-channel1', type=int, default=8, help='Matrix1 number of channels1.')
argparser.add_argument('--mat1-kernel-size1', type=int, default=3, help='Matrix1 kernel size1.')
argparser.add_argument('--mat1-channel2', type=int, default=16, help='Matrix1 number of channel2.')
argparser.add_argument('--mat1-kernel-size2', type=int, default=2, help='Matrix1 kernel size2.')
argparser.add_argument('--mat1-hidden', type=int, default=512, help='Matrix1 hidden dim.')
argparser.add_argument('--mat2-channel1', type=int, default=8, help='Matrix2 number of channels1.')
argparser.add_argument('--mat2-kernel-size1', type=int, default=2, help='Matrix2 kernel size1.')
argparser.add_argument('--mat2-hidden', type=int, default=512, help='Matrix2 hidden dim')
argparser.add_argument('--build-index-window', type=int, default=5, help='Matrix2 hidden dim')
argparser.add_argument('--seed', type=int, default=42, help='Random seed.')
argparser.add_argument('--seed-delta', type=int, default=0, help='Random seed.')
argparser.add_argument('--initial-accumulator-value', type=float, default=0.01, help='Initial accumulator value.')
argparser.add_argument('--weight-decay', type=float, default=1e-3,
help='Weight decay (L2 loss on parameters).')
# argparser.add_argument('--dropout', type=float, default=0.2,
# help='Dropout rate (1 - keep probability).')
argparser.add_argument('--attn-dropout', type=float, default=0.,
help='Dropout rate (1 - keep probability).')
argparser.add_argument('--check-point', type=int, default=2, help="Check point")
argparser.add_argument('--shuffle', action='store_true', default=True, help="Shuffle dataset")
args, _ = argparser.parse_known_args()
writer = SummaryWriter('runs/{}_mix_moe_{}'.format(args.test, args.seed_delta))
class WeightScaler(nn.Module):
def __init__(self):
super(WeightScaler, self).__init__()
self.multp = nn.Parameter(torch.rand(1)) # requires_grad is True by default for Parameter
class HLoss(nn.Module):
def __init__(self):
super(HLoss, self).__init__()
def forward(self, x):
# b = F.softmax(x, dim=1) * F.log_softmax(x, dim=1)
b = x * torch.log(x)
b = -1.0 * b.sum()
return b
class L1Norm(nn.Module):
def __init__(self):
super(L1Norm, self).__init__()
def forward(self, x):
return torch.norm(x, 1, 1).sum()
def domain_encoding(loaders, args, encoders):
''' Compute the encoding of domains, each domain is represented as its mean vector
Note: the covariance inverse matrix is learned
'''
statistics = []
for load_i, loader in enumerate(loaders):
ind = 0
labels = None
S = []
for batch1, batch2, label in loader:
if args.cuda:
batch1 = Variable(batch1.cuda())
batch2 = Variable(batch2.cuda())
_, s_out = encoders[load_i](batch1, batch2)
# print("s_out", s_out)
S.append(s_out)
if ind == 0:
labels = label
else:
labels = torch.cat((labels, label), dim=0)
ind += 1
S = torch.cat(S, 0)
# print("S", S)
neg_index = ((labels == 0).nonzero())
pos_index = ((labels == 1).nonzero())
neg_index = Variable(neg_index.expand(neg_index.size(0), S.size(1)))
pos_index = Variable(pos_index.expand(pos_index.size(0), S.size(1)))
if args.cuda:
pos_index = pos_index.cuda()
neg_index = neg_index.cuda()
pos_S = torch.gather(S, 0, pos_index)
neg_S = torch.gather(S, 0, neg_index)
pos_mu_S = torch.mean(pos_S, dim=0, keepdim=True)
neg_mu_S = torch.mean(neg_S, dim=0, keepdim=True)
mu_S = torch.mean(S, dim=0, keepdim=True)
# print("mu_s", mu_S)
# print("pos_mu_s", pos_mu_S)
# print("neg_mu_s", neg_mu_S)
statistics.append((mu_S, pos_mu_S, neg_mu_S))
return statistics
TEMPERATURE = 4
def mahalanobis_metric_fast(p, mu, U, pos_mu, pos_U, neg_mu, neg_U):
# covi = (cov + I).inverse()
# print("p", type(p), p)
# print("p", p.shape, p)
# print("mu", mu.shape, mu)
#
# print("p - mu", p - mu)
# print("U", U)
mahalanobis_distances = (p - mu).mm(U.mm(U.t())).mm((p - mu).t())
pos_mahalanobis_distance = (p - pos_mu).mm(pos_U.mm(pos_U.t())).mm((p - pos_mu).t()).diag().sqrt().data
neg_mahalanobis_distance = (p - neg_mu).mm(neg_U.mm(neg_U.t())).mm((p - neg_mu).t()).diag().sqrt().data
mahalanobis_ratio1 = pos_mahalanobis_distance - neg_mahalanobis_distance
mahalanobis_ratio2 = neg_mahalanobis_distance - pos_mahalanobis_distance
max_ratio = torch.max(mahalanobis_ratio1, mahalanobis_ratio2)
return max_ratio # / TEMPERATURE
# return mahalanobis_distances.diag().sqrt().data
def mahalanobis_metric(p, S, L, U, pos_U, neg_U, args, encoder=None):
r''' Compute the mahalanobis distance between the encoding of a sample (p) and a set (S).
Args:
p: tensor (batch_size, dim), a batch of samples
S: tensor (size, dim), a domain which contains a set of samples
encoder: a module used for encoding p and S
Return:
mahalanobis_distances: tensor (batch_size)
'''
if encoder is not None:
p = encoder(p) # (batch_size, dim)
S = encoder(S) # (size, dim)
neg_index = ((L == 0).nonzero())
pos_index = ((L == 1).nonzero())
neg_index = neg_index.expand(neg_index.size(0), S.data.size(1))
pos_index = pos_index.expand(pos_index.size(0), S.data.size(1))
neg_S = torch.gather(S, 0, neg_index)
pos_S = torch.gather(S, 0, pos_index)
neg_mu = torch.mean(neg_S, dim=0, keepdim=True)
pos_mu = torch.mean(pos_S, dim=0, keepdim=True)
pos_mahalanobis_distance = (p - pos_mu).mm(pos_U.mm(pos_U.t())).mm((p - pos_mu).t()).diag().sqrt()
neg_mahalanobis_distance = (p - neg_mu).mm(neg_U.mm(neg_U.t())).mm((p - neg_mu).t()).diag().sqrt()
mahalanobis_ratio1 = pos_mahalanobis_distance - neg_mahalanobis_distance
mahalanobis_ratio2 = neg_mahalanobis_distance - pos_mahalanobis_distance
max_ratio = torch.max(mahalanobis_ratio1, mahalanobis_ratio2)
return max_ratio.clamp(0.01, 2) # / TEMPERATURE # .clamp(0.001, 1)
# mu_S = torch.mean(S, dim=0, keepdim=True) # (1, dim)
# mahalanobis_distances = (p - mu_S).mm(U.mm(U.t())).mm((p - mu_S).t())
# return mahalanobis_distances.diag().sqrt().clamp(0.01, 2)
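# Added sketch (not in the original): the quantity above is a low-rank
# Mahalanobis distance, sqrt(diag((p - mu) U U^T (p - mu)^T)), computed per
# sample in the batch. Shapes below are hypothetical.
def _mahalanobis_distance_example():
    torch.manual_seed(0)
    p = torch.randn(4, 8)    # batch of 4 encodings, dim 8
    mu = torch.randn(1, 8)   # domain mean vector
    U = torch.randn(8, 3)    # learned low-rank factor (cf. --m_rank)
    diff = p - mu
    return diff.mm(U.mm(U.t())).mm(diff.t()).diag().sqrt()  # shape (4,)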
def biaffine_metric_fast(p, mu, U):
biaffine_distances = p.mm(U).mm(mu.t())
return biaffine_distances.squeeze(1).data
def biaffine_metric(p, S, U, W, V, args, encoder=None):
''' Compute the biaffine distance between the encoding of a sample (p) and a set (S).
Args:
p: tensor (batch_size, dim), a batch of samples
U: matrix (dim, dim)
S: tensor (size, dim), a domain which contains a set of samples
encoder: a module used for encoding p and S
Return:
biaffine_distance: tensor (batch_size)
'''
if encoder is not None:
p = encoder(p)
S = encoder(S)
mu_S = torch.mean(S, dim=0, keepdim=True)
biaffine_distances = p.mm(U).mm(mu_S.t()) + p.mm(W) + mu_S.mm(V) # extra components
return biaffine_distances.squeeze(1).clamp(-10, 10)
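# Added sketch (not in the original): the biaffine score of a sample batch p
# against a domain mean mu_S is p U mu_S^T + p W + mu_S V, clamped to [-10, 10].
# Shapes below are hypothetical.
def _biaffine_score_example():
    torch.manual_seed(0)
    p = torch.randn(4, 8)
    mu_S = torch.randn(1, 8)
    U, W, V = torch.randn(8, 8), torch.randn(8, 1), torch.randn(8, 1)
    score = p.mm(U).mm(mu_S.t()) + p.mm(W) + mu_S.mm(V)
    return score.squeeze(1).clamp(-10, 10)  # shape (4,)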
DATA_DIR = "../../msda-data/amazon/chen12"
def train_epoch(iter_cnt, encoders, classifiers, critic, mats, data_loaders, args, optim_model, epoch):
encoders, encoder_dst = encoders
classifiers, classifier_dst, classifier_mix = classifiers
    # map() is lazy in Python 3, so call .train() explicitly on every module
    for m in encoders + [encoder_dst, classifier_dst, critic, classifier_mix] + classifiers:
        m.train()
train_loaders, train_loader_dst, unl_loader, valid_loader = data_loaders
dup_train_loaders = deepcopy(train_loaders)
# mtl_criterion = nn.CrossEntropyLoss()
mtl_criterion = nn.NLLLoss()
moe_criterion = nn.NLLLoss() # with log_softmax separated
kl_criterion = nn.MSELoss()
entropy_criterion = HLoss()
if args.metric == "biaffine":
metric = biaffine_metric
Us, Ws, Vs = mats
else:
metric = mahalanobis_metric
Us, Ps, Ns = mats
loss_total = 0
total = 0
for batches, batches_dst, unl_batch in zip(zip(*train_loaders), train_loader_dst, unl_loader):
train_batches1, train_batches2, train_labels = zip(*batches)
# print("train batches1", train_labels[0].size())
# print("train batches2", train_batches2)
# print("train labels", train_labels)
unl_critic_batch1, unl_critic_batch2, unl_critic_label = unl_batch
# print("unl", unl_critic_batch1)
batches1_dst, batches2_dst, labels_dst = batches_dst
# print("batches1_dst", batches1_dst)
# print("batches2_dst", batches2_dst)
total += len(batches1_dst)
iter_cnt += 1
if args.cuda:
train_batches1 = [batch.cuda() for batch in train_batches1]
train_batches2 = [batch.cuda() for batch in train_batches2]
train_labels = [label.cuda() for label in train_labels]
batches1_dst = batches1_dst.cuda()
batches2_dst = batches2_dst.cuda()
labels_dst = labels_dst.cuda()
unl_critic_batch1 = unl_critic_batch1.cuda()
unl_critic_batch2 = unl_critic_batch2.cuda()
unl_critic_label = unl_critic_label.cuda()
# train_batches1 = [Variable(batch) for batch in train_batches1]
# train_batches2 = [Variable(batch) for batch in train_batches2]
# train_labels = [Variable(label) for label in train_labels]
# unl_critic_batch1 = Variable(unl_critic_batch1)
# unl_critic_batch2 = Variable(unl_critic_batch2)
# unl_critic_label = Variable(unl_critic_label)
optim_model.zero_grad()
loss_train_dst = []
loss_mtl = []
loss_moe = []
loss_kl = []
loss_entropy = []
loss_dan = []
loss_all = []
ms_outputs = [] # (n_sources, n_classifiers)
hiddens = []
hidden_corresponding_labels = []
# labels = []
_, hidden_dst = encoder_dst(batches1_dst, batches2_dst)
cur_output_dst = classifier_dst(hidden_dst)
cur_output_dst_mem = torch.softmax(cur_output_dst, dim=1)
cur_output_dst = torch.log(cur_output_dst_mem)
loss_train_dst.append(mtl_criterion(cur_output_dst, labels_dst))
outputs_dst_transfer = []
for i in range(len(train_batches1)):
_, cur_hidden = encoders[i](batches1_dst, batches2_dst)
cur_output = classifiers[i](cur_hidden)
outputs_dst_transfer.append(cur_output)
for i, (batch1, batch2, label) in enumerate(zip(train_batches1, train_batches2, train_labels)): # source i
_, hidden = encoders[i](batch1, batch2)
outputs = []
# create output matrix:
# - (i, j) indicates the output of i'th source batch using j'th classifier
# print("hidden", hidden)
# raise
hiddens.append(hidden)
for classifier in classifiers:
output = classifier(hidden)
output = torch.log_softmax(output, dim=1)
# print("output", output)
outputs.append(output)
ms_outputs.append(outputs)
hidden_corresponding_labels.append(label)
# multi-task loss
# print("ms & label", ms_outputs[i][i], label)
loss_mtl.append(mtl_criterion(ms_outputs[i][i], label))
# labels.append(label)
if args.lambda_critic > 0:
# critic_batch = torch.cat([batch, unl_critic_batch])
critic_label = torch.cat([1 - unl_critic_label, unl_critic_label])
# critic_label = torch.cat([1 - unl_critic_label] * len(train_batches) + [unl_critic_label])
if isinstance(critic, ClassificationD):
critic_output = critic(torch.cat(hidden, encoders[i](unl_critic_batch1, unl_critic_batch2)))
loss_dan.append(critic.compute_loss(critic_output, critic_label))
else:
critic_output = critic(hidden, encoders[i](unl_critic_batch1, unl_critic_batch2))
loss_dan.append(critic_output)
# critic_output = critic(torch.cat(hiddens), encoder(unl_critic_batch))
# loss_dan = critic_output
else:
loss_dan = Variable(torch.FloatTensor([0]))
# assert (len(outputs) == len(outputs[0]))
source_ids = range(len(train_batches1))
# for i in source_ids:
# support_ids = [x for x in source_ids if x != i] # experts
support_ids = [x for x in source_ids] # experts
# i = 0
# support_alphas = [ metric(
# hiddens[i],
# hiddens[j].detach(),
# hidden_corresponding_labels[j],
# Us[j], Ps[j], Ns[j],
# args) for j in support_ids ]
if args.metric == "biaffine":
source_alphas = [metric(hidden_dst,
hiddens[j].detach(),
Us[0], Ws[0], Vs[0], # for biaffine metric, we use a unified matrix
args) for j in source_ids]
else:
source_alphas = [metric(hidden_dst, # i^th source
hiddens[j].detach(),
hidden_corresponding_labels[j],
Us[j], Ps[j], Ns[j],
args) for j in source_ids]
support_alphas = [source_alphas[x] for x in support_ids]
# print torch.cat([ x.unsqueeze(1) for x in support_alphas ], 1)
support_alphas = softmax(support_alphas)
# print("support_alphas after softmax", support_alphas)
# meta-supervision: KL loss over \alpha and real source
source_alphas = softmax(source_alphas) # [ 32, 32, 32 ]
source_labels = [torch.FloatTensor([x == len(train_batches1)]) for x in source_ids] # one-hot
if args.cuda:
source_alphas = [alpha.cuda() for alpha in source_alphas]
source_labels = [label.cuda() for label in source_labels]
source_labels = Variable(torch.stack(source_labels, dim=0)) # 3*1
# print("source labels", source_labels)
source_alphas = torch.stack(source_alphas, dim=0)
# print("source_alpha after stack", source_alphas)
source_labels = source_labels.expand_as(source_alphas).permute(1, 0)
source_alphas = source_alphas.permute(1, 0)
loss_kl.append(kl_criterion(source_alphas, source_labels))
# entropy loss over \alpha
# entropy_loss = entropy_criterion(torch.stack(support_alphas, dim=0).permute(1, 0))
# print source_alphas
loss_entropy.append(entropy_criterion(source_alphas))
output_moe_i = sum([alpha.unsqueeze(1).repeat(1, 2) * F.softmax(outputs_dst_transfer[id], dim=1) \
for alpha, id in zip(support_alphas, support_ids)])
# output_moe_full = sum([ alpha.unsqueeze(1).repeat(1, 2) * F.softmax(ms_outputs[i][id], dim=1) \
# for alpha, id in zip(full_alphas, source_ids) ])
# print("output_moe_i & labels", output_moe_i, train_labels[i])
loss_moe.append(moe_criterion(torch.log(output_moe_i), labels_dst))
# loss_moe.append(moe_criterion(torch.log(output_moe_full), train_labels[i]))
# print("labels_dst", labels_dst)
# upper_out = classifier_mix(torch.cat((cur_output_dst_mem, output_moe_i), dim=1))
upper_out = cur_output_dst_mem + classifier_mix.multp * output_moe_i
loss_all = mtl_criterion(torch.log_softmax(upper_out, dim=1), labels_dst)
loss_train_dst = sum(loss_train_dst)
loss_mtl = sum(loss_mtl)
# print("loss mtl", loss_mtl)
# loss_mtl = loss_mtl.mean()
loss_mtl /= len(source_ids)
loss_moe = sum(loss_moe)
# if iter_cnt < 400:
# lambda_moe = 0
# lambda_entropy = 0
# else:
lambda_moe = args.lambda_moe
lambda_entropy = args.lambda_entropy
# loss = (1 - lambda_moe) * loss_mtl + lambda_moe * loss_moe
loss = args.lambda_mtl * loss_mtl + lambda_moe * loss_moe
loss_kl = sum(loss_kl)
loss_entropy = sum(loss_entropy)
loss += args.lambda_entropy * loss_entropy
loss += loss_train_dst * args.lambda_dst
loss += loss_all * args.lambda_all
        loss_total += loss.item()  # accumulate a Python float so computation graphs are not retained across batches
if args.lambda_critic > 0:
loss_dan = sum(loss_dan)
loss += args.lambda_critic * loss_dan
loss.backward()
optim_model.step()
# print("loss entropy", loss_entropy)
# print("mats", [Us, Ps, Ns])
# for paras in task_paras:
# print(paras)
# for name, param in paras:
# if param.requires_grad:
# print(name, param.data)
# for name, param in encoder.named_parameters():
# if param.requires_grad:
# # print(name, param.data)
# print(name, param.grad)
for cls_i, classifier in enumerate(classifiers):
for name, param in classifier.named_parameters():
# print(cls_i, name, param.grad)
pass
if iter_cnt % 5 == 0:
# [(mu_i, covi_i), ...]
# domain_encs = domain_encoding(dup_train_loaders, args, encoder)
if args.metric == "biaffine":
mats = [Us, Ws, Vs]
else:
mats = [Us, Ps, Ns]
# evaluate(
# # [encoders, encoder_dst],
# # [classifiers, classifier_dst, classifier_mix],
# # mats,
# # [dup_train_loaders, valid_loader],
# # True,
# # args
# # )
# say("\r" + " " * 50)
# TODO: print train acc as well
# print("loss dan", loss_dan)
say("{} MTL loss: {:.4f}, MOE loss: {:.4f}, DAN loss: {:.4f}, "
"loss: {:.4f}\n"
# ", dev acc/oracle: {:.4f}/{:.4f}"
.format(iter_cnt,
loss_mtl.item(),
loss_moe.item(),
loss_dan.item(),
loss.item(),
# curr_dev,
# oracle_curr_dev
))
writer.add_scalar('training_loss',
loss_total / total,
epoch)
say("\n")
return iter_cnt
def compute_oracle(outputs, label, args):
''' Compute the oracle accuracy given outputs from multiple classifiers
'''
# oracle = torch.ByteTensor([0] * label.shape[0])
oracle = torch.BoolTensor([0] * label.shape[0])
if args.cuda:
oracle = oracle.cuda()
for i, output in enumerate(outputs):
pred = output.data.max(dim=1)[1]
# print("pred", pred)
# print("label", label)
oracle |= pred.eq(label.byte())
return oracle
def evaluate(epoch, encoders, classifiers, mats, loaders, return_best_thrs, args, thr=None):
''' Evaluate model using MOE
'''
encoders, encoder_dst = encoders
classifiers, classifier_dst, classifier_mix = classifiers
    # map() is lazy in Python 3, so call .eval() explicitly on every module
    for m in encoders + classifiers + [encoder_dst, classifier_dst, classifier_mix]:
        m.eval()
if args.metric == "biaffine":
Us, Ws, Vs = mats
else:
Us, Ps, Ns = mats
source_loaders, valid_loader = loaders
domain_encs = domain_encoding(source_loaders, args, encoders)
oracle_correct = 0
correct = 0
tot_cnt = 0
y_true = []
y_pred = []
y_score = []
loss = 0.
source_ids = range(len(domain_encs))
for batch1, batch2, label in valid_loader:
if args.cuda:
batch1 = batch1.cuda()
batch2 = batch2.cuda()
label = label.cuda()
# print("eval labels", label)
batch1 = Variable(batch1)
batch2 = Variable(batch2)
bs = len(batch1)
# print("bs", len(batch1))
_, hidden_dst = encoder_dst(batch1, batch2)
cur_output_dst = classifier_dst(hidden_dst)
cur_output_dst_mem = torch.softmax(cur_output_dst, dim=1)
# print("mem", cur_output_dst_mem)
cur_output_dst = torch.log(cur_output_dst_mem)
outputs_dst_transfer = []
for src_i in range(len(source_loaders)):
_, cur_hidden = encoders[src_i](batch1, batch2)
cur_output = classifiers[src_i](cur_hidden)
outputs_dst_transfer.append(cur_output)
# _, hidden = encoders[0](batch1, batch2)
# source_ids = range(len(domain_encs))
if args.metric == "biaffine":
alphas = [biaffine_metric_fast(hidden_dst, mu[0], Us[0]) \
for mu in domain_encs]
else:
alphas = [mahalanobis_metric_fast(hidden_dst, mu[0], U, mu[1], P, mu[2], N) \
for (mu, U, P, N) in zip(domain_encs, Us, Ps, Ns)]
# # alphas = [ (1 - x / sum(alphas)) for x in alphas ]
alphas = softmax(alphas)
if args.cuda:
alphas = [alpha.cuda() for alpha in alphas]
alphas = [Variable(alpha) for alpha in alphas]
#
# outputs = [F.softmax(classifier(hidden), dim=1) for classifier in classifiers]
output_moe = sum([alpha.unsqueeze(1).repeat(1, 2) * output_i \
for (alpha, output_i) in zip(alphas, outputs_dst_transfer)])
# pred = output.data.max(dim=1)[1]
# oracle_eq = compute_oracle(outputs, label, args)
# outputs = classifier_mix(torch.cat((cur_output_dst_mem, output_moe), dim=1))
outputs = cur_output_dst_mem + classifier_mix.multp * output_moe
# print("weight mix", classifier_mix.multp)
outputs_upper_logits = torch.log_softmax(outputs, dim=1)
# outputs_upper_logits = torch.log(cur_output_dst_mem)
outputs_upper_logits = output_moe
# print("outputs_upper_logits", outputs_upper_logits)
pred = outputs_upper_logits.data.max(dim=1)[1]
# oracle_eq = compute_oracle(outputs_upper_logits, label, args)
loss_batch = F.nll_loss(outputs_upper_logits, label)
loss += bs * loss_batch.item()
# if args.eval_only:
# for i in range(batch1.shape[0]):
# for j in range(len(alphas)):
# say("{:.4f}: [{:.4f}, {:.4f}], ".format(
# alphas[j].data[i], outputs[j].data[i][0], outputs[j].data[i][1])
# )
# oracle_TF = "T" if oracle_eq[i] == 1 else colored("F", 'red')
# say("gold: {}, pred: {}, oracle: {}\n".format(label[i], pred[i], oracle_TF))
# say("\n")
# print torch.cat(
# [
# torch.cat([ x.unsqueeze(1) for x in alphas ], 1),
# torch.cat([ x for x in outputs ], 1)
# ], 1
# )
y_true += label.tolist()
y_pred += pred.tolist()
# print("output", output[:, 1].data.tolist())
y_score += outputs_upper_logits[:, 1].data.tolist()
# print("cur y score", y_score)
correct += pred.eq(label).sum()
# oracle_correct += oracle_eq.sum()
tot_cnt += outputs_upper_logits.size(0)
# print("y_true", y_true)
# print("y_pred", y_pred)
if thr is not None:
print("using threshold %.4f" % thr)
y_score = np.array(y_score)
y_pred = np.zeros_like(y_score)
y_pred[y_score > thr] = 1
else:
# print("y_score", y_score)
pass
loss /= tot_cnt
prec, rec, f1, _ = precision_recall_fscore_support(y_true, y_pred, average="binary")
# print("y_score", y_score)
auc = roc_auc_score(y_true, y_score)
print("Loss: {:.4f}, AUC: {:.2f}, Prec: {:.2f}, Rec: {:.2f}, F1: {:.2f}".format(
loss, auc * 100, prec * 100, rec * 100, f1 * 100))
best_thr = None
metric = [auc, prec, rec, f1]
if return_best_thrs:
precs, recs, thrs = precision_recall_curve(y_true, y_score)
f1s = 2 * precs * recs / (precs + recs)
f1s = f1s[:-1]
thrs = thrs[~np.isnan(f1s)]
f1s = f1s[~np.isnan(f1s)]
        best_thr = thrs[np.argmax(f1s)]
"""日常的草稿"""
import numpy as np
import csv
import pickle
import pandas as pd
import matplotlib.pyplot as plt
import tensorflow as tf
import tqdm
from tensorflow.keras import Sequential, layers, losses, optimizers, models
import os
# 1. Debugging early stopping
# a = [0.2, 0.3, 0.3, 0.4, 0.4, 0.4, 0.77, 0.77, 0.77, 0.77, 0.77, 0.999]
# wait = np.zeros(len(a))
# temp = 0
# i = 0
#
# for acc in a:
# if acc >= 0.999:
#         print("The realistic training epochs is: %d" % (i + 1))  # early stopping: print the actual number of training epochs
# break
# if acc - temp <= 0.001:
# wait[i] = wait[i] + 1
# if (i >= 4) & (np.sum(wait[(i - 4):i]) >= 3):
# print("The realistic training epochs is: %d" % (i + 1))
# break
#
# i += 1
# temp = acc
# Record the accuracy of every training run at SNR = -10 dB and write it to a CSV file
# accuracy2 = {'model_1': [0, 0.214000, 0.214500, 0.215000, 0.484000, 0.619500, 0.367000, 0.741500, 0.830000, 0.620000, 0.803000,
# 0.807500, 0.813500, 0.881500, 0.866000, 0.889000, 0.871000, 0.876500, 0.887500, 0.870500, 0.886500,
# 0.888500, 0.865500, 0.866000, 0.887000, 0.879500, 0.869500],
# 'model_2': [0, 0.214500, 0.256500, 0.409500, 0.427000, 0.613500, 0.615500, 0.449500, 0.548000, 0.421000, 0.643500, 0.595500],
# 'model_3': [0, 0.321500, 0.214500, 0.219500, 0.346000, 0.407000, 0.610500, 0.615500, 0.712500, 0.690000, 0.712000,
# 0.737500, 0.650000, 0.718500, 0.734500, 0.640000, 0.672500, 0.702000, 0.746000, 0.719500],
# 'model_4': [0, 0.214500, 0.214500, 0.214500, 0.218000, 0.596500, 0.641500, 0.650000, 0.636000, 0.617500, 0.626500,
# 0.652500, 0.682500, 0.741500, 0.797000, 0.823000, 0.831500, 0.816500, 0.824500, 0.796000, 0.837500,
# 0.797000, 0.791000, 0.812000],
# 'model_5': [0, 0.383500, 0.464500, 0.395000, 0.492500, 0.614000, 0.621500, 0.619000, 0.635500, 0.629500, 0.615000,
# 0.631500, 0.632500, 0.639500, 0.627500, 0.652000, 0.634500, 0.651000, 0.613000, 0.618500, 0.625500]}
#
# with open('top5_model_accuracy.csv', mode='a+', newline='') as f:
# data = []
# a = []
# for index in accuracy2:
# data = accuracy2[index]
# a.append([data, [index]])
#
# writer = csv.writer(f)
# writer.writerow(a)
# 2. Plot figures from the CSV files
lines1 = []
files = ['optimized_model1.csv']
for file in files:
with open(file, 'r') as f:
for line in f:
temp = line.split(',')
temp[-1] = temp[-1][:-1] # remove \n
temp[0:50] = [float(i) for i in temp[0:50]] # convert accuracy to float
lines1.append(temp)
lines2 = []
files = ['optimized_model2.csv']
for file in files:
with open(file, 'r') as f:
for line in f:
temp = line.split(',')
temp[-1] = temp[-1][:-1] # remove \n
temp[0:50] = [float(i) for i in temp[0:50]] # convert accuracy to float
lines2.append(temp)
high_acc1 = []
for i in range(8):
temp = max(lines1[i][0:50])
high_acc1.append(temp)
high_acc2 = []
for i in range(8):
temp = max(lines2[i][0:50])
high_acc2.append(temp)
high_acc1.insert(0, np.nan)
high_acc2.insert(0, np.nan)
plt.plot(high_acc1, '--k', label='standard NAS')
plt.plot(high_acc2, 'r', label='balanced NAS')
plt.legend()
plt.ylim((0, 1))
plt.xlabel('B')
plt.ylabel('Accuracy')
# my_x_ticks = np.arange(1, 9, 1)
# plt.xticks(my_x_ticks)
plt.show()
#
# plt.plot(lines[0][0:50], label='model_1')
# plt.plot(lines[1][0:50], label='model_2')
# plt.plot(lines[2][0:50], label='model_3')
# plt.plot(lines[3][0:50], label='model_4')
# plt.plot(lines[4][0:50], label='model_5')
# plt.legend()
# plt.show()
# 3. Count the number of model parameters
# First check with model.summary()
# from model import ModelGenerator
# action = [0, '1x7-7x1 conv', 0, '1x7-7x1 conv', 0, '3x3 maxpool', 0, '3x3 avgpool', 0, '1x7-7x1 conv', 0, '7x7 dconv']
#
#
# def counts(actions, input_channel, filter_number, classes, cell):
#     """ There is also a global pooling layer at the end.
#         Pooling layers default to padding='same', so the output size is input_size / 2.
#         Conv layers are followed by a bias and contain BN, so add 3 * output_feature_map.
#         The first B layers use 32 filters, the last B layers use 64. """
#
#     # First extract the conv and pooling kernels from actions, i.e. the odd-indexed entries
# operator = actions[1::2]
# conv_size = []
# pool_size = []
# numbers = 0
#
# for index in range(len(operator)):
# name = operator[index].split(' ', 1)
# if name[1] == 'conv':
#             temp = [i.split('x', 1) for i in name[0].split('-', 1)]  # split out the numeric values
#             content1 = [[float(temp[0][0]), float(temp[0][1])], [float(temp[1][0]), float(temp[1][1])]]  # convert str to float, e.g. [[1, 7], [7, 1]]
# conv_size.append(content1)
# elif name[1] == 'dconv':
# temp = name[0].split('x', 1)
# content1 = [float(temp[0]), float(temp[1])]
# conv_size.append(content1)
#
# if any([name[1] == 'maxpool', name[1] == 'avgpool']):
# temp = name[0].split('x', 1)
# content2 = [float(temp[0]), float(temp[1])]
# pool_size.append(content2)
#
#     # Collect the numeric kernel sizes of the conv layers
# conv_size2 = []
# for i in conv_size:
# if isinstance(i, list):
# for j in i:
# if isinstance(j, list):
# for t in j:
#                         conv_size2.append(t)  # only pulls out the values from the nested lists
# else:
# conv_size2.append(j)
#
# conv_size2 = [conv_size2[i:i+2] for i in range(0, len(conv_size2), 2)]
#
#     # Combine the remaining parameters and compute the total number of trainable parameters in the network
# if cell == 1:
# feed1 = input_channel
#         for i in range(len(conv_size2)):  # conv layers
# numbers1 = feed1 * conv_size2[i][0] * conv_size2[i][1] * filter_number[0] + 3 * filter_number[0] # cell_1
# feed1 = filter_number[0]
# numbers += numbers1
#
#         numbers += (classes * feed1 + classes)  # Dense layer
#
# elif cell == 2:
# feed1 = input_channel
#         for i in range(len(conv_size2)):  # conv layers
# numbers1 = feed1 * conv_size2[i][0] * conv_size2[i][1] * filter_number[0] + 3 * filter_number[0] # cell_1
# feed1 = filter_number[0]
# numbers += numbers1
# feed2 = feed1
# for i in range(len(conv_size2)):
# numbers2 = feed2 * conv_size2[i][0] * conv_size2[i][1] * filter_number[1] + 3 * filter_number[1] # cell_2
# feed2 = filter_number[1]
# numbers += numbers2
#
#         numbers += (classes * feed2 + classes)  # Dense layer
#
# else:
# print('The numbers of cell is undefined!')
#
# return numbers
#
#
# total_count = counts(action, input_channel=3, filter_number=[32, 64], classes=5, cell=1)
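# Note (added): the counts() calls below assume a counts() helper with this extended
# signature is defined or imported elsewhere; the commented-out draft above uses an
# older, narrower signature.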
num_acc = []
for i in range(8):
action = lines1[i][50:(50+(i+1)*4)]
temp = counts(action, input_channel=3, input_size=[32, 32, 3], stride=2, filter_number=[32, 64],
classes=5, B=i+1, cell=2)
num_acc.append(temp)
num_rew = []
for i in range(8):
action = lines2[i][50:(50+(i+1)*4)]
temp = counts(action, input_channel=3, input_size=[32, 32, 3], stride=2, filter_number=[32, 64],
classes=5, B=i+1, cell=2)
num_rew.append(temp)
num_acc.insert(0, np.nan)
num_rew.insert(0, np.nan)
plt.plot(num_acc, '--k', label='standard NAS')
plt.plot(num_rew, 'r', label='balanced NAS')
plt.legend()
plt.xlabel('B')
plt.ylabel('Computation amount')
plt.show()
# network = Sequential()
# network.add(layers.Conv2D(32, kernel_size=(1, 7), padding='SAME', strides=2, activation='relu', input_shape=(512, 512, 3)))
# network.add(layers.Conv2D(32, kernel_size=(7, 1), padding='SAME', strides=2, activation='relu'))
#
# network.add(layers.Conv2D(32, kernel_size=(1, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Conv2D(32, kernel_size=(7, 1), padding='SAME', strides=2, activation='relu'))
#
# network.add(layers.MaxPooling2D(pool_size=3, strides=2))  # 1st pooling layer: halves height and width
#
# network.add(layers.MaxPooling2D(pool_size=3, strides=2))  # 1st pooling layer: halves height and width
#
# network.add(layers.Conv2D(32, kernel_size=(1, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Conv2D(32, kernel_size=(7, 1), padding='SAME', strides=2, activation='relu'))
#
# network.add(layers.Conv2D(32, kernel_size=(7, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Conv2D(64, kernel_size=(1, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Conv2D(64, kernel_size=(7, 1), padding='SAME', strides=2, activation='relu'))
#
# network.add(layers.Conv2D(64, kernel_size=(1, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Conv2D(64, kernel_size=(7, 1), padding='SAME', strides=2, activation='relu'))
#
# network.add(layers.MaxPooling2D(pool_size=3, strides=2))  # 1st pooling layer: halves height and width
#
# network.add(layers.MaxPooling2D(pool_size=3, strides=2))  # 1st pooling layer: halves height and width
#
# network.add(layers.Conv2D(64, kernel_size=(1, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Conv2D(64, kernel_size=(7, 1), padding='SAME', strides=2, activation='relu'))
#
# network.add(layers.Conv2D(64, kernel_size=(7, 7), padding='SAME', strides=2, activation='relu'))
# network.add(layers.Dense(units=5, activation='softmax'))
# network.summary()
# 4. Debugging skip connections: side branches must be created, and drop path is also needed
# action = [0, '1x7-7x1 conv', 0, '1x7-7x1 conv', 0, '3x3 maxpool', 0, '3x3 avgpool', 0, '1x7-7x1 conv', 0, '7x7 dconv']
# network = ModelGenerator(action)  # build the network; pass image data into network when calling it
# 5. Debugging the balance function
'''
Idea: the accuracy term should be an increasing function and the computation-amount
term a decreasing one; candidate shapes are power, linear, exponential and sigmoid functions.
Approach 1: give accuracy a lower power than the computation amount. Simulations show
that at low SNR, high-accuracy models receive small scores.
Approach 2: give accuracy a higher power than the computation amount.
'''
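# Added sketch: one concrete balance function following the idea above -- a
# sigmoid term that rewards accuracy and an exponential term that penalizes
# computation amount (both assumed normalized to [0, 1]). The name is hypothetical.
def balance_score(accuracy, computation):
    f_acc = 1 / (1 + np.exp(-10 * (accuracy - 0.5)))  # increasing in accuracy
    f_comp = np.exp(-computation)                      # decreasing in computation amount
    return 0.5 * (f_acc + f_comp)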
# (A) Approach 1
# (1) Relationship between the balance function and accuracy
# x = np.linspace(0, 1, 1000)
# F1 = 1/(1+np.exp(-10*(x-0.5)))  # sigmoid function
# plt.plot(x, F1, label='$F1(x)$', color='green', linewidth=0.5)
# plt.legend()
# plt.show()
# f2 = a**(1/2)  # power function
# plt.plot(a, f2, label='$f2(a)$', color='green', linewidth=0.5)
# plt.show()
# (2) Relationship between the balance function and computation amount
# x = np.linspace(0, 1, 1000)
# fe_1 = 1/(1+np.exp(10*(x-0.5)))  # sigmoid function
# plt.plot(x, fe_1, label='$fe_1(x)$', color='green', linewidth=0.5)
# plt.show()
# (3) Plot combinations of different marginal functions (2-D plots)
# a = np.linspace(0, 1, 1000)
# fa_1 = a**(1/2)
# fa_1 = 1/(1+np.exp(-10*(a-0.5)))
# fe_1 = -a+1
# f1 = (1/2) * (fe_1+fa_1)
# plt.plot(a, f1, color='green', linewidth=0.5)
# plt.show()
# F_2 = -x*x
# f2 = (1/2) * (fe_2+fa_1)
# plt.plot(x, F_2, label='$F2(y)$', color='green', linewidth=0.5)
# plt.legend()
# plt.show()
# fe_3 = 1/(a+0.01)
# f3 = (1/2) * (fe_3+fa_1)
# plt.plot(a, f3, color='green', linewidth=0.5)
# plt.show()
# fe_4 = 1/(1+np.exp(10*(a-0.5)))
# f4 = (1/2) * (fe_4+fa_1)
# plt.plot(a, f4, color='green', linewidth=0.5)
# plt.show()
# 3-D plot
from mpl_toolkits.mplot3d import Axes3D
x = np.arange(0, 1, 0.01)  # accuracy
y = np.arange(0, 1, 0.01)  # computation amount
fig = plt.figure()
ax = Axes3D(fig)
X, Y = np.meshgrid(x, y)  # create the mesh grid
# # fa_1 = X**(1/2)
fa_1 = 1/(1+np.exp(-10*(X-0.5)))
# # fe_1 = -Y + 1
# fe_1 = -Y*Y
# Z = 1/2 * (fa_1 + fe_1)
# plt.xlabel('x')
# plt.ylabel('y')
# ax.plot_surface(X, Y, Z, rstride=1, cstride=1, cmap='rainbow')
# plt.show()
# (B) Approach 2
# This approach works better
# fa_1 = 1/(1+np.exp(-10*(X-0.5)))
fe_1 = np.exp(-Y)
import numpy as np
import cv2
import open3d as o3d
def getAffordancePointCloudBasedOnVariance(pcd):
""" Computes 9 points for each affordance, based on standard deviation,
present in the point cloud
Input:
pcd - o3d.geometry.PointCloud()
Output:
pcd_box - o3d.geometry.PointCloud()
"""
affordances, affordance_counts = getPredictedAffordancesInPointCloud(pcd)
affordance_counts = affordance_counts / np.linalg.norm(affordance_counts)
out_points, out_colors = [], []
points = np.asanyarray(pcd.points)
colors = np.asanyarray(pcd.colors)
if np.max(colors) <= 1.0:
colors = colors * 255
label_colors = getAffordanceColors()
first = True
for label_count, label_color in enumerate(label_colors):
if label_count != 0:
if affordance_counts[label_count] > 0.005:
idx = colors == label_color
idx = np.sum(idx, axis = -1) == 3
if True in idx:
aff_points = points[idx]
x_c, y_c, z_c = np.mean(aff_points, axis = 0)
x_std, y_std, z_std = np.std(aff_points, axis = 0) / 2
box_points = []
box_points.append([x_c, y_c, z_c]) # centroid
box_points.append([x_c - x_std, y_c - y_std, z_c - z_std])
box_points.append([x_c + x_std, y_c - y_std, z_c - z_std])
box_points.append([x_c - x_std, y_c + y_std, z_c - z_std])
box_points.append([x_c + x_std, y_c + y_std, z_c - z_std])
box_points.append([x_c - x_std, y_c - y_std, z_c + z_std])
box_points.append([x_c + x_std, y_c - y_std, z_c + z_std])
box_points.append([x_c - x_std, y_c + y_std, z_c + z_std])
box_points.append([x_c + x_std, y_c + y_std, z_c + z_std])
box_colors = [label_color for i in range(len(box_points))]
box_points = np.array(box_points)
box_colors = np.array(box_colors) / 255
if first:
out_points = box_points
out_colors = box_colors
first = False
else:
out_points = np.vstack((out_points, box_points))
out_colors = np.vstack((out_colors, box_colors))
pcd_box = o3d.geometry.PointCloud()
pcd_box.points = o3d.utility.Vector3dVector(out_points)
pcd_box.colors = o3d.utility.Vector3dVector(out_colors)
return pcd_box
def getPredictedAffordancesInPointCloud(pcd):
""" Input:
pcd - o3d.geometry.PointCloud()
Output:
affordances - list[], one-hot-encoded vector with present affordances.
counts - np.array(), shape(num_affordances), int count of every affordance.
"""
label_colors = getAffordanceColors()
colors = np.asanyarray(pcd.colors)
if np.max(colors) <= 1.0:
colors = colors * 255
affordances, counts = [], []
for label_color in label_colors:
idx = colors == label_color
idx = np.sum(idx, axis = -1) == 3
if True in idx:
affordances.append(1)
counts.append(np.count_nonzero(idx == True))
else:
affordances.append(0)
counts.append(0)
return affordances, np.array(counts)
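# Added usage sketch (hypothetical data): build a tiny labeled cloud whose colors
# come from getAffordanceColors() and count the affordances present in it.
def _affordance_count_example():
    pcd = o3d.geometry.PointCloud()
    pcd.points = o3d.utility.Vector3dVector(np.random.rand(10, 3))
    colors = np.array([[0, 0, 255]] * 6 + [[0, 255, 0]] * 4, dtype=float) / 255
    pcd.colors = o3d.utility.Vector3dVector(colors)
    return getPredictedAffordancesInPointCloud(pcd)  # counts: 6 blue points, 4 green points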
def getPredictedAffordances(masks, bbox = None):
""" Input:
masks - np.array, shape (affordances, h, w)
bbox - if provided speeds up computations
Output:
affordances - list, [N, ??], affordances found in each N object
"""
affordances = []
if bbox is not None:
masks = masks[:, bbox[1]:bbox[3], bbox[0]:bbox[2]]
for count, affordance_mask in enumerate(masks):
if True in np.unique(affordance_mask):
affordances.append(count)
return affordances
def getAffordanceColors():
""" Output:
colors - 'official' list of colors so they are uniform
"""
colors = [(0,0,0), (0,0,255), (0,255,0), (123, 255, 123), (255, 0, 0),
(255, 255, 0), (255, 255, 255), (255, 0, 255), (123, 123, 123), (255, 255, 0), (70, 70, 70), (0,10,0)]
return colors
def getAffordanceContours(affordance_id, masks, bbox = None):
""" Input:
affordance_id - int,
masks - np.array, bool, shape (affordances, h, w)
bbox - np.array, shape (4, 2), if provided speeds up computation
Output:
contours - list[N, cv2 contours]
"""
contours = []
m = masks[affordance_id]
if bbox is not None:
m = masks[affordance_id, bbox[1]:bbox[3], bbox[0]:bbox[2]].astype(np.uint8)
contours, hierarchy = cv2.findContours(m, cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
return contours
def getAffordanceBoundingBoxes(pcd):
""" Returns 8 points of the non-axis aligned bounding box for each affordance
present in the provided affordance point cloud
Input:
pcd - o3d.geometry.PointCloud where each point has an affordance
assigned.
Output:
points - np.array (N, 3) x, y, z points
colors - np.array (N, 3) r, g, b associated with each affordance
"""
pcd_points = np.asanyarray(pcd.points)
pcd_colors = np.asanyarray(pcd.colors).astype(np.uint8)
if np.max(pcd_colors) <= 1.0:
pcd_colors = pcd_colors * 255
label_colors = getAffordanceColors()
points = []
colors = []
#print(np.unique(pcd_colors, axis = 0))
for color in label_colors:
idx = pcd_colors == color
idx = np.sum(idx, axis = -1) == 3
if True in idx:
aff_points = o3d.utility.Vector3dVector(pcd_points[idx])
bbox = np.asanyarray(o3d.geometry.OrientedBoundingBox.create_from_points(aff_points).get_box_points())
bbox_colors = [color for i in range(bbox.shape[0])]
#print(bbox)
if len(points) == 0:
points = bbox
colors = np.array(bbox_colors)
else:
points = np.vstack((points, bbox))
colors = np.vstack((colors, bbox_colors))
return points, colors
def getPointCloudAffordanceMask(affordance_id, points, uvs, masks, remove_outliers = True, bbox = None):
""" Input:
affordance_id - int,
points - np.array, shape: (n, 3), xyz points
masks - np.array, boolean, shape (affordances, h, w)
uvs - np.array, shape (n, 2)
remove_outliers - bool, uses open3d remove_statistical_outlier method
bbox - np.array, shape (4, 2) if provided speeds up computation
Output:
success - bool, set to False if computation fails
points - np.array, shape (n, 3), xyz points
"""
# Check if points are empty
if points.shape[0] <= 0:
return False, 0
# select affordance mask in bbox of object
if bbox is not None:
m = masks[affordance_id, bbox[1]:bbox[3], bbox[0]:bbox[2]]
# do the same for points and uv
points = points[np.where(uvs[:,0] > bbox[1])]
uvs = uvs[np.where(uvs[:,0] > bbox[1])]
points = points[np.where(uvs[:,0] < bbox[3])]
uvs = uvs[np.where(uvs[:,0] < bbox[3])]
points = points[np.where(uvs[:,1] > bbox[0])]
uvs = uvs[np.where(uvs[:,1] > bbox[0])]
points = points[np.where(uvs[:,1] < bbox[2])]
uvs = uvs[np.where(uvs[:,1] < bbox[2])]
uvs[:,0] -= bbox[1]
uvs[:,1] -= bbox[0]
else:
m = masks[affordance_id, :, :]
# Check if the affordance mask has the requested affordance
if not 255 in np.unique(m) and not 1 in np.unique(m):
return False, 0
# get points belonging to affordance
cloud_masked = []
for count, uv in enumerate(uvs):
if m[uv[0], uv[1]] != False:
cloud_masked.append(points[count])
points = np.array(cloud_masked)
if remove_outliers:
pcd = o3d.geometry.PointCloud()
pcd.points = o3d.utility.Vector3dVector(points)
pcd = pcd.voxel_down_sample(voxel_size=0.005)
pcd, _ = pcd.remove_statistical_outlier(nb_neighbors=20, std_ratio=2.0)
        points = np.asanyarray(pcd.points)
    return True, points
import numpy as np
import cv2
import matplotlib.pyplot as plt
import pandas as pd
from scipy.optimize import linear_sum_assignment
from scipy import signal
from sklearn.neighbors import KernelDensity
import copy
import os
import utm
import rasterio
from CountLine import CountLine
import sys
sys.path.append('/home/golden/general-detection/functions')
import koger_tracking as ktf
def mark_bats_on_image(image_raw, centers, radii=None,
scale_circle_size=5, contours=None,
draw_contours=False):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
radii: list of circle radii
'''
if len(image_raw.shape) < 2:
print('image has too few dimensions')
return None
if len(image_raw.shape) == 2:
color = 200
else:
if image_raw.shape[2] == 3:
color = (0, 255, 255)
else:
print('image is the wrong shape')
return None
image = np.copy(image_raw)
if radii is None:
radii = np.ones(len(centers))
for circle_ind, radius in enumerate(radii):
cv2.circle(image,
(centers[circle_ind, 0].astype(int),
centers[circle_ind, 1].astype(int)),
int(radius * scale_circle_size), color , 1)
if draw_contours and contours:
for contour in contours:
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, (0,255,100), 1)
return image
def get_tracks_in_frame(frame_ind, track_list):
""" Return list of all tracks present in frame ind. """
tracks_in_frame = []
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
tracks_in_frame.append(track)
return tracks_in_frame
def draw_tracks_on_frame(frame, frame_ind, track_list,
positions=None, figure_scale=60,
track_width=2, position_alpha=.5,
draw_whole_track=False, shift=0):
""" Draw all active tracks and all detected bat locations on given frame.
frame: loaded image - np array
frame_ind: frame number
track_list: list of all tracks in observation
positions: all detected bat positions in observation
figure_scale: how big to display output image
track_width: width of plotted tracks
position_alpha: alpha of position dots
draw_whole_track: Boolean draw track in the future of frame_ind
shift: compensate for lack of padding in network when drawing tracks
on input frames
"""
plt.figure(
figsize = (int(frame.shape[1] / figure_scale),
int(frame.shape[0] / figure_scale)))
plt.imshow(frame)
num_tracks = 0
for track in track_list:
if (track['last_frame'] >= frame_ind
and track['first_frame'] <= frame_ind):
rel_frame = frame_ind - track['first_frame']
if draw_whole_track:
plt.plot(track['track'][:, 0] + shift,
track['track'][:, 1] + shift,
linewidth=track_width)
else:
plt.plot(track['track'][:rel_frame, 0] + shift,
track['track'][:rel_frame, 1] + shift,
linewidth=track_width)
num_tracks += 1
if positions:
plt.scatter(positions[frame_ind][:,0] + shift,
positions[frame_ind][:,1] + shift,
c='red', alpha=position_alpha)
plt.title('Tracks: {}, Bats: {}'.format(num_tracks,
len(positions[frame_ind])))
def subtract_background(images, image_ind, background_sum):
'''
Subtract an averaged background from the image. Average over frame_range in the past and future
images: 3d numpy array (num images, height, width)
image_ind: index in circular image array
background_sum: sum of blue channel pixels across 0 dimension of images
'''
background = np.floor_divide(background_sum, images.shape[0])
# The order of subtraction means dark bats are now light in image_dif
image_dif = background - images[image_ind, :, :, 2]
return image_dif, background
def preprocess_to_binary(image, binary_thresh, background):
'''
    Converts a 2D image to binary (the intensity-rescaling step is currently commented out below).
    image: 2D np array
    binary_thresh: scale factor applied to the background; pixels below
        binary_thresh * background are set to 0, those above to 255
    background: background image (2D, probably the blue channel)
'''
# # Rescale image pixels within range
# image_rescale = exposure.rescale_intensity(
# image, in_range=(low_pix_value, high_pix_value), out_range=(0, 255))
image_rescale = image
# Binarize image based on threshold
min_difference = 5
threshold = binary_thresh * background
threshold = np.where(threshold < min_difference, min_difference, threshold)
binary_image = np.where(image < threshold, 0, 255)
return binary_image
def get_blob_info(binary_image, background=None, size_threshold=0):
'''
Get contours from binary image. Then find center and average radius of each contour
binary_image: 2D image
background: 2D array used to see locally how dark the background is
size_threshold: radius above which blob is considered real
'''
contours, hierarchy = cv2.findContours(binary_image.astype(np.uint8).copy(), cv2.RETR_TREE, cv2.CHAIN_APPROX_SIMPLE)
centers = []
# Size of bounding rectangles
sizes = []
areas = []
# angle of bounding rectangle
angles = []
rects = []
good_contours = []
contours = [np.squeeze(contour) for contour in contours]
for contour_ind, contour in enumerate(contours):
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
if background is not None:
darkness = background[int(rect[0][1]), int(rect[0][0])]
if darkness < 30:
dark_size_threshold = size_threshold + 22
elif darkness < 50:
dark_size_threshold = size_threshold + 15
elif darkness < 80:
dark_size_threshold = size_threshold + 10
elif darkness < 100:
dark_size_threshold = size_threshold + 5
# elif darkness < 130:
# dark_size_threshold = size_threshold + 3
else:
dark_size_threshold = size_threshold
else:
dark_size_threshold = 0 # just used in if statement
area = rect[1][0] * rect[1][1]
if (area >= dark_size_threshold) or background is None:
centers.append(rect[0])
sizes.append(rect[1])
angles.append(rect[2])
good_contours.append(contour)
areas.append(area)
rects.append(rect)
if centers:
centers = np.stack(centers, 0)
sizes = np.stack(sizes, 0)
else:
centers = np.zeros((0,2))
return (centers, np.array(areas), good_contours, angles, sizes, rects)
def draw_circles_on_image(image, centers, sizes, rects=None):
'''
Draw a bunch of circles on given image
image: 2D or 3D image
centers: shape(n,2) array of circle centers
rects: list of minimum bounding rectangles
'''
if len(image.shape) < 2:
print('image has too few dimensions')
return None
if len(image.shape) == 2:
color = 200
rect_color = 100
else:
if image.shape[2] == 3:
color = (0, 255, 255)
rect_color = (0,255,100)
else:
print('image is the wrong shape')
return None
for circle_ind, size in enumerate(sizes):
cv2.circle(image, (centers[circle_ind, 0].astype(int), centers[circle_ind, 1].astype(int)),
int(np.max(size)), color , 1)
if rects:
for rect in rects:
box = cv2.boxPoints(rect)
box_d = np.int0(box)
cv2.drawContours(image, [box_d], 0, rect_color, 1)
return image
def update_circular_image_array(images, image_ind, image_files, frame_num, background_sum):
""" Add new image if nessesary and increment image_ind.
Also update sum of pixels across array for background subtraction.
If frame_num is less than half size of array than don't need to
replace image since intitally all images in average are in the future.
images: image array size (num images averaging, height, width, channel)
image_ind: index of focal frame in images
image_files: list of all image files in observation
frame_num: current frame number in observation
background_sum: sum of current frames blue dimension across frames
"""
if (frame_num > int(images.shape[0] / 2)
and frame_num < (len(image_files) - int(images.shape[0] / 2))):
replace_ind = image_ind + int(images.shape[0] / 2)
replace_ind %= images.shape[0]
# Subtract the pixel values that are about to be removed from background
background_sum -= images[replace_ind, :, :, 2]
image_file = image_files[frame_num + int(images.shape[0] / 2)]
image = cv2.imread(image_file)
images[replace_ind] = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
# Add new pixel values to the background sum
background_sum += images[replace_ind, :, :, 2]
image_ind += 1
# image_ind should always be in between 0 and images.shape - 1
image_ind = image_ind % images.shape[0]
return images, image_ind, background_sum
def initialize_image_array(image_files, focal_frame_ind, num_images):
""" Create array of num_images x h x w x 3.
Args:
image_files (list): sorted paths to all image files in observation
focal_frame_ind (int): number of the frame being process
num_images (int): number of frames used for background subtraction
return array, index in array where focal frame is located
"""
images = []
first_frame_ind = focal_frame_ind - (num_images // 2)
if num_images % 2 == 0:
# even
last_frame_ind = focal_frame_ind + (num_images // 2) - 1
else:
# odd
last_frame_ind = focal_frame_ind + (num_images // 2)
for file in image_files[first_frame_ind:last_frame_ind+1]:
image = cv2.imread(file)
images.append(cv2.cvtColor(image, cv2.COLOR_BGR2RGB))
images = np.stack(images)
focal_ind = num_images // 2
return(images, focal_ind)
def process_frame(images, focal_frame_ind, bat_thresh, background_sum, bat_area_thresh, debug=False):
"""Process bat frame.
images: n x h x w x c array where the n images are averaged together for background subtraction
focal_frame_ind: which index in images array should be processed
bat_thresh: float value to use for thresholding bat from background
background_sum: sum of all blue channel pixels across the n dimension of images
debug: if true return binary image
"""
size_threshold = bat_area_thresh
max_bats = 600
mean = np.mean(images[focal_frame_ind, :, :, 2])
if mean < 35:
max_bats = 200
if mean < 28:
max_bats = 100
if mean < 5:
print('Too dark...')
if debug:
return None, None, None, None, None, None, None, None
else:
return None, None, None, None, None, None, None
image_dif, background = subtract_background(images, focal_frame_ind, background_sum)
while True:
binary_image = preprocess_to_binary(image_dif, bat_thresh, background)
bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects = get_blob_info(
binary_image, background, size_threshold=size_threshold)
if len(bat_centers) < max_bats:
break
bat_thresh += 0.05
if debug:
return bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects, bat_thresh, binary_image
else:
return bat_centers, bat_areas, contours, rect_angles, bat_sizes, bat_rects, bat_thresh
def add_all_points_as_new_tracks(raw_track_list, positions, contours,
sizes, current_frame_ind, noise):
""" When there are no active tracks, add all new points to new tracks.
Args:
raw_track_list (list): list of tracks
positions (numpy array): p x 2
contours (list): p contours
current_frame_ind (int): current frame index
noise: how much noise to add to tracks initially
"""
for ind, (position, contour, size) in enumerate(zip(positions, contours, sizes)):
raw_track_list.append(
ktf.create_new_track(first_frame=current_frame_ind,
first_position=position, pos_index=ind,
noise=noise, contour=contour, size=size
)
)
return raw_track_list
def find_tracks(first_frame_ind, positions,
contours_files=None, contours_list=None,
sizes_list=None, max_frame=None, verbose=True,
tracks_file=None):
""" Take in positions of all individuals in frames and find tracks.
Args:
first_frame_ind (int): index of first frame of these tracks
positions (list): n x 2 for each frame
contours_files (list): list of files for contour info from each frame
contours_list: already loaded list of contours, only used if contours_file
is None
sizes_list (list): sizes info from each frame
return list of all tracks found
"""
raw_track_list = []
max_distance_threshold = 30
max_distance_threshold_noise = 30
min_distance_threshold = 0
max_unseen_time = 2
min_new_track_distance = 3
min_distance_big = 30
# #Create initial tracks based on the objects in the first frame
# raw_track_list = add_all_points_as_new_tracks(
# raw_track_list, positions[0], contours_list[0], sizes_list0, noise=0
# )
#try to connect points to the next frame
if max_frame is None:
max_frame = len(positions)
contours_file_ind = 0
previous_contours_seen = 0
if contours_files:
contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
while first_frame_ind >= previous_contours_seen + len(contours_list):
contours_file_ind += 1
previous_contours_seen += len(contours_list)
contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
print(f'using {contours_files[contours_file_ind]}')
elif not contours_list:
print("Needs contour_files or contour_list")
return
contours_ind = first_frame_ind - previous_contours_seen - 1
for frame_ind in range(first_frame_ind, max_frame):
contours_ind += 1
if contours_files:
if contours_ind >= len(contours_list):
# load next file
try:
contours_file_ind += 1
contours_list = np.load(contours_files[contours_file_ind], allow_pickle=True)
contours_ind = 0
except:
if tracks_file:
tracks_file_error = os.path.splitext(tracks_file)[0] + f'-error-{frame_ind}.npy'
print(tracks_file_error)
np.save(tracks_file_error, np.array(raw_track_list, dtype=object))
#get tracks that are still active (have been seen within the specified time)
active_list = ktf.calculate_active_list(raw_track_list, max_unseen_time, frame_ind)
if verbose:
if frame_ind % 10000 == 0:
print('frame {} processed.'.format(frame_ind))
if tracks_file:
np.save(tracks_file, np.array(raw_track_list, dtype=object))
if len(active_list) == 0:
#No existing tracks to connect to
#Every point in next frame must start a new track
raw_track_list = add_all_points_as_new_tracks(
raw_track_list, positions[frame_ind], contours_list[contours_ind],
sizes_list[frame_ind], frame_ind, noise=1
)
continue
# Make sure there are new points to add
new_positions = None
row_ind = None
col_ind = None
new_sizes = None
new_position_indexes = None
distance = None
contours = None
if len(positions[frame_ind]) != 0:
#positions from the next step
new_positions = positions[frame_ind]
contours = [np.copy(contour) for contour in contours_list[contours_ind]]
new_sizes = sizes_list[frame_ind]
raw_track_list = ktf.calculate_max_distance(
raw_track_list, active_list, max_distance_threshold,
max_distance_threshold_noise, min_distance_threshold,
use_size=True, min_distance_big=min_distance_big
)
distance = ktf.calculate_distances(
new_positions, raw_track_list, active_list
)
max_distance = ktf.create_max_distance_array(
distance, raw_track_list, active_list
)
assert distance.shape[1] == len(new_positions)
assert distance.shape[1] == len(contours)
assert distance.shape[1] == len(new_sizes)
# Some new points could be too far away from every existing track
raw_track_list, distance, new_positions, new_position_indexes, new_sizes, contours = ktf.process_points_without_tracks(
distance, max_distance, raw_track_list, new_positions, contours,
frame_ind, new_sizes
)
if distance.shape[1] > 0:
# There are new points can be assigned to existing tracks
#connect the dots from one frame to the next
row_ind, col_ind = linear_sum_assignment(np.log(distance + 1))
# for active_ind, track_ind in enumerate(active_list):
# if active_ind in row_ind:
# row_count = np.where(row_ind == active_ind)[0]
# raw_track_list[track_ind]['debug'].append(
# '{} dist {}, best {}'.format(
# frame_ind,
# distance[row_ind[row_count],
# col_ind[row_count]],
# np.min(distance[row_ind[row_count],
# :])
# )
# )
# best_col = np.argmin(distance[row_ind[row_count],
# :])
# row_count = np.where(col_ind == best_col)[0]
# raw_track_list[track_ind]['debug'].append(
# '{} row_ind {} col {} dist {} track {}'.format(
# frame_ind, row_ind[row_count],
# col_ind[row_count],
# distance[row_ind[row_count],
# col_ind[row_count]],
# active_list[row_ind[row_count][0]])
# )
            # In cases where there are fewer new points than existing tracks
            # some tracks won't get a new point. Just assign them to
# the closest point
row_ind, col_ind = ktf.filter_tracks_without_new_points(
raw_track_list, distance, row_ind, col_ind, active_list, frame_ind
)
# Check if tracks with big bats got assigned to small points which are
# probably noise
row_ind, col_ind = ktf.fix_tracks_with_small_points(
raw_track_list, distance, row_ind, col_ind, active_list, new_sizes, frame_ind)
# see if points got assigned to tracks that are farther
# than max_threshold_distance
# This happens when the closer track gets assigned
# to a differnt point
row_ind, col_ind = ktf.filter_bad_assigns(raw_track_list, active_list, distance, max_distance,
row_ind, col_ind
)
raw_track_list = ktf.update_tracks(raw_track_list, active_list, frame_ind,
row_ind, col_ind, new_positions,
new_position_indexes, new_sizes, contours,
distance, min_new_track_distance)
raw_track_list = ktf.remove_noisy_tracks(raw_track_list)
raw_track_list = ktf.finalize_tracks(raw_track_list)
if tracks_file:
np.save(tracks_file, np.array(raw_track_list, dtype=object))
print('{} final save.'.format(os.path.basename(os.path.dirname(tracks_file))))
return raw_track_list
def get_tracked_bats_in_frame(image_files, focal_frame_ind, bat_thresh, bat_area_thresh):
centers_list = []
contours_list = []
sizes_list = []
clip_length = 5
array_size = 31
images, frame_buffer_ind = initialize_image_array(image_files, focal_frame_ind, array_size)
background_sum = np.sum(images[:,:,:,2], 0, dtype=np.int16)
for video_frame_ind in range(focal_frame_ind, focal_frame_ind+clip_length):
bat_centers, bat_areas, bat_contours, _, _, _, bat_thresh = process_frame(
images, frame_buffer_ind, bat_thresh, background_sum,
bat_area_thresh, debug=False)
centers_list.append(bat_centers)
contours_list.append(bat_contours)
sizes_list.append(bat_areas)
images, frame_buffer_ind, background_sum = update_circular_image_array(
images, frame_buffer_ind, image_files, video_frame_ind, background_sum)
raw_tracks = find_tracks(0, centers_list,
contours_list=contours_list,
sizes_list=sizes_list
)
return raw_tracks, centers_list
# return raw_tracks, centers_list, distance, max_distance, active_list, all_pre_distances, all_row_inds, all_col_inds
# return(connected_distance, connected_size)
def piecewise_linear(x, x0, y0, k1, k2):
return np.piecewise(x, [x < x0],
[lambda x:k1*x + y0-k1*x0, lambda x:k2*x + y0-k2*x0]
)
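# Added sketch (not in the original): the piecewise-linear accuracy model above can
# be fit to observed (darkness, accuracy) pairs with scipy's curve_fit; the fitted
# parameters are what get_bat_accumulation() expects as `parameters`.
def _fit_accuracy_model(darkness_values, accuracy_values):
    from scipy.optimize import curve_fit
    params, _ = curve_fit(piecewise_linear,
                          np.asarray(darkness_values, dtype=float),
                          np.asarray(accuracy_values, dtype=float))
    return params  # x0, y0, k1, k2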
def get_bat_accumulation(crossing_frames, obs=None, parameters=None,
w_multiplier=True, w_darkness=True, w_frac=True):
""" Create and return cummulative sum of bats crossing count line over the course of
list of given positive and negative crossing frames.
crossing_frames: list of frame that each track crosses line. Positive if leaving
negative if going
obs: observation dictionary.
parameters: list of parameters of piecewise linear function
w_multiplier: multiply each bat crossing by apropriate bat multiplier for camera etc.
w_darkness: scale each bat crossing by apropriate accuracy corrrection based on frame darkness
w_frac: scale each bat crossing by fraction of total circle that camera sees
"""
if not np.any(crossing_frames):
return np.zeros(1)
last_crossing_frame = np.max(np.abs(crossing_frames))
crossing_per_frame = np.zeros(last_crossing_frame+1)
if obs and parameters:
accurracies = piecewise_linear(obs['darkness'], *parameters)
for crossing_frame, bm, acc in zip(crossing_frames, obs['multiplier'], accurracies):
scale = 1
if w_multiplier:
scale *= bm
if w_darkness:
scale *= (1/acc)
if crossing_frame < 0:
crossing_per_frame[-crossing_frame] -= scale
elif crossing_frame > 0:
crossing_per_frame[crossing_frame] += scale
if w_frac:
crossing_per_frame *= obs['fraction_total']
else:
for crossing_frame in crossing_frames:
if crossing_frame < 0:
crossing_per_frame[-crossing_frame] -= 1
elif crossing_frame > 0:
crossing_per_frame[crossing_frame] += 1
return np.cumsum(crossing_per_frame)
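# Added usage sketch: positive entries are frames with outward crossings, negative
# entries inward crossings; the returned array is the running net count per frame.
def _accumulation_example():
    crossing_frames = [3, 5, -6, 9]  # two bats out, one in, one more out
    return get_bat_accumulation(crossing_frames)  # cumulative sum ending at a net count of 2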
def threshold_short_tracks(raw_track_list, min_length_threshold=2):
"""Only return tracks that are longer than min_length_threshold."""
track_lengths = []
track_list = []
for track_num, track in enumerate(raw_track_list):
if isinstance(track['track'], list):
track['track'] = np.array(track['track'])
track_length = track['track'].shape[0]
if track_length >= min_length_threshold:
track_lengths.append(track['track'].shape[0])
track_list.append(track)
return track_list
def calculate_height(wingspan_pixels, camera_constant, wingspan_meters):
    ''' Calculate the bat's height above the ground, assuming wingspan_meters is correct.
camera_constant = (frame pixels / 2) / tan(fov / 2)
height = constant * wingspan_meters / wingspan_pixels
'''
return camera_constant * wingspan_meters / wingspan_pixels
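# Added numeric sketch (illustrative values only): a 4096-pixel-wide frame with a
# 90 degree horizontal FOV gives camera_constant = (4096 / 2) / tan(45 deg) = 2048,
# so a 0.8 m wingspan spanning 20 pixels puts the bat at roughly 82 m.
def _height_example():
    camera_constant = (4096 / 2) / np.tan(np.radians(90) / 2)
    return calculate_height(wingspan_pixels=20, camera_constant=camera_constant,
                            wingspan_meters=0.8)  # ~81.9 m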
def calculate_bat_multiplier_simple(height, horizontal_fov, distance_to_center):
    ''' Calculate how many bats one bat at a given height and camera location represents.
height: height of bat
horizontal_fov: horizontal field of view of camera (degrees)
distance_to_center: distance from camera to center of colony
    ASSUMES CIRCUMFERENCE IS MUCH LARGER THAN WIDTH OF SPACE SEEN
    circumference c = 2 * pi * distance_to_center
width of seen space w = 2 * height * tan(horizontal_fov / 2)
multiplier = c / w
'''
c = 2 * np.pi * distance_to_center
horizontal_fov_rad = horizontal_fov * np.pi / 180
w = 2 * height * np.tan(horizontal_fov_rad / 2)
return c / w
def calculate_bat_multiplier(height, horizontal_fov, distance_to_center):
    ''' Calculate how many bats one bat at a given height and camera
    location represents.
height: height of bat
horizontal_fov: horizontal field of view of camera (degrees)
distance_to_center: distance from camera to center of colony
phi = arctan((height*tan(horizontal_fov/2)) / distance to center)
multiplier = pi / phi
'''
horizontal_fov_rad = horizontal_fov * np.pi / 180
distance_to_center = np.max([distance_to_center, 10e-5])
phi = np.arctan((height * np.tan(horizontal_fov_rad / 2))
/ distance_to_center
)
return np.pi/phi
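# Added sketch: for heights that are small relative to the distance to the colony
# center, the width-based approximation above and the exact angular version agree
# closely (illustrative numbers only).
def _multiplier_comparison_example():
    simple = calculate_bat_multiplier_simple(height=100, horizontal_fov=90,
                                             distance_to_center=1000)
    exact = calculate_bat_multiplier(height=100, horizontal_fov=90,
                                     distance_to_center=1000)
    return simple, exact  # both roughly 31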
def combined_bat_multiplier(frame_width, wingspan_meters,
wingspan_pixels, camera_distance):
""" Calculates bat multiplier.
Args:
frame_width: frame width in pixels
wingspan_meters: bat wingspan in meters
wingspan_pixels: bat wingspan in pixels
camera_distance: distance from forest point to camera in meters
should be a single value or an array of distances with same
shape as wingspan_pixels
Returns:
bat multiplier: float
"""
denominator = np.arctan(
(frame_width*wingspan_meters)
/ (2*wingspan_pixels*camera_distance)
)
return np.pi / denominator
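# Added numeric sketch (illustrative values): a 0.8 m wingspan seen as 20 pixels in a
# 4096-pixel-wide frame by a camera 1000 m from the colony center gives
# pi / arctan((4096 * 0.8) / (2 * 20 * 1000)) ~ 38 bats represented per bat counted.
def _combined_multiplier_example():
    return combined_bat_multiplier(frame_width=4096, wingspan_meters=0.8,
                                   wingspan_pixels=20, camera_distance=1000)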
def get_rects(track):
""" Fit rotated bounding rectangles to each contour in track.
track: track dict with 'contour' key linked to list of cv2 contours
"""
rects = []
for contour in track['contour']:
if len(contour.shape) > 1:
rect = cv2.minAreaRect(contour)
rects.append(rect[1])
else:
rects.append((np.nan, np.nan))
return np.array(rects)
def get_wingspan(track):
""" Estimate wingspan in pixels from average of peak sizes of longest
rectangle edges.
"""
if not 'rects' in track.keys():
track['rects'] = get_rects(track)
max_edge = np.nanmax(track['rects'], 1)
max_edge = max_edge[~np.isnan(max_edge)]
peaks = signal.find_peaks(max_edge)[0]
if len(peaks) != 0:
mean_wing = np.nanmean(max_edge[peaks])
else:
mean_wing = np.nanmean(max_edge)
return mean_wing
def measure_crossing_bats(track_list, frame_height=None, frame_width=None,
count_across=False, count_out=True, num_frames=None,
with_rects=True):
""" Find and quantify all tracks that cross middle line.
track_list: list of track dicts
frame_height: height of frame in pixels
frame_width: width of frame in pixels
count_across: count horizontal tracks
count_out: count vertical tracks
num_frames: number of frames in observation
with_rects: if True calculate rects if not already
in track and estimate wingspan and body size
"""
if count_across:
        assert frame_width, "If counting across, must specify frame width."
across_line = CountLine(int(frame_width/2), line_dim=0, total_frames=num_frames)
if count_out:
        assert frame_height, "If counting out, must specify frame height."
out_line = CountLine(int(frame_height/2), line_dim=1, total_frames=num_frames)
crossing_track_list = []
for track_ind, track in enumerate(track_list[:]):
out_result = None
across_result = None
if count_out:
out_result, out_frame_num = out_line.is_crossing(track, track_ind)
if count_across:
across_result, across_frame_num = across_line.is_crossing(track, track_ind)
if out_result or across_result:
crossing_track_list.append(track)
# result is 1 if forward crossing -1 is backward crossing
if count_out:
if out_frame_num:
crossing_track_list[-1]['crossed'] = out_frame_num * out_result
else:
crossing_track_list[-1]['crossed'] = 0
if count_across:
if across_frame_num:
crossing_track_list[-1]['across_crossed'] = across_frame_num * across_result
else:
crossing_track_list[-1]['across_crossed'] = 0
            track['id'] = track_ind
if with_rects:
if not 'rects' in track.keys():
track['rects'] = get_rects(track)
min_edge = np.nanmin(track['rects'], 1)
min_edge = min_edge[~np.isnan(min_edge)]
                peaks = signal.find_peaks(min_edge)[0]
                if len(peaks) != 0:
                    mean_body = np.nanmean(min_edge[peaks])
                else:
                    mean_body = np.nanmean(min_edge)
crossing_track_list[-1]['mean_wing'] = get_wingspan(track)
crossing_track_list[-1]['mean_body'] = mean_body
return crossing_track_list
def get_camera_locations(observations, all_camera_locations, exclude=False):
"""Return dict of all camera locations that appear in observations.
observations: dict of observations. Probably all observations from one day.
all_camera_locations: dict containing all camera locations across all days
exclude: if True, exclude observations as marked in obs dict
"""
camera_locations = {}
for camera, obs in observations.items():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
continue
camera_locations[obs['camera']] = all_camera_locations[obs['camera']]
return camera_locations
def get_camera_distance(camera_utm, center_utm):
""" Calculate the distance between utm of camera and possible
forest center in meters.
camera_utm: [x, y] array
center_utm: [x, y] array
"""
diff = camera_utm - center_utm
    return np.sqrt(np.sum(diff ** 2))
def get_camera_distances(camera_utms, center_utm):
""" Calculate distance from every given camera to specified center.
camera_utms: dict with camera names and locations
center_utm: np.array 2d, location of forest center
"""
camera_distances = {}
for camera, camera_utm in camera_utms.items():
camera_distances[camera] = get_camera_distance(camera_utm,
center_utm)
return camera_distances
def get_camera_angles(camera_utms, center_utm):
""" Calculate angle from center point to each camera location.
camera_utms: dict pairs of camera names and location info
center_utm: 2d np.array, location of forest center
"""
camera_angles = {}
for camera, camera_utm in camera_utms.items():
dif = camera_utm - center_utm
camera_angles[camera] = np.arctan2(dif[1], dif[0])
return camera_angles
def get_camera_borders(camera_utms, center_utm, jitter=False):
""" Get angles around forest center that evenly bisect camera positions.
camera_utms: dict pairs of camera names and location info
center_utm: 2d np.array, location of forest center
    jitter: if True, don't bisect cameras exactly at the midpoint; instead the
        border offset is drawn from a gaussian
"""
camera_border = {}
camera_angles = get_camera_angles(camera_utms, center_utm)
for camera, camera_utm in camera_utms.items():
min_neg = -10000
min_pos = 100000
# for border case where focal is positive angle
# and closest cclock is negative
max_pos = 0
        # for same case as last comment
all_pos = True
# for border case where focal is positive angle
# and closest cclock is negative
max_neg = 0
        # for same case as last comment
all_neg = True
max_camera = None
camera_border[camera] = {'cclock': None,
'cclock_angle': None,
'clock': None,
'clock_angle': None
}
for alt_camera, alt_camera_utm in camera_utms.items():
if camera == alt_camera:
continue
dif = camera_angles[camera] - camera_angles[alt_camera]
if dif < 0:
all_pos = False
if dif > min_neg:
min_neg = dif
camera_border[camera]['cclock'] = alt_camera
camera_border[camera]['cclock_angle'] = dif / 2
if dif < max_neg:
max_neg = dif
max_camera = alt_camera
if dif > 0:
all_neg = False
if dif < min_pos:
min_pos = dif
camera_border[camera]['clock'] = alt_camera
camera_border[camera]['clock_angle'] = dif / 2
if dif > max_pos:
max_pos = dif
max_camera = alt_camera
if all_pos:
camera_border[camera]['cclock'] = max_camera
camera_border[camera]['cclock_angle'] = (max_pos - 2*np.pi) / 2
if all_neg:
camera_border[camera]['clock'] = max_camera
camera_border[camera]['clock_angle'] = (max_neg + 2*np.pi) / 2
if jitter:
for camera, border_info in camera_border.items():
camera_angle = camera_angles[camera]
clockwise_camera = border_info['clock']
angle_dif = border_info['clock_angle']
            # Three standard deviations is between camera pair
jitter_angle = np.random.normal(scale=angle_dif/3)
jitter_angle = np.maximum(-border_info['clock_angle'],
jitter_angle)
jitter_angle = np.minimum(border_info['clock_angle'],
jitter_angle)
camera_border[camera]['clock_angle'] += jitter_angle
if camera_border[camera]['clock_angle'] < 0:
camera_border[camera]['clock_angle'] += (2 * np.pi)
if camera_border[camera]['clock_angle'] >= (2 * np.pi):
camera_border[camera]['clock_angle'] -= (2 * np.pi)
camera_border[clockwise_camera]['cclock_angle'] += jitter_angle
if camera_border[clockwise_camera]['cclock_angle'] < -2 * np.pi:
camera_border[clockwise_camera]['cclock_angle'] += (2 * np.pi)
if camera_border[clockwise_camera]['cclock_angle'] >= (2 * np.pi):
camera_border[clockwise_camera]['cclock_angle'] -= (2 * np.pi)
return camera_border
def latlong_dict_to_utm(latlong_dict):
""" Convert dict of latlong coordinates to utm."""
utm_dict = {}
for key, latlong in latlong_dict.items():
utm_val = utm.from_latlon(*latlong)
utm_dict[key] = np.array([utm_val[0], utm_val[1]])
return utm_dict
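# Hedged usage sketch: converting assumed lat/long camera positions to UTM and
# measuring their distances to a rough center. Coordinates are placeholders,
# not real camera locations.
def _example_camera_distances():
    latlong = {'cam_a': (-12.55, 30.23), 'cam_b': (-12.56, 30.25)}  # assumed
    camera_utms = latlong_dict_to_utm(latlong)
    center_utm = np.mean(np.stack(list(camera_utms.values())), axis=0)
    return get_camera_distances(camera_utms, center_utm)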
def get_camera_fractions(camera_utms, center_utm, jitter=False):
""" Calculate the fraction of circle around center that each camera is closest to.
camera_utms: dict of camera locations
center_utm: 2d np array with utm coordinates of center
    jitter: if True, instead of evenly dividing the circle between cameras,
        draw the borders between cameras from a gaussian
return dict with fraction for each camera
"""
if len(camera_utms) == 1:
return {list(camera_utms.keys())[0]: 1.0}
camera_borders = get_camera_borders(camera_utms,
center_utm,
jitter=jitter)
camera_fractions = {}
for camera, border_info in camera_borders.items():
angle = (-border_info['cclock_angle']
+ border_info['clock_angle']
)
camera_fractions[camera] = angle / (np.pi * 2)
return camera_fractions
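# Hedged sketch: with three assumed cameras around a central point, the
# returned fractions partition the circle and should sum to roughly 1.0.
# Positions below are illustrative placeholders.
def _example_camera_fractions():
    latlong = {'cam_a': (-12.55, 30.23), 'cam_b': (-12.56, 30.25),
               'cam_c': (-12.54, 30.26)}                    # assumed positions
    camera_utms = latlong_dict_to_utm(latlong)
    center_utm = np.mean(np.stack(list(camera_utms.values())), axis=0)
    return get_camera_fractions(camera_utms, center_utm)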
def get_day_total(observations, center_utm, all_camera_utms,
frame_width, wingspan, exclude=False,
correct_darkness=False, parameters=None):
""" Estimate total number of bats based on all observation counts
and corespoinding camera locations.
observations: dict of all observations for a specific day
center_utm: estimated location of forest center
all_camera_utms: dict of the utm locations of each camera
frame_width: width of camera frame in pixels
wingspan: estimated wingspan off all bats in meters
exlude: to manually remove certain cameras, ie shut off early etc.
correct_darkness: divide by accuracy estimated for given darkness
parameters: param values of linear piecewise function for darkness
error correction. Required if correct_darkness is True
"""
frac_sum = 0
total = 0
obs_totals = []
camera_utms = get_camera_locations(observations, all_camera_utms, exclude=True)
camera_fractions = get_camera_fractions(camera_utms, center_utm)
for obs in observations.values():
if exclude:
if 'exclude' in obs.keys():
if obs['exclude']:
continue
camera_distances = get_camera_distances(camera_utms, center_utm)
obs['multiplier'] = combined_bat_multiplier(frame_width,
wingspan,
obs['mean_wing'],
camera_distances[obs['camera']]
)
if correct_darkness:
assert parameters is not None, "Must pass parameters if correcting for darkness."
acc = piecewise_linear(obs['darkness'], *parameters)
obs['total_darkness'] = np.sum(obs['multiplier'] * obs['direction'] * (1/acc))
obs['total'] = np.sum(obs['multiplier'] * obs['direction'])
        obs['total_unscaled'] = np.sum(obs['direction'])
from chainer.training import extensions
import chainer.serializers as S
import chainer
import os
import json
from chainer.training.triggers import IntervalTrigger
from collections import defaultdict
import numpy as np
class Trainer(object):
def __init__(self, folder, chain, train, test, batchsize=500, resume=True, gpu=0, nepoch=1, reports=[]):
self.reports = reports
self.nepoch = nepoch
self.folder = folder
self.chain = chain
self.gpu = gpu
if self.gpu >= 0:
chainer.cuda.get_device(gpu).use()
chain.to_gpu(gpu)
self.eval_chain = eval_chain = chain.copy()
self.chain.test = False
self.eval_chain.test = True
self.testset = test
if not os.path.exists(folder):
os.makedirs(folder)
train_iter = chainer.iterators.SerialIterator(train, batchsize, shuffle=True)
test_iter = chainer.iterators.SerialIterator(test, batchsize,
repeat=False, shuffle=False)
updater = chainer.training.StandardUpdater(train_iter, chain.optimizer, device=gpu)
trainer = chainer.training.Trainer(updater, (nepoch, 'epoch'), out=folder)
# trainer.extend(TrainingModeSwitch(chain))
# trainer.extend(extensions.dump_graph('main/loss'))
trainer.extend(extensions.Evaluator(test_iter, eval_chain, device=gpu), trigger=(1, 'epoch'))
trainer.extend(extensions.snapshot_object(
chain, 'chain_snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
trainer.extend(extensions.snapshot(
filename='snapshot_epoch_{.updater.epoch:06}'), trigger=(1, 'epoch'))
trainer.extend(extensions.LogReport(trigger=(1, 'epoch')), trigger=(1, 'iteration'))
trainer.extend(extensions.PrintReport(
['epoch', 'main/loss', 'validation/main/loss', 'main/accuracy', 'validation/main/accuracy',
'elapsed_time']), trigger=IntervalTrigger(1, 'epoch'))
self.trainer = trainer
if resume:
# if resumeFrom is not None:
# trainerFile = os.path.join(resumeFrom[0],'snapshot_epoch_{:06}'.format(resumeFrom[1]))
# S.load_npz(trainerFile, trainer)
i = 1
trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
while i <= nepoch and os.path.isfile(trainerFile):
i = i + 1
trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
i = i - 1
trainerFile = os.path.join(folder, 'snapshot_epoch_{:06}'.format(i))
if i >= 0 and os.path.isfile(trainerFile):
S.load_npz(trainerFile, trainer)
def run(self):
if self.gpu >= 0:
chainer.cuda.get_device(self.gpu).use()
self.chain.to_gpu(self.gpu)
self.chain.test = False
self.eval_chain.test = True
self.trainer.run()
# ext = self.trainer.get_extension('validation')()
# test_accuracy = ext['validation/main/accuracy']
# test_loss = ext['validation/main/loss']
# acc = test_accuracy.tolist()
# loss = test_loss.tolist()
if self.gpu >= 0:
self.chain.to_cpu()
return
# return self.evaluate()
# return acc,loss
def evaluate(self):
test_iter = chainer.iterators.SerialIterator(self.testset, 1,
repeat=False, shuffle=False)
self.chain.train = False
self.chain.test = True
if self.gpu >= 0:
self.chain.to_gpu(self.gpu)
result = extensions.Evaluator(test_iter, self.chain, device=self.gpu)()
if self.gpu >= 0:
self.chain.to_cpu()
# for k,v in result.iteritems():
# if k in ["main/numsamples", "main/accuracy", "main/branch0exit", "main/branch1exit", "main/branch2exit"]:
# print k, "\t\t\t", v
return result
def save_model(self):
trainer = self.trainer
chain = self.chain
trainer.extend(extensions.snapshot_object(chain, 'so_epoch_{.updater.epoch:06}'), trigger=(1,'epoch'))
trainer.extend(extensions.snapshot(filename='s_epoch_{.updater.epoch:06}'), trigger=(1,'epoch'))
# Deprecated
def get_result(self, key):
        # this only returns the latest log
ext = self.trainer.get_extension('validation')()
        return ext.get('{}'.format(key), np.array(None))
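# Hedged usage sketch: how this wrapper is typically driven. The chain and
# datasets are placeholders; `chain` is assumed to already carry an
# `optimizer` attribute, as required by the StandardUpdater call above.
def _example_train(chain, train_set, test_set):
    trainer = Trainer('results/example_run', chain, train_set, test_set,
                      batchsize=128, resume=False, gpu=-1, nepoch=3)
    trainer.run()
    return trainer.evaluate()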
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import torch
import torch.utils.data as data
import os
import numpy as np
import pickle
import sys
TRAIN_SPLIT = 0.8
class Cornell(data.Dataset):
def __init__(self, root, train=True):
self.data_path = root
self.train = train
self.files = os.listdir(self.data_path)
if train:
train_files = self.files[:int(len(self.files) * TRAIN_SPLIT)]
self.num = len(train_files)
self.files = train_files
else:
test_files = self.files[int(len(self.files) * TRAIN_SPLIT):]
self.num = len(test_files)
self.files = test_files
def __len__(self):
return self.num
def __getitem__(self, index):
meta = self.files[index]
with open(os.path.join(self.data_path, meta), 'rb') as handle:
if sys.version_info.major > 2:
file = pickle.load(handle, encoding='latin1')
else:
file = pickle.load(handle)
rgb = np.asarray(file['rgb'])
rgb = np.squeeze(rgb, axis=0)
depth = np.asarray(file['depth_inpainted'])
depth = np.squeeze(depth, axis=0)
pos_img = np.asarray(file['grasp_points_img'])
pos_img = np.squeeze(pos_img, axis=0)
angle_img = np.asarray(file['angle_img'])
angle_img = np.squeeze(angle_img, axis=0)
width_img = np.asarray(file['grasp_width'])
width_img = np.squeeze(width_img, axis=0)
bbs = np.asarray(file['bounding_boxes'])
bbs = np.squeeze(bbs, axis=0)
rgb = torch.tensor(rgb)
depth = torch.tensor(depth)
grasp_points_img = torch.tensor(pos_img)
grasp_width = torch.tensor(width_img)
cos_img = np.cos(2 * angle_img)
        sin_img = np.sin(2 * angle_img)
"""
module for PPK positioning
Copyright (c) 2021 <NAME> (from CSSRLIB)
Copyright (c) 2022 <NAME>
"""
import numpy as np
from numpy.linalg import inv, norm
from sys import stdout
from copy import copy, deepcopy
import rtkcmn as gn
from rtkcmn import rCST, DTTOL, sat2prn, sat2freq, timediff, xyz2enu
import rinex as rn
from pntpos import pntpos
from ephemeris import satposs
from mlambda import mlambda
from rtkcmn import trace, tracemat, uGNSS
import __ppk_config as cfg
# from ppp import tidedisp
MAX_VAR_EPH = 300**2
def rtkinit(cfg):
    """ initialize RTK-GNSS parameters from config file """
    nav = gn.Nav(cfg)
nav.gnss_t = cfg.gnss_t
nav.pmode = cfg.pmode
nav.filtertype = cfg.filtertype
# add rover vel and accel states for kinematic solution
nav.na = nav.nq = 3 if nav.pmode == 'static' else 9
nav.nx = nav.na + uGNSS.MAXSAT * nav.nf
nav.x = np.zeros(nav.nx)
nav.P = np.zeros((nav.nx, nav.nx))
nav.xa = np.zeros(nav.na)
nav.Pa = np.zeros((nav.na, nav.na))
nav.el = np.zeros(uGNSS.MAXSAT)
nav.gf = np.zeros(uGNSS.MAXSAT)
nav.ph = np.zeros((2, uGNSS.MAXSAT, nav.nf))
nav.pt = np.empty((2, uGNSS.MAXSAT, nav.nf), dtype=object)
nav.nfix = nav.neb = nav.tt = 0
nav.rb = cfg.rb
# parameter for RTK/PPK
nav.use_sing_pos = cfg.use_sing_pos
nav.cnr_min = cfg.cnr_min
nav.maxout = cfg.maxout # maximum outage [epoch]
nav.elmin = np.deg2rad(cfg.elmin)
nav.nf = cfg.nf
nav.excsats = cfg.excsats
nav.freq = cfg.freq
nav.dfreq_glo = cfg.dfreq_glo
nav.interp_base = cfg.interp_base
nav.gnss_t = cfg.gnss_t
nav.maxinno = cfg.maxinno
nav.thresdop = cfg.thresdop
nav.thresslip = cfg.thresslip
nav.maxage = cfg.maxage
nav.accelh = cfg.accelh
nav.accelv = cfg.accelv
nav.prnbias = cfg.prnbias
# ambiguity resolution
nav.armode = cfg.armode
nav.glo_hwbias = cfg.glo_hwbias
nav.thresar = cfg.thresar
nav.thresar1 = cfg.thresar1
nav.var_holdamb = cfg.var_holdamb
nav.elmaskar = np.deg2rad(cfg.elmaskar)
nav.minfix = cfg.minfix
nav.minfixsats = cfg.minfixsats
nav.minholdsats = cfg.minholdsats
nav.mindropsats = cfg.mindropsats
nav.excsat_ix = 0
nav.nfix = 0
nav.ratio = 0
# statistics
nav.efact = cfg.efact
nav.eratio = cfg.eratio
nav.err = np.array(cfg.err)
nav.sig_p0 = cfg.sig_p0
nav.sig_v0 = cfg.sig_v0
nav.sig_n0 = cfg.sig_n0
# solution parameters
nav.sol = []
dP = np.diag(nav.P)
dP.flags['WRITEABLE'] = True
dP[0:3] = nav.sig_p0**2
if nav.pmode == 'kinematic':
dP[3:9] = nav.sig_v0**2
# obs index
ix0, ix1 = cfg.freq_ix0, cfg.freq_ix1
freq0 = {k: cfg.freq[ix0[k]] for k in ix0.keys()}
freq1 = {k: cfg.freq[ix1[k]] for k in ix1.keys()}
nav.obs_idx = [ix0, ix1]
nav.obs_freq = [freq0, freq1]
# sat index
nav.sysprn = {i: gn.sat2prn(i) for i in range(1, uGNSS.MAXSAT+1)}
return nav
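# Hedged usage sketch: rtkinit() is normally called once with the imported
# __ppk_config module before processing epochs; this just shows the call and a
# few of the fields initialized above.
def _example_rtkinit():
    nav = rtkinit(cfg)
    trace(3, 'rtkinit: nx=%d na=%d nf=%d\n' % (nav.nx, nav.na, nav.nf))
    return nav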
def zdres_sat(nav, obs, r, rtype, dant, ix):
_c = rCST.CLIGHT
nf = nav.nf
y = np.zeros(nf * 2)
for f in range(nf):
freq = sat2freq(obs.sat[ix], f, nav)
if obs.S[ix,f] < nav.cnr_min:
continue
# residuals = observable - estimated range (phase and code)
y[f] = obs.L[ix,f] * _c / freq - r - dant[f] if obs.L[ix,f] else 0
y[f+nf] = obs.P[ix,f] - r - dant[f] if obs.P[ix,f] else 0
# if obs.L[ix,f] != 0 or obs.P[ix,f] != 0:
# trace(3, 'zdres_sat: %d: L=%.6f P=%.6f r=%.6f f=%.0f\n' %
# (obs.sat[ix],obs.L[ix,f], obs.P[ix,f],r,freq))
return y
def zdres(nav, obs, rs, dts, svh, var, rr, rtype):
""" undifferenced phase/code residuals ----------------------------------------
calculate zero diff residuals [observed pseudorange - range]
output is in y[0:nu-1], only shared input with base is nav
args: I obs = sat observations
I n = # of sats
I rs = sat position {x,y,z} (m)
I dts = sat clock {bias,drift} (s|s/s)
I var = variance of ephemeris
I svh = sat health flags
I nav = sat nav data
I rr = rcvr pos (x,y,z)
I rtype: 0=base,1=rover
O y[] = zero diff residuals {phase,code} (m)
O e = line of sight unit vectors to sats
O azel = [az, el] to sats """
if obs == []:
return [], [], []
_c = rCST.CLIGHT
nf = nav.nf
n = len(obs.P)
y = np.zeros((n, nf * 2))
el = np.zeros(n)
e = np.zeros((n, 3))
rr_ = rr.copy()
trace(3, 'zdres: n=%d rr=%.2f %.2f %.2f\n' % (n, rr[0], rr[1], rr[2]))
pos = gn.ecef2pos(rr_)
# loop through satellites
ix = np.argsort(obs.sat)
for i in ix:
if gn.satexclude(obs.sat[i], var[i], svh[i], nav):
continue
# compute geometric-range and azimuth/elevation angle
r, e[i,:] = gn.geodist(rs[i,0:3], rr_)
_, el[i] = gn.satazel(pos, e[i,:])
if el[i] < nav.elmin:
continue
# adjust range for satellite clock-bias
r += -_c * dts[i]
# adjust range for troposphere delay model (hydrostatic)
trophs, tropw, _ = gn.tropmodel(obs.t, pos, np.deg2rad(90.0), 0.0)
zhd = trophs + tropw
mapfh, _ = gn.tropmapf(obs.t, pos, el[i])
r += mapfh * zhd
# calc receiver antenna phase center correction
dant = gn.antmodel(nav, el[i], nav.nf, rtype)
trace(4,'sat=%d r=%.6f c*dts=%.6f zhd=%.6f map=%.6f\n' % (obs.sat[i],r,_c*dts[i],zhd,mapfh))
# calc undifferenced phase/code residual for satellite
y[i] = zdres_sat(nav, obs, r, rtype, dant, i)
for i in ix:
if obs.L[i,0] != 0 and rtype == 0:
trace(4, 'sat=%2d %13.3f %13.3f %13.3f %13.10f %5.1f\n' %
(obs.sat[i], rs[i,0], rs[i,1], rs[i,2], dts[i],
np.rad2deg(el[i])))
tracemat(3, 'y=', y[ix,:].T, '13.3f')
return y, e, el
def ddcov(nb, n, Ri, Rj, nv):
""" double-differenced measurement error covariance ---------------------------
*
* nb[n]: # of sat pairs in group
* n: # of groups (2 for each system, phase and code)
* Ri[nv]: variances of first sats in double diff pairs
* Rj[nv]: variances of 2nd sats in double diff pairs
* nv: total # of sat pairs
* R[nv][nv]: double diff measurement err covariance matrix """
R = np.zeros((nv, nv))
k = 0
for b in range(n):
block = R[k:nb[b]+k, k:nb[b]+k] # define subarray
block += Ri[k:nb[b]+k]
block[range(nb[b]), range(nb[b])] += Rj[k:nb[b]+k]
k += nb[b]
return R
def sysidx(satlist, sys_ref):
""" return index of satellites with sys=sys_ref """
idx = []
for k, sat in enumerate(satlist):
sys, _ = sat2prn(sat)
if sys == sys_ref:
idx.append(k)
return idx
def IB(s, f, na=3):
""" return index of phase ambguity """
return na + uGNSS.MAXSAT * f + s - 1
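# Hedged sketch of the ambiguity-state layout implied above: na base states
# come first, then one block of uGNSS.MAXSAT phase biases per frequency, so
# sat s / freq f maps to na + MAXSAT*f + (s - 1). Indices for illustration only.
def _example_ib_layout(na=9):
    first_sat_f0 = IB(1, 0, na)              # == na
    first_sat_f1 = IB(1, 1, na)              # == na + uGNSS.MAXSAT
    last_sat_f0 = IB(uGNSS.MAXSAT, 0, na)    # == na + uGNSS.MAXSAT - 1
    return first_sat_f0, first_sat_f1, last_sat_f0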
def varerr(nav, sys, el, f, dt, rcvstd):
""" variation of measurement """
code = 1 * (f >= nav.nf) # 0 = phase, 1 = code
freq = f % nav.nf
s_el = np.sin(el)
if s_el <= 0.0:
return 0.0
fact = nav.eratio[freq] if code else 1
fact *= nav.efact[sys]
a, b = fact * nav.err[1:3]
c = fact * 0 # nav.err[4]*bl/1E4 # TODO: add baseline term
d = rCST.CLIGHT * nav.err[5] * dt # clock term
var = 2.0 * (a**2 + (b / s_el)**2 + c**2) + d**2
# TODO: add SNR term
# add scaled stdevs from receiver
if nav.err[3] > 0:
var += (nav.err[3] * rcvstd)**2
return var
def ddres(nav, x, P, yr, er, yu, eu, sat, el, dt, obsr):
""" /* double-differenced residuals and partial derivatives -----------------------------------
I nav = sat nav data
I dt = time diff between base and rover observations
I x = rover pos & vel and sat phase biases (float solution)
I P = error covariance matrix of float states
I sat = list of common sats
I y = zero diff residuals (code and phase, base and rover)
I e = line of sight unit vectors to sats
I el = el to sats
O v = double diff innovations (measurement-model) (phase and code)
O H = linearized translation from innovations to states (az/el to sats)
O R = measurement error covariances """
_c = rCST.CLIGHT
nf = nav.nf
ns = len(el)
ny = ns * nf * 2 # phase and code
nb = np.zeros(2 * len(nav.gnss_t) * nf, dtype=int)
Ri = np.zeros(ny)
Rj = np.zeros(ny)
H = np.zeros((nav.nx, ny))
P_init = nav.sig_n0**2 # value used to initialize P states
trace(3,"ddres : dt=%.4f ns=%d\n" % (dt, ns))
nv = b = 0
v = np.zeros(ny)
# step through sat systems
for sys in nav.gnss_t:
# step through phases/codes
for f in range(0, nf*2):
frq = f % nf
code = 1 * (f >= nf)
idx = sysidx(sat, sys) # find sats in sys
# remove sats with missing base or rover residuals
nozero = np.where((yr[:,f] != 0) & (yu[:,f] != 0))[0]
idx = np.intersect1d(idx, nozero)
if len(idx) == 0:
continue # no common sats
# find sat with max el and not just reset for reference
i_el = idx[np.argsort(el[idx])]
for i in i_el[::-1]:
ii = IB(sat[i], frq, nav.na)
# check if sat just reset
if P[ii,ii] != nav.sig_n0**2:
break
else:
i = i_el[0] # use highest sat if none without reset
# calculate double differences of residuals (code/phase) for each sat
freqi = sat2freq(sat[i], frq, nav)
lami = _c / freqi
for j in idx: # loop through sats
if i == j: continue # skip ref sat
# double-differenced measurements from 2 receivers and 2 sats in meters
v[nv] = (yu[i,f] - yr[i,f]) - (yu[j,f] - yr[j,f])
# partial derivatives by rover position, combine unit vectors from two sats
H[0:3, nv] = -eu[i,:] + er[j,:]
jj = IB(sat[j], frq, nav.na)
if not code: # carrier phase
# adjust phase residual by double-differenced phase-bias term
freqj = sat2freq(sat[j], frq, nav)
lamj = _c / freqj
v[nv] -= lami * x[ii] - lamj * x[jj]
H[ii, nv], H[jj, nv] = lami, -lamj
# adjust double-difference for glonass hw bias
if sys == uGNSS.GLO and nav.glo_hwbias != 0:
df = (freqi - freqj) / nav.dfreq_glo[frq]
v[nv] -= df * nav.glo_hwbias
# if residual too large, flag as outlier
thres = nav.maxinno
# use larger thresh for code or just initialized phase
if code or P[ii,ii] == P_init or P[jj,jj] == P_init:
thres *= nav.eratio[frq]
if abs(v[nv]) > thres:
nav.vsat[sat[j]-1,frq] = 0
nav.rejc[sat[j]-1,frq] += 1
trace(3,"outlier rejected: (sat=%3d-%3d %s%d v=%13.3f x=%13.3f %13.3f P=%.6f %.6f)\n"
% (sat[i], sat[j], 'LP'[code],frq+1, v[nv], x[ii], x[jj], P[ii,ii],P[jj,jj]))
H[ii, nv], H[jj, nv] = 0, 0
continue
# single-differenced measurement error variances (m)
si, sj = sat[i] - 1, sat[j] - 1
Ri[nv] = varerr(nav, sys, el[i], f, dt, nav.rcvstd[si,f])
Rj[nv] = varerr(nav, sys, el[j], f, dt, nav.rcvstd[sj,f])
if not code:
nav.vsat[si,frq] = 1
nav.vsat[sj,frq] = 1
trace(3,"sat=%3d-%3d %s%d v=%13.3f R=%9.6f %9.6f lock=%2d x=%13.3f\n" %
(sat[i], sat[j], 'LP'[code], frq+1, v[nv], Ri[nv], Rj[nv], nav.lock[sat[j]-1,frq],x[jj]))
nv += 1
nb[b] += 1
b += 1
R = ddcov(nb, b, Ri[:nv], Rj[:nv], nv)
return v[:nv], H[:,:nv], R
def valpos(nav, v, R, thres=4.0):
""" post-file residual test """
trace(3, 'valpos : nv=%d thres=%.1f\n' % (len(v), thres))
nv = len(v)
fact = thres**2
for i in range(nv):
if v[i]**2 > fact * R[i, i]:
trace(3, 'large residual (ix_sat=%d v=%.3f sig=%.3f)\n' %
(i, v[i], np.sqrt(R[i, i])))
return True
def intpres(time, nav, y0, y1, obs0, obs1):
""" time-interpolation of residuals """
tt, ttb = timediff(time, obs1.t), timediff(time, obs0.t)
if len(y0) == 0 or abs(ttb) > nav.maxage or abs(tt) < DTTOL:
return y1, tt
# find common sats
_, ix0, ix1 = np.intersect1d(obs0.sat, obs1.sat, return_indices=True)
for i in range(len(ix0)):
for j in range(4):
i0, i1 = ix0[i], ix1[i]
if y1[i1,j] == 0:
y1[i1,j] = y0[i0,j]
elif y0[i0,j] != 0:
y1[i1,j] = (ttb * y1[i1,j] - tt * y0[i0,j]) / (ttb - tt)
dt = min(abs(tt), abs(ttb)) / np.sqrt(2)
return y1, dt
def ddidx(nav, sats):
""" index for single to double-difference transformation matrix (D') """
nb, fix, ref = 0, [], []
ns = uGNSS.MAXSAT
#na = nav.na
ix = np.zeros((ns, 2), dtype=int)
# clear fix flag for all sats (1=float, 2=fix)
nav.fix[:,:] = 0
# step through constellations
for m in range(uGNSS.GNSSMAX):
k = nav.na # state index for first sat
# step through freqs
for f in range(nav.nf):
# look for first valid sat (i=state index, i-k=sat index)
for i in range(k, k + ns):
sati = i - k + 1
# if sati not in sats:
# xxx=1
sys = nav.sysprn[sati][0]
# skip if sat not active
if nav.x[i] == 0.0 or sys != m or nav.vsat[sati-1,f] == 0:
continue
if nav.lock[sati-1,f] >= 0 and nav.slip[sati-1,f] & 2 == 0 and \
nav.el[sati-1] >= nav.elmaskar:
# set sat to use for fixing ambiguity if meets criteria
nav.fix[sati-1,f] = 2 # fix
break # break out of loop if find good sat
else: # don't use this sat for fixing ambiguity
nav.fix[sati-1,f] = 1 # float
if nav.fix[sati-1,f] != 2: # no good sat found
continue
n = 0 # count of sat pairs for this freq/constellation
# step through all sats (j=state index, j-k=sat index, i-k=first good sat)
for j in range(k, k + ns):
satj = j - k + 1
sys = nav.sysprn[satj][0]
if i == j or nav.x[j] == 0.0 or sys != m or nav.vsat[satj-1,f] <= 0:
continue
if nav.lock[satj-1,f] >= 0 and nav.slip[satj-1,f] & 2 == 0 and \
nav.el[satj-1] >= nav.elmaskar:
# set D coeffs to subtract sat j from sat i
ix[nb, :] = [i,j] # state indices of ref bias and target bias
ref.append(sati)
fix.append(satj)
nav.fix[satj-1,f] = 2 # fix
nb += 1 # increment total count
n += 1 # inc count in freq/constellation
else: # don't use this sat for fixing ambiguity
nav.fix[satj-1,f] = 1 # float
if n == 0: # don't use ref sat if no sat pairs
nav.fix[sati-1,f] = 1
k += ns
ix = np.resize(ix, (nb, 2))
if nb > 0:
tracemat(3,'refSats= ', np.array(ref), '7d')
tracemat(3,'fixSats= ', np.array(fix), '7d')
return ix
def restamb(nav, bias, nb):
""" restore SD ambiguity """
nv = 0
xa = nav.x.copy()
xa[0:nav.na] = nav.xa[0:nav.na]
for m in range(uGNSS.GNSSMAX):
for f in range(nav.nf):
n = 0
index = []
for i in range(uGNSS.MAXSAT):
sys = nav.sysprn[i+1][0]
if sys != m or (sys not in nav.gnss_t) or nav.fix[i, f] != 2:
continue
index.append(IB(i+1, f, nav.na))
n += 1
if n < 2:
continue
xa[index[0]] = nav.x[index[0]]
for i in range(1, n):
xa[index[i]] = xa[index[0]] - bias[nv]
nv += 1
return xa
def resamb_lambda(nav, sats):
""" resolve integer ambiguity using LAMBDA method """
nx = nav.nx
na = nav.na
xa = np.zeros(na)
ix = ddidx(nav, sats)
nav.nb_ar = nb = len(ix)
if nb <= nav.minfixsats - 1: # nb is sat pairs
trace(3, 'resamb_lambda: not enough valid double-differences DD\n')
return -1, -1
# y=D*xc, Qb=D*Qc*D', Qab=Qac*D'
y = nav.x[ix[:, 0]] - nav.x[ix[:, 1]]
DP = nav.P[ix[:, 0], na:nx] - nav.P[ix[:, 1], na:nx]
Qb = DP[:, ix[:, 0] - na] - DP[:, ix[:, 1] - na]
Qab = nav.P[0:na, ix[:, 0]] - nav.P[0:na, ix[:, 1]]
tracemat(3,'N(0)= ', y, '7.3f')
tracemat(3, 'Qb*1000= ', 1000 * np.diag(Qb[0:nb]), '7.4f')
# MLAMBDA ILS
b, s = mlambda(y, Qb)
tracemat(3,'N(1)= ', b[:,0], '7.3f')
tracemat(3,'N(2)= ', b[:,1], '7.3f')
nav.ratio = s[1] / s[0]
if s[0] <= 0.0 or nav.ratio >= nav.thresar:
trace(3,'resamb : validation OK (nb=%d ratio=%.2f\n s=%.2f/%.2f\n'
% (nb, nav.ratio, s[1], s[0]))
nav.xa = nav.x[0:na].copy()
nav.Pa = nav.P[0:na, 0:na].copy()
bias = b[:, 0]
y -= b[:, 0]
K = Qab @ inv(Qb)
nav.xa -= K @ y
nav.Pa -= K @ Qab.T
# restore single diff ambiguity
xa = restamb(nav, bias, nb)
else:
trace(3,'ambiguity validation failed (nb=%d ratio=%.2f\n s=%.2f/%.2f'
% (nb, nav.ratio, s[1], s[0]))
nb = 0
return nb, xa
def manage_amb_LAMBDA(nav, sats, stat, posvar):
""" resolve integer ambiguity by LAMBDA using partial fix techniques and
multiple attempts """
trace(3, 'posvar=%.6f\n' % posvar)
trace(3, 'prevRatios = %.3f %.3f\n' % (nav.prev_ratio1, nav.prev_ratio2))
# skip AR if don't meet criteria
if stat != gn.SOLQ_FLOAT or posvar > nav.thresar1:
nav.ratio, nav.prev_ratio1, nav.prev_ratio2, nav.nb_ar = 0, 0, 0, 0
trace(3, 'Skip AR\n')
return 0, []
# if no fix on previous sample and enough sats, exclude next sat in list
excflag = False
if nav.prev_ratio2 < nav.thresar and nav.nb_ar >= nav.mindropsats:
# find and count sats used last time for AR
arsats = np.where(nav.prev_fix == 2)[0]
excflag = 0
if nav.excsat_ix < len(arsats):
excsat = arsats[nav.excsat_ix] + 1
lockc = copy(nav.lock[excsat-1]) # save lock count
# remove sat from AR long enough to enable hold if stays fixed
nav.lock[excsat-1] = -nav.nb_ar
trace(3, 'AR: exclude sat %d\n' % excsat);
excflag = True
nav.excsat_ix += 1
else:
nav.excsat_ix = 0 # exclude none and reset to beginning of list
# initial ambiguity resolution attempt, include all enabled sats
nb, xa = resamb_lambda(nav, sats)
ratio1 = nav.ratio
rerun = False
# if results are much poorer than previous epoch or dropped below AR ratio
# thresh, remove new sats
trace(3, 'lambda: nb=%d r1= %.3f r2=%.3f r=%.3f\n' % ((nb, nav.prev_ratio1, nav.prev_ratio2, nav.ratio)))
if nb >= 0 and nav.prev_ratio2 >= nav.thresar and (nav.ratio < nav.thresar
or (nav.ratio < nav.thresar * 1.1 and nav.ratio < nav.prev_ratio1 / 2.0)):
trace(3, 'low ratio: check for new sat\n')
dly = 2
ix = np.where((nav.fix >= 2) & (nav.lock == 0))
for i,f in zip(ix[0],ix[1]):
nav.lock[i,f] = -dly
dly +=2
trace(3, 'remove sat %d:%d lock=%d\n' % (i+1, f, nav.lock[i,f]))
rerun = True
# rerun if filter removed any sats
if rerun:
trace(3, 'rerun AR with new sat removed\n')
nb, xa = resamb_lambda(nav, sats)
# restore excluded sat if still no fix or significant increase in ar ratio
if excflag and nav.ratio < nav.thresar and nav.ratio < 1.5* nav.prev_ratio2:
nav.lock[excsat-1] = lockc
trace(3, 'AR: restore sat %d\n' % excsat)
nav.prev_ratio1, nav.prev_ratio2 = ratio1, nav.ratio
return nb, xa
def initx(nav, x0, v0, i):
""" initialize x and P for index i """
nav.x[i] = x0
nav.P[i,:] = 0
nav.P[:,i] = 0
nav.P[i,i] = v0
def detslp_dop(rcv, nav, obs, ix):
""" detect cycle slip with doppler measurement """
if nav.thresdop <= 0:
return
# calculate doppler differences for all sats and freqs
ns = len(ix)
mean_dop = ndop = 0
dopdif = np.zeros((ns, nav.nf))
tt = np.zeros((ns, nav.nf))
for i, ii in enumerate(ix):
sat = obs.sat[ii] - 1
for f in range(nav.nf):
if obs.L[ii,f] == 0.0 or obs.D[ii,f] == 0.0 or nav.ph[rcv,sat,f] == 0.0 \
or nav.pt[rcv,sat,f] == None:
continue
tt[i,f] = timediff(obs.t, nav.pt[rcv,sat,f])
if abs(tt[i,f]) < DTTOL:
continue
# calc phase difference and doppler x time (cycle)
dph = (obs.L[ii,f] - nav.ph[rcv,sat,f]) / tt[i,f]
dpt = -obs.D[ii,f]
dopdif[i,f] = dph - dpt
# if not outlier, use this to calculate mean
if abs(dopdif[i,f]) < 3 * nav.thresdop:
mean_dop += dopdif[i,f]
ndop += 1
# calc mean doppler diff, most likely due to clock error
if ndop == 0:
trace(4, 'detslp_dop rcv=%d: no valid doppler diffs\n' % (rcv+1))
return # unable to calc mean doppler, usually very large clock err
mean_dop /= ndop
# set slip if doppler difference with mean removed exceeds threshold
for i, ii in enumerate(ix):
sat = obs.sat[ii] - 1
for f in range(nav.nf):
if dopdif[i,f] == 0.0:
continue
if abs(dopdif[i,f] - mean_dop) > nav.thresdop:
nav.slip[sat,f] |= 1
trace(3, "slip detected doppler (sat=%2d rcv=%d dL%d=%.3f off=%.3f tt=%.2f)\n"
% (sat+1, rcv+1, f+1, dopdif[i,f] - mean_dop, mean_dop, tt[i,f]))
def detslp_gf(nav, obsb, obsr, iu, ir):
""" detect cycle slip with geometry-free LC """
# skip if check disabled
if nav.thresslip == 0 or nav.nf < 2:
return
ns = len(iu)
_c = rCST.CLIGHT
for i in range(ns):
sat = obsr.sat[iu[i]] - 1
# skip check if slip already detected
if (nav.slip[sat,0] & 1) or (nav.slip[sat,1] & 1):
continue
        # calc SD geometry-free LC of phase between freq0 and freq1
L1R = obsr.L[iu[i],0]
L2R = obsr.L[iu[i],1]
L1B = obsb.L[ir[i],0]
L2B = obsb.L[ir[i],1]
if L1R == 0.0 or L1B == 0.0 or L2R == 0 or L2B == 0:
trace(4, 'gf: skip sat %d, L=0\n' % sat)
continue
freq0 = sat2freq(sat + 1, 0, nav)
freq1 = sat2freq(sat + 1, 1, nav)
gf1 = ((L1R - L1B) * _c / freq0 - (L2R - L2B) * _c / freq1)
if gf1 == 0:
continue
gf0 = nav.gf[sat] #retrieve previous gf
nav.gf[sat] = gf1 # save current gf for next epoch
if gf0 !=0.0 and abs(gf1 - gf0) > nav.thresslip:
nav.slip[sat,0] |= 1
nav.slip[sat,1] |= 1
trace(3, "slip detected GF jump (sat=%2d L1-L2 dGF=%.3f)\n" %
(sat + 1, gf0 - gf1))
def detslp_ll(nav, obs, ix, rcv):
""" detect cycle slip from rinex file flags """
# retrieve previous LLI
LLI = nav.prev_lli[:,:,rcv]
ixsat = obs.sat[ix] - 1
initP = (nav.sig_n0 / 2)**2 # init value for slips
slip = np.zeros_like(nav.slip)
for f in range(nav.nf):
ixL = np.where(obs.L[ix,f] != 0)[0]
if nav.tt >= 0: # forward
slip[ixsat[ixL],f] |= (obs.lli[ix[ixL],f] & 3)
else: # backward
slip[ixsat[ixL],f] |= (LLI[ixsat[ixL],f] & 3)
# detect slip by parity unknown flag transition in LLI
hc_slip = np.where((obs.lli[ix[ixL],f] & 2) !=
(LLI[ixsat[ixL],f] & 2))[0]
if len(hc_slip) > 0:
slip[ixsat[ixL[hc_slip]],f] |= 1
ixslip = np.where((slip[ixsat[ixL],f] & 1) != 0)[0]
slipsats = ixsat[ixL[ixslip]] + 1
ib = IB(slipsats, f, nav.na)
for i in ib:
nav.P[i,i] = max(nav.P[i,i], initP)
# output results to trace
if len(slipsats) > 0:
trace(3, 'slip detected from LLI flags: f=%d, sats=%s\n' % (f, str(slipsats)))
trace(3, ' slip=%s\n' % str(slip[ixsat[ixL[ixslip]], f]))
def udpos(nav, sol):
""" states propagation for kalman filter """
tt = nav.tt
trace(3, 'udpos : tt=%.3f\n' % tt)
if nav.pmode == 'static':
return
# check variance of estimated position
    posvar = np.sum(np.diag(nav.P[0:3]))
"""Validate a face recognizer on the "Labeled Faces in the Wild" dataset (http://vis-www.cs.umass.edu/lfw/).
Embeddings are calculated using the pairs from http://vis-www.cs.umass.edu/lfw/pairs.txt and the ROC curve
is calculated and plotted.
"""
import os
import sys
import math
import pathlib
import argparse
import importlib
import numpy as np
from tqdm import tqdm
from scipy import misc
import tensorflow as tf
from sklearn import metrics
from scipy import interpolate
from scipy.optimize import brentq
from sklearn.model_selection import KFold
from adaptive_triplet_loss import AdaptiveTripletLoss
from custom_triplet_loss import TripletBatchHardLoss, TripletFocalLoss, TripletBatchHardV2Loss, AssortedTripletLoss, ConstellationLoss
from custom_triplet_loss import HAP2S_ELoss, HAP2S_PLoss
from model_utils import create_neural_network_v2
import model_utils
from tensorflow_similarity.losses import MultiSimilarityLoss
def _read_pairs(pairs_filename, lfw_path):
pairs = []
actual_issame = []
with open(pairs_filename, 'r') as f:
for line in f.readlines()[1:]:
pair = line.strip().split(',')
for i in range(len(pair)):
pair[i] = pair[i].strip()
if not os.path.exists(os.path.join(lfw_path, pair[2])):
continue
if not os.path.exists(os.path.join(lfw_path, pair[3])):
continue
if not os.path.isdir(os.path.join(lfw_path, pair[2])):
continue
if not os.path.isdir(os.path.join(lfw_path, pair[3])):
continue
if len(os.listdir(os.path.join(lfw_path, pair[2]))) == 0:
continue
if len(os.listdir(os.path.join(lfw_path, pair[3]))) == 0:
continue
if int(pair[-2]) == 1:
actual_issame.append(True)
else:
actual_issame.append(False)
pairs.append(pair)
return np.array(pairs), np.array(actual_issame), len(pairs)
def _get_preprocessor(model_type):
if 'inception_resnet_v2' in model_type:
preprocessor = 'tensorflow.keras.applications.inception_resnet_v2'
#print('[INFO] Loaded Inception-Resnet-V2 data preprocessor', flush=True)
elif 'efficientnet' in model_type:
preprocessor = 'tensorflow.keras.applications.efficientnet'
#print('[INFO] Loaded EfficientNet data preprocessor', flush=True)
elif 'xception' in model_type:
preprocessor = 'tensorflow.keras.applications.xception'
#print('[INFO] Loaded Xception data preprocessor', flush=True)
elif 'inception_v3' in model_type:
preprocessor = 'tensorflow.keras.applications.inception_v3'
#print('[INFO] Loaded Inception-V3 data preprocessor', flush=True)
elif 'resnet' in model_type:
preprocessor = 'tensorflow.keras.applications.resnet'
#print('[INFO] Loaded Resnet data preprocessor', flush=True)
elif 'mobilenet_v2' in model_type:
preprocessor = 'tensorflow.keras.applications.mobilenet_v2'
#print('[INFO] Loaded MobileNet-V2 data preprocessor', flush=True)
elif 'mobilenet' in model_type:
preprocessor = 'tensorflow.keras.applications.mobilenet'
#print('[INFO] Loaded MobileNet data preprocessor', flush=True)
else:
preprocessor = None
#print('[WARNING] Could not find appropriate pre-processor for model', flush=True)
if preprocessor is not None:
preprocessor = importlib.import_module(preprocessor)
return preprocessor
def get_dataset(data_path, image_size, batch_size, crop_size, train_classes=0,
use_mixed_precision=False, use_tpu=False, model_type=None):
AUTOTUNE = tf.data.experimental.AUTOTUNE
data_path = pathlib.Path(data_path)
image_count = len(list(data_path.glob('*.png')))
preprocessor = _get_preprocessor(model_type)
ds = tf.data.Dataset.list_files(str(data_path/"*.png"), shuffle=True)
def decode_img(img):
#img = tf.io.decode_image(img, channels=3, expand_animations=False)
img = tf.io.decode_png(img, channels=3)
if use_mixed_precision is True:
if use_tpu is True:
img = tf.cast(img, tf.bfloat16)
else:
img = tf.cast(img, tf.float16)
else:
img = tf.cast(img, tf.float32)
img = tf.image.resize(img, [crop_size, crop_size])
return img
def process_path(file_path):
img = tf.io.read_file(file_path)
img = decode_img(img)
if preprocessor is not None:
img = preprocessor.preprocess_input(img)
else:
img = img / 255.
return img
ds = ds.map(process_path, num_parallel_calls=AUTOTUNE, deterministic=True)
ds = ds.batch(batch_size)
ds = ds.prefetch(AUTOTUNE)
return ds, image_count
def _distance(embeddings1, embeddings2, distance_metric=0):
if distance_metric==0:
# Euclidian distance
diff = np.subtract(embeddings1, embeddings2)
dist = np.sqrt(np.sum(np.square(diff), axis=1))
elif distance_metric==1:
# Distance based on cosine similarity
dot = np.sum(np.multiply(embeddings1, embeddings2), axis=1)
norm = np.linalg.norm(embeddings1, axis=1) * np.linalg.norm(embeddings2, axis=1)
similarity = dot / norm
dist = np.arccos(similarity) / math.pi
else:
        raise ValueError('Undefined distance metric %d' % distance_metric)
return dist
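# Hedged sketch: the two distance metrics on toy 2-D embeddings. Real
# embeddings come from the loaded face model; these values are illustrative.
def _example_distance_metrics():
    emb1 = np.array([[1.0, 0.0], [0.0, 1.0]])
    emb2 = np.array([[1.0, 0.0], [1.0, 0.0]])
    euclidean = _distance(emb1, emb2, distance_metric=0)   # [0.0, sqrt(2)]
    angular = _distance(emb1, emb2, distance_metric=1)     # [0.0, 0.5]
    return euclidean, angular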
def _calculate_roc(thresholds, embeddings1, embeddings2, actual_issame, nrof_folds=10,
distance_metric=0):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
tprs = np.zeros((nrof_folds,nrof_thresholds))
fprs = np.zeros((nrof_folds,nrof_thresholds))
accuracy = np.zeros((nrof_folds))
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
dist = _distance(embeddings1, embeddings2, distance_metric)
# Find the best threshold for the fold
acc_train = np.zeros((nrof_thresholds))
for threshold_idx, threshold in enumerate(thresholds):
_, _, acc_train[threshold_idx] = _calculate_accuracy(threshold, dist[train_set], actual_issame[train_set])
best_threshold_index = np.argmax(acc_train)
for threshold_idx, threshold in enumerate(thresholds):
tprs[fold_idx,threshold_idx], fprs[fold_idx,threshold_idx], _ = _calculate_accuracy(threshold, dist[test_set], actual_issame[test_set])
_, _, accuracy[fold_idx] = _calculate_accuracy(thresholds[best_threshold_index], dist[test_set], actual_issame[test_set])
tpr = np.mean(tprs, 0)
fpr = np.mean(fprs, 0)
return tpr, fpr, accuracy, np.std(accuracy)
def _calculate_accuracy(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
tp = np.sum(np.logical_and(predict_issame, actual_issame))
fp = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
tn = np.sum(np.logical_and(np.logical_not(predict_issame), np.logical_not(actual_issame)))
fn = np.sum(np.logical_and(np.logical_not(predict_issame), actual_issame))
tpr = 0 if (tp+fn==0) else float(tp) / float(tp+fn)
fpr = 0 if (fp+tn==0) else float(fp) / float(fp+tn)
acc = float(tp+tn)/dist.size
return tpr, fpr, acc
def _calculate_val(thresholds, embeddings1, embeddings2, actual_issame, far_target, nrof_folds=10, distance_metric=0):
assert(embeddings1.shape[0] == embeddings2.shape[0])
assert(embeddings1.shape[1] == embeddings2.shape[1])
nrof_pairs = min(len(actual_issame), embeddings1.shape[0])
nrof_thresholds = len(thresholds)
k_fold = KFold(n_splits=nrof_folds, shuffle=False)
val = np.zeros(nrof_folds)
far = np.zeros(nrof_folds)
indices = np.arange(nrof_pairs)
for fold_idx, (train_set, test_set) in enumerate(k_fold.split(indices)):
dist = _distance(embeddings1, embeddings2, distance_metric)
# Find the threshold that gives FAR = far_target
far_train = np.zeros(nrof_thresholds)
for threshold_idx, threshold in enumerate(thresholds):
_, far_train[threshold_idx] = _calculate_val_far(threshold, dist[train_set], actual_issame[train_set])
if np.max(far_train)>=far_target:
f = interpolate.interp1d(far_train, thresholds, kind='slinear')
threshold = f(far_target)
else:
threshold = 0.0
val[fold_idx], far[fold_idx] = _calculate_val_far(threshold, dist[test_set], actual_issame[test_set])
val_mean = np.mean(val)
far_mean = np.mean(far)
val_std = np.std(val)
return val_mean, val_std, far_mean
def _calculate_val_far(threshold, dist, actual_issame):
predict_issame = np.less(dist, threshold)
true_accept = np.sum(np.logical_and(predict_issame, actual_issame))
false_accept = np.sum(np.logical_and(predict_issame, np.logical_not(actual_issame)))
n_same = np.sum(actual_issame)
n_diff = np.sum(np.logical_not(actual_issame))
val = float(true_accept) / float(n_same)
far = float(false_accept) / float(n_diff)
return val, far
def main(weights_path, lfw_path, image_size, crop_size, model_type, loss_type,
batch_size=50, use_mixed_precision=False, use_tpu=False, embedding_size=512,
load_from_file=False, distance_metric=0):
model = None
if loss_type == 'ADAPTIVE':
loss_obj = ['AdaptiveTripletLoss', AdaptiveTripletLoss]
elif loss_type == 'FOCAL':
loss_obj = ['TripletFocalLoss', TripletFocalLoss]
elif loss_type == 'BATCH_HARD':
loss_obj = ['TripletBatchHardLoss', TripletBatchHardLoss]
elif loss_type == 'BATCH_HARD_V2':
loss_obj = ['TripletBatchHardV2Loss', TripletBatchHardV2Loss]
elif loss_type == 'ASSORTED':
loss_obj = ['AssortedTripletLoss', AssortedTripletLoss]
elif loss_type == 'CONSTELLATION':
loss_obj = ['ConstellationLoss', ConstellationLoss]
elif loss_type == 'HAP2S_E':
loss_obj = ['HAP2S_ELoss', HAP2S_ELoss]
elif loss_type == 'HAP2S_P':
loss_obj = ['HAP2S_PLoss', HAP2S_PLoss]
else:
loss_obj = None
if loss_obj is not None:
model = tf.keras.models.load_model(weights_path, custom_objects={loss_obj[0]:loss_obj[1]})
#Another solution is skip the model_utils import and use : custom_objects={loss_obj[0]:loss_obj[1], 'tf':tf}
else:
model = tf.keras.models.load_model(weights_path)
pairs, actual_issame, nrof_pairs = _read_pairs('./data/ytface_pairs.txt', lfw_path)
#embeddings = np.zeros((nrof_pairs*2, embedding_size))
embeddings = None
if load_from_file is None or load_from_file is False:
for pair_num, pair in enumerate(tqdm(pairs, file=sys.stdout)):
temp_emb = None
x_ds, ic = get_dataset(data_path=os.path.join(lfw_path, pair[2]),
image_size=image_size,
batch_size=batch_size,
crop_size=crop_size,
use_mixed_precision=use_mixed_precision,
use_tpu=use_tpu,
train_classes=0,
model_type=model_type)
for i, xs in enumerate(x_ds):
embs = model.predict(xs)
embs = np.squeeze(embs)
if temp_emb is None:
temp_emb = embs
else:
temp_emb = np.vstack((temp_emb, embs))
#assert temp_emb.shape == (ic, embedding_size)
            mean_emb = np.squeeze(np.mean(temp_emb, axis=0))
"""
===========
classify.py
===========
This module contains functionality for classifying nanopore captures.
"""
import os
import re
from abc import ABC, abstractmethod
from dataclasses import dataclass
from pathlib import PosixPath
from typing import * # I know people don't like import *, but I think it has benefits for types (doesn't impede people from being generous with typing)
import numpy as np
import torch
import torch.nn as nn
from ..logger import Logger, getLogger
from ..signals import Capture, FractionalizedSignal, RawSignal
# TODO: Pipe through filtering https://github.com/uwmisl/poretitioner/issues/43 https://github.com/uwmisl/poretitioner/issues/68
# from .models import NTERs_trained_cnn_05152019 as pretrained_model
from . import filtering
from .configuration import ClassifierConfiguration
from .core import NumpyArrayLike, PathLikeOrString, ReadId
use_cuda = False # True
# TODO : Don't hardcode use of CUDA : https://github.com/uwmisl/poretitioner/issues/41
ClassLabel = NewType("ClassLabel", str)
# Maps a numpy array like (some vector encoding that represents a label) to a the label string.
LabelForResult = Callable[[NumpyArrayLike], ClassLabel]
__all__ = [
"predict_class",
"ClassificationRunId",
"ClassifierDetails",
"ClassifierPlugin",
"CLASSIFICATION_PATH",
"ClassificationResult",
"PytorchClassifierPlugin",
]
# Uniquely identifies a classification run that happened (e.g. 'NTER_2018_RandomForest_Attempt_3').
ClassificationRunId = NewType("ClassificationRunId", str)
@dataclass(frozen=True)
class ClassifierDetails:
model: str
model_version: str
classification_threshold: float
# Timestamp of when this classification occurred, in seconds from epoch (as a float).
#
# Q: Why not date-time?
#
# A: Sadly, as of 2020, h5py doesn't provide a good way of storing dates [1].
# Doing so would also be less precise than storing epoch time.
#
# Q: Why seconds (knowing it will be fractionalized)?
#
# A: On most modern machines, python time.time() provides micro-second precision.
# But this can't be guaranteed (on older machines, it might only provide second precision) [1].
#
# If we really wanted an int, the alternative to guarantee an int would be to store
# the timestamp in nanoseconds [3], but that feels verbose to me.
#
# [1] - https://stackoverflow.com/questions/23570632/store-datetimes-in-hdf5-with-h5py
# [2] - https://docs.python.org/3/library/time.html#time.time
# [3] - https://docs.python.org/3/library/time.html#time.time_ns
timestamp_ms: float
model_file: PathLikeOrString
@dataclass(frozen=True)
class CLASSIFICATION_PATH:
ROOT = f"/Classification/"
@classmethod
def for_classification_run(cls, classification_run: ClassificationRunId) -> str:
path = str(PosixPath(CLASSIFICATION_PATH.ROOT, classification_run))
return path
@classmethod
def pass_path(cls, classification_run: ClassificationRunId) -> str:
"""Path to the group that contains the readIds that passed classification during this
classification run.
Parameters
----------
classification_run : ClassificationRunId
A unique identifier for the classification run that generated these results (e.g. "my_classication_run_04").
Returns
-------
str
Pathlike to path. (e.g. /Classifcation/my_classication_run_04/pass)
"""
CLASSICATION_RUN_PATH = cls.for_classification_run(classification_run)
path = str(PosixPath(CLASSICATION_RUN_PATH, "pass"))
return path
@classmethod
def fail_path(cls, classification_run: ClassificationRunId) -> str:
"""Path to the group that contains the readIds that failed classification during this
classification run.
Parameters
----------
classification_run : ClassificationRunId
A unique identifier for the classification run that generated these results (e.g. "my_classication_run_04").
Returns
-------
str
Pathlike to path. (e.g. /Classifcation/my_classication_run_04/fail)
"""
CLASSICATION_RUN_PATH = cls.for_classification_run(classification_run)
path = str(PosixPath(CLASSICATION_RUN_PATH, "fail"))
return path
    @classmethod
    def read_id_path(cls, classification_run: ClassificationRunId, read_id: ReadId) -> str:
"""Path to the group that contains the classification results for a given readId.
Parameters
----------
classification_run : ClassificationRunId
A unique identifier for the classification run that generated these results (e.g. "my_classication_run_04").
read_id : ReadId
The readId of the read we want to know the classification results for.
Returns
-------
str
Path to the group that contains the classification results for a given readId.
"""
CLASSICATION_RUN_PATH = cls.for_classification_run(classification_run)
path = str(PosixPath(CLASSICATION_RUN_PATH, f"{read_id}"))
return path
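# Hedged usage sketch of the path helpers above; the run id and read id are
# placeholders, not values produced by this package.
def _example_classification_paths():
    run = ClassificationRunId("example_run_01")
    return (CLASSIFICATION_PATH.pass_path(run),
            CLASSIFICATION_PATH.fail_path(run),
            CLASSIFICATION_PATH.read_id_path(run, ReadId("read_0001")))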
@dataclass(frozen=True)
class ClassificationResult:
"""The result of passing the capture data to the classifier.
Fields
----------
score : float
A value representing the 'score' of a label predicted by the classifier.
Abstractly, the score is a measure of confidence that this label is correct, as determined by the score being greater than some threshold.
What exact values this score can take on depends on your classifier
(e.g. if you pass the final result through a soft-max, this score will represent a probability from 0 to 1.0).
label : ClassLabel
The label assigned to this prediction.
Returns
-------
[ClassificationResult]
ClassificationResult instance.
"""
label: ClassLabel
score: float
# TODO: Finish writing Classifier plugin architecture: https://github.com/uwmisl/poretitioner/issues/91
class ClassifierPlugin(ABC):
@abstractmethod
def model_name(self) -> str:
raise NotImplementedError("model_name hasn't been implemented for this classifier.")
@abstractmethod
def model_version(self) -> str:
raise NotImplementedError("model_version hasn't been implemented for this classifier.")
@abstractmethod
def model_file(self) -> str:
raise NotImplementedError("model_file hasn't been implemented for this classifier.")
@abstractmethod
def load(self, use_cuda: bool = False):
"""Loads a model for classification.
This method is where you should do any pre processing needed.
For exammple, loading and configuring a Pytorch model, or a sci-kit learn model.
Parameters
----------
use_cuda : bool
Whether to use cuda.
Raises
------
NotImplementedError
If this method hasn't been implemented.
"""
raise NotImplementedError("load hasn't been implemented for this classifier.")
@abstractmethod
def evaluate(self, capture) -> ClassificationResult:
raise NotImplementedError("Evaluate hasn't been implemented for this classifier.")
# TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92
def filter_and_classify(
config, capture_filepaths: List[PathLikeOrString], overwrite=False, filter_name=None
):
    local_logger = getLogger()
clf_config = config["classify"]
classifier_name = clf_config["classifier"]
classification_path = clf_config["classification_path"]
# Load classifier
local_logger.info(f"Loading classifier {classifier_name}.")
assert classifier_name in ["NTER_cnn", "NTER_rf"]
assert classification_path is not None and len(classification_path) > 0
classifier = init_classifier(classifier_name, classification_path)
# Filter (optional) TODO: Restore filtering https://github.com/uwmisl/poretitioner/issues/43 https://github.com/uwmisl/poretitioner/issues/68
read_path = "/"
# if filter_name is not None:
# local_logger.info("Beginning filtering.")
# filter.filter_and_store_result(config, fast5_fnames, filter_name, overwrite=overwrite)
# read_path = f"/Filter/{filter_name}/pass"
# else:
# read_path = "/"
# Classify
classify_fast5_file(f5, clf_config, classifier, classifier_name, read_path)
# def classify_file(
# capturef5: ClassifierFile, configuration: ClassifierConfiguration, classifier: Classifier, classifier_run_name, read_path, class_labels=None):
# for read in capturef5.reads:
# pass
# TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92
def classify_fast5_file(
capture_filepath: PathLikeOrString,
clf_config,
classifier,
classifier_run_name,
read_path,
class_labels=None,
):
    local_logger = getLogger()
local_logger.debug(f"Beginning classification for file {capture_filepath}.")
classifier_name = clf_config["classifier"]
classifier_version = clf_config["version"]
classifier_location = clf_config["filepath"]
classify_start = clf_config["start_obs"] # 100 in NTER paper
classify_end = clf_config["end_obs"] # 21000 in NTER paper
classifier_confidence_threshold = clf_config["min_confidence"]
configuration = ClassifierConfiguration(
classifier_name,
classifier_version,
classify_start,
classify_end,
classifier_confidence_threshold,
)
# details = ClassifierDetails(classifier_name, , , )
# ClassifierFile(filepath, )
details = None # ClassifierDetails(classifier_name, )
assert classify_start >= 0 and classify_end >= 0
assert classifier_confidence_threshold is None or (0 <= classifier_confidence_threshold <= 1)
local_logger.debug(
f"Classification parameters: name: {classifier_name}, "
f"range of data points: ({classify_start}, {classify_end})"
f"confidence required to pass: {classifier_confidence_threshold}"
)
results_path = f"/Classification/{classifier_run_name}"
write_classifier_details(f5, clf_config, results_path)
with ClassifierFile(capture_filepath, "r+") as classifier_f5:
details = ClassifierDetails(
classifier_name,
classifier_version,
classifier_location,
classifier_confidence_threshold,
)
classifier_f5.write_details(details)
for read in classifier_f5.reads:
signal = classifier_f5.get_fractionalized_read(
read, start=classify_start, end=classify_end
)
labels, probability = predict_class(
classifier_name, classifier, signal, class_labels=class_labels
)
if classifier_confidence_threshold is not None:
passed_classification = probability > classifier_confidence_threshold
else:
passed_classification = None
write_classifier_result()
# read_h5group_names = f5.get(read_path)
# for grp in read_h5group_names:
# if "read" not in grp:
# continue
# read_id = re.findall(r"read_(.*)", str(grp))[0]
# signal = get_fractional_blockage_for_read(
# f5, grp, start=classify_start, end=classify_end
# )
# y, p = predict_class(classifier_name, classifier, signal, class_labels=class_labels)
# if classifier_confidence_threshold is not None:
# passed_classification = False if p <= classifier_confidence_threshold else True
# else:
# passed_classification = None
# write_classifier_result(f5, results_path, read_id, y, p, passed_classification)
# TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92
# TODO: This classifier initialization should be a special case of a Plugin: https://github.com/uwmisl/poretitioner/issues/91
def init_classifier(classifier_name, classification_path):
"""Initialize the classification model. Supported classifier names include
"NTER_cnn" and "NTER_rf".
According to documentation for original NTER code:
Prediction classes are 1-9:
0:Y00, 1:Y01, 2:Y02, 3:Y03, 4:Y04, 5:Y05, 6:Y06, 7:Y07, 8:Y08, 9:noise,
-1:below conf_thesh
Parameters
----------
classifier_name : str
The name of any supported classifier, currently "NTER_cnn" and "NTER_rf".
classification_path : str
Location of the pre-trained model file.
Returns
-------
model
Classification model (type depends on the spceified model).
Raises
------
ValueError
Raised if the classifier name is not supported.
OSError
Raised if the classifier path does not exist.
"""
if classifier_name == "NTER_cnn": # CNN classifier
if not os.path.exists(classification_path):
raise OSError(f"Classifier path doesn't exist: {classification_path}")
nanoporeTER_cnn = pretrained_model.load_cnn(classification_path)
return nanoporeTER_cnn
elif classifier_name == "NTER_rf": # Random forest classifier
if not os.path.exists(classification_path):
raise OSError(f"Classifier path doesn't exist: {classification_path}")
# TODO : Improve model maintainability : https://github.com/uwmisl/poretitioner/issues/38
# return joblib.load(open(classification_path, "rb"))
pass
else:
raise ValueError(f"Invalid classifier name: {classifier_name}")
# TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92
def predict_class(classifier_name, classifier, raw, class_labels=None) -> ClassificationResult:
"""Runs the classifier using the given raw data as input. Does not apply
any kind of confidence threshold.
Parameters
----------
classifier_name : str
The name of any supported classifier, currently "NTER_cnn" and "NTER_rf".
classifier : model
Classification model returned by init_classifier.
raw : iterable of floats
Time series of nanopore current values (in units of fractionalized current).
Returns
-------
int or string
Class label
float
Model score (for NTER_cnn and NTER_rf, it's a probability)
Raises
------
NotImplementedError
Raised if the input classifier_name is not supported.
"""
if classifier_name == "NTER_cnn":
X_test = np.array([raw])
# 2D --> 3D array (each obs in a capture becomes its own array)
X_test = X_test.reshape(len(X_test), X_test.shape[1], 1)
if X_test.shape[1] < 19881:
temp = np.zeros((X_test.shape[0], 19881, 1))
temp[:, : X_test.shape[1], :] = X_test
X_test = temp
X_test = X_test[:, :19881] # First 19881 obs as per NTER paper
# Break capture into 141x141 (19881 total data points)
X_test = X_test.reshape(len(X_test), 1, 141, 141)
X_test = torch.from_numpy(X_test)
if use_cuda:
X_test = X_test.cuda()
outputs = classifier(X_test)
out = nn.functional.softmax(outputs, dim=1)
prob, label = torch.topk(out, 1)
if use_cuda:
label = label.cpu().numpy()[0][0]
else:
label = label.numpy()[0][0]
if class_labels is not None:
label = class_labels[label]
probability = prob[0][0].data
# TODO: Implement Classification with the new data model: https://github.com/uwmisl/poretitioner/issues/92
# TODO: Katie Q: Where does assigned class come from?
ClassificationResult(label, probability)
return label, probability
elif classifier_name == "NTER_rf":
class_proba = classifier.predict_proba(
            [[np.mean(raw), np.std(raw), np.min(raw), np.max(raw)]]
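        )[0]
        # The original file is truncated at this point; the lines below are a hedged
        # sketch that mirrors the CNN branch above (return the most probable class and
        # its probability). The exact feature vector fed to predict_proba is an assumption.
        label = int(np.argmax(class_proba))
        probability = float(np.max(class_proba))
        if class_labels is not None:
            label = class_labels[label]
        return label, probability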
import numpy as np
from sklearn.decomposition import PCA
import pandas as pd
import matplotlib.pyplot as plt
import random
import seaborn as sns
from sklearn.cluster import KMeans
from sklearn.metrics import confusion_matrix
from sklearn.metrics.cluster import adjusted_rand_score
from sklearn.datasets import fetch_openml
from sklearn.model_selection import train_test_split
import pandas.util.testing as tm
from keras.datasets import mnist
import tensorflow_datasets as tfds
import tensorflow as tf
from google.colab import files
import sys
import itertools as it
#@title ElasticNetSubspaceClustering
import warnings
import progressbar
import spams
import time
from scipy import sparse
from sklearn import cluster
from sklearn.base import BaseEstimator, ClusterMixin
from sklearn.decomposition import sparse_encode
from sklearn.linear_model import orthogonal_mp
from sklearn.neighbors import kneighbors_graph
from sklearn.preprocessing import normalize
from sklearn.utils import check_random_state, check_array, check_symmetric
class SelfRepresentation(BaseEstimator, ClusterMixin):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=20, n_jobs=1):
self.n_clusters = n_clusters
self.affinity = affinity
self.random_state = random_state
self.n_init = n_init
self.n_jobs = n_jobs
def fit(self, X, y=None):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
time_base = time.time()
self._self_representation(X)
self.timer_self_representation_ = time.time() - time_base
self._representation_to_affinity()
self._spectral_clustering()
self.timer_time_ = time.time() - time_base
return self
def fit_self_representation(self, X, y=None):
X = check_array(X, accept_sparse=['csr', 'csc', 'coo'], dtype=np.float64)
time_base = time.time()
self._self_representation(X)
self.timer_self_representation_ = time.time() - time_base
return self
def _representation_to_affinity(self):
normalized_representation_matrix_ = normalize(self.representation_matrix_, 'l2')
if self.affinity == 'symmetrize':
self.affinity_matrix_ = 0.5 * (np.absolute(normalized_representation_matrix_) + np.absolute(normalized_representation_matrix_.T))
elif self.affinity == 'nearest_neighbors':
neighbors_graph = kneighbors_graph(normalized_representation_matrix_, 3,
mode='connectivity', include_self=False)
self.affinity_matrix_ = 0.5 * (neighbors_graph + neighbors_graph.T)
def _spectral_clustering(self):
affinity_matrix_ = check_symmetric(self.affinity_matrix_)
random_state = check_random_state(self.random_state)
laplacian = sparse.csgraph.laplacian(affinity_matrix_, normed=True)
_, vec = sparse.linalg.eigsh(sparse.identity(laplacian.shape[0]) - laplacian,
k=self.n_clusters, sigma=None, which='LA')
embedding = normalize(vec)
_, self.labels_, _ = cluster.k_means(embedding, self.n_clusters,
random_state=random_state, n_init=self.n_init)
def active_support_elastic_net(X, y, alpha, tau=1.0, algorithm='spams', support_init='knn',
support_size=100, maxiter=40):
n_samples = X.shape[0]
if n_samples <= support_size: # skip active support search for small scale data
supp = np.arange(n_samples, dtype=int) # this results in the following iteration to converge in 1 iteration
else:
if support_init == 'L2':
L2sol = np.linalg.solve(np.identity(y.shape[1]) * alpha + np.dot(X.T, X), y.T)
c0 = np.dot(X, L2sol)[:, 0]
supp = np.argpartition(-np.abs(c0), support_size)[0:support_size]
elif support_init == 'knn':
supp = np.argpartition(-np.abs(np.dot(y, X.T)[0]), support_size)[0:support_size]
curr_obj = float("inf")
for _ in range(maxiter):
Xs = X[supp, :]
if algorithm == 'spams':
cs = spams.lasso(np.asfortranarray(y.T), D=np.asfortranarray(Xs.T),
lambda1=tau*alpha, lambda2=(1.0-tau)*alpha)
cs = np.asarray(cs.todense()).T
else:
cs = sparse_encode(y, Xs, algorithm=algorithm, alpha=alpha)
delta = (y - np.dot(cs, Xs)) / alpha
obj = tau * np.sum(np.abs(cs[0])) + (1.0 - tau)/2.0 * np.sum(np.power(cs[0], 2.0)) + alpha/2.0 * np.sum(np.power(delta, 2.0))
if curr_obj - obj < 1.0e-10 * curr_obj:
break
curr_obj = obj
coherence = np.abs(np.dot(delta, X.T))[0]
coherence[supp] = 0
addedsupp = np.nonzero(coherence > tau + 1.0e-10)[0]
if addedsupp.size == 0: # converged
break
# Find the set of nonzero entries of cs.
activesupp = supp[np.abs(cs[0]) > 1.0e-10]
if activesupp.size > 0.8 * support_size: # this suggests that support_size is too small and needs to be increased
support_size = min([round(max([activesupp.size, support_size]) * 1.1), n_samples])
if addedsupp.size + activesupp.size > support_size:
ord = np.argpartition(-coherence[addedsupp], support_size - activesupp.size)[0:support_size - activesupp.size]
addedsupp = addedsupp[ord]
supp = np.concatenate([activesupp, addedsupp])
c = np.zeros(n_samples)
c[supp] = cs
return c
def elastic_net_subspace_clustering(X, gamma=50.0, gamma_nz=True, tau=1.0, algorithm='lasso_lars',
active_support=True, active_support_params=None, n_nonzero=50):
if algorithm in ('lasso_lars', 'lasso_cd') and tau < 1.0 - 1.0e-10:
warnings.warn('algorithm {} cannot handle tau smaller than 1. Using tau = 1'.format(algorithm))
tau = 1.0
if active_support == True and active_support_params == None:
active_support_params = {}
n_samples = X.shape[0]
rows = np.zeros(n_samples * n_nonzero)
cols = np.zeros(n_samples * n_nonzero)
vals = np.zeros(n_samples * n_nonzero)
curr_pos = 0
for i in progressbar.progressbar(range(n_samples)):
y = X[i, :].copy().reshape(1, -1)
X[i, :] = 0
if algorithm in ('lasso_lars', 'lasso_cd', 'spams'):
if gamma_nz == True:
coh = np.delete(np.absolute(np.dot(X, y.T)), i)
alpha0 = np.amax(coh) / tau # value for which the solution is zero
alpha = alpha0 / gamma
else:
alpha = 1.0 / gamma
if active_support == True:
c = active_support_elastic_net(X, y, alpha, tau, algorithm, **active_support_params)
else:
if algorithm == 'spams':
c = spams.lasso(np.asfortranarray(y.T), D=np.asfortranarray(X.T),
lambda1=tau * alpha, lambda2=(1.0-tau) * alpha)
c = np.asarray(c.todense()).T[0]
else:
c = sparse_encode(y, X, algorithm=algorithm, alpha=alpha)[0]
else:
warnings.warn("algorithm {} not found".format(algorithm))
index = np.flatnonzero(c)
if index.size > n_nonzero:
# warnings.warn("The number of nonzero entries in sparse subspace clustering exceeds n_nonzero")
index = index[np.argsort(-np.absolute(c[index]))[0:n_nonzero]]
rows[curr_pos:curr_pos + len(index)] = i
cols[curr_pos:curr_pos + len(index)] = index
vals[curr_pos:curr_pos + len(index)] = c[index]
curr_pos += len(index)
X[i, :] = y
# affinity = sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples)) + sparse.csr_matrix((vals, (cols, rows)), shape=(n_samples, n_samples))
return sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples))
class ElasticNetSubspaceClustering(SelfRepresentation):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=20, n_jobs=1, gamma=50.0, gamma_nz=True, tau=1.0,
algorithm='lasso_lars', active_support=True, active_support_params=None, n_nonzero=50):
self.gamma = gamma
self.gamma_nz = gamma_nz
self.tau = tau
self.algorithm = algorithm
self.active_support = active_support
self.active_support_params = active_support_params
self.n_nonzero = n_nonzero
SelfRepresentation.__init__(self, n_clusters, affinity, random_state, n_init, n_jobs)
def _self_representation(self, X):
self.representation_matrix_ = elastic_net_subspace_clustering(X, self.gamma, self.gamma_nz,
self.tau, self.algorithm,
self.active_support, self.active_support_params,
self.n_nonzero)
def sparse_subspace_clustering_orthogonal_matching_pursuit(X, n_nonzero=10, thr=1.0e-6):
n_samples = X.shape[0]
rows = np.zeros(n_samples * n_nonzero, dtype = int)
cols = np.zeros(n_samples * n_nonzero, dtype = int)
vals = np.zeros(n_samples * n_nonzero)
curr_pos = 0
for i in progressbar.progressbar(range(n_samples)):
# for i in range(n_samples):
residual = X[i, :].copy() # initialize residual
supp = np.empty(shape=(0), dtype = int) # initialize support
residual_norm_thr = np.linalg.norm(X[i, :]) * thr
for t in range(n_nonzero): # for each iteration of OMP
# compute coherence between residuals and X
coherence = abs( np.matmul(residual, X.T) )
coherence[i] = 0.0
# update support
supp = np.append(supp, np.argmax(coherence))
# compute coefficients
c = np.linalg.lstsq( X[supp, :].T, X[i, :].T, rcond=None)[0]
# compute residual
residual = X[i, :] - np.matmul(c.T, X[supp, :])
# check termination
if np.sum(residual **2) < residual_norm_thr:
break
rows[curr_pos:curr_pos + len(supp)] = i
cols[curr_pos:curr_pos + len(supp)] = supp
vals[curr_pos:curr_pos + len(supp)] = c
curr_pos += len(supp)
# affinity = sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples)) + sparse.csr_matrix((vals, (cols, rows)), shape=(n_samples, n_samples))
return sparse.csr_matrix((vals, (rows, cols)), shape=(n_samples, n_samples))
class SparseSubspaceClusteringOMP(SelfRepresentation):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=10, n_jobs=1, n_nonzero=10, thr=1.0e-6):
self.n_nonzero = n_nonzero
self.thr = thr
SelfRepresentation.__init__(self, n_clusters, affinity, random_state, n_init, n_jobs)
def _self_representation(self, X):
self.representation_matrix_ = sparse_subspace_clustering_orthogonal_matching_pursuit(X, self.n_nonzero, self.thr)
def least_squares_subspace_clustering(X, gamma=10.0, exclude_self=False):
n_samples, n_features = X.shape
if exclude_self == False:
if n_samples < n_features:
gram = np.matmul(X, X.T)
            return np.linalg.solve(gram + np.eye(n_samples) / gamma, gram).T
else:
tmp = np.linalg.solve(np.matmul(X.T, X) + np.eye(n_features) / gamma, X.T)
return np.matmul(X, tmp).T
else:
if n_samples < n_features:
            D = np.linalg.solve(np.matmul(X, X.T) + np.eye(n_samples) / gamma, np.eye(n_samples))
# see Theorem 6 in https://arxiv.org/pdf/1404.6736.pdf
else:
tmp = np.linalg.solve(np.matmul(X.T, X) + np.eye(n_features) / gamma, X.T)
            D = np.eye(n_samples) - np.matmul(X, tmp)
D = D / D.diagonal()[None,:]
np.fill_diagonal(D, 0.0)
return -1.0 * D.T
class LeastSquaresSubspaceClustering(SelfRepresentation):
def __init__(self, n_clusters=8, affinity='symmetrize', random_state=None, n_init=None, n_jobs=1, gamma=10.0, exclude_self=False):
self.gamma = gamma
self.exclude_self = exclude_self
SelfRepresentation.__init__(self, n_clusters, affinity, random_state, n_init, n_jobs)
def _self_representation(self, X):
self.representation_matrix_ = least_squares_subspace_clustering(X, self.gamma, self.exclude_self)
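# Minimal usage sketch for the estimators defined above (synthetic data; parameter values
# are illustrative only):
#   X = np.random.randn(200, 30)                 # rows are data points
#   model = ElasticNetSubspaceClustering(n_clusters=3, algorithm='lasso_lars', gamma=50.0)
#   model.fit(X)
#   cluster_labels = model.labels_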
if 'google.colab' in sys.modules:
uploaded = files.upload()
#subtract the mean from every class
def preprocess_substract_mean(X, y):
labels = np.unique(y)
X_processed= X.copy()
for l in labels:
        mean = np.average(X_processed[y == l], 0)
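        # The original file is truncated here; a plausible remainder (per the comment
        # above the function) subtracts the per-class mean and returns the centered data.
        X_processed[y == l] = X_processed[y == l] - mean
    return X_processed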
import numpy as np
from base_test import ArkoudaTest
from context import arkouda as ak
from itertools import product
class ArrayViewTest(ArkoudaTest):
    def test_multidimensional_array_creation(self):
n = np.array([[0, 0], [0, 1], [1, 1]])
a = ak.array([[0, 0], [0, 1], [1, 1]])
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
n = np.arange(27).reshape((3, 3, 3))
a = ak.arange(27).reshape((3, 3, 3))
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
n = np.arange(27).reshape(3, 3, 3)
a = ak.arange(27).reshape(3, 3, 3)
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
n = np.arange(27, dtype=np.uint64).reshape(3, 3, 3)
a = ak.arange(27, dtype=ak.uint64).reshape(3, 3, 3)
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
def test_arrayview_int_indexing(self):
nd = np.arange(9).reshape(3, 3)
pd_reshape = ak.arange(9).reshape(3, 3)
pd_array = ak.array([[0, 1, 2], [3, 4, 5], [6, 7, 8]])
nd_ind = [nd[i, j] for (i, j) in product(range(3), repeat=2)]
reshape_ind = [pd_reshape[i, j] for (i, j) in product(range(3), repeat=2)]
array_ind = [pd_array[i, j] for (i, j) in product(range(3), repeat=2)]
self.assertListEqual(nd_ind, reshape_ind)
self.assertListEqual(nd_ind, array_ind)
with self.assertRaises(IndexError):
            # index out of bounds (>= dimension)
# index 3 is out of bounds for axis 0 with size 3
pd_reshape[3, 1]
with self.assertRaises(IndexError):
# index -4 is out of bounds for axis 1 with size 3
pd_reshape[2, -4]
with self.assertRaises(IndexError):
            # too many indices for array: array is 2-dimensional, but 3 were indexed
pd_reshape[0, 1, 1]
with self.assertRaises(ValueError):
# cannot reshape array of size 9 into shape (4,3)
ak.arange(9).reshape(4, 3)
def test_int_list_indexing(self):
iav = ak.arange(30).reshape((5, 3, 2))
uav = ak.arange(30, dtype=ak.uint64).reshape((5, 3, 2))
iind = ak.array([3, 0, 1])
uind = ak.cast(iind, ak.uint64)
self.assertEqual(iav[iind], iav[uind])
self.assertEqual(uav[iind], uav[uind])
def test_set_index(self):
inav = np.arange(30).reshape((5, 3, 2))
unav = np.arange(30, dtype=np.uint64).reshape((5, 3, 2))
iav = ak.arange(30).reshape((5, 3, 2))
uav = ak.arange(30, dtype=ak.uint64).reshape((5, 3, 2))
nind = (3, 0, 1)
iind = ak.array(nind)
uind = ak.cast(iind, ak.uint64)
inav[nind] = -9999
unav[nind] = -9999
iav[uind] = -9999
uav[iind] = -9999
self.assertEqual(iav[uind], inav[nind])
self.assertEqual(iav[iind], iav[uind])
self.assertEqual(uav[uind], unav[nind])
self.assertEqual(uav[iind], uav[uind])
def test_get_bool_pdarray(self):
n = np.arange(30).reshape(5, 3, 2)
a = ak.arange(30).reshape(5, 3, 2)
n_bool_list = n[True, True, True].tolist()
a_bool_list = a[True, True, True].to_ndarray().tolist()
self.assertListEqual(n_bool_list, a_bool_list)
n_bool_list = n[False, True, True].tolist()
a_bool_list = a[False, True, True].to_ndarray().tolist()
self.assertListEqual(n_bool_list, a_bool_list)
n_bool_list = n[True, False, True].tolist()
a_bool_list = a[True, False, True].to_ndarray().tolist()
self.assertListEqual(n_bool_list, a_bool_list)
n_bool_list = n[True, True, False].tolist()
a_bool_list = a[True, True, False].to_ndarray().tolist()
self.assertListEqual(n_bool_list, a_bool_list)
def test_set_bool_pdarray(self):
n = np.arange(30).reshape(5, 3, 2)
a = ak.arange(30).reshape(5, 3, 2)
n[True, True, True] = 9
a[True, True, True] = 9
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
n[False, True, True] = 5
a[False, True, True] = 5
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
n[True, False, True] = 6
a[True, False, True] = 6
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
n[True, True, False] = 13
a[True, True, False] = 13
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
def test_reshape_order(self):
# Keep 'C'/'F' (C/Fortran) order to be consistent with numpy
# But also accept more descriptive 'row_major' and 'column_major'
nd = np.arange(30).reshape((5, 3, 2), order='C')
ak_C = ak.arange(30).reshape((5, 3, 2), order='C')
ak_row = ak.arange(30).reshape((5, 3, 2), order='row_major')
nd_ind = [nd[i, j, k] for (i, j, k) in product(range(5), range(3), range(2))]
C_order = [ak_C[i, j, k] for (i, j, k) in product(range(5), range(3), range(2))]
row_order = [ak_row[i, j, k] for (i, j, k) in product(range(5), range(3), range(2))]
self.assertListEqual(nd_ind, C_order)
self.assertListEqual(nd_ind, row_order)
nd = np.arange(30).reshape((5, 3, 2), order='F')
ak_F = ak.arange(30).reshape((5, 3, 2), order='F')
ak_column = ak.arange(30).reshape((5, 3, 2), order='column_major')
nd_ind = [nd[i, j, k] for (i, j, k) in product(range(5), range(3), range(2))]
F_order = [ak_F[i, j, k] for (i, j, k) in product(range(5), range(3), range(2))]
column_order = [ak_column[i, j, k] for (i, j, k) in product(range(5), range(3), range(2))]
self.assertListEqual(nd_ind, F_order)
self.assertListEqual(nd_ind, column_order)
def test_basic_indexing(self):
# verify functionality is consistent with numpy basic indexing tutorial
# https://numpy.org/doc/stable/user/basics.indexing.html
n = np.arange(10).reshape(2, 5)
a = ak.arange(10).reshape(2, 5)
self.assertListEqual(list(n.shape), a.shape.to_ndarray().tolist())
# n.tolist() = [[0, 1, 2, 3, 4], [5, 6, 7, 8, 9]]
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
self.assertEqual(n.__str__(), a.__str__())
self.assertEqual(n[1, 3], a[1, 3])
self.assertEqual(n[1, -1], a[1, -1])
self.assertListEqual(n[0].tolist(), a[0].to_ndarray().tolist())
self.assertEqual(n[0][2], a[0][2])
n = np.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
a = ak.array([[0, 1, 2, 3, 4, 5, 6, 7, 8, 9], [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]])
# n[0, 1:7:2].tolist() = [1, 3, 5]
self.assertListEqual(n[0, 1:7:2].tolist(), a[0, 1:7:2].to_ndarray().tolist())
# n[0, -2:10].tolist() = [8, 9]
self.assertListEqual(n[0, -2:10].tolist(), a[0, -2:10].to_ndarray().tolist())
# n[0, -3:3:-1].tolist() = [7, 6, 5, 4]
self.assertListEqual(n[0, -3:3:-1].tolist(), a[0, -3:3:-1].to_ndarray().tolist())
# n[0, 5:].tolist() = [5, 6, 7, 8, 9]
self.assertListEqual(n[0, 5:].tolist(), a[0, 5:].to_ndarray().tolist())
n = np.array([[[1],[2],[3]], [[4],[5],[6]]])
a = ak.array([[[1],[2],[3]], [[4],[5],[6]]])
# list(n.shape) = [2, 3, 1]
self.assertListEqual(list(n.shape), a.shape.to_ndarray().tolist())
# n.tolist() = [[[1], [2], [3]], [[4], [5], [6]]]
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
self.assertEqual(n.__str__(), a.__str__())
# n[1:2].tolist() = [[[4], [5], [6]]]
self.assertListEqual(n[1:2].tolist(), a[1:2].to_ndarray().tolist())
def test_slicing(self):
a = ak.arange(30).reshape(2, 3, 5)
n = np.arange(30).reshape(2, 3, 5)
self.assertListEqual(n.tolist(), a.to_ndarray().tolist())
# n[:, ::-1, 1:5:2].tolist() = [[[11, 13], [6, 8], [1, 3]], [[26, 28], [21, 23], [16, 18]]]
self.assertListEqual(n[:, ::-1, 1:5:2].tolist(), a[:, ::-1, 1:5:2].to_ndarray().tolist())
# n[:, 5:8, 1:5:2].tolist() = [[], []]
self.assertListEqual(n[:, 5:8, 1:5:2].tolist(), a[:, 5:8, 1:5:2].to_ndarray().tolist())
# n[:, 5:8, 1:5:2][1].tolist() = []
self.assertListEqual(n[:, 5:8, 1:5:2][1].tolist(), a[:, 5:8, 1:5:2][1].to_ndarray().tolist())
a = ak.arange(30).reshape(2, 3, 5, order='F')
        n = np.arange(30).reshape(2, 3, 5, order='F')
import numpy as np
import time
#from scipy.linalg import sqrtm
ABSERR = 10E-10
def compute_psd_factorization(X,r,nIterates=100,method='multiplicative',Init = None,silent=False):
n1,n2 = X.shape
if Init is None:
A = gen_psdlinmap(n1,r)
B = gen_psdlinmap(n2,r)
else:
A,B = Init
Errs = np.zeros((nIterates,))
start = time.process_time()
if not(silent):
print(' It. # | Error | Time Taken')
for ii in range(nIterates):
t_start = time.time()
if method == 'multiplicative':
try:
B = update_multiplicative(A, B, X)
except:
print('d')
B = update_multiplicative_damped(A, B, X)
try:
A = update_multiplicative(B, A, X.T)
except:
print('d')
A = update_multiplicative_damped(B, A, X.T)
if method == 'multiplicativeaccelerated':
B = update_multiplicativeaccelerated(A, B, X)
A = update_multiplicativeaccelerated(B, A, X.T)
if method == 'fpgm':
B = update_fpgm(A, B, X, 10)
B = np.real(B)
A = update_fpgm(B, A, X.T, 10)
AB = linmap_dot(A, B)
Errs[ii,] = np.linalg.norm(AB-X)/np.linalg.norm(X)
elapsed_time = time.time() - t_start
np.save('A.npy',A)
        np.save('B.npy',B)
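        if not(silent):
            # Progress line is a hedged sketch; the original file is truncated at this point.
            print(' %5d | %9.3e | %8.3f s' % (ii, Errs[ii,], elapsed_time))
    # Assumed return signature, consistent with how A, B and Errs are built above.
    return A, B, Errs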
# -*- coding: utf-8 -*-
"""
Created on Tue Feb 5 23:56:16 2019
@author: kirichoi
"""
import os, sys
import tellurium as te
import roadrunner
import numpy as np
import antimony
import scipy.optimize
import networkGenerator as ng
import time
import copy
def f1(k_list, *args):
global counts
global countf
args[0].reset()
args[0].setValues(args[0].getGlobalParameterIds(), k_list)
try:
args[0].steadyStateApproximate()
objCCC = args[0].getScaledConcentrationControlCoefficientMatrix()
objCCC[np.abs(objCCC) < 1e-12] = 0 # Set small values to zero
if np.isnan(objCCC).any():
dist_obj = 10000
else:
if args[3]:
objFlux = args[0].getReactionRates()
objFlux[np.abs(objFlux) < 1e-12] = 0 # Set small values to zero
# objFCC = args[0].getScaledFluxControlCoefficientMatrix()
# objFCC[np.abs(objFCC) < 1e-12] = 0 # Set small values to zero
objCCC_row = objCCC.rownames
objCCC_col = objCCC.colnames
objCCC = objCCC[np.argsort(objCCC_row)]
objCCC = objCCC[:,np.argsort(objCCC_col)]
if args[3]:
objFlux = objFlux[np.argsort(objCCC_col)]
dist_obj = (((np.linalg.norm(args[1] - objCCC)) + (np.linalg.norm(args[2] - objFlux))) *
((1 + np.sum(np.equal(np.sign(np.array(args[1])), np.sign(np.array(objCCC))))) +
(1 + np.sum(np.equal(np.sign(np.array(args[2])), np.sign(np.array(objFlux)))))))
else:
dist_obj = ((np.linalg.norm(args[1] - objCCC))*(1 +
np.sum(np.not_equal(np.sign(np.array(args[1])),
np.sign(np.array(objCCC))))))
except:
countf += 1
dist_obj = 10000
counts += 1
return dist_obj
def callbackF(X, convergence=0.):
global counts
global countf
print(str(counts) + ", " + str(countf))
return False
def initialize(Parameters):
global countf
global counts
numBadModels = 0
numGoodModels = 0
numIter = 0
ens_dist = np.empty(Parameters.ens_size)
ens_model = np.empty(Parameters.ens_size, dtype='object')
ens_rl = np.empty(Parameters.ens_size, dtype='object')
rl_track = []
rl_track.append(Parameters.knownReactionList)
# Initial Random generation
while (numGoodModels < Parameters.ens_size):
# Ensure no redundant model
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
while rl in rl_track:
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
antStr = ng.generateAntimony(Parameters.realFloatingIds, Parameters.realBoundaryIds, stt[1],
stt[2], rl, boundary_init=Parameters.realBoundaryVal)
try:
r = te.loada(antStr)
counts = 0
countf = 0
r.steadyStateApproximate()
p_bound = ng.generateParameterBoundary(r.getGlobalParameterIds())
res = scipy.optimize.differential_evolution(f1,
args=(r, Parameters.realConcCC, Parameters.realFlux, Parameters.FLUX),
bounds=p_bound,
maxiter=Parameters.optiMaxIter,
tol=Parameters.optiTol,
polish=Parameters.optiPolish,
seed=Parameters.r_seed)
if not res.success:
numBadModels += 1
else:
# TODO: Might be able to cut the bottom part by simply using
# the obj func value from optimizer
r = te.loada(antStr)
r.setValues(r.getGlobalParameterIds(), res.x)
r.steadyStateApproximate()
SS_i = r.getFloatingSpeciesConcentrations()
r.steadyStateApproximate()
if np.any(SS_i < 1e-5) or np.any(SS_i > 1e5):
numBadModels += 1
else:
concCC_i = r.getScaledConcentrationControlCoefficientMatrix()
if Parameters.FLUX:
flux_i = r.getReactionRates()
if np.isnan(concCC_i).any():
numBadModels += 1
else:
concCC_i[np.abs(concCC_i) < 1e-12] = 0 # Set small values to zero
if Parameters.FLUX:
flux_i[np.abs(flux_i) < 1e-12] = 0 # Set small values to zero
concCC_i_row = concCC_i.rownames
concCC_i_col = concCC_i.colnames
concCC_i = concCC_i[np.argsort(concCC_i_row)]
concCC_i = concCC_i[:,np.argsort(concCC_i_col)]
if Parameters.FLUX:
flux_i = flux_i[np.argsort(concCC_i_col)]
dist_i = (((np.linalg.norm(Parameters.realConcCC - concCC_i)) +
(np.linalg.norm(Parameters.realFlux - flux_i))) *
((1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))) +
(1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realFlux)),
np.sign(np.array(flux_i)))))))
else:
dist_i = ((np.linalg.norm(Parameters.realConcCC - concCC_i))*(1 +
np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))))
ens_dist[numGoodModels] = dist_i
r.reset()
ens_model[numGoodModels] = r.getAntimony(current=True)
ens_rl[numGoodModels] = rl
rl_track.append(rl)
numGoodModels = numGoodModels + 1
except:
numBadModels = numBadModels + 1
antimony.clearPreviousLoads()
numIter = numIter + 1
if int(numIter/1000) == (numIter/1000):
print("Number of iterations = " + str(numIter))
if int(numIter/10000) == (numIter/10000):
print("Number of good models = " + str(numGoodModels))
print("In generation: 1")
print("Number of total iterations = " + str(numIter))
print("Number of bad models = " + str(numBadModels))
return ens_dist, ens_model, ens_rl, rl_track
def mutate_and_evaluate(Parameters, listantStr, listdist, listrl, rl_track):
global countf
global counts
eval_dist = np.empty(Parameters.mut_size)
eval_model = np.empty(Parameters.mut_size, dtype='object')
eval_rl = np.empty(Parameters.mut_size, dtype='object')
for m in Parameters.mut_range:
o = 0
rl = ng.generateMutation(Parameters, listrl[m], listantStr[m])
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
while ((rl in rl_track) and (o < Parameters.maxIter_mut)):
rl = ng.generateMutation(Parameters, listrl[m], listantStr[m])
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
o += 1
if o >= Parameters.maxIter_mut:
eval_dist[m] = listdist[m]
eval_model[m] = listantStr[m]
eval_rl[m] = listrl[m]
else:
antStr = ng.generateAntimony(Parameters.realFloatingIds, Parameters.realBoundaryIds,
stt[1], stt[2], rl,
boundary_init=Parameters.realBoundaryVal)
try:
r = te.loada(antStr)
r.steadyStateApproximate()
p_bound = ng.generateParameterBoundary(r.getGlobalParameterIds())
res = scipy.optimize.differential_evolution(f1,
args=(r, Parameters.realConcCC, Parameters.realFlux, Parameters.FLUX),
bounds=p_bound,
maxiter=Parameters.optiMaxIter,
tol=Parameters.optiTol,
polish=Parameters.optiPolish,
seed=Parameters.r_seed)
if not res.success:
eval_dist[m] = listdist[m]
eval_model[m] = listantStr[m]
eval_rl[m] = listrl[m]
else:
r = te.loada(antStr)
r.setValues(r.getGlobalParameterIds(), res.x)
r.steadyStateApproximate()
SS_i = r.getFloatingSpeciesConcentrations()
r.steadyStateApproximate()
if np.any(SS_i < 1e-5) or np.any(SS_i > 1e5):
eval_dist[m] = listdist[m]
eval_model[m] = listantStr[m]
eval_rl[m] = listrl[m]
else:
concCC_i = r.getScaledConcentrationControlCoefficientMatrix()
if Parameters.FLUX:
flux_i = r.getReactionRates()
if np.isnan(concCC_i).any():
eval_dist[m] = listdist[m]
eval_model[m] = listantStr[m]
eval_rl[m] = listrl[m]
else:
concCC_i[np.abs(concCC_i) < 1e-12] = 0 # Set small values to zero
if Parameters.FLUX:
flux_i[np.abs(flux_i) < 1e-12] = 0 # Set small values to zero
concCC_i_row = concCC_i.rownames
concCC_i_col = concCC_i.colnames
concCC_i = concCC_i[np.argsort(concCC_i_row)]
concCC_i = concCC_i[:,np.argsort(concCC_i_col)]
if Parameters.FLUX:
flux_i = flux_i[np.argsort(concCC_i_col)]
dist_i = (((np.linalg.norm(Parameters.realConcCC - concCC_i)) +
(np.linalg.norm(Parameters.realFlux - flux_i))) *
((1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))) +
(1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realFlux)),
np.sign(np.array(flux_i)))))))
else:
dist_i = ((np.linalg.norm(Parameters.realConcCC - concCC_i))*(1 +
np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))))
if dist_i < listdist[m]:
eval_dist[m] = dist_i
r.reset()
eval_model[m] = r.getAntimony(current=True)
eval_rl[m] = rl
rl_track.append(rl)
else:
eval_dist[m] = listdist[m]
eval_model[m] = listantStr[m]
eval_rl[m] = listrl[m]
except:
eval_dist[m] = listdist[m]
eval_model[m] = listantStr[m]
eval_rl[m] = listrl[m]
antimony.clearPreviousLoads()
return eval_dist, eval_model, eval_rl, rl_track
def random_gen(Parameters, listAntStr, listDist, listrl, rl_track):
global countf
global counts
rndSize = len(listDist)
rnd_dist = np.empty(rndSize)
rnd_model = np.empty(rndSize, dtype='object')
rnd_rl = np.empty(rndSize, dtype='object')
for l in range(rndSize):
d = 0
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
# Ensure no redundant models
while ((rl in rl_track) and (d < Parameters.maxIter_gen)):
rl = ng.generateReactionList(Parameters)
st = ng.getFullStoichiometryMatrix(rl, Parameters.ns).tolist()
stt = ng.removeBoundaryNodes(np.array(st))
d += 1
if d >= Parameters.maxIter_gen:
rnd_dist[l] = listDist[l]
rnd_model[l] = listAntStr[l]
rnd_rl[l] = listrl[l]
else:
antStr = ng.generateAntimony(Parameters.realFloatingIds, Parameters.realBoundaryIds,
stt[1], stt[2], rl, boundary_init=Parameters.realBoundaryVal)
try:
r = te.loada(antStr)
r.steadyStateApproximate()
p_bound = ng.generateParameterBoundary(r.getGlobalParameterIds())
res = scipy.optimize.differential_evolution(f1,
args=(r, Parameters.realConcCC, Parameters.realFlux, Parameters.FLUX),
bounds=p_bound,
maxiter=Parameters.optiMaxIter,
tol=Parameters.optiTol,
polish=Parameters.optiPolish,
seed=Parameters.r_seed)
# Failed to find solution
if not res.success:
rnd_dist[l] = listDist[l]
rnd_model[l] = listAntStr[l]
rnd_rl[l] = listrl[l]
else:
r = te.loada(antStr)
r.setValues(r.getGlobalParameterIds(), res.x)
r.steadyStateApproximate()
SS_i = r.getFloatingSpeciesConcentrations()
r.steadyStateApproximate()
if np.any(SS_i < 1e-5) or np.any(SS_i > 1e5):
rnd_dist[l] = listDist[l]
rnd_model[l] = listAntStr[l]
rnd_rl[l] = listrl[l]
else:
concCC_i = r.getScaledConcentrationControlCoefficientMatrix()
if Parameters.FLUX:
flux_i = r.getReactionRates()
if np.isnan(concCC_i).any():
rnd_dist[l] = listDist[l]
rnd_model[l] = listAntStr[l]
rnd_rl[l] = listrl[l]
else:
concCC_i[np.abs(concCC_i) < 1e-12] = 0 # Set small values to zero
if Parameters.FLUX:
flux_i[np.abs(flux_i) < 1e-12] = 0 # Set small values to zero
concCC_i_row = concCC_i.rownames
concCC_i_col = concCC_i.colnames
concCC_i = concCC_i[np.argsort(concCC_i_row)]
concCC_i = concCC_i[:,np.argsort(concCC_i_col)]
if Parameters.FLUX:
flux_i = flux_i[np.argsort(concCC_i_col)]
dist_i = (((np.linalg.norm(Parameters.realConcCC - concCC_i)) +
(np.linalg.norm(Parameters.realFlux - flux_i))) *
((1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realConcCC)),
np.sign(np.array(concCC_i))))) +
                                       (1 + np.sum(np.not_equal(np.sign(np.array(Parameters.realFlux)),
                                                                np.sign(np.array(flux_i)))))))
#
# Data for analyzing causality.
# By <NAME>
#
# Classes:
# ccm
# embed
#
# Paper:
# Detecting Causality in Complex Ecosystems
# Ge<NAME> et al. 2012
#
# Thanks to <NAME> and <NAME>
#
# Notes:
# Originally I thought this can be made way faster by only calculting the
# distances once and then chopping it to a specific library length. It turns out
# that calculating the distances is cheaper than filtering the indices.
#
import numpy as np
from sklearn import neighbors
from sklearn import metrics
import skccm.utilities as ut
import pandas as pd
import time
class CCM:
"""
Convergent cross mapping for two embedded time series
"""
def __init__(self, weights='exp', score_metric='corrcoef', verbose=False):
"""
Parameters
----------
weights : weighting scheme for predictions
- exp : exponential weighting
score : how to score the predictions
-'score'
-'corrcoef'
verbose : prints out calculation status
"""
self.weights = weights
self.score_metric = score_metric
self.verbose = verbose
def fit(self,X1,X2):
"""
        Fit the training data for ccm. Creates separate near neighbor regressors
for X1 and X2 independently.
X1 : embedded time series of shape (num_samps,embed_dim)
X2 : embedded time series of shape (num_samps,embed_dim)
near_neighs : string
- 'sorround' : this is what the paper uses
- 'all' : calculate the distance to all near neighbors
"""
# Save X1_train and X2_train for prediction later. Confusing,
# but we need to make predictions about our testing set using these.
self.X1 = X1
self.X2 = X2
        #to surround a point, there must be ndim + 1 points
# we add two here because the closest neighbor is itself. so that is
# going to be dropped.
near_neighs = X1.shape[1] + 2
self.knn1 = neighbors.KNeighborsRegressor(near_neighs)
self.knn2 = neighbors.KNeighborsRegressor(near_neighs)
def predict_no_drop(self,lib_lengths):
"""
Make a prediction
Parameters
----------
X1_test : test set
X2_test : test set
lib_lengths : list of library lengths to test
"""
X1_pred = []
X2_pred = []
for liblen in lib_lengths:
x1_p = np.empty(self.X1.shape)
x2_p = np.empty(self.X2.shape)
#keep only the indices that are less than library length
self.knn1.fit(self.X1[:liblen], self.X1[:liblen])
self.knn2.fit(self.X2[:liblen], self.X2[:liblen])
dist1,ind1 = self.knn1.kneighbors(self.X1)
dist2,ind2 = self.knn2.kneighbors(self.X2)
#drop indices and distances to themselves
dist1 = dist1[:,1:]
dist2 = dist2[:,1:]
ind1 = ind1[:,1:]
ind2 = ind2[:,1:]
for j in range(self.X1.shape[1]):
W1 = ut.exp_weight(dist1)
W2 = ut.exp_weight(dist2)
#flip the weights and indices
x1_p[:, j] = np.sum(self.X1[ind2, j] * W2, axis=1)
x2_p[:, j] = np.sum(self.X2[ind1, j] * W1, axis=1)
X1_pred.append(x1_p)
X2_pred.append(x2_p)
self.X1_pred = X1_pred
self.X2_pred = X2_pred
return X1_pred, X2_pred
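    # Typical flow (illustrative sketch; the embeddings X1_emb/X2_emb are assumed to be
    # built elsewhere, e.g. with skccm's Embed class):
    #   ccm = CCM()
    #   ccm.fit(X1_emb, X2_emb)
    #   X1_pred, X2_pred = ccm.predict_no_drop(lib_lengths=[50, 100, 200])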
def predict_drop_in_list(self,lib_lengths,emb_ind1,emb_ind2):
"""
Make a prediction, but the same indices cant be matched with each other.
Parameters
----------
lib_lengths : library lengths to Test
e_ind1 : indices of the first embed time series.
e_ind2 : indices of the second embed time series.
"""
X1_pred = []
X2_pred = []
        #need to reset the class to use all neighbors so that the appropriate
# neighbors can be dropped for each class
self.knn1 = neighbors.KNeighborsRegressor(len(self.X1))
self.knn2 = neighbors.KNeighborsRegressor(len(self.X2))
self.knn1.fit(self.X1, self.X1)
self.knn2.fit(self.X2, self.X2)
dist1,ind1 = self.knn1.kneighbors(self.X1)
dist2,ind2 = self.knn2.kneighbors(self.X2)
#find the conflicting indices
conf1 = ut.conflicting_indices(emb_ind1)
conf2 = ut.conflicting_indices(emb_ind2)
#throw out the indices that are in the embedding
dist1, ind1 = ut.throw_out_nn_indices(dist1,ind1,conf1)
dist2, ind2 = ut.throw_out_nn_indices(dist2,ind2,conf2)
n_sorround = self.X1.shape[1] + 1
        #flipping allows for a faster implementation as we can feed
        # ut.in_library_len_keep smaller and smaller arrays
for liblen in lib_lengths:
#keep only the indices that are less than library length
#t0 = time.time()
i_1, d_1 = ut.in_library_len_keep(ind1, dist1, liblen,n_sorround)
i_2, d_2 = ut.in_library_len_keep(ind2, dist2, liblen,n_sorround)
#t1 = time.time()
#t0 = time.time()
W1 = ut.exp_weight(d_1)
W2 = ut.exp_weight(d_2)
x1_p = np.empty(self.X1.shape)
x2_p = np.empty(self.X2.shape)
for j in range(self.X1.shape[1]):
#flip the weights and indices
                x1_p[:, j] = np.sum(self.X1[i_2, j] * W2, axis=1)
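                # The original file is truncated at this point; the lines below are a
                # hedged sketch that mirrors predict_no_drop() above.
                x2_p[:, j] = np.sum(self.X2[i_1, j] * W1, axis=1)
            X1_pred.append(x1_p)
            X2_pred.append(x2_p)
        self.X1_pred = X1_pred
        self.X2_pred = X2_pred
        return X1_pred, X2_pred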
import glob
import os.path
import cv2
import numpy as np
import collections
import matplotlib
import scipy.spatial.distance
import itertools
import matplotlib.pyplot as plt
import matplotlib.animation as animation
IMAGE_DIR = '../input/train/'
MSEC_PER_FRAME = 200
MSEC_REPEAT_DELAY= 2000
ADD_MASK_OUTLINE = True
TILE_MIN_SIDE = 50 # pixels; see tile_features()
SHOW_GIF = False
def grays_to_RGB(img):
# Convert a 1-channel grayscale image into 3 channel RGB image
return np.dstack((img, img, img))
def image_plus_mask(img, mask):
# Returns a copy of the grayscale image, converted to RGB,
# and with the edges of the mask added in red
img_color = grays_to_RGB(img)
mask_edges = cv2.Canny(mask, 100, 200) > 0
img_color[mask_edges, 0] = 255 # chan 0 = bright red
img_color[mask_edges, 1] = 0
img_color[mask_edges, 2] = 0
return img_color
def to_mask_path(f_image):
# Convert an image file path into a corresponding mask file path
dirname, basename = os.path.split(f_image)
maskname = basename.replace(".tif", "_mask.tif")
return os.path.join(dirname, maskname)
def add_masks(images):
# Return copies of the group of images with mask outlines added
# Images are stored as dict[filepath], output is also dict[filepath]
images_plus_masks = {}
for f_image in images:
img = images[f_image]
mask = cv2.imread(to_mask_path(f_image))
images_plus_masks[f_image] = image_plus_mask(img, mask)
return images_plus_masks
def get_patient_images(patient):
# Return a dict of patient images, i.e. dict[filepath]
f_path = IMAGE_DIR + '%i_*.tif' % patient
f_ultrasounds = [f for f in glob.glob(f_path) if 'mask' not in f]
images = {f:get_image(f) for f in f_ultrasounds}
return images
def image_features(img):
return tile_features(img) # a tile is just an image...
def tile_features(tile, tile_min_side = TILE_MIN_SIDE):
# Recursively split a tile (image) into quadrants, down to a minimum
# tile size, then return flat array of the mean brightness in those tiles.
tile_x, tile_y = tile.shape
    mid_x = tile_x // 2   # integer division so the slice indices below stay ints (Python 3)
    mid_y = tile_y // 2
if (mid_x < tile_min_side) or (mid_y < tile_min_side):
return np.array([tile.mean()]) # hit minimum tile size
else:
tiles = [ tile[:mid_x, :mid_y ], tile[mid_x:, :mid_y ],
tile[:mid_x , mid_y:], tile[mid_x:, mid_y:] ]
features = [tile_features(t) for t in tiles]
return np.array(features).flatten()
def feature_dist(feats_0, feats_1):
# Definition of the distance metric between image features
return scipy.spatial.distance.euclidean(feats_0, feats_1)
def feature_dists(features):
# Calculate the distance between all pairs of images (using their features)
dists = collections.defaultdict(dict)
f_img_features = features.keys()
for f_img0, f_img1 in itertools.permutations(f_img_features, 2):
dists[f_img0][f_img1] = feature_dist(features[f_img0], features[f_img1])
return dists
def image_seq_start(dists, f_start):
# Given a starting image (i.e. named f_start), greedily pick a sequence
# of nearest-neighbor images until there are no more unpicked images.
f_picked = [f_start]
f_unpicked = set(dists.keys()) - set([f_start])
f_current = f_start
dist_tot = 0
while f_unpicked:
# Collect the distances from the current image to the
# remaining unpicked images, then pick the nearest one
candidates = [(dists[f_current][f_next], f_next) for f_next in f_unpicked]
dist_nearest, f_nearest = list(sorted(candidates))[0]
# Update the image accounting & make the nearest image the current image
f_unpicked.remove(f_nearest)
f_picked.append(f_nearest)
dist_tot += dist_nearest
f_current = f_nearest
return (dist_tot, f_picked)
def image_sequence(dists):
# Return a sequence of images that minimizes the sum of
# inter-image distances. This function relies on image_seq_start(),
    # which requires an arbitrary starting image.
    # In order to find an even lower-cost sequence, this function
    # tries all possible starting images and returns the best result.
f_starts = dists.keys()
seqs = [image_seq_start(dists, f_start) for f_start in f_starts]
dist_best, seq_best = list(sorted(seqs))[0]
return seq_best
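# End-to-end sketch (get_image() is referenced above but is assumed to be defined
# further down in the original file):
#   images = get_patient_images(patient=1)
#   feats = {f: image_features(img) for f, img in images.items()}
#   ordered_files = image_sequence(feature_dists(feats))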
def grayscale_to_RGB(img):
    return np.asarray(np.dstack((img, img, img)))
# from __future__ import division
#-------------------------------------
#
# Started at 06/08/2018 (YuE)
#
# This script is based on the previous script
# threeApproachesComparison_v6.py
#
## Upgraded version of python (python3.4): the script was rewritten to take into
# account some differences in the descriptions and usage of some functions
# (version cma_v3 and earlier scripts are written under python2).
#
# 07/24/2018: IT IS NOT FINISHED:
#
# What is still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
#
# But nevertheless, the dependences of the transmitted energy on the impact
# parameter are close to the inverse quadratic (as it should be!) at all velocities.
#
# 07/27/2018: IT IS NOT FINISHED:
#
# What is still unsatisfactory:
# 1) the absolute values of frictional forces for all methods of calculation,
# 2) their dependence on the ion velocity.
# The investigation of that is in progress.
#
# Some features were improved, some figures were corrected.
#
#-------------------------------------
#========================================================
#
# This code compares two approaches: "classical" (from [1]) and
# "magnus" (from [2]).
#
# For "classical" approach the magnetized interaction between ion
# and electron is considered for ion velocities V_i > rmsTrnsvVe.
#
# References:
#
# [1] <NAME>, <NAME>, <NAME>, <NAME>.
# "Physics guide of BETACOOL code. Version 1.1". C-A/AP/#262, November
# 2006, Brookhaven National Laboratory, Upton, NY 11973.
# [2] <NAME>, <NAME>. "New Algorithm for Dynamical Friction
# of Ions in a Magnetized Electron Beam". AIP Conf. Proc. 1812, 05006 (2017).
#
#========================================================
#########################################################
#
# Main issues of the calculations:
#
# 1) Friction force (FF) is calculated in the (P)article (R)est (F)rame,
# i.e. in the frame moving together with both (cooled and cooling)
# beams at a velocity V0;
# 2) Friction force is calculated for each value of ion velocity
# in the interval from .1*rmsTrnsvVe till 10*rmsTrnsvVe;
# 3) Initially it is assumed that all electrons have a longitudinal
# velocity rmsLongVe and a transversal velocity rmsTrnsvVe;
# 4) For each ion velocity the minimal and maximal values of the
# impact parameter are defined. Radius of the shielding of the
# electric field of the ion equals to the value of the maximal
# impact parameter;
# 5) For each impact parameter in the interval from minimal till
# maximal values the transferred momenta deltap_x,y,z are
# calculated;
# 6) The transferred momenta found in this way allow one to calculate the
# transferred energy delta_E = deltap^2/(2*m_e) and to integrate it over
# the impact parameter; then (expressions (3.4), (3.5) from [1]):
# FF =-2*pi*n_e*integral_rhoMin^rhoMax delta_E*rho*drho;
# 7) For taking into account the velocity distribution of the
# electrons it is necessary to repeat these calculations for
# each value of the electron's velocity and then to integrate the result
# over the distribution of the velocities.
#
# 10/26/2018:
#
# 8) Item 6 is wrong; the correct expression for the transferred
# energy delta_E will be used;
# 9) My own Least Squares Method (LSM) is used to fit the
# dependence of the transferred momenta on the impact parameter;
#
#
# 11/08/2018:
#
# 10) Two functions ('fitting' and 'errFitAB') are defined to implement
# this LSM, i.e. to find the parameters of the fit and the error of this
# fit;
#
# 11) Analysis of different dependences between values; graphical
# presentation of these dependences;
#
#########################################################
import os, sys
import numpy as np
import math
import matplotlib.pyplot as plt
import matplotlib.cm as cm
from matplotlib.colors import LogNorm
from matplotlib import ticker
from matplotlib import markers
import matplotlib as mpl
from matplotlib import cm
from mpl_toolkits.mplot3d import Axes3D
import matplotlib.pyplot as plt
from matplotlib.legend_handler import HandlerLine2D
import scipy.integrate as integrate
from scipy.integrate import quad, nquad, dblquad
from scipy.constants import pi
from scipy import optimize
from statistics import mean
from array import array
#
# All physical constants have its dimension in units in the system CI.
# This code uses units in the system CGS!
#
from scipy.constants import speed_of_light as clight
from scipy.constants import epsilon_0 as eps0
from scipy.constants import mu_0 as mu0
from scipy.constants import elementary_charge as qe
from scipy.constants import electron_mass as me
from scipy.constants import proton_mass as mp
from scipy.constants import Boltzmann as kB
pi=3.14159265358
#
# Physical constants:
#
m_e=9.10938356e-28 # electron mass, g
m_elec=m_e # to keep variable from previous script
m_p=1.672621898e-24 # electron mass, g
M_ion = m_p # to keep variable from previous script
q_e=4.803204673e-10 # electron charge, CGSE unit: sqrt(g*cm^3/sec^2)
q_elec=q_e # to keep variable from previous script
Z_ion = q_e # to keep variable from previous script
cLight=2.99792458e10 # speed of light, cm/sec
eVtoErg=1.6021766208e-12 # 1 eV = 1.6...e-12 erg
CtoPart=2.99792458e9 # 1 C = 1 A*sec = 2.9...e9 particles
m_e_eV = m_e*cLight**2/eVtoErg
#
# Electron beam parameters:
#
Ekin=3.0e4 # kinetic energy, eV
curBeam=0.5 # current density, A/cm^2
dBeam=3.0 # beam diameter, cm
angSpread=3.0 # angular spread, mrad
trnsvT=0.5 # transversal temperature, eV
longT=2.0e-4 # longitudinal temperature, eV (was 2.0e-4)
nField=1 # number of values of the magnetic field
fieldB=np.zeros(nField) # magnetic field
fieldB[0]=3.e3 # Gs
omega_p=1.0e9 # plasma frequency, 1/sec
n_e=omega_p**2*m_e/(4.*pi*q_e**2) # plasma density, 3.1421e+08 cm-3
n_e1=8.e7 # plasma density, cm-3
omega_p1=np.sqrt(4.*pi*n_e1*q_e**2/m_e) # plasma frequency, 5.0459e+08 1/s
#
# Cooling system parameter:
#
coolLength=150.0 # typical length of the cooling section, cm
#
# HESR:
#
Ekin=90.8e4 # HESR kinetic energy, eV
curBeam=0.5 # HESR current beam, A
dBeam=2.0 # HESR beam diameter, cm
angSpread=0.0 # HESR angular spread, mrad
trnsvT=0.2 # HESR transversal temperature, eV
longT=1.0e-2 # HESR longitudinal temperature, eV (was 2.0e-4)
fieldB[0]=1.e3 # HESR, Gs
coolLength=270.0 # HESR typical length of the cooling section, cm
#
# EIC:
#
angSpread=0.0 # EIC angular spread, mrad
fieldB[0]=5.e4 # EIC, Gs
coolLength=300.0 # EIC typical length of the cooling section, cm
#
# Calculated parameters of the electron beam:
#
V0 = cLight*np.sqrt(Ekin/m_e_eV*(Ekin/m_e_eV+2.))/(Ekin/m_e_eV+1.)
print ('V0 =%e' % V0)
tetaV0=0. # angle between V0 and magnetic field, rad
B_mag=fieldB[0]*np.cos(tetaV0) # magnetic field acting on an electron, Gs
rmsTrnsvVe=np.sqrt(2.*trnsvT*eVtoErg/m_e) # RMS transversal velocity, cm/s
rmsLongVe=np.sqrt(2.*longT*eVtoErg/m_e) # RMS longitudinal velocity, cm/s
# HESR:
dens=curBeam*(CtoPart/q_e)/(pi*(.5*dBeam)**2*V0) # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('HESR: dens = %e,omega_p = %e' % (dens,omega_p))
# EIC:
rmsLongVe = 1.0e+7 # cm/s
longT = .5*m_e*rmsLongVe**2/eVtoErg
rmsTrnsvVe = 4.2e+7 # cm/s
trnsvT = .5*m_e*rmsTrnsvVe**2/eVtoErg
print ('EIC: rmsLongVe = %e, longT = %e, rmsTrnsvVe = %e, trnsvT = %e' % \
(rmsLongVe,longT,rmsTrnsvVe,trnsvT))
dens=2.e9 # density, 1/cm^3
omega=np.sqrt(4.*pi*dens*q_e**2/m_e) # plasma frequency, 1/s
n_e=dens
omega_p=omega
print ('EIC: dens = %e,omega_p = %e' % (dens,omega_p))
cyclFreq=q_e*B_mag/(m_e*cLight) # cyclotron frequency, 1/s
rmsRoLarm=rmsTrnsvVe*cyclFreq**(-1) # RMS Larmor radius, cm
dens=omega_p**2*m_e/(4.*pi*q_e**2) # density, 1/cm^3
likeDebyeR=(3./dens)**(1./3.) # "Debye" sphere with 3 electrons, cm
eTempTran=trnsvT # to keep variable from previous script
eTempLong=longT # to keep variable from previous script
coolPassTime=coolLength/V0 # time to pass through the cooling section, sec
thetaVi=0. # polar angle ion and cooled electron beams, rad
phiVi=0. # azimuth angle ion and cooled electron beams, rad
powV0=round(np.log10(V0))
mantV0=V0/(10**powV0)
pow_n_e=round(np.log10(n_e))
mant_n_e=n_e/(10**pow_n_e)
#
# Formfactor ffForm for friction force:
#
# ffForm = 2*pi*dens*q_e**4/(m_e*V0**2)=
# = 0.5*omega_p**2*q_e**2/V0**2
#
# Dimension of ffForm is force: g*cm/sec**2=erg/cm
#
# 1 MeV/m = 1.e6*eVtoErg/100. g*cm/sec**2 = 1.e4*eVtoErg erg/cm
MeV_mToErg_cm=1.e4*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/MeV_mToErg_cm # MeV/m
eV_mToErg_m=100.*eVtoErg
# ffForm=-.5*omega_p**2*q_e**2/V0**2/eV_mToErg_m # =-6.8226e-12 eV/m
eV_mInErg_cm=100.*eVtoErg
ffForm=-.5*omega_p**2*q_e**2/V0**2/eVtoErg # =-6.8226e-10 eV/cm
ffForm=100.*ffForm # =-6.8226e-08 eV/m
ergToEV = 1./1.60218e-12
#
# Relative velocities of electrons:
#
relVeTrnsv=rmsTrnsvVe/V0
relVeLong=rmsLongVe/V0
print ('V0=%e cm/s, rmsTrnsvVe=%e cm/s (rel = %e), rmsLongVe=%e cm/s (rel = %e)' % \
(V0,rmsTrnsvVe,relVeTrnsv,rmsLongVe,relVeLong))
# Indices:
(Ix, Ipx, Iy, Ipy, Iz, Ipz) = range(6)
stepsNumberOnGyro = 25 # number of the steps on each Larmour period
'''
#
# Opening the input file:
#
inputFile='areaOfImpactParameter_tAC-v6_fig110.data'
print ('Open input file "%s"...' % inputFile)
inpfileFlag=0
try:
inpfile = open(inputFile,'r')
inpfileFlag=1
except:
print ('Problem to open input file "%s"' % inputFile)
if inpfileFlag == 1:
print ('No problem to open input file "%s"' % inputFile)
lines=0 # Number of current line from input file
dataNumber=0 # Number of current value of any types of Data
xAboundary=np.zeros(100)
xBboundary=np.zeros(100)
while True:
lineData=inpfile.readline()
# print ('line=%d: %s' % (lines,lineData))
if not lineData:
break
lines += 1
if lines > 4:
words=lineData.split()
nWords=len(words)
# print ('Data from %d: words=%s, number of entries = %d' % (lines,words,nWords))
xAboundary[dataNumber]=float(words[0])
xBboundary[dataNumber]=float(words[1])
dataNumber += 1
inpfile.close()
print ('Close input file "%s"' % inputFile)
'''
#====================================================================
#
#------------------ Begin of defined functions -----------------------
#
# Larmor frequency electron:
#
def omega_Larmor(mass,B_mag):
return (q_elec)*B_mag/(mass*clight*1.e+2) # rad/sec
#
# Derived quantities:
#
omega_L = omega_Larmor(m_elec,B_mag) # rad/sec
T_larm = 2*pi/omega_L # sec
timeStep = T_larm/stepsNumberOnGyro # time step, sec
print ('omega_Larmor= %e rad/sec, T_larm = %e sec, timeStep = %e sec' % \
(omega_L,T_larm,timeStep))
nLarmorAvrgng=10 # number of averaged Larmor rotations
#
# Data to integrate transferred momemta over the track:
#
timeStep_c=nLarmorAvrgng*stepsNumberOnGyro*timeStep # sec
print ('timeStep_c = %e s' % timeStep_c)
eVrmsTran = np.sqrt(2.*eTempTran*eVtoErg/m_elec) # cm/sec
eVrmsLong = np.sqrt(2.*eTempLong*eVtoErg/m_elec) # cm/sec
kinEnergy = m_elec*(eVrmsTran**2+eVrmsLong**2)/2. # kinetic energy; erg
print ('eVrmsTran = %e cm/sec, eVrmsLong = %e cm/sec, kinEnergy = %e eV' % \
(eVrmsTran,eVrmsLong,ergToEV*kinEnergy))
ro_larmRMS = eVrmsTran/omega_L # cm
print ('ro_larmRMS =%e mkm' % (1.e4*ro_larmRMS))
#
# Electrons are magnetized for impact parameter >> rhoCrit:
#
rhoCrit=math.pow(q_elec**2/(m_elec*omega_L**2),1./3) # cm
print ('rhoCrit (mkm) = ' , 1.e+4*rhoCrit)
#
# Conversion from the 6-vector of the electron's "coordinates" to the 6-vector
# of guiding-center coordinates:
# z_e=(x_e,px_e,y_e,py_e,z_e,pz_e) --> zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e);
#
def toGuidingCenter(z_e):
mOmega=m_elec*omega_L # g/sec
zgc_e=z_e.copy() # 6-vector
zgc_e[Ix] = np.arctan2(z_e[Ipx]+mOmega*z_e[Iy],z_e[Ipy]) # radians
zgc_e[Ipx]= (((z_e[Ipx]+mOmega*z_e[Iy])**2+z_e[Ipy]**2)/(2.*mOmega)) # g*cm**2/sec
zgc_e[Iy] =-z_e[Ipx]/mOmega # cm
zgc_e[Ipy]= z_e[Ipy]+mOmega*z_e[Ix] # g/sec
return zgc_e
#
# Conversion from the 6-vector of guiding-center coordinates to the 6-vector
# of electron's "coordinates":
# zgc_e=(phi,p_phi,y_gc,p_gc,z_e,pz_e) --> z_e=(x_e,px_e,y_e,py_e,z_e,pz_e);
#
def fromGuidingCenter(zgc_e):
mOmega=m_elec*omega_L # g/sec
rho_larm=np.sqrt(2.*zgc_e[Ipx]/mOmega) # cm
z_e = zgc_e.copy() # 6-vector
z_e[Ix] = zgc_e[Ipy]/mOmega-rho_larm*np.cos(zgc_e[Ix]) # cm
z_e[Ipx]=-mOmega*zgc_e[Iy] # g*cm/sec
z_e[Iy] = zgc_e[Iy]+rho_larm*np.sin(zgc_e[Ix]) # cm
z_e[Ipy]= mOmega*rho_larm*np.cos(zgc_e[Ix]) # g*cm/sec
return z_e
#
# Matrix to drag the electron through the solenoid with field 'B_mag'
# during time interval 'deltaT':
#
def solenoid_eMatrix(B_mag,deltaT):
slndMtrx=np.identity(6)
omega_L=omega_Larmor(m_elec,B_mag) # rad/sec
mOmega= m_elec*omega_L # g/sec
phi=omega_L*deltaT # phase, rad
cosPhi=math.cos(phi) # dimensionless
sinPhi=math.sin(phi) # dimensionless
cosPhi_1=2.*math.sin(phi/2.)**2 # dimensionless
slndMtrx[Iy, Iy ]= cosPhi # dimensionless
slndMtrx[Ipy,Ipy]= cosPhi # dimensionless
slndMtrx[Iy, Ipy]= sinPhi/mOmega # sec/g
slndMtrx[Ipy,Iy ]=-mOmega*sinPhi # g/sec
slndMtrx[Iz, Ipz]= deltaT/m_elec # sec/g
slndMtrx[Ix, Ipx]= sinPhi/mOmega # sec/g
slndMtrx[Ix, Iy ]= sinPhi # dimensionless
slndMtrx[Ix, Ipy]= cosPhi_1/mOmega # sec/g
slndMtrx[Iy, Ipx]=-cosPhi_1/mOmega # sec/g
slndMtrx[Ipy,Ipx]=-sinPhi # dimensionless
return slndMtrx
#
# Matrix to drag the particle through the drift during time interval 'deltaT':
#
def drift_Matrix(M_prtcl,deltaT):
driftMtrx = np.identity(6)
for i in (Ix,Iy,Iz):
driftMtrx[i,i+1]=deltaT/M_prtcl # sec/g
return driftMtrx
#
# Matrix to dragg electron in the "guiding center" system during time interval 'deltaT':
#
def guidingCenter_Matrix(deltaT):
gcMtrx = np.identity(6)
gcMtrx[Iz,Ipz]=deltaT/m_elec # sec/g
return gcMtrx
#
# Description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transferred momenta to ion and electron:
#
def guidingCenterCollision(vectrElec_gc,vectrIon,deltaT):
dpIon=np.zeros(3)
dpElec=np.zeros(3)
mOmegaLarm=m_elec*omega_L # g/sec
dpFactor_gc=q_elec**2 # g*cm^3/sec^2
rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # cm
sinOmega_gc=math.sin(vectrElec_gc[0])
cosOmega_gc=math.cos(vectrElec_gc[0])
x_gc=vectrElec_gc[3]/mOmegaLarm # cm
numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
(vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3/2) # cm^3
action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
b_gc=np.sqrt((vectrIon[0]-x_gc)**2+ \
(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm
# Dimensions of dpIon, deElec are g*cm/sec:
dpIon[0]=-dpFactor_gc*deltaT*(vectrIon[0]-x_gc)/b_gc**3
dpIon[1]=-dpFactor_gc*deltaT*(vectrIon[2]-vectrElec_gc[2])/b_gc**3
dpIon[2]=-dpFactor_gc*deltaT*(vectrIon[4]-vectrElec_gc[4])/b_gc**3
dpElec[0]=-dpIon[0]
dpElec[1]=-dpIon[1]
dpElec[2]=-dpIon[2]
# print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
# (dpIon[0],dpIon[1],dpIon[2]))
return dpIon,dpElec,action,b_gc
#
# "Magnus expansion" description of the collision during time interval 'deltaT'
# in the system coordinates of "guiding center" of electron
# input - 6-vectors for electron and ion before collision and time step deltaT;
# output - transferred momenta to ion and electron and the electron y_gc coordinate,
# as well as the calculated parameters C1,C2,C3,b,D1,D2,q for testing:
#
def MagnusExpansionCollision(vectrElec_gc,vectrIon,deltaT):
# print ('Ion: x=%e, y=%e, z=%e' % (vectrIon[0],vectrIon[2],vectrIon[4]))
# print ('Electron: x=%e, y=%e, z=%e' %
# (vectrElec_gc[0],vectrElec_gc[4],vectrElec_gc[4]))
dpIon=np.zeros(3)
dpElec=np.zeros(3)
mOmegaLarm=m_elec*omega_L # g/sec
dpFactor_gc=q_elec**2 # g*cm^3/sec^2
rhoLarm_gc=np.sqrt(2.*vectrElec_gc[1]/mOmegaLarm) # cm
sinOmega_gc=math.sin(vectrElec_gc[0])
cosOmega_gc=math.cos(vectrElec_gc[0])
x_gc=vectrElec_gc[3]/mOmegaLarm # cm
numer=(vectrIon[0]-x_gc)*cosOmega_gc- \
(vectrIon[2]-vectrElec_gc[2])*sinOmega_gc # cm
denom=((vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+rhoLarm_gc**2)**(3./2.) # cm^3
action=vectrElec_gc[1]+dpFactor_gc*numer*rhoLarm_gc/(omega_L*denom) # g*cm^2/sec
# C1=np.sqrt((vectrIon[0]-x_gc)**2+ \
# (vectrIon[2]-vectrElec_gc[2])**2+ \
# (vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm) # cm^2
C1=(vectrIon[0]-x_gc)**2+(vectrIon[2]-vectrElec_gc[2])**2+ \
(vectrIon[4]-vectrElec_gc[4])**2+2.*action/mOmegaLarm # cm^2
C2=2.*((vectrIon[0]-x_gc)*vectrIon[1]/M_ion+ \
(vectrIon[2]-vectrElec_gc[2])*vectrIon[3]/M_ion+ \
(vectrIon[4]-vectrElec_gc[4])* \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)) # cm^2/sec
C3=(vectrIon[1]/M_ion)**2+(vectrIon[3]/M_ion)**2+ \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)**2 # cm^2/sec^2
b=np.sqrt(C1+C2*deltaT+C3*deltaT**2) # cm
D1=(2.*C3*deltaT+C2)/b-C2/np.sqrt(C1) # cm/sec
D2=(C2*deltaT+2.*C1)/b-2.*np.sqrt(C1) # cm
q=4.*C1*C3-C2**2 # cm^4/sec^2
   # Dimensions of dpIon, dpElec are g*cm/sec:
dpIon[0]=-2.*dpFactor_gc/q*((vectrIon[0]-x_gc)*D1-vectrIon[1]/M_ion*D2)
dpIon[1]=-2.*dpFactor_gc/q*((vectrIon[2]-vectrElec_gc[2])*D1- \
vectrIon[3]/M_ion*D2)
dpIon[2]=-2.*dpFactor_gc/q*((vectrIon[4]-vectrElec_gc[4])*D1- \
(vectrIon[5]/M_ion-vectrElec_gc[5]/m_elec)*D2)
dpElec[0]=-dpIon[0]
dpElec[1]=-dpIon[1]
dpElec[2]=-dpIon[2]
dy_gc=dpIon[0]/mOmegaLarm # cm
# print ('dpIon[0]=%e, dpIon[1]=%e, dpIon[2]=%e' % \
# (dpIon[0],dpIon[1],dpIon[2]))
return dpIon,dpElec,action,dy_gc,C1,C2,C3,b,D1,D2,q
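#
# Sketch of the "Magnus expansion" quantities above (added for clarity): for
# straight-line relative motion during deltaT the squared ion - guiding-center
# distance is the quadratic form
#    b(t)^2 = C1 + C2*t + C3*t^2 ,
# and the momentum kick combines the initial separation and the relative
# velocity as dpIon = -2*q_elec^2/q * [ (r_ion-r_gc)*D1 - V_rel*D2 ] with
# q = 4*C1*C3 - C2^2, which is (apparently) the closed-form time integral of
# the Coulomb force over deltaT for this b(t).
#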
#
# Minimized functional (my own Least Squares Method - LSM;
# Python has its own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
# Functional = {log10(funcY) - [fitB*log10(argX) + fitA]}^2
#
def fitting(nPar1,nPar2,argX,funcY):
log10argX = np.zeros((nPar1,nPar2))
log10funcY = np.zeros((nPar1,nPar2))
   for i in range(nPar2):
for n in range(nPar1):
log10argX[n,i] = np.log10(argX[n,i])
log10funcY[n,i] = np.log10(funcY[n,i])
sumArgX = np.zeros(nPar2)
sumArgX2 = np.zeros(nPar2)
sumFuncY = np.zeros(nPar2)
sumArgXfuncY= np.zeros(nPar2)
fitA = np.zeros(nPar2)
fitB = np.zeros(nPar2)
for i in range(nPar2):
for n in range(nPar1):
sumArgX[i] += log10argX[n,i]
sumArgX2[i] += log10argX[n,i]**2
sumFuncY[i] += log10funcY[n,i]
sumArgXfuncY[i] += log10argX[n,i]*log10funcY[n,i]
delta = sumArgX[i]**2-nPar1*sumArgX2[i]
fitA[i] = (sumArgX[i]*sumArgXfuncY[i]-sumArgX2[i]*sumFuncY[i])/delta
fitB[i] = (sumArgX[i]*sumFuncY[i]-nPar1*sumArgXfuncY[i])/delta
# print ('fitA(%d) = %e, fitB(%d) = %e' % (i,fitA[i],i,fitB[i]))
argXfit = np.zeros((nPar1,nPar2))
funcYfit = np.zeros((nPar1,nPar2))
funcHi2 = np.zeros(nPar2)
for i in range(nPar2):
factorA = math.pow(10.,fitA[i])
for n in range(nPar1):
argXfit[n,i] = math.pow(10.,log10argX[n,i])
funcYfit[n,i] = factorA*math.pow(argXfit[n,i],fitB[i])
funcHi2[i] += (np.log10(abs(funcY[n,i])) - np.log10(abs(funcYfit[n,i])))**2
return fitA,fitB,funcHi2,argXfit,funcYfit
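#
# Cross-check sketch (an illustration added here and kept as a comment, so the
# run is unchanged): for a single column i the same power-law fit can be
# obtained with numpy's polyfit on the log10 data, e.g.
#    fitB_chk, fitA_chk = np.polyfit(np.log10(argX[:,i]), np.log10(funcY[:,i]), 1)
# and should agree with fitB[i], fitA[i] returned above.
#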
#
# +/- errors of the fitted parameters fitA and fitB:
#
def errFitAB(nPar1,nPar2,argX,funcY,fitA,fitB,funcHi2,errVar,errType):
log10argX = np.zeros((nPar1,nPar2))
log10funcY = np.zeros((nPar1,nPar2))
sumArgX = np.zeros(nPar2)
sumArgX2 = np.zeros(nPar2)
posErrFit = np.zeros(nPar2)
negErrFit = np.zeros(nPar2)
# return posErrFit,negErrFit
stepA = 5.e-4*mean(funcHi2)
stepB = 1.e-4*mean(funcHi2)
# print ('errFitAB: mean(funcHi2) = %e, stepA = %e, stepB = %e' % (mean(funcHi2),stepA,stepB))
for i in range(nPar2):
for n in range(nPar1):
log10argX[n,i] = np.log10(argX[n,i])
log10funcY[n,i] = np.log10(funcY[n,i])
sumArgX[i] += log10argX[n,i]
sumArgX2[i] += log10argX[n,i]**2
for i in range(nPar2):
k = 0
deltaFuncHi2 = 0.
while (deltaFuncHi2 < 1.):
k += 1
if k > 2000:
print ('Break in errFitAB (Fit funcY: case %d); positive error) for %d' % (errVar,i))
break
# print ('i=%d: fitParamtr = %e, funcHi2 = %e' % (i,fitParamtr[i], funcHi2[i]))
curFitA = fitA[i]
if (int(errVar) == 1):
curFitA = fitA[i] + k*stepA
curFuncHi2 = 0.
factorA = math.pow(10.,curFitA)
curFitB = fitB[i]
if (int(errVar) == 2):
curFitB = fitB[i] + k*stepB
curFuncHi2 = 0.
for n in range(nPar1):
curArgX = math.pow(10.,log10argX[n,i])
curFuncYfit = factorA*math.pow(curArgX,curFitB)
curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
deltaFuncHi2 = curFuncHi2 - funcHi2[i]
if (int(errVar) == 1):
posErrFit[i] = abs(curFitA - fitA[i])
else:
posErrFit[i] = abs(curFitB - fitB[i])
func1sigma2 = funcHi2[i]/(nPar2-3)
if (int(errVar) == 1):
fitSigma = np.sqrt(sumArgX2[i]/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
else:
fitSigma = np.sqrt(nPar2/(nPar2*sumArgX2[i]-sumArgX[i]**2)*func1sigma2)
if (int(errType) == 2):
posErrFit[i] = fitSigma
# if (int(errVar) == 1):
# print ('i=%d: fitA = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitA[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
# else:
# print ('i=%d: fitB = %e + %e (%e), funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitB[i],posErrFit[i],fitSigma,funcHi2[i],k,curFuncHi2))
for i in range(nPar2):
k = 0
deltaFuncHi2 = 0.
while (deltaFuncHi2 < 1.):
k += 1
if k > 2000:
print ('Break in errFitAB (Fit funcY: case %d); negative error) for %d' % (errVar,i))
break
curFitA = fitA[i]
if (int(errVar) == 1):
curFitA = fitA[i] - k*stepA
factorA = math.pow(10.,curFitA)
curFitB = fitB[i]
if (int(errVar) == 2):
curFitB = fitB[i] - k*stepB
curFuncHi2 = 0.
for n in range(nPar1):
curArgX = math.pow(10.,log10argX[n,i])
curFuncYfit = factorA*math.pow(curArgX,curFitB)
curFuncHi2 += (np.log10(abs(curFuncYfit)) - log10funcY[n,i])**2
deltaFuncHi2 = curFuncHi2 - funcHi2[i]
if (int(errVar) == 1):
negErrFit[i] = abs(curFitA - fitA[i])
else:
negErrFit[i] = abs(curFitB - fitB[i])
if (int(errType) == 2):
negErrFit[i] = posErrFit[i]
# if (errVar == 1):
# print ('i=%d: fitA = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitA[i],posErrFit[i],funcHi2[i],k,curFuncHi2))
# else:
# print ('i=%d: fitB = %e - %e, funcHi2 = %e (for %d steps curFuncHi2 = %e)' % \
# (i,fitB[i],negErrFit[i],funcHi2[i],k,curFuncHi2))
return posErrFit,negErrFit
def fittedGKintegration(xMin,xMax,fitA,fitB):
#
# "Gauss-Kronrod" method of integration (GK)
#
#
   # Points (psi_i) and weights (w_i) to integrate over the interval from -1 to 1;
# These data are from <NAME>. "Handbook of Mathematical Science".
# 5th Edition, CRC Press, Inc, 1978.
#
   # To integrate over the interval from 0 to 1 it is necessary to replace the
   # points psi_i with the points ksi_i=(1+psi_i)/2;
#
# For method with order N for function F(x):
# int_(-1)^1 = sum_1^N [w_i* F(psi_i)];
#
# In case of integration over interval from a to b:
# int_(a)^b = (b-a)/2 * sum_1^N [w_i* F(x_i)], where
# x_i = (b-a)*psi_i/2+(a+b)/2.
#
#----------------------------------------------------
#
# Data for GK:
#
#----------------------------------------------------
nPoints_GK = 16
psi_16=np.array([-0.9894009, -0.9445750, -0.8656312, -0.7554044, -0.6178762, \
-0.4580168, -0.2816036, -0.0950125, 0.0950125, 0.2816036, \
0.4580168, 0.6178762, 0.7554044, 0.8656312, 0.9445750, \
0.9894009])
w_16 =np.array([ 0.0271525, 0.0622535, 0.0951585, 0.1246290, 0.1495960, \
0.1691565, 0.1826034, 0.1894506, 0.1894506, 0.1826034, \
0.1691565, 0.1495960, 0.1246290, 0.0951585, 0.0622535, \
0.0271525])
y = np.zeros(nPoints_GK)
yIntegrated = 0.
for n in range(nPoints_GK):
xCrrnt = psi_16[n]*(xMax-xMin)/2 + (xMax+xMin)/2.
factorA = math.pow(10.,fitA)
y[n] = factorA*math.pow(xCrrnt,fitB)
yIntegrated += (xMax-xMin)*w_16[n]*y[n]*xCrrnt
return y,yIntegrated
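#
# Note on the quadrature above (added for clarity): the tabulated psi_16/w_16
# appear to be the standard 16-point Gauss-Legendre abscissas and weights, for
# which int_a^b F(x)dx ~ (b-a)/2 * sum_n w_n*F(x_n). Since the accumulated sum
# uses (xMax-xMin) (not its half) and an extra factor xCrrnt, the returned
# yIntegrated approximates 2*int_xMin^xMax y(x)*x dx for the fitted power law
# y(x) = 10**fitA * x**fitB.
#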
#------------------ End of defined functions -----------------------
#
#====================================================================
sphereNe=3.
R_e=math.pow(sphereNe/n_e,1./3) # cm
print ('R_e (cm)=%e' % R_e)
ro_Larm = eVrmsTran/omega_L # cm
print ('ro_Larm (cm)=%e' % ro_Larm)
impctPrmtrMin=2.*ro_Larm
# rhoDependenceFlag = 1 # skip calculation of rho dependence if = 0!
#============ Important flags ===========================
#
# Taking into account the transfer of momenta for both particles
# (for "classical" only):
dpTransferFlag = 1 # no taking into account if = 0!
#
saveFilesFlag = 0 # no saving if = 0!
#
plotFigureFlag = 1 # plotting blocks below are executed only if = 0!
#
#========================================================
nVion=50
Vion=np.zeros(nVion)
VionLong=np.zeros(nVion)
VionTrnsv=np.zeros(nVion)
VionRel=np.zeros(nVion)
vIonMin=4.e-3*eVrmsTran
vIonMax=10.*eVrmsTran
vIonMinRel=vIonMin/V0
vIonMaxRel=vIonMax/V0
print ('VionMin=%e (vIonMinRel=%e), vIonMax=%e (vIonMaxRel=%e)' % \
(vIonMin,vIonMinRel,vIonMax,vIonMaxRel))
vIonLogStep=math.log10(vIonMax/vIonMin)/(nVion-1)
R_debye=np.zeros(nVion)
R_pass=np.zeros(nVion)
R_pass_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
impctPrmtrMax=np.zeros(nVion)
impctPrmtrMax_1=np.zeros(nVion) # for longT=0. --> eVrmsLong=0.
for i in range(nVion):
crrntLogVionRel=math.log10(vIonMinRel)+i*vIonLogStep
VionRel[i]=math.pow(10.,crrntLogVionRel)
Vion[i]=VionRel[i]*V0
VionLong[i]=Vion[i]*np.cos(thetaVi)
VionTrnsv[i]=Vion[i]*np.sin(thetaVi)
R_debye[i]=np.sqrt(Vion[i]**2+eVrmsTran**2+eVrmsLong**2)/omega_p
R_pass[i]=np.sqrt(Vion[i]**2+eVrmsLong**2)*coolPassTime
R_pass_1[i]=np.sqrt(Vion[i]**2+0.*eVrmsLong**2)*coolPassTime
help=max(R_debye[i],R_e)
impctPrmtrMax[i]=min(help,R_pass[i])
impctPrmtrMax_1[i]=min(help,R_pass_1[i])
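#
# Summary of the choice above (added for clarity): R_debye is the screening
# radius sqrt(Vion^2+eVrmsTran^2+eVrmsLong^2)/omega_p, R_pass is the path
# sqrt(Vion^2+eVrmsLong^2)*coolPassTime covered during one cooling pass, and
# the maximal impact parameter is taken as
#    impctPrmtrMax = min( max(R_debye, R_e), R_pass ).
#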
#-----------------------------------------------------------------
# Checking the correction of the maximal impact parameter depending
# on the preset minimal number of Larmor turns
#
larmorTurnsMin=[10,20,30,40]
impctPrmtrMaxCrrctd=np.zeros((nVion,4))
impctPrmtrMaxCrrctdRel=np.zeros((nVion,4))
for n in range (4):
for i in range(nVion):
impctPrmtrMaxCrrctd[i,n]=impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurnsMin[n]*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
impctPrmtrMaxCrrctdRel[i,n]=impctPrmtrMaxCrrctd[i,n]/impctPrmtrMax[i]
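#
# Origin of the correction factor (added for clarity, assuming T_larm=2*pi/omega_L):
# a track with impact parameter rho has half-length sqrt(R_max^2-rho^2) and lasts
# 2*sqrt(R_max^2-rho^2)/eVrmsLong, i.e. it contains
# omega_L*sqrt(R_max^2-rho^2)/(pi*eVrmsLong) Larmor turns. Requiring at least
# N_Larm turns limits the impact parameter to
#    rho <= R_max*sqrt(1-(pi*N_Larm*eVrmsLong/(omega_L*R_max))^2),
# which is the corrected impctPrmtrMaxCrrctd computed above.
#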
#
# First plotting:
#
if (plotFigureFlag == 0):
fig10 = plt.figure(10)
plt.semilogx(impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,0],'-r', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,1],'-b', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,2],'-g', \
impctPrmtrMax,impctPrmtrMaxCrrctdRel[:,3],'-m',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Maximal Impact parameter $R_{max}$, cm',color='m',fontsize=16)
plt.ylabel('$R_{max}^{Crrctd}/R_{Max}$',color='m',fontsize=16)
# plt.xlim([.9*min(impctPrmtrMax),1.1*max(impctPrmtrMax)])
plt.xlim([1.e-2,1.1*max(impctPrmtrMax)])
plt.ylim([.986,1.001])
titleHeader='$R_{max}^{Crrctd}=R_{Max} \cdot [1-(\pi\cdot N_{Larm} \cdot'
   titleHeader += '\Delta_{e||}/(\omega_{Larm} \cdot R_{max}))^2]^{1/2}$'
plt.title(titleHeader,color='m',fontsize=16)
plt.legend([('$N_{Larm}=$%2d' % larmorTurnsMin[0]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[1]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[2]), \
('$N_{Larm}=$%2d' % larmorTurnsMin[3])],loc='lower center',fontsize=14)
if (saveFilesFlag == 1):
fig10.savefig('picturesCMA/correctedRmax_fig10cma.png')
print ('File "picturesCMA/correctedRmax_fig10cma.png" is written')
xLimit=[.9*VionRel[0],1.1*VionRel[nVion-1]]
#
# Types of collisions:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'Types of Collisions: $V_{e0}=%4.2f\cdot10^{%2d}$ cm/s, $B=%6.1f$ Gs'
plt.title(titleHeader % (mantV0,powV0,fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[8.e-4,.6]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(4.4e-5,.0018,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.e-4,1.75e-3,'$R_{min}=2\cdot<rho_\perp>$',color='k',fontsize=16)
plt.text(7.e-4,5.e-2,'$R_{max}$',color='k',fontsize=16)
plt.text(2.85e-5,3.3e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(1.e-4,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
plt.text(1.e-4,10.e-4,'Adiabatic or Fast Collisions',color='r',fontsize=20)
plt.text(2.25e-5,.275,'Collisions are Screened',color='r',fontsize=20)
plt.text(1.6e-5,1.e-3,'$ \cong 20\cdot R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
#
# Picture for HESR:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'HESR Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[8.e-4,.6]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(4.4e-4,8.4e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(1.e-4,8.4e-4,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.7e-6,3.4e-3,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
plt.text(2.8e-4,.1,'$R_{max}$',color='k',fontsize=16)
plt.text(1.e-4,1.8e-2,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(6.8e-5,7.e-3,'Magnetized Collisions',color='r',fontsize=20)
plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
plt.text(2.3e-5,1.95e-3,'Adiabatic or Fast Collisions',color='r',fontsize=20)
plt.text(2.e-5,.275,'Screened Collisions',color='r',fontsize=20)
plt.text(3.58e-6,2.05e-3,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
# fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
# print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
fig3151.savefig('HESRimpctPrmtr_fig3151cma.png')
print ('File "HESRimpctPrmtr_fig3151cma.png" is written')
#
# Picture for EIC:
#
if (plotFigureFlag == 0):
fig3151=plt.figure (3151)
plt.loglog(VionRel,impctPrmtrMax,'-r', VionRel,impctPrmtrMax_1,'--r', \
[VionRel[0],VionRel[nVion-1]],[impctPrmtrMin,impctPrmtrMin],'-b',linewidth=2)
plt.grid(True)
hold=True
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Impact Parameter, cm',color='m',fontsize=14)
titleHeader= \
'EIC Types of Collisions: $V_{e0}=%3.1f\cdot10^{%2d}$cm/s, $B=%3.1f$T'
plt.title(titleHeader % (mantV0,powV0,1.e-4*fieldB[0]),color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[5.e-5,.3]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(9.e-4,4.e-5,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(1.7e-4,3.e-5,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(6.3e-6,1.1e-4,'$R_{min}=2\cdot<rho_\perp>$',color='b',fontsize=16)
plt.text(1.e-4,2.1e-2,'$R_{max}$',color='k',fontsize=16)
plt.text(2.57e-5,5.e-3,'$R_{max}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.plot([VionRel[0],VionRel[nVion-1]],[20.*rhoCrit,20.*rhoCrit],color='k')
plt.text(2.3e-5,1.e-3,'Magnetized Collisions',color='r',fontsize=20)
# plt.text(6.8e-5,1.2e-3,'Weak Collisions',color='r',fontsize=20)
plt.text(1.1e-5,5.7e-5,'Weak or Adiabatic or Fast Collisions',color='r',fontsize=16)
plt.text(2.e-5,.15,'Screened Collisions',color='r',fontsize=20)
plt.text(2.5e-3,1.7e-4,'$\cong$20$\cdot$$R_{Crit}$',color='k',fontsize=16)
if (saveFilesFlag == 1):
# fig3151.savefig('picturesCMA_v7/impctPrmtr_fig3151cma.png')
# print ('File "picturesCMA_v7/impctPrmtr_fig3151cma.png" is written')
fig3151.savefig('EICimpctPrmtr_fig3151cma.png')
print ('File "EICimpctPrmtr_fig3151cma.png" is written')
# plt.show()
# sys.exit()
#
# Magnetized collisions:
#
if (plotFigureFlag == 0):
fig209=plt.figure (209)
plt.loglog(VionRel,R_debye,'-r',VionRel,R_pass,'-b', \
VionRel,R_pass_1,'--b',linewidth=2)
plt.grid(True)
hold=True
plt.plot([VionRel[0],VionRel[nVion-1]],[R_e,R_e],color='m',linewidth=2)
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=16)
plt.ylabel('$R_{Debye}$, $R_{Pass}$, $R_e$, cm',color='m',fontsize=16)
# titleHeader='Magnetized Collision: $R_{Debye}$, $R_{Pass}$, $R_e$: $V_{e0}=%5.3f\cdot10^{%2d}$cm/s'
# plt.title(titleHeader % (mantV0,powV0),color='m',fontsize=16)
plt.title('Magnetized Collisions: $R_{Debye}$, $R_{Pass}$, $R_e$',color='m',fontsize=16)
plt.xlim(xLimit)
yLimit=[1.e-3,10.]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.5e-4,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(4.4e-5,0.001175,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
plt.text(3.e-5,2.45e-3,'$R_e$',color='k',fontsize=16)
plt.text(3.e-5,5.e-2,'$R_{Debye}$',color='k',fontsize=16)
plt.text(3.e-5,1.8e-2,'$R_{Pass}$',color='k',fontsize=16)
plt.text(4.5e-5,4.8e-3,'$R_{Pass}$ $for$ $T_{e||}=0$',color='k',fontsize=16)
plt.text(8.3e-5,4.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
if (saveFilesFlag == 1):
fig209.savefig('picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png')
print ('File "picturesCMA/rDebye_rLikeDebye_rPass_fig209cma.png" is written')
#
# Coulomb logarithm evaluation:
#
clmbLog = np.zeros(nVion)
for i in range(nVion):
clmbLog[i] = math.log(impctPrmtrMax[i]/impctPrmtrMin)
# clmbLog[i] = math.log(impctPrmtrMax_1[i]/impctPrmtrMin)
if (plotFigureFlag == 0):
fig3155=plt.figure (3155)
plt.semilogx(VionRel,clmbLog,'-xr',linewidth=2)
plt.xlabel('Relative Ion Velocity, $V_i/V_{e0}$',color='m',fontsize=14)
plt.ylabel('Coulomb Logarithm $L_c$',color='m',fontsize=14)
plt.title('Coulomb Logarithm: $L_c$ = $ln(R_{max}/R_{min})$',color='m',fontsize=16)
yLimit=[min(clmbLog)-.1,max(clmbLog)+.1]
plt.ylim(yLimit)
plt.plot([relVeTrnsv,relVeTrnsv],yLimit,'--m',linewidth=1)
plt.text(1.6e-3,5.,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([relVeLong,relVeLong],yLimit,'--m',linewidth=1)
plt.text(3.4e-5,5.,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3155.savefig('picturesCMA_v7/coulombLogrthm_fig3155cma.png')
print ('File "picturesCMA_v7/coulombLogrthm_fig3155cma.png" is written')
#
# matrix for electron with .5*timeStep_c:
#
matr_elec_c=guidingCenter_Matrix(.5*timeStep_c)
#
# matrix for ion with mass M_ion and .5*timeStep_c:
#
matr_ion_c=drift_Matrix(M_ion,.5*timeStep_c)
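# These two half-step maps are used in a symmetric drift-kick-drift splitting:
# in the main loop below each time step drags both particles through
# matr_elec_c / matr_ion_c, applies the collision kick (guidingCenterCollision
# or MagnusExpansionCollision), and then drags them through the same half-step
# matrices again.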
larmorTurns = 10
nImpctPrmtr = 50
rhoMin = impctPrmtrMin
rhoMax = np.zeros(nVion)
log10rhoMin = math.log10(rhoMin)
crrntImpctPrmtr = np.zeros(nImpctPrmtr)
halfLintr = np.zeros((nImpctPrmtr,nVion))
pointAlongTrack = np.zeros((nImpctPrmtr,nVion))
totalPoints = 0
for i in range(nVion):
rhoMax[i] = impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
rhoMax[i] = impctPrmtrMax[i]
# rhoMax[i] = impctPrmtrMax_1[i] # for checking!
# print ('rhoMax(%d) = %e' % (i,rhoMax[i]))
log10rhoMax = math.log10(rhoMax[i])
log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
# print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
for n in range(nImpctPrmtr):
log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
rhoCrrnt = math.pow(10.,log10rhoCrrnt)
# print (' rhoCrrnt(%d) = %e' % (n,rhoCrrnt))
halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2) # half length of interaction; cm
timeHalfPath = halfLintr[n,i]/eVrmsLong # 0.5 time of interaction; sec
numbLarmor = int(2.*timeHalfPath/T_larm)
pointAlongTrack[n,i] = int(2.*timeHalfPath/timeStep_c)
totalPoints += pointAlongTrack[n,i]
# print (' %d: rhoCrrnt = %e, numbLarmor = %d, pointAlongTrack = %d' % \
# (n,rhoCrrnt,numbLarmor,pointAlongTrack[n,i]))
# print ('totalPoints = %d' % totalPoints)
totalPoints = int(totalPoints)
nnTotalPoints=np.arange(0,2*totalPoints-1,1)
arrayA=np.zeros(2*totalPoints)
arrayB=np.zeros(2*totalPoints)
bCrrnt_c = np.zeros(2*totalPoints)
#
# Variables for different testing:
#
b_gc = np.zeros(totalPoints)
action_gc = np.zeros(totalPoints)
C1test = np.zeros(totalPoints)
C2test = np.zeros(totalPoints)
C3test = np.zeros(totalPoints)
b_ME = np.zeros(totalPoints)
D1test = np.zeros(totalPoints)
D2test = np.zeros(totalPoints)
qTest = np.zeros(totalPoints)
action_ME = np.zeros(totalPoints)
actn_gc_ME_rel = np.zeros(totalPoints)
indxTest = 0
rhoInit = np.zeros((nImpctPrmtr,nVion))
#
# "Classical" approach:
#
deltaPx_c = np.zeros((nImpctPrmtr,nVion))
deltaPy_c = np.zeros((nImpctPrmtr,nVion))
deltaPz_c = np.zeros((nImpctPrmtr,nVion))
ionVx_c = np.zeros((nImpctPrmtr,nVion))
ionVy_c = np.zeros((nImpctPrmtr,nVion))
ionVz_c = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_c = np.zeros((nImpctPrmtr,nVion))
#
# "Magnus Expand" approach:
#
deltaPx_m = np.zeros((nImpctPrmtr,nVion))
deltaPy_m = np.zeros((nImpctPrmtr,nVion))
deltaPz_m = np.zeros((nImpctPrmtr,nVion))
ionVx_m = np.zeros((nImpctPrmtr,nVion))
ionVy_m = np.zeros((nImpctPrmtr,nVion))
ionVz_m = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_m = np.zeros((nImpctPrmtr,nVion))
#
# Comparison of approaches (ratio deltaEnrgIon_c/deltaEnrgIon_m):
#
deltaPx_c_m = np.zeros((nImpctPrmtr,nVion))
deltaPy_c_m = np.zeros((nImpctPrmtr,nVion))
deltaPz_c_m = np.zeros((nImpctPrmtr,nVion))
dEion_c_m = np.zeros((nImpctPrmtr,nVion))
#
# Factor to calculate transferred energy to ion
# (the friction force is defined by this transfered energy):
#
deFactor = 0.5/M_ion # 1/g
frctnForce_cSM = np.zeros(nVion) # integration, using Simpson method
frctnForce_mSM = np.zeros(nVion) # integration, using Simpson method
numberWrongSign_c=0
numberWrongSign_m=0
posSignDeltaEnrgIon_c=0
negSignDeltaEnrgIon_c=0
posSignDeltaEnrgIon_m=0
negSignDeltaEnrgIon_m=0
timeRun = np.zeros(nVion)
totalTimeRun = 0.
indx = 0
# ----------------- Main simulation ---------------
#
for i in range(nVion):
   # Taking into account the correction of the maximal impact parameter
   # depending on the preset minimal number of Larmor turns:
rhoMax[i] = impctPrmtrMax[i]* \
np.sqrt(1.- (pi*larmorTurns*eVrmsLong/omega_L/impctPrmtrMax[i])**2)
   # Without taking into account the correction of the maximal impact parameter
   # depending on the preset minimal number of Larmor turns:
rhoMax[i] = impctPrmtrMax[i]
# rhoMax[i] = impctPrmtrMax_1[i] # for checking!
log10rhoMax = math.log10(rhoMax[i])
log10rhoStep = (log10rhoMax-log10rhoMin)/(nImpctPrmtr)
# print ('Vion(%d) = %e, rhoMax = %e' % (i,Vion[i],rhoMax[i]))
timeStart=os.times()
for n in range(nImpctPrmtr):
log10rhoCrrnt = log10rhoMin+(n+0.5)*log10rhoStep
rhoCrrnt = math.pow(10.,log10rhoCrrnt)
# rhoInit[i*nImpctPrmtr+n] = rhoCrrnt
rhoInit[n,i] = rhoCrrnt
halfLintr[n,i] = np.sqrt(rhoMax[i]**2-rhoCrrnt**2) # half length of interaction; cm
z_ionCrrnt_c = np.zeros(6) # Zeroing out of vector for ion ("GC"-approach)
z_elecCrrnt_c = np.zeros(6) # Zeroing out of vector for electron ("GC"-approach)
z_ionCrrnt_m = np.zeros(6) # Zeroing out of vector for ion ("ME"-approach)
z_elecCrrnt_m = np.zeros(6) # Zeroing out of vector for electron ("ME"-approach)
# Zeroing out of "guiding center" vector for electron (both approaches):
z_elecCrrnt_gc_c = np.zeros(6)
z_elecCrrnt_gc_m = np.zeros(6)
      # Current values of the transferred momenta
      # (the second index labels the approach: 0 - "Guiding Center",
      # 1 - "Magnus Expansion"):
dpCrrnt = np.zeros((3,2))
# Intermediate arrays:
dpIon_c = np.zeros(3)
dpIon_m = np.zeros(3)
dpElec_c = np.zeros(3)
dpElec_m = np.zeros(3)
# Current initial vector for electron:
z_elecCrrnt_c[Ix] = rhoCrrnt # x, cm
z_elecCrrnt_c[Iz] = -halfLintr[n,i] # z, cm
z_elecCrrnt_c[Ipy] = m_elec*eVrmsTran # py, g*cm/sec
z_elecCrrnt_c[Ipz] = m_elec*eVrmsLong # pz, g*cm/sec
z_elecCrrnt_m[Ix] = rhoCrrnt # x, cm
z_elecCrrnt_m[Iz] = -halfLintr[n,i] # z, cm
z_elecCrrnt_m[Ipy] = m_elec*eVrmsTran # py, g*cm/sec
z_elecCrrnt_m[Ipz] = m_elec*eVrmsLong # pz, g*cm/sec
# Current initial vector for ion velocity for both approaches:
ionVx_c[n,i] = VionTrnsv[i]*np.cos(phiVi)
ionVy_c[n,i] = VionTrnsv[i]*np.sin(phiVi)
ionVz_c[n,i] = VionLong[i]
ionVx_m[n,i] = VionTrnsv[i]*np.cos(phiVi)
ionVy_m[n,i] = VionTrnsv[i]*np.sin(phiVi)
ionVz_m[n,i] = VionLong[i]
# transfer to system of guiding center:
z_elecCrrnt_gc_c=toGuidingCenter(z_elecCrrnt_c)
z_elecCrrnt_gc_m=toGuidingCenter(z_elecCrrnt_m)
#
      # Main loop along each track:
#
for k in range(int(pointAlongTrack[n,i])):
#
# Dragging both particles through first half of the step of the track:
#
z_elecCrrnt_gc_c = np.dot(matr_elec_c,z_elecCrrnt_gc_c) # electron
z_elecCrrnt_gc_m = np.dot(matr_elec_c,z_elecCrrnt_gc_m) # electron
z_ionCrrnt_c = np.dot(matr_ion_c,z_ionCrrnt_c) # ion
z_ionCrrnt_m = np.dot(matr_ion_c,z_ionCrrnt_m) # ion
# transfer from system of guiding center:
z_elecCrrnt_c=fromGuidingCenter(z_elecCrrnt_gc_c)
z_elecCrrnt_m=fromGuidingCenter(z_elecCrrnt_gc_m)
# Current distance between ion and electron; cm:
bCrrnt_c[indx]=np.sqrt((z_ionCrrnt_c[0]-z_elecCrrnt_c[0])**2+ \
(z_ionCrrnt_c[2]-z_elecCrrnt_c[2])**2+ \
(z_ionCrrnt_c[4]-z_elecCrrnt_c[4])**2)
# Current values of parameters A,B:
arrayA[indx] = math.log10(ro_Larm/bCrrnt_c[indx])
arrayB[indx] = math.log10((q_elec**2/bCrrnt_c[indx])/kinEnergy)
indx += 1
#
# Dragging both particles through interaction during this step of track
# (for both approaches):
#
# "Guiding Center":
dpIon_c,dpElec_c,action,b_gc_c = \
guidingCenterCollision(z_elecCrrnt_gc_c,z_ionCrrnt_c,timeStep_c)
# "Magnus Expantion":
dpIon_m,dpElec_m,actionME,dy_gc_m,C1,C2,C3,b,D1,D2,q = \
MagnusExpansionCollision(z_elecCrrnt_gc_m,z_ionCrrnt_m,timeStep_c)
# Save data for testing:
b_gc[indxTest] = b_gc_c # "Guiding Center" approach
action_gc[indxTest] = action # -"- -"- -"- -"- -"- -"-
C1test[indxTest] = C1 # "Magnus expansion" approach
C2test[indxTest] = abs(C2) # -"- -"- -"- -"- -"- -"-
C3test[indxTest] = C3 # -"- -"- -"- -"- -"- -"-
b_ME[indxTest] = b # -"- -"- -"- -"- -"- -"-
D1test[indxTest] = D1 # -"- -"- -"- -"- -"- -"-
D2test[indxTest] = D2 # -"- -"- -"- -"- -"- -"-
qTest[indxTest] = q #-"- -"- -"- -"- -"- -"-
action_ME[indxTest] = actionME #-"- -"- -"- -"- -"- -"-
indxTest += 1
indxTestMax = indxTest
#
# Taking into account transfer of momentum for both particles:
#
if (dpTransferFlag == 1):
for ic in range(3):
z_ionCrrnt_c[2*ic+1] += dpIon_c[ic]
z_elecCrrnt_c[2*ic+1] += dpElec_c[ic]
z_ionCrrnt_m[2*ic+1] += dpIon_m[ic]
z_elecCrrnt_m[2*ic+1] += dpElec_m[ic]
# transfer to system of guiding center:
z_elecCrrnt_gc_c=toGuidingCenter(z_elecCrrnt_c)
z_elecCrrnt_gc_m=toGuidingCenter(z_elecCrrnt_m)
         # Accumulation of the transferred momenta to the ion along the track for both approaches:
for ic in range(3):
# if i == 0:
# print ('dpIon_c[%2d] = %20.14e, dpIon_m[%2d] = %20.14e' % \
# (ic,dpIon_c[ic],ic,dpIon_m[ic]))
dpCrrnt[ic,0] += dpIon_c[ic] # "Guiding Center", g*cm/sec
dpCrrnt[ic,1] += dpIon_m[ic] # "Magnus Expansion", g*cm/sec
#
         # Ion's velocity change along the track - both approaches:
#
ionVx_c[n,i] += dpCrrnt[0,0]/M_ion # cm/sec
ionVy_c[n,i] += dpCrrnt[1,0]/M_ion # cm/sec
ionVz_c[n,i] += dpCrrnt[2,0]/M_ion # cm/sec
ionVx_m[n,i] += dpCrrnt[0,1]/M_ion # cm/sec
ionVy_m[n,i] += dpCrrnt[1,1]/M_ion # cm/sec
ionVz_m[n,i] += dpCrrnt[2,1]/M_ion # cm/sec
#
# Dragging both particles through second half of the step of the track:
#
z_elecCrrnt_gc_c = np.dot(matr_elec_c,z_elecCrrnt_gc_c) # electron
z_ionCrrnt_c = np.dot(matr_ion_c,z_ionCrrnt_c) # ion
z_elecCrrnt_gc_m = np.dot(matr_elec_c,z_elecCrrnt_gc_m) # electron
z_ionCrrnt_m = np.dot(matr_ion_c,z_ionCrrnt_m) # ion
# transfer from system of guiding center:
z_elecCrrnt_c=fromGuidingCenter(z_elecCrrnt_gc_c)
z_elecCrrnt_m=fromGuidingCenter(z_elecCrrnt_gc_m)
# Current distance between ion and electron; cm:
bCrrnt_c[indx]=np.sqrt((z_ionCrrnt_c[0]-z_elecCrrnt_c[0])**2+ \
(z_ionCrrnt_c[2]-z_elecCrrnt_c[2])**2+ \
(z_ionCrrnt_c[4]-z_elecCrrnt_c[4])**2)
# Current values of parameters A,B:
arrayA[indx] = math.log10(ro_Larm/bCrrnt_c[indx])
arrayB[indx] = math.log10((q_elec**2/bCrrnt_c[indx])/kinEnergy)
indx += 1
#
# Transferred momenta along the track - "Guiding Center" approach:
#
deltaPx_c[n,i] = dpCrrnt[0,0] # dpx, g*cm/sec
# if deltaPx_c[n,i] <= 0.:
# print ('deltaPx_c[%2d,%2d] = %e, dpCrrnt[%2d,%2d] = %e' % \
# (n,i,deltaPx_c[n,i],n,i,dpCrrnt[0,0]))
deltaPy_c[n,i] = dpCrrnt[1,0] # dpy, g*cm/sec
# if deltaPy_c[n,i] <= 0.:
# print ('deltaPy_c[%2d,%2d] = %e' % (n,i,deltaPy_c[n,i]))
deltaPz_c[n,i] = dpCrrnt[2,0] # dpz, g*cm/sec
# if deltaPz_c[n,i] <= 0.:
# print ('deltaPz_c[%2d,%2d] = %e' % (n,i,deltaPz_c[n,i]))
# Incorrect value:
# deltaEnrgIon_c[n,i] = (dpCrrnt[0,0]**2+dpCrrnt[1,0]**2+dpCrrnt[2,0]**2)* \
# deFactor/eVtoErg # eV
# Correct value:
crrntDeltaEnrg = (dpCrrnt[0,0]*ionVx_c[n,i]+ \
dpCrrnt[1,0]*ionVy_c[n,i]+ \
dpCrrnt[2,0]*ionVz_c[n,i])*deFactor/eVtoErg # eV
absDeltaEnrgIon_c = abs(crrntDeltaEnrg)
if (crrntDeltaEnrg != 0.):
signDeltaEnrgIon_c = crrntDeltaEnrg/abs(crrntDeltaEnrg)
deltaEnrgIon_c[n,i] = crrntDeltaEnrg
if (deltaEnrgIon_c[n,i] > 0.):
posSignDeltaEnrgIon_c += 1
else:
negSignDeltaEnrgIon_c += 1
#
# Transferred momenta along the track - "Magnus expansion" approach:
#
deltaPx_m[n,i] = dpCrrnt[0,1] # dpx, g*cm/sec
# if deltaPx_m[n,i] <= 0.:
# print ('deltaPx_m[%2d,%2d] = %e' % (n,i,deltaPx_m[n,i]))
deltaPy_m[n,i] = dpCrrnt[1,1]
# if deltaPy_m[n,i] <= 0.:
# print ('deltaPy_m[%2d,%2d] = %e' % (n,i,deltaPy_m[n,i]))
deltaPz_m[n,i] = dpCrrnt[2,1]
# if deltaPz_m[n,i] <= 0.:
# print ('deltaPz_m[%2d,%2d] = %e' % (n,i,deltaPz_m[n,i]))
# Incorrect value:
# deltaEnrgIon_m[n,i] = (dpCrrnt[0,1]**2+dpCrrnt[1,1]**2+dpCrrnt[2,1]**2)* \
# deFactor/eVtoErg # eV
      # Correct value:
crrntDeltaEnrg = (dpCrrnt[0,1]*ionVx_m[n,i]+ \
dpCrrnt[1,1]*ionVy_m[n,i]+ \
dpCrrnt[2,1]*ionVz_m[n,i])*deFactor/eVtoErg # eV
absDeltaEnrgIon_m = abs(crrntDeltaEnrg)
if (crrntDeltaEnrg != 0.):
signDeltaEnrgIon_m = crrntDeltaEnrg/abs(crrntDeltaEnrg)
deltaEnrgIon_m[n,i] = crrntDeltaEnrg
if (deltaEnrgIon_m[n,i] > 0.):
posSignDeltaEnrgIon_m += 1
else:
negSignDeltaEnrgIon_m += 1
#
# Comparison of the approaches (%):
#
if (deltaPx_m[n,i] != 0.):
deltaPx_c_m[n,i] = 100.*(deltaPx_c[n,i]/deltaPx_m[n,i]-1.)
else:
print ('Bad value (=0.) of deltaPx_m[%d,%d] = ' % (n,i))
if (deltaPy_m[n,i] != 0.):
deltaPy_c_m[n,i] = 100.*(deltaPy_c[n,i]/deltaPy_m[n,i]-1.)
else:
print ('Bad value (=0.) of deltaPy_m[%d,%d] = ' % (n,i))
if (deltaPz_m[n,i] != 0.):
deltaPz_c_m[n,i] = 100.*(deltaPz_c[n,i]/deltaPz_m[n,i]-1.)
else:
print ('Bad value (=0.) of deltaPz_m[%d,%d] = ' % (n,i))
if (deltaEnrgIon_m[n,i] != 0.):
dEion_c_m[n,i] = 100.*(deltaEnrgIon_c[n,i]/deltaEnrgIon_m[n,i]-1.)
else:
print ('Bad value (=0.) of deltaEnrgIon_m[%d,%d] = ' % (n,i))
#
# Integration using Simpson method:
#
if (n > 0):
frctnForce_cSM[i] += pi*n_e*100.*(deltaEnrgIon_c[n,i]+deltaEnrgIon_c[n-1,i])* \
.5*(rhoInit[n,i]+rhoInit[n-1,i])* \
(rhoInit[n,i]-rhoInit[n-1,i]) # eV/m
frctnForce_mSM[i] += pi*n_e*100.*(deltaEnrgIon_m[n,i]+deltaEnrgIon_m[n-1,i])* \
.5*(rhoInit[n,i]+rhoInit[n-1,i])* \
(rhoInit[n,i]-rhoInit[n-1,i]) # eV/m
timeEnd = os.times()
timeRun[i] = float(timeEnd[0])-float(timeStart[0]) # CPU time , sec
totalTimeRun += timeRun[i]
print ('timeRun(%2d) = %6.3f seconds' % (i,timeRun[i]))
print ('Total time (including Simpson integration) = %6.3f seconds' % totalTimeRun)
print ('deltaEnrgIon_c: nPos=%d, nNeg=%d; deltaEnrgIon_m: nPos=%d, nNeg=%d' % \
(posSignDeltaEnrgIon_c,negSignDeltaEnrgIon_c, \
posSignDeltaEnrgIon_m,negSignDeltaEnrgIon_m))
#
# Output for checking:
#
# print \
# ('n Px_c Px_m Py_c Py_m Pz_c Pz_m Pz_c_m')
# for i in range(10,11,1):
# for n in range(nImpctPrmtr):
# print ('%d: %e %e %e %e %e %e %e' % \
# (n,deltaPx_c[n,i],deltaPx_m[n,i],deltaPy_c[n,i], \
# deltaPy_m[n,i],deltaPz_c[n,i],deltaPz_m[n,i],deltaPz_c_m[n,i]))
# print ('n dEion_c dEion_m')
# for i in range(10,11,1):
# for n in range(nImpctPrmtr):
# print ('%d: %e %e ' % (n,deltaEnrgIon_c[n,i],deltaEnrgIon_m[n,i]))
# print ('indxTestMax = %d' % indxTestMax)
#
# Plotting of the tests:
#
nn=np.arange(0,indxTestMax-1,1)
#
# C1:
#
if (plotFigureFlag == 0):
fig2020=plt.figure (2020)
plt.plot(nn,C1test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$C1$, $cm^2$',color='m',fontsize=16)
   plt.title('$C1=(x_i-x_{gc})^2+(y_i-y_{gc})^2+(z_i-z_e)^2+2J/(m_e \cdot \Omega_e)$', \
color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2020.savefig('picturesCMA_v7/magnusExpansion_C1_fig2020cma.png')
print ('File "picturesCMA_v7/magnusExpansion_C1_fig2020cma.png" is written')
#
# C2:
#
if (plotFigureFlag == 0):
fig2030=plt.figure (2030)
plt.plot(nn,1.e-5*C2test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$C2$, $\cdot 10^5$ $cm^2/s$',color='m',fontsize=16)
plt.title('$C2=2\cdot[V_{ix}\cdot(x_i-x_{gc})+V_{iy}\cdot(y_i-y_{gc})+(V_{iz}-V_{ez})\cdot(z_i-z_e)]$', \
color='m',fontsize=14)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2030.savefig('picturesCMA_v7/magnusExpansion_C2_fig2030cma.png')
print ('File "picturesCMA_v7/magnusExpansion_C2_fig2030cma.png" is written')
#
# C3:
#
if (plotFigureFlag == 0):
fig2040=plt.figure (2040)
plt.plot(nn,1e-11*C3test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$C3$, $\cdot 10^{11}$ $cm^2/s^2$',color='m',fontsize=16)
plt.title('$C3=V_{ix}^2+V_{iy}^2+(V_{iz}-V_{ez})^2$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2040.savefig('picturesCMA_v7/magnusExpansion_C3_fig2040cma.png')
print ('File "picturesCMA_v7/magnusExpansion_C3_fig2040cma.png" is written')
#
# D1:
#
if (plotFigureFlag == 0):
fig2025=plt.figure (2025)
plt.plot(nn,1.e-5*D1test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$10^{-5}\cdot D1$, $cm/s$',color='m',fontsize=16)
plt.title('$D1=(2C_3\cdot \Delta t+C_2)/b_{ME}$ $-$ $C_2/C_1^{0.5}$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2025.savefig('picturesCMA_v7/magnusExpansion_D1_fig2025cma.png')
print ('File "picturesCMA_v7/magnusExpansion_D1_fig2025cma.png" is written')
#
# D2:
#
if (plotFigureFlag == 0):
fig2035=plt.figure (2035)
plt.plot(nn,1.e4*D2test[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$10^4\cdot D2$, $cm$',color='m',fontsize=16)
plt.title('$D2=(2C_1+C_2\cdot \Delta t)/b_{ME}$ $-$ $2C_1^{0.5}$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2035.savefig('picturesCMA_v7/magnusExpansion_D2_fig2035cma.png')
print ('File "picturesCMA_v7/magnusExpansion_D2_fig2035cma.png" is written')
#
# Distance b_ME between particles for "ME" approach:
#
if (plotFigureFlag == 0):
fig2050=plt.figure (2050)
plt.plot(nn,b_ME[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$b_{ME}$, $cm$',color='m',fontsize=16)
plt.title('Distance $b_{ME}$ between Particles for "ME" Approach', color='m',fontsize=16)
plt.text(3500,.4,'$b_{ME}=[C1+C2\cdot \Delta t +C3 \cdot \Delta t^2]^{0.5}$', \
color='m',fontsize=16)
plt.text(33000,.36,('$(\Delta t=%8.2e$ $s)$' % timeStep_c),color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2050.savefig('picturesCMA_v7/particleDistance_me_fig2050cma.png')
print ('File "picturesCMA_v7/particleDistance_me_fig2050cma.png" is written')
#
# Distance b_gc between particles for "GC" approach:
#
if (plotFigureFlag == 0):
fig2055=plt.figure (2055)
plt.plot(nn,b_gc[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$b_{GC}$, $cm$',color='m',fontsize=16)
plt.title('Distance $b_{GC}$ between Particles for "GC" Approach', color='m',fontsize=16)
plt.text(0,.4,'$b_{GC}=[(x_i-x_{gc})^2+(y_i-y_{gc})^2+$',color='m',fontsize=16)
plt.text(55500,.36,'$+(z_i-z_e)^2+2J/(m_e \cdot \Omega_e)]^{0.5}$', \
color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.grid(True)
if (saveFilesFlag == 1):
fig2055.savefig('picturesCMA/particleDistance_gc_fig2055cma.png')
print ('File "picturesCMA/particleDistance_gc_fig2055cma.png" is written')
#
# Comparison of bCrrnt_c from the "Guiding Center" approach with b_ME from
# the "Magnus expansion" approach:
#
bCrrnt_cTest = np.zeros(indxTestMax)
bCrrnt_cTestRel = np.zeros(indxTestMax)
b_gc_ME_rel = np.zeros(indxTestMax)
for k in range(indxTestMax):
bCrrnt_cTest[k] = .5*(bCrrnt_c[2*k]+bCrrnt_c[2*k+1])
# bCrrnt_cTestRel[k] = bCrrnt_cTest[k]/b_ME[k]
b_gc_ME_rel[k] = b_gc[k]/b_ME[k]
actn_gc_ME_rel[k] = 1.e7*(action_gc[k]/action_ME[k]-1.)
if (plotFigureFlag == 0):
fig2060=plt.figure (2060)
# plt.semilogy(nn,bCrrnt_cTest[0:indxTestMax-1],'.r')
plt.plot(nn,bCrrnt_cTest[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('Test $b_{crrntTest}$, $cm$',color='m',fontsize=16)
plt.title('Test $b_{crrntTest} = .5 \cdot [b_{crrnt}(k)+b_{crrnt}(k+1)]$',color='m', \
fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
# plt.ylim([.9*min(bCrrnt_cTest),1.1*max(bCrrnt_cTest)])
plt.grid(True)
#
# Ratio b_gc/b_ME (absolute value):
#
if (plotFigureFlag == 0):
fig2070=plt.figure (2070)
# plt.semilogy(nn,b_gc_ME_rel[0:indxTestMax-1],'.r')
plt.plot(nn,b_gc_ME_rel[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$b_{GC}/b_{ME}$',color='m',fontsize=16)
plt.title('Comparison of Distances $b_{GC}$ and $b_{ME}$ between Particles',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
# plt.ylim([.9*min(b_gc_ME_rel),1.1*max(b_gc_ME_rel)])
plt.grid(True)
if (saveFilesFlag == 1):
fig2070.savefig('picturesCMA_v7/particleDistanceComprsn_gc_me_fig2070cma.png')
print ('File "picturesCMA_v7/particleDistanceComprsn_gc_me_fig2070cma.png" is written')
#
# Comparison of actions J_gc and J_ME (relative difference):
#
if (plotFigureFlag == 0):
fig2080=plt.figure (2080)
# plt.semilogy(nn,actn_gc_ME_rel[0:indxTestMax-1],'.r')
plt.plot(nn,actn_gc_ME_rel[0:indxTestMax-1],'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$10^7\cdot (J_{GC}/J_{ME}$ $-$ $1)$',color='m',fontsize=16)
plt.title('Comparison of Actions $J_{GC}$ and $J_{ME}$',color='m',fontsize=16)
plt.xlim([-5000,indxTestMax+5000])
plt.ylim([.99*min(actn_gc_ME_rel),1.01*max(actn_gc_ME_rel)])
plt.grid(True)
if (saveFilesFlag == 1):
fig2080.savefig('picturesCMA_v7/actionComprsn_gc_me_fig2080cma.png')
print ('File "picturesCMA_v7/actionComprsn_gc_me_fig2080cma.png" is written')
#
# Total length of interaction (1/2 of value):
#
nn=np.arange(0,nVion*nImpctPrmtr,1)
halfLintrTest = np.zeros(nVion*nImpctPrmtr)
for i in range(nVion):
for n in range(nImpctPrmtr):
      halfLintrTest[nImpctPrmtr*i+n] = halfLintr[n,i]
if (plotFigureFlag == 0):
fig2090=plt.figure (2090)
plt.semilogy(nn,halfLintrTest,'.r')
plt.xlabel('Points of Tracks',color='m',fontsize=16)
plt.ylabel('$0.5 \cdot L_{Intrctn}$, $cm$',color='m',fontsize=16)
   plt.title('Total Length of Interaction: $L_{Intrctn}=2 \cdot [R_{max}^2-rho_{Init}^2]^{0.5}$', \
color='m',fontsize=16)
plt.xlim([-100,nVion*nImpctPrmtr+100])
plt.ylim([.9*min(halfLintrTest),1.1*max(halfLintrTest)])
plt.grid(True)
if (saveFilesFlag == 1):
fig2090.savefig('picturesCMA/totalLengthIntrsctn_fig2090cma.png')
print ('File "picturesCMA/totalLengthIntrsctn_fig2090cma.png" is written')
#===================================================
#
# Fitting of the correct values of deltaEnrgIon_m
#
#===================================================
#
# Fitting for figures with deltaEnrgIon_m (my own Least Squares Method - LSM;
# Python has its own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
#
# Fitted function:
#
# |deltaEnrgIon| = 10^fitA * rho^fitB,
# so that
#
# log10(|deltaEnrgIon|) = fitB*log10(rho) + fitA
#
# So, the dimension of expression (10^fitA * rho^fitB) is the same
# as deltaEnrgIon, i.e. eV
#
timeStart = os.times()
fitA_dEion = np.zeros(nVion) # dimensionless
fitB_dEion = np.zeros(nVion) # dimensionless
rhoInitFit_dEion = np.zeros((nImpctPrmtr,nVion))
deltaEnrgIon_m_fit = np.zeros((nImpctPrmtr,nVion))
funcHi2_dEion = np.zeros(nVion)
fitA_dEion,fitB_dEion,funcHi2_dEion,rhoInitFit_dEion, deltaEnrgIon_m_fit = \
fitting(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m)
dPosA_dEion = np.zeros(nVion)
dNegA_dEion = np.zeros(nVion)
dPosA_dEion,dNegA_dEion = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m_fit,fitA_dEion,fitB_dEion,funcHi2_dEion,1,2)
dPosB_dEion = np.zeros(nVion)
dNegB_dEion = np.zeros(nVion)
dPosB_dEion,dNegB_dEion = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaEnrgIon_m_fit,fitA_dEion,fitB_dEion,funcHi2_dEion,2,2)
# print ('Fitting for deltaEion:')
# for i in range(nVion):
# print ('i=%2d: fitA_dEion = %e (+%e,-%e), fitB_dEion = %e (+%e,-%e), hi2_1 = %e' % \
# (i,fitA_dEion[i],dPosA_dEion[i],dNegA_dEion[i], \
# fitB_dEion[i],dPosB_dEion[i],dNegB_dEion[i],funcHi2_dEion[i]))
#
# Analytical integration of the fitted dependence 10**A*rho**B.
#
# For this dependence on rho:
#
# Friction force = 2*pi*10**A*n_e*integral_rhoMin^rhoMax (rho**B*rho)*dRho =
# = 2*pi*10**A*n_e/(B+2)*[rhoMax**(B+2)-rhoMin**(B+2)] (dimension = eV/cm;
# the factor 100. in the code converts it to eV/m):
#
frctnForce_AI = np.zeros(nVion)
for i in range(nVion):
factorA1 = math.pow(10.,fitA_dEion[i])
factorB1 = 2.+fitB_dEion[i]
frctnForce_AI[i] = 2.*pi*n_e*100.*factorA1/factorB1* \
(math.pow(impctPrmtrMax[i],factorB1)- \
math.pow(impctPrmtrMin,factorB1)) # eV/m
timeEnd = os.times()
timeFitting = float(timeEnd[0])-float(timeStart[0]) # CPU time , sec
print ('Time of integration = %6.3f seconds' % timeFitting)
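#
# Optional cross-check (kept as a comment so the run is unchanged; no level of
# agreement is claimed here): the analytically integrated friction force can be
# compared with the numerically summed one, e.g.
# for i in range(0,nVion,10):
#    print ('i=%2d: frctnForce_AI = %e, frctnForce_mSM = %e' % \
#           (i,frctnForce_AI[i],frctnForce_mSM[i]))
#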
#
# Dependences of transferred energy to ion on ion velocity for
# different initial impact parameters:
#
rhoSlctd = [.004,.02,.06,.1]
nRhoSlctd = len(rhoSlctd)
deltaEnrgIon_dpnd_Vi = np.zeros((nRhoSlctd,nVion))
npStart = np.zeros((nRhoSlctd,), dtype=int)
for k in range(nRhoSlctd):
slctdFlag = 0
for i in range(nVion):
if (slctdFlag == 0):
for n in range(nImpctPrmtr):
if (rhoInit[n,i] >= rhoSlctd[k]):
npStart[k] = i
slctdFlag = 1
break
for k in range(nRhoSlctd):
for i in range(npStart[k],nVion,1):
factorA = math.pow(10.,fitA_dEion[i])
deltaEnrgIon_dpnd_Vi[k,i] = factorA*math.pow(rhoSlctd[k],fitB_dEion[i])
# print ('deltaEnrgIon_dpnd_Vi[%d,%d] = %e' %(k,i,deltaEnrgIon_dpnd_Vi[k,i]))
#===================================================
#
# Fitting of deltaPz_m (these values are always > 0) !!!
#
#===================================================
#
# Fitting for figures with deltaPz_m (my own Least Squares Method - LSM;
# Python has its own routine for LSM - see site
# http://scipy-cookbook.readthedocs.io/items/FittingData.html):
#
#
# Fitted function:
#
# deltaPz_m = 10^fitA_pz * rho^fitB_pz,
# so that
#
# log10(deltaPz_m) = fitB_pz*log10(rho) + fitA_pz
#
# So, the dimension of the expression (10^fitA_pz * rho^fitB_pz) is the same
# as deltaPz_m, i.e. g*cm/sec
#
fitA_pz = np.zeros(nVion) # dimensionless
fitB_pz = np.zeros(nVion) # dimensionless
rhoInitFit_pz = np.zeros((nImpctPrmtr,nVion))
deltaPz_m_fit = np.zeros((nImpctPrmtr,nVion))
fitA_pz,fitB_pz,funcHi2_pz,rhoInitFit_pz, deltaPz_m_fit = \
fitting(nImpctPrmtr,nVion,rhoInit,deltaPz_m)
dPosA_pz = np.zeros(nVion)
dNegA_pz = np.zeros(nVion)
dPosA_pz,dNegA_pz = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPz_m_fit,fitA_pz,fitB_pz,funcHi2_pz,1,2)
dPosB_pz = np.zeros(nVion)
dNegB_pz = np.zeros(nVion)
dPosB_pz,dNegB_pz = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPz_m_fit,fitA_pz,fitB_pz,funcHi2_pz,2,2)
# print ('Fitting for deltaPz_m:')
# for i in range(nVion):
# print ('i=%2d: fitA_pz = %e (+%e,-%e), fitB_pz = %e (+%e,-%e), hi2_1 = %e' % \
# (i,fitA_pz[i],dPosA_pz[i],dNegA_pz[i], \
# fitB_pz[i],dPosB_pz[i],dNegB_pz[i],funcHi2_pz[i]))
# print ('<fitA_pz> = %e +- %e' % (mean(fitA_pz),mean(dNegA_pz)))
# print ('<fitB_pz> = %e +- %e' % (mean(fitB_pz),mean(dNegB_pz)))
#===================================================
#
# Fitting of deltaPx_m (these values are always > 0) !!!
#
#===================================================
#
rhoInitFit_px = np.zeros((nImpctPrmtr,nVion))
deltaPx_m_fit = np.zeros((nImpctPrmtr,nVion))
funcHi2_px = np.zeros(nVion)
fitA_px = np.zeros(nVion) # dimensionless
fitB_px = np.zeros(nVion) # dimensionless
fitA_px,fitB_px,funcHi2_px,rhoInitFit_px, deltaPx_m_fit = \
fitting(nImpctPrmtr,nVion,rhoInit,deltaPx_m)
dPosA_px = np.zeros(nVion)
dNegA_px = np.zeros(nVion)
dPosA_px,dNegA_px = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPx_m_fit,fitA_px,fitB_px,funcHi2_px,1,2)
dPosB_px = np.zeros(nVion)
dNegB_px = np.zeros(nVion)
dPosB_px,dNegB_px = \
errFitAB(nImpctPrmtr,nVion,rhoInit,deltaPx_m_fit,fitA_px,fitB_px,funcHi2_px,2,2)
# print ('Fitting for deltaPx_m:')
# for i in range(nVion):
# print ('i=%2d: fitA_px = %e (+%e,-%e), fitB_px = %e (+%e,-%e), hi2_1 = %e' % \
# (i,fitA_px[i],dPosA_px[i],dNegA_px[i], \
# fitB_px[i],dPosB_px[i],dNegB_px[i],funcHi2_px[i]))
xLimit = [1.015*np.log10(VionRel[0]),.95*np.log10(VionRel[nVion-1])]
yLimMin = 0.
yLimMax = 10.*min(fitA_pz)
if (min(fitA_pz) > 0):
yLimMin = 10.*max(fitA_pz)
yLimMax = 0.
for i in range(nVion):
if (fitA_pz[i] - dNegA_pz[i]) < yLimMin:
yLimMin = fitA_pz[i] - dNegA_pz[i]
if (fitA_pz[i] + dPosA_pz[i]) > yLimMax:
yLimMax = fitA_pz[i] + dPosA_pz[i]
# print ('Exponent A (pz): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.25,yLimMax+.25]
if (plotFigureFlag == 0):
fig3000=plt.figure (3000)
plt.errorbar(np.log10(VionRel),fitA_pz,yerr=[dNegA_pz,dPosA_pz],fmt='-ro', \
ecolor='b',capsize=5,capthick=1)
plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
plt.ylabel('Exponent $A$', color='m',fontsize=14)
titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
titleHeader += '$\Delta P_z$ = $10^A\cdot rho^B$'
plt.title(titleHeader,color='m',fontsize=12)
plt.text(-3.75,-26.0,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
plt.text(-4.0,-28.,('<A>=%7.3f $\pm$ %5.3f' % (mean(fitA_pz),mean(dNegA_pz))), \
color='r',fontsize=16)
# plt.text(-3.25,-29.65,('$-$%5.3f' % (mean(dNegA_pz))),color='r',fontsize=12)
# plt.text(-3.25,-29.15,('$+$%5.3f' % (mean(dPosA_pz))),color='r',fontsize=12)
plt.xlim(xLimit)
plt.ylim(yLimit)
plt.grid(True)
plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
plt.text(-2.55,-28.25,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
plt.text(-4.24,-28.25,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3000.savefig('picturesCMA_v7/fitA_dPz_fig3000cma.png')
print ('File "picturesCMA_v7/fitA_dPz_fig3000cma.png" is written')
yLimMin = 0.
yLimMax = 10.*min(fitB_pz)
if (min(fitB_pz) > 0):
yLimMin = 10.*max(fitB_pz)
yLimMax = 0.
for i in range(nVion):
if (fitB_pz[i] - dNegB_pz[i]) < yLimMin:
yLimMin = fitB_pz[i] - dNegB_pz[i]
if (fitB_pz[i] + dPosB_pz[i]) > yLimMax:
yLimMax = fitB_pz[i] + dPosB_pz[i]
# print ('Exponent B (pz): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.1,yLimMax+.1]
if (plotFigureFlag == 0):
fig3010=plt.figure (3010)
plt.errorbar(np.log10(VionRel),fitB_pz,yerr=[dNegB_pz,dPosB_pz],fmt='-ro', \
ecolor='b',capsize=5,capthick=1)
plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
plt.ylabel('Exponent $B$', color='m',fontsize=14)
titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
titleHeader += '$\Delta P_z$ = $10^A\cdot rho^B$'
plt.title(titleHeader,color='m',fontsize=12)
plt.text(-3.75,-.87,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
plt.text(-3.9,-1.55,('<B>=%6.3f $\pm$ %5.3f' % (mean(fitB_pz),mean(dNegB_pz))), \
color='r',fontsize=16)
# plt.text(-2.85,-2.25,('$-$%5.3f' % (mean(dNegB_pz))),color='r',fontsize=12)
# plt.text(-2.85,-1.75,('$+$%5.3f' % (mean(dPosB_pz))),color='r',fontsize=12)
plt.xlim(xLimit)
plt.ylim(yLimit)
plt.grid(True)
plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
plt.text(-2.55,-1.74,'$ \Delta V_{e\perp}/ V_{e0}$',color='m',fontsize=14)
plt.plot([np.log10(relVeLong),np.log10(relVeLong)],yLimit,'--m',linewidth=1)
plt.text(-4.24,-1.74,'$ \Delta V_{e||}/ V_{e0}$',color='m',fontsize=14)
if (saveFilesFlag == 1):
fig3010.savefig('picturesCMA_v7/fitB_dPz_fig3010cma.png')
print ('File "picturesCMA_v7/fitB_dPz_fig3010cma.png" is written')
yLimMin = 0.
yLimMax = 10.*min(fitA_px)
if (min(fitA_px) > 0):
yLimMin = 10.*max(fitA_px)
yLimMax = 0.
for i in range(nVion):
if (fitA_px[i] - dNegA_px[i]) < yLimMin:
yLimMin = fitA_px[i] - dNegA_px[i]
if (fitA_px[i] + dPosA_px[i]) > yLimMax:
yLimMax = fitA_px[i] + dPosA_px[i]
# print ('Exponent A (px): yLimMin = %e, yLimMax = %e' % (yLimMin,yLimMax))
yLimit = [yLimMin-.15,yLimMax+.15]
if (plotFigureFlag == 0):
fig3020=plt.figure (3020)
plt.errorbar(np.log10(VionRel),fitA_px,yerr=[dNegA_px,dPosA_px],fmt='-ro', \
ecolor='b',capsize=5,capthick=1)
plt.xlabel('Relative Ion Velocity, $log_{10}(V_{ion}/V_0)$',color='m',fontsize=14)
plt.ylabel('Exponent $A$', color='m',fontsize=14)
titleHeader = 'Dependence of Transferred Momenta to Single Ion: '
titleHeader += '$\Delta P_x$ = $10^A\cdot rho^B$'
plt.title(titleHeader,color='m',fontsize=12)
plt.text(-3.75,-24.2,('$V_{e0}=%5.3f\cdot10^{%2d}$cm/s' % (mantV0,powV0)), \
color='m',fontsize=16)
plt.text(-3.9,-24.8,('<A>=%6.3f $\pm$ %5.3f' % (mean(fitA_px),mean(dNegA_px))), \
color='r',fontsize=16)
plt.xlim(xLimit)
plt.ylim(yLimit)
plt.grid(True)
   plt.plot([np.log10(relVeTrnsv),np.log10(relVeTrnsv)],yLimit,'--m',linewidth=1)
"""
File with the main Classes.
"""
import matplotlib.pyplot as plt
import matplotlib.dates as md
import matplotlib.collections as collections
from scipy.integrate import solve_ivp
from scipy.optimize import root
import numpy as np
import datetime
# Hovorka's Model ########################
# TODO LIST
# - TRANSLATE
# - Write Tests
# - CONTROL
# - Unknown Error makes simulation fail - likely parameters
class Subject():
"""
Subject Simulation Class.
"""
# Paramete Distributions
# Paper Software for in Silico Testing.pdf
PARAM_DIST = {"EGP_0": {"mean": 0.0169, "std": 0.0039, "unit": "mmol/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"F_01": {"mean": 0.0111, "std": 0.0007, "unit": "mmol/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"k_12": {"mean": 0.00649, "std": 0.00282, "unit": "1/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"S_id": {"mean": 0.00082, "std": 0.00032, "unit": "mU/(L min)", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"S_ie": {"mean": 0.052, "std": 0.0125, "unit": "L/mU", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"S_it": {"mean": 0.00512, "std": 0.00131, "unit": "mU/(L min)", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"k_e": {"mean": 0.14, "std": 0.035, "unit": "1/min", "distribution": "Gaussian", "variability": None},
"Bio": {"mean": 0.7, "std": 1.2, "unit": "%", "distribution": "Uniform", "variability": [[0, 24], 0.2]},
"k_a_int": {"mean": -2.372, "std": 1.092, "unit": "1/min", "distribution": "Lognormal", "variability": [[0, 3], 0.05]},
"k_a1": {"mean": 0.0055, "std": 0.0056, "unit": "1/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"k_a2": {"mean": 0.0683, "std": 0.0507, "unit": "1/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"k_a3": {"mean": 0.0304, "std": 0.0235, "unit": "1/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"V_g": {"mean": -1.89711998489, "std": 0.23, "unit": "L/kg", "distribution": "Lognormal", "variability": None},
"V_i": {"mean": 0.12, "std": 0.012, "unit": "L/kg", "distribution": "Gaussian", "variability": None},
"k_a": {"mean": 0.018, "std": 0.0045, "unit": "mmol/L", "distribution": "Gaussian", "variability": [[0, 3], 0.05]},
"t_max": {"mean": -3.689, "std": 0.025, "unit": "min", "distribution": "Lognormal", "variability": None},
"w": {"mean": 74.9, "std": 14.4, "unit": "kg", "distribution": "Gaussian", "variability": None},
"R_th": {"mean": 9, "std": 1.5, "unit": "mmol/L", "distribution": "Gaussian", "variability": None},
"R_cl": {"mean": 0.01, "std": 0.025, "unit": "1/min", "distribution": "Gaussian", "variability": None},
"U_ceil": {"mean": 0.02, "std": 0.035, "unit": "TODO ??", "distribution": "Uniform", "variability": None}
}
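    # Convention for the entries above (see get_params_from_dist below): "mean"
    # and "std" are reinterpreted per distribution - for "Uniform" they are the
    # min and max values, for "Lognormal" they are the mean and sigma of the
    # underlying logarithm (as passed to np.random.lognormal); "variability" is
    # either None or [[min frequency, max frequency], max amplitude].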
def __init__(self, params=None, food_intake_f=None, control_type=None):
"""
Initializes the Class Subject.
        > params(default None): list with all the parameters in Hovorka's Model in the order given below.
In case of None, automatic parameters will be given following the built-in probability distribution.
[EGP_0, F_01, k_12, S_id, S_ie, S_it, k_e, Bio, k_a1, k_a2, k_a3, k_a_int, V_g, V_i, k_a, t_max, w, R_th, R_cl, U_ceil]
        > food_intake_f(default None): Function in format 'fun(t)' that returns the rate of carbohydrate ingestion at a given time t.
The values should be (g / min).
        > insulin_intake_f(default None): Function in format 'fun(t)' that returns the rate of insulin infusion at a given time t.
The values should be (U / min).
"""
# Constants
self.CGM_AVERAGE_DELAY = 15 # minutes
self.SAMPLING_TIME = 15 # minutes
# Last Control Variables
self.control_u = np.array([0]) # output after saturation
self.control_I = np.array([0]) # previous I
self.control_v = np.array([0]) # output before saturation
# Variable with resulting simulation values from 'simulate'
self.solution = np.array([])
self.time = np.array([])
self.max_glucose = 0
self.min_glucose = 0
# Steady-state basal insulin value
self.u_basal = 0
# Auxiliary input functions: food intake and control infusion
self.control_type = control_type
self.food_intake_fun = food_intake_f
# Continuous Variables
self.insulin = np.array([])
self.meal = np.array([])
# Sensor reading
self.cgm_glucose = np.array([])
self.cgm_time = np.array([])
# Parameters Initialization
if params is None:
            # Initialize parameters from the defined distributions
# [ *0, *1, *2, *3, *4, *5, *6, *7, *8, 9, 10, 11, 12, 13, 14, 15,16, 17, 18, 19]
# [EGP_0, F_01, k_12, S_id, S_ie, S_it, k_e, Bio, k_aint, k_a1, k_a2, k_a3, V_g, V_i, k_a, t_max, w, R_th, R_cl, U_ceil]
self.params = self.get_params_from_dist(self.__class__.PARAM_DIST)
else:
self.params = params
# Steady-state basal insulin
self.steady_state()
def get_params_from_dist(self, param_dist_dict):
"""
Collects the values from the parameters distributions.
The dictionary should be a dictionary of dictionaries and should have all these fields:
"EGp_dict, F_01, k_12, S_id, S_ie, S_it, k_e, Bio, k_aint, k_a1, k_a2, k_a3, V_g, V_i, k_a, t_max, w, R_th, R_cl, U_ceil"
        And each field should result in a dictionary with the fields below:
            "mean": 0.0169, "std": 0.0039, "unit": "mmol/min", "distribution": "Gaussian", "variability": [[0, 3], 0.05]
Description:
- mean": Average values of the distribution. OBS.: If the distribution is uniform, this is the min value.
- std": Standard deviation for the distribution. OBS.: If the distribution is uniform, this is the max value.
- unit": Units of the parameter.
- distribution": The type of distribution, ex.: "Gaussian", "Lognormal", etc.
- variability: List in the format [ min oscillatory frequency, max oscillatory frequency], max amplitude ] if the parameter is
oscillatory, None otherwise.
The dictionary returned has all these fields:
"EGp_dict, F_01, k_12, S_id, S_ie, S_it, k_e, Bio, k_aint, k_a1, k_a2, k_a3, V_g, V_i, k_a, t_max, w, R_th, R_cl, U_ceil"
And each inner dictionary should look like this:
"value": 0.0169, "var_freq": 2, "var_amp": 0.05, "unit": "mmol/min"
Description:
- value: Average values of the distribution.
- var_freq: Sinusoidal wavelength (frequency) of variability.
- var_amp: Sinusoidal amplitude of variability.
> param_dist_dict: Dictionary with all the parameter distributions.
< param_values_dict: Dictionary with all the values from the distributions.
"""
param_values_dict = {}
# For each item in the dict
for params, dist_dict in param_dist_dict.items():
# Initialize inner dict
param_values_dict[params] = {}
# For each type of distribution
dist_type = dist_dict["distribution"]
if dist_type == "Gaussian" or dist_type == "Normal":
param_values_dict[params]["value"] = np.random.normal(dist_dict["mean"], dist_dict["std"])
elif dist_type == "Lognormal":
param_values_dict[params]["value"] = np.random.lognormal(dist_dict["mean"], dist_dict["std"])
elif dist_type == "Uniform":
param_values_dict[params]["value"] = | np.random.uniform(dist_dict["mean"], dist_dict["std"]) | numpy.random.uniform |
import sys
from scipy import special, stats
from numpy import array as A
def compoundPartitioning(agents):
"""Compute and return sections with compound criteria
agents is a dict with keys "d", "id", "od", "s", "is", "os"
with sectorialized_agents__ with each of these criteria
"""
exc_h=set( agents["d"][-1][2]) & \
set(agents["id"][-1][2]) & \
set(agents["od"][-1][2]) & \
set( agents["s"][-1][2]) & \
set(agents["is"][-1][2]) & \
set(agents["os"][-1][2])
exc_i=set( agents["d"][-1][1]) & \
set(agents["id"][-1][1]) & \
set(agents["od"][-1][1]) & \
set( agents["s"][-1][1]) & \
set(agents["is"][-1][1]) & \
set(agents["os"][-1][1])
exc_p=set( agents["d"][-1][0]) & \
set(agents["id"][-1][0]) & \
set(agents["od"][-1][0]) & \
set( agents["s"][-1][0]) & \
set(agents["is"][-1][0]) & \
set(agents["os"][-1][0])
exc=exc_p,exc_i,exc_h
inc_h=set( agents["d"][-1][2]) | \
set(agents["id"][-1][2]) | \
set(agents["od"][-1][2]) | \
set( agents["s"][-1][2]) | \
set(agents["is"][-1][2]) | \
set(agents["os"][-1][2])
inc_i=set( agents["d"][-1][1]) | \
set(agents["id"][-1][1]) | \
set(agents["od"][-1][1]) | \
set( agents["s"][-1][1]) | \
set(agents["is"][-1][1]) | \
set(agents["os"][-1][1])
inc_p=set( agents["d"][-1][0]) | \
set(agents["id"][-1][0]) | \
set(agents["od"][-1][0]) | \
set( agents["s"][-1][0]) | \
set(agents["is"][-1][0]) | \
set(agents["os"][-1][0])
inc=inc_p, inc_i, inc_h
total=set(agents["d"][-1][0]+agents["d"][-1][1]+agents["d"][-1][2])
excc_h=exc[2]
excc_p=inc[0]
#excc_i=total - (exc[2] & inc[0])
excc_i=total - (exc[2] | inc[0])
excc=excc_p,excc_i,excc_h
incc_h=inc[2]
incc_p=excc[0]
incc_i=total-(incc_h | incc_p)
incc=incc_p,incc_i,incc_h
exce_h=exc[2]
exce_i=inc[1]
exce_p=total-(exce_h | exce_i)
exce=exce_p,exce_i,exce_h
ince_h=inc[2]
ince_i=exc[1]
ince_p=total-(ince_h | ince_i)
ince=ince_p,ince_i,ince_h
return dict(total=total, exc=exc, inc=inc, excc=excc, incc=incc, exce=exce, ince=ince)
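# Illustrative sketch (not part of the original module): a minimal, made-up
# input for compoundPartitioning().  For each criterion, the last element of
# the list holds the (periphery, intermediary, hubs) lists of agent ids.
def _example_compound_partitioning():
    agents = {criterion: [([1, 2], [3], [4, 5])]
              for criterion in ("d", "id", "od", "s", "is", "os")}
    return compoundPartitioning(agents)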
class NetworkPartitioning:
network_count=0
def __init__(self,networkMeasures=None, minimum_incidence=1,metric="strength"):
if not networkMeasures:
networkMeasures=g.NetworkMeasures()
self.metric=metric
metric_=self.standardizeName(metric)
prob, max_degree_empirical, max_degree_possible = \
self.basicMeasures( networkMeasures , metric_)
incident_degrees, incident_degrees_, agent_degrees = \
self.makeDegreeLists( networkMeasures, metric_)
empirical_distribution = self.makeEmpiricalDistribution(
incident_degrees, incident_degrees_, networkMeasures.N )
binomial_distribution=self.makeBinomialDistribution(
prob, max_degree_possible, incident_degrees_)
binomial=stats.binom(max_degree_possible,prob)
#sectorialized_degrees= self.sectorializeDegrees(
# empirical_distribution, binomial_distribution, incident_degrees_)
#sectorialized_degrees_= self.newSectorializeDegrees(
# empirical_distribution, binomial_distribution, incident_degrees_)
sectorialized_degrees__= self.newerSectorializeDegrees(
empirical_distribution, binomial, incident_degrees_,
max_degree_empirical,minimum_incidence,networkMeasures.N )
#sectorialized_agents= self.sectorializeAgents(
# sectorialized_degrees, networkMeasures.degrees)
#sectorialized_agents_= self.sectorializeAgents(
# sectorialized_degrees_, networkMeasures.degrees)
sectorialized_agents__= self.sectorializeAgents(
sectorialized_degrees__, agent_degrees)
        NetworkPartitioning.network_count += 1  # to keep track of how many partitions have been done
self.makeSelf("incident_degrees_ ",incident_degrees_ ,
"incident_degrees ",incident_degrees ,
#"sectorialized_agents ",sectorialized_agents ,
#"sectorialized_agents_ ",sectorialized_agents_ ,
"sectorialized_agents__ ",sectorialized_agents__ ,
#"sectorialized_degrees ",sectorialized_degrees ,
#"sectorialized_degrees_ ",sectorialized_degrees_ ,
"sectorialized_degrees__ ",sectorialized_degrees__ ,
"binomial_distribution ",binomial_distribution ,
"prob" ,prob,
"max" ,(max_degree_possible, max_degree_empirical),
"empirical_distribution",empirical_distribution,
"binomial",binomial,
"metric_",metric_,
"minimum_incidence",minimum_incidence,
"binomial_distribution" ,binomial_distribution)
def makeSelf(self, *args):
for signifier, signified in zip(args[::2], args[1::2]):
#try:
exec("self.{} = signified".format(signifier))
#thing=signified
#exec("self.{} = thing".format(signifier))
#exec("self.{} = {}".format(signifier, signified))
#exec("self.{} = ".format(signifier), signified)
#except:
# self.binomial=signified
def standardizeName(self,name):
if name in (["s","strength","st"]+["f","força","forca","fo"]):
name_="s"
elif name in (["is","in_strength","ist"]+["fe","força_e","forca_e","fe"]):
name_="is"
elif name in (["os","out_strength","ost"]+["fs","força_s","forca_s","fs"]):
name_="os"
elif name in (["d","degree","dg"]+["g","grau","gr"]):
name_="d"
elif name in (["id","in_degree","idg"]+["ge","grau_e","gre"]):
name_="id"
elif name in (["od","out_degree","odg"]+["gs","grau_s","grs"]):
name_="od"
return name_
def basicMeasures(self,networkMeasures,metric_):
nm=networkMeasures
if metric_ in ("s","is","os"):
edge_weights=[i[2]["weight"] for i in nm.edges]
average_edge_weight=sum(edge_weights)/nm.E
self.average_edge_weight=average_edge_weight
if metric_=="s":
max_degree_empirical=round(max(nm.strengths_) / average_edge_weight)
elif metric_=="is":
max_degree_empirical=round(2*max(nm.in_strengths_) / average_edge_weight)
elif metric_=="os":
max_degree_empirical=round(2*max(nm.out_strengths_) / average_edge_weight)
elif metric_=="d":
max_degree_empirical=max(nm.degrees_)
elif metric_=="id":
max_degree_empirical=2*max(nm.in_degrees_)
elif metric_=="od":
max_degree_empirical=2*max(nm.out_degrees_)
prob=nm.E/(nm.N*(nm.N-1)) # edge probability
max_degree_possible=2*(nm.N-1) # max d given N
return prob, max_degree_empirical, max_degree_possible
def makeDegreeLists(self, networkMeasures,metric_):
if metric_=="s":
agent_degrees={i:round(j/self.average_edge_weight) for i,j in networkMeasures.strengths.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="is":
agent_degrees={i:round((2*j)/self.average_edge_weight) for i,j in networkMeasures.in_strengths.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="os":
agent_degrees={i:round((2*j)/self.average_edge_weight) for i,j in networkMeasures.out_strengths.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="d":
agent_degrees=networkMeasures.degrees
incident_degrees=networkMeasures.degrees_
elif metric_=="id":
agent_degrees={i:(2*j) for i,j in networkMeasures.in_degrees.items()}
incident_degrees=list(agent_degrees.values())
elif metric_=="od":
agent_degrees={i:(2*j) for i,j in networkMeasures.out_degrees.items()}
incident_degrees=list(agent_degrees.values())
incident_degrees_=list(set(incident_degrees))
incident_degrees_.sort()
return incident_degrees, incident_degrees_, agent_degrees
def makeEmpiricalDistribution(self, incident_degrees, incident_degrees_, N):
empirical_distribution=[]
for degree in incident_degrees_:
empirical_distribution.append(incident_degrees.count(degree)/N)
return empirical_distribution
def makeBinomialDistribution(self,prob,max_degree_possible,incident_degrees_):
"""If max_degree_possible == max_degree_empirical, makeBinomial ==1"""
        binomial_distribution = []  # occurrence probability of degrees
for degree in incident_degrees_:
if len(binomial_distribution) and binomial_distribution[-1]==0.0:
binomial_distribution.append(0.0)
else:
n_occurrences=special.binom(max_degree_possible,degree)
prob_degree=n_occurrences * (prob**degree)*((1-prob)**(max_degree_possible-degree))
binomial_distribution.append(prob_degree)
return binomial_distribution
def sectorializeAgents(self,sectorialized_degrees,agent_degrees):
periphery=[x for x in agent_degrees
if agent_degrees[x] in sectorialized_degrees[0]]
intermediary=[x for x in agent_degrees
if agent_degrees[x] in sectorialized_degrees[1]]
hubs=[x for x in agent_degrees
if agent_degrees[x] in sectorialized_degrees[2]]
return periphery, intermediary, hubs
def newerSectorializeDegrees(self,empirical_distribution,binomial,incident_degrees_,max_degree_empirical,minimum_count,num_agents):
# compute bins [start, end]
prob_min=minimum_count/num_agents
llimit=0
rlimit=0
self.bins=bins=[]
self.empirical_probs=empirical_probs=[]
while (rlimit < len(incident_degrees_)):
if (sum(empirical_distribution[llimit:])>prob_min):
prob_empirical=0
while True:
prob_empirical=sum(
empirical_distribution[llimit:rlimit+1] )
if prob_empirical >= prob_min:
break
else:
rlimit+=1
bins.append((llimit,rlimit))
empirical_probs.append(prob_empirical)
rlimit+=1
llimit=rlimit
else: # last bin
print("last bin less probable than prob_min")
rlimit=len(incident_degrees_)-1
bins.append((llimit,rlimit))
prob_empirical=sum(
empirical_distribution[llimit:rlimit+1] )
empirical_probs.append(prob_empirical)
rlimit+=1
binomial_probs=[]
for i, bin_ in enumerate(bins):
llimit=bin_[0]
rlimit=bin_[1]
ldegree=incident_degrees_[llimit]-1
rdegree=incident_degrees_[rlimit]
binomial_prob=binomial.cdf(rdegree)-binomial.cdf(ldegree)
binomial_probs.append(binomial_prob)
        # compute the probabilities in each bin
        # compare the probabilities
distribution_compare = list(A(empirical_probs) < A(binomial_probs))
self.binomial_probs=binomial_probs
self.distribution_compare0=distribution_compare
if sum(distribution_compare):
tindex= distribution_compare.index(True)
tindex2=distribution_compare[::-1].index(True)
periphery_degrees=incident_degrees_[:tindex]
intermediary_degrees=incident_degrees_[tindex:-tindex2]
hub_degrees= incident_degrees_[-tindex2:]
else:
periphery_degrees=incident_degrees_[:]
intermediary_degrees=[]
hub_degrees=[]
return periphery_degrees, intermediary_degrees, hub_degrees
def newSectorializeDegrees(self,empirical_distribution,binomial_distribution,incident_degrees_):
        distribution_compare = A(empirical_distribution) < A(binomial_distribution)
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Fri Jan 24 13:24:43 2020
@author: ssli
Module to calculate the m bias
mcFitFunc:
Shear bias function.
WgQuantile1DFunc:
Calculate the weighted quantile by given probabilities
designed for 1D numpy array.
WgBin2DFunc:
Calculate the weighted quantile by given bin numbers
designed for 2D numpy array
mCalFunc:
Calculating the residual shear bias (m-value) in 2-d bins
"""
import numpy as np
from scipy import optimize
import pandas as pd
from astropy.io import fits
## All possible g1,g2 combinations
g1Range = np.array([-0.04,0.00,0.04,0.00,-0.0283,+0.0283,+0.0283,-0.0283])
g2Range = np.array([0.00,0.04,0.00,-0.04,+0.0283,+0.0283,-0.0283,-0.0283])
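# Illustrative sketch only (this is not the module's actual mcFitFunc, which
# the module docstring refers to): the conventional linear shear-bias model
# g_out = (1 + m) * g_in + c, written in the form expected by
# scipy.optimize.curve_fit.  The name _example_bias_model is hypothetical.
def _example_bias_model(g_in, m, c):
    return (1. + m) * g_in + c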
# -*- coding: utf-8 -*-
"""
This module contains a set of utility functions and classes used for developing
and testing Neural Maching Translation models. Everything comes directly
from the accompanying code to the book "Natural Language Processing with
PyTorch" by <NAME> and <NAME>.
The original code and data can be found at:
https://github.com/joosthub/PyTorchNLPBook
Used with permission under Apache License 2.0
@author: <NAME> and <NAME>
"""
import numpy as np
import pandas as pd
import json
import torch
import torch.nn as nn
from torch.utils.data import Dataset, DataLoader
from torch.nn import functional as F
from torch.nn.utils.rnn import pack_padded_sequence, pad_packed_sequence
from nltk.translate import bleu_score
chencherry = bleu_score.SmoothingFunction()
class Vocabulary(object):
"""Class to process text and extract vocabulary for mapping"""
def __init__(self, token_to_idx=None):
"""
Args:
token_to_idx (dict): a pre-existing map of tokens to indices
"""
if token_to_idx is None:
token_to_idx = {}
self._token_to_idx = token_to_idx
self._idx_to_token = {idx: token
for token, idx in self._token_to_idx.items()}
def to_serializable(self):
""" returns a dictionary that can be serialized """
return {'token_to_idx': self._token_to_idx}
@classmethod
def from_serializable(cls, contents):
""" instantiates the Vocabulary from a serialized dictionary """
return cls(**contents)
def add_token(self, token):
"""Update mapping dicts based on the token.
Args:
token (str): the item to add into the Vocabulary
Returns:
index (int): the integer corresponding to the token
"""
if token in self._token_to_idx:
index = self._token_to_idx[token]
else:
index = len(self._token_to_idx)
self._token_to_idx[token] = index
self._idx_to_token[index] = token
return index
def add_many(self, tokens):
"""Add a list of tokens into the Vocabulary
Args:
tokens (list): a list of string tokens
Returns:
indices (list): a list of indices corresponding to the tokens
"""
return [self.add_token(token) for token in tokens]
def lookup_token(self, token):
"""Retrieve the index associated with the token
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
"""
return self._token_to_idx[token]
def lookup_index(self, index):
"""Return the token associated with the index
Args:
index (int): the index to look up
Returns:
token (str): the token corresponding to the index
Raises:
KeyError: if the index is not in the Vocabulary
"""
if index not in self._idx_to_token:
raise KeyError("the index (%d) is not in the Vocabulary" % index)
return self._idx_to_token[index]
def __str__(self):
return "<Vocabulary(size=%d)>" % len(self)
def __len__(self):
return len(self._token_to_idx)
class SequenceVocabulary(Vocabulary):
def __init__(self, token_to_idx=None, unk_token="<UNK>",
mask_token="<MASK>", begin_seq_token="<BEGIN>",
end_seq_token="<END>"):
super().__init__(token_to_idx)
self._mask_token = mask_token
self._unk_token = unk_token
self._begin_seq_token = begin_seq_token
self._end_seq_token = end_seq_token
self.mask_index = self.add_token(self._mask_token)
self.unk_index = self.add_token(self._unk_token)
self.begin_seq_index = self.add_token(self._begin_seq_token)
self.end_seq_index = self.add_token(self._end_seq_token)
def to_serializable(self):
contents = super(SequenceVocabulary, self).to_serializable()
contents.update({'unk_token': self._unk_token,
'mask_token': self._mask_token,
'begin_seq_token': self._begin_seq_token,
'end_seq_token': self._end_seq_token})
return contents
def lookup_token(self, token):
"""Retrieve the index associated with the token
or the UNK index if token isn't present.
Args:
token (str): the token to look up
Returns:
index (int): the index corresponding to the token
Notes:
`unk_index` needs to be >=0 (having been added into the Vocabulary)
for the UNK functionality
"""
if self.unk_index >= 0:
return self._token_to_idx.get(token, self.unk_index)
else:
return self._token_to_idx[token]
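# Illustrative sketch (not part of the book's code): round-trip a couple of
# tokens through a SequenceVocabulary; the tokens themselves are arbitrary.
def _example_sequence_vocabulary():
    vocab = SequenceVocabulary()
    indices = vocab.add_many(["hello", "world"])
    return [vocab.lookup_index(i) for i in indices]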
class NMTVectorizer(object):
""" The Vectorizer which coordinates the Vocabularies and puts them to use"""
def __init__(self, source_vocab, target_vocab, max_source_length,
max_target_length):
"""
Args:
source_vocab (SequenceVocabulary): maps source words to integers
target_vocab (SequenceVocabulary): maps target words to integers
max_source_length (int): the longest sequence in the source dataset
max_target_length (int): the longest sequence in the target dataset
"""
self.source_vocab = source_vocab
self.target_vocab = target_vocab
self.max_source_length = max_source_length
self.max_target_length = max_target_length
def _vectorize(self, indices, vector_length=-1, mask_index=0):
"""Vectorize the provided indices
Args:
indices (list): a list of integers that represent a sequence
vector_length (int): an argument for forcing the length of index vector
mask_index (int): the mask_index to use; almost always 0
"""
if vector_length < 0:
vector_length = len(indices)
        vector = np.zeros(vector_length, dtype=np.int64)
from load_data import Data
import numpy as np
import torch
from collections import defaultdict
from model import *
from torch.optim.lr_scheduler import ExponentialLR
import argparse
import os
device = torch.device('cuda:0')
class Experiment:
def __init__(self, num_iterations, batch_size, learning_rate, decay_rate, ent_vec_dim, rel_vec_dim,
k, ni, ranks, input_dropout, hidden_dropout):
self.num_iterations = num_iterations
self.batch_size = batch_size
self.learning_rate = learning_rate
self.decay_rate = decay_rate
self.ent_vec_dim = ent_vec_dim
self.rel_vec_dim = rel_vec_dim
self.k = k
self.ni = ni
self.ranks = ranks
self.kwargs = {'input_dropout': input_dropout, 'hidden_dropout': hidden_dropout}
def get_data_idxs(self, data):
if len(data[0])-1 == 3:
data_idxs = [(self.relation_idxs[data[i][0]], self.entity_idxs[data[i][1]], self.entity_idxs[data[i][2]], self.entity_idxs[data[i][3]]) for i in range(len(data))]
elif len(data[0])-1 == 4:
data_idxs = [(self.relation_idxs[data[i][0]], self.entity_idxs[data[i][1]], self.entity_idxs[data[i][2]], self.entity_idxs[data[i][3]], self.entity_idxs[data[i][4]]) for i in range(len(data))]
return data_idxs
def get_er_vocab(self, data, miss_ent_domain):
er_vocab = defaultdict(list)
if len(data[0])-1 == 3:
if miss_ent_domain == 1:
for triple in data:
er_vocab[(triple[0], triple[2], triple[3])].append(triple[1])
elif miss_ent_domain == 2:
for triple in data:
er_vocab[(triple[0], triple[1], triple[3])].append(triple[2])
elif miss_ent_domain == 3:
for triple in data:
er_vocab[(triple[0], triple[1], triple[2])].append(triple[3])
elif len(data[0])-1 == 4:
if miss_ent_domain == 1:
for triple in data:
er_vocab[(triple[0], triple[2], triple[3], triple[4])].append(triple[1])
elif miss_ent_domain == 2:
for triple in data:
er_vocab[(triple[0], triple[1], triple[3], triple[4])].append(triple[2])
elif miss_ent_domain == 3:
for triple in data:
er_vocab[(triple[0], triple[1], triple[2], triple[4])].append(triple[3])
elif miss_ent_domain == 4:
for triple in data:
er_vocab[(triple[0], triple[1], triple[2], triple[3])].append(triple[4])
return er_vocab
def get_batch(self, er_vocab, er_vocab_pairs, idx):
batch = er_vocab_pairs[idx:idx+self.batch_size]
targets = np.zeros((len(batch), len(d.entities)))
for idx, pair in enumerate(batch):
targets[idx, er_vocab[pair]] = 1.
targets = torch.FloatTensor(targets).to(device)
return np.array(batch), targets
def evaluate(self, model, data, W):
hits, ranks, losses = [], [], []
for _ in [1, 3, 10]:
hits.append([])
test_data_idxs = self.get_data_idxs(data)
ary = len(test_data_idxs[0])-1
er_vocab_list = []
er_vocab_pairs_list = []
for miss_ent_domain in range(1, ary+1):
er_vocab = self.get_er_vocab(self.get_data_idxs(d.data), miss_ent_domain)
er_vocab_pairs = list(er_vocab.keys())
er_vocab_list.append(er_vocab)
er_vocab_pairs_list.append(er_vocab_pairs)
for miss_ent_domain in range(1, ary+1):
er_vocab = er_vocab_list[miss_ent_domain-1]
for i in range(0, len(test_data_idxs), self.batch_size):
data_batch, _ = self.get_batch(er_vocab, test_data_idxs, i)
r_idx = torch.tensor(data_batch[:, 0], dtype=torch.long).to(device)
e1_idx = torch.tensor(data_batch[:, 1], dtype=torch.long).to(device)
e2_idx = torch.tensor(data_batch[:, 2], dtype=torch.long).to(device)
e3_idx = torch.tensor(data_batch[:, 3], dtype=torch.long).to(device)
if ary == 3:
if miss_ent_domain == 1:
e_idx = [e2_idx, e3_idx]
elif miss_ent_domain == 2:
e_idx = [e1_idx, e3_idx]
elif miss_ent_domain == 3:
e_idx = [e1_idx, e2_idx]
elif ary == 4:
e4_idx = torch.tensor(data_batch[:, 4], dtype=torch.long).to(device)
if miss_ent_domain == 1:
e_idx = [e2_idx, e3_idx, e4_idx]
elif miss_ent_domain == 2:
e_idx = [e1_idx, e3_idx, e4_idx]
elif miss_ent_domain == 3:
e_idx = [e1_idx, e2_idx, e4_idx]
elif miss_ent_domain == 4:
e_idx = [e1_idx, e2_idx, e3_idx]
pred, _ = model.forward(r_idx, e_idx, miss_ent_domain, W)
e_all_idx = []
for k0 in range(1, ary+1):
e_all_idx.append(torch.tensor(data_batch[:, k0], dtype=torch.long).to(device))
for j in range(data_batch.shape[0]):
er_vocab_key = []
for k0 in range(ary+1):
er_vocab_key.append(data_batch[j][k0])
er_vocab_key.remove(data_batch[j][miss_ent_domain])
filt = er_vocab[tuple(er_vocab_key)]
target_value = pred[j, e_all_idx[miss_ent_domain-1][j]].item()
pred[j, filt] = 0.0
pred[j, e_all_idx[miss_ent_domain-1][j]] = target_value
sort_values, sort_idxs = torch.sort(pred, dim=1, descending=True)
sort_idxs = sort_idxs.cpu().numpy()
for j in range(data_batch.shape[0]):
rank = np.where(sort_idxs[j] == e_all_idx[miss_ent_domain-1][j].item())[0][0]
ranks.append(rank+1)
for id, hits_level in enumerate([1, 3, 10]):
if rank+1 <= hits_level:
hits[id].append(1.0)
else:
hits[id].append(0.0)
        return np.mean(1./np.array(ranks)), np.mean(hits[2]), np.mean(hits[1])
# mean = the average value
# median = the mid point value
# mode = the most common value
import numpy as np
speed = np.array([99,86,87,88,111,86,103,87,94,78,77,85,86])
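# Sketch continuing the example above (not in the original snippet): compute
# the three statistics for `speed` with plain NumPy.
print(np.mean(speed))    # mean: the average value
print(np.median(speed))  # median: the mid point value
values, counts = np.unique(speed, return_counts=True)
print(values[np.argmax(counts)])  # mode: the most common value (86)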
"""
The bpm module contain the Bpm class used to simulate the light propagation -
within low refractive index variation
and small angle (paraxial approximation) -
using the Beam Propagation Method.
This module was written by <NAME> during a master's course
of the PAIP master at the Université de Lorraine,
under the direction of Pr. <NAME>.
The bpm codes are mainly based on a compilation of MatLab codes initialy
developed by <NAME> during his PhD thesis[2],
and later modified at the FEMTO-ST institute of the Université de
Franche-Comté and at the LMOPS laboratory [3] of the
Université de Lorraine.
[1] <NAME>, in Fundamentals of Optical Waveguides,
2nd ed., edited by <NAME> (Academic, Burlington, 2006), pp. 329–397.
[2] "Generation et propagation de reseaux periodiques de solitons spatiaux
dans un milieu de kerr massif" PhD thesis, université de Franche-Comté 1998.
[3] <NAME> et. al., Broadband photonic transport between waveguides by
adiabatic elimination Phys. Rev. A, 97 023811 (2018).
"""
from math import pi, ceil, radians, sqrt, log, sin, cos, acos, asin, exp
import time
from scipy import special
from numpy.fft import fft, ifft, fftshift
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Polygon
import numba
@numba.vectorize([numba.float64(numba.complex128),
numba.float32(numba.complex64)])
def abs2(x):
"""Square modulus of x. Fastest way possible for a numpy array."""
return x.real**2 + x.imag**2
class Bpm():
"""
The Bpm class is used to simulate light propagation -
within small refractive index variation guides
and small angle of propagation (paraxial) -
using the Beam Propagation Method.
Parameters
----------
no : float
Refractive index of the cladding.
lo : float
        Wavelength of the beam in vacuum (µm).
length_z : float
Size of the compute window over z (µm).
dist_z : float
Step over z (µm)
nbr_z_disp : int
Number of points to display over z.
length_x : float
Size of the compute window over x (µm).
dist_x : float
Step over x (µm)
"""
def __init__(self, no, lo,
length_z, dist_z, nbr_z_disp,
length_x, dist_x):
"""
The Bpm class is used to simulate light propagation -
within small refractive index variation guides
and small angle of propagation (paraxial) -
using the Beam Propagation Method.
Parameters
----------
no : float
Refractive index of the cladding
lo : float
            Wavelength of the beam in vacuum (µm).
length_z : float
Size of the compute window over z (µm).
dist_z : float
Step over z (µm).
nbr_z_disp : int
Number of points to display over z.
length_x : float
Size of the compute window over x (µm).
dist_x : float
Step over x (µm).
Notes
-----
This method creates the following variables within the class
:class:`Bpm`:
- All input variables.
- ko: the free space vector (1/µm).
"""
self.no = no
self.lo = lo
self.ko = 2*pi / self.lo # linear wave vector in free space (1/µm)
self.length_z = length_z
self.dist_z = dist_z
self.nbr_z_disp = nbr_z_disp
self.dist_x = dist_x
self.length_x = length_x
def create_x_z(self):
"""
        Create the x, z arrays and adjust the resolution variables.
Returns
-------
length_z : float
Corrected value due to nbr_z being an int.
nbr_z : int
Number of points computed over z.
nbr_z_disp : int
Corrected value due to pas being an int.
length_x : float
Corrected value due to nbr_x being an int.
nbr_x : int
Number of point over x (µm).
x : array
x values between [-length_x/2, length_x/2-dist_x] center on 0.
Notes
-----
This method creates the following variables within the class
:class:`Bpm`:
- pas : Interval of computed points between each displayed points.
"""
assert self.nbr_z_disp > 0
self.nbr_z = ceil(self.length_z / self.dist_z)
self.length_z = self.nbr_z * self.dist_z
self.pas = ceil(self.length_z / (self.nbr_z_disp * self.dist_z))
self.nbr_z_disp = ceil(self.length_z / (self.pas * self.dist_z))
self.nbr_z_disp += 1 # add 1 for the initial field
self.nbr_z += 1 # add 1 for the initial field
self.nbr_x = ceil(self.length_x / self.dist_x) # nbr points over x
# check if even number
if self.nbr_x % 2 != 0:
self.nbr_x += 1
# check if multiple of 8: speeds up execution
# (was also needed for a obsolete feature)
for _ in range(3):
if self.nbr_x % 8 != 0:
self.nbr_x += 2
else:
break
self.length_x = self.nbr_x * self.dist_x
self.x = np.linspace(-self.length_x/2,
self.length_x/2 - self.dist_x,
self.nbr_x)
return [self.length_z, self.nbr_z, self.nbr_z_disp-1,
self.length_x, self.nbr_x, self.x]
# Guides #
def squared_guide(self, width):
"""
A lambda function than returns a centered rectangular shape.
return 1 if :math:`x >= -width/2` and :math:`x <= width/2`
else return 0.
Parameters
----------
width : float
Waveguide width.
"""
return lambda x: (x >= -width/2) & (x <= width/2)
def gauss_guide(self, width, gauss_pow=1):
"""
A lambda function than return a centered super-Gaussian shape.
:math:`e^{-(x/w)^{2P}}`
The waist is defined as width/2 and correspond to the 1/e
relative value.
See :func:`.example_guides_x` for more details.
Parameters
----------
width : float
Waveguide width (µm) at 1/e^2 intensity.
gauss_pow : int, optional
            Index of the super-Gaussian guide with 1 being a regular Gaussian
            guide and 4 being the conventional super-Gaussian guide used to
describe realistic waveguides.
See on en.wikipedia.org/wiki/Gaussian_function
#Higher-order_Gaussian_or_super-Gaussian_function.
1 by Default.
"""
if width == 0:
return lambda x: 0
w = width / 2 # width is diameter and w is radius
return lambda x: np.exp(-(x / w)**(2*gauss_pow))
def create_guides(self, shape, delta_no, nbr_p, p, offset_guide=0, z=0):
"""
Create an array of guides over x using peaks positions and for a given
shape.
Parameters
----------
shape : method
:meth:`squared_guide`, :meth:`gauss_guide` or any lambda function
that takes one argument and return the relative refractive index
for the input position.
delta_no : float
Difference of refractive index between the core and the cladding.
            Can contain the losses through the imaginary part.
nbr_p : int
Number of guides.
p : float
Distance between two guides center (µm).
offset_guide : float, optional
Guide offset from the center (µm). 0 by default.
z : list
            List [start, end] defining the waveguide extent over z (µm).
            By default the waveguide spans the whole window length.
Returns
-------
peaks : array-like
Central position of each guide [guide,z].
dn : array-like
Difference of refractive index [z,x]. Can contain the losses
            through the imaginary part.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_z, nbr_x, x, dist_x.
"""
peaks = np.array([[None]*self.nbr_z]*nbr_p)
dn = np.zeros((self.nbr_z, self.nbr_x))
dn_z = np.zeros(self.nbr_x)
if nbr_p == 0:
return [np.array([[None]*self.nbr_z]), dn]
peaks_z = (p*np.linspace(-nbr_p/2, nbr_p/2-1, nbr_p)
+ p/2
+ offset_guide)
dn_fix = shape(self.x) # guide shape center on 0
# Sum each identical guide with an offset defined by peaks_z
for i in range(nbr_p):
dn_z += np.roll(dn_fix, int(round(peaks_z[i] / self.dist_x)))
if z == 0:
start = 0
end = self.nbr_z
else:
# assert z[0] >= 0 and z[1] <= self.length_z and z[0] <= z[1]
if z[0] > z[1]:
print("Warning, the waveguide beginning occurs after the end.",
z[0], "should be <=", z[1])
if z[1] > self.length_z:
z[1] = self.length_z
start = int(z[0]/self.dist_z)
end = int(z[1]/self.dist_z)
dn[start:end] = dn_z
for i in range(start, end):
peaks[:, i] = peaks_z
dn = dn*delta_no # give a value to the shape
return [peaks, dn]
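    # --- Illustrative sketch (not part of the original module) -------------
    # Hypothetical end-to-end example of the grid and guide creation steps
    # above; every numeric value is an assumption chosen for illustration.
    @staticmethod
    def _example_guides():
        bpm = Bpm(no=2.14, lo=1.55,
                  length_z=1000, dist_z=1, nbr_z_disp=200,
                  length_x=500, dist_x=0.2)
        bpm.create_x_z()  # must run first: defines the x grid used by the shapes
        shape = bpm.squared_guide(width=6)
        peaks, dn = bpm.create_guides(shape, delta_no=1e-3, nbr_p=3, p=15)
        return peaks, dn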
def create_curved_guides(self, shape, width, delta_no, curve, half_delay,
distance_factor, offset_guide=0):
"""
Create two curved guides and one linear guide on the center (STIRAP).
The central positions over x and z are defined as follow:
Left guide: :math:`x_0-p_{min}-curve(z-length\_z/2-half\_delay)^2`
Right guide: :math:`x_0+p_{min}+curve(z-length\_z/2+half\_delay)^2`
Central guide: :math:`x_0`
Parameters
----------
shape : method
:meth:`square` or :meth:`gauss`
width : float
Waveguide width (µm) at 1/e^2 intensity.
delta_no : float
Difference of refractive index between the core and the cladding.
Can contain the losses throught the imaginary part.
curve : float
curvature factor in :math:`10^{-8} µm^{-2}`.
half_delay : float
            Half distance over z in µm between the two external guides where
they are the closest.
In other words, the distance from the center and the minimum of one
of the curved guides over z.
distance_factor : float
Factor between the guide width and the minimal distance between the
two guides =p_min/width.
If distance_factor=1, the curved guides will touch the central
guide (p_min=width).
offset_guide : float, optional
Waveguide offset from the center (µm). 0 by default.
Returns
-------
peaks : array
Central position of each guide as peaks[guide,z].
dn : array
Difference of refractive index as dn[z,x]. Can contain the losses
            through the imaginary part.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
length_z, nbr_z, nbr_x, x, dist_x.
"""
# all points over z
z = np.linspace(0, self.length_z, self.nbr_z)
# left curved guide
sa = (- offset_guide
+ curve*(z - self.length_z/2 - half_delay)**2
+ width*distance_factor)
# right curved guide
sb = (offset_guide
+ curve*(z - self.length_z/2 + half_delay)**2
+ width*distance_factor)
peaks = np.array([-sa,
np.array([offset_guide] * self.nbr_z),
sb])
dn = np.zeros((self.nbr_z, self.nbr_x))
dn_fix = shape(self.x) # guide shape center on 0
for i in range(self.nbr_z):
dn[i, :] = np.roll(dn_fix, int(round(-sa[i] / self.dist_x))) \
+ np.roll(dn_fix, int(round(offset_guide / self.dist_x))) \
+ np.roll(dn_fix, int(round(sb[i] / self.dist_x)))
dn = dn * delta_no # give a value to the shape
return [peaks, dn]
# Light #
def gauss_light(self, fwhm, offset_light=0):
"""
        Create a Gaussian beam in amplitude.
        :math:`E = e^{-((x-x_0)/w)^{2}}`
        The waist is defined as fwhm/sqrt(2*log(2)) and corresponds to the 1/e
field value and 1/:math:`e^2` intensity value.
Parameters
----------
fwhm : float
Full width at half maximum (for intensity not amplitude) (µm).
offset_light : float, optional
Light offset from center in µm. 0 by default.
Returns
-------
field : array
Amplitude values over x in µm.
Notes
-----
This methods uses the x and dist_x variables defined in :class:`Bpm`.
"""
spot_size = fwhm / sqrt(2 * log(2)) # such as I=1/e^2 in intensity
if spot_size != 0:
field = np.exp(-(self.x / spot_size)**2)
field = np.roll(field, int(round(offset_light / self.dist_x)))
else:
field = 0 * self.x # Avoid division by zero error
return field
def squared_light(self, fwhm, offset_light=0):
"""
Create a flat-top beam (squared).
Parameters
----------
fwhm : float
Beam width in µm.
offset_light : float, optional
Light offset from center in µm. 0 by default.
Returns
-------
field : array
Amplitude values over x in µm.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_x, x.
"""
field = np.zeros(self.nbr_x)
for j in range(self.nbr_x):
if self.x[j] >= -fwhm/2 and self.x[j] <= fwhm/2:
field[j] = 1
else:
field[j] = 0
field = np.roll(field, int(round(offset_light / self.dist_x)))
return field
def mode_determ(self, width, delta_no, mode):
"""
        Solve the transcendental equation tan=sqrt that gives the modes
allowed in a squared guide.
Parameters
----------
width : float
Waveguide width (µm) at 1/e^2 intensity.
delta_no : float
Difference of refractive index between the core and the cladding.
mode : int
Number of the searched mode.
Returns
-------
h_m : float
Transverse propagation constant over x (µm).
gamma_m : float
Extinction coefficient over x (µm).
beta_m : float
Longitudinal constant of propagation over z (µm).
Raises
------
ValueError
if no mode exists.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
lo, no, ko.
"""
width = float(width)
if width == 0:
raise ValueError("no mode " + str(mode) + " existing")
delta_no = float(delta_no.real)
lim = self.lo/(2 * width * (self.no + delta_no)) - 1e-12
theta_c = acos(self.no / (self.no + delta_no)) # Critical angle
solu = np.linspace(
mode*lim + 0.000001,
(mode + 1) * lim,
round(1 + (lim - 0.000001)/0.000001))
lhs = np.tan(
pi * width * (self.no + delta_no) / self.lo * solu
- mode*pi/2)
rhs = np.sqrt(
0j # to avoid sqrt error when complexe
+ (sin(theta_c) / solu)**2
- 1)
result = rhs - lhs # 0 if left == right
minimum = abs(result).min() # return min value : where two equations~=
i_min = int(np.where(abs(result) == minimum)[0]) # min value index
if i_min == 0:
raise ValueError("no mode " + str(mode) + " existing")
sin_theta_m = solu[i_min]
theta_m = asin(sin_theta_m) # angle at which the mode propagate
beta_m = self.ko * (self.no + delta_no) * cos(theta_m)
h_m = sqrt((self.ko * (self.no + delta_no))**2 - beta_m**2)
gamma_m = (self.no * self.ko
* np.sqrt((cos(theta_m) / cos(theta_c))**2 - 1))
return [h_m, gamma_m, beta_m]
def mode_light(self, width, delta_no, mode, offset_light=0):
"""
Create light based on propagated mode inside a squared guide.
Parameters
----------
width : float
Waveguide width (µm) at 1/e^2 intensity.
delta_no : float
Difference of refractive index between the core and the cladding.
mode : int
Number of the searched mode.
offset_light : float, optional
Light offset from center (µm). 0 by default.
Returns
-------
field : array
Amplitude values over x (µm).
h_m : float
Transverse propagation constant over x (µm).
gamma_m : float
Extinction coefficient over x (µm).
beta_m : float
Longitudinal constant of propagation over z (µm).
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_x, x and the :meth`mode_determ` method.
"""
field = np.zeros(self.nbr_x)
[h_m, gamma_m, beta_m] = self.mode_determ(width, delta_no, mode)
if mode % 2 == 0: # if even mode
b_b = cos(h_m * width / 2) # Continuity value where x=width/2
for j in range(self.nbr_x): # Compute light based on h,gamma,beta
if abs(self.x[j]) <= width/2: # in core
field[j] = cos(h_m * self.x[j])
else: # in cladding
field[j] = b_b * exp(-gamma_m * (
abs(self.x[j])
- width/2))
else: # if odd mode
c_c = sin(h_m * width / 2) # Continuity value where x=width/2
for j in range(self.nbr_x): # Compute light based on h,gamma,beta
if abs(self.x[j]) <= width/2: # in core
field[j] = sin(h_m * self.x[j])
elif self.x[j] >= width/2: # Right cladding
field[j] = c_c * exp(-gamma_m * (
self.x[j]
- width/2))
else: # Left cladding
field[j] = -c_c * exp(gamma_m * (
self.x[j]
+ width/2))
field = np.roll(field, int(round(offset_light / self.dist_x)))
return [field, h_m, gamma_m, beta_m]
def all_modes(self, width, delta_no, offset_light=0):
"""
Compute all modes allowed by the guide and sum them into one field.
Parameters
----------
width : float
Waveguide width (µm) at 1/e^2 intensity.
delta_no : float
Difference of refractive index between the core and the cladding.
offset_light : float, optional
Light offset from center in µm. 0 by default.
Returns
-------
field : array
Sum of all possibles fields in the guide.
h : array, float
Transverse propagation constant over x in µm of all modes.
gamma : array, float
Extinction coefficient over z in µm of all modes.
beta : array, float
Longitudinal constant of propagation over z in µm of all modes.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_x and the :meth`mode_light` method.
"""
i = 0
field = np.zeros(self.nbr_x)
h = np.array([])
gamma = np.array([])
beta = np.array([])
while True:
try:
[field_i, h_m, gamma_m, beta_m] = self.mode_light(
width, delta_no, i, offset_light)
field = field + field_i
h = np.append(h, h_m)
gamma = np.append(gamma, gamma_m)
beta = np.append(beta, beta_m)
i += 1
except ValueError:
break
return [field, h, gamma, beta]
def check_modes(self, width, delta_no):
"""
Return the last possible mode number.
Could be merged with :meth:`all_modes` but would increase the needed
time to compute just to display a number.
Parameters
----------
width : float
Waveguide width (µm) at 1/e^2 intensity.
delta_no : float
Difference of refractive index between the core and the cladding.
Returns
-------
m : int
Number of the last possible mode for a squared guide.
Notes
-----
This methods uses the :meth`mode_light` method defined in :class:`Bpm`.
"""
i = 0
while True:
try:
self.mode_light(width, delta_no, i)
i += 1
except ValueError:
                print("This guide can propagate up to mode", i-1)
return i-1
def airy_light(self, lobe_size, airy_zero, offset_light=0):
"""
Create an Airy beam using scipy.special.airy(x).
Parameters
----------
lobe_size : float
Size of the first lobe (µm).
airy_zero : int
Cut the beam at the asked zero of the Airy function. n lobes will
be displayed.
offset_light : float, optional
Light offset from center in µm. 0 by default.
Returns
-------
field : array
Amplitude values over x (µm).
airy_zero : int
Number of lobes. Corrected if higher than the window size.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_x, length_x, dist_x, x.
"""
if lobe_size == 0 or airy_zero == 0:
return [np.zeros(self.nbr_x), 0]
lobe_size = -abs(lobe_size)
# Position of the first zero and the asked one
zero_pos = special.ai_zeros(airy_zero)[0]
first_zero = zero_pos[0]
last_zero = zero_pos[-1]
# Positions/size of the wanted beam
left = last_zero * lobe_size / first_zero
right = 10 * lobe_size / first_zero # Airy=1e-10 at x=10
# Reduce the last zero number to stay in the window
if -left > self.length_x:
            left = zero_pos * lobe_size / first_zero  # all possible left positions
airy_zero = np.where(-left <= self.length_x)[0] # Higher index
if airy_zero.size == 0: # interface don't allow this situation
print("The first lobe is bigger than the windows size")
return [np.zeros(self.nbr_x), 0]
else: # take the higher lobe possible
airy_zero = int(airy_zero[-1])
last_zero = zero_pos[airy_zero] # Value of the last lobe
airy_zero += 1 # +1 to return the zero number
                left = last_zero * lobe_size / first_zero  # corrected left position
# Number of points in the Airy window to match the full window
nbr_point = int(round(abs((left - right) / self.dist_x)))
# Airy window size
x_airy = np.linspace(last_zero, 10, nbr_point)
# Positions of the Airy and full window center
center_airy = int(np.where(x_airy >= 0)[0][0])
center = int(np.where(self.x >= 0)[0][0])
# Airy field
field = np.array(special.airy(x_airy)[0])
# add last empty field to reach the windows size
if self.x.size > field.size:
field = np.append(field, np.zeros((self.x.size-field.size)))
else:
field.resize(self.x.size) # Cut if exceed windows size
# Recenter on 0
field = np.roll(field, int(round(center - center_airy)))
field = np.roll(field, int(round(offset_light / self.dist_x)))
field /= np.max(field) # Normalized
return [field, airy_zero]
def init_field(self, field, theta_ext, irrad):
"""
Initialize phase, field and power variables.
Parameters
----------
field : array, array-like
Amplitude values for each beams over x (µm) [beam,E] or E
theta_ext : float
            Exterior inclination angle (°).
irrad : array, array-like
Irradiance for each beam (:math:`W/m^2`).
Returns
-------
progress_pow : array
Intensity values over x (µm).
Notes
-----
This method creates the following variables within the class
:class:`Bpm`:
        - epnc: Conversion factor used to set the unit of the field and irradiance.
- phase_mat: Free propagation in Fourier space over dz/2.
- current_power: Intensity for z=0.
- field: Field value with the unit.
This methods uses the following variables defined in :class:`Bpm`:
no, x, dist_x, nbr_x, nbr_z_disp.
"""
assert theta_ext <= 28 # paraxial approximation limitation
self.field = field.astype(complex)
# see en.wiki: Gaussian_beam#Mathematical_form for intensity definition
eta = 376.730313668 # Impedance of free space mu_0*c
        self.epnc = self.no / (2*eta)  # used to convert E into I
# unit(epnc)= W/V^2
try: # if multiple beams or one beam as [beam]
_ = self.field.shape[1] # Raise a IndexError if not
nbr_light = self.field.shape[0] # [beam1(x),beam2,beam3] -> 3
# Eo = sqrt(irrad[i] / self.epnc) # Peak value of the field (V/m).
for i in range(nbr_light):
self.field[i] *= sqrt(irrad[i] / self.epnc)
self.field = np.sum(self.field, axis=0) # merge all beam into one
except IndexError: # if only one beam and not in form [beam]
self.field *= sqrt(irrad / self.epnc)
# https://support.lumerical.com/hc/en-us/articles/
# 360034382894-Understanding-injection-angles-in-broadband-simulations
theta = asin(sin(radians(theta_ext)) / self.no) # angle in the guide
ph = self.no * self.ko * sin(theta) * self.x # k projection over x
self.field *= np.exp(1j * ph) # Initial phase due to angle
nu_max = 1 / (2*self.dist_x) # max frequency due to sampling
        # Spatial frequencies over x (1/µm)
nu = np.linspace(-nu_max,
nu_max * (1 - 2/self.nbr_x),
self.nbr_x)
intermed = self.no * cos(theta) / self.lo
# Linear propagation phase
fr = 2 * pi * nu**2 / (intermed + np.sqrt(
intermed**2
- nu**2
+ 0j))
# Free space matrix
self.phase_mat = fftshift(np.exp(-1j * self.dist_z / 2 * fr))
# Initial irradiance
self.current_power = self.epnc * abs2(self.field)
self.progress_pow = np.zeros([self.nbr_z_disp, self.nbr_x])
self.progress_pow[0, :] = np.array([self.current_power])
return [self.progress_pow]
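    # --- Illustrative sketch (not part of the original module) -------------
    # Hypothetical example of launching a Gaussian beam with init_field; the
    # irradiance, beam width and geometry values are assumptions.
    @staticmethod
    def _example_init_field():
        bpm = Bpm(no=2.14, lo=1.55,
                  length_z=1000, dist_z=1, nbr_z_disp=200,
                  length_x=500, dist_x=0.2)
        bpm.create_x_z()
        field = bpm.gauss_light(fwhm=6)
        [progress_pow] = bpm.init_field(field, theta_ext=0, irrad=1e13)
        return progress_pow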
def guide_position(self, peaks, guide, size):
"""
Return the left and right position index over x of a given guide
for each z.
Parameters
----------
peaks : array-like
Central position of each guide [guide,z].
guide : int
Number of the guide.
size : float
Width (µm).
Returns
-------
x_beg : array
Left indices position of the selected guide over z.
x_end : array
Right indices position of the selected guide over z.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_z, x, length_x.
"""
x_beg = np.array([None]*self.nbr_z)
x_end = np.array([None]*self.nbr_z)
if peaks.shape[0] != 0:
for j in range(self.nbr_z):
if peaks[guide, j] is None:
continue
pos_beg = (peaks[guide, j] - size/2) # Left position
# If the position is out of boundery, change interval to
# (-length_x/2, length_x)
if pos_beg < self.x[0] or pos_beg > self.x[-1]:
pos_beg = pos_beg % self.length_x
# If the pos_beg is between length_x/2 and length_x then change
# interval to (-length_x/2, length_x/2)
if pos_beg >= self.x[-1]:
pos_beg -= self.length_x
# Search the closest index value for this position
x_beg[j] = np.where(self.x >= pos_beg)[0][0]
pos_end = (peaks[guide, j] + size/2)
if pos_end < self.x[0] or pos_end > self.x[-1]:
pos_end = pos_end % self.length_x
if pos_end >= self.x[-1]:
pos_end -= self.length_x
x_end[j] = np.where(self.x >= pos_end)[0][0]
return [x_beg, x_end]
def power_guide(self, x_beg, x_end):
"""
return the power over z in a given interval by integrating the beam
irradiance.
Parameters
----------
x_beg : array
Left indices position over z for a selected guide.
x_end : array
Right indices position over z for a selected guide.
Returns
-------
P : array
Normalized power in the guide over z.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
nbr_z_disp, progress_pow, pas.
"""
P = np.zeros(self.nbr_z_disp)
        # explanation: power[0] is the input so it uses dn[0], but dn[0] covers the
        # propagation from step 0 to 1, so the next power[1] also corresponds to dn[0]
for i in range(self.nbr_z_disp):
if i == 0:
index = 0
elif i == self.nbr_z_disp-1:
# -1 for beginning at 0 and -1 for final useless value
index = len(x_beg)-2
else:
index = i*self.pas-1
if x_beg[index] is None or x_end[index] is None:
continue
if x_beg[index] <= x_end[index]:
P[i] = np.trapz(
self.progress_pow[i, x_beg[index]:x_end[index]],
dx=self.dist_x*1e-6)
else: # Take into account guides that crosses the window edges
P[i] = np.trapz(
self.progress_pow[i, x_beg[index]:],
dx=self.dist_x*1e-6)
P[i] += np.trapz(
self.progress_pow[i, :x_end[index]],
dx=self.dist_x*1e-6)
P /= np.trapz(self.progress_pow[0], dx=self.dist_x*1e-6)
return P # f not normalized, unit: (W/m)
def kerr_effect(self, dn, n2=None, chi3=None, kerr_loop=1,
variance_check=False, field_start=None,
dn_start=None, phase_mat=None):
"""
Kerr effect: refractive index modulation by the light intensity.
See: https://optiwave.com/optibpm-manuals/bpm-non-linear-bpm-algorithm/
Parameters
----------
dn : array
Difference of refractive index as dn[z,x]. Can contain the losses
            through the imaginary part.
n2 : float, optional
            Nonlinear refractive index responsible for the optical Kerr effect
in m^2/W. None by default.
chi3 : float, optional
Value of the third term of the electric susceptibility tensor
in m^2/V^2. None by default.
kerr_loop : int, optional
Number of corrective loops for the Kerr effect. 1 by default.
variance_check : bool, optional
            Check if the Kerr effect converges fast enough. False by default.
field_start : array, optional
Field without kerr effect.
If None were given, take the :meth:`main_compute` field.
dn_start : array, optional
Refractive index without kerr effect.
If None were given, take the :meth:`main_compute` dn.
phase_mat: array, optional
Free propagation in Fourier space over dz/2.
If None were given, take the :meth:`main_compute` phase_mat.
Returns
-------
dn : array
Refractive index with kerr effect.
nl_mat : array
refractive index modulation with kerr effect.
field_x : array
Field with the kerr effect at the self.i step.
cur_pow : array
Beam power with the kerr effect after the dz propagation.
Notes
-----
This methods uses the following variables defined in :class:`Bpm`:
i, epnc, no, ko, dist_z and the :meth:`variance` method.
"""
assert n2 is None or chi3 is None
# assert n2 is not None or chi3 is not None
# Set the default value if none were given
dn_start = dn[self.i, :] if dn_start is None else dn_start
nl_mat = self.ko * self.dist_z * dn_start
field_start = self.field if field_start is None else field_start
phase_mat = self.phase_mat if phase_mat is None else phase_mat
# Influence of the index modulation on the field
field_x = field_start * np.exp(1j * nl_mat)
# Linear propagation over dz/2
field_x = ifft(phase_mat * fft(field_x))
cur_pow = self.epnc * abs2(field_x)
for _ in range(kerr_loop):
prev_pow = cur_pow
# influence of the beam intensity on the index modulation
if n2 is not None: # dn = dn1+dn2*I with I unit: W/m^2
dn_kerr = dn_start + n2*prev_pow
elif chi3 is not None: # dn = dn1+ 3chi3/8*no*|E|^2
dn_kerr = dn_start + (3*chi3)/(8*self.no)*(prev_pow/self.epnc)
else:
dn_kerr = dn_start # identical to no kerr effect but slower
nl_mat = self.ko * self.dist_z * dn_kerr
# influence of the index modulation on the field
field_x = field_start * np.exp(1j * nl_mat)
# Linear propagation over dz/2
field_x = ifft(phase_mat * fft(field_x))
# power at pos z
cur_pow = self.epnc * abs2(field_x)
if variance_check:
try:
self.variance(prev_pow, cur_pow) # Check if converge
except ValueError as ex:
print(ex)
print("for the step i=", self.i)
if np.max(dn_kerr) > self.no/10:
print("Careful: index variation too high:",
"\t%.2f > %f/10" % (np.max(dn_kerr), self.no), sep="\n")
return [dn_kerr, nl_mat, field_x, cur_pow]
def variance(self, initial, final):
"""
        This function alerts the user when the Kerr effect doesn't converge fast
enough.
Raise a ValueError when the power standard deviation exceed
:math:`10^{-7}`.
Parameters
----------
initial : array
Power of the kerr effect looped n-1 time.
final : array
Power of the kerr effect looped n time.
Raises
------
ValueError
when the power standard deviation exceed :math:`10^{-7}`.
"""
finish_sum = np.sum(final)
nl_control_amp = 1/finish_sum * np.sqrt(np.abs(
np.sum(np.subtract(final, initial)**2)
- np.sum(np.subtract(final, initial))**2))
if nl_control_amp > 1e-7:
message = "Warning: solution doesn't converge " + \
"for a deviation of " + str(nl_control_amp)
raise ValueError(message)
def bpm_compute(self, dn, n2=None, chi3=None, kerr_loop=1,
variance_check=False):
"""
Compute BPM principle : free_propag over dz/2, index modulation,
free_propag over dz/2.
Parameters
----------
        dn : array
            Difference of refractive index as dn[z,x]. Can contain the losses
            through the imaginary part.
        n2 : float, optional
            Nonlinear refractive index responsible for the optical Kerr effect
            in m^2/W. None by default.
        chi3 : float, optional
            Value of the third term of the electric susceptibility tensor
            in m^2/V^2. None by default.
        kerr_loop : int, optional
            Number of corrective loops for the Kerr effect. 1 by default.
        variance_check : bool, optional
            Check if the Kerr effect converges fast enough. False by default.
Returns
-------
current_power : array
Power after the propagation over dz.
Notes
-----
This method uses the :class:`Bpm` class variables:
nbr_lost, i, field, dist_z, dn, nl_mat, phase_mat, epnc,
:meth:`kerr_effect`.
This method change the values of:
field, dn, nl_mat, current_power.
"""
# Linear propagation over dz/2
        self.field = ifft(self.phase_mat * fft(self.field))
from subprocess import call
import os, time
import shutil
import io
import base64
from IPython.display import HTML
import numpy as np
from PIL import ImageDraw, Image, ImageFont
from tempfile import NamedTemporaryFile
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from matplotlib import animation
import matplotlib
import math
import copy
import itertools
import tensorflow as tf
import subprocess
FLAGS = tf.app.flags.FLAGS
import cv2
#from pylab import *
import pylab
from matplotlib.patches import Wedge
from scipy.ndimage.filters import gaussian_filter
from mpl_toolkits.axes_grid1.anchored_artists import AnchoredDrawingArea
from matplotlib.patches import FancyArrowPatch
def images2video_highqual(frame_rate,
name="temp_name", dir_name="temp_dir"):
# make dir if not exists
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
pwd = os.getcwd()
os.chdir(dir_name)
print("converting to video")
video_name = name+'.mp4'
cmd = "ffmpeg -y -f image2 -r " + str(frame_rate) + " -pattern_type glob -i '*.png' -crf 5 -preset veryslow " + \
"-threads 16 -vcodec libx264 -pix_fmt yuv420p " + video_name
call(cmd, shell=True)
call("rm *.png", shell=True)
os.chdir(pwd)
return os.path.join(dir_name, video_name)
def images2video(images, frame_rate,
name="temp_name", dir_name="temp_dir", highquality=True):
images = np.uint8(images)
shape = images.shape
assert (len(shape) == 4)
assert (shape[3] == 3 or shape[3] == 1)
# make dir if not exists
if not os.path.isdir(dir_name):
os.mkdir(dir_name)
pwd = os.getcwd()
os.chdir(dir_name)
# write out images
print("writing images")
for i in range(shape[0]):
j = Image.fromarray(images[i, :, :, :])
j.save("%05d.jpeg" % i, "jpeg", quality=93)
print("converting to video")
video_name = name+'.mp4'
quality_str = '16' if highquality else '28'
cmd = "ffmpeg -y -f image2 -r " + str(frame_rate) + " -pattern_type glob -i '*.jpeg' -crf "+quality_str+" -preset veryfast " + \
"-threads 16 -vcodec libx264 -pix_fmt yuv420p " + video_name
call(cmd, shell=True)
call("rm *.jpeg", shell=True)
os.chdir(pwd)
return os.path.join(dir_name, video_name)
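# Illustrative sketch (not part of the original module): encode a short clip
# of random noise frames with images2video.  The shape, frame rate and names
# below are assumptions; ffmpeg must be available on the system, as above.
def _example_images2video():
    frames = np.random.randint(0, 255, size=(30, 120, 160, 3), dtype=np.uint8)
    return images2video(frames, frame_rate=10, name="noise_demo", dir_name="temp_dir")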
def play_video(path):
video = io.open(path, 'r+b').read()
encoded = base64.b64encode(video)
return HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
def visualize_images(images, frame_rate,
name="temp_name", dir_name="temp_dir",delete_temp=True):
path = images2video(images, frame_rate, name, dir_name)
out = play_video(path)
if delete_temp:
assert not("*" in dir_name)
shutil.rmtree(dir_name)
return out
def write_text_on_image(image, string,
lines=[],
fontsize=30,
lines_color=[]):
shape = image.shape
assert (len(shape) == 3)
assert (shape[-1] == 3 or shape[-1] == 1)
image = np.uint8(image)
j = Image.fromarray(image)
draw = ImageDraw.Draw(j)
# font = ImageFont.load_default().font
#font = ImageFont.truetype("/usr/share/fonts/truetype/inconsolata/Inconsolata.otf", fontsize)
font = ImageFont.truetype("/usr/share/fonts/truetype/dejavu/DejaVuSans.ttf", fontsize)
if isinstance(string, list):
for s in string:
draw.text(s[0], s[1], s[2], font=font)
else:
draw.text((0, 0), string, (255, 0, 0), font=font)
for line in lines:
draw.line(line, fill=128, width=1)
for line in lines_color:
draw.line(line[0], fill=line[1], width=1)
return np.array(j)
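# Illustrative sketch (not part of the original module): overlay a caption on
# a blank frame with write_text_on_image.  The frame size and text are
# arbitrary assumptions; the DejaVu font path used above must exist.
def _example_write_text():
    blank = np.zeros((360, 640, 3), dtype=np.uint8)
    return write_text_on_image(blank, "GO!", fontsize=30)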
def egomotion2animation(ego):
# ego is a egomotion matrix, with nframes * previous frames * 3
fig = plt.figure()
ax = fig.add_subplot(111, projection='3d')
line = ax.plot([], [], '.', zs=[])
line = line[0]
def get_range(ego, axis):
data = ego[:, :, axis]
data = np.reshape(data, [-1])
return [np.min(data), np.max(data)]
ax.axis(get_range(ego, 0) + get_range(ego, 1))
zrange = get_range(ego, 2)
ax.set_zlim(zrange[0], zrange[1])
# initialization function: plot the background of each frame
def init():
line.set_data([], [])
return line,
# animation function. This is called sequentially
def animate(i):
line.set_data(ego[i, :, 0], ego[i, :, 1])
line.set_3d_properties(ego[i, :, 2])
return line,
# call the animator. blit=True means only re-draw the parts that have changed.
anim = animation.FuncAnimation(fig, animate, init_func=init,
frames=ego.shape[0], blit=True)
plt.close(anim._fig)
return anim
def animation2HTML(anim, frame_rate):
    print("animation to video...")
if not hasattr(anim, '_encoded_video'):
with NamedTemporaryFile(suffix='.mp4') as f:
anim.save(f.name, fps=frame_rate,
extra_args=['-vcodec', 'libx264',
'-pix_fmt', 'yuv420p',
'-crf', '28',
'-preset', 'veryfast'])
video = io.open(f.name, 'r+b').read()
encoded = base64.b64encode(video)
return HTML(data='''<video alt="test" controls>
<source src="data:video/mp4;base64,{0}" type="video/mp4" />
</video>'''.format(encoded.decode('ascii')))
def visualize_egomotion(ego, frame_rate):
anim = egomotion2animation(ego)
return animation2HTML(anim, frame_rate)
def vis_reader(tout, frame_rate, j=0):
decoded, isvalid, ego, name, isstop = tout
images = decoded[j, :, :, :, :]
images_txt = np.zeros_like(images)
this_stop = isstop[j]
this_valid = isvalid[j]
for i in range(images.shape[0]):
stop_str = {1: "STOP",
0: "GO",
-1: "UNKNOWN"}[this_stop[i]]
valid_str = {0: "Egomotion=Invalid",
1: "Egomotion=Valid"}[this_valid[i]]
showing_str = stop_str + "\n" + valid_str
# showing_str = stop_str
images_txt[i, :, :, :] = write_text_on_image(images[i, :, :, :], showing_str)
print("showing visualization for video %s" % name[0])
return visualize_images(images_txt, frame_rate)
def move_to_line(move, h, w, multiplier = 10):
m = copy.deepcopy(move)
m[1] *= multiplier
m = [m[1] * math.sin(m[0]), m[1]*math.cos(m[0])]
return [w / 2, h, w/2+m[0], h-m[1]]
def draw_bar_on_image(image, bar_left_top, fraction, fill=(0,0,0,128), height=20, length=120):
image = np.uint8(image)
j = Image.fromarray(image)
draw = ImageDraw.Draw(j)
l = bar_left_top
draw.rectangle([l, (l[0]+int(length*fraction), l[1]+height)], fill=fill)
return np.array(j)
def vis_reader_stop_go(tout, prediction,frame_rate, j=0, save_visualize = False, dir_name="temp", provider="nexar_large_speed"):
    # out of date: the stop/go visualization is no longer used
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
turn = turn[j, :, :]
locs = locs[j, :, :]
images = decoded[j, :, :, :, :]
images_txt = np.zeros_like(images)
stop = isstop[j, :]
speed = speed[j, :, :]
for i in range(images.shape[0]):
showing_str = "STOP" if prediction[i] == 1 else "GO!"
showing_str += "\n" + str(np.linalg.norm(speed[i, :]))
showing_str += "\n" + "GT: STOP" if stop[i] == 1 else "\nGT: GO!"
images_txt[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str)
print("showing visualization for video %s" % name[0])
#vis_speed(speed, frame_rate)
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
return visualize_images(images_txt, frame_rate,
name=short_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images_txt, frame_rate)
def vis_discrete(tout, predict, frame_rate,
j=0, save_visualize=False, dir_name="temp"):
import data_providers.nexar_large_speed as provider
int2str = provider.MyDataset.turn_int2str
# city_data and only_seg are mutually exclusive, actually one flag is enough
if FLAGS.city_data == 1:
decoded = tout[0]
speed = tout[1]
name = tout[2]
isstop = tout[5]
turn = tout[6]
locs = tout[7]
elif FLAGS.only_seg == 1:
decoded = tout[0]
speed = tout[1]
name = tout[2]
isstop = tout[6]
turn = tout[7]
locs = tout[8]
else:
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
turn = turn[j, :, :]
for i in range(images.shape[0]):
# the ground truth course and speed
showing_str = "speed: %.1f m/s \ncourse: %.2f degree/s" % \
(locs[i, 1], locs[i, 0]/math.pi*180)
for k in range(4):
showing_str += "\n"+int2str[k]
gtline = move_to_line(locs[i,:], hi, wi)
FontHeight=18
FontWidth =8
for k in range(4):
images[i, :, :, :] = draw_bar_on_image(images[i,:,:,:],
(FontWidth*14, FontHeight*(2+k)),
fraction = turn[i, k],
fill=(255, 0, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = draw_bar_on_image(images[i, :, :, :],
(FontWidth * 20, FontHeight * (2 + k)),
fraction=predict[i, k],
fill=(0, 255, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
[gtline],
fontsize=15)
print("showing visualization for video %s" % name[j])
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(10):
this_name = short_name + "_" + str(i)
if not os.path.isfile(os.path.join(dir_name,this_name+'.mp4')):
break
return visualize_images(images, frame_rate,
name=this_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
def vis_discrete_simplified(tout, predict, frame_rate,
j=0, save_visualize=False, dir_name="temp"):
import data_providers.nexar_large_speed as provider
int2str = provider.MyDataset.turn_int2str
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
turn = turn[j, :, :]
for i in range(images.shape[0]):
# the ground truth course and speed
showing_str = ""
for k in range(4):
showing_str += int2str[k] + "\n"
FontHeight = 18
FontWidth = 8
for k in range(4):
images[i, :, :, :] = draw_bar_on_image(images[i, :, :, :],
(FontWidth * 14, FontHeight * k),
fraction=turn[i, k],
fill=(255, 0, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = draw_bar_on_image(images[i, :, :, :],
(FontWidth * 20, FontHeight * k),
fraction=predict[i, k],
fill=(0, 255, 0, 128),
height=FontHeight * 2 // 3,
length=FontWidth * 4)
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
fontsize=15)
print("showing visualization for video %s" % name[j])
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(10):
this_name = short_name + "_" + str(i)
if not os.path.isfile(os.path.join(dir_name, this_name + '.mp4')):
break
return visualize_images(images, frame_rate,
name=this_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
def generate_meshlist(arange1, arange2):
return np.dstack(np.meshgrid(arange1, arange2, indexing='ij')).reshape((-1,2))
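# Example (illustrative): generate_meshlist enumerates the Cartesian product of
# two index ranges as an (N*M, 2) array:
#   generate_meshlist(np.arange(2), np.arange(3))
#   # -> [[0 0], [0 1], [0 2], [1 0], [1 1], [1 2]]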
def draw_sector(image,
predict,
car_stop_model,
course_delta = 0.5 / 180 * math.pi,
speed_delta=0.3,
pdf_multiplier=255,
speed_multiplier = 5,
h=360, w=640,
max_speed=30,
uniform_speed=False,
consistent_vis=(False, 1e-3, 1e2),
has_alpha_channel=False):
course_samples = np.arange(-math.pi / 2-course_delta,
math.pi / 2+course_delta,
course_delta)
speed_samples = np.arange(0, max_speed+speed_delta, speed_delta)
total_pdf = car_stop_model.continous_pdf([predict],
generate_meshlist(course_samples, speed_samples),
"multi_querys")
total_pdf = np.reshape(total_pdf, (len(course_samples), len(speed_samples)))
if uniform_speed:
total_pdf = total_pdf / np.sum(total_pdf, axis=1, keepdims=True)
speed_scaled = max_speed * speed_multiplier
# potential xy positions to be filled
xy = generate_meshlist(np.arange(w / 2 - speed_scaled, w / 2 + speed_scaled),
np.arange(h - speed_scaled, h))
# filter out invalid speed
v=np.stack((xy[:,0]-w/2, h-xy[:,1]), axis=1)
speed_norm = np.sqrt(v[:,0]**2 + v[:,1]**2) *(1.0/speed_multiplier)
valid_speed = np.less(speed_norm, max_speed)
xy = xy[valid_speed, :]
speed_norm=speed_norm[valid_speed]
v=v[valid_speed]
course_norm = np.arctan(1.0*v[:, 0] / v[:, 1])
# search the course and speed
icourse = np.searchsorted(course_samples, course_norm)
ispeed = np.searchsorted(speed_samples, speed_norm)
green_portion = 1
total = total_pdf[icourse, ispeed]
    if not consistent_vis[0]:
total_max = np.amax(total)
total = total / total_max * 255*green_portion
else:
# consistent visualization between methods
MIN = consistent_vis[1]
MAX = consistent_vis[2]
total = np.maximum(MIN, total)
total = np.minimum(MAX, total)
#total = np.log(total) # map to log(MIN) to log(MAX)
#total = (total -np.log(MIN)) / (np.log(MAX) - np.log(MIN)) * 255
total = (total - MIN) / (MAX - MIN)
total = np.sqrt(total)
total = total * 255
# assign to image
image[xy[:, 1], xy[:, 0], :] *= (1-green_portion)
image[xy[:, 1], xy[:, 0], 1] += total
if has_alpha_channel:
image[xy[:, 1], xy[:, 0], 3] = 255
return image
def vis_continuous(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", return_first=False, **kwargs):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
images = images.astype('float64')
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
for i in range(images.shape[0]):
# the ground truth course and speed
showing_str = "speed: %.1f m/s \ncourse: %.2f degree/s" % \
(locs[i, 1], locs[i, 0] / math.pi * 180)
gtline = move_to_line(locs[i, :], hi, wi, 10)
images[i, :, :, :] = draw_sector(images[i, :, :, :],
predict[i:(i+1), :],
car_stop_model,
course_delta=0.3 / 180 * math.pi,
speed_delta=0.3,
pdf_multiplier=255*10,
speed_multiplier=wi/30/3,
h=hi, w=wi,
consistent_vis=(True, 1e-5, 0.3))
# get the MAP prediction
map = car_stop_model.continous_MAP([predict[i:(i+1), :]])
mapline = move_to_line(map.ravel(), hi, wi, 10)
        # draw the shorter line last so it is not overwritten by the longer one
lines_v = [(gtline, (255,0,0)), (mapline, (0, 0, 255))]
if locs[i, 1] < map.ravel()[1]:
lines_v = [lines_v[1], lines_v[0]]
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
lines_color=lines_v,
fontsize=15)
print("showing visualization for video %s" % name[j])
if return_first:
return images[0, :, :, :].astype(np.uint8)
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
return visualize_images(images, frame_rate,
name=short_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
def vis_continuous_simplified(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", vis_radius=10):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
images = images.astype('float64')
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
locs = copy.deepcopy(locs)
for i in range(images.shape[0]):
# the ground truth course and speed
locs[i, 1] = 10.0
# get the MAP prediction
map = car_stop_model.continous_MAP([predict[i:(i+1), :]])
map = map.ravel()
map[1] = 10.0
mapline = move_to_line(map, hi, wi, 10)
# get map2
map2 = car_stop_model.continous_MAP([predict[i:(i + 1), :]], return_second_best=True)
map2 = map2.ravel()
map2[1] = 10.0
mapline2 = move_to_line(map2, hi, wi, 10)
showing_str = [
[(0, 0), "driver's angular speed: %.2f degree/s" % (locs[i, 0] / math.pi * 180), (255, 0, 0)],
[(0, 20), "predicted angular speed: %.2f degree/s" % (map[0] / math.pi * 180), (0, 0, 255)]]
# disable the small str on top first
showing_str = ""
showing_str = "speed: %.1f m/s \ncourse: %.2f degree/s" % \
(locs[i, 1], locs[i, 0] / math.pi * 180)
gtline = move_to_line(locs[i, :], hi, wi, 10)
if FLAGS.is_MKZ_dataset:
# might be problematic since we enable the normalization
higher_bound = 0.3
else:
higher_bound = 3.0
images[i, :, :, :] = draw_sector(images[i, :, :, :],
predict[i:(i+1), :],
car_stop_model,
course_delta=0.1 / 180 * math.pi,
speed_delta=0.1,
pdf_multiplier=255*10,
speed_multiplier=int(wi/30/3),
h=hi, w=wi,
uniform_speed=True,
consistent_vis=(True, 1e-5, higher_bound))
        # MAP line disabled for now, since the MAP line is often not the one of interest
'''
# swap the shorter line to the latter, avoid overwriting
lines_v = [(gtline, (255,0,0)), (mapline, (0, 0, 255))]
if locs[i, 1] < map.ravel()[1]:
lines_v = [lines_v[1], lines_v[0]]
'''
lines_v = [(gtline, (255,0,0)), (mapline, (0,0,255)), (mapline2, (0, 255, 0))]
images[i, :, :, :] = write_text_on_image(images[i, :, :, :],
showing_str,
lines_color=lines_v,
fontsize=24)
print("showing visualization for video %s" % name[j])
if save_visualize:
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
return visualize_images(images, frame_rate,
name=short_name,
dir_name=dir_name,
delete_temp=False)
else:
return visualize_images(images, frame_rate)
# some visualization functions for the speed
def visLoc(locs, label="NotSet"):
axis = lambda i: [loc[i] for loc in locs]
import matplotlib.ticker as ticker
fig, ax = plt.subplots()
#plt.grid(True)
ax.plot(axis(0), axis(1), 'g^', ms=2)
ylim = ax.get_ylim()
xlim = ax.get_xlim()
ax.set_xlim(min(xlim[0],ylim[0]) ,max(xlim[1],ylim[1]))
ax.set_ylim(min(xlim[0],ylim[0]) ,max(xlim[1],ylim[1]))
plt.title("Moving paths from " + label)
plt.xlabel("West -- East")
plt.ylabel("South -- North")
plt.show()
def integral(speed, time0):
out = np.zeros_like(speed)
l = speed.shape[0]
for i in range(l):
s = speed[i, :]
if i > 0:
out[i, :] = out[i - 1, :] + s * time0
return out
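# Example (illustrative): `integral` dead-reckons positions by accumulating the
# per-frame speed vectors with a fixed time step time0; the first row stays at
# the origin. With 1-second steps:
#   integral(np.array([[1.0, 0.0], [1.0, 0.0]]), 1.0)
#   # -> [[0. 0.], [1. 0.]]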
def vis_speed(speed, hz):
visLoc(integral(speed, 1.0 / hz), "speed and course")
def softmax(x):
"""Compute softmax values for each sets of scores in x."""
# x has shape: #instances * #classes
maxes = np.max(x, axis=1)
e_x = np.exp(x - maxes[:, None])
sums = np.sum(e_x, axis=1)
return e_x / sums[:, None]
def read_video_file(video_path, HEIGHT, WIDTH):
# take a video's path and return its decoded contents
cmnd = ['ffmpeg',
'-i', video_path,
'-f', 'image2pipe',
'-loglevel', 'panic',
'-pix_fmt', 'rgb24',
'-vcodec', 'rawvideo', '-']
pipe = subprocess.Popen(cmnd, stdout=subprocess.PIPE, bufsize=10 ** 7)
pout, perr = pipe.communicate()
    image_buff = np.frombuffer(pout, dtype='uint8')
    if image_buff.size % (HEIGHT * WIDTH * 3):
        print("Height and Width are potentially not correct")
        return None
image_buff = image_buff.reshape((-1, HEIGHT, WIDTH, 3))
return image_buff
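# Usage sketch (illustrative; assumes ffmpeg is on the PATH and the clip decodes
# to rgb24 at the stated resolution):
#   frames = read_video_file('/path/to/clip.mp4', 360, 640)
#   # frames has shape (num_frames, 360, 640, 3) and dtype uint8, or is None if
#   # the byte count does not match HEIGHT x WIDTH x 3.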
def vis_discrete_colormap_antialias(tout, predict, frame_rate, j=0, save_visualize=False, dir_name="temp", string_type='image'):
if FLAGS.only_seg:
decoded = tout[0]
speed = tout[1]
name = tout[2]
isstop = tout[6]
turn = tout[7]
locs = tout[8]
else:
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
turn = turn[j, :, :]
def get_color(prob):
cm = pylab.get_cmap('viridis') # inferno
color = cm(prob) # color will now be an RGBA tuple
r = color[0] * 255
g = color[1] * 255
b = color[2] * 255
return r, g, b
def clamp(x):
x = float(x)
return max(0, min(x, 1))
def add_to_ada(ada, pos_x, pos_y, radius, angle_s, angle_e, ring_width, color_code, edge_color, alpha_value):
ada.drawing_area.add_artist(
Wedge((pos_x, pos_y), radius, angle_s, angle_e, width=ring_width # , color=color_code#'#DAF7A6'
, alpha=alpha_value, antialiased=True, ec=edge_color, fc=color_code))
def draw_cake(ada, pos_x, pos_y, radius, angle_s, angle_diff, ring_width, color_code, edge_color, alpha_value,
share, shift=45):
angle_s = angle_s + shift
for i in range(share):
if (angle_s + (i + 1) * (angle_diff) / share) == 360:
angle_end = 360
else:
angle_end = angle_s + (i + 1) * (angle_diff) / share
#print(i,'_______________________________________')
add_to_ada(ada, pos_x, pos_y, radius,
angle_s + i * (angle_diff) / share, angle_end,
ring_width, color_code=color_code, edge_color=edge_color, alpha_value=alpha_value[i])
def draw_pile_cake(ada, pos_x, pos_y, radius, angle_s, angle_diff, ring_width, color_code, edge_color, alpha_value,
share, x_frac, y_frac, split, fontsize=24, shift=45):
# draw the black one
draw_cake(ada, pos_x=pos_x, pos_y=pos_y, radius=radius, angle_s=angle_s, angle_diff=360, ring_width=None,
color_code='k', edge_color=None, alpha_value=[0.6], share=1)
# draw the green one
draw_cake(ada, pos_x=pos_x, pos_y=pos_y, radius=radius, angle_s=angle_s, angle_diff=360, ring_width=ring_width,
color_code=color_code, edge_color='#FFFFFF', alpha_value=alpha_value, share=4)
# draw the white edge
draw_cake(ada, pos_x=pos_x, pos_y=pos_y, radius=radius, angle_s=angle_s, angle_diff=360, ring_width=ring_width,
color_code='none', edge_color='#FFFFFF', alpha_value=[1, 1, 1, 1], share=4)
ada.da.add_artist(
ax.annotate(split, xy=(x_frac, y_frac), xycoords="axes fraction", fontsize=fontsize, color='w'))
def draw_cake_type(ada, string_type, action_mean, predict_mean):
if string_type == 'video':
draw_pile_cake(ada, pos_x=210, pos_y=70, radius=60, angle_s=0, angle_diff=360, ring_width=30,
color_code='#00FF00', edge_color=None, alpha_value=predict_mean, share=1,
x_frac=0.513, y_frac=0.895, split='P')
draw_pile_cake(ada, pos_x=80, pos_y=70, radius=60, angle_s=0, angle_diff=360, ring_width=30,
color_code='#00FF00', edge_color=None, alpha_value=action_mean, share=1,
x_frac=0.185, y_frac=0.895, split='G')
elif string_type == 'image':
draw_pile_cake(ada, pos_x=240, pos_y=70, radius=70, angle_s=0, angle_diff=360, ring_width=40,
color_code='#00FF00', edge_color=None, alpha_value=predict_mean, share=1,
x_frac=0.580, y_frac=0.89, split='P', fontsize=32)
draw_pile_cake(ada, pos_x=80, pos_y=70, radius=70, angle_s=0, angle_diff=360, ring_width=40,
color_code='#00FF00', edge_color=None, alpha_value=action_mean, share=1,
x_frac=0.18, y_frac=0.89, split='G', fontsize=32)
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(images.shape[0]):
action_mean = [clamp(turn[i, 0]+0.05), clamp(turn[i, 2]+0.05),
clamp(turn[i, 1]+0.1), clamp(turn[i, 3]+0.05)]
predict_mean = [clamp(predict[i, 0]+0.05), clamp(predict[i, 2]+0.05),
clamp(predict[i, 1]+0.05), clamp(predict[i, 3]+0.05)]
fig = plt.figure(figsize=(16, 12))
ax_original = plt.gca()
ax_original.set_axis_off()
ax_original.get_xaxis().set_visible(False)
ax_original.get_yaxis().set_visible(False)
plt.imshow(images[i, :, :, :])
plt.axis('off')
ax = fig.add_subplot(121, projection='polar')
ax_2 = fig.add_subplot(122, projection='polar')
ada = AnchoredDrawingArea(200, 100, 0, 0, loc=2, pad=0., frameon=False)
draw_cake_type(ada, string_type, action_mean, predict_mean)
ax.add_artist(ada)
ax.set_axis_off()
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
ax_2.set_axis_off()
ax_2.get_xaxis().set_visible(False)
ax_2.get_yaxis().set_visible(False)
if not os.path.exists(os.path.join(dir_name,'viz')):
os.mkdir(os.path.join(dir_name,'viz'))
if not os.path.exists(os.path.join(dir_name,'viz', short_name+string_type)):
os.mkdir(os.path.join(dir_name, 'viz', short_name+string_type))
        fig.savefig(os.path.join(dir_name, 'viz', short_name+string_type, '{0:04}.png'.format(i)),
                    bbox_inches='tight', pad_inches=-0.04, transparent=True, dpi=100)
print(short_name,' ', i, 'Done!')
plt.show()
plt.close()
images2video_highqual(frame_rate = 3,
name=short_name, dir_name=os.path.join(dir_name, 'viz', short_name+string_type))
def vis_continuous_colormap_antialias(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", vis_radius=10):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
#images = images.astype('float64')
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
def plot_greens(bin_ends, values, image_width, image_height, radius, driver_action):
# bins are: [0, bin_ends[0]], [bin_ends[0], bin_ends[1]] ...
# and the corresponding values to display are: values[0], values[1]
# the final results are added to ada
ada = AnchoredDrawingArea(radius * 2, radius, 0, 0, loc=10, pad=0., frameon=False)
def add_ada_custom(angle_s, angle_e, value, color):
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius, angle_s, angle_e, None, color, value)
def add_to_ada(ada, pos_x, pos_y, radius, angle_s, angle_e, ring_width, color_code, alpha_value):
ada.drawing_area.add_artist(
Wedge((pos_x, pos_y), radius, angle_s, angle_e, width=ring_width, fc=color_code # '#DAF7A6'
,ec = 'none', alpha=alpha_value, antialiased=True))
bin_ends = 180 - np.array(bin_ends)
bin_ends = bin_ends[::-1]
values = np.array(values)
values = np.squeeze(values)
values = values[::-1]
# add a black background
add_ada_custom(0, 180, 0.8, "#000000")
color_shading = "#00FF00"
for i in range(len(values)):
#print(bin_ends.shape, '____all____bin_____')
#print(values.shape, '___all_____values____')
if i < 5:
print(bin_ends[i], bin_ends[i + 1], values[i], '________________________')
add_ada_custom(bin_ends[i], bin_ends[i + 1], values[i], color_shading)
white_border = 1
border_color = '#FFFFFF'
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius + white_border, 0, 180, white_border,
border_color, 1)
tick_len = 20
tick_color = '#FFFFFF'
tick_width = 1.0 / 2
for i in range(len(bin_ends)):
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius + white_border,
bin_ends[i] - tick_width / 2, bin_ends[i] + tick_width / 2, tick_len, tick_color, 10)
driver_action = driver_action / 180.0 * math.pi
start = np.array([radius, -(image_height / 2 - radius / 2) - 2])
delta = np.array([radius * math.cos(driver_action), radius * math.sin(driver_action)]) * 0.8
color_driver = "#0000FF"
ada.drawing_area.add_artist(FancyArrowPatch(start, start + delta, linewidth=2, color=color_driver))
return ada
_, short_name = os.path.split(name[j])
short_name = short_name.split(".")[0]
for i in range(images.shape[0]):
# the ground truth course and speed
locs[i, 1] = 10.0
# get the MAP prediction
fig = plt.figure(figsize=(16, 12))
course_bin, speed_bin = car_stop_model.get_bins()
course_bin = [-math.pi/2] + course_bin + [math.pi/2]
course_bin = np.array(course_bin)*180/math.pi + 90
ax_original = plt.gca()
ax_original.set_axis_off()
ax_original.get_xaxis().set_visible(False)
ax_original.get_yaxis().set_visible(False)
plt.imshow(images[i, :, :, :])
plt.axis('off')
course = softmax(predict[i:(i + 1), 0:FLAGS.discretize_n_bins])
course = course/np.max(course)
print(course_bin, course, '!'*10)
ada2 = plot_greens(course_bin, course, 1280, 501, 200, -locs[i, 0]*180/math.pi+90)
ax_original.add_artist(ada2)
plt.show()
if not os.path.exists(os.path.join(dir_name,'viz')):
os.mkdir(os.path.join(dir_name,'viz'))
if not os.path.exists(os.path.join(dir_name,'viz', short_name)):
os.mkdir(os.path.join(dir_name, 'viz', short_name))
        fig.savefig(os.path.join(dir_name, 'viz', short_name, '{0:04}.png'.format(i)),
                    bbox_inches='tight', pad_inches=-0.04, transparent=True, dpi=100)
plt.close()
print(short_name)
print("showing visualization for video %s" % name[j])
def vis_continuous_interpolated(tout, predict, frame_rate, car_stop_model,
j=0, save_visualize=False, dir_name="temp", vis_radius=10, need_softmax=True, return_first=False):
decoded = tout[0]
speed = tout[1]
name = tout[2]
highres = tout[3]
isstop = tout[4]
turn = tout[5]
locs = tout[6]
decoded = highres
images = copy.deepcopy(decoded[j, :, :, :, :])
_, hi, wi, _ = images.shape
locs = locs[j, :, :]
def gen_mask(bin_ends, values, radius, height, width):
# convert bin_ends to bin centers
new_ends = []
for i in range(len(bin_ends) - 1):
new_ends.append((bin_ends[i] + bin_ends[i + 1]) / 2)
# RGBA
out = np.zeros((height, width, 4), dtype=np.uint8)
xy = np.dstack(np.meshgrid(np.arange(width / 2 - radius, width / 2 + radius),
np.arange(height - radius, height),
indexing='ij')).reshape((-1, 2))
# filter out invalid speed
v = np.stack((xy[:, 0] - width / 2, height - xy[:, 1]), axis=1)
speed_norm = np.sqrt(v[:, 0] ** 2 + v[:, 1] ** 2)
valid_speed = np.less(speed_norm, radius)
xy = xy[valid_speed, :]
speed_norm = speed_norm[valid_speed]
v = v[valid_speed]
course_norm = np.arccos(1.0 * v[:, 0] / speed_norm)
course_norm = np.degrees(course_norm)
value = np.interp(course_norm, new_ends, values)
out[xy[:, 1], xy[:, 0], 1] = 255 * value
out[xy[:, 1], xy[:, 0], 3] = 255
return out
def plot_greens(bin_ends, values, image_width, image_height, radius, driver_action):
ada = AnchoredDrawingArea(radius * 2, radius, 0, 0, loc=10, pad=0., borderpad=0., frameon=False)
def add_to_ada(ada, pos_x, pos_y, radius, angle_s, angle_e, ring_width, color_code, alpha_value):
ada.drawing_area.add_artist(
Wedge((pos_x, pos_y), radius, angle_s, angle_e, width=ring_width, fc=color_code # '#DAF7A6'
, ec='none', alpha=alpha_value, antialiased=True))
bin_ends = 180 - np.array(bin_ends)
bin_ends = bin_ends[::-1]
values = np.array(values)
values = np.squeeze(values)
values = values[::-1]
mask = gen_mask(bin_ends, values, radius, image_height, image_width)
plt.imshow(mask, alpha=0.8)
white_border = 2
border_color = '#FFFFFF'
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius + white_border, 0, 180, white_border + 1,
border_color, 1)
tick_len = 20
tick_color = '#FFFFFF'
tick_width = 1.0 / 2
for i in range(len(bin_ends)):
if abs(bin_ends[i] - 90) > 10:
add_to_ada(ada, radius, -(image_height / 2 - radius / 2), radius + white_border,
bin_ends[i] - tick_width / 2, bin_ends[i] + tick_width / 2, tick_len, tick_color, 10)
driver_action = driver_action / 180.0 * math.pi
        start = np.array([radius, -(image_height / 2 - radius / 2)])
import numpy as np
import pandas as pd
import h5py
import os
import scipy.sparse
import warnings
import thimbles as tmb
from thimbles.modeling import Model
from thimbles import resource_dir
from .profiles import convolved_stark
from .spectrum import Spectrum
from thimbles.tasks import task
from thimbles.sqlaimports import *
data_cols = np.loadtxt(os.path.join(resource_dir, "transition_data", "Hydrogen_lines.txt"), usecols=[0, 1, 2, 3, 5])
hlines = pd.DataFrame(data=dict(wv=data_cols[:, 0],
nlow=np.array(data_cols[:, 1], dtype=int),
nup=np.array(data_cols[:, 2], dtype=int),
ep=data_cols[:, 3],
loggf=data_cols[:, 4]),
)
@task()
def get_H_mask(wvs, masking_radius=-3.0, nup_max=15):
"""generates a mask which is false close to hydrogen features
inputs
wvs: np.ndarray
the wavelengths at which to evaluate the mask
masking_radius: float
the radius around each hydrogen line to exclude.
if the radius is positive it is interpreted as a constant number of
angstroms to exclude. If the number is negative it is interpreted as
the base 10 logarithm of the fraction of the line center wavelength
to exclude. That is for a line at wv lambda the radius of exclusion is
(10**masking_radius)*lambda.
nup_max: int
if specified hydrogen lines with upper energy levels above nup_max will
not contribute to the mask.
"""
min_wv = np.min(wvs)
max_wv = np.max(wvs)
mask = np.ones(wvs.shape, dtype=bool)
for line_idx in range(len(hlines)):
line_dat = hlines.iloc[line_idx]
lwv = line_dat["wv"]
nup = line_dat["nup"]
if nup > nup_max:
continue
if masking_radius < 0:
mrad = np.power(10.0, masking_radius)*lwv
else:
mrad = masking_radius
if lwv < (min_wv - mrad):
continue
if lwv > (max_wv + mrad):
continue
mask *= np.abs(wvs-lwv) > mrad
return mask
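# Usage sketch (illustrative, assuming the @task() wrapper leaves the function
# directly callable): mask a relative radius of 10**-3 of each line-center
# wavelength around every hydrogen line with nup <= 15, then keep the clean pixels.
#   wvs = np.linspace(4000.0, 7000.0, 10000)
#   mask = get_H_mask(wvs, masking_radius=-3.0, nup_max=15)
#   clean_wvs = wvs[mask]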
_lemke_dat = None
def try_load_lemke():
    global _lemke_dat
    if _lemke_dat is None:
        try:
            hf = h5py.File(os.path.join(resource_dir, "transition_data", "lemke.h5"), "r")
            _lemke_dat = hf
        except Exception as e:
            warnings.warn(str(e))
    # always return the cached handle so repeat callers get the loaded file
    return _lemke_dat
class HydrogenLineOpacity(object):
def __init__(self, wv, nlow, nup):
        lemke_dat = try_load_lemke()
self.nlow = nlow
self.nup = nup
self.wv = wv
low_str = "{}".format(int(nlow))
up_str = "{}".format(int(nup))
if low_str in list(lemke_dat.keys()):
if up_str in list(lemke_dat[low_str].keys()):
pass
base_group = "{}/{}/".format(low_str, up_str)
self.log_nes = np.array(lemke_dat[base_group+"log_ne"])
self.log_ts = np.array(lemke_dat[base_group+"log_t"])
self.alphas = np.array(lemke_dat[base_group+"alphas"])
self.alpha_binner = tmb.coordinatization.as_coordinatization(self.alphas)
profile_grid = np.array(lemke_dat[base_group+"profile"])
pinterp = HypercubeGridInterpolator(
coordinates=[self.log_ts, self.log_nes],
grid_data=profile_grid
)
self.pinterp = pinterp
def __call__(self, wvs, parameters):
"""evaluate the line opacity at the given wavelengths
Log(Temperature), Log(electron density) = parameters
"""
        # alpha definition: alpha = delta_wv / F0, with F0 = 1.25e-9 * ne^(2/3)
input_alpha = np.abs(wvs-self.wv)
input_alpha /= np.power(10.0, (2.0/3.0)*parameters[1] -8.9030899869919438)
input_alpha = np.clip(input_alpha, self.alphas[0], self.alphas[-1])
alpha_profile = self.pinterp(parameters)
alpha_indicies = self.alpha_binner.coordinates_to_indicies(input_alpha)
min_indexes = np.array(alpha_indicies, dtype=int)
mixing_ratio = alpha_indicies - min_indexes
interped_profile = alpha_profile[min_indexes]*(1-mixing_ratio)
interped_profile += alpha_profile[np.clip(min_indexes+1, 0, len(alpha_profile)-1)]*mixing_ratio
return np.exp(interped_profile)
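# Usage sketch (illustrative; the temperature and electron-density values below
# are made up): evaluate the H-alpha (nlow=2, nup=3) profile on a wavelength grid,
# with parameters = [log10(T), log10(n_e)]:
#   halpha = HydrogenLineOpacity(6562.8, nlow=2, nup=3)
#   profile = halpha(wvs, [np.log10(9000.0), 13.0])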
class HydrogenForegroundOpacityModel(Model, Spectrum):
_id = Column(Integer, ForeignKey("Model._id"), primary_key=True)
__mapper_args__={"polymorphic_identity":"HydrogenForegroundOpacityModel"}
def __init__(self, wvs, strength, temperature, electron_density):
Model.__init__(self)
Spectrum.__init__(self, wvs, np.ones(len(wvs)))
self.max_delta_wv_frac = 0.01
self._temperature = temperature
self._electron_density = electron_density
self.npts_model = len(self.wv)
min_wv = self.wv[0]
max_wv = self.wv[-1]
self.hdat = hlines[(hlines.wv > min_wv)*(hlines.wv < max_wv)].copy()
self.series_ids = np.unique(self.hdat.nlow.values)
self.series_index = {self.series_ids[idx]:idx for idx in range(len(self.series_ids))}
strength = np.atleast_1d(strength)
if len(strength) == 1:
strength = np.repeat(strength, len(self.series_ids))
elif len(strength) != len(self.series_ids):
raise ValueError("different number of strengths than there are available Hydrogen Series!")
self._strength = strength
self.initialize_lines()
self.calc_h_opac()
self.calc_opac()
@property
def electron_density(self):
return self._electron_density
@electron_density.setter
def electron_density(self, value):
self._electron_density = value
self.calc_h_opac()
self.calc_opac()
@property
def temperature(self):
return self._temperature
@temperature.setter
def temperature(self, value):
self._temperature = value
self.calc_h_opac()
def initialize_lines(self):
self.hprofiles = [[] for i in range(len(self.series_ids))]
for series_idx in range(len(self.series_ids)):
series_id = self.series_ids[series_idx]
series_dat = self.hdat[self.hdat.nlow == series_id]
for l_idx in range(len(series_dat)):
ldat = series_dat.iloc[l_idx]
cent_wv = ldat["wv"]
nlow, nup = ldat["nlow"], ldat["nup"]
self.hprofiles[series_idx].append(HydrogenLineOpacity(cent_wv, nlow, nup))
def calc_h_opac(self):
#self._h_opac_profile = np.zeros(self.wv.shape)
opac_vecs = [np.zeros(self.wv.shape) for i in range(len(self.series_ids))]
theta = 5040.0/self.temperature
for series_idx in range(len(self.series_ids)):
series_id = self.series_ids[series_idx]
series_dat = self.hdat[self.hdat.nlow == series_id]
rel_strengths = np.power(10.0, series_dat["loggf"]-theta*(series_dat["ep"]))
for line_idx, line_profile in enumerate(self.hprofiles[series_idx]):
rel_strength = rel_strengths.iloc[line_idx]
lb, ub = self.get_index(line_profile.wv*np.array([1.0-self.max_delta_wv_frac, 1.0+self.max_delta_wv_frac]), clip=True)
opac_vecs[series_idx][lb:ub+1] += line_profile(self.wv[lb:ub+1], [np.log10(self.temperature), np.log10(self.electron_density)])
self.opac_matrix = scipy.sparse.bmat(opac_vecs).transpose()
@property
def strength(self):
return self._strength
@strength.setter
def strength(self, value):
        self._strength = np.clip(value, 0.01, np.inf)
import torch
import torch.autograd
from torch.autograd import Variable
import torch.optim as optim
import torch.nn as nn
import numpy as np
import scipy as sp
import scipy.linalg
from qpsolvers import solve_qp
from core.MADDPG import MADDPGagent
from core.ConstraintNetwork import ConstraintNetwork
import ipdb
class SafeMADDPGagent(MADDPGagent):
def __init__(self, N_agents, state_dim, act_dim,
constraint_networks_dir, constraint_dim,critic_state_mask = [0,1,2,3,-1,-2], col_margin=0.33,
actor_learning_rate=1e-4,
critic_learning_rate=1e-3, gamma=0.99, tau=1e-2, max_memory_size=30000,
hidden_size_critic = [500, 500], hidden_size_actor = [100, 100],
batch_size = 128, soften = True):
# Call MADDPGagent's constructor
super().__init__(N_agents = N_agents, state_dim = state_dim,
act_dim = act_dim, critic_state_mask = critic_state_mask,
actor_learning_rate = actor_learning_rate,
critic_learning_rate = critic_learning_rate, gamma = gamma,
tau = tau, max_memory_size = max_memory_size,
hidden_size_critic = hidden_size_critic, hidden_size_actor = hidden_size_actor,
batch_size = batch_size)
# Extra Params
self.col_margin = col_margin
self.constraint_dim = constraint_dim
self.total_state_dim = self.state_dim * self.N_agents
self.total_constraint_dim = self.constraint_dim * self.N_agents
self.total_action_dim = self.act_dim * self.N_agents
self.constraint_nets = self.total_constraint_dim*[None]
# Initialize constraint networks
for i in range(self.total_constraint_dim):
self.constraint_nets[i] = ConstraintNetwork(self.total_state_dim, self.total_action_dim).double()
self.constraint_nets[i].load_state_dict(torch.load(constraint_networks_dir
+ "constraint_net_" + str(i) + ".pkl"))
# Define Solver Globaly
self.solver_interventions = 0
self.solver_infeasible = 0
# Choose Solver
if soften:
self.correct_actions = self.correct_actions_soften
else:
self.correct_actions = self.correct_actions_hard
self.soften = soften
def reset_metrics(self):
self.solver_interventions = 0
self.solver_infeasible = 0
def get_interventions(self):
return self.solver_interventions
def get_infeasible(self):
return self.solver_infeasible
@torch.no_grad()
def get_action(self, state, constraint):
# Original MADDPG
actions = []
for i in range(self.N_agents):
s = torch.tensor(state[i], dtype=torch.float64)
action = self.actors[i](s).detach()
actions.append(action)
# merge action and state vectors of all agents
action_total = torch.cat(actions).numpy()
return actions
@torch.no_grad()
def correct_actions_hard(self, state, actions, constraint):
actions = np.concatenate(actions)
state = torch.tensor(np.concatenate(state))
# (1) Problem Variables
# Problem specific constants
I = np.eye(self.total_action_dim)
ones = np.ones(self.total_action_dim)
C = np.concatenate(constraint)
# Formulate the constraints using neural networks
G = np.zeros([self.total_action_dim, self.total_action_dim])
for i, net in enumerate(self.constraint_nets):
G[i, :] = net(state).numpy()
# (2) Problem Variables in QP form
# Cost Function
q = -actions
P = np.eye(self.total_action_dim)
# Constraints
A = np.concatenate([-G, I, -I])
ub = np.concatenate([C - self.col_margin, ones, ones])
lb = None
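        # The quadratic program assembled above, spelled out for readability
        # (objective equivalent to minimizing ||x - actions||^2 up to a constant):
        #   minimize    0.5 * x^T P x + q^T x        with P = I, q = -actions
        #   subject to  C + G x >= col_margin        (encoded as -G x <= C - col_margin)
        #               -1 <= x <= 1                 (encoded as  I x <= 1 and -I x <= 1)
        # solve_qp expects the stacked inequality form A x <= ub, hence the
        # concatenation of [-G, I, -I] with matching upper bounds.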
# Solve Optimization Problem
try:
x = solve_qp(P.astype(np.float64), q.astype(np.float64), A.astype(np.float64),
ub.astype(np.float64), None, None, None, None)
        except Exception:
            self.solver_infeasible += 1
return actions
# Count Solver interventions
if np.linalg.norm(actions - x) > 1e-3:
self.solver_interventions += 1
return x
@torch.no_grad()
def correct_actions_soften(self, state, actions, constraint):
actions = np.concatenate(actions)
state = torch.tensor(np.concatenate(state))
# (1) Create solver as a globar variable
l1_penalty = 1000
# (2) Problem Variables
# Problem specific constants
I = np.eye(self.total_action_dim)
Z = np.zeros([self.total_action_dim, self.total_action_dim])
ones = np.ones(self.total_action_dim)
zeros = np.zeros(self.total_action_dim)
C = np.concatenate(constraint) - self.col_margin
# Formulate the constraints using neural networks
        G = np.zeros([self.total_action_dim, self.total_action_dim])
import h5py
import os, sys, glob
import numpy as np
import plotly.offline as offline
from preprocessing import analysis_pp
from analysis.general_utils import aqua_utils, saving_utils, plotly_utils, general_utils, compare_astro_utils, correlation_utils, stat_utils
from scipy.stats import power_divergence
from scipy.stats import ttest_ind_from_stats
import csv
import scipy.signal as ss
import math
import time
from pandas import DataFrame
from scipy import optimize
import pandas as pd
import matplotlib.pyplot as plt
from collections import deque
class AstrocytePlotter():
def __init__(self, output_folder):
self.output_folder = output_folder
#For correlation plots
self.filter_probs = [0.05, 0.10, 0.25]
self.n_samples_corr_fake = 20
self.num_frames_splits_l = [250, 500, 1000, 3000, 6000, 12000, 24000, 100000]
self.num_frames_splits_m_l = [0.5, 1, 2, 5, 10, 15, 20, 25, 30, 35, 40, 45, 50, 60, 70, 80]
self.num_frames_splits_splits_m_l = [10, 15, 20, 25, 30, 35, 40]
self.max_split_comparison_samples = 100
self.behaviours_list_a = ['default', 'rest', 'running',
'running_start', 'running_before', 'stick',
'stick_start', 'stick_end', 'stick_expect',
'stick_rest', 'whisker_rest_stick', 'whisker_stick']
self.behaviours_list_small = ['whisker_rest_stick', 'default', 'rest', 'running', 'stick']
def setup_plot_folders(self, output_experiment_path):
paths = ['borders', 'behaviour_heatmaps', 'behaviours_basic',
'signal_delays', 'signal_durations', 'triplet', 'behaviour_activity',
'behaviour_areas', 'signal_basic_samples', 'signal_behaviour_samples',
'correlations', 'random_events', 'splits', 'splits_self', 'signal_amplitudes',
'signal_proportion_delays', 'signal_stick_run_samples', 'splits_split_split',
'triplet_bar', 'size_v_time_corr',
'behaviour_heatmaps_threshold_with_random',
'split_behaviour_grids',
'size_histogram_bh_comparison_individual', 'amplitude_histogram_bh_comparison_individual', 'duration_histogram_bh_comparison_individual',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path, 'plots' , p))
except:
pass
def setup_file_folders(self, output_experiment_path):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path, 'files', p))
os.makedirs(os.path.join(output_experiment_path, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_comparison(self, output_experiment_path_comparison):
paths = ['behaviour_heatmaps', 'triplet', 'intersection', 'correlations', 'align',
'intersection_border_xcorr_aligned',]
for p in paths:
try:
os.makedirs(os.path.join(output_experiment_path_comparison, 'plots', p))
except:
print('Folder structure exists?')
def setup_file_folders_comparison(self, output_experiment_path_comparison):
paths = ['correlations', 'csv']
for p in paths:
try:
print(os.path.join(output_experiment_path_comparison, 'files', p))
os.makedirs(os.path.join(output_experiment_path_comparison, 'files', p))
except:
print('Folder structure exists?')
def setup_plot_folders_all_comparison(self, output_experiment_path_all_comparison):
#print(output_experiment_path_all_comparison)
paths = ['size_histogram_comparison', 'amplitude_histogram_comparison', 'duration_histogram_comparison',
'size_histogram_bh_comparison', 'amplitude_histogram_bh_comparison', 'duration_histogram_bh_comparison',
'activity_all', 'activity_all_number_minute', 'waterfall_together', 'signal_proportion_delays',
'signal_proportion_delays_alt_average_proportions',
'behaviour_heatmaps_V2_comparison_scale',
'bar_rest_run_all',
'bar_rest_rest_stick_all',
'bar_run_run_stick_all',
'dot_rest_run_pair_all',
'bar_run_stick_run_transition_all',
'rest_to_run_proportions_alt',
'run_to_rest_proportions_alt',
'run_stick_run_proportions_alt',
'run_stick_run_proportions_alt_filter_max_3_frames',
'run_stick_run_proportions_alt_filter_max_5_frames',
'rest_to_run_amplitudes_default_alt',
'rest_to_run_amplitudes_alt',
'rest_to_run_durations_alt',
'rest_to_run_sizes_alt',
'rest_to_run_speed_alt',
'rest_to_run_pupil_alt',
'run_to_rest_amplitudes_default_alt',
'run_to_rest_amplitudes_alt',
'run_to_rest_durations_alt',
'run_to_rest_sizes_alt',
'rest_to_run_amplitudes_default_outlier_alt',
'rest_to_run_amplitudes_outlier_alt',
'rest_to_run_durations_outlier_alt',
'rest_to_run_sizes_outlier_alt',
'run_to_rest_amplitudes_default_outlier_alt',
'run_to_rest_amplitudes_outlier_alt',
'run_to_rest_durations_outlier_alt',
'run_to_rest_sizes_outlier_alt',
'run_to_rest_speed_alt',
'run_to_rest_pupil_alt',
'run_stick_run_amplitudes_default_alt',
'run_stick_run_amplitudes_alt',
'run_stick_run_durations_alt',
'run_stick_run_sizes_alt',
'run_stick_run_amplitudes_default_outlier_alt',
'run_stick_run_amplitudes_outlier_alt',
'run_stick_run_durations_outlier_alt',
'run_stick_run_sizes_outlier_alt',
'run_stick_run_speed_alt',
'run_stick_run_pupil_alt',
'run_stick_run_amplitudes_default_alt_filter_max_3_frames',
'run_stick_run_amplitudes_alt_filter_max_3_frames',
'run_stick_run_durations_alt_filter_max_3_frames',
'run_stick_run_sizes_alt_filter_max_3_frames',
'run_stick_run_speed_alt_filter_max_3_frames',
'run_stick_run_pupil_alt_filter_max_3_frames',
'run_stick_run_amplitudes_default_alt_filter_max_5_frames',
'run_stick_run_amplitudes_alt_filter_max_5_frames',
'run_stick_run_durations_alt_filter_max_5_frames',
'run_stick_run_sizes_alt_filter_max_5_frames',
'run_stick_run_speed_alt_filter_max_5_frames',
'run_stick_run_pupil_alt_filter_max_5_frames',
'all_amplitudes', 'all_durations', 'all_sizes',
'all_amplitudes_filt_bh', 'all_durations_filt_bh', 'all_sizes_filt_bh',
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'correlations_csv',
'correlations_long_events_csv',
'correlations_short_events_csv',
'correlations_no_align_csv',
'correlations_no_align_long_events_csv',
'correlations_no_align_short_events_csv',
'control',
'outliers',
'triplet_dot_all',
'size_v_time_corr_ALL',
'speed_v_events_ALL',
'split_correlation_all',
'behaviour_over_recording',
'pixel_distribution',
'splits_self_all',
]
data_paths = [
'correlations',
'correlations_long_events',
'correlations_short_events',
'correlations_no_align',
'correlations_no_align_long_events',
'correlations_no_align_short_events',
'control',
'outliers',
'behaviour_ratios',
'top_average_values',
'split_correlation_all',
'splits_self_all'
]
for p in paths:
#print('Trying...', p)
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'plots', p))
except:
print('Folder structure exists?')
for p in data_paths:
try:
os.makedirs(os.path.join(output_experiment_path_all_comparison, 'data', p))
except:
print('Folder structure exists?')
def get_output_experiment_path(self, astroA, output_folder):
experiment_id = '/'.join(astroA.experiment_path.split('/')[-2:])
output_experiment_path = os.path.join(output_folder, experiment_id)
return output_experiment_path
def plot_all_single(self, astroA):
output_experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Making dirs', output_experiment_path)
self.setup_plot_folders(output_experiment_path)
print('Plotting behaviours basic...')
#Behaviour basic
figs_basic_plots = self.get_behaviour_basic_plots(astroA)
for fig_k in figs_basic_plots.keys():
saving_utils.save_plotly_fig(figs_basic_plots[fig_k], os.path.join(output_experiment_path, 'plots', 'behaviours_basic', '{}'.format(fig_k)), width=1000, height=400)
print('Plotting random samples of signals...')
fig_signals = self.get_signal_figs_samples(astroA, 20)
for i, fig_signal in enumerate(fig_signals):
fig_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_basic_samples', 'signal_{}'.format(i))
saving_utils.save_plotly_fig(fig_signal, fig_signal_path)
print('Plotting borders...')
#Borders plot
fig_border = self.get_border_plot(astroA)
saving_utils.save_plotly_fig(fig_border, os.path.join(output_experiment_path, 'plots' , 'borders', 'border'))
print('Plotting behaviour heatmaps...')
#Behaviour heatmaps
fig_heatmap_grids, fig_heatmap_dff_grids = self.get_behaviour_contour_plots(astroA)
heatmap_grid_base_path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps')
for k in fig_heatmap_grids.keys():
saving_utils.save_plotly_fig(fig_heatmap_grids[k], os.path.join(heatmap_grid_base_path, k))
saving_utils.save_plotly_fig(fig_heatmap_dff_grids[k], os.path.join(heatmap_grid_base_path, k + 'dff'))
print('Plotting behaviour activity bar plot...')
behaviour_activity_path = os.path.join(output_experiment_path, 'plots', 'behaviour_activity', 'activity')
fig_behaviour_activity = self.get_behaviour_activity_plot(astroA)
        print('BEHAVIOUR ACTIVITY PATH:\n', behaviour_activity_path)
saving_utils.save_plotly_fig(fig_behaviour_activity, behaviour_activity_path, width=1200, height=800)
print('Plotting behaviour event size bar plot...')
behaviour_area_path = os.path.join(output_experiment_path, 'plots', 'behaviour_areas', 'areas')
fig_behaviour_area = self.get_behaviour_area_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_area, behaviour_area_path)
print('Plotting behaviour amplitude size bar plot...')
behaviour_amplitude_path = os.path.join(output_experiment_path, 'plots', 'signal_amplitudes', 'amplitudes')
fig_behaviour_amplitude = self.get_behaviour_amplitude_bar_plot(astroA)
saving_utils.save_plotly_fig(fig_behaviour_amplitude, behaviour_amplitude_path)
print('Plotting random samples of signals on different behaviours...')
fig_bk_signals = self.get_signal_bk_figs_samples(astroA, 3)
for bk in fig_bk_signals.keys():
for i, fig_bk_signal in enumerate(fig_bk_signals[bk]):
fig_bk_signal_path = os.path.join(output_experiment_path, 'plots', 'signal_behaviour_samples', 'signal_{}-{}'.format(bk, i))
saving_utils.save_plotly_fig(fig_bk_signal, fig_bk_signal_path)
print('Plotting local signal samples with stick and running...')
stick_run_sample_path = os.path.join(output_experiment_path, 'plots', 'signal_stick_run_samples')
fig_stick_run_samples_l = self.get_stick_run_sample_figs(astroA)
for i, sample_figs in enumerate(fig_stick_run_samples_l):
saving_utils.save_plotly_fig(sample_figs[0], os.path.join(stick_run_sample_path, '{}-running'.format(i)))
saving_utils.save_plotly_fig(sample_figs[1], os.path.join(stick_run_sample_path, '{}-stick'.format(i)))
for j in range(min(10, len(sample_figs[2]))):
saving_utils.save_plotly_fig(sample_figs[2][j], os.path.join(stick_run_sample_path, '{}-signal_{}'.format(i, j)))
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
#Area: None, 60, num_bins = 10
#Duration: None, 30, num_bins = 10
#dff : 0.6, 5, num_bins = 20
print('Comparing behaviour distribution plots for SINGLE...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence))
plot, stats_d = self.measure_distribution_bh_compare_plot([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode='MOA')
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
'''
for confidence in [True]:
for with_log in [False, True]:
try:
measure_name = aqua_utils.get_measure_names(measure)
plot, stats_d = self.measure_distribution_bh_compare_plot_exponential_fit([astroA], behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=False, with_stats=True, with_log=with_log)
path = os.path.join(output_experiment_path, 'plots', '{}_histogram_bh_comparison_individual'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}_EXPFIT-withlog={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, with_log))
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA.fr, axis='x')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
if len(name.split('__')) == 2:
tx_name = name.split('__')[0] + '_expfit'
else:
tx_name = name
print('TX NAME', name)
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(tx_name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(tx_name)), np.array(temp_d['data']).transpose(), delimiter=",")
saving_utils.save_plotly_fig(plot, path)
print('THE STAT HERE?', stats_d)
except Exception as e:
print('EXCEPTION\n\n\n', 'CONF', confidence, 'LOG', with_log, 'measure' ,measure)
'''
print('Plotting signal durations...')
#Signal durations plot
durations_base_path = os.path.join(output_experiment_path, 'plots', 'signal_durations')
fig_durations = self.get_signal_durations_plot(astroA)
for k in fig_durations.keys():
saving_utils.save_plotly_fig(fig_durations[k], os.path.join(durations_base_path, k + '-durations'))
'''
if astroA.aqua_bound == True:
print('Plotting triplet plot...')
#Triplet plot
triplet_base_path = os.path.join(output_experiment_path, 'plots' , 'triplet')
radii_path = os.path.join(output_experiment_path, 'plots', 'triplet', 'radii')
fig_triplets, fig_radii_border = self.get_triplet_plots(astroA, n_bins=8)
for k in fig_triplets.keys():
saving_utils.save_plotly_fig(fig_triplets[k], os.path.join(triplet_base_path, k + '-triplet'))
saving_utils.save_plotly_fig(fig_radii_border, radii_path)
print('Plotting bar plots (triplet plot bands) num_events, duration, amplitude, ')
measure_names = [None, 'Area', 'Amplitude', 'Time (s)']
for bh in ['default', 'rest', 'running', 'stick', 'stick_rest', 'stick_run_ind_15']:
for i, measure in enumerate([None, 'area', 'dffMax2', 'time_s']):
path = os.path.join(output_experiment_path, 'plots', 'triplet_bar', '{}_{}'.format(bh, measure))
if bh in astroA.event_subsets:
fig = self.triplet_bar_plot(astroA, bh=bh, measure=measure, n_bins=8, y_title=measure_names[i])
print('SAVING TRIPLET BAR')
saving_utils.save_plotly_fig(fig, path)
'''
'''
print('Plotting Signal duration split relative differences...')
duration_split_differences_path = os.path.join(output_experiment_path, 'plots', 'signal_durations', 'duration_splits_relative_differences')
fig_duration_split_differences = self.get_duration_split_differences_from_default(astroA)
saving_utils.save_plotly_fig(fig_duration_split_differences, duration_split_differences_path)
'''
'''
#Signal delays plot
signal_delays_path = os.path.join(output_experiment_path, 'plots' , 'signal_delays')
print('Plotting signal delays')
fig_delays_waterfall_d, fig_delays_waterfall_interpolate_d = self.get_waterfall_delays_plot_all(astroA)
for fig_k in fig_delays_waterfall_d.keys():
print('FIG K', fig_k)
saving_utils.save_plotly_fig(fig_delays_waterfall_d[fig_k], os.path.join(signal_delays_path, fig_k + '-delays_waterfall'))
saving_utils.save_plotly_fig(fig_delays_waterfall_interpolate_d[fig_k], os.path.join(signal_delays_path, fig_k + '-delays_waterfall_interpolate'))
print('Plotting singal proportion delays...')
fig_proportion_delays_path = os.path.join(output_experiment_path, 'plots', 'signal_proportion_delays')
fig_proportion_delays_d = self.get_proportion_delays_plot_all([astroA])
for fig_k in fig_proportion_delays_d.keys():
saving_utils.save_plotly_fig(fig_proportion_delays_d[fig_k], os.path.join(fig_proportion_delays_path, fig_k))
print('Plotting sample frame split examples...')
figs_frame_split_examples = self.get_frame_split_example_plots(astroA)
for pk in figs_frame_split_examples.keys():
for frame_split in figs_frame_split_examples[pk].keys():
figs_frame_split_example_path = os.path.join(output_experiment_path, 'plots', 'correlations', 'frame_split_pair_example_frames_{}_p={}'.format(frame_split, pk))
saving_utils.save_plotly_fig(figs_frame_split_examples[pk][frame_split], figs_frame_split_example_path)
print('Plotting random astrocyte FULL sample plots...')
figs_random_event_path = os.path.join(output_experiment_path, 'plots', 'random_events')
fig_l = self.get_random_astrocyte_plot(astroA)
for i, fig in enumerate(fig_l):
saving_utils.save_plotly_fig(fig, os.path.join(figs_random_event_path, 'sample_{}'.format(i)))
'''
'''
print('Plotting split counter')
figs_frame_split = self.get_compare_frame_split_plots(astroA)
for pk in figs_frame_split.keys():
figs_frame_split_path = os.path.join(output_experiment_path, 'plots', 'splits', 'splits_p={}'.format(pk))
saving_utils.save_plotly_fig(figs_frame_split[pk], figs_frame_split_path)
#TODO RUN THIS
print('Plotting frame split xcorr value to full self (self<->split)')
fig_frame_split_self_path_a = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_self_a')
fig_frame_split_self_path_b = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_self_b')
fig_frame_split_self_a, fig_frame_split_self_b = self.get_compare_full_self_frame_split_plot_xcorr(astroA)
saving_utils.save_plotly_fig(fig_frame_split_self_a, fig_frame_split_self_path_a)
saving_utils.save_plotly_fig(fig_frame_split_self_b, fig_frame_split_self_path_b)
'''
'''
print('Plotting frame split xcorr value to splits splits (split<->split)')
fig_frame_split_self_path_a = os.path.join(output_experiment_path, 'plots', 'splits_split_split', 'splits_self_a')
fig_frame_split_self_path_b = os.path.join(output_experiment_path, 'plots', 'splits_split_split', 'splits_self_b')
fig_frame_split_self_a, fig_frame_split_self_b = self.get_compare_full_self_frame_split_split_plot_xcorr(astroA)
saving_utils.save_plotly_fig(fig_frame_split_self_a, fig_frame_split_self_path_a)
saving_utils.save_plotly_fig(fig_frame_split_self_b, fig_frame_split_self_path_b)
'''
'''
print('Plotting first last 20 min of rest heatmap comparison...')
fig_20min_rest_path = os.path.join(output_experiment_path, 'plots', 'splits_self', 'splits_first_last_rest_20min')
fig_20min_rest = self.get_plot_first_last_x_min_behaviour(astroA, num_min=20, behaviour_ind='rest')
if fig_20min_rest is not None:
saving_utils.save_plotly_fig(fig_20min_rest, fig_20min_rest_path)
print('Plotting continuous 20 min rest heatmaps compared to start...')
fig_20min_cont_rest_path = os.path.join(output_experiment_path, 'plots', 'splits_self', 'cont_splits_first_last_rest_20min')
fig_20min_cont_rest = self.get_plot_x_min_rest_relative(astroA, num_min=20, behaviour_ind='rest')
if fig_20min_cont_rest is not None:
saving_utils.save_plotly_fig(fig_20min_cont_rest, fig_20min_cont_rest_path)
'''
'''
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path, 'plots', 'size_v_time_corr')
areas = np.log(astroA.res_d['area'])
times = astroA.res_d['time_s']
r, p = stat_utils.get_pearsonr(times, areas)
df = pd.DataFrame({'Size': areas, 'Time': times})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
'''
'''
print('Split BEHAVIOUR GRIDS...')
n_chunks = 3
for bh in ['default', 'running', 'rest']:
event_grid_splits = aqua_utils.split_n_event_grids(astroA, bh=bh, n=n_chunks)
path = os.path.join(output_experiment_path, 'plots', 'split_behaviour_grids')
for i, event_grid_split in enumerate(event_grid_splits):
plot = plotly_utils.plot_contour(event_grid_split, title='{}-split {}/{}'.format(bh, i+1, len(event_grid_splits)))
saving_utils.save_plotly_fig(plot, os.path.join(path, 'bh_{}-split_{}-chunks_{}'.format(bh,i,n_chunks)))
'''
'''
print('HEATMAPS V2_2... (each astro day scaled with random)')
for dff_mode in ['False']:
#for bh in ['default', 'running', 'rest', 'stick_run_ind_15', 'stick_rest']:
for bh in ['default']:
print('THIS REPETITION LOOP MUST BE ONCE')
path = os.path.join(output_experiment_path, 'plots', 'behaviour_heatmaps_threshold_with_random')
d = self.get_individual_heatmaps_threshold_scaled(astroA, bh=bh, threshold=0.7, num_samples=3, dff_mode=dff_mode)
if d is None:
continue
saving_utils.save_plotly_fig(d['contour'], os.path.join(path, 'bh_{}-dff_{}'.format(bh, dff_mode)))
for i, contour_random in enumerate(d['contour_random']):
saving_utils.save_plotly_fig(contour_random, os.path.join(path, 'bh_{}-dff_{}-random_{}'.format(bh, dff_mode, i)))
'''
'''
#Every 60 seconds, whole vid
with_donwsample = True
downsample_length = int(astroA.fr * 60)
second_length = astroA.fr
bh_l = ['default', 'rest', 'running']
end_t = -1
start_t = 0
for bh in bh_l:
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
#Every 2 seconds, first 120 seconds
with_donwsample = True
downsample_length = int(astroA.fr * 2)
end_t = int(1200 * astroA.fr)
start_t = 0
second_length = astroA.fr
#bh_l = ['default', 'rest', 'running']
bh_l = ['default', 'rest', 'running']
for bh in bh_l:
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
bh_l = ['default', 'rest', 'running']
for bh in bh_l:
end_t = int(120*astroA.fr)
time_sorted_events_trunc = sorted((i for i,e in enumerate(astroA.res_d['tEnd']) if (e < frame_max)))
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots_precise-{}-d{}-e{}'.format(bh, downsample_length, end_t))
downsample_length = int(astroA.fr * 2)
self.make_event_appended_video_precise(astroA,
event_l=time_sorted_events_trunc,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
'''
bh_l = ['rest', 'running']
for bh in bh_l:
start_t = 0
end_t = int(1200 * astroA.fr)
downsample_length = int(astroA.fr * 2)
save_base_path = os.path.join(output_experiment_path, 'plots', 'video_plots_bh_frames-{}-d{}-e{}'.format(bh, downsample_length, end_t))
try:
os.makedirs(save_base_path)
except:
print('Folder exists')
self.make_event_appended_video_bh_frames(astroA,
bh=bh,
start_t=start_t,
end_t=end_t,
downsample_length=downsample_length,
save_base_path=save_base_path)
'''
def make_event_appended_video_bh_frames(self, astro, bh, start_t=0, end_t=-1, downsample_length=60, save_base_path=''):
curr_indices = astro.indices_d[bh][start_t:end_t]
if len(curr_indices) % downsample_length != 0:
curr_indices_fix = curr_indices[:-(len(curr_indices) % downsample_length)]
else:
curr_indices_fix = curr_indices
num_splits = len(curr_indices_fix) // downsample_length
curr_indices_split = {i : curr_indices_fix[i*downsample_length:(i+1)*downsample_length] for i in range(num_splits)}
curr_indices_split['default'] = astro.indices_d['default']
bh_event_subsets = aqua_utils.get_event_subsets(curr_indices_split, astro.res_d)
x2d_all = np.zeros([astro.input_shape[0], astro.input_shape[1]])
for i in range(num_splits):
print(i, '/', num_splits)
x2d = aqua_utils.get_event_grid_from_x2D(astro.res_d['x2D'][bh_event_subsets[i]], (astro.input_shape[0], astro.input_shape[1]))
x2d_all = x2d_all + x2d
x2d_all_normalized = np.copy(x2d_all) / ((i+1) * (downsample_length)) * astro.minute_frames
#Linearly rescale 0-1
x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
def make_event_appended_video(self, astro, bh='default', start_t=0, end_t=-1, downsample_length=60, save_base_path=''):
# Create array of (end_t - start_t) values consisting of event indices (lists) inside each frame
#Time sorted events [[time, event_id], ..] sorted by time
with_downsample = False if downsample_length == 1 else True
if end_t == -1:
end_t = astro.total_indices
time_sorted_events = deque(sorted((e,i) for i,e in enumerate(astro.res_d['tBegin'][astro.event_subsets[bh]])))
#Populate events over time: for each frame we have a list of event indices starting then
events_ot_l = []
for t in range(start_t, end_t):
events_ot_l.append([])
#As long as first element has same time, we pop to add to our list
            while time_sorted_events and t == time_sorted_events[0][0]:
                events_ot_l[t - start_t].append(time_sorted_events.popleft()[1])
#################################################################
#Downsample
if with_downsample:
new_events_ot_l = general_utils.merge_l_l(events_ot_l, downsample_length)
else:
            # shallow copy of the per-frame lists (not strictly necessary)
new_events_ot_l = [ev for ev in events_ot_l]
# Generate plots over time
x2d_all = np.zeros([astro.input_shape[0], astro.input_shape[1]])
for i, segment_events_l in enumerate(new_events_ot_l):
x2d = aqua_utils.get_event_grid_from_x2D(astro.res_d['x2D'][segment_events_l], (astro.input_shape[0], astro.input_shape[1]))
x2d_all = x2d_all + x2d
#Normalize
x2d_all_normalized = np.copy(x2d_all) / ((i+1) * (downsample_length if with_downsample else 1)) * astro.minute_frames
#Linearly rescale 0-1
x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
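    # The cumulative-count -> events/minute -> [0, 1] rescaling above is repeated in several methods
    # of this class; a minimal sketch of a helper that could capture it (hypothetical, not referenced
    # anywhere in this file) might look like:
    #
    # def _normalize_event_grid(self, x2d_all, frames_seen, minute_frames):
    #     per_minute = x2d_all / frames_seen * minute_frames
    #     value_range = np.max(per_minute) - np.min(per_minute)
    #     if value_range == 0:
    #         return np.zeros_like(per_minute)
    #     return (per_minute - np.min(per_minute)) / value_range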
#Pass event list to choose which events. E.g. events in first 2 minutes
#Slow but potentially prettier method. You can see each individual event its duration
def make_event_appended_video_precise(self, astro_curr, event_l, end_t, downsample_length, save_base_path):
dim_1 = astro_curr.input_shape[0]
dim_2 = astro_curr.input_shape[1]
#dim_3 = np.sum([x[2] for x in astro_curr.input_shape_l])
dim_3 = end_t
a = np.zeros([dim_1, dim_2, dim_3])
for i, event in enumerate(astro_curr.res_d['x3D'][event_l]):
print(i)
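            # res_d['x3D'] presumably stores MATLAB-style (column-major) linear voxel indices, hence
            # order='F'; unraveled = (row coords, column coords, frame coords) of the event's voxels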
unraveled = np.unravel_index(event, [dim_1, dim_2, dim_3], order='F')
begin_time = np.min(unraveled[2])
end_time = np.max(unraveled[2])
added_arr = np.zeros([dim_1, dim_2])
for u_i in range(len(unraveled[0])):
c_0 = unraveled[0][u_i]
c_1 = unraveled[1][u_i]
t = unraveled[2][u_i]
#print('begin {} end {}'.format(begin_time, end_time))
if added_arr[c_0, c_1] == 1:
continue
a[c_0, c_1, t:] += 1
added_arr[c_0, c_1] = 1
        # Render the accumulated per-pixel counts as a sequence of contour frames
        # (downsampling along time; with_downsample defined here as in make_event_appended_video)
        a_3d = a
        with_downsample = False if downsample_length == 1 else True
for i in range(a_3d.shape[2] // (downsample_length if with_downsample else 1)):
print(i)
x2d = np.sum(a_3d[:, :, i*downsample_length:(i+1)*downsample_length], axis=2)
#Normalize
x2d_all_normalized = np.copy(x2d) / ((i+1) * (downsample_length if with_downsample else 1)) * astro_curr.minute_frames
#Linearly rescale 0-1
x2d_all_normalized = (x2d_all_normalized - np.min(x2d_all_normalized)) / (np.max(x2d_all_normalized) - np.min(x2d_all_normalized))
fig = plotly_utils.plot_contour(x2d_all_normalized, title='', tick_x=[0.2, 0.4, 0.6, 0.8])
            saving_utils.save_plotly_fig(fig, os.path.join(save_base_path, '{:05d}'.format(i)), save_svg=False)
        return a
#--------#--------#--------#--------#--------#--------#--------#--------#--------#--------
#Experiment_id/days
def plot_comparisons(self, astroA_l):
output_experiment_path_comparison, days_str, day_l_s, astroA_l_s = self.setup_comparison_vars(astroA_l, self.output_folder)
print(output_experiment_path_comparison)
#Setup folders
self.setup_plot_folders_comparison(output_experiment_path_comparison)
'''
#Behaviour contour plots compare
for k in astroA_l[0].event_subsets.keys():
try:
event_grids_l = [astroA.event_grids_compare[k] for astroA in astroA_l]
fig_k = plotly_utils.plot_contour_multiple(event_grids_l, title=k + '_event grid comparison_' + days_str, height=500, width=600*len(astroA_l))
saving_utils.save_plotly_fig(fig_k , os.path.join(output_experiment_path_comparison, 'plots', 'behaviour_heatmaps', k), height=500, width=600*len(astroA_l))
except:
continue
for k in astroA_l[0].event_subsets.keys():
try:
event_grids_dff_l = [astroA.event_grids_compare_dff[k] for astroA in astroA_l]
fig_k = plotly_utils.plot_contour_multiple(event_grids_dff_l, title=k + '_event grid dff comparison_' + days_str, height=500, width=600*len(astroA_l))
saving_utils.save_plotly_fig(fig_k , os.path.join(output_experiment_path_comparison, 'plots', 'behaviour_heatmaps', k + '-dff'), height=500, width=600*len(astroA_l))
except:
continue
'''
'''
name = '{}-{}'.format(astroA_l[0].day, astroA_l[1].day)
behaviour_l = ['default', 'running', 'rest']
p_l = [0.05, 0.1, 0.25]
dff_mode_l = [False, True]
for behaviour in behaviour_l:
for dff_mode in dff_mode_l:
for p in p_l:
                    same_spots_prob, astro_filt_l, astro_all_filt, astro_nz_bool_l, astro_all_nz_bool = compare_astro_utils.get_astro_pair_same_spots_prob([astroA_l[0], astroA_l[1]], p=p, dff_mode=dff_mode)
print('Plotting intersections...')
top_five_perc_path = os.path.join(output_experiment_path_comparison, 'plots', 'intersection', name + 'bh_{}-dff_{}-top_{}'.format(behaviour, dff_mode, p))
nz_border_path = os.path.join(output_experiment_path_comparison, 'plots', 'intersection', name + 'nz_border')
fig_perc = plotly_utils.plot_contour_multiple([astro_filt_l[0], astro_filt_l[1], astro_all_filt],
subplot_titles=['top 5% values day {}'.format(astroA_l[0].day), 'top 5% values day {}'.format(astroA_l[1].day), 'intersection'],
title='Probability to occur randomly {:.2e}'.format(same_spots_prob),
color_bar_title='',
line_width=0.1,
font_size_individual=40,
scale_equal=False)
fig_bord = plotly_utils.plot_contour_multiple([astro_nz_bool_l[0].astype(int), astro_nz_bool_l[1].astype(int), astro_all_nz_bool.astype(int)],
subplot_titles=['non-0 values day {}'.format(astroA_l[0].day), 'non-0 values day {}'.format(astroA_l[1].day), 'intersection'],
title='Event activity borders',
color_bar_title='',
line_width=0.1,
font_size_individual=40,
scale_equal=False)
saving_utils.save_plotly_fig(fig_perc, top_five_perc_path, width=2000, height=1000)
saving_utils.save_plotly_fig(fig_bord, nz_border_path, width=2000, height=1000)
'''
'''
behaviour_l = ['default', 'running', 'rest']
dff_mode_l = [False, True]
p_l = [0.05, 0.10, 0.25]
for behaviour in behaviour_l:
print('Plotting intersections after alignment...')
#move_vector = compare_astro_utils.get_move_vector_xcorr_default(astroA_l[0], astroA_l[1])
move_vector = [0, 0]
#p_l = [0.05, 0.1, 0.25]
for dff_mode in dff_mode_l:
for p in p_l:
                    same_spots_prob, astro_filt_l, astro_all_filt, astro_nz_bool_l, astro_all_nz_bool = compare_astro_utils.get_astro_pair_same_spots_prob([astroA_l[0], astroA_l[1]], p=p, move_vector=move_vector, dff_mode=dff_mode)
print('Plotting intersections...')
top_perc_path = os.path.join(output_experiment_path_comparison, 'plots', 'intersection_border_xcorr_aligned', name + 'bh_{}-dff_{}-top_{}'.format(behaviour, dff_mode, p))
fig_perc = plotly_utils.plot_contour_multiple([astro_filt_l[0], astro_filt_l[1], astro_all_filt],
subplot_titles=['top 5% values day {}'.format(astroA_l[0].day), 'top 5% values day {}'.format(astroA_l[1].day), 'intersection'],
title='Probability to occur randomly {:.2e}'.format(same_spots_prob),
color_bar_title='',
line_width=0.1,
font_size_individual=40,
scale_equal=False)
saving_utils.save_plotly_fig(fig_perc, top_perc_path, width=2000, height=1000)
'''
'''
print('Plotting correlations compare...')
figs_compare_corrs = self.get_compare_max_corrs_plots(astroA_l)
for pk in figs_compare_corrs.keys():
figs_compare_corrs_path = os.path.join(output_experiment_path_comparison, 'plots', 'correlations', 'max_correlations_compare_p={}'.format(pk))
saving_utils.save_plotly_fig(figs_compare_corrs[pk], figs_compare_corrs_path)
print('Plotting compare alignments intersection sizes...')
figs_compare_align = self.get_compare_align_plots(astroA_l)
for setting in figs_compare_align.keys():
for pk in figs_compare_align[setting].keys():
figs_compare_align_path = os.path.join(output_experiment_path_comparison, 'plots', 'align', 'align_compare_s={}_p={}'.format(setting, pk))
saving_utils.save_plotly_fig(figs_compare_align[setting][pk], figs_compare_align_path)
for behaviour in self.behaviours_list_small:
if (behaviour in astroA_l[0].indices_d) and (behaviour in astroA_l[1].indices_d):
print('Plotting compare alignments xcorr full... (Aligning borders then taking xcorr value of the 2 astrocytes. Then compare to random astrocyte plots)')
figs_compare_align_xcorr = self.get_compare_align_plots_xcorr(astroA_l, align_setting='xcorr', dff_mode=False, behaviour=behaviour)
figs_compare_align_xcorr_path = os.path.join(output_experiment_path_comparison, 'plots', 'align', 'align_compare_xcorr_values_full_{}'.format(behaviour))
saving_utils.save_plotly_fig(figs_compare_align_xcorr, figs_compare_align_xcorr_path)
print('Plotting compare alignments dff xcorr full... (Aligning borders then taking xcorr value of the 2 astrocytes. Then compare to random astrocyte plots)')
figs_compare_align_xcorr_dff = self.get_compare_align_plots_xcorr(astroA_l, align_setting='xcorr', dff_mode=True, behaviour=behaviour)
figs_compare_align_xcorr_dff_path = os.path.join(output_experiment_path_comparison, 'plots', 'align', 'align_compare_xcorr_values_full_dff_{}'.format(behaviour))
saving_utils.save_plotly_fig(figs_compare_align_xcorr_dff, figs_compare_align_xcorr_dff_path)
else:
print('Behaviour {} not existent in astro'.format(behaviour))
print('Plotting sample for comparison')
#Make contour plot of astro1, astro2, sample_1, sample_2, sample_3
figs_compare_samples = self.get_compare_corrs_samples_plots(astroA_l)
for pk in figs_compare_samples.keys():
for s in figs_compare_samples[pk].keys():
path_s = os.path.join(output_experiment_path_comparison, 'plots', 'correlations', '{}_p={}'.format(s, pk))
saving_utils.save_plotly_fig(figs_compare_samples[pk][s], path_s)
behaviour_corr_path = os.path.join(output_experiment_path_comparison, 'plots', 'correlations', 'behaviour_corr')
fig_behaviour_corr = self.get_plot_compare_behaviour_correlation(astroA_l)
saving_utils.save_plotly_fig(fig_behaviour_corr, behaviour_corr_path)
behaviour_corr_path = os.path.join(output_experiment_path_comparison, 'plots', 'correlations', 'behaviour_corr_dff')
fig_behaviour_corr = self.get_plot_compare_behaviour_correlation(astroA_l, dff_mode=True)
saving_utils.save_plotly_fig(fig_behaviour_corr, behaviour_corr_path)
'''
def plot_comparisons_all(self, astroA_l, astroA_l_pairs=None, astroA_l_good_pairs=None, astroA_l_good=None, astroA_long_l=None):
output_experiment_path_all_comparison, _, _, astroA_l_s = self.setup_comparison_all_vars(astroA_l, self.output_folder)
print('Plotting sizes histogram dataset comparison for each behaviour')
self.setup_plot_folders_all_comparison(output_experiment_path_all_comparison)
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
astroA_l_filt = []
bh_l_test = ['rest', 'running', 'stick_run_ind_15', 'stick_rest']
for astroA in astroA_l:
include = True
for bh in bh_l_test:
if bh not in astroA.indices_d.keys() or bh not in astroA.activity_ratios.keys():
include = False
print(':(', astroA.print_id, bh)
if include:
astroA_l_filt.append(astroA)
day_0_1_pairs = []
if astroA_l_pairs is not None:
for astroA_l_pair in astroA_l_pairs:
if astroA_l_pair[1].day == 1:
day_0_1_pairs.append(astroA_l_pair)
'''
print('Saving results of ratios running, rest, stick-running, stick-rest of each astrocyte in csv...')
c = ['running', 'rest', 'stick_run_ind_15', 'stick_rest', 'total_time_s', 'total_time_m', 'avg_running_speed', 'avg_speed_global']
c_n = ['running', 'rest', 'stick_run', 'stick_rest', 'total_time(s)', 'total_time(m)', 'avg_speed(cm/s)', 'avg_speed_global(cm/s)']
astro_ratios_np = np.zeros([len(astroA_l), len(c)])
r = [astroA.id for astroA in astroA_l]
for i, astroA in enumerate(astroA_l):
num_frames = len(astroA.indices_d['default'])
num_seconds = num_frames / astroA.fr
num_minutes = general_utils.truncate(num_seconds / 60.0, 2)
num_seconds = general_utils.truncate(num_seconds, 2)
for j, k in enumerate(c):
                if k == 'total_time_s':
                    astro_ratios_np[i, j] = num_seconds
                    continue
                if k == 'total_time_m':
                    astro_ratios_np[i, j] = num_minutes
                    continue
if k not in astroA.indices_d:
if 'speed' in k:
if k == 'avg_running_speed':
astro_ratios_np[i, j] = np.mean(astroA.speed_values[astroA.speed_values!=0])
elif k == 'avg_speed_global':
astro_ratios_np[i, j] = np.mean(astroA.speed_values)
else:
print('Not exist', k, astroA.id)
astro_ratios_np[i, j] = 0
continue
else:
astro_ratios_np[i, j] = general_utils.truncate(len(astroA.indices_d[k]) / num_frames, 3)
behaviour_ratios_csv_path = os.path.join(output_experiment_path_all_comparison, 'data', 'behaviour_ratios', 'ratios.csv')
DataFrame(astro_ratios_np, columns=c, index=r).to_csv(behaviour_ratios_csv_path)
'''
'''
print('Saving results of average maximum characteristic values (e.g. Average maximum duration over all astrocyte recordings)')
measure_l = ['area', 'dffMax2', 'duration']
measure_names_l = ['area', 'amplitude', 'duration']
bh_l = ['rest', 'stick_rest', 'running', 'stick_run_ind_15']
settings = ['max', 'meantop10', 'mediantop10', 'meantop5', 'mediantop5']
settings_d_i = {setting: i for i, setting in enumerate(settings)}
np_d = [np.zeros([len(astroA_l), len(bh_l)]) for i in range(len(settings))]
max_np = np.zeros([len(astroA_l), len(bh_l)])
r = [astroA.id for astroA in astroA_l]
#Dictionary of events for each behaviour for each astrocyte.
#events_d_d['astro_id']['behaviour'] = event ids of astro id
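    #e.g. events_d_d['m181129_d190222_c005_day_0']['running'] -> indices (into res_d) of the events
    #occurring while that animal is running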
events_d_d = {}
for astroA in astroA_l:
d = {'default': astroA.indices_d['default']}
for bh in bh_l:
if bh in astroA.indices_d:
d[bh] = astroA.indices_d[bh]
events_d_d[astroA.print_id] = aqua_utils.get_event_subsets(d, astroA.res_d)
base_path = os.path.join(output_experiment_path_all_comparison, 'data', 'top_average_values')
for m_i, measure in enumerate(measure_l):
for i, astroA in enumerate(astroA_l):
measure_vals_all = astroA.res_d[measure]
bh_events_d = events_d_d[astroA.print_id]
for j, bh in enumerate(bh_l):
if bh in bh_events_d:
#Measure values corresponding to given astrocyte & measure & behaviour
bh_measure_vals = measure_vals_all[bh_events_d[bh]]
bh_measure_vals_s = np.sort(bh_measure_vals)[::-1]
top10 = bh_measure_vals_s[:len(bh_measure_vals_s)//10]
top5 = bh_measure_vals_s[:len(bh_measure_vals_s)//20]
print(astroA.print_id)
if astroA.print_id == 'm181129_d190222_c005_day_0' and bh == 'stick_rest':
print('A')
print(top5)
if astroA.print_id == 'm181129_d190222_c005_day_3' and bh == 'stick_rest':
print('B')
print(top5)
np_d[settings_d_i['max']][i, j] = bh_measure_vals_s[0]
np_d[settings_d_i['meantop10']][i, j] = np.mean(top10)
np_d[settings_d_i['meantop5']][i, j] = np.mean(top5)
np_d[settings_d_i['mediantop10']][i, j] = np.median(top10)
np_d[settings_d_i['mediantop5']][i, j] = np.median(top5)
for setting in settings_d_i.keys():
DataFrame(np_d[settings_d_i[setting]], columns=bh_l, index=r).to_csv(os.path.join(base_path, 'measure={}-type={}.csv'.format(measure_names_l[m_i], setting)))
'''
'''
measure_l = ['time_s', 'dffMax2', 'area']
measure_names = ['Duration(s)', 'Amplitude', 'Area']
print('Calcium signal behaviour change over time')
#How does calcium signals change over recording time?
#1 sort events by time
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_over_recording')
for astroA in astroA_l:
for i, measure in enumerate(measure_l):
sorted_ev_i = np.argsort(astroA.res_d['tBegin'])
x = []
y = []
for ev_i in sorted_ev_i:
x.append(ev_i)
y.append(astroA.res_d[measure][ev_i])
fig = plotly_utils.plot_scatter(np.array(x), np.array(y) , mode='markers', title='scatter', x_title='', y_title='')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
saving_utils.save_plotly_fig(fig, os.path.join(path, '{}-{}'.format(astroA.print_id, measure_names[i])))
'''
'''
print('Speed over time...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_over_recording')
for astroA in astroA_l:
fig = plotly_utils.plot_scatter(np.arange(len(astroA.speed_values)), astroA.speed_values, mode='lines')
plotly_utils.apply_fun_axis_fig(fig, lambda x : x / astroA.fr, axis='x')
saving_utils.save_plotly_fig(fig, os.path.join(path, '{}-speed'.format(astroA.print_id)))
'''
'''
print('Individual behaviour distribution plots...')
for n_bins in [10, 20, 40, 80]:
#Size, amplitude, signal duration distribution plots over all datasets on different behaviours
for bh in bh_l:
plt_l = []
pth_l = []
for measure, min_measure, max_measure in [
['area', None, 6],
['area', None, None],
['dffMax2', None, 5],
['dffMax2', None, None],
['duration', None, None],
['duration', None, 50]
]:
try:
for with_max in [True, False]:
measure_name = aqua_utils.get_measure_names(measure)
fig_path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_comparison'.format(measure_name), '{}-nbins={}-min={}-max={}'.format(bh, n_bins, min_measure, max_measure))
plot, _, _ = self.measure_distribution_plot(astroA_l, bh, measure=measure, num_bins=n_bins, max_measure=max_measure, min_measure=min_measure, measure_name=measure_name)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [fig_path], axis='x')
except KeyError as e:
                    print('Got KeyError (behaviour not present in this dataset, skipping): {}'.format(e))
'''
'''
#Area: None, 60, num_bins = 10
#Duration: None, 30, num_bins = 10
#dff : 0.6, 5, num_bins = 20
print('Comparing behaviour distribution plots...')
for n_bins in [10, 20]:
print('NUM BINS:', n_bins)
for behaviour_l in [bh_l]: #, ['rest', 'running'], ['running', 'stick'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]:
for measure, min_measure, max_measure in [
['area', None, 60],
['dffMax2', 0.6, 5],
['duration', None, 30],
]:
for confidence in [True]:
for mode in ['MOA', 'MOE']:
measure_name = aqua_utils.get_measure_names(measure)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}-mode={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, mode))
plot, stats_d = self.measure_distribution_bh_compare_plot(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=confidence, with_stats=True, mode=mode)
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='x')
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(name)), np.array(temp_d['data']).transpose(), delimiter=",")
for confidence in [True]:
for with_log in [False, True]:
measure_name = aqua_utils.get_measure_names(measure)
plot, stats_d = self.measure_distribution_bh_compare_plot_exponential_fit(astroA_l, behaviour_l, measure=measure, num_bins=n_bins, min_measure=min_measure, max_measure=max_measure, measure_name=measure_name, confidence=False, with_stats=True, with_log=with_log)
path = os.path.join(output_experiment_path_all_comparison, 'plots', '{}_histogram_bh_comparison'.format(measure_name), 'behaviours-{}-nbins={}-min={}-max={}-conf={}_EXPFIT-withlog={}'.format('_'.join(behaviour_l), n_bins, min_measure, max_measure, confidence, with_log))
if measure == 'duration':
plotly_utils.apply_fun_axis_fig(plot, lambda x : x / astroA_l[0].fr, axis='x')
#Save results in text file
for i, name in enumerate(stats_d['names']):
#Create folder
data_folder_path = path
try:
os.makedirs(path)
except:
pass
temp_d = {k : stats_d[k][i] for k in stats_d.keys()}
if len(name.split('__')) == 2:
tx_name = name.split('__')[0] + '_expfit'
else:
tx_name = name
print('TX NAME', name)
saving_utils.save_csv_dict(temp_d, os.path.join(data_folder_path, '{}.csv'.format(tx_name)), key_order=['names', 'x', 'mean', 'conf_95', 'std'])
np.savetxt(os.path.join(data_folder_path, '{}-data.csv'.format(tx_name)), np.array(temp_d['data']).transpose(), delimiter=",")
saving_utils.save_plotly_fig(plot, path)
saving_utils.save_pth_plt_l_log([plot], [path], axis='y')
print('THE STAT HERE?', stats_d)
'''
'''
print('Violin plots...')
plt_l = []
pth_l = []
for max_dff in [2, 5, 10, None]:
#VIOLIN PLOTS comparing TWO behaviour distribution plots (but in violin form)
fig_amp_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_rest_run_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path)
fig_amp_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_run_stick_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path2)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path)
fig_amp_violin_path3 = os.path.join(output_experiment_path_all_comparison, 'plots', 'amplitude_histogram_comparison', 'violin_rest_stick_dff={}'.format(max_dff))
fig = self.amplitude_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_dff=max_dff)
#saving_utils.save_plotly_fig(fig, fig_amp_violin_path)
plt_l.append(fig)
pth_l.append(fig_amp_violin_path3)
for max_area in [9, 20, 40, None]:
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_rest_run_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_run_stick_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
sizes_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'sizes_histogram_comparison', 'violin_rest_stick_area={}'.format(max_area))
fig = self.sizes_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_area=max_area)
plt_l.append(fig)
pth_l.append(sizes_violin_path)
for max_duration in [10, 20, 30, 40, None]:
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_rest_run_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'running', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_run_stick_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'running', 'stick_run_ind_15', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
duration_violin_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'duration_histogram_comparison', 'violin_rest_stick_duration={}'.format(max_duration))
fig = self.signal_duration_distribution_plot_violin_duo(astroA_l_filt, 'rest', 'stick_rest', max_duration=max_duration)
plt_l.append(fig)
pth_l.append(duration_violin_path)
        saving_utils.save_pth_plt_l_log(plt_l, pth_l, axis='y')
'''
'''
print('Splits SELF ALL')
#STEP 1
#Take only long duration astrocytes
#Set maximum length of astrocyte duration to be 70min
#Then apply splits with xcorr
data_save_path = os.path.join(output_experiment_path_all_comparison, 'data', 'splits_self_all')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'splits_self_all')
y_l_l = []
x_l = []
minute_frame_splits_l = [35, 30, 25, 20, 15, 10, 5, 2]
cut_duration = 70
param_str = 'cut_{}-'.format(cut_duration) + 'splits_{}-'.format('_'.join([str(m) for m in minute_frame_splits_l]))
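        #e.g. param_str == 'cut_70-splits_35_30_25_20_15_10_5_2-'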
name_l = []
for i, astroA in enumerate(astroA_long_l):
curr_save_path = os.path.join(data_save_path, 'id_{}-{}.pkl'.format(astroA.print_id, param_str))
res_d = self.get_compare_full_self_results_alt(astroA, cut_duration_min=cut_duration, minute_frame_splits_l=minute_frame_splits_l, save_pkl_path=curr_save_path)
y_l_l.append(res_d['y'])
x_l.append(res_d['x'])
name_l.append(astroA.print_id)
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x_l[0], y_l_l, None, name_l, mode='lines', title='Splits self', x_title='Splits (minutes)', y_title='Correlation',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=True)
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
saving_utils.save_plotly_fig(fig, path)
'''
'''
print('HEATMAPS V2... (astro days scaled the same (to minimum maximum scale of the 2))')
for astroA_pair in astroA_l_pairs:
            for dff_mode in [False]:
for bh in ['default', 'running', 'rest', 'stick_run_ind_15', 'stick_rest']:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'behaviour_heatmaps_V2_comparison_scale', self.get_astro_pair_id(astroA_pair))
d = self.get_day_heatmaps_scaled(astroA_pair, bh=bh, dff_mode=dff_mode)
if d is None:
continue
try:
os.makedirs(os.path.join(path))
except:
pass
saving_utils.save_plotly_fig(d['contour_0'], os.path.join(path, 'bh_{}-day_{}-dff_{}'.format(bh, astroA_pair[0].day, dff_mode)))
saving_utils.save_plotly_fig(d['contour_x'], os.path.join(path, 'bh_{}-day_{}-dff_{}'.format(bh, astroA_pair[1].day, dff_mode)))
'''
'''
#TODO FIX THE DOT PLOTS
#TODO CAN JUST ADD ANOTHER LOOP FOR THE BEHAVIOURS LOTS OF REPETITION
bh_l_activity = ['rest', 'running', 'stick_rest', 'stick_run_ind_15']
print('Bar charts and dot plots of all amplitudes, durations, sizes')
#for type_plot in ['dot', 'bar']:
for type_plot in ['bar']:
for error_type in ['std', 'conf']:
for err_symmetric in [True, False]:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes', '{}_plot_dff_filter_event_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='dffMax2',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes', '{}_plot_dff_notfiltered_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='dffMax',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_durations', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='time_s',
y_title='Duration (s)', title='Event durations', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_sizes', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_s, bh_l_activity, type_plot=type_plot, type_event='area',
y_title='Event sizes (\u03bcm<sup>2</sup>)', title='Sizes of events', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
len_d = {k: [len(stats_d['data'][k])] for k in stats_d['data'].keys()}
saving_utils.save_csv_dict(len_d, path +'-len_data.csv', key_order=stats_d['behaviour'])
'''
'''
print('COMPARE THIS', len(astroA_l_filt), 'WITH THIS', len(astroA_l_s))
for astroA in astroA_l_filt:
for bh_k in bh_l_activity:
if bh_k not in astroA.event_subsets.keys():
                    print('SHOULD NOT HAPPEN: behaviour', bh_k, 'missing from', astroA.print_id)
for type_plot in ['bar']:
for error_type in ['std', 'conf']:
for err_symmetric in [True, False]:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes_filt_bh', '{}_plot_dff_filter_event_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='dffMax2',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_amplitudes_filt_bh', '{}_plot_dff_notfiltered_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='dffMax',
y_title='Amplitude', title='Amplitudes', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_durations_filt_bh', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='time_s',
y_title='Duration (s)', title='Event durations', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'all_sizes_filt_bh', '{}_plot_{}_symm{}'.format(type_plot, error_type, err_symmetric))
fig, stats_d = self.get_all_signal_attribute_plot(astroA_l_filt, bh_l_activity, type_plot=type_plot, type_event='area',
y_title='Event sizes (\u03bcm<sup>2</sup>)', title='Sizes of events', error_type=error_type, err_symmetric=err_symmetric, with_stats=True)
saving_utils.save_plotly_fig(fig, path)
saving_utils.save_csv_dict(stats_d, path + '.csv', key_order=['behaviour', 'mean', 'std', 'conf_95'])
saving_utils.save_csv_dict(stats_d['data'], path +'-data.csv', key_order=stats_d['behaviour'])
'''
print('--------------------------------------------------------------------------------------------------')
print('Distribution of pixel values real vs fake...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'pixel_distribution')
x_l = []
y_l = []
name_l = [astroA.print_id for astroA in astroA_l]
for astroA in astroA_l:
grid = astroA.event_grids_1min['default']
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
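            # multiplying the density by the bin width converts it to per-bin probability mass,
            # so the values should sum to ~1 (checked by the print below)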
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'real')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=True, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l, index=name_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95'], stats_d['fit']], columns=x_l, index=['mean', 'conf_95', 'fit'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
sample_l_all = []
for astroA in astroA_l:
d = self.get_individual_heatmaps_threshold_scaled(astroA, bh='default', threshold=1, num_samples=1, dff_mode=False, with_arr=True)
sample_l_all.append(d['arrs_d']['arr_r'][0])
x_l = []
y_l = []
for grid in sample_l_all:
grid = np.interp(grid, (grid.min(), grid.max()), (0, 1))
grid_flat = grid.flatten()
grid_flat_nz = grid_flat[grid_flat != 0]
#Normalize values to 1
grid_flat_nz /= np.max(grid_flat_nz)
hist, bin_edges = np.histogram(grid_flat_nz, bins=20, range=(0,1), density=True)
hist = hist * (bin_edges[1] - bin_edges[0])
print('HIST SUM', np.sum(hist))
x_l = bin_edges[:-1]
y_l.append(hist)
y_l_fmt = []
for i in range(len(y_l[0])):
y_l_fmt.append([y[i] for y in y_l])
plot_path = os.path.join(path, 'fake')
fig, stats_d = plotly_utils.plot_scatter_error(x_l, y_l_fmt, x_title='Pixel intensity percentile', y_title='Frequency (Density)', exp_fit=False, with_details=True)
saving_utils.save_plotly_fig(fig, plot_path)
df_data = DataFrame(np.array(stats_d['data']).T, columns=x_l)
df_stats = DataFrame([stats_d['mean'], stats_d['conf_95']], columns=x_l, index=['mean', 'conf_95'])
df_data.to_csv(plot_path + '-data.csv')
df_stats.to_csv(plot_path +'-stats.csv')
print('--------------------------------------------------------------------------------------------------')
'''
print('SINGLE BAR CHART OF BEHAVIOURS (REST, RUN) of all astrocytes')
names_l = ['amplitude', 'size', 'duration']
measure_l = ['dffMax2', 'area', 'time_s' ]
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'bar_rest_run_all', '{}'.format(names_l[i]))
plot = self.get_measure_all_bar_plot(astroA_l, measure, bh_list=['rest', 'running'])
saving_utils.save_plotly_fig(plot, plot_path)
'''
'''
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
bh_list_pairs = [['rest', 'running'], ['rest', 'stick_rest'], ['running', 'stick_run_ind_15']]
bh_list_pairs_names = ['rest_run', 'rest_rest_stick', 'run_run_stick']
for j, bh_list_pair in enumerate(bh_list_pairs):
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'bar_{}_all'.format(bh_list_pairs_names[j]), '{}'.format('dots_'+names_l[i]))
if 'stick_rest' in bh_list_pair:
plot, stats_d = self.get_measure_all_dot_plot(astroA_l_filt, measure, bh_list=bh_list_pair)
else:
plot, stats_d = self.get_measure_all_dot_plot(astroA_l, measure, bh_list=bh_list_pair)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
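        # A hypothetical alternative to the csv.writer block above, using the DataFrame pattern already
        # used elsewhere in this file (same stats_d keys assumed):
        # DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names']).to_csv(plot_path + '-data_means.csv')
        # DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95']).to_csv(plot_path + '-mean_and_CI.csv')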
'''
"""
print('With transitions before and after measures dot plot')
names_l = ['Event number (per minute)', 'amplitude', 'size', 'duration']
measure_l = [None, 'dffMax2', 'area', 'time_s']
delay_ranges_pairs = [ [3*astroA_l[0].fr, 6*astroA_l[0].fr],
#[1*astroA_l[0].fr, 1*astroA_l[0].fr],
#[2*astroA_l[0].fr, 4*astroA_l[0].fr]
]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
for delay_ranges_pair in delay_ranges_pairs:
before_range, after_range = delay_ranges_pair
for i, measure in enumerate(measure_l):
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'bar_run_stick_run_transition_all', 'range_{}_{}_{}'.format(before_range, after_range, 'dots_'+names_l[i]))
plot, stats_d = self.get_measure_all_transition_dot_plot(astroA_l, measure, before_bh='running_semi_exact',
inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range)
saving_utils.save_plotly_fig(plot, plot_path)
with open(os.path.join(plot_path + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
l = ['']
l.extend(stats_d['x'])
l.extend(['conf_0', 'conf_1'])
writer.writerow(l)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean_l_l'][i])
if 'conf_95' in stats_d:
l.extend(stats_d['conf_95'][i])
writer.writerow(l)
writer.writerow('')
writer.writerow(['mean_0', 'mean_1', 'mean_conf_0', 'mean_conf_1'])
l = []
l.extend(stats_d['mean'])
l.extend(stats_d['mean_conf'])
writer.writerow(l)
"""
"""
#TODO ADD CSV
bh_l_activity = ['rest', 'running', 'stick_rest', 'stick_run_ind_15']
print('Activity all bar plot...')
plot, stats_d = self.get_behaviour_activity_bar_plot_all(astroA_l_s, bh_l_activity, with_stats=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all', 'activity_bar')
saving_utils.save_plotly_fig(plot, plot_path)
print('Activity all number events per minute bar plot...')
plot, stats_d = self.get_behaviour_activity_number_bar_plot_all(astroA_l_s, bh_l_activity, with_stats=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all_number_minute', 'activity_bar')
saving_utils.save_plotly_fig(plot, plot_path)
"""
'''
bh_l_activity = ['rest', 'running', 'stick_rest', 'stick_run_ind_15']
print('Activity all dot plot...')
plot, stats_d = self.get_behaviour_activity_dot_plot_all(astroA_l_s, bh_l_activity)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all', 'activity_dot')
saving_utils.save_plotly_fig(plot, plot_path)
saving_utils.save_csv_dict(stats_d, plot_path+'.csv', key_order=['x', 'mean', 'conf_95'])
print(stats_d['data'])
#print(stats_d['data'].shape)
DataFrame(stats_d['data'], columns=[astroA.print_id for astroA in astroA_l_s], index=stats_d['x']).to_csv(plot_path + '-data.csv')
'''
'''
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_mean_conf = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data.csv')
df_mean_conf.to_csv(path + '-mean_and_CI.csv')
'''
"""
print('Activity all dot plot with lines...')
print(len(astroA_l_filt))
plot, stats_d = self.get_behaviour_activity_dot_plot_all(astroA_l_filt, bh_l_activity, lines=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all', 'activity_dot_lines')
saving_utils.save_plotly_fig(plot, plot_path)
print('Activity all number events per minute dot plot...')
plot, stats_d = self.get_behaviour_activity_number_dot_plot_all(astroA_l_s, bh_l_activity)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all_number_minute', 'activity_dot')
saving_utils.save_plotly_fig(plot, plot_path)
saving_utils.save_csv_dict(stats_d, plot_path+'.csv', key_order=['x', 'mean', 'conf_95'])
print('Activity all number events per minute dot plot...')
plot, stats_d = self.get_behaviour_activity_number_dot_plot_all(astroA_l_filt, bh_l_activity, lines=True)
plot_path = os.path.join(output_experiment_path_all_comparison, 'plots', 'activity_all_number_minute', 'activity_dot_lines')
saving_utils.save_plotly_fig(plot, plot_path)
"""
'''
print('Plotting bar plots (triplet plot bands) num_events, duration, amplitude for ALL TOGETHER')
measure_names = [None, 'Area', 'Amplitude', 'Time (s)']
for bh in ['default', 'rest', 'running', 'stick', 'stick_rest', 'stick_run_ind_15']:
for i, measure in enumerate([None, 'area', 'dffMax2', 'time_s']):
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'triplet_dot_all', '{}_{}'.format(bh, measure))
                if any(bh in a.event_subsets for a in astroA_l_s):
fig, stats_d = self.triplet_dot_plot_all(astroA_l_s, bh=bh, measure=measure, n_bins=8, y_title=measure_names[i])
print('SAVING TRIPLET DOT ALL')
saving_utils.save_plotly_fig(fig, path)
print(stats_d.keys())
#Saving events only, we don't have CI's for each astrocyte
if measure is None:
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_mean_conf = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data.csv')
df_mean_conf.to_csv(path + '-mean_and_CI.csv')
else:
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
'''
"""
#--------------------------------------------------
#--------------------------------------------------
#--------------------------------------------------
##REST TO RUN , RUN TO REST, RUN STICK RUN SECTION
#--------------------------------------------------
#--------------------------------------------------
#--------------------------------------------------
print('Alternative run-rest/rest-run averaging individual lines')
delay_ranges_pairs = [ [3*astroA_l[0].fr, 6*astroA_l[0].fr],
[1*astroA_l[0].fr, 1*astroA_l[0].fr],
[2*astroA_l[0].fr, 4*astroA_l[0].fr]]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
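    # e.g. with a frame rate of fr frames/s, [3*fr, 6*fr] covers 3 seconds of frames before and
    # 6 seconds of frames after each behaviour transition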
#measure_l = ['dffMax2default', 'dffMax2', 'time_s', 'area']
#measure_path_l = ['amplitudes_default', 'amplitudes', 'durations', 'sizes']
#measure_y_titles = ['Amplitude', 'Amplitude', 'Duration (s)', 'Size']
measure_l = ['dffMax2default', 'time_s', 'area']
measure_path_l = ['amplitudes_default', 'durations', 'sizes']
measure_y_titles = ['Amplitude', 'Duration (s)', 'Size']
measure_l = ['dffMax2default']
measure_path_l = ['amplitudes_default']
measure_y_titles = ['Amplitude']
bh_measure_l = ['speed']
bh_measure_path_l = ['speed']
bh_measure_y_titles = ['Speed (cm/s)']
print('Alt Proportion plots...')
for delay_ranges_pair in delay_ranges_pairs:
before_range, after_range = delay_ranges_pair
for p in [#{'fit' : True, 'delay_step_size' : 1, 'confidence' : True},
#{'fit' : True, 'delay_step_size' : 5, 'confidence' : True},
{'fit' : True, 'delay_step_size' : 10, 'confidence': True}
]:
################################################
##############Proportion plots##################
################################################
print('EXTRA PARS', p, p.keys())
print('rest to run prop')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_proportions_alt')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run to rest prop')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_proportions_alt')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
before_range=before_range, after_range=after_range,
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run prop')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions_alt')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
**p)
for fig_k in fig_d:
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run prop duration filter [None, 3]')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions_alt_filter_max_3_frames')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range, duration_filter=[None, 3],
**p)
for fig_k in fig_d:
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run prop duration filter [None, 5]')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_proportions_alt_filter_max_5_frames')
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range, duration_filter=[None, 5],
**p)
for fig_k in fig_d:
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
'''
################################################
##############Measure plots#####################
################################################
'''
for m_i, measure in enumerate(measure_l):
print('rest to run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_{}_alt'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run to rest measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_{}_alt'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run measure: max frames 3 {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt_filter_max_3_frames'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i], duration_filter=[None, 3],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('run stick hit run measure: max frames 5 {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt_filter_max_5_frames'.format(measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_proportion_delays_plot_all_alt(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i], duration_filter=[None, 5],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
################################################
##############Behaviour measure plots###########
################################################
for m_i, bh_measure in enumerate(bh_measure_l):
print('BH measure {} rest-run'.format(bh_measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_{}_alt'.format(bh_measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('BH measure {} run-rest'.format(bh_measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_{}_alt'.format(bh_measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
print('BH measure {} run-stick-run'.format(bh_measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_alt'.format(bh_measure_path_l[m_i]))
fig_d, bin_stats = self.get_transition_bh_values_plot_all_alt(astroA_l,
before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
bh_measure=bh_measure,
before_range=before_range, after_range=after_range,
y_title=bh_measure_y_titles[m_i],
**p)
for fig_k in fig_d.keys():
fig_id = os.path.join(path, fig_k + 'range_{}_{}-{}-fit_{}-step_{}-conf_{}'.format(before_range, after_range, fig_k, p['fit'], p['delay_step_size'], p['confidence']))
saving_utils.save_plotly_fig(fig_d[fig_k], fig_id)
saving_utils.save_csv_dict(bin_stats, path=fig_id + '.csv', key_order=['x', 'mean', 'std', 'confidence_95'])
if p['delay_step_size'] == 10:
data_csv_path = os.path.join(path, 'range_{}_{}-step_{}-all.csv'.format(before_range, after_range, p['delay_step_size']))
DataFrame(bin_stats['y_all'], columns=bin_stats['x']).to_csv(data_csv_path, index=False)
"""
"""
print('OUTLIERS TRANSITION PLOTS...')
delay_ranges_pairs = [ [3*astroA_l[0].fr, 6*astroA_l[0].fr],
[1*astroA_l[0].fr, 1*astroA_l[0].fr],
[2*astroA_l[0].fr, 4*astroA_l[0].fr]
]
delay_ranges_pairs = [[int(v[0]), int(v[1])] for v in delay_ranges_pairs]
measure_l = ['dffMax2default', 'time_s', 'area']
measure_path_l = ['amplitudes_default', 'durations', 'sizes']
measure_y_titles = ['Amplitude', 'Duration (s)', 'Size']
for delay_ranges_pair in delay_ranges_pairs:
before_range, after_range = delay_ranges_pair
for m_i, measure in enumerate(measure_l):
print('rest to run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'rest_to_run_{}_outlier_alt'.format(measure_path_l[m_i]))
fig, stats_d = self.get_transition_outliers_plot(astroA_l, before_bh='rest_semi_exact', inds_bh='running_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i])
fig_id = os.path.join(path, 'outlier_range_{}_{}'.format(before_range, after_range))
saving_utils.save_plotly_fig(fig, fig_id)
with open(os.path.join(fig_id + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean'][i])
writer.writerow(l)
print('run to rest measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_to_rest_{}_outlier_alt'.format(measure_path_l[m_i]))
fig, stats_d = self.get_transition_outliers_plot(astroA_l, before_bh='running_semi_exact', inds_bh='rest_start', after_bh='rest_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i])
fig_id = os.path.join(path, 'outlier_range_{}_{}'.format(before_range, after_range))
saving_utils.save_plotly_fig(fig, fig_id)
with open(os.path.join(fig_id + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean'][i])
writer.writerow(l)
print('run stick hit run measure: {}'.format(measure))
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'run_stick_run_{}_outlier_alt'.format(measure_path_l[m_i]))
fig, stats_d = self.get_transition_outliers_plot(astroA_l, before_bh='running_semi_exact', inds_bh='stick_exact_start', after_bh='running_semi_exact',
before_range=before_range, after_range=after_range,
measure=measure,
y_title=measure_y_titles[m_i])
fig_id = os.path.join(path, 'outlier_range_{}_{}'.format(before_range, after_range))
saving_utils.save_plotly_fig(fig, fig_id)
with open(os.path.join(fig_id + '.csv'), mode='w') as csv_file:
writer = csv.writer(csv_file, delimiter=',', quotechar='"', quoting=csv.QUOTE_MINIMAL)
for i in range(len(stats_d['names'])):
l = [stats_d['names'][i]]
l.extend(stats_d['mean'][i])
writer.writerow(l)
"""
"""
print('Correlation plots ALL')
if astroA_l_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
for align_setting in ['xcorr']:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None], [None, 1], [1, None]]:
for bh in ['default', 'rest', 'running', 'stick']:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'xcorr_compare_{}_is_dff_{}'.format(bh, dff_mode))
save_results_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'xcorr_compare_{}_is_dff_{}'.format(bh, dff_mode))
fig, pair_fakes_before, pair_fakes, pair_corrs_l_before, pair_corrs_l, days_id_l = self.get_compare_align_plot_xcorr_all(astroA_l_pairs, align_setting='xcorr', dff_mode=dff_mode, behaviour=bh, n_fake_samples=25 ,save_results_path=save_results_path)
saving_utils.save_plotly_fig(fig, fig_corr_path)
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'xcorr_compare_{}_is_dff_{}.csv'.format(bh, dff_mode))
self.save_xcorr_pairs_align_results_csv(csv_corr_path, astroA_l_pairs, pair_fakes_before, pair_fakes, pair_corrs_l_before, pair_corrs_l)
"""
'''
print('Correlation plots (rest 0-1, run 0-1, rest-stick 0-1, run-stick 0-1, all, random)')
file_id = 'xcorr_compare_states_all'
if astroA_l_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
for align_setting in ['xcorr']:
for astro_pair in astroA_l_pairs:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None]]:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'pair_{}_type_{}_is_dff_{}'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
save_pkl_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'pair_{}_type_{}_is_dff_{}.pkl'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'pair_{}_type_{}_is_dff_{}.csv'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
behaviour_list_compare =['rest', 'running', 'stick_rest', 'stick_run_ind_15', 'default']
fig, res_d = self.get_compare_states_all_xcorr(astro_pair, align_setting=align_setting, dff_mode=dff_mode, n_fake_samples=1, save_pkl_path=save_pkl_path, filter_duration=filter_duration,
behaviour_l=behaviour_list_compare)
saving_utils.save_plotly_fig(fig, fig_corr_path)
saving_utils.save_csv_dict(res_d, csv_corr_path, key_order=behaviour_list_compare)
'''
'''
print('Correlation plots (rest 0 run 0, rest 1 run 1, random)')
file_id = 'xcorr_compare_between_states'
if astroA_l_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
for align_setting in ['xcorr']:
for astro_pair in astroA_l_pairs:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None]]:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'pair_{}_type_{}_is_dff_{}'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
save_pkl_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'pair_{}_type_{}_is_dff_{}.pkl'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'pair_{}_type_{}_is_dff_{}.csv'.format(self.get_astro_pair_id(astro_pair), file_id, dff_mode))
fig, res_d = self.get_compare_states_same_astro_all_xcorr(astro_pair, align_setting=align_setting, dff_mode=dff_mode, n_fake_samples=100, save_pkl_path=save_pkl_path, filter_duration=filter_duration)
print('RES D', res_d)
saving_utils.save_plotly_fig(fig, fig_corr_path)
saving_utils.save_csv_dict(res_d, csv_corr_path, key_order=list(res_d.keys()))
'''
        #TODO RUN THESE AGAIN
"""
#USING GOOD PAIRS FROM HERE ON
#RUN AFTER
file_id = 'xcorr_compare_between_group'
if astroA_l_good_pairs is not None:
for dff_mode in [False]:
#for align_setting in ['xcorr', 'xcorr_free']:
#NOT USING ALIGN SETTING
for align_setting in ['xcorr']:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
#for filter_duration in [[None, None], [None, 1], [1, None]]:
for filter_duration in [[None, None]]:
main_folder_id = 'correlations_no_align' if align_setting == 'xcorr_free' else 'correlations'
if (filter_duration[0] == None and filter_duration[1] == 1):
main_folder_id += '_short_events'
if (filter_duration[0] == 1 and filter_duration[1] == None):
main_folder_id += '_long_events'
fig_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id, 'type_{}_is_dff_{}'.format(file_id, dff_mode))
save_pkl_path = os.path.join(output_experiment_path_all_comparison, 'data', main_folder_id, 'type_{}_is_dff_{}.pkl'.format(file_id, dff_mode))
csv_corr_path = os.path.join(output_experiment_path_all_comparison, 'plots', main_folder_id + '_csv', 'type_{}_is_dff_{}.csv'.format(file_id, dff_mode))
fig, res_d = self.get_compare_between_group_xcorr(astroA_l_good_pairs, dff_mode=dff_mode, n_fake_samples=5, save_pkl_path=save_pkl_path, filter_duration=filter_duration)
saving_utils.save_plotly_fig(fig, fig_corr_path)
saving_utils.save_csv_dict(res_d, csv_corr_path, key_order=list(res_d.keys()))
"""
"""
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'control')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'control')
print('CONTROLS plot')
print('Recombination results...')
save_recombination_pkl_path = os.path.join(save_folder, 'recombination.pkl')
fig, res_d = self.get_compare_between_group_xcorr(astroA_l_good_pairs, dff_mode=False, n_fake_samples=1, save_pkl_path=save_recombination_pkl_path)
recombination_corrs = res_d['between']
recombination_rand_corrs = res_d['random']
print('Recombination CORRS', recombination_corrs)
print('Recombination rand corrs', recombination_rand_corrs)
#between_id
#between
#random
print('Random sample results...')
save_random_pair_pkl_path = os.path.join(save_folder, 'random_pair.pkl')
if os.path.isfile(save_random_pair_pkl_path):
print('FILE EXISTS', save_random_pair_pkl_path)
random_pair_corrs = saving_utils.load_pickle(save_random_pair_pkl_path)
else:
random_pair_corrs = []
for astroA_pair in astroA_l_good_pairs:
d = compare_astro_utils.alignment_counter(astroA_pair[0], astroA_pair[1],
n_fake_samples=10,
align_setting='xcorr',
eval_setting='xcorr',
fake_sample_setting='from_astro',
p=1,
behaviour='default',
filter_duration=[None, None],
with_output_details=True)
random_pair_corrs.extend(d['num_fake'])
saving_utils.save_pickle(random_pair_corrs, save_random_pair_pkl_path)
print('Random pair corrs:', random_pair_corrs)
print('Flip control results...')
save_flip_pkl_path = os.path.join(save_folder, 'flip.pkl')
if os.path.isfile(save_flip_pkl_path):
print('File exists', save_flip_pkl_path)
flip_corrs = saving_utils.load_pickle(save_flip_pkl_path)
else:
flip_corrs = []
for astroA in astroA_l_good:
for num_rot in range(1, 6):
astro_grid, _, _,_ = compare_astro_utils.get_filters_compare([astroA], p=1, dff_mode=False, behaviour='default')
astro_grid = astro_grid[0]
astro_grid_rot_1 = np.copy(astro_grid)
astro_grid_border_1 = np.copy(astroA.border)
                    if num_rot < 4:
                        # rotations by 90, 180 and 270 degrees
                        astro_grid_rot_2 = np.rot90(np.copy(astro_grid), k=num_rot)
                        astro_grid_border_2 = np.rot90(np.copy(astroA.border), k=num_rot)
                    elif num_rot == 4:
                        astro_grid_rot_2 = np.flipud(np.copy(astro_grid))
                        astro_grid_border_2 = np.flipud(np.copy(astroA.border))
                    elif num_rot == 5:
                        astro_grid_rot_2 = np.fliplr(np.copy(astro_grid))
                        astro_grid_border_2 = np.fliplr(np.copy(astroA.border))
d = compare_astro_utils.alignment_counter(astroA, astroA,
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=astro_grid_rot_1,
grid_source=astro_grid_rot_2,
target_border_grid=astro_grid_border_1,
source_border_grid=astro_grid_border_2,
move_vector=[0,0],
p=1,
behaviour='default',
with_output_details=True)
flip_corrs.append(d['num_compare'])
saving_utils.save_pickle(flip_corrs, save_flip_pkl_path)
print('Flip corrs', flip_corrs)
print('LENS, random pair, flip, recombination')
print(len(random_pair_corrs), len(flip_corrs), len(recombination_corrs))
x =['Random simulation', 'Flip Control', 'Recombination Control']
y = [random_pair_corrs, flip_corrs, recombination_corrs]
fig = plotly_utils.plot_point_box_revised(x, y, title='Mean +/- standard deviation of controls', x_title='', y_title='xcorr', err_type='std')
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'control_plot'))
"""
'''
plt.ioff()
print('Plotting Size vs Time correlation plot...')
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'size_v_time_corr_ALL')
areas_all = []
times_all = []
for astroA in astroA_l:
areas_all.extend(np.log(astroA.res_d['area']))
times_all.extend(astroA.res_d['time_s'])
areas_all = np.array(areas_all)
times_all = np.array(times_all)
r, p = stat_utils.get_pearsonr(times_all, areas_all)
df = pd.DataFrame({'Size': areas_all, 'Time': times_all})
title ='Size vs Time correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Size', 'Time', kind=kind, text=text)
plt.savefig(os.path.join(path, '{}.svg'.format(kind)))
plt.savefig(os.path.join(path, '{}.png'.format(kind)))
'''
'''
print('---------------------------------')
print('EVENTS VS SPEED PLOTS...')
print('---------------------------------')
speed_event_tuple_d = {}
n_bins_l = [3, 5, 10]
n_frame_splits_l = [15, int(astroA_l[0].minute_frames/6)]
for eval_type in ['max', 'mean']:
for n_bins in n_bins_l:
for n_frame_splits in n_frame_splits_l:
path = os.path.join(output_experiment_path_all_comparison, 'plots', 'speed_v_events_ALL', 'eval_type={}_splits={}_bins={}'.format(eval_type, n_frame_splits, n_bins))
for astroA in astroA_l:
                        #Split the recording into chunks of n_frame_splits frames (roughly 10-second chunks).
                        #Measure the average speed and the number of events in each chunk,
                        #then add the chunks to the speed histogram.
total_frames = len(astroA.indices_d['default'])
num_chunks = total_frames//n_frame_splits
print('NUM FRAME SPLITS {}, TOTAL FRAMES {} NUM CHUNKS {}'.format(n_frame_splits, total_frames, num_chunks))
split_arr_i_l = np.array_split(astroA.indices_d['default'], num_chunks)
speed_event_tuple_l = aqua_utils.speed_event_tuple(astroA, split_arr_i_l, num_events_only=True, eval_type=eval_type)
speed_event_tuple_d[astroA.print_id] = speed_event_tuple_l
#Find maximum speed, for bounds of histogram
max_speed = 0
for k in speed_event_tuple_d.keys():
max_speed_k = np.max(np.array([speed for speed, ev_l in speed_event_tuple_d[k]]))
#print('MAX SPEED {} : {}'.format(k, max_speed_k))
if max_speed_k > max_speed:
max_speed = max_speed_k
#print('MAX SPEED' , max_speed)
events_bins_d = {}
bin_values = np.linspace(0, max_speed, n_bins)
for astroA in astroA_l:
events_bins = [[] for i in range((n_bins-1))]
speed_event_tuple = speed_event_tuple_d[astroA.print_id]
for sp_ev_tup in speed_event_tuple:
ind = np.searchsorted(bin_values, sp_ev_tup[0], side='right')-1
if ind == len(events_bins):
ind -= 1
events_bins[ind].append(sp_ev_tup[1] / n_frame_splits)
#events_bins_avg = [np.mean(events_bins[i]) for i in range(len(events_bins))]
events_bins_d[astroA.print_id] = events_bins
x = bin_values[:-1]
names_l = list(events_bins_d.keys())
x_l = [x for i in range(len(astroA_l))]
y_l_l = [events_bins_d[k] for k in names_l]
x_l_dpl = [tup[0] for tup in speed_event_tuple]
y_l_dpl = [tup[1] for tup in speed_event_tuple]
r, p = stat_utils.get_pearsonr(y_l_dpl, x_l_dpl)
df = pd.DataFrame({'Events': y_l_dpl, 'Speed': x_l_dpl})
fig, stats_d = plotly_utils.plot_scatter_mult_with_avg(x, y_l_l, None, names_l, mode='lines', title='scatter', x_title='Speed (cm/s)', y_title='',
xrange=None, yrange=None, confidence=True, with_stats=True, point_box=False, mean_width_size=5)
saving_utils.save_plotly_fig(fig, path)
print('KEYS', stats_d.keys())
print('THE STTS D X', stats_d['x'])
df_data_m = DataFrame(stats_d['mean_l_l'], columns=stats_d['x'], index=stats_d['names'])
df_ci = DataFrame(stats_d['conf_95'], columns=stats_d['x'], index=stats_d['names'])
df_mean = DataFrame([stats_d['mean'], stats_d['mean_conf']], columns=stats_d['x'], index=['mean', 'conf_95'])
df_data_m.to_csv(path + '-data_means.csv')
df_ci.to_csv(path + '-data_ci.csv')
df_mean.to_csv(path + '-mean_and_CI.csv')
title ='Events vs Speed correlation plot'
text = 'r = {}, p < {}'.format(general_utils.truncate(r, 2), p)
for kind in ['reg', 'hex', 'kde']:
plotly_utils.seaborn_joint_grid(df, 'Speed', 'Events', kind=kind, text=text)
plt.savefig(path + '_corr_{}.svg'.format(kind))
plt.savefig(path + '_corr_{}.png'.format(kind))
print('---------------------------------')
'''
'''
print('Plotting correlation of splitted plots in 3 parts...')
save_folder = os.path.join(output_experiment_path_all_comparison, 'data', 'split_correlation_all')
plot_folder = os.path.join(output_experiment_path_all_comparison, 'plots', 'split_correlation_all')
save_splits_pkl_path = os.path.join(save_folder, 'between_splits.pkl')
save_day_splits_pkl_path = os.path.join(save_folder, 'between_days.pkl')
save_random_pkl_path = os.path.join(save_folder, 'random.pkl')
save_bh_splits_pkl_path = os.path.join(save_folder, 'between_rest_run.pkl')
        #1 Random simulations (fake samples) as a baseline
        #2 Within-day split correlations: day 0 and day 1 are each split into 3 parts and the splits of the same day are correlated with each other
        #3 Between-day split correlations: the splits of day 0 are correlated with the splits of day 1
#'split_correlation_all'
#for bh_l in ['default', 'rest', 'running']:
        #4 Between-behaviour split correlations: the rest splits are correlated with the running event grid
for bh in ['rest']:
#2
fig, res_splits_l = self.get_between_split_split_xcorr(astroA_long_l, bh=bh, save_pkl_path=save_splits_pkl_path)
#3
fig_2, res_day_splits_l = self.get_between_day_split_xcorr(day_0_1_pairs, bh=bh, save_pkl_path=save_day_splits_pkl_path)
#4
fig_3, res_bh_splits_l = self.get_between_bh_split_xcorr(astroA_long_l, bh_pair=['rest','running'], save_pkl_path=save_bh_splits_pkl_path)
#1
if os.path.isfile(save_random_pkl_path):
print('FILE EXISTS')
random_l = saving_utils.load_pickle(save_random_pkl_path)
else:
random_l = []
for astroA in astroA_long_l:
random_l.extend(self.get_random_corrs_self(astroA, bh, n_fake_samples=3))
if save_random_pkl_path is not None:
saving_utils.save_pickle(random_l, save_random_pkl_path)
x = ['Random', 'Self splits', 'Rest-Run splits', 'Day 0-1 Splits']
y = [random_l, res_splits_l, res_bh_splits_l, res_day_splits_l]
print('LENS', [len(y_i) for y_i in y])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Split correlations (between splits)- {}'.format(bh), x_title='', y_title='Xcorr value', with_stats=True)
saving_utils.save_plotly_fig(fig, os.path.join(plot_folder, 'splits'))
saving_utils.save_csv_dict(stats_d, os.path.join(plot_folder, 'splits' + '.csv'), key_order=['x', 'mean', 'conf_95'])
results_dict = {x[i] : y[i] for i in range(len(x))}
results_dict['x'] = x
key_order = ['x']
key_order.extend(x)
saving_utils.save_csv_dict(results_dict, os.path.join(plot_folder, 'splits_data' + '.csv'), key_order=key_order)
return fig
'''
def get_random_corrs_self(self, astroA, bh, n_fake_samples=3):
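        '''
        Random-correlation baseline for a single astrocyte: correlates it with
        n_fake_samples fake samples drawn from itself for behaviour `bh` and
        returns the list of fake-sample correlation values.
        '''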
random_l = []
d = compare_astro_utils.alignment_counter(astroA, astroA,
n_fake_samples=n_fake_samples,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
move_vector=[0, 0],
p=1,
behaviour=bh)
return d['num_fake']
def get_between_bh_split_xcorr(self, astroA_l, bh_pair=['rest', 'running'], n_chunks=3, dff_mode=False, save_pkl_path=None, filter_duration=(None, None)):
        '''
        Split the event grid of behaviour bh_pair[0] into n_chunks parts and
        correlate each part with the full 1-minute event grid of bh_pair[1].
        '''
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_l = saving_utils.load_pickle(save_pkl_path)
else:
event_grid_splits_d = {}
astros_d = {}
for astroA in astroA_l:
print(astroA.print_id)
event_grid_splits_d[astroA.print_id] = aqua_utils.split_n_event_grids(astroA, bh=bh_pair[0], n=n_chunks)
astros_d[astroA.print_id] = astroA
res_l = []
for astroA_k in event_grid_splits_d.keys():
#Get correlations of splits between splits same days
astroA_splits_l = event_grid_splits_d[astroA_k]
bh_split = astros_d[astroA_k].event_grids_1min[bh_pair[1]]
for i in range(n_chunks):
split_i = astroA_splits_l[i]
d = compare_astro_utils.alignment_counter(astros_d[astroA_k], astros_d[astroA_k],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=bh_split,
grid_source=split_i,
move_vector=[0, 0],
p=1,
behaviour=bh_pair[0],
filter_duration=filter_duration,
with_output_details=True)
res_l.append(d['num_compare'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_l, save_pkl_path)
x = ['Split correlations']
y = [np.copy(np.array(res_l))]
print('THE Y', y)
fig = plotly_utils.plot_point_box_revised(x, y, title='{} Split correlations (between splits)- {}'.format(n_chunks, '_'.join(bh_pair)), x_title='', y_title='Xcorr value')
return fig, res_l
def get_between_split_split_xcorr(self, astroA_l, bh='default', n_chunks=3, dff_mode=False, save_pkl_path=None, filter_duration=(None, None)):
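        '''
        Split each astrocyte's event grid for behaviour `bh` into n_chunks parts
        and correlate every pair of splits within the same recording.
        '''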
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_l = saving_utils.load_pickle(save_pkl_path)
else:
event_grid_splits_d = {}
astros_d = {}
for astroA in astroA_l:
print(astroA.print_id)
event_grid_splits_d[astroA.print_id] = aqua_utils.split_n_event_grids(astroA, bh=bh, n=n_chunks)
astros_d[astroA.print_id] = astroA
res_l = []
for astroA_k in event_grid_splits_d.keys():
#Get correlations of splits between splits same days
astroA_splits_l = event_grid_splits_d[astroA_k]
for i in range(n_chunks):
for j in range(i+1, n_chunks):
print(i, j)
split_i = astroA_splits_l[i]
split_j = astroA_splits_l[j]
d = compare_astro_utils.alignment_counter(astros_d[astroA_k], astros_d[astroA_k],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=split_i,
grid_source=split_j,
move_vector=[0, 0],
p=1,
behaviour=bh,
filter_duration=filter_duration,
with_output_details=True)
res_l.append(d['num_compare'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_l, save_pkl_path)
x = ['Split correlations']
y = [np.copy(np.array(res_l))]
print('THE Y', y)
fig = plotly_utils.plot_point_box_revised(x, y, title='{} Split correlations (between splits)- {}'.format(n_chunks, bh), x_title='', y_title='Xcorr value')
return fig, res_l
def get_between_day_split_xcorr(self, astroA_l_pairs, bh='default', n_chunks=3, dff_mode=False, n_fake_samples=5, save_pkl_path=None, filter_duration=(None, None)):
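        '''
        Split the day-0 and day-X event grids of each astrocyte pair into
        n_chunks parts, align the pair once via xcorr, then correlate every
        day-0 split with every day-X split.
        '''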
if os.path.isfile(save_pkl_path):
print('FILE EXISTS')
res_l = saving_utils.load_pickle(save_pkl_path)
else:
res_l = []
event_grid_splits_d = {}
for astro_pair in astroA_l_pairs:
pair_k = self.get_astro_pair_id(astro_pair)
event_grid_splits_d[pair_k] = {'day_0' : None, 'day_x' : None}
#Split each astro into 3
event_grid_splits_d[pair_k]['day_0'] = aqua_utils.split_n_event_grids(astro_pair[0], bh=bh, n=n_chunks)
event_grid_splits_d[pair_k]['day_x'] = aqua_utils.split_n_event_grids(astro_pair[1], bh=bh, n=n_chunks)
event_grid_splits_d[pair_k]['astro_pair'] = astro_pair
#Get all split correlations between day 0 and day x of same astro pair
#All possible here (note the 2nd for loop different than function above)
astro_pair = event_grid_splits_d[pair_k]['astro_pair']
d_temp = compare_astro_utils.alignment_counter(astro_pair[0], astro_pair[1],
n_fake_samples=0,
align_setting='xcorr',
eval_setting='xcorr',
fake_sample_setting='from_astro',
p=1,
behaviour='default',
dff_mode=dff_mode)
move_vector = d_temp['move_vector']
for i in range(n_chunks):
for j in range(n_chunks):
print(i, j)
split_i = event_grid_splits_d[pair_k]['day_0'][i]
split_j = event_grid_splits_d[pair_k]['day_x'][j]
d = compare_astro_utils.alignment_counter(astro_pair[0], astro_pair[1],
n_fake_samples=0,
align_setting='param',
eval_setting='xcorr',
fake_sample_setting='from_astro',
grid_target=split_i,
grid_source=split_j,
move_vector=move_vector,
p=1,
behaviour=bh,
filter_duration=filter_duration,
with_output_details=True)
res_l.append(d['num_compare'])
if save_pkl_path is not None:
saving_utils.save_pickle(res_l, save_pkl_path)
x = ['Split correlations']
y = [np.copy(np.array(res_l))]
fig = plotly_utils.plot_point_box_revised(x, y, title='{} Split correlations (between days) - {}'.format(n_chunks, bh), x_title='', y_title='Xcorr value')
return fig, res_l
#--------#--------#--------#--------#--------#--------#--------#--------#--------#--------
def generate_corr_data(self, astroA):
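        '''
        Generate and save fake-sample correlation data for a single recording
        (one pickle per filter probability) and write the duration-split csv.
        '''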
output_experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Making dirs', output_experiment_path)
self.setup_file_folders(output_experiment_path)
print(output_experiment_path)
print('Generating fake sample correlations and split correlations...')
#Will use these to compare how much to split before losing correlation
for p in self.filter_probs:
samples_save_path = os.path.join(output_experiment_path, 'files', 'correlations', 'fake_sample_p={}.pkl'.format(p))
samples_corr_d = correlation_utils.get_corr_astro_samples_v2(astro_xc=astroA, astro_base=astroA, p=p, n_samples=self.n_samples_corr_fake)
saving_utils.save_pickle(samples_corr_d, samples_save_path)
#splits_save_path = os.path.join(output_experiment_path, 'files', 'correlations', 'splits_p={}.pkl'.format(p))
#splits_corr_d = correlation_utils.get_splits_corr(astroA, num_frames_splits_l=self.num_frames_splits_l, p=p, max_comparisons=self.max_split_comparison_samples)
#saving_utils.save_pickle(splits_corr_d, splits_save_path)
print('Writing csv...')
duration_csv_path = os.path.join(output_experiment_path, 'files', 'csv', 'duration_split_ratios.csv')
self.write_csv_duration_splits(astroA, duration_csv_path)
def generate_corr_data_pair(self, astroA_l):
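        '''
        Generate and save correlation data for a pair of recordings: the self
        correlation of the first recording, the cross correlation between the
        two, and fake-sample correlations, one pickle per filter probability.
        '''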
output_experiment_path_comparison, days_str, day_l_s, astroA_l_s = self.setup_comparison_vars(astroA_l, self.output_folder)
print(output_experiment_path_comparison)
print('Making dirs', output_experiment_path_comparison)
self.setup_file_folders_comparison(output_experiment_path_comparison)
for p in self.filter_probs:
print(p)
d = {}
corr_compare_save_path = os.path.join(output_experiment_path_comparison, 'files', 'correlations', 'corr_compare_p={}.pkl'.format(p))
astro_filt_l, astro_all_filt, astro_nz_bool_l, astro_all_nz_bool = compare_astro_utils.get_filters_compare(astroA_l_s, p=p)
#1 - self correlation
corr_res_self, max_corr_self, move_vector_self, max_coord_self = correlation_utils.get_cross_correlation_2D_info_compare(astro_filt_l[0], astro_filt_l[0])
corr_res, max_corr, move_vector, max_coord = correlation_utils.get_cross_correlation_2D_info_compare(astro_filt_l[0], astro_filt_l[1])
#3 - astroA - astroB fake sample correlations
samples_d = correlation_utils.get_corr_astro_samples_v2(astro_xc=astroA_l[0], astro_base=astroA_l[1], p=p, n_samples=self.n_samples_corr_fake)
d['self'] = {'max_corr' : max_corr_self,
                         'corr_res' : corr_res_self,
'move_vector' : move_vector_self,
'max_coord' : max_coord_self }
d['compare'] = {'max_corr' : max_corr,
                            'corr_res' : corr_res,
'move_vector' : move_vector,
'max_coord' : max_coord}
d['samples'] = samples_d
saving_utils.save_pickle(d, corr_compare_save_path)
def parse_prob(self, path):
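        '''
        Parse the filter probability from a file name of the form '..._p=<value>.pkl'.
        '''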
base_name = os.path.splitext(os.path.basename(path))[0]
prob_v = float(base_name.split('=')[-1])
return prob_v
def read_corr_pair_data(self, astroA_l):
output_experiment_path_comparison, days_str, day_l_s, astroA_l_s = self.setup_comparison_vars(astroA_l, self.output_folder)
comparison_paths = glob.glob(os.path.join(output_experiment_path_comparison, 'files/correlations/corr_compare_*.pkl'))
corr_pair_d = {}
for comparison_path in comparison_paths:
prob_k = self.parse_prob(comparison_path)
print('Prob k', prob_k)
corr_pair_d[prob_k] = saving_utils.load_pickle(comparison_path)
return corr_pair_d
def read_corr_data(self, astroA):
experiment_path = self.get_output_experiment_path(astroA, self.output_folder)
print('Experiment path', experiment_path)
fake_sample_corr_paths = glob.glob(os.path.join(experiment_path, 'files/correlations/fake_sample_*.pkl'))
#splits_corr_paths = glob.glob(os.path.join(experiment_path, 'files/correlations/splits_*.pkl'))
fake_corr_d = {}
#splits_corr_d = {}
for fake_sample_path in fake_sample_corr_paths:
fake_corr_d[str(self.parse_prob(fake_sample_path))] = saving_utils.load_pickle(fake_sample_path)
#for split_path in splits_corr_paths:
# splits_corr_d[str(self.parse_prob(split_path))] = saving_utils.load_pickle(split_path)
#return fake_corr_d, splits_corr_d
return fake_corr_d
def setup_comparison_vars(self, astroA_l, output_folder):
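        '''
        Extract the experiment id and day from each recording path, sort the
        recordings by day, and build the comparison output path
        '<output_folder>/<experiment_id>/days_<d0>_<d1>...'.
        '''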
experiment_id_l = []
day_l = []
for astroA in astroA_l:
experiment_id_l.append('/'.join(astroA.experiment_path.split('/')[-3:-1]))
day_l.append(int(astroA.experiment_path.split('/')[-1].split('_')[-1]))
if len(set(experiment_id_l)) != 1:
print('Different experiment ids, stopping', experiment_id_l)
return
sort_i = np.argsort(day_l)
day_l_s = [day_l[i] for i in sort_i]
astroA_l_s = [astroA_l[i] for i in sort_i]
days_str = 'days_' + '_'.join([str(day) for day in day_l_s])
output_experiment_path_comparison = os.path.join(output_folder,
experiment_id_l[0],
days_str)
return output_experiment_path_comparison, days_str, day_l_s, astroA_l_s
def setup_comparison_all_vars(self, astroA_l, output_folder):
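        '''
        Same as setup_comparison_vars, but for the pooled comparison across
        experiments: the output path is simply '<output_folder>/all'.
        '''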
experiment_id_l = []
day_l = []
for astroA in astroA_l:
experiment_id_l.append('/'.join(astroA.experiment_path.split('/')[-3:-1]))
day_l.append(int(astroA.experiment_path.split('/')[-1].split('_')[-1]))
sort_i = np.argsort(day_l)
day_l_s = [day_l[i] for i in sort_i]
astroA_l_s = [astroA_l[i] for i in sort_i]
days_str = 'days_' + '_'.join([str(day) for day in day_l_s])
output_experiment_path_all_comparison = os.path.join(output_folder, 'all')
print('done')
return output_experiment_path_all_comparison, days_str, day_l_s, astroA_l_s
def get_behaviour_basic_plots(self, astroA):
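        '''
        Scatter plots of the binarized and raw behaviour traces (stick, speed,
        whiskers, pupil) and of the frame indicator for each behaviour subset.
        '''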
figs = {}
figs['stick_bin'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.stick_bin)), y=astroA.stick_bin, astype='int', straight_lines_only=True, title='Stick', x_title='Frame', y_title='Off whisker/On whisker')
figs['speed_bin'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.speed_bin)), y=astroA.speed_bin, astype='int', straight_lines_only=True, title='Speed', x_title='Frame', y_title='Rest/Running')
figs['whisker_bin'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.whisker_bin)), y=astroA.whisker_bin, astype='int', straight_lines_only=True, title='Whisker', x_title='Frame', y_title='No whisker/Whisker movement')
figs['pupil'] = plotly_utils.plot_scatter_fmt(x=np.arange(len(astroA.pupil_values)), y=astroA.pupil_values, astype='float', straight_lines_only=True, title='Pupil', x_title='Frame', y_title='Pupil value')
figs['stick_values'] = plotly_utils.plot_scatter(x=np.arange(len(astroA.roi_dict['extra']['stick'])), y=astroA.roi_dict['extra']['stick'], title='Stick', x_title='Frame', y_title='Stick value')
figs['speed_values'] = plotly_utils.plot_scatter(x=np.arange(len(astroA.roi_dict['extra']['speed'])), y=astroA.roi_dict['extra']['speed'], title='Speed', x_title='Frame', y_title='Speed value')
figs['whisker_values'] = plotly_utils.plot_scatter(x=np.arange(len(astroA.roi_dict['extra']['whiskers'])), y=astroA.roi_dict['extra']['whiskers'], title='Whisker', x_title='Frame', y_title='Whisker value')
def make_arr(inds, arr_length):
arr = np.zeros([arr_length])
arr[inds] = 1
return arr
arr_length = len(astroA.stick_bin)
for k in astroA.indices_d.keys():
arr = make_arr(astroA.indices_d[k], arr_length)
figs[k] = plotly_utils.plot_scatter_fmt(x=np.arange(len(arr)), y=arr, title=k, astype='int', straight_lines_only=True, x_title='Frame', y_title='Value')
return figs
def get_signal_durations_plot(self, astroA):
signal_duration_figs = {}
#Signal durations
for k in astroA.event_subsets.keys():
signal_duration_figs[k] = plotly_utils.plot_histogram(astroA.all_durations_d[k], title=' Signal durations histogram ({})'.format(k))
return signal_duration_figs
def get_border_plot(self, astroA):
if 'clandmark_mask' in astroA.res_d.keys():
return plotly_utils.plot_contour(astroA.res_d['border_mask'] + astroA.res_d['clandmark_mask'], title='border_and_landmark_mask', height=600, width=800)
else:
return plotly_utils.plot_contour(astroA.res_d['border_mask'], title='border_mask', height=600, width=800)
def get_behaviour_contour_plots(self, astroA):
'''
Use 1 min normalized plots
'''
fig_heatmap_grids = {}
fig_heatmap_dff_grids = {}
#fig_heatmap_dff_grids
for k in astroA.event_subsets.keys():
fig_heatmap_grids[k] = plotly_utils.plot_contour(astroA.event_grids_1min[k], title=k + '_event grid', height=600, width=800)
for k in astroA.event_subsets.keys():
fig_heatmap_dff_grids[k] = plotly_utils.plot_contour(astroA.event_grids_1min_dff[k], title=k+'_event grid dff', height=600, width=800)
return fig_heatmap_grids, fig_heatmap_dff_grids
def get_behaviour_contour_threshold_plots(self, astroA, threshold=0.5):
'''
Use 1 min normalized plots
'''
fig_heatmap_grids = {}
fig_heatmap_dff_grids = {}
#fig_heatmap_dff_grids
for k in astroA.event_subsets.keys():
fig_heatmap_grids[k] = plotly_utils.plot_contour_threshold(astroA.event_grids_1min[k], threshold_perc=threshold, title=k + '_event grid - Saturation : ' + str(threshold*100) + '%', height=600, width=800)
for k in astroA.event_subsets.keys():
fig_heatmap_dff_grids[k] = plotly_utils.plot_contour_threshold(astroA.event_grids_1min_dff[k], threshold_perc=threshold, title=k+'_event grid dff - Saturation : ' + str(threshold*100) + '%', height=600, width=800)
return fig_heatmap_grids, fig_heatmap_dff_grids
def get_behaviour_activity_plot(self, astroA):
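        '''
        Bar plot of the activity ratio (events per voxel, in %) for each
        behaviour of a single astrocyte, sorted in increasing order.
        '''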
activity_ratio_k = np.array(self.filter_keys(astroA))
activity_ratio_l = np.array([astroA.activity_ratios[k] for k in activity_ratio_k])
text_values = np.array(['Frames: ' + str(len(astroA.indices_d[k])) for k in activity_ratio_k])
activity_i = np.argsort(activity_ratio_l)
activity_ratio_k_s = activity_ratio_k[activity_i]
activity_ratio_l_s = activity_ratio_l[activity_i]
text_values_s = text_values[activity_i]
activity_ratio_k_s[np.where(activity_ratio_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_ratio_k_s, y=activity_ratio_l_s, text_values=['']*len(activity_ratio_l_s), text_size=20, title='Activity ratio (events per voxel)', x_title='', y_title='Events per voxel (%)', margin_b=150)
plotly_utils.apply_fun_axis_fig(fig, lambda x : x * 100, axis='y',)
return fig
def get_behaviour_activity_bar_plot_all(self, astroA_l, bh_l, with_stats=False):
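        '''
        Bar plot of the activity ratio averaged over all astrocytes for each
        behaviour in bh_l, sorted in increasing order.
        '''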
activity_ratios_np = np.zeros(len(bh_l))
activity_ratios_num_added = np.zeros(len(bh_l))
for i, bh_k in enumerate(bh_l):
for astroA in astroA_l:
if bh_k in astroA.activity_ratios.keys():
activity_ratios_np[i] += astroA.activity_ratios[bh_k]
activity_ratios_num_added[i] += 1
activity_ratios_np /= activity_ratios_num_added
activity_i = np.argsort(activity_ratios_np)
activity_ratio_k_s = np.array(bh_l)[activity_i]
activity_ratio_l_s = activity_ratios_np[activity_i]
activity_ratio_k_s[np.where(activity_ratio_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_ratio_k_s, y=activity_ratio_l_s, text_values=['']*len(activity_ratio_l_s), text_size=20,
title='Activity ratio (events per voxel)', x_title='', y_title='Events per voxel (%)',
margin_b=150,
err_y=[], err_symmetric=None)
plotly_utils.apply_fun_axis_fig(fig, lambda x : x * 100, axis='y',)
if with_stats:
#data = {k : areas[i] for i, k in enumerate(area_keys_s)}
return fig, {}
return fig
def get_behaviour_activity_dot_plot_all(self, astroA_l, bh_l, lines=False):
activity_ratio_l = []
for bh in bh_l:
activity_bh_l = []
for i, astroA in enumerate(astroA_l):
if bh in astroA.activity_ratios.keys():
activity_bh_l.append(astroA.activity_ratios[bh])
activity_ratio_l.append(activity_bh_l)
activity_means = [np.mean(activity_ratios) for activity_ratios in activity_ratio_l]
activity_i = np.argsort(activity_means)
x = np.array(bh_l)[activity_i]
y = []
for i in activity_i:
y.append(activity_ratio_l[i])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Activity ratio', x_title='', y_title='Events per voxel (%)', lines=lines, with_stats=True)
return fig, stats_d
def get_behaviour_activity_number_bar_plot_all(self, astroA_l, bh_l, with_stats=False):
activity_num_np = np.zeros(len(bh_l))
activity_num_added = np.zeros(len(bh_l))
for astroA in astroA_l:
for i, bh_k in enumerate(bh_l):
if bh_k in astroA.activity_ratios.keys():
activity_num_np[i] += (len(astroA.res_d['area'][astroA.event_subsets[bh_k]]) / len(astroA.indices_d[bh_k])) * astroA.minute_frames
activity_num_added[i] += 1
activity_num_np /= activity_num_added
activity_i = np.argsort(activity_num_np)
activity_num_k_s = np.array(bh_l)[activity_i]
activity_num_l_s = activity_num_np[activity_i]
activity_num_k_s[np.where(activity_num_k_s == 'default')] = 'all'
fig = plotly_utils.plot_bar(x=activity_num_k_s, y=activity_num_l_s, text_values=['']*len(activity_num_l_s),
text_size=20, title='Activity number',
x_title='', y_title='Events per minute in state', margin_b=150,
err_y=[], err_symmetric=None)
if with_stats:
#data = {k : areas[i] for i, k in enumerate(area_keys_s)}
return fig, {}
return fig
def get_behaviour_activity_number_dot_plot_all(self, astroA_l, bh_l, with_stats=False, lines=False):
activity_num_l = []
for bh in bh_l:
activity_bh_l = []
for i, astroA in enumerate(astroA_l):
if bh in astroA.event_subsets.keys():
num_events = len(astroA.res_d['area'][astroA.event_subsets[bh]])
num_frames = len(astroA.indices_d[bh])
activity_bh_l.append((num_events / num_frames) * astroA.minute_frames)
activity_num_l.append(activity_bh_l)
activity_means = [np.mean(activity_nums) for activity_nums in activity_num_l]
activity_i = np.argsort(activity_means)
x = np.array(bh_l)[activity_i]
y = []
for i in activity_i:
y.append(activity_num_l[i])
fig, stats_d = plotly_utils.plot_point_box_revised(x, y, title='Activity number', x_title='', y_title='Events per minute in state', lines=lines, with_stats=True)
return fig, stats_d
def get_common_keys(self, astroA_l, bh_l):
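        '''
        Return, sorted, the behaviours from bh_l that are present in every astrocyte.
        '''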
s = set(bh_l)
for astroA in astroA_l:
s &= set(astroA.indices_d.keys())
return np.sort(list(s))
def get_all_signal_attribute_plot(self, astroA_l, bh_l, type_event='area', type_plot='bar',
y_range=None, divide_y=1, title='', x_title='', y_title='',
error_type='std', err_symmetric=True, with_stats=False):
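        '''
        Pool the values of `type_event` (e.g. 'area', 'time_s') per behaviour in
        bh_l across all astrocytes and summarise them (mean, std, 95% CI),
        sorted by mean, for a bar or dot plot.
        '''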
areas = [[] for i in range(len(bh_l))]
for astroA in astroA_l:
for i, k in enumerate(bh_l):
if k in astroA.event_subsets.keys():
areas_k = astroA.res_d[type_event][astroA.event_subsets[k]]
areas[i].extend(areas_k)
areas_std = np.array([np.std(v_l) for v_l in areas])
areas_mean = np.array([np.mean(v_l) for v_l in areas])
areas_conf = []
for v_l in areas:
m, l, h = stat_utils.mean_confidence_interval(v_l, confidence=0.95)
areas_conf.append(m-l)
areas_conf = np.array(areas_conf)
areas_i = np.argsort(areas_mean)
area_keys_s = np.array(bh_l)[areas_i]
areas_s = np.array(areas)[areas_i]
        areas_mean_s = np.array(areas_mean)[areas_i]
# implement a simple shading model
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Ellipse
from shapely.geometry import Point, Polygon
class SimpleFlicker():
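    '''
    Simple turbine shadow-flicker model: computes the tower and rotor shadow
    polygons cast at a given time of day and checks their overlap with a solar
    field defined by `solar_verts`.
    '''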
def __init__(self, solar_verts, T, turbine_locs):
        self.solar_verts = solar_verts
        self.turbine_locs = turbine_locs
def rotate(self, origin, point, angle):
"""
Rotate a point counterclockwise by a given angle around a given origin.
The angle should be given in radians.
"""
ox, oy = origin
px, py = point
qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy)
qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy)
return qx, qy
def find_angle(self, T_in):
# find the omega
from scipy import interpolate
T = np.array([6, 12, 18])
omega = np.array([-90, 0, 90])
f = interpolate.interp1d(T, omega)
if T_in < 6:
omega_out = 0
print('Sun is not high enough for a shadow...')
elif T_in > 18:
omega_out = 0
print('Sun is not high enough for a shadow...')
else:
omega_out = f(T_in)
return -np.radians(omega_out)
def calculate_shadow(self, time_idx, show=True):
# user inputs
T = time_idx # time (in military time)
d = 10 # number of days since the new year
# turbine parameters
HH = 90 # hub height
D = 126 # rotor diameter
wd = 5 # tower width is 5 m?
# turbine location
x_loc = self.turbine_locs[0]
y_loc = self.turbine_locs[1]
# position
lat = 39.7555
lon = -105.2211
# calculate the shadow
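        # solar declination (radians) from the day of year, and the hour angle
        # (radians) interpolated from the time of day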
delta = np.radians(-23.45 * np.cos( np.radians(360/365 * (d + 10)) ))
omega = self.find_angle(T)
# tower shadow
        Fx = -( np.cos(delta) * np.sin(omega) / (np.sin(np.radians(lat)) * np.sin(delta) + np.cos(np.radians(lat)) * np.cos(delta) * np.cos(omega)))
numY = ( np.sin(np.radians(lat)) * np.cos(delta) * np.cos(omega) - np.cos(np.radians(lat)) * np.cos(delta) )
denY = ( np.sin(np.radians(lat)) * np.sin(delta) + np.cos(np.radians(lat)) * np.cos(delta) * np.cos(omega) )
Fy = -numY / denY
# plot turbine shadow and rotor shadow
fig, ax = plt.subplots()
plt.plot(x_loc,y_loc,'bo')
plt.plot([x_loc + wd/2, (x_loc+wd/2) + (HH) * Fx], [y_loc, y_loc + (HH) * Fy],'k')
plt.plot([x_loc - wd/2, (x_loc-wd/2) + (HH) * Fx], [y_loc, y_loc + (HH) * Fy], 'k')
length = (HH + D/2) * Fx - (HH - D/2) * Fx
        angle = -90 - np.degrees(np.arctan(Fx / Fy))  # orientation of the rotor-shadow ellipse in degrees
a = length/2
b = D/2
x = np.linspace(-a,a,100)
y = b * np.sqrt( 1 - (x/a)**2 )
rx = np.zeros(len(x))
ry = np.zeros(len(y))
rx2 = np.zeros(len(x))
ry2 = np.zeros(len(y))
poly_rotor = []
for i in range(len(x)):
rx[i], ry[i] = self.rotate([0,0], [x[i],y[i]], np.radians(angle))
poly_rotor.append((rx[i]+(HH*Fx)+x_loc,ry[i]+(HH*Fy)+y_loc))
for i in range(len(x)):
rx2[i], ry2[i] = self.rotate([0, 0], [x[i], -y[i]], np.radians(angle))
poly_rotor.append((rx2[i]+(HH*Fx)+x_loc,ry2[i]+(HH*Fy)+y_loc))
plt.plot(rx+(HH*Fx)+x_loc,ry+(HH*Fy)+y_loc,'k')
plt.plot(rx2+(HH*Fx)+x_loc,ry2+(HH*Fy)+y_loc,'k')
for i in range(len(self.solar_verts)-1):
plt.plot([self.solar_verts[i][0], self.solar_verts[i+1][0]], [self.solar_verts[i][1], self.solar_verts[i+1][1]],'r')
plt.plot([self.solar_verts[0][0], self.solar_verts[i + 1][0]], [self.solar_verts[0][1], self.solar_verts[i + 1][1]], 'r')
plt.xlim([-500,500])
plt.ylim([-500, 500])
plt.grid()
if show:
plt.show()
poly_tower = [(x_loc + wd/2, y_loc), (x_loc - wd/2, y_loc),
(x_loc - wd/2 + (HH) * Fx, y_loc + HH * Fy), (x_loc + wd/2 + HH * Fx, y_loc + HH * Fy)]
return poly_rotor, poly_tower
def point_inside(self, point, coords):
# Create Point objects
p1 = Point(point[0], point[1])
# Create a Polygon
poly = Polygon(coords)
# check if point is within polygon
return p1.within(poly)
def determine_boundaries(self):
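        '''
        Return the bounding box (x_min, x_max, y_min, y_max) of the solar field
        vertices; the extrema are initialised to 0, so the field is assumed to
        span the origin.
        '''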
x_min = 0
x_max = 0
y_min = 0
y_max = 0
for point in self.solar_verts:
# check x points
if point[0] < x_min:
x_min = point[0]
elif point[0] > x_max:
x_max = point[0]
# check y points
if point[1] < y_min:
y_min = point[1]
elif point[1] > y_max:
y_max = point[1]
return x_min, x_max, y_min, y_max
def calculate_overlap(self, T, show=False):
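        '''
        Estimate the shading overlap at time T by sampling points inside the
        bounding box of the solar field (see `point_inside` and
        `calculate_shadow` for the shadow polygons).
        '''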
# determine xmin, xmax, ymin, ymax
        xmin, xmax, ymin, ymax = self.determine_boundaries()
# solar boundaries - assume rectangle
# generation points inside the solar_verts
N = 10
        x = np.linspace(xmin, xmax, N)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
'''
:py:mod:`basecamp.py` - The Everest base class
----------------------------------------------
The :py:obj:`everest` engine. All :py:obj:`everest` models
inherit from :py:class:`Basecamp`.
'''
from __future__ import division, print_function, absolute_import, \
unicode_literals
from . import missions
from .utils import AP_SATURATED_PIXEL, prange
from .mathutils import SavGol
from .masksolve import MaskSolve
from .gp import GetCovariance
from .search import Search
from .transit import TransitModel, TransitShape
from .dvs import OVERFIT
from scipy.linalg import block_diag, cholesky, cho_factor, cho_solve
import os
import numpy as np
import matplotlib.pyplot as pl
from scipy.ndimage import zoom
from itertools import combinations_with_replacement as multichoose
import logging
import platform
import subprocess
log = logging.getLogger(__name__)
__all__ = ['Basecamp', 'Overfitting']
class Overfitting(object):
"""Stores information on the overfitting metrics for a light curve."""
def __init__(self, O1, O2, O3, O4, O5, pdf):
"""Store values."""
self._O1 = O1
self._O2 = O2
self._O3 = O3
self._O4 = O4
self._O5 = O5
self.pdf = pdf
def masked(self, depth=0.01):
"""Return the masked overfitting metric for a given transit depth."""
return np.hstack(self._O5) / depth
def unmasked(self, depth=0.01):
"""Return the unmasked overfitting metric for a given transit depth."""
return 1 - (np.hstack(self._O2) +
np.hstack(self._O3) / depth) / np.hstack(self._O1)
def show(self):
"""Show the overfitting PDF summary."""
try:
if platform.system().lower().startswith('darwin'):
subprocess.call(['open', self.pdf])
elif os.name == 'nt':
os.startfile(self.pdf)
elif os.name == 'posix':
subprocess.call(['xdg-open', self.pdf])
else:
raise IOError("")
except IOError:
log.info("Unable to open the pdf. Try opening it manually:")
log.info(self.pdf)
class Basecamp(object):
'''
'''
@property
def _mission(self):
'''
'''
return getattr(missions, self.mission)
@_mission.setter
def _mission(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def dir(self):
'''
Returns the directory where the raw data and output for the target is
stored.
'''
return self._mission.TargetDirectory(self.ID, self.season)
@dir.setter
def dir(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def logfile(self):
'''
Returns the full path to the log file for the current run.
'''
return os.path.join(self.dir, '%s.log' % self.name)
@logfile.setter
def logfile(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def season(self):
"""
Return the current observing season.
For *K2*, this is the observing campaign, while for *Kepler*,
it is the current quarter.
"""
try:
self._season
except AttributeError:
self._season = self._mission.Season(self.ID)
if hasattr(self._season, '__len__'):
raise AttributeError(
"Please choose a campaign/season for this target: %s." %
self._season)
return self._season
@season.setter
def season(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def flux(self):
'''
The corrected/de-trended flux. This is computed by subtracting
the linear model from the raw SAP flux.
'''
return self.fraw - self.model
@flux.setter
def flux(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def fcor(self):
'''
The CBV-corrected de-trended flux.
'''
if self.XCBV is None:
return None
else:
return self.flux - self._mission.FitCBVs(self)
@fcor.setter
def fcor(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def norm(self):
'''
The PLD normalization. Typically, this is just the simple aperture
photometry flux (i.e., the sum of all the pixels in the aperture).
'''
return self._norm
@norm.setter
def norm(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def cdpps(self):
'''
The string version of the current value of the CDPP in *ppm*. This
displays the CDPP for each segment of the light curve individually
(if breakpoints are present).
'''
return " / ".join(["%.2f ppm" % c for c in self.cdpp_arr]) + \
(" (%.2f ppm)" % self.cdpp)
@cdpps.setter
def cdpps(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def mask(self):
'''
The array of indices to be masked. This is the union of the sets of
outliers, bad (flagged) cadences, transit cadences, and :py:obj:`NaN`
cadences.
'''
return np.array(list(set(np.concatenate([self.outmask, self.badmask,
self.transitmask, self.nanmask]))), dtype=int)
@mask.setter
def mask(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def weights(self):
'''
The PLD weights vector. The model may be computed by dotting the design
matrix :py:attr:`X` with this vector. Note that these are computed just
for plotting purpoeses -- the actual weights are never explicitly
computed during the de-trending, since it can be rather slow.
'''
if self._weights is None:
self.get_weights()
return self._weights
@weights.setter
def weights(self, value):
'''
'''
raise NotImplementedError("Can't set this property.")
@property
def transit_model(self):
'''
'''
try:
self._transit_model
except AttributeError:
self._transit_model = None
return self._transit_model
@transit_model.setter
def transit_model(self, val):
'''
'''
if val is None:
self._transit_model = None
self.transit_depth = None
else:
val = np.atleast_1d(val)
for tm in val:
assert type(tm) is TransitModel, \
"Kwarg `transit_model` must be an instance or " + \
"a list of instances of `everest.TransitModel`."
self._transit_model = val
self.transit_depth = None
def get_norm(self):
'''
Computes the PLD normalization. In the base class, this is just
the sum of all the pixel fluxes.
'''
self._norm = self.fraw
def X(self, i, j=slice(None, None, None)):
'''
Computes the design matrix at the given *PLD* order and the given
indices. The columns are the *PLD* vectors for the target at the
corresponding order, computed as the product of the fractional pixel
flux of all sets of :py:obj:`n` pixels, where :py:obj:`n` is the *PLD*
order.
'''
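        # X1 holds the fractional pixel fluxes; `multichoose` enumerates all
        # combinations (with replacement) of i + 1 pixel columns, and the product
        # over each combination gives the PLD regressors of order i + 1. If an
        # extra regressor block X1N is present, it is appended raised to the
        # same power.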
X1 = self.fpix[j] / self.norm[j].reshape(-1, 1)
X = np.product(list(multichoose(X1.T, i + 1)), axis=1).T
if self.X1N is not None:
return np.hstack([X, self.X1N[j] ** (i + 1)])
else:
return X
def plot_info(self, dvs):
'''
Plots miscellaneous de-trending information on the data
validation summary figure.
:param dvs: A :py:class:`dvs.DVS` figure instance
'''
axl, axc, axr = dvs.title()
axc.annotate("%s %d" % (self._mission.IDSTRING, self.ID),
xy=(0.5, 0.5), xycoords='axes fraction',
ha='center', va='center', fontsize=18)
axc.annotate(r"%.2f ppm $\rightarrow$ %.2f ppm" %
(self.cdppr, self.cdpp),
xy=(0.5, 0.2), xycoords='axes fraction',
ha='center', va='center', fontsize=8, color='k',
fontstyle='italic')
axl.annotate("%s %s%02d: %s" %
(self.mission.upper(),
self._mission.SEASONCHAR, self.season, self.name),
xy=(0.5, 0.5), xycoords='axes fraction',
ha='center', va='center', fontsize=12,
color='k')
axl.annotate(self.aperture_name if len(self.neighbors) == 0
else "%s, %d neighbors" %
(self.aperture_name, len(self.neighbors)),
xy=(0.5, 0.2), xycoords='axes fraction',
ha='center', va='center', fontsize=8, color='k',
fontstyle='italic')
axr.annotate("%s %.3f" % (self._mission.MAGSTRING, self.mag),
xy=(0.5, 0.5), xycoords='axes fraction',
ha='center', va='center', fontsize=12,
color='k')
if not np.isnan(self.cdppg) and self.cdppg > 0:
axr.annotate(r"GP %.3f ppm" % (self.cdppg),
xy=(0.5, 0.2), xycoords='axes fraction',
ha='center', va='center', fontsize=8, color='k',
fontstyle='italic')
def compute(self):
'''
Compute the model for the current value of lambda.
'''
# Is there a transit model?
if self.transit_model is not None:
return self.compute_joint()
log.info('Computing the model...')
# Loop over all chunks
model = [None for b in self.breakpoints]
for b, brkpt in enumerate(self.breakpoints):
# Masks for current chunk
m = self.get_masked_chunk(b)
c = self.get_chunk(b)
# This block of the masked covariance matrix
mK = GetCovariance(self.kernel, self.kernel_params,
self.time[m], self.fraw_err[m])
# Get median
med = np.nanmedian(self.fraw[m])
# Normalize the flux
f = self.fraw[m] - med
# The X^2 matrices
A = np.zeros((len(m), len(m)))
B = np.zeros((len(c), len(m)))
# Loop over all orders
for n in range(self.pld_order):
# Only compute up to the current PLD order
if (self.lam_idx >= n) and (self.lam[b][n] is not None):
XM = self.X(n, m)
XC = self.X(n, c)
A += self.lam[b][n] * np.dot(XM, XM.T)
B += self.lam[b][n] * np.dot(XC, XM.T)
del XM, XC
# Compute the model
W = np.linalg.solve(mK + A, f)
model[b] = np.dot(B, W)
# Free up some memory
del A, B, W
# Join the chunks after applying the correct offset
if len(model) > 1:
# First chunk
self.model = model[0][:-self.bpad]
# Center chunks
for m in model[1:-1]:
# Join the chunks at the first non-outlier cadence
i = 1
while len(self.model) - i in self.mask:
i += 1
offset = self.model[-i] - m[self.bpad - i]
self.model = np.concatenate(
[self.model, m[self.bpad:-self.bpad] + offset])
# Last chunk
i = 1
while len(self.model) - i in self.mask:
i += 1
offset = self.model[-i] - model[-1][self.bpad - i]
self.model = np.concatenate(
[self.model, model[-1][self.bpad:] + offset])
else:
self.model = model[0]
# Subtract the global median
self.model -= np.nanmedian(self.model)
# Get the CDPP and reset the weights
self.cdpp_arr = self.get_cdpp_arr()
self.cdpp = self.get_cdpp()
self._weights = None
def compute_joint(self):
'''
Compute the model in a single step, allowing for a light curve-wide
transit model. This is a bit more expensive to compute.
'''
# Init
log.info('Computing the joint model...')
A = [None for b in self.breakpoints]
B = [None for b in self.breakpoints]
# We need to make sure that we're not masking the transits we are
# trying to fit!
# NOTE: If there happens to be an index that *SHOULD* be masked during
# a transit (cosmic ray, detector anomaly), update `self.badmask`
# to include that index.
# Bad data points are *never* used in the regression.
if self.transit_model is not None:
outmask = np.array(self.outmask)
transitmask = np.array(self.transitmask)
transit_inds = np.where(
np.sum([tm(self.time) for tm in self.transit_model],
axis=0) < 0)[0]
self.outmask = np.array(
[i for i in self.outmask if i not in transit_inds])
self.transitmask = np.array(
[i for i in self.transitmask if i not in transit_inds])
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
# Masks for current chunk
m = self.get_masked_chunk(b, pad=False)
c = self.get_chunk(b, pad=False)
# The X^2 matrices
A[b] = np.zeros((len(m), len(m)))
B[b] = np.zeros((len(c), len(m)))
# Loop over all orders
for n in range(self.pld_order):
# Only compute up to the current PLD order
if (self.lam_idx >= n) and (self.lam[b][n] is not None):
XM = self.X(n, m)
XC = self.X(n, c)
A[b] += self.lam[b][n] * np.dot(XM, XM.T)
B[b] += self.lam[b][n] * np.dot(XC, XM.T)
del XM, XC
# Merge chunks. BIGA and BIGB are sparse, but unfortunately
# scipy.sparse doesn't handle sparse matrix inversion all that
# well when the *result* is not itself sparse. So we're sticking
# with regular np.linalg.
BIGA = block_diag(*A)
del A
BIGB = block_diag(*B)
del B
# Compute the full covariance matrix
mK = GetCovariance(self.kernel, self.kernel_params, self.apply_mask(
self.time), self.apply_mask(self.fraw_err))
# The normalized, masked flux array
f = self.apply_mask(self.fraw)
med = np.nanmedian(f)
f -= med
# Are we computing a joint transit model?
if self.transit_model is not None:
# Get the unmasked indices
m = self.apply_mask()
# Subtract off the mean total transit model
mean_transit_model = med * \
np.sum([tm.depth * tm(self.time[m])
for tm in self.transit_model], axis=0)
f -= mean_transit_model
# Now add each transit model to the matrix of regressors
for tm in self.transit_model:
XM = tm(self.time[m]).reshape(-1, 1)
XC = tm(self.time).reshape(-1, 1)
BIGA += med ** 2 * tm.var_depth * np.dot(XM, XM.T)
BIGB += med ** 2 * tm.var_depth * np.dot(XC, XM.T)
del XM, XC
# Dot the inverse of the covariance matrix
W = np.linalg.solve(mK + BIGA, f)
self.model = np.dot(BIGB, W)
# Compute the transit weights and maximum likelihood transit model
w_trn = med ** 2 * np.concatenate([tm.var_depth * np.dot(
tm(self.time[m]).reshape(1, -1), W)
for tm in self.transit_model])
self.transit_depth = np.array(
[med * tm.depth + w_trn[i] for i, tm in
enumerate(self.transit_model)]) / med
# Remove the transit prediction from the model
self.model -= np.dot(np.hstack([tm(self.time).reshape(-1, 1)
for tm in self.transit_model]),
w_trn)
else:
# No transit model to worry about
W = np.linalg.solve(mK + BIGA, f)
self.model = np.dot(BIGB, W)
# Subtract the global median
self.model -= np.nanmedian(self.model)
# Restore the mask
if self.transit_model is not None:
self.outmask = outmask
self.transitmask = transitmask
# Get the CDPP and reset the weights
self.cdpp_arr = self.get_cdpp_arr()
self.cdpp = self.get_cdpp()
self._weights = None
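    # Hedged summary of the linear algebra above (added for illustration; not
    # part of the original class): writing X_m for the masked and X_c for the
    # full PLD regressors of each chunk, the joint model computed above is the
    # regularized regression / GP prediction
    #
    #     model = B (K + A)^{-1} f
    #
    # with A = sum_n lam_n X_m X_m^T and B = sum_n lam_n X_c X_m^T assembled
    # block-diagonally over the chunks, K the astrophysical covariance of the
    # masked cadences, and f the masked, median-subtracted flux.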
def apply_mask(self, x=None):
'''
        Returns an array of indices corresponding to the non-outlier
        cadences (i.e., the data that survive the outlier mask).
:param numpy.ndarray x: If specified, returns the masked version of \
:py:obj:`x` instead. Default :py:obj:`None`
'''
if x is None:
return np.delete(np.arange(len(self.time)), self.mask)
else:
return np.delete(x, self.mask, axis=0)
def get_chunk(self, b, x=None, pad=True):
'''
Returns the indices corresponding to a given light curve chunk.
:param int b: The index of the chunk to return
        :param numpy.ndarray x: If specified, returns the elements of \
            :py:obj:`x` in the chunk instead. Default :py:obj:`None`
'''
M = np.arange(len(self.time))
if b > 0:
res = M[(M > self.breakpoints[b - 1] - int(pad) * self.bpad)
& (M <= self.breakpoints[b] + int(pad) * self.bpad)]
else:
res = M[M <= self.breakpoints[b] + int(pad) * self.bpad]
if x is None:
return res
else:
return x[res]
def get_masked_chunk(self, b, x=None, pad=True):
'''
Same as :py:meth:`get_chunk`, but first removes the outlier indices.
:param int b: The index of the chunk to return
        :param numpy.ndarray x: If specified, returns the masked chunk of \
            :py:obj:`x` instead. Default :py:obj:`None`
'''
M = self.apply_mask(np.arange(len(self.time)))
if b > 0:
res = M[(M > self.breakpoints[b - 1] - int(pad) * self.bpad)
& (M <= self.breakpoints[b] + int(pad) * self.bpad)]
else:
res = M[M <= self.breakpoints[b] + int(pad) * self.bpad]
if x is None:
return res
else:
return x[res]
def get_weights(self):
'''
Computes the PLD weights vector :py:obj:`w`.
        .. warning:: Deprecated and not thoroughly tested.
'''
log.info("Computing PLD weights...")
# Loop over all chunks
weights = [None for i in range(len(self.breakpoints))]
for b, brkpt in enumerate(self.breakpoints):
# Masks for current chunk
m = self.get_masked_chunk(b)
c = self.get_chunk(b)
# This block of the masked covariance matrix
_mK = GetCovariance(self.kernel, self.kernel_params,
self.time[m], self.fraw_err[m])
# This chunk of the normalized flux
f = self.fraw[m] - np.nanmedian(self.fraw)
# Loop over all orders
_A = [None for i in range(self.pld_order)]
for n in range(self.pld_order):
if self.lam_idx >= n:
X = self.X(n, m)
_A[n] = np.dot(X, X.T)
del X
# Compute the weights
A = np.sum([l * a for l, a in zip(self.lam[b], _A)
if l is not None], axis=0)
W = np.linalg.solve(_mK + A, f)
weights[b] = [l * np.dot(self.X(n, m).T, W)
for n, l in enumerate(self.lam[b]) if l is not None]
self._weights = weights
def get_cdpp_arr(self, flux=None):
'''
Returns the CDPP value in *ppm* for each of the
chunks in the light curve.
'''
if flux is None:
flux = self.flux
return np.array([self._mission.CDPP(flux[self.get_masked_chunk(b)],
cadence=self.cadence)
for b, _ in enumerate(self.breakpoints)])
def get_cdpp(self, flux=None):
'''
Returns the scalar CDPP for the light curve.
'''
if flux is None:
flux = self.flux
return self._mission.CDPP(self.apply_mask(flux), cadence=self.cadence)
def plot_aperture(self, axes, labelsize=8):
'''
Plots the aperture and the pixel images at the beginning, middle,
and end of the time series. Also plots a high resolution image of
the target, if available.
'''
log.info('Plotting the aperture...')
# Get colormap
plasma = pl.get_cmap('plasma')
plasma.set_bad(alpha=0)
# Get aperture contour
def PadWithZeros(vector, pad_width, iaxis, kwargs):
vector[:pad_width[0]] = 0
vector[-pad_width[1]:] = 0
return vector
ny, nx = self.pixel_images[0].shape
contour = np.zeros((ny, nx))
contour[np.where(self.aperture)] = 1
contour = np.lib.pad(contour, 1, PadWithZeros)
highres = zoom(contour, 100, order=0, mode='nearest')
extent = np.array([-1, nx, -1, ny])
# Plot first, mid, and last TPF image
title = ['start', 'mid', 'end']
for i, image in enumerate(self.pixel_images):
ax = axes[i]
ax.imshow(image, aspect='auto',
interpolation='nearest', cmap=plasma)
ax.contour(highres, levels=[0.5], extent=extent,
origin='lower', colors='r', linewidths=1)
# Check for saturated columns
for x in range(self.aperture.shape[0]):
for y in range(self.aperture.shape[1]):
if self.aperture[x][y] == AP_SATURATED_PIXEL:
ax.fill([y - 0.5, y + 0.5, y + 0.5, y - 0.5],
[x - 0.5, x - 0.5, x + 0.5, x + 0.5],
fill=False, hatch='xxxxx', color='r', lw=0)
ax.axis('off')
ax.set_xlim(-0.7, nx - 0.3)
ax.set_ylim(-0.7, ny - 0.3)
ax.annotate(title[i], xy=(0.5, 0.975), xycoords='axes fraction',
ha='center', va='top', size=labelsize, color='w')
if i == 1:
for source in self.nearby:
ax.annotate('%.1f' % source['mag'],
xy=(source['x'] - source['x0'],
source['y'] - source['y0']),
ha='center', va='center', size=labelsize - 2,
color='w', fontweight='bold')
# Plot hi res image
if self.hires is not None:
ax = axes[-1]
ax.imshow(self.hires, aspect='auto',
extent=(-0.5, nx - 0.5, -0.5, ny - 0.5),
interpolation='bicubic', cmap=plasma)
ax.contour(highres, levels=[0.5], extent=extent,
origin='lower', colors='r', linewidths=1)
ax.axis('off')
ax.set_xlim(-0.7, nx - 0.3)
ax.set_ylim(-0.7, ny - 0.3)
ax.annotate('hires', xy=(0.5, 0.975), xycoords='axes fraction',
ha='center', va='top', size=labelsize, color='w')
else:
ax = axes[-1]
ax.axis('off')
def search(self, pos_tol=2.5, neg_tol=50., clobber=False,
name='search', **kwargs):
        '''
        Search the light curve for transit-like signals and save/plot the
        resulting delta chi-squared time series.
        '''
log.info("Searching for transits...")
fname = os.path.join(self.dir, self.name + '_%s.npz' % name)
pname = os.path.join(self.dir, self.name + '_%s.pdf' % name)
# Compute
if not os.path.exists(fname) or clobber:
time, depth, vardepth, delchisq = Search(
self, pos_tol=pos_tol, neg_tol=neg_tol, **kwargs)
data = np.vstack([time, depth, vardepth, delchisq]).T
header = "TIME, DEPTH, VARDEPTH, DELTACHISQ"
np.savetxt(fname, data, fmt=str('%.10e'), header=header)
else:
time, depth, vardepth, delchisq = np.loadtxt(
fname, unpack=True, skiprows=1)
# Plot
if not os.path.exists(pname) or clobber:
fig, ax = pl.subplots(1, figsize=(10, 4))
ax.plot(time, delchisq, lw=1)
ax.set_ylabel(r'$\Delta \chi^2$', fontsize=18)
ax.set_xlabel('Time (days)', fontsize=18)
ax.set_xlim(time[0], time[-1])
fig.savefig(pname, bbox_inches='tight')
pl.close()
return time, depth, vardepth, delchisq
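    # Hedged usage sketch (added for illustration; not part of the original
    # class): assuming `star` is a de-trended light curve object exposing this
    # method, a transit search and a quick look at its strongest peak could be
    # done roughly as follows.
    #
    #   time, depth, vardepth, delchisq = star.search(pos_tol=2.5, neg_tol=50.)
    #   t_best = time[np.argmax(delchisq)]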
def overfit(self, tau=None, plot=True, clobber=False, w=9, **kwargs):
r"""
Compute the masked & unmasked overfitting metrics for the light curve.
This routine injects a transit model given by `tau` at every cadence
in the light curve and recovers the transit depth when (1) leaving
the transit unmasked and (2) masking the transit prior to performing
regression.
:param tau: A function or callable that accepts two arguments, \
`time` and `t0`, and returns an array corresponding to a \
zero-mean, unit depth transit model centered at \
`t0` and evaluated at `time`. \
The easiest way to provide this is to use an instance of \
:py:class:`everest.transit.TransitShape`. Default is \
:py:class:`everest.transit.TransitShape(dur=0.1)`, a transit \
            with solar-like limb darkening and a duration of 0.1 days.
:param bool plot: Plot the results as a PDF? Default :py:obj:`True`
:param bool clobber: Overwrite the results if present? Default \
:py:obj:`False`
:param int w: The size of the masking window in cadences for \
computing the masked overfitting metric. Default `9` \
(about 4.5 hours for `K2` long cadence).
:returns: An instance of `everest.basecamp.Overfitting`.
"""
fname = os.path.join(self.dir, self.name + '_overfit.npz')
figname = os.path.join(self.dir, self.name)
# Compute
if not os.path.exists(fname) or clobber:
# Baseline
med = np.nanmedian(self.fraw)
# Default transit model
if tau is None:
tau = TransitShape(dur=0.1)
# The overfitting metrics
O1 = [None for brkpt in self.breakpoints]
O2 = [None for brkpt in self.breakpoints]
O3 = [None for brkpt in self.breakpoints]
O4 = [None for brkpt in self.breakpoints]
O5 = [None for brkpt in self.breakpoints]
# Loop over all chunks
for b, brkpt in enumerate(self.breakpoints):
# Masks for current chunk
m = self.get_masked_chunk(b, pad=False)
time = self.time[m]
ferr = self.fraw_err[m] / med
y = self.fraw[m] / med - 1
# The metrics we're computing here
O1[b] = np.zeros(len(y)) * np.nan
O2[b] = np.zeros(len(y)) * np.nan
O3[b] = np.zeros(len(y)) * np.nan
O4[b] = np.zeros(len(y)) * np.nan
O5[b] = np.zeros(len(y)) * np.nan
# Compute the astrophysical covariance and its inverse
log.info("Computing the covariance...")
if self.kernel == 'Basic':
wh, am, ta = self.kernel_params
wh /= med
am /= med
kernel_params = [wh, am, ta]
elif self.kernel == 'QuasiPeriodic':
wh, am, ga, pe = self.kernel_params
wh /= med
am /= med
kernel_params = [wh, am, ga, pe]
K = GetCovariance(self.kernel, kernel_params, time, ferr)
Kinv = cho_solve((cholesky(K), False), np.eye(len(time)))
# Loop over all orders
log.info("Computing some large matrices...")
X = [None for n in range(self.pld_order)]
XL = [None for n in range(self.pld_order)]
XLX = [None for n in range(self.pld_order)]
for n in range(self.pld_order):
if (self.lam_idx >= n) and (self.lam[b][n] is not None):
X[n] = self.X(n, m, **kwargs)
XL[n] = (self.lam[b][n] / med ** 2) * X[n]
XLX[n] = np.dot(XL[n], X[n].T)
X = np.hstack(X)
XL = np.hstack(XL)
XLX = np.sum(XLX, axis=0)
# The full covariance
C = XLX + K
# The unmasked linear problem
log.info("Solving the unmasked linear problem...")
m = np.dot(XLX, np.linalg.solve(C, y))
m -= np.nanmedian(m)
f = y - m
R = np.linalg.solve(C, XLX.T).T
# The masked linear problem
log.info("Solving the masked linear problem...")
A = MaskSolve(C, y, w=w)
# Now loop through and compute the metric
log.info("Computing the overfitting metrics...")
for n in prange(len(y)):
#
# *** Unmasked overfitting metric ***
#
# Evaluate the sparse transit model
TAU = tau(time, t0=time[n])
i = np.where(TAU < 0)[0]
TAU = TAU.reshape(-1, 1)
# Fast sparse algebra
AA = np.dot(np.dot(TAU[i].T, Kinv[i, :][:, i]), TAU[i])
BB = np.dot(TAU[i].T, Kinv[i, :])
CC = TAU - np.dot(R[:, i], TAU[i])
O1[b][n] = AA
O2[b][n] = np.dot(BB, CC)
                O3[b][n] = np.dot(BB, f)
# -*- coding: utf-8 -*-
"""
Created on Thu Jan 8 21:33:23 2015
@author: rmitch
"""
from __future__ import print_function
import numpy as np
import mahotas
import mahotas.features
# perform any required initialization, and add any algorithm specific fields
# to the output header
def initAlgorithm( hdr ):
hdr[ "GLCM Entropy" ] = "log2"
return
def _getGLCMTestImage():
    testImage = np.array( [[0,0,1,1], [0,0,1,1], [0,2,2,2], [2,2,3,3]] )
    return testImage
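# Hedged usage sketch (added for illustration; not part of the original file):
# mahotas.features.haralick computes GLCM-based texture descriptors for an
# integer-valued image, one row of 13 features per co-occurrence direction.
# The call on the small test image below is only meant to show the intended
# use of _getGLCMTestImage.
def _exampleGLCMFeatures():
    img = _getGLCMTestImage()
    # shape (4, 13): 13 Haralick features for each of the 4 GLCM directions
    return mahotas.features.haralick(img)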
# emacs: -*- mode: python; py-indent-offset: 4; indent-tabs-mode: nil -*-
# vi: set ft=python sts=4 ts=4 sw=4 et:
from __future__ import absolute_import
import numpy as np
from nipy.algorithms.graph.field import field_from_coo_matrix_and_data
from ..hierarchical_parcellation import hparcel
from ...utils.simul_multisubject_fmri_dataset import surrogate_2d_dataset
from ..parcellation import MultiSubjectParcellation
from ..discrete_domain import grid_domain_from_binary_array
def test_parcel_interface():
""" Simply test parcellation interface
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
data = np.random.randn(np.prod(shape))
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
tmp = np.array([np.sum(u == k) for k in range(nb_parcel)])
#instantiate a parcellation
msp = MultiSubjectParcellation(domain, u, u)
assert msp.nb_parcel == nb_parcel
assert msp.nb_subj == 1
assert (msp.population().ravel() == tmp).all()
def test_parcel_interface_multi_subj():
""" test parcellation interface, with multiple subjects
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
nb_subj = 5
v = []
for s in range(nb_subj):
data = np.random.randn(np.prod(shape))
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
v.append(u)
v = np.array(v).T
tmp = np.array([np.sum(v == k, 0) for k in range(nb_parcel)])
#instantiate a parcellation
msp = MultiSubjectParcellation(domain, u, v)
assert msp.nb_parcel == nb_parcel
assert msp.nb_subj == nb_subj
assert (msp.population() == tmp).all()
def test_parcel_feature():
""" Simply test parcellation feature interface
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
data = np.random.randn(np.prod(shape), 1)
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
#instantiate a parcellation
msp = MultiSubjectParcellation(domain, u, u)
msp.make_feature('data', data)
assert msp.get_feature('data').shape == (nb_parcel, 1)
# test with a copy
msp2 = msp.copy()
assert (msp2.get_feature('data') == msp2.get_feature('data')).all()
# test a multi_dimensional feature
dim = 4
msp.make_feature('new', np.random.randn(np.prod(shape), 1, dim))
assert msp.get_feature('new').shape == (nb_parcel, 1, dim)
def test_parcel_feature_multi_subj():
""" Test parcellation feature interface with multiple subjects
"""
# prepare some data
shape = (5, 5, 5)
nb_parcel = 10
nb_subj = 5
v = []
for s in range(nb_subj):
data = np.random.randn(np.prod(shape))
domain = grid_domain_from_binary_array(np.ones(shape))
g = field_from_coo_matrix_and_data(domain.topology, data)
u, J0 = g.ward(nb_parcel)
v.append(u)
v = np.array(v).T
msp = MultiSubjectParcellation(domain, u, v)
# test a multi_dimensional feature
# dimension 1
msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj))
assert msp.get_feature('data').shape == (nb_parcel, nb_subj)
#dimension>1
dim = 4
    msp.make_feature('data', np.random.randn(np.prod(shape), nb_subj, dim))
    assert msp.get_feature('data').shape == (nb_parcel, nb_subj, dim)
# -*- coding: utf-8 -*-
#
# Copyright (c) 2018 Leland Stanford Junior University
# Copyright (c) 2018 The Regents of the University of California
#
# This file is part of pelicun.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its contributors
# may be used to endorse or promote products derived from this software without
# specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
# You should have received a copy of the BSD 3-Clause License along with
# pelicun. If not, see <http://www.opensource.org/licenses/>.
#
# Contributors:
# <NAME>
"""
This subpackage performs system tests on the control module of pelicun.
"""
import pytest
import numpy as np
from numpy.testing import assert_allclose
from scipy.stats import truncnorm as tnorm
from copy import deepcopy
import os, sys, inspect
current_dir = os.path.dirname(
os.path.abspath(inspect.getfile(inspect.currentframe())))
parent_dir = os.path.dirname(current_dir)
sys.path.insert(0,os.path.dirname(parent_dir))
from pelicun.control import *
from pelicun.uq import mvn_orthotope_density as mvn_od
from pelicun.tests.test_pelicun import prob_allclose, prob_approx
# -----------------------------------------------------------------------------
# FEMA_P58_Assessment
# -----------------------------------------------------------------------------
def test_FEMA_P58_Assessment_central_tendencies():
"""
Perform a loss assessment with customized inputs that reduce the
dispersion of calculation parameters to negligible levels. This allows us
to test the results against pre-defined reference values in spite of the
randomness involved in the calculations.
"""
base_input_path = 'resources/'
DL_input = base_input_path + 'input data/' + "DL_input_test.json"
EDP_input = base_input_path + 'EDP data/' + "EDP_table_test.out"
A = FEMA_P58_Assessment()
A.read_inputs(DL_input, EDP_input, verbose=False)
A.define_random_variables()
# -------------------------------------------------- check random variables
# EDP
RV_EDP = list(A._EDP_dict.values())[0]
assert RV_EDP.theta[0] == pytest.approx(0.5 * g)
assert RV_EDP.theta[1] == pytest.approx(0.5 * g * 1e-6, abs=1e-7)
assert RV_EDP._distribution == 'lognormal'
# QNT
assert A._QNT_dict is None
#RV_QNT = A._RV_dict['QNT']
#assert RV_QNT is None
# FRG
RV_FRG = list(A._FF_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_FRG]).T
assert_allclose(thetas, np.array([0.444, 0.6, 0.984]) * g, rtol=0.01)
assert_allclose(betas, np.array([0.3, 0.4, 0.5]), rtol=0.01)
rho = RV_FRG[0].RV_set.Rho()
assert_allclose(rho, np.ones((3, 3)), rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_FRG])
# RED
RV_RED = list(A._DV_RED_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_RED]).T
assert_allclose(mus, np.ones(2), rtol=0.01)
assert_allclose(sigmas, np.array([1e-4, 1e-4]), rtol=0.01)
rho = RV_RED[0].RV_set.Rho()
assert_allclose(rho, np.array([[1, 0], [0, 1]]), rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_RED])
assert_allclose (RV_RED[0].truncation_limits, [0., 2.], rtol=0.01)
assert_allclose (RV_RED[1].truncation_limits, [0., 4.], rtol=0.01)
# INJ
RV_INJ = list(A._DV_INJ_dict.values())
mus, sigmas = np.array([rv.theta for rv in RV_INJ]).T
assert_allclose(mus, np.ones(4), rtol=0.01)
assert_allclose(sigmas, np.ones(4) * 1e-4, rtol=0.01)
rho = RV_INJ[0].RV_set.Rho()
rho_target = np.zeros((4, 4))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'normal' for rv in RV_INJ])
assert_allclose(RV_INJ[0].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[1].truncation_limits, [0., 10./3.], rtol=0.01)
assert_allclose(RV_INJ[2].truncation_limits, [0., 10.], rtol=0.01)
assert_allclose(RV_INJ[3].truncation_limits, [0., 10.], rtol=0.01)
# REP
RV_REP = list(A._DV_REP_dict.values())
thetas, betas = np.array([rv.theta for rv in RV_REP]).T
assert_allclose(thetas, np.ones(6), rtol=0.01)
assert_allclose(betas, np.ones(6) * 1e-4, rtol=0.01)
rho = RV_REP[0].RV_set.Rho()
rho_target = np.zeros((6, 6))
np.fill_diagonal(rho_target, 1.)
assert_allclose(rho, rho_target, rtol=0.01)
assert np.all([rv.distribution == 'lognormal' for rv in RV_REP])
# ------------------------------------------------------------------------
A.define_loss_model()
# QNT (deterministic)
QNT = A._FG_dict['T0001.001']._performance_groups[0]._quantity
assert QNT == pytest.approx(50., rel=0.01)
A.calculate_damage()
# ------------------------------------------------ check damage calculation
# TIME
T_check = A._TIME.describe().T.loc[['hour','month','weekday?'],:]
assert_allclose(T_check['mean'], np.array([11.5, 5.5, 5. / 7.]), rtol=0.05)
assert_allclose(T_check['min'], np.array([0., 0., 0.]), rtol=0.01)
assert_allclose(T_check['max'], np.array([23., 11., 1.]), rtol=0.01)
assert_allclose(T_check['50%'], np.array([12., 5., 1.]), atol=1.0)
assert_allclose(T_check['count'], np.array([10000., 10000., 10000.]),
rtol=0.01)
# POP
P_CDF = A._POP.describe(np.arange(1, 27) / 27.).iloc[:, 0].values[4:]
vals, counts = np.unique(P_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]), rtol=0.01)
assert_allclose(counts, np.array([14, 2, 7, 5]), atol=1)
# COL
COL_check = A._COL.describe().T
assert COL_check['mean'].values[0] == pytest.approx(0.5, rel=0.05)
assert len(A._ID_dict['non-collapse']) == pytest.approx(5000, rel=0.05)
assert len(A._ID_dict['collapse']) == pytest.approx(5000, rel=0.05)
# DMG
DMG_check = A._DMG.describe().T
assert_allclose(DMG_check['mean'], np.array([17.074, 17.074, 7.9361]),
rtol=0.1, atol=1.0)
assert_allclose(DMG_check['min'], np.zeros(3), rtol=0.01)
assert_allclose(DMG_check['max'], np.ones(3) * 50.0157, rtol=0.05)
# ------------------------------------------------------------------------
A.calculate_losses()
# -------------------------------------------------- check loss calculation
# RED
DV_RED = A._DV_dict['red_tag'].describe().T
assert_allclose(DV_RED['mean'], np.array([0.341344, 0.1586555]), rtol=0.1)
# INJ - collapse
DV_INJ_C = deepcopy(A._COL[['INJ-0', 'INJ-1']])
DV_INJ_C.dropna(inplace=True)
NC_count = DV_INJ_C.describe().T['count'][0]
assert_allclose(NC_count, np.ones(2) * 5000, rtol=0.05)
# lvl 1
vals, counts = np.unique(DV_INJ_C.iloc[:, 0].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.1, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# lvl 2
vals, counts = np.unique(DV_INJ_C.iloc[:, 1].values, return_counts=True)
assert_allclose(vals, np.array([0., 2.5, 5., 10.]) * 0.9, rtol=0.01)
assert_allclose(counts / NC_count, np.array([14, 2, 7, 5]) / 28., atol=0.01, rtol=0.1)
# INJ - non-collapse
DV_INJ_NC = deepcopy(A._DV_dict['injuries'])
DV_INJ_NC[0].dropna(inplace=True)
assert_allclose(DV_INJ_NC[0].describe().T['count'], np.ones(2) * 5000,
rtol=0.05)
# lvl 1 DS2
I_CDF = DV_INJ_NC[0].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 1 DS3
I_CDF = DV_INJ_NC[0].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.075, 0.15, 0.3]), rtol=0.01)
target_prob = np.array(
[0.8413445, 0., 0., 0.] + 0.1586555 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl 2 DS2
I_CDF = DV_INJ_NC[1].iloc[:, 0]
I_CDF = np.around(I_CDF, decimals=3)
vals, counts = np.unique(I_CDF, return_counts=True)
assert_allclose(vals, np.array([0., 0.025, 0.05, 0.1]), rtol=0.01)
target_prob = np.array(
[0.6586555, 0., 0., 0.] + 0.3413445 * np.array([14, 2, 7, 5]) / 28.)
assert_allclose(counts / NC_count, target_prob, atol=0.01, rtol=0.1)
# lvl2 DS3
I_CDF = DV_INJ_NC[1].iloc[:, 1]
I_CDF = np.around(I_CDF, decimals=3)
    vals, counts = np.unique(I_CDF, return_counts=True)
import os
import pandas as pd
import numpy as np
from matplotlib import pyplot as plt
import sys
import seaborn as sns
sns.set()
sns.set_context("paper")
from sklearn import metrics
# get colors from https://medialab.github.io/iwanthue/ or alternatively from http://phrogz.net/css/distinct-colors.html
colors_cycle = ["#a257d4",
"#e090bf",
"#64c9a3",
"#4b68ae",
"#dc8c2f",
"#cd41a7",
"#d9344f",
"#bc599a",
"#afa1e8",
"#48c1d8",
"#b54545",
"#919233",
"#9a78be",
"#59602a",
"#4e8e2c",
"#9db935",
"#9b563c",
"#e482df",
"#5995d3",
"#6a5198",
"#b05f84",
"#b563c3",
"#5f6b18",
"#a55c21",
"#5754c2",
"#277257",
"#4f9b5e",
"#8b6b29",
"#b8381c",
"#ad2f62",
"#97ba6d",
"#45c37c",
"#5fc250",
"#8c4c7b",
"#e06e87",
"#e2672a",
"#db7756",
"#974858",
"#35743b",
"#bbaf6c",
"#8c4099",
"#e44586",
"#ed5c4c",
"#389c84",
"#cfae3d",
"#eda377",
"#778749",
"#c5935a",
"#de8784",
"#757eec"]
def plot_cluster_composition(fraction_sites, directory, level, normalise=False, label='primary_site', shuffled=False,
algorithm='topsbm'):
sns.set(font_scale=0.8)
df_clusters = pd.read_csv("%s/%s/%s_level_%d_clusters.csv" % (directory, algorithm, algorithm, level), header=[0])
x = np.arange(1, 1 + len(df_clusters.columns))
fig = plt.figure(figsize=(15, 8))
ax = fig.subplots()
fraction_bar_plot(x, fraction_sites, ax)
ax.set_xlabel("cluster", fontsize=20)
if normalise:
ax.set_ylabel("fraction of nodes", fontsize=22)
else:
ax.set_ylabel("number of nodes", fontsize=20)
ax.set_title("%s%s distribution across clusters" % ("Shuffled " if shuffled else '', label), fontsize=20)
ax.legend(ncol=3, loc='upper right')
ax.tick_params(axis='both', labelsize=20)
plt.show()
fig.savefig("%s/%s/%s%sclustercomposition_l%d_%s.pdf" % (
directory, algorithm, "shuffled" if shuffled else '', "fraction_" if normalise else '', int(level), label))
def fraction_bar_plot(x, fraction_sites, ax=None):
global current_color
current_color = -1
if ax is None:
fig = plt.figure(figsize=(15, 8))
ax = fig.subplots()
bottom = np.zeros(len(x))
ymax = 0
for site, data in fraction_sites.items():
if np.max(data) == 0:
continue
ax.bar(x, data, label=site, bottom=bottom, color=get_color_cycle())
bottom = bottom + data
def get_Palette(site):
palette_map = dict({'Brain': 'Blues',
'Breast': 'Reds',
'Kidney': 'Greens',
'Lung': 'Oranges',
'Thyroid': 'Greys',
'Uterus': 'Purples',
'Prostate': 'BuGn',
'Ovary': 'BuPu',
'Lymph Nodes': 'OrRd',
'Soft Tissue': 'PuRd',
'Esophagus': 'YlGn',
'Stomach': 'YlRd',
'Bone Marrow': 'PuBuGn',
'Skin': 'YlOrRd',
'Adipose Tissue': 'YlOrBr',
'Blood': 'RdPu',
'Pancreas': 'OrRd',
'Testis': 'GnBu'})
for k in palette_map.keys():
if k in site:
return palette_map[k]
current_color = -1
def get_color_cycle():
global current_color
current_color += 1
if current_color >= len(colors_cycle):
current_color = 0
return colors_cycle[current_color]
def get_cluster_given_l(l, directory, algorithm='topsbm'):
df_clusters = pd.read_csv("%s/%s/%s_level_%d_clusters.csv" % (directory, algorithm, algorithm, l), header=[0],
index_col=None)
cluster = {}
for i, c in enumerate(df_clusters.columns):
cluster[i] = df_clusters[c].dropna().values
return cluster
def get_topic_given_l(l, directory, algorithm='topsbm'):
df_topics = pd.read_csv("%s/%s/%s_level_%d_topics.csv" % (directory, algorithm, algorithm, l), header=[0])
topic = {}
for i, c in enumerate(df_topics.columns):
topic[i] = df_topics[c].dropna().values
return topic
def get_fraction_sites(cluster, df_files, label='primary_site', normalise=False):
fraction_sites = {}
c_fraction_site = {}
for site in df_files[label].dropna().unique():
fraction_sites[site] = []
c_fraction_site[site] = 0
for i, c in enumerate(cluster):
for sample in cluster[i]:
foundsample = get_file(sample, df_files)
if foundsample is not None:
c_fraction_site[foundsample[label]] += 1
else:
if 'unknown' in c_fraction_site.keys():
c_fraction_site['unknown'] +=1
else:
c_fraction_site['unknown'] = 1
fraction_sites['unknown']=[]
for site in fraction_sites:
if normalise:
norm = float(len(cluster[i]))
else:
norm = 1
if norm > 0:
fraction_sites[site].append(c_fraction_site[site] / norm)
else:
fraction_sites[site].append(np.nan)
c_fraction_site[site] = 0
df = pd.DataFrame(data=fraction_sites).dropna(how='all', axis=0)
##put first columns that have high values in average
avgs = df.apply(lambda x: np.average(x.to_numpy()[x.to_numpy().nonzero()[0]]), axis=0)
df = df.transpose()
df.insert(0, 'avg', avgs)
df = df.sort_values(by=['avg'], axis=0, ascending=False).drop('avg', axis=1).transpose()
df = df.sort_values(by=[tissue for tissue in df.columns], axis=0, ascending=False)
return df.to_dict(orient='list')
def get_clustersinfo(cluster, fraction_sites):
clustersinfo = {
"maximum": [],
"homogeneity": [],
"sizes": [],
"nclasses": []
}
for icluster in cluster:
maximum = 0
homo = 0
size = 0
nclass = 0
site_maximum = ''
cumulative = 0
for site, data in fraction_sites.items():
cdata = data[icluster]
cumulative += cdata
if cdata > maximum:
maximum = cdata
site_maximum = site
if cdata > 0:
nclass += 1
# using fraction_items normalised
if cdata <= 1:
homo -= cdata * np.log(cdata)
size += cdata
if cumulative > 0:
clustersinfo['maximum'].append([float(maximum) / cumulative, site_maximum])
else:
clustersinfo['maximum'].append([0, site_maximum])
clustersinfo['sizes'].append(size)
clustersinfo['nclasses'].append(nclass)
clustersinfo['homogeneity'].append(1 - homo)
return clustersinfo
def plot_maximum(clustersinfo, cluster, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
fig = plt.figure(figsize=(15, 6))
ax = fig.subplots(1, 2)
bins = 10
real = np.array(clustersinfo['maximum'])[:, 0].astype(float)
ax[0].plot(np.sort(real), marker='o', ms=25, ls='')
ax[1].hist(np.sort(real), histtype='step', bins=bins, lw=4, density=True, range=(0.05, 1.05))
shuffled = False
if clustersinfo_shuffle is not None:
shuffled = np.array(clustersinfo_shuffle['maximum'])[:, 0].astype(float)
ax[0].plot(np.sort(shuffled), marker='o', ls='', ms=25)
ax[1].hist(np.sort(shuffled), histtype='step', bins=bins, lw=4, density=True, range=(0.05, 1.05))
shuffled = True
ax[0].plot(np.arange(len(cluster)), [0.8 for i in range(len(cluster))], visible=True, ls='--')
for axi in ax:
axi.tick_params(axis='both', labelsize=20)
ax[0].set_xlabel("cluster", fontsize=20)
ax[0].set_ylabel("maximum fraction\nwith same %s" % label, fontsize=20)
ax[0].set_ylim((0, 1.1))
ax[1].set_xlabel("maximum fraction\nwith same %s" % label, fontsize=20)
ax[1].set_ylabel("pdf", fontsize=20)
plt.rc('xtick', labelsize=18)
plt.rc('ytick', labelsize=18)
plt.show()
fig.savefig(
"%s/%s/%scluster_maximum_l%d_%s.pdf" % (directory, algorithm, "shuffled" if shuffled else '', level, label))
def plot_maximum_size(clustersinfo, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
fig = plt.figure(figsize=(15, 6))
x = np.array(clustersinfo['sizes']).astype(int)
y = np.array(clustersinfo['maximum'])[:, 0].astype(float)
plt.scatter(x, y, lw=10, label='clusters')
plt.xlim(0, np.max(x) + np.max(x) / 10)
plt.plot(np.linspace(0.5, x.max()), 1. / np.linspace(0.5, x.max()), label='uniform')
shuffled = False
if clustersinfo_shuffle is not None:
shuffled = True
x_shuffle = np.array(clustersinfo_shuffle['sizes']).astype(int)
y_shuffle = np.array(clustersinfo_shuffle['maximum'])[:, 0].astype(float)
plt.scatter(x_shuffle, y_shuffle, lw=10, label='clusters shuffled')
plt.xlim(0, np.max(x_shuffle) + np.max(x_shuffle) / 10)
plt.xlabel("cluster size", fontsize=20)
plt.ylabel("maximum fraction\nwith same %s" % label, fontsize=20)
plt.ylim((0, 1.1))
plt.legend(loc='best', fontsize=20)
plt.rc('xtick', labelsize=18)
plt.rc('ytick', labelsize=18)
plt.show()
fig.savefig(
"%s/%s/%sclusterhomosize_l%d_%s.pdf" % (directory, algorithm, "shuffled" if shuffled else '', level, label))
def plot_maximum_label(clustersinfo, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
fig = plt.figure(figsize=(10, 6))
x = np.array(clustersinfo['nclasses']).astype(int)
y = np.array(clustersinfo['maximum'])[:, 0].astype(float)
shuffled = False
plt.scatter(x, y, lw=10, alpha=0.9, label='clusters')
plt.plot(np.arange(1, np.max(x) + 2), 1. / np.arange(1, np.max(x) + 2), ls='--', c='cyan', label='uniform')
plt.xlim(0.95, np.max(x) + 0.5)
if clustersinfo_shuffle is not None:
x_shuffle = np.array(clustersinfo_shuffle['nclasses']).astype(int)
y_shuffle = np.array(clustersinfo_shuffle['maximum'])[:, 0].astype(float)
plt.scatter(x_shuffle, y_shuffle, lw=10, alpha=0.9, label='clusters shuffled')
plt.plot(np.arange(1, np.max(x_shuffle) + 2), 1. / np.arange(1, np.max(x_shuffle) + 2), ls='--', c='cyan',
label='')
shuffled = True
plt.xlim(0.95, np.max(x_shuffle) + 0.5)
plt.xlabel("number of labels", fontsize=20)
plt.ylabel("maximum fraction\nwith same %s" % label, fontsize=20)
plt.ylim((0, 1.1))
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.legend(loc='lower right', fontsize=20)
plt.show()
fig.savefig(
"%s/%s/%scluster_homon_l%d_%s.pdf" % (directory, algorithm, "shuffled" if shuffled else '', level, label))
def plot_labels_size(clustersinfo, label, level, directory, clustersinfo_shuffle=None, algorithm='topsbm'):
fig = plt.figure(figsize=(10, 6))
x = np.array(clustersinfo['sizes']).astype(float)
y = np.array(clustersinfo['nclasses']).astype(int)
plt.xlim(x.min() - 10, x.max() + 5)
plt.ylim(y.min() - 2, y.max() + 5)
shuffled = False
plt.scatter(x, y, lw=10, alpha=0.9, label='clusters')
if clustersinfo_shuffle is not None:
x_shuffle = np.array(clustersinfo_shuffle['sizes']).astype(float)
y_shuffle = np.array(clustersinfo_shuffle['nclasses']).astype(int)
plt.scatter(x_shuffle, y_shuffle, lw=10, alpha=0.9, label='clusters shuffled')
plt.xlim(x.min() - 10, x_shuffle.max() + 5)
plt.ylim(y.min() - 2, y_shuffle.max() + 8)
shuffled = True
plt.xlabel("cluster size", fontsize=20)
plt.ylabel("number of labels", fontsize=20)
plt.legend(loc='upper right', fontsize=20)
plt.rc('xtick', labelsize=16)
plt.rc('ytick', labelsize=16)
plt.show()
fig.savefig(
"%s/%s/%scluster_shuffle_label_size_l%d_%s.pdf" % (
directory, algorithm, "shuffled" if shuffled else '', level, label))
def make_heatmap(fraction_sites, directory, label, level, shuffled=False, normalise=False, algorithm='topsbm'):
sns.set(font_scale=2)
found_classes = []
for site, data in fraction_sites.items():
if np.max(data) == 0:
continue
found_classes.append(site)
for arr in fraction_sites.values():
x = len(arr)
break
x = np.arange(1, 1 + x)
fig = plt.figure(figsize=(30, 10))
fig.subplots(1)
sns.heatmap(pd.DataFrame(data=fraction_sites).loc[:, found_classes].transpose(), vmin=0, cmap="RdYlBu_r",
xticklabels=x)
fig.savefig("%s/%s/%sheatmap_cluster%s_l%d_%s.pdf" % (
directory, algorithm, "shuffled" if shuffled else '', "fraction_" if normalise else '', int(level), label))
def get_file(sample, df_file):
for fullsample in df_file.index.values:
if sample in fullsample:
return df_file.loc[fullsample, :]
return None
def define_labels(cluster, df_files, label='primary_site', verbose=False):
true_labels = []
predicted_labels = []
for c in cluster:
if verbose:
print(c)
for sample in cluster[c]:
try:
true_labels.append(get_file(sample, df_files)[label])
predicted_labels.append(c)
except:
true_labels.append('')
predicted_labels.append('')
print(*sys.exc_info())
print("error searching %s in %s" % (label, sample))
_, true_labels = np.unique(true_labels, return_inverse=True)
return true_labels, predicted_labels
def add_score_lines(ax, scores, labels=None, h=False, c=False, alpha=0.8, **kwargs):
'''
    Add the score curves stored in *scores* to *ax*.
    Also plot homogeneity and completeness curves if *h* or *c* is True.
'''
colors = {
'primary_site': 'blue',
'hsbm': 'blue',
'secondary_site': 'red',
'status': 'red',
'hSBM': 'blue',
'mixed': 'green',
'hierhsbm': 'purple',
'hsbm->hierachical': 'purple',
'disease_type': 'red',
'shuffle': 'orange',
'tm': 'darkcyan',
'cc': 'darkred',
'disease_tissue': 'purple',
'hierarchical': 'darkgreen',
'lda': 'violet',
'RPPA Clusters': 'red',
'wgcna': 'purple'
}
for label in labels:
if label not in scores.keys():
print("No score for %s"%label)
continue
if label not in colors.keys():
colors[label]='darkblue'
xl = scores[label]['xl']
if h:
ax.plot(xl, scores[label]['h'], ls='-.', c=colors[label], marker='x', lw=0.5, ms=25, alpha=alpha,
label='homogeneity - %s' % label)
if c:
ax.plot(xl, scores[label]['c'], ls=':', c=colors[label], marker='<', lw=0.5, ms=25, alpha=alpha,
                    label='completeness - %s' % label)
if len(scores[label]['V']) == len(xl):
ax.plot(xl, scores[label]['V'], label='%s' % label, ls='-', c=colors[label], marker='o', lw=0.5, ms=25,
**kwargs)
else:
            raise ValueError("xl has the wrong length")
customize_metric_plot(ax, xl)
def customize_metric_plot(ax, xl):
ax.set_xlabel("Number of clusters", fontsize=22)
ax.set_ylabel("NMI score", fontsize=22)
ax.set_ylim((0, 1.1))
ax.set_xlim(1, np.max(xl)*1.1)
ax.set_xscale('log')
ax.legend(loc='best', fontsize=24)
def plot_topic_size(directory, l, algorithm='topsbm'):
df_topics = pd.read_csv("%s/%s/%s_level_%d_topics.csv" % (directory, algorithm, algorithm, l))
sizes = []
for t in df_topics.columns:
sizes.append(len(df_topics.loc[:, t].dropna()))
bins = np.linspace(0.5, np.max(sizes) + 0.5, int((np.max(sizes) + 1) / (np.max(sizes) / 5)))
bin_counts, bin_edges, _ = plt.hist(sizes, histtype='step', lw=2, bins=bins)
fig = plt.figure()
ax = fig.subplots()
ax.set_title("[%d topics, level: %d]" % (len(df_topics.columns), l))
x = (bin_edges[:-1] + bin_edges[1:]) / 2
ax.plot(x[np.nonzero(bin_counts)], bin_counts[np.nonzero(bin_counts)])
ax.plot(x, 1e4 / np.power(x, 5))
ax.set_xlabel("topic size\n(number of genes)", fontsize=20)
ax.set_ylabel("number of topic", fontsize=20)
ax.set_xscale('log')
ax.set_yscale('log')
plt.show()
fig.savefig("%s/%s/topic_size_level%d.png" % (directory, algorithm, l))
def get_candles(directory, level, df_mv, ax, algorithm='topsbm'):
df_topics = pd.read_csv("%s/%s/%s_level_%d_topics.csv" % (directory, algorithm, algorithm, level))
candles = {
'open': [],
'high': [],
'low': [],
'close': [],
'size': []
}
for topic in df_topics.columns:
subarr = df_mv.loc[df_topics[topic].dropna(), :]['occurrence'].values
avg = np.average(subarr)
std = np.std(subarr)
q = np.quantile(subarr, [0.25, 0.75])
candles['high'].append(np.min([1, avg + std]))
candles['open'].append(np.min([q[1], 1]))
candles['close'].append(np.max([q[0], 0]))
candles['low'].append(np.max([0, avg - std]))
candles['size'].append(len(subarr))
ax.set_title("[level: %d]" % level)
ax.set_ylabel('$O_i$', fontsize=20)
ax.set_xlim(-1, len(df_topics.columns))
ax.set_xticks([i + 1 for i in range(-1, len(df_topics.columns))])
ax.set_xticklabels(
["Topic %d" % (i + 2) if ((i + 2) % 5 == 0 or i == -1) else '' for i in range(-1, len(df_topics.columns))],
rotation=60)
return candles
def get_tissue_style(tissue):
marker = 'o'
c = 'k'
ls = '--'
if 'gtex' in tissue:
marker = 'o'
ls = '-'
elif 'tcga' in tissue:
marker = 'x'
ls = '--'
else:
marker = '.'
ls = '-.'
if 'reast' in tissue:
c = 'darkcyan'
elif 'olon' in tissue:
c = 'b'
elif 'hyroid' in tissue:
c = 'y'
elif 'terus' in tissue:
c = 'pink'
elif 'ladder' in tissue:
c = 'gray'
elif 'sophagus' in tissue:
c = 'brown'
elif 'ung' in tissue:
c = 'magenta'
elif 'tomach' in tissue:
c = 'lime'
elif 'kin' in tissue:
c = 'wheat'
elif 'ancreas' in tissue:
c = 'forestgreen'
elif 'Adrenal Gland' in tissue:
c = 'aqua'
elif 'Adipose Tissue' in tissue:
c = 'brown'
elif 'erve' in tissue:
c = 'royalblue'
elif 'lood' in tissue:
c = 'red'
elif 'idney' in tissue:
c = 'mediumslateblue'
elif 'eart' in tissue:
c = 'darkred'
elif 'rain' in tissue:
c = 'darkgray'
elif 'estis' in tissue:
c = 'darkkhaki'
else:
c = 'k'
return (marker, c, ls)
def topic_distr_sample(doc, df, ax=None):
    if ax is None:
fig = plt.figure()
ax = fig.subplots()
ax.set_title("Topic distribution: %s" % doc)
labels = [l if df[df['doc'] == doc].loc[:, l].values[0] >= 0.05 else '' for l in df.columns[2:]]
patches, texts, autotexts = ax.pie(df[df['doc'] == doc].values[0][2:], labels=labels,
autopct=lambda p: '%.1f%s' % (p, '%') if p >= 5 else '',
textprops={'fontsize': 20, 'color': 'white', 'wrap': True})
for t in texts:
t.set_fontsize(18)
t.set_wrap(True)
t.set_color('black')
plt.show()
def topic_distr_isample(idoc, df, ax=None):
    topic_distr_sample(df[df['i_doc'] == idoc]['doc'].values[0], df, ax)
def add_tumor_location(df_files):
df_files.insert(2, 'disease_tissue', '')
for sample in df_files.index.values:
row = df_files.loc[sample, :]
df_files.at[sample, 'disease_tissue'] = '%s[%s]' % (row['primary_site'], row['disease_type'])
def get_scores(directory, labels, df_files=None, algorithm='topsbm', verbose=False):
if df_files is None:
df_files = pd.read_csv("%s/files.dat" % directory, index_col=[0], header=[0]).dropna(how='all', axis=0)
if df_files.columns.isin(['disease_type']).any():
add_tumor_location(df_files)
scores = {}
for label in labels:
xl = []
scores[label] = {
'h': [],
'c': [],
'V': [],
'xl':[]
}
l = get_max_available_L(directory, algorithm)
for l in np.arange(l + 1):
try:
true_labels, predicted_labels = define_labels(get_cluster_given_l(l, directory, algorithm), df_files, label=label)
scores[label]['h'].append(metrics.cluster.homogeneity_score(true_labels, predicted_labels))
scores[label]['c'].append(metrics.cluster.completeness_score(true_labels, predicted_labels))
scores[label]['V'].append(metrics.cluster.v_measure_score(true_labels, predicted_labels))
xl.append(len(np.unique(predicted_labels)))
if verbose:
print(l)
except:
print(*sys.exc_info())
print("Skipping level ", l)
scores[label]['xl'] = xl
if len(labels) >= 2:
h = np.array(scores[labels[0]]['h'])
c = np.array(scores[labels[1]]['c'])
scores['mixed'] = {
'h': h,
'c': c,
'V': 2 * h * c / (h + c)
}
return scores
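def _example_scores_plot():
    # Hedged usage sketch (added for illustration; not part of the original
    # module): assumes a results directory produced by the topsbm pipeline,
    # containing files.dat and topsbm/topsbm_level_*_clusters.csv. The
    # directory name and label columns below are placeholder assumptions.
    directory = "results/my_dataset"
    labels = ["primary_site", "disease_type"]
    scores = get_scores(directory, labels, algorithm="topsbm")
    fig = plt.figure(figsize=(12, 6))
    ax = fig.subplots()
    add_score_lines(ax, scores, labels=labels)
    fig.savefig("%s/topsbm/metrics.pdf" % directory)
    return scores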
def get_scores_shuffled(directory, df_files, algorithm='topsbm', label='primary_site', verbose=False):
scores = {
'h': [],
'c': [],
'V': [],
'xl':[]
}
xl = []
l = get_max_available_L(directory, algorithm)
df_files_shuffled = df_files.copy()
np.random.shuffle(df_files_shuffled[label])
try:
for l in np.arange(0, l + 1):
try:
if verbose:
print(l)
clusters = get_cluster_given_l(l, directory, algorithm=algorithm)
except:
print("Skipping shuffled level ", l)
continue
_, predicted_labels = define_labels(clusters, df_files, label=label)
true_labels, _ = define_labels(clusters,
df_files_shuffled,
label=label)
scores['h'].append(metrics.cluster.homogeneity_score(true_labels, predicted_labels))
scores['c'].append(metrics.cluster.completeness_score(true_labels, predicted_labels))
scores['V'].append(metrics.cluster.v_measure_score(true_labels, predicted_labels))
xl.append(len(np.unique(predicted_labels)))
except:
print(*sys.exc_info())
print("shuffled files not found")
scores['xl'] = xl
return scores
def getclustersizesarray(directory, l=3, algorithm='topsbm'):
try:
xl = [len(get_cluster_given_l(li, directory, algorithm=algorithm)) for li in np.linspace(0, l, l + 1)]
except:
try:
xl = [len(get_cluster_given_l(li, directory, algorithm=algorithm)) for li in np.linspace(1, l, l)]
except:
xl = []
for li in np.linspace(1, l, l):
try:
xl.append(len(get_cluster_given_l(li, directory, algorithm=algorithm)))
except:
pass
return xl
def gettopicsizesarray(directory, l=3, algorithm='topsbm'):
xl = []
try:
        xl = [len(get_topic_given_l(li, directory, algorithm=algorithm)) for li in np.linspace(0, l, l + 1)]
# -*- coding: utf-8 -*-
"""
Identification of spectral properties in analog signals (e.g., the power
spectrum).
:copyright: Copyright 2015-2016 by the Elephant team, see `doc/authors.rst`.
:license: Modified BSD, see LICENSE.txt for details.
"""
from __future__ import division, print_function, unicode_literals
import neo
import numpy as np
import quantities as pq
import scipy.signal
def welch_psd(signal, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
fs=1.0, window='hanning', nfft=None, detrend='constant',
return_onesided=True, scaling='density', axis=-1):
"""
Estimates power spectrum density (PSD) of a given `neo.AnalogSignal`
using Welch's method.
The PSD is obtained through the following steps:
1. Cut the given data into several overlapping segments. The degree of
overlap can be specified by parameter `overlap` (default is 0.5,
i.e. segments are overlapped by the half of their length).
The number and the length of the segments are determined according
to the parameters `num_seg`, `len_seg` or `freq_res`. By default, the
data is cut into 8 segments;
2. Apply a window function to each segment. Hanning window is used by
default. This can be changed by giving a window function or an
array as parameter `window` (see Notes [2]);
3. Compute the periodogram of each segment;
4. Average the obtained periodograms to yield PSD estimate.
Parameters
----------
signal : neo.AnalogSignal or pq.Quantity or np.ndarray
Time series data, of which PSD is estimated. When `signal` is
`pq.Quantity` or `np.ndarray`, sampling frequency should be given
through the keyword argument `fs`. Otherwise, the default value is
used (`fs` = 1.0).
num_seg : int, optional
Number of segments. The length of segments is adjusted so that
overlapping segments cover the entire stretch of the given data. This
parameter is ignored if `len_seg` or `freq_res` is given.
Default: 8.
len_seg : int, optional
Length of segments. This parameter is ignored if `freq_res` is given.
If None, it will be determined from other parameters.
Default: None.
freq_res : pq.Quantity or float, optional
Desired frequency resolution of the obtained PSD estimate in terms of
the interval between adjacent frequency bins. When given as a `float`,
it is taken as frequency in Hz.
If None, it will be determined from other parameters.
Default: None.
overlap : float, optional
Overlap between segments represented as a float number between 0 (no
overlap) and 1 (complete overlap).
Default: 0.5 (half-overlapped).
fs : pq.Quantity or float, optional
Specifies the sampling frequency of the input time series. When the
input is given as a `neo.AnalogSignal`, the sampling frequency is
taken from its attribute and this parameter is ignored.
Default: 1.0.
window : str or tuple or np.ndarray, optional
Desired window to use.
See Notes [2].
Default: 'hanning'.
nfft : int, optional
Length of the FFT used.
See Notes [2].
Default: None.
detrend : str or function or False, optional
Specifies how to detrend each segment.
See Notes [2].
Default: 'constant'.
return_onesided : bool, optional
If True, return a one-sided spectrum for real data.
If False return a two-sided spectrum.
See Notes [2].
Default: True.
scaling : {'density', 'spectrum'}, optional
If 'density', computes the power spectral density where Pxx has units
of V**2/Hz. If 'spectrum', computes the power spectrum where Pxx has
units of V**2, if `signal` is measured in V and `fs` is measured in
Hz.
See Notes [2].
Default: 'density'.
axis : int, optional
Axis along which the periodogram is computed.
See Notes [2].
Default: last axis (-1).
Returns
-------
freqs : pq.Quantity or np.ndarray
Frequencies associated with the power estimates in `psd`.
`freqs` is always a vector irrespective of the shape of the input
data in `signal`.
If `signal` is `neo.AnalogSignal` or `pq.Quantity`, a `pq.Quantity`
array is returned.
Otherwise, a `np.ndarray` containing frequency in Hz is returned.
psd : pq.Quantity or np.ndarray
PSD estimates of the time series in `signal`.
If `signal` is `neo.AnalogSignal`, a `pq.Quantity` array is returned.
Otherwise, the return is a `np.ndarray`.
Raises
------
ValueError
If `overlap` is not in the interval [0, 1).
If `freq_res` is not positive.
If `freq_res` is too high for the given data size.
If `freq_res` is None and `len_seg` is not a positive number.
If `freq_res` is None and `len_seg` is greater than the length of data
on `axis`.
If both `freq_res` and `len_seg` are None and `num_seg` is not a
positive number.
If both `freq_res` and `len_seg` are None and `num_seg` is greater
than the length of data on `axis`.
Notes
-----
1. The computation steps used in this function are implemented in
`scipy.signal` module, and this function is a wrapper which provides
a proper set of parameters to `scipy.signal.welch` function.
2. The parameters `window`, `nfft`, `detrend`, `return_onesided`,
`scaling`, and `axis` are directly passed to the `scipy.signal.welch`
function. See the respective descriptions in the docstring of
`scipy.signal.welch` for usage.
3. When only `num_seg` is given, parameter `nperseg` of
`scipy.signal.welch` function is determined according to the expression
`signal.shape[axis]` / (`num_seg` - `overlap` * (`num_seg` - 1))
converted to integer.
See Also
--------
scipy.signal.welch
"""
# initialize a parameter dict (to be given to scipy.signal.welch()) with
# the parameters directly passed on to scipy.signal.welch()
params = {'window': window, 'nfft': nfft,
'detrend': detrend, 'return_onesided': return_onesided,
'scaling': scaling, 'axis': axis}
# add the input data to params. When the input is AnalogSignal, the
# data is added after rolling the axis for time index to the last
data = np.asarray(signal)
if isinstance(signal, neo.AnalogSignal):
data = np.rollaxis(data, 0, len(data.shape))
params['x'] = data
# if the data is given as AnalogSignal, use its attribute to specify
# the sampling frequency
if hasattr(signal, 'sampling_rate'):
params['fs'] = signal.sampling_rate.rescale('Hz').magnitude
else:
params['fs'] = fs
if overlap < 0:
raise ValueError("overlap must be greater than or equal to 0")
elif 1 <= overlap:
raise ValueError("overlap must be less then 1")
# determine the length of segments (i.e. *nperseg*) according to given
# parameters
if freq_res is not None:
if freq_res <= 0:
raise ValueError("freq_res must be positive")
dF = freq_res.rescale('Hz').magnitude \
if isinstance(freq_res, pq.quantity.Quantity) else freq_res
nperseg = int(params['fs'] / dF)
if nperseg > data.shape[axis]:
raise ValueError("freq_res is too high for the given data size")
elif len_seg is not None:
if len_seg <= 0:
raise ValueError("len_seg must be a positive number")
elif data.shape[axis] < len_seg:
raise ValueError("len_seg must be shorter than the data length")
nperseg = len_seg
else:
if num_seg <= 0:
raise ValueError("num_seg must be a positive number")
elif data.shape[axis] < num_seg:
raise ValueError("num_seg must be smaller than the data length")
# when only *num_seg* is given, *nperseg* is determined by solving the
# following equation:
# num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1]
# ----------------- =============================== ^^^^^^^^^^^
# summed segment lengths total overlap data length
nperseg = int(data.shape[axis] / (num_seg - overlap * (num_seg - 1)))
params['nperseg'] = nperseg
params['noverlap'] = int(nperseg * overlap)
freqs, psd = scipy.signal.welch(**params)
# attach proper units to return values
if isinstance(signal, pq.quantity.Quantity):
if 'scaling' in params and params['scaling'] == 'spectrum':
psd = psd * signal.units * signal.units
else:
psd = psd * signal.units * signal.units / pq.Hz
freqs = freqs * pq.Hz
return freqs, psd
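def _example_welch_psd():
    # Hedged usage sketch (added for illustration; not part of the original
    # module): the white-noise AnalogSignal and the 1 Hz frequency resolution
    # below are arbitrary choices, only meant to show a typical call.
    signal = neo.AnalogSignal(np.random.randn(10000), units='mV',
                              sampling_rate=1000.0 * pq.Hz)
    freqs, psd = welch_psd(signal, freq_res=1.0 * pq.Hz, overlap=0.5)
    return freqs, psd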
def welch_cohere(x, y, num_seg=8, len_seg=None, freq_res=None, overlap=0.5,
fs=1.0, window='hanning', nfft=None, detrend='constant',
scaling='density', axis=-1):
r"""
Estimates coherence between a given pair of analog signals.
The estimation is performed with Welch's method: the given pair of data
are cut into short segments, cross-spectra are calculated for each pair of
segments, and the cross-spectra are averaged and normalized by respective
auto-spectra.
By default, the data are cut into 8 segments with 50% overlap between
neighboring segments. These numbers can be changed through respective
parameters.
Parameters
----------
x : neo.AnalogSignal or pq.Quantity or np.ndarray
First time series data of the pair between which coherence is
computed.
y : neo.AnalogSignal or pq.Quantity or np.ndarray
Second time series data of the pair between which coherence is
computed.
The shapes and the sampling frequencies of `x` and `y` must be
identical. When `x` and `y` are not `neo.AnalogSignal`, sampling
frequency should be specified through the keyword argument `fs`.
Otherwise, the default value is used (`fs` = 1.0).
num_seg : int, optional
Number of segments. The length of segments is adjusted so that
overlapping segments cover the entire stretch of the given data. This
parameter is ignored if `len_seg` or `freq_res` is given.
Default: 8.
len_seg : int, optional
Length of segments. This parameter is ignored if `freq_res` is given.
If None, it is determined from other parameters.
Default: None.
freq_res : pq.Quantity or float, optional
Desired frequency resolution of the obtained coherence estimate in
terms of the interval between adjacent frequency bins. When given as a
`float`, it is taken as frequency in Hz.
If None, it is determined from other parameters.
Default: None.
overlap : float, optional
Overlap between segments represented as a float number between 0 (no
overlap) and 1 (complete overlap).
Default: 0.5 (half-overlapped).
fs : pq.Quantity or float, optional
Specifies the sampling frequency of the input time series. When the
input time series are given as `neo.AnalogSignal`, the sampling
frequency is taken from their attribute and this parameter is ignored.
Default: 1.0.
window : str or tuple or np.ndarray, optional
Desired window to use.
See Notes [1].
Default: 'hanning'.
nfft : int, optional
Length of the FFT used.
See Notes [1].
Default: None.
detrend : str or function or False, optional
Specifies how to detrend each segment.
See Notes [1].
Default: 'constant'.
scaling : {'density', 'spectrum'}, optional
If 'density', computes the power spectral density where Pxx has units
of V**2/Hz. If 'spectrum', computes the power spectrum where Pxx has
units of V**2, if `signal` is measured in V and `fs` is measured in
Hz.
See Notes [1].
Default: 'density'.
axis : int, optional
Axis along which the periodogram is computed.
See Notes [1].
Default: last axis (-1).
Returns
-------
freqs : pq.Quantity or np.ndarray
Frequencies associated with the estimates of coherency and phase lag.
`freqs` is always a vector irrespective of the shape of the input
data. If `x` and `y` are `neo.AnalogSignal` or `pq.Quantity`, a
`pq.Quantity` array is returned. Otherwise, a `np.ndarray` containing
frequency in Hz is returned.
coherency : np.ndarray
Estimate of coherency between the input time series. For each
frequency, coherency takes a value between 0 and 1, with 0 or 1
representing no or perfect coherence, respectively.
When the input arrays `x` and `y` are multi-dimensional, `coherency`
is of the same shape as the inputs, and the frequency is indexed
depending on the type of the input. If the input is
`neo.AnalogSignal`, the first axis indexes frequency. Otherwise,
frequency is indexed by the last axis.
phase_lag : pq.Quantity or np.ndarray
Estimate of phase lag in radian between the input time series. For
each frequency, phase lag takes a value between :math:`-\pi` and
:math:`\pi`, with positive values meaning phase precession of `x`
ahead of `y`, and vice versa. If `x` and `y` are `neo.AnalogSignal` or
`pq.Quantity`, a `pq.Quantity` array is returned. Otherwise, a
`np.ndarray` containing phase lag in radian is returned.
The axis for frequency index is determined in the same way as for
`coherency`.
Raises
------
ValueError
If `overlap` is not in the interval [0, 1).
If `freq_res` is not positive.
If `freq_res` is too high for the given data size.
If `freq_res` is None and `len_seg` is not a positive number.
If `freq_res` is None and `len_seg` is greater than the length of data
on `axis`.
If both `freq_res` and `len_seg` are None and `num_seg` is not a
positive number.
If both `freq_res` and `len_seg` are None and `num_seg` is greater
than the length of data on `axis`.
Notes
-----
1. The parameters `window`, `nfft`, `detrend`, `scaling`, and `axis` are
directly passed to the helper function `_welch`. See the
respective descriptions in the docstring of `_welch` for usage.
2. When only `num_seg` is given, parameter `nperseg` for `_welch` function
is determined according to the expression
`x.shape[axis]` / (`num_seg` - `overlap` * (`num_seg` - 1))
converted to integer.
See Also
--------
spectral._welch
"""
# initialize a parameter dict for scipy.signal.csd()
params = {'window': window, 'nfft': nfft,
'detrend': detrend, 'scaling': scaling, 'axis': axis}
# When the input is AnalogSignal, the axis for time index is rolled to
# the last
xdata = np.asarray(x)
ydata = np.asarray(y)
if isinstance(x, neo.AnalogSignal):
xdata = np.rollaxis(xdata, 0, len(xdata.shape))
ydata = np.rollaxis(ydata, 0, len(ydata.shape))
# if the data is given as AnalogSignal, use its attribute to specify
# the sampling frequency
if hasattr(x, 'sampling_rate'):
params['fs'] = x.sampling_rate.rescale('Hz').magnitude
else:
params['fs'] = fs
if overlap < 0:
raise ValueError("overlap must be greater than or equal to 0")
elif 1 <= overlap:
raise ValueError("overlap must be less then 1")
# determine the length of segments (i.e. *nperseg*) according to given
# parameters
if freq_res is not None:
if freq_res <= 0:
raise ValueError("freq_res must be positive")
dF = freq_res.rescale('Hz').magnitude \
if isinstance(freq_res, pq.quantity.Quantity) else freq_res
nperseg = int(params['fs'] / dF)
if nperseg > xdata.shape[axis]:
raise ValueError("freq_res is too high for the given data size")
elif len_seg is not None:
if len_seg <= 0:
raise ValueError("len_seg must be a positive number")
elif xdata.shape[axis] < len_seg:
raise ValueError("len_seg must be shorter than the data length")
nperseg = len_seg
else:
if num_seg <= 0:
raise ValueError("num_seg must be a positive number")
elif xdata.shape[axis] < num_seg:
raise ValueError("num_seg must be smaller than the data length")
# when only *num_seg* is given, *nperseg* is determined by solving the
# following equation:
# num_seg * nperseg - (num_seg-1) * overlap * nperseg = data.shape[-1]
# ----------------- =============================== ^^^^^^^^^^^
# summed segment lengths total overlap data length
nperseg = int(xdata.shape[axis] / (num_seg - overlap * (num_seg - 1)))
params['nperseg'] = nperseg
params['noverlap'] = int(nperseg * overlap)
freqs, Pxx = scipy.signal.welch(xdata, **params)
_, Pyy = scipy.signal.welch(ydata, **params)
_, Pxy = scipy.signal.csd(xdata, ydata, **params)
coherency = np.abs(Pxy) ** 2 / (Pxx * Pyy)
phase_lag = np.angle(Pxy)
# RAFT's main model class
import os
import numpy as np
import matplotlib.pyplot as plt
from matplotlib import cm
import yaml
import moorpy as mp
import raft.raft_fowt as fowt
from raft.helpers import *
#import F6T1RNA as structural # import turbine structural model functions
raft_dir = os.path.dirname(os.path.dirname(os.path.realpath(__file__)))
TwoPi = 2.0*np.pi
class Model():
def __init__(self, design, nTurbines=1):
'''
Empty frequency domain model initialization function
design : dict
Dictionary of all the design info from turbine to platform to moorings
nTurbines
could in future be used to set up any number of identical turbines
'''
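# Illustrative sketch (not a full schema) of the design dict fields this class reads, e.g.:
#   design = {'settings': {'min_freq': 0.01, 'max_freq': 1.0, 'XiStart': 0.1, 'nIter': 15},
#             'site': {'water_depth': 200.0},                # depth value is just an example
#             'mooring': {...},                              # parsed by moorpy's parseYAML
#             'cases': {'keys': [...], 'data': [[...], ...]}}
# plus the turbine/platform sections consumed by raft_fowt.FOWT.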
self.fowtList = []
self.coords = []
self.nDOF = 0 # number of DOFs in system
self.design = design # save design dictionary for possible later use/reference
# parse settings
if not 'settings' in design: # if settings field not in input data
design['settings'] = {} # make an empty one to avoid errors
min_freq = getFromDict(design['settings'], 'min_freq', default=0.01, dtype=float) # [Hz] lowest frequency to consider, also the frequency bin width
max_freq = getFromDict(design['settings'], 'max_freq', default=1.00, dtype=float) # [Hz] highest frequency to consider
self.XiStart = getFromDict(design['settings'], 'XiStart' , default=0.1 , dtype=float) # sets initial amplitude of each DOF for all frequencies
self.nIter = getFromDict(design['settings'], 'nIter' , default=15 , dtype=int ) # sets how many iterations to perform in Model.solveDynamics()
self.w = np.arange(min_freq, max_freq+0.5*min_freq, min_freq) *2*np.pi # angular frequencies to analyze (rad/s)
self.nw = len(self.w) # number of frequencies
# process mooring information
self.ms = mp.System()
self.ms.parseYAML(design['mooring'])
# depth and wave number
self.depth = getFromDict(design['site'], 'water_depth', dtype=float)
self.k = np.zeros(self.nw) # wave number
for i in range(self.nw):
self.k[i] = waveNumber(self.w[i], self.depth)
# set up the FOWT here <<< only set for 1 FOWT for now <<<
self.fowtList.append(fowt.FOWT(design, self.w, self.ms.bodyList[0], depth=self.depth))
self.coords.append([0.0,0.0])
self.nDOF += 6
self.ms.bodyList[0].type = -1 # need to make sure it's set to a coupled type
try:
self.ms.initialize() # reinitialize the mooring system to ensure all things are tallied properly etc.
except Exception as e:
raise RuntimeError('An error occurred when initializing the mooring system: ' + str(e))
self.results = {} # dictionary to hold all results from the model
def addFOWT(self, fowt, xy0=[0,0]):
'''(not used currently) Adds an already set up FOWT to the frequency domain model solver.'''
self.fowtList.append(fowt)
self.coords.append(xy0)
self.nDOF += 6
# would potentially need to add a mooring system body for it too <<<
"""
def setEnv(self, Hs=8, Tp=12, spectrum='unit', V=10, beta=0, Fthrust=0):
self.env = Env()
self.env.Hs = Hs
self.env.Tp = Tp
self.env.spectrum = spectrum
self.env.V = V
self.env.beta = beta
self.Fthrust = Fthrust
for fowt in self.fowtList:
fowt.setEnv(Hs=Hs, Tp=Tp, V=V, spectrum=spectrum, beta=beta, Fthrust=Fthrust)
"""
def analyzeUnloaded(self):
'''This calculates the system properties under unloaded conditions: equilibrium positions, natural frequencies, etc.'''
# calculate the system's constant properties
#self.calcSystemConstantProps()
for fowt in self.fowtList:
fowt.calcStatics()
#fowt.calcBEM()
fowt.calcHydroConstants(dict(wave_spectrum='still', wave_heading=0))
# get mooring system characteristics about undisplaced platform position (useful for baseline and verification)
try:
self.C_moor0 = self.ms.getCoupledStiffness(lines_only=True) # this method accounts for equilibrium of free objects in the system
self.F_moor0 = self.ms.getForces(DOFtype="coupled", lines_only=True)
except Exception as e:
raise RuntimeError('An error occurred when getting linearized mooring properties in undisplaced state: ' + str(e))
self.results['properties'] = {} # signal this data is available by adding a section to the results dictionary
# calculate platform offsets and mooring system equilibrium state
self.calcMooringAndOffsets()
self.results['properties']['offset_unloaded'] = self.fowtList[0].Xi0
# TODO: add printing of summary info here - mass, stiffnesses, etc
def analyzeCases(self, display=0):
'''This runs through all the specified load cases, building a dictionary of results.'''
nCases = len(self.design['cases']['data'])
nLines = len(self.ms.lineList)
# set up output arrays for load cases
self.results['case_metrics'] = {}
self.results['case_metrics']['surge_avg'] = np.zeros(nCases)
self.results['case_metrics']['surge_std'] = np.zeros(nCases)
self.results['case_metrics']['surge_max'] = np.zeros(nCases)
self.results['case_metrics']['surge_PSD'] = np.zeros([nCases,self.nw]) # adding PSDs as well. Could put behind an if statement if this slows things down
self.results['case_metrics']['sway_avg'] = np.zeros(nCases)
self.results['case_metrics']['sway_std'] = np.zeros(nCases)
self.results['case_metrics']['sway_max'] = np.zeros(nCases)
self.results['case_metrics']['sway_PSD'] = np.zeros([nCases,self.nw])
self.results['case_metrics']['heave_avg'] = np.zeros(nCases)
self.results['case_metrics']['heave_std'] = np.zeros(nCases)
self.results['case_metrics']['heave_max'] = np.zeros(nCases)
self.results['case_metrics']['heave_PSD'] = np.zeros([nCases,self.nw])
self.results['case_metrics']['roll_avg'] = np.zeros(nCases)
self.results['case_metrics']['roll_std'] = np.zeros(nCases)
self.results['case_metrics']['roll_max'] = np.zeros(nCases)
self.results['case_metrics']['roll_PSD'] = np.zeros([nCases,self.nw])
self.results['case_metrics']['pitch_avg'] = np.zeros(nCases)
self.results['case_metrics']['pitch_std'] = np.zeros(nCases)
self.results['case_metrics']['pitch_max'] = np.zeros(nCases)
self.results['case_metrics']['pitch_PSD'] = np.zeros([nCases,self.nw])
self.results['case_metrics']['yaw_avg'] = np.zeros(nCases)
self.results['case_metrics']['yaw_std'] = np.zeros(nCases)
self.results['case_metrics']['yaw_max'] = np.zeros(nCases)
self.results['case_metrics']['yaw_PSD'] = np.zeros([nCases,self.nw])
# nacelle acceleration
self.results['case_metrics']['AxRNA_avg'] = np.zeros(nCases)
self.results['case_metrics']['AxRNA_std'] = np.zeros(nCases)
self.results['case_metrics']['AxRNA_max'] = np.zeros(nCases)
self.results['case_metrics']['AxRNA_PSD'] = np.zeros([nCases,self.nw])
# tower base bending moment
self.results['case_metrics']['Mbase_avg'] = np.zeros(nCases)
self.results['case_metrics']['Mbase_std'] = np.zeros(nCases)
self.results['case_metrics']['Mbase_max'] = np.zeros(nCases)
self.results['case_metrics']['Mbase_PSD'] = np.zeros([nCases,self.nw])
self.results['case_metrics']['Mbase_DEL'] = np.zeros(nCases)
# rotor speed
self.results['case_metrics']['omega_avg'] = np.zeros(nCases)
self.results['case_metrics']['omega_std'] = np.zeros(nCases)
self.results['case_metrics']['omega_max'] = np.zeros(nCases)
self.results['case_metrics']['omega_PSD'] = np.zeros([nCases,self.nw])
# generator torque
self.results['case_metrics']['torque_avg'] = np.zeros(nCases)
self.results['case_metrics']['torque_std'] = np.zeros(nCases)
self.results['case_metrics']['torque_max'] = np.zeros(nCases)
self.results['case_metrics']['torque_PSD'] = np.zeros([nCases,self.nw])
# rotor power
self.results['case_metrics']['power_avg'] = np.zeros(nCases)
self.results['case_metrics']['power_std'] = np.zeros(nCases)
self.results['case_metrics']['power_max'] = np.zeros(nCases)
self.results['case_metrics']['power_PSD'] = np.zeros([nCases,self.nw])
# collective blade pitch
self.results['case_metrics']['bPitch_avg'] = np.zeros(nCases)
self.results['case_metrics']['bPitch_std'] = np.zeros(nCases)
self.results['case_metrics']['bPitch_max'] = np.zeros(nCases)
self.results['case_metrics']['bPitch_PSD'] = np.zeros([nCases, self.nw])
# mooring tension
self.results['case_metrics']['Tmoor_avg'] = np.zeros([nCases, 2*nLines]) # 2d array, for each line in each case?
self.results['case_metrics']['Tmoor_std'] = np.zeros([nCases, 2*nLines])
self.results['case_metrics']['Tmoor_max'] = np.zeros([nCases, 2*nLines])
self.results['case_metrics']['Tmoor_DEL'] = np.zeros([nCases, 2*nLines])
self.results['case_metrics']['Tmoor_PSD'] = np.zeros([nCases, 2*nLines, self.nw])
# wind and wave spectra for reference
self.results['case_metrics']['wind_PSD'] = np.zeros([nCases, self.nw])
self.results['case_metrics']['wave_PSD'] = np.zeros([nCases, self.nw])
# calculate the system's constant properties
for fowt in self.fowtList:
fowt.calcStatics()
fowt.calcBEM()
# loop through each case
for iCase in range(nCases):
print(f"\n--------------------- Running Case {iCase+1} ----------------------")
print(self.design['cases']['data'][iCase])
# form dictionary of case parameters
case = dict(zip( self.design['cases']['keys'], self.design['cases']['data'][iCase]))
# get initial FOWT values assuming no offset
for fowt in self.fowtList:
fowt.Xi0 = np.zeros(6) # zero platform offsets
fowt.calcTurbineConstants(case, ptfm_pitch=0.0)
fowt.calcHydroConstants(case)
# calculate platform offsets and mooring system equilibrium state
self.calcMooringAndOffsets()
# update values based on offsets if applicable
for fowt in self.fowtList:
fowt.calcTurbineConstants(case, ptfm_pitch=fowt.Xi0[4])
# fowt.calcHydroConstants(case) (hydrodynamics don't account for offset, so far)
# (could solve mooring and offsets a second time, but likely overkill)
# solve system dynamics
self.solveDynamics(case)
# process outputs that are specific to the floating unit
self.fowtList[0].saveTurbineOutputs(self.results['case_metrics'], case, iCase, fowt.Xi0, self.Xi[0:6,:])
# process mooring tension outputs
nLine = int(len(self.T_moor)/2)
T_moor_amps = np.zeros([2*nLine, self.nw], dtype=complex)
for iw in range(self.nw):
T_moor_amps[:,iw] = np.matmul(self.J_moor, self.Xi[:,iw]) # FFT of mooring tensions
self.results['case_metrics']['Tmoor_avg'][iCase,:] = self.T_moor
for iT in range(2*nLine):
TRMS = getRMS(T_moor_amps[iT,:], self.w[0]) # estimated mooring line RMS tension [N]
self.results['case_metrics']['Tmoor_std'][iCase,iT] = TRMS
self.results['case_metrics']['Tmoor_max'][iCase,iT] = self.T_moor[iT] + 3*TRMS
self.results['case_metrics']['Tmoor_PSD'][iCase,iT,:] = getPSD(T_moor_amps[iT,:]) # PSD in N^2/(rad/s)
#self.results['case_metrics']['Tmoor_DEL'][iCase,iT] =
if display > 0:
metrics = self.results['case_metrics']
# print statistics table
print(f"-------------------- Case {iCase+1} Statistics --------------------")
print("Response channel Average RMS Maximum")
print(f"surge (m) {metrics['surge_avg'][iCase] :10.2e} {metrics['surge_std'][iCase] :10.2e} {metrics['surge_max'][iCase] :10.2e}")
print(f"sway (m) {metrics['sway_avg' ][iCase] :10.2e} {metrics['sway_std' ][iCase] :10.2e} {metrics['sway_max' ][iCase] :10.2e}")
print(f"heave (m) {metrics['heave_avg'][iCase] :10.2e} {metrics['heave_std'][iCase] :10.2e} {metrics['heave_max'][iCase] :10.2e}")
print(f"roll (deg) {metrics['roll_avg' ][iCase] :10.2e} {metrics['roll_std' ][iCase] :10.2e} {metrics['roll_max' ][iCase] :10.2e}")
print(f"pitch (deg) {metrics['pitch_avg'][iCase] :10.2e} {metrics['pitch_std'][iCase] :10.2e} {metrics['pitch_max'][iCase] :10.2e}")
print(f"yaw (deg) {metrics[ 'yaw_avg'][iCase] :10.2e} {metrics[ 'yaw_std'][iCase] :10.2e} {metrics['yaw_max' ][iCase] :10.2e}")
print(f"nacelle acc. (m/s) {metrics['AxRNA_avg'][iCase] :10.2e} {metrics['AxRNA_std'][iCase] :10.2e} {metrics['AxRNA_max'][iCase] :10.2e}")
print(f"tower bending (Nm) {metrics['Mbase_avg'][iCase] :10.2e} {metrics['Mbase_std'][iCase] :10.2e} {metrics['Mbase_max'][iCase] :10.2e}")
print(f"rotor speed (RPM) {metrics['omega_avg'][iCase] :10.2e} {metrics['omega_std'][iCase] :10.2e} {metrics['omega_max'][iCase] :10.2e}")
print(f"blade pitch (deg) {metrics['bPitch_avg'][iCase] :10.2e} {metrics['bPitch_std'][iCase] :10.2e} ")
print(f"rotor power {metrics['power_avg'][iCase] :10.2e} ")
for i in range(nLine):
j = i+nLine
#print(f"line {i} tension A {metrics['Tmoor_avg'][iCase,i]:10.2e} {metrics['Tmoor_std'][iCase,i]:10.2e} {metrics['Tmoor_max'][iCase,i]:10.2e}")
print(f"line {i} tension (N) {metrics['Tmoor_avg'][iCase,j]:10.2e} {metrics['Tmoor_std'][iCase,j]:10.2e} {metrics['Tmoor_max'][iCase,j]:10.2e}")
print(f"-----------------------------------------------------------")
"""
def calcSystemConstantProps(self):
'''This gets the various static/constant calculations of each FOWT done. (Those that don't depend on load case.)'''
for fowt in self.fowtList:
fowt.calcBEM()
fowt.calcStatics()
#fowt.calcDynamicConstants()
# First get mooring system characteristics about undisplaced platform position (useful for baseline and verification)
try:
self.C_moor0 = self.ms.getCoupledStiffness(lines_only=True) # this method accounts for equilibrium of free objects in the system
self.F_moor0 = self.ms.getForces(DOFtype="coupled", lines_only=True)
except Exception as e:
raise RuntimeError('An error occurred when getting linearized mooring properties in undisplaced state: ' + str(e))
self.results['properties'] = {} # signal this data is available by adding a section to the results dictionary
"""
def calcMooringAndOffsets(self):
'''Calculates mean offsets and linearized mooring properties for the current load case.
setEnv and calcSystemProps must be called first. This will ultimately become a method for solving mean operating point.
'''
# apply any mean aerodynamic and hydrodynamic loads
F_PRP = self.fowtList[0].F_aero0# + self.fowtList[0].F_hydro0 <<< hydro load would be nice here eventually
self.ms.bodyList[0].f6Ext = np.array(F_PRP)
# Now find static equilibrium offsets of platform and get mooring properties about that point
# (This assumes some loads have been applied)
#self.ms.display=2
try:
self.ms.solveEquilibrium3(DOFtype="both", tol=0.01) #, rmsTol=1.0E-5) # get the system to its equilibrium
except Exception as e: #mp.MoorPyError
print('An error occurred when solving system equilibrium: ' + str(e))
#raise RuntimeError('An error occured when solving unloaded equilibrium: '+error.message)
# ::: a loop could be added here for an array :::
fowt = self.fowtList[0]
#print("Equilibrium'3' platform positions/rotations:")
#printVec(self.ms.bodyList[0].r6)
r6eq = self.ms.bodyList[0].r6
fowt.Xi0 = np.array(r6eq) # save current mean offsets for the FOWT
#self.ms.plot()
print(f"Found mean offets with with surge = {r6eq[0]:.2f} m and pitch = {r6eq[4]*180/np.pi:.2f} deg.")
try:
C_moor, J_moor = self.ms.getCoupledStiffness(lines_only=True, tensions=True) # get stiffness matrix and tension jacobian matrix
F_moor = self.ms.getForces(DOFtype="coupled", lines_only=True) # get net forces and moments from mooring lines on Body
T_moor = self.ms.getTensions()
except Exception as e:
raise RuntimeError('An error occurred when getting linearized mooring properties in offset state: ' + str(e))
# add any additional yaw stiffness that isn't included in the MoorPy model (e.g. if a bridle isn't modeled)
C_moor[5,5] += fowt.yawstiff
self.C_moor = C_moor
self.J_moor = J_moor # jacobian of mooring line tensions w.r.t. coupled DOFs
self.F_moor = F_moor
self.T_moor = T_moor
# store results
self.results['means'] = {} # signal this data is available by adding a section to the results dictionary
self.results['means']['aero force' ] = self.fowtList[0].F_aero0
self.results['means']['platform offset' ] = r6eq
self.results['means']['mooring force' ] = F_moor
self.results['means']['fairlead tensions'] = np.array([np.linalg.norm(self.ms.pointList[id-1].getForces()) for id in self.ms.bodyList[0].attachedP])
def solveEigen(self):
'''finds natural frequencies of system'''
# total system coefficient arrays
M_tot = np.zeros([self.nDOF,self.nDOF]) # total mass and added mass matrix [kg, kg-m, kg-m^2]
C_tot = np.zeros([self.nDOF,self.nDOF]) # total stiffness matrix [N/m, N, N-m]
# add in mooring stiffness from MoorPy system
C_tot += np.array(self.C_moor0)
# ::: a loop could be added here for an array :::
fowt = self.fowtList[0]
# add any additional yaw stiffness that isn't included in the MoorPy model (e.g. if a bridle isn't modeled)
C_tot[5,5] += fowt.yawstiff # will need to be put in calcSystemProps() once there is more than 1 fowt in a model
# add fowt's terms to system matrices (BEM arrays are not yet included here)
M_tot += fowt.M_struc + fowt.A_hydro_morison # mass
C_tot += fowt.C_struc + fowt.C_hydro # stiffness
# check viability of matrices
message=''
for i in range(self.nDOF):
if M_tot[i,i] < 1.0:
message += f'Diagonal entry {i} of system mass matrix is less than 1 ({M_tot[i,i]}). '
if C_tot[i,i] < 1.0:
message += f'Diagonal entry {i} of system stiffness matrix is less than 1 ({C_tot[i,i]}). '
if len(message) > 0:
raise RuntimeError('System matrices computed by RAFT have one or more small or negative diagonals: '+message)
# calculate natural frequencies (using eigen analysis to get proper values for pitch and roll - otherwise would need to base about CG if using diagonal entries only)
eigenvals, eigenvectors = np.linalg.eig(np.matmul(np.linalg.inv(M_tot), C_tot)) # <<< need to sort this out so it gives desired modes, some are currently a bit messy
if any(eigenvals <= 0.0):
raise RuntimeError("Error: zero or negative system eigenvalues detected.")
# sort to normal DOF order based on which DOF is largest in each eigenvector
ind_list = []
for i in range(5,-1, -1):
vec = np.abs(eigenvectors[i,:]) # look at each row (DOF) at a time (use reverse order to pick out rotational DOFs first)
for j in range(6): # now do another loop in case the index was claimed previously
ind = np.argmax(vec) # find the index of the vector with the largest value of the current DOF
if ind in ind_list: # if a previous vector claimed this DOF, set it to zero in this vector so that we look at the other vectors
vec[ind] = 0.0
else:
ind_list.append(ind) # if it hasn't been claimed before, assign this vector to the DOF
break
ind_list.reverse() # reverse the index list since we made it in reverse order
fns = np.sqrt(eigenvals[ind_list])/2.0/np.pi # apply sorting to eigenvalues and convert to natural frequency in Hz
modes = eigenvectors[:,ind_list] # apply sorting to eigenvectors
print("")
print("--------- Natural frequencies and mode shapes -------------")
print("Mode 1 2 3 4 5 6")
print("Fn (Hz)"+"".join([f"{fn:10.4f}" for fn in fns]))
print("")
for i in range(6):
print(f"DOF {i+1} "+"".join([f"{modes[i,j]:10.4f}" for j in range(6)]))
print("-----------------------------------------------------------")
'''
print("natural frequencies from eigen values")
printVec(fns)
print(1/fns)
print("mode shapes from eigen values")
printMat(modes)
# alternative attempt to calculate natural frequencies based on diagonal entries (and taking pitch and roll about CG)
if C_tot[0,0] == 0.0:
zMoorx = 0.0
else:
zMoorx = C_tot[0,4]/C_tot[0,0] # effective z elevation of mooring system reaction forces in x and y directions
if C_tot[1,1] == 0.0:
zMoory = 0.0
else:
zMoory = C_tot[1,3]/C_tot[1,1]
zCG = fowt.rCG_TOT[2] # center of mass in z
zCMx = M_tot[0,4]/M_tot[0,0] # effective z elevation of center of mass and added mass in x and y directions
zCMy = M_tot[1,3]/M_tot[1,1]
print("natural frequencies with added mass")
fn = np.zeros(6)
fn[0] = np.sqrt( C_tot[0,0] / M_tot[0,0] )/ 2.0/np.pi
fn[1] = np.sqrt( C_tot[1,1] / M_tot[1,1] )/ 2.0/np.pi
fn[2] = np.sqrt( C_tot[2,2] / M_tot[2,2] )/ 2.0/np.pi
fn[5] = np.sqrt( C_tot[5,5] / M_tot[5,5] )/ 2.0/np.pi
fn[3] = np.sqrt( (C_tot[3,3] + C_tot[1,1]*((zCMy-zMoory)**2 - zMoory**2) ) / (M_tot[3,3] - M_tot[1,1]*zCMy**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
fn[4] = np.sqrt( (C_tot[4,4] + C_tot[0,0]*((zCMx-zMoorx)**2 - zMoorx**2) ) / (M_tot[4,4] - M_tot[0,0]*zCMx**2 ))/ 2.0/np.pi # this contains adjustments to reflect rotation about the CG rather than PRP
# note that the above lines use off-diagonal term rather than parallel axis theorem since rotation will not be exactly at CG due to effect of added mass
printVec(fn)
print(1/fn)
'''
# store results
self.results['eigen'] = {} # signal this data is available by adding a section to the results dictionary
self.results['eigen']['frequencies'] = fns
self.results['eigen']['modes' ] = modes
def solveDynamics(self, case, tol=0.01, conv_plot=0, RAO_plot=0):
'''After all constant parts have been computed, call this to iterate through remaining terms
until convergence on dynamic response. Note that steady/mean quantities are excluded here.
nIter = 2 # maximum number of iterations to allow
'''
nIter = int(self.nIter) + 1 # maybe think of a better name for the first nIter
XiStart = self.XiStart
# total system complex response amplitudes (this gets updated each iteration)
XiLast = np.zeros([self.nDOF,self.nw], dtype=complex) + XiStart # displacement and rotation complex amplitudes [m, rad]
if conv_plot:
fig, ax = plt.subplots(3,1,sharex=True)
c = np.arange(nIter+1) # adding 1 again here so that there are no RuntimeErrors
c = cm.jet((c - np.min(c)) / (np.max(c) - np.min(c)))  # normalize iteration index to [0, 1] for the colormap
import numpy as np
import math
import abc
from random import randint, uniform
from PyQt5.QtGui import QColor
from PyQt5.QtCore import QPoint, QPointF
from game.individuals.perception import Perception
from game.individuals.desires import Desires
from game.individuals.ability import Ability
from game.individuals.statistic import Statistic
class Individual(metaclass=abc.ABCMeta):
def __init__(self, parent, color, radius=None, position=None):
self.parent = parent
self.perception = None
self.desires = None
self.abilities = None
# config parameter
self.config = self.parent.config
self.individual_config = self.config.individuals
self.predator_config = self.config.predators
self.ability_base = self.config.ability_base
# standard parameter
self.statistic = Statistic()
self.max_health = self.individual_config['max_health']
self.health = self.max_health
self.poison = self.individual_config['start_poison']
self.color = color
self.default_dmg = self.individual_config['default_dmg']
# if a position was not given
if position is None:
_left_border = 0
_right_border = int(self.parent.frame_dimension[0])
_top_border = 0
_bottom_border = int(self.parent.frame_dimension[1])
_x = float(randint(_left_border, _right_border))
_y = float(randint(_top_border, _bottom_border))
self._position = np.array([_x, _y])
else:
self._position = np.array([position[0], position[1]])
# if a radius was not given, use the default start size
if not radius:
self.radius = self.individual_config['start_size']
else:
self.radius = radius
# let the individuals run in random directions at beginning
self.acceleration = np.array([uniform(-2,2), uniform(-2,2)])
self.velocity = np.array([0.0, 0.0])
import os
import time
from math import ceil
import MNN
import cv2
import numpy as np
import torch
import utils.box_utils_numpy as box_utils
def predict(width, height, confidences, boxes, prob_threshold, iou_threshold=0.3, top_k=-1):
boxes = boxes[0]
confidences = confidences[0]
picked_box_probs = []
picked_labels = []
for class_index in range(1, confidences.shape[1]):
probs = confidences[:, class_index]
mask = probs > prob_threshold
probs = probs[mask]
if probs.shape[0] == 0:
continue
subset_boxes = boxes[mask, :]
box_probs = np.concatenate([subset_boxes, probs.reshape(-1, 1)], axis=1)
box_probs = box_utils.hard_nms(box_probs,
iou_threshold=iou_threshold,
top_k=top_k,
)
picked_box_probs.append(box_probs)
picked_labels.extend([class_index] * box_probs.shape[0])
if not picked_box_probs:
return np.array([]), np.array([]), np.array([])
picked_box_probs = np.concatenate(picked_box_probs)
picked_box_probs[:, 0] *= width
picked_box_probs[:, 1] *= height
picked_box_probs[:, 2] *= width
picked_box_probs[:, 3] *= height
return picked_box_probs[:, :4].astype(np.int32), np.array(picked_labels), picked_box_probs[:, 4]
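# Illustrative call with synthetic inputs (shapes are assumptions for this sketch; real scores and
# boxes come from the MNN session): confidences has shape (1, num_priors, num_classes) and boxes has
# shape (1, num_priors, 4) with corner coordinates normalized to [0, 1].
#   conf_demo = np.random.rand(1, 100, 2); conf_demo /= conf_demo.sum(axis=2, keepdims=True)
#   boxes_demo = np.sort(np.random.rand(1, 100, 4), axis=2)
#   det_boxes, det_labels, det_scores = predict(320, 240, conf_demo, boxes_demo, prob_threshold=0.5)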
#!/usr/bin/env python
# coding: utf-8
# # Machine Learning using Regularized Logistic Regression
#
# Classifier model that predicts whether microchips from a fabrication plant pass quality assurance (QA).
# In[1]:
import pandas as pd
import numpy as np
# import pandas_profiling
import seaborn as sns
import matplotlib.pyplot as plt
# In[2]:
data = pd.read_csv('ex2data2.txt', names = ['test1', 'test2', 'accepted'])
data = data.astype('float128')
print('Profiling Data')
print(data.info())
print(data.head())
# profile = pandas_profiling.ProfileReport(df)
# profile.to_file(outputfile="output.html")
# In[3]:
x = np.array(data[['test1', 'test2']]) # training set
y = np.array(data['accepted']) # labels
[m, n] = np.shape(x)
# ## ==================== Part 1: Plotting ====================
#
# The plot bellow shows that our dataset cannot be separated into positive and negative examples by a straight-line through the plot. Therefore, a straight-forward application of logistic regression will not perform well on this dataset since logistic regression will only be able to find a linear decision boundary.
# In[4]:
print('Plotting data with "x" indicating (y = 1) examples and "o" indicating (y = 0) examples.')
sns.scatterplot('test1', 'test2', hue='accepted', style='accepted', data=data)
# ## =========== Part 2: Mapping Features ============
#
# One way to fit the data better is to create more features from each data point. We will map the features into all polynomial terms of $x_1$ and $x_2$ up to the sixth power.
#
# As a result of this mapping, our vector of two features (the scores on two QA tests) has been transformed into a 28-dimensional vector. A logistic regression classifier trained on this higher-dimension feature vector will have a more complex decision boundary and will appear nonlinear when drawn in our 2-dimensional plot.
#
# While the feature mapping allows us to build a more expressive classifier, it also makes the model more susceptible to overfitting. To avoid that we will implement regularized logistic regression to fit the data and combat the overfitting problem.
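# As a quick check, the number of monomials $x_1^i x_2^j$ with $i + j \leq 6$ is $1 + 2 + \dots + 7 = 28$, which matches the 28-dimensional feature vector produced by `mapFeature` below (including the bias term).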
# In[5]:
"""
Maps the two input features to quadratic features used in the regularization.
Returns a new feature array with more features, comprising of [1, X1, X2, X1^2, X2^2, X1*X2, X1*X2^2, etc].
Inputs x1, x2 must be the same size.
x1 = [
[],
...
]
x2 = [
[],
...
]
Return [
[1, x1, x2, x1^2, x2^2, x1*x2, x1*x2^2, etc]
...
]
"""
def mapFeature(x1, x2):
assert(len(x1) == len(x2))
degree = 6
out = [[1] for i in range(len(x1))]
for i in range(1, degree + 1):
for j in range(0, i + 1):
for k in range(len(x1)):
out[k].append((x1[k][0] ** (i - j)) * (x2[k][0] ** j))
return np.array(out)
# In[6]:
"""
Normalizes the features in X
returns a normalized version of X where the mean value of each feature is 0 and the standard deviation is 1.
This is often a good preprocessing step to do when working with learning algorithms.
First, for each feature dimension, compute the mean of the feature and subtract it from the dataset,
storing the mean value in mu. Next, compute the standard deviation of each feature and divide
each feature by it's standard deviation, storing the standard deviation in sigma.
"""
def featureNormalize(X):
X_norm = np.zeros(np.shape(X))
qty_features = np.shape(X)[1]
mu = np.zeros(qty_features)
sigma = np.zeros(qty_features)
for i in range(qty_features):
mu[i] = np.mean(X[:,i])
X_norm[:,i] = X[:,i] - mu[i]
# by default np.std calculates the population std; here we want the sample std (as done in Octave)
sigma[i] = np.std(X_norm[:,i], ddof=1) # ddof=1 divides by (N - 1) instead of the default N
X_norm[:,i] = X_norm[:,i] / sigma[i]
return X_norm, mu, sigma
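# Quick illustrative check (not part of the original exercise): for X = [[1, 2], [3, 4], [5, 6]],
# featureNormalize returns mu = [3, 4], sigma = [2, 2] (sample std) and X_norm = [[-1, -1], [0, 0], [1, 1]],
# i.e. zero-mean columns with unit sample standard deviation.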
# ## =========== Part 3: Regularized Logistic Regression ============
# ### Activation Function
# We will use the sigmoid function as our activation function.
#
# $g(z) = \frac{1}{1 + e^{-z}}$
#
# When:
#
# $z = 0$ then $g = 0.5$
#
# $z \rightarrow +\infty$ then $g \rightarrow +1$
#
# $z \rightarrow -\infty$ then $g \rightarrow 0$
# In[7]:
def sigmoid(z):
return 1 / (1 + np.exp(-z))
# In[8]:
print(sigmoid(-5)) # ~= 0.0066929
print(sigmoid(0)) # ~= 0.5
print(sigmoid(5)) # ~= 0.99331
print(sigmoid(np.array([4, 5, 6]))) # ~= [0.98201 0.99331 0.99753]
print(sigmoid(np.array([-1, 0, 1]))) # ~= [0.26894 0.50000 0.73106]
print(sigmoid(np.array([[4, 5, 6], [-1, 0, 1]])))
# ### Hypothesis Function
# Function that defines our logistic model.
#
# Definition:
#
# $h_\theta(x) = g(\theta_0 + \theta_1 * x_1 + \theta_2 * x_2)$
#
# Vectorial form:
#
# $h_\theta(x) = g(\theta^{T} * x)$
#
# where:
#
# $g$ is the sigmoid function; $x = [x_0, x_1, x_2]$; $x_0 = 1$ and $\theta = [\theta_0, \theta_1, \theta_2]$
# In[9]:
def hypothesis(X, theta):
z = X.dot(theta)
return sigmoid(z)
# ### Regularized Logistic Cost Function
# Computes the logistic cost of our model using theta as the parameter for logistic regression to fit the data points in X and y, considering the cost of a particular choice of theta.
#
# For parameter $\theta_0$ we don't include a regularization cost because it is the bias term - it doesn't multiply a feature.
#
# Function cost:
#
# $ J(\theta) = \frac{1}{m} \sum_{i=1}^{m} [ -y^{(i)} log(h_\theta(x^{(i)})) - (1 - y^{(i)}) log(1 - h_\theta(x^{(i)})) ] + \frac{\lambda}{2m} \sum_{j=1}^{n} \theta_j^{2}$
#
# Vectorial form:
#
# $ J(\theta) = [\frac{1}{m} * (-\vec{y}^{T} \cdot log(h_\theta(\vec{x})) - (1 - \vec{y})^{T} \cdot log(1 - h_\theta(\vec{x})))] + \frac{\lambda}{2m} \sum_{j=1}^{n} \theta_j^{2} $
#
# If at any point we get $log(h_\theta(x^{(i)})) = 0$, that means we need to normalize the features.
# In[10]:
"""
Inputs:
X = [
[ x_0, x_1, ..., x_n ]
]
y = [
[ y_0 ]
...
[ y_m ]
]
theta = [ theta_0, ..., theta_n ]
lambda = regularization parameter
"""
def computeRegularizedLogisticCostIterative(X, y, theta, lambd):
m = len(y)
errorSum = 0 # total error
for i in range(m):
h = hypothesis(X[i], theta)
errorSum = errorSum + (-y[i] * np.log(h) - (1 - y[i]) * np.log(1 - h))
# sum the square of parameters cost
parameterCost = 0
for i in range(1, len(theta)):
parameterCost = parameterCost + (theta[i] ** 2)
return errorSum / m + (lambd / (2 * m) * parameterCost)
# Better way using Matrix/Vectors
def computeRegularizedLogisticCostMatrix(X, y, theta, lambd):
m = len(y)
h = hypothesis(X, theta)
return ((1 / m) * (-y.T.dot(np.log(h)) - (1 - y).T.dot(np.log(1 - h)))) + (lambd / (2 * m) * theta[1:].T.dot(theta[1:]))
# In[11]:
print('Testing regularized cost function')
X_ = np.array([
[1, 8, 1, 6],
[1, 3, 5, 7],
[1, 4, 9, 2]
]);
y_ = np.array([1, 0, 1]);
theta_ = np.array([-2, -1, 1, 2]);
print('J ~= 4.6832 ->', computeRegularizedLogisticCostIterative(X_, y_, theta_, 0))
print('J ~= 4.6832 ->', computeRegularizedLogisticCostMatrix(X_, y_, theta_, 0))
print('J ~= 8.6832 ->', computeRegularizedLogisticCostIterative(X_, y_, theta_, 4))
print('J ~= 8.6832 ->', computeRegularizedLogisticCostMatrix(X_, y_, theta_, 4))
# In[12]:
# map the features and also add 1s at x_0
# X, mu, sigma = featureNormalize(x)
X = mapFeature(x[:,0].reshape(m, 1), x[:,1].reshape(m, 1))
n = X.shape[1]
initial_theta = np.zeros([n])
# Set regularization parameter lambda to 1
lambd = 1;
#initial_theta = np.array([0.1, 12.00921659, 11.26284221], dtype=np.float128)
cost = computeRegularizedLogisticCostMatrix(X, y, initial_theta, lambd)
print('Cost at initial theta (zeros):', cost)
print('Expected cost (approx): 0.693')
test_theta = np.ones([X.shape[1]])
cost = computeRegularizedLogisticCostMatrix(X, y, test_theta, 10)
print('\nCost at test theta (with lambda = 10):', cost);
print('Expected cost (approx): 3.16');
# ### Regularized Gradient Descent
# Performs gradient descent to learn $\theta$ parameters.
#
# It returns an array with $\theta$ containing the values found by taking num_iters gradient steps with learning rate $\alpha$.
#
# It also returns an array with the history of $J(\theta)$ to be plotted.
#
# Step to update each parameter:
#
# $\theta_j := \theta_j - \alpha * \frac{\partial J}{\partial \theta_j} $
#
# Where:
#
# $\frac{\partial J}{\partial \theta_j} = \frac{1}{m} \sum_{i=1}^{m} [( h_\theta(x^{(i)}) - y^{(i)}) * x_j^{(i)}] + \frac{\lambda}{m} \theta_j$ (the regularization term applies for $j \geq 1$; the bias $\theta_0$ is not regularized)
#
# Vectorial form:
#
# $ \frac{\partial J}{\partial \theta} = \frac{1}{m} X^{T} ( h_\theta(X) - \vec{y}) + \frac{\lambda}{m} \theta $ (with the regularization component for $\theta_0$ set to zero)
# In[13]:
def regularizedLogisticGradientDescent(X, y, theta, alpha, lambd, num_iters):
m = len(y)
J_history = np.zeros(num_iters)
for i in range(num_iters):
h_theta = hypothesis(X, theta)
# gradient of our cost function
nabla = ((1 / m) * X.T.dot(h_theta - y))
# regularization
nabla_theta_0 = nabla[0]
nabla = nabla + (lambd / m) * theta
nabla[0] = nabla_theta_0
# print(nabla) # first iteration: [ 0.31722, 0.87232, 1.64812, 2.23787 ]
theta = theta - alpha * nabla
# Save the cost J in every iteration
J_history[i] = computeRegularizedLogisticCostMatrix(X, y, theta, lambd)
return theta, J_history
# In[14]:
# gradient w/o reg: [ 0.31722, 0.87232, 1.64812, 2.23787 ]
# ~= [-2, -1, 1, 2], [4.6831]
print(regularizedLogisticGradientDescent(X_, y_, theta_, 0, 0, 1))
# gradient w/ reg: [ 0.31722, -0.46102, 2.98146, 4.90454 ]
# ~= [-2, -1, 1, 2], [8.6832]
print(regularizedLogisticGradientDescent(X_, y_, theta_, 0, 4, 1))
# In[15]:
# uncomment the line: print(nabla)
theta, J_history = regularizedLogisticGradientDescent(X, y, initial_theta, 0, lambd, 1)
print('Expected gradients (approx) - first five values only:');
print('[ 0.0085, 0.0188, 0.0001, 0.0503, 0.0115 ]\n');
test_theta = np.ones([X.shape[1]])
theta, J_history = regularizedLogisticGradientDescent(X, y, test_theta, 0, 10, 1)
print('Expected gradients (approx) - first five values only:');
print('[ 0.3460, 0.1614, 0.1948, 0.2269, 0.0922 ]');
# In[16]:
num_iters = 50; # with alpha = 0.01 we should raise the number of iterations
alphas = [0.01, 0.03, 0.1, 0.3, 1, 3, 10]
colors = ['b', 'r', 'y', 'black', 'brown', 'gray', 'green'];  # one color per learning rate, so zip() does not drop the last alpha
# In[17]:
# To plot the J(theta) using different alphas
fig, ax = plt.subplots()
iterations = range(num_iters)
print('Running gradient descent ...\n')
for alpha, color in zip(alphas, colors):
theta = np.zeros([n])
# THIS LIBRARY CONTAINS THE ALGORITHMS EXPLAINED IN THE WORK
# "Acceleration of Descent-based Optimization Algorithms via Caratheodory's Theorem"
####################################################################################
#
# This library is focused only on the development of the Caratheodory accelerated
# algorithms in the case of least-square with and without Lasso regularization.
#
# In general X represents the data/features, Y the labels and theta_0 the initial
# parameters. It returns theta (the desired argmin) and other variables to reconstruct
# the history of the algorithm.
#
# We can split the functions into three groups:
# - ADAM, SAG
# - BCD algorithm with the Caratheodory Sampling Procedure (CSP).
# The structure of the accelerated functions is this:
# a) the *_CA_* functions are the outer while loop of the algorithms described in
# the cited work
# b) the *_mod_* functions represent the inner while loop, i.e. where we use
# the reduced measure
# c) directions_CA_steps_* functions are necessary for the parallelization
# of the code
# - BCD w/out the Caratheodory Sampling Procedure.
# The structure of the accelerated functions is this:
# a) mom_BCD_GS_ls, mom_BCD_random_ls, BCD_GS_ls are the outer while of
# the algorithms described in the cited work w/out the CSP
# b) parallel_BCD_mom, parallel_BCD are necessary for the parallelization
# of the code
#
####################################################################################
import os
import numpy as np
import copy, timeit, psutil
import recombination as rb
from numba import njit, prange
import multiprocessing as mp
###############################################
# ADAM
###############################################
def ADAM_ls(X,Y,theta_0,lambda_LASSO=0.,batch_size=256,
lr=1e-3,loss_accepted=1e-8,max_iter=1e2):
# it runs the ADAM algorithm specialized in the case of least-square
# with a LASSO regularization term.
# Copied from the original paper
tic = timeit.default_timer()
N = np.shape(X)[0]
iteration = 0.
loss = loss_accepted+1.
theta = np.array(theta_0)
# Adam Parameter
beta_1 = 0.9
beta_2 = 0.999
eps = 1e-8
t = 0
m = np.zeros(np.size(theta_0))
v = np.zeros(np.size(theta_0))
m_hat = np.zeros(np.size(theta_0))
v_hat = np.zeros(np.size(theta_0))
loss_story = []
time_story = []
iteration_story = []
n_cycles = int(N/batch_size)
while iteration<=max_iter:
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
idx_shuffled = np.random.choice(N,N, replace=False)
for i in np.arange(n_cycles):
t += 1
idx = idx_shuffled[i*batch_size:i*batch_size+batch_size]
error_persample = np.dot(X[idx],theta)-Y[idx]
error_persample = error_persample[np.newaxis].T
gr = 2*np.matmul(X[idx].T,error_persample)/N
gr += lambda_LASSO * np.sign(theta).reshape(-1,1)
m = beta_1*m + (1-beta_1)*gr[:,0]
v = beta_2*v + (1-beta_2)*np.power(gr[:,0],2)
m_hat = m/(1-beta_1**t)
v_hat = v/(1-beta_2**t)
theta -= lr*m_hat/(np.sqrt(v_hat)+eps)
iteration += 1
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
return (loss_story,iteration_story,theta,time_story)
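# Minimal usage sketch (not part of the original library; data sizes and hyperparameters below are
# illustrative assumptions):
#   rng = np.random.RandomState(0)
#   X_demo = rng.randn(1024, 10)                     # 1024 samples, 10 features
#   theta_true = rng.randn(10)
#   Y_demo = X_demo.dot(theta_true) + 0.01 * rng.randn(1024)
#   loss_hist, iters, theta_hat, times = ADAM_ls(X_demo, Y_demo, np.zeros(10), lambda_LASSO=0.0,
#                                                batch_size=256, lr=1e-2, max_iter=20)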
###############################################
# SAG
# Observation: the leanring rate must be small
# or ''more clever strategy''
###############################################
def SAG_ls(X,Y,theta_0,lambda_LASSO=0.,batch_size=256,
lr=1e-3,loss_accepted=1e-8,max_iter=1e2):
# it runs the SAG algorithm specialized in the case of least-square
# with a LASSO regularization term.
# Copied from the original paper
tic = timeit.default_timer()
N, n = np.shape(X)
iteration = 0.
loss = loss_accepted+1.
theta = np.array(theta_0)
loss_story = []
time_story = []
iteration_story = []
n_cycles = int(N/batch_size)
gr_persample = np.zeros((N,n))
while iteration<=max_iter:
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
idx_shuffled = np.random.choice(N,N, replace=False)
if iteration == 0:
sum_total = 0.
for i in range(n_cycles):
idx = idx_shuffled[i*batch_size:(i+1)*batch_size]
error_persample = np.dot(X[idx],theta)-Y[idx]
error_persample = error_persample[np.newaxis].T
gr_persample[idx,:] = 2*np.multiply(X[idx,:],error_persample)
gr_persample[idx,:] += lambda_LASSO * np.sign(theta)
sum_new_idx = np.sum(gr_persample[idx,:],0)
sum_total += sum_new_idx
theta -= lr * sum_total/((i+1)*batch_size)
else:
for i in range(n_cycles):
idx = idx_shuffled[i*batch_size:i*batch_size+batch_size]
sum_old_idx = np.sum(gr_persample[idx,:],0)
error_persample = np.dot(X[idx],theta)-Y[idx]
error_persample = error_persample[np.newaxis].T
gr_persample[idx,:] = 2*np.multiply(X[idx,:],error_persample)
gr_persample[idx,:] += lambda_LASSO * np.sign(theta)
sum_new_idx = np.sum(gr_persample[idx,:],0)
sum_total = sum_total - sum_old_idx + sum_new_idx
theta -= lr * sum_total/N
iteration += 1
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss.item())
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration+0.5), " | loss = ", loss,
" | time = ",timeit.default_timer()-tic)
return (loss_story,iteration_story,theta,time_story)
###############################################
# Momentum_BCD w/out CaratheodorySP
# GS and Random
###############################################
def mom_BCD_GS_ls(X,Y,theta_0,lambda_LASSO=0.,
lr=1e-3,loss_accepted=1e-8,max_iter=1e2,
size_block=2,percentage_gr = 0.75):
# it runs BCD with momentum (without the Caratheodory Sampling Procedure),
# using parallel_BCD_mom and the GS rule for the selection of the blocks
num_cpus = psutil.cpu_count(logical=False)
tic = timeit.default_timer()
N = np.shape(X)[0]
iteration = 0.
loss = loss_accepted+1.
assert np.size(theta_0)>=size_block, "fewer parameters than size_block, decrease the size block"
# MOMENTUM param
beta = 0.9
v = np.zeros(np.size(theta_0))
theta = np.array(theta_0)
loss_story = []
time_story = []
iteration_story = []
gr1d = np.empty(np.size(theta_0))
max_number_blocks = np.infty #8*num_cpus
to_extract = min(max_number_blocks*size_block,len(theta_0)*percentage_gr)
to_extract -= to_extract % size_block
to_extract = int(to_extract)
while (loss > loss_accepted and iteration < max_iter):
# for i in range(1):
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
if iteration == 0:
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss)
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration),
" | loss = ", loss,
" | time = ", toc)
gr_persample = 2*np.multiply(X,error_persample)
gr_persample += lambda_LASSO * np.sign(theta)
gr1d = np.mean(gr_persample,0)
v = beta*v - lr*gr1d
# if i == 0:
theta += v
iteration += 1
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss)
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration),
" | loss = ", loss,
" | time = ", toc)
blocks = building_blocks_cumsum(gr1d,size_block,percentage_gr,max_number_blocks,
"sorted") # sorted, random or balanced
n_blocks = len(blocks)
if loss > loss_accepted:
# start parallel part
manager = mp.Manager()
results = manager.dict()
processes = []
for i in range(n_blocks):
p = mp.Process(target = parallel_BCD_mom,
args = (results,i,X,Y,lambda_LASSO,
blocks[i], # direction_persample[:,blocks[i]],
theta, # theta_tm1,
# gr1d[blocks[i]], # gr1d_tm1[blocks[i]],
# max_iter,
v,
iteration,lr,loss_accepted))
processes.append(p)
p.start()
i = 0
for process in processes:
process.join()
i += 1
# collecting results from the parallel execution
for i in range(n_blocks):
# if results[i][3] != 0:
# continue
# if results[i][0] == 1:
# continue
theta[blocks[i]] = results[i][0][blocks[i]]
v[blocks[i]] = results[i][1][blocks[i]]
iteration += n_blocks #*(len(blocks[i])+1)/N
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss)
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration),
" | loss = ", loss,
" | time = ", toc)
toc = timeit.default_timer()-tic
return loss_story,iteration_story,theta,time_story
def mom_BCD_random_ls(X,Y,theta_0,lambda_LASSO=0.,
lr=1e-3,loss_accepted=1e-8,max_iter=1e2,
size_block=2,percentage_gr = 0.75):
# it runs the BCD with momentum ,
# using parallel_BCD_mom and the random rule for the selection of the blocks
num_cpus = psutil.cpu_count(logical=False)
tic = timeit.default_timer()
N = np.shape(X)[0]
iteration = 0.
loss = loss_accepted+1.
assert np.size(theta_0)>=size_block, "fewer parameters than size_block, decrease the size block"
# MOMENTUM param
beta = 0.9
v = np.zeros(np.size(theta_0))
theta = np.array(theta_0)
loss_story = []
time_story = []
iteration_story = []
gr1d = np.empty(np.size(theta_0))
max_number_blocks = np.infty #8*num_cpus
to_extract = min(max_number_blocks*size_block,len(theta_0)*percentage_gr)
to_extract -= to_extract % size_block
to_extract = int(to_extract)
while (loss > loss_accepted and iteration < max_iter):
# for i in range(1):
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
if iteration == 0:
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss)
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration),
" | loss = ", loss,
" | time = ", toc)
# gr_persample = 2*np.multiply(X,error_persample)
# gr_persample += lambda_LASSO * np.sign(theta)
# gr1d = np.mean(gr_persample,0)
# v = beta*v - lr*gr1d
# # if i == 0:
# theta += v
# iteration += 1
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss)
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration),
" | loss = ", loss,
" | time = ", toc)
blocks = np.random.choice(len(theta),to_extract,replace = False)
n_blocks = len(blocks)
if loss > loss_accepted:
# start parallel part
manager = mp.Manager()
results = manager.dict()
processes = []
for i in range(n_blocks):
p = mp.Process(target = parallel_BCD_mom,
args = (results,i,X,Y,lambda_LASSO,
blocks[i], # direction_persample[:,blocks[i]],
theta, # theta_tm1,
# gr1d[blocks[i]], # gr1d_tm1[blocks[i]],
# max_iter,
v,
iteration,lr,loss_accepted))
processes.append(p)
p.start()
i = 0
for process in processes:
process.join()
i += 1
# collecting results from the parallel execution
for i in range(n_blocks):
# if results[i][3] != 0:
# continue
# if results[i][0] == 1:
# continue
theta[blocks[i]] = results[i][0][blocks[i]]
v[blocks[i]] = results[i][1][blocks[i]]
iteration += n_blocks #*(len(blocks[i])+1)/N
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
loss = np.dot(error_persample.T,error_persample)[0,0]/N
loss += lambda_LASSO * np.abs(theta).sum()
loss_story.append(loss)
toc = timeit.default_timer()-tic
time_story.append(toc)
iteration_story.append(iteration)
print("iteration = ", int(iteration),
" | loss = ", loss,
" | time = ", toc)
toc = timeit.default_timer()-tic
return loss_story,iteration_story,theta,time_story
def parallel_BCD_mom(results,proc_numb,X,Y,lambda_LASSO,
block, # direction_persample[:,blocks[i]],
theta, # theta_tm1,
# gr1d[blocks[i]], # gr1d_tm1[blocks[i]],
# max_iter,
v,
iteration,lr,loss_accepted):
beta = 0.9
error_persample = np.dot(X,theta)-Y
error_persample = error_persample[np.newaxis].T
gr_persample = 2*np.multiply(X,error_persample)
gr_persample += lambda_LASSO * np.sign(theta)
# Copyright (c) Facebook, Inc. and its affiliates.
# Copyright (c) 2020, <NAME> (@e-bug).
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.
import os
import sys
import random
import logging
import jsonlines
import _pickle as cPickle
import base64
import numpy as np
import tensorpack.dataflow as td
import torch
from torch.nn import functional as F
from torch.utils.data import Dataset
from ._image_features_reader import ImageFeaturesH5Reader
import msgpack_numpy
msgpack_numpy.patch()
MAX_MSGPACK_LEN = 1000000000
logger = logging.getLogger(__name__)
os.environ["HDF5_USE_FILE_LOCKING"] = "FALSE"
def assert_eq(real, expected):
assert real == expected, "%s (true) vs %s (expected)" % (real, expected)
def _load_annotations(annotations_jsonpath, task):
with jsonlines.open(annotations_jsonpath) as reader:
# Build an index which maps image id with a list of caption annotations.
entries = []
imgid2entry = {}
for annotation in reader:
if task == "RetrievalCOCO":
image_id = annotation["id"]
elif task == "RetrievalFlickr30k": # or task.startswith("RetrievalMulti30k"):
image_id = int(annotation["img_path"].split(".")[0])
imgid2entry[image_id] = []
count = 0
for sentences in annotation["sentences"]:
entries.append({"caption": sentences, "image_id": image_id})
imgid2entry[image_id].append(count)
count += 1
return entries, imgid2entry
class RetrievalDataset(Dataset):
def __init__(
self,
task: str,
dataroot: str,
annotations_jsonpath: str,
split: str,
image_features_reader: ImageFeaturesH5Reader,
gt_image_features_reader: ImageFeaturesH5Reader,
tokenizer,
bert_model,
padding_index: int = 0,
max_seq_length: int = 20,
max_region_num: int = 36,
num_locs=5,
add_global_imgfeat=None,
append_mask_sep=False,
):
# All the keys in `self._entries` would be present in `self._image_features_reader`
self._entries, self.imgid2entry = _load_annotations(annotations_jsonpath, task)
self.image_id_list = [*self.imgid2entry]
self._image_features_reader = image_features_reader
self._tokenizer = tokenizer
self.num_labels = 1
self._split = split
self._padding_index = padding_index
self._max_region_num = max_region_num + int(add_global_imgfeat is not None)
self._max_seq_length = max_seq_length
self._num_locs = num_locs
self._add_global_imgfeat = add_global_imgfeat
if self._split == "train":
image_info = cPickle.load(open(os.path.join(dataroot, "hard_negative" + ".pkl"), "rb"))
for key, value in image_info.items():
setattr(self, key, value)
self.train_imgId2pool = {imageId: i for i, imageId in enumerate(self.train_image_list)}
os.makedirs(os.path.join("/".join(annotations_jsonpath.split("/")[:-1]), "cache"), exist_ok=True)
cache_path = os.path.join(
"/".join(annotations_jsonpath.split("/")[:-1]), "cache",
task
+ "_"
+ split
+ "_"
+ bert_model.split("/")[-1]
+ "_"
+ str(max_seq_length)
+ ".pkl",
)
if not os.path.exists(cache_path):
self.tokenize()
self.tensorize()
cPickle.dump(self._entries, open(cache_path, "wb"))
else:
print("loading entries from %s" % cache_path)
self._entries = cPickle.load(open(cache_path, "rb"))
def tokenize(self):
"""Tokenizes the captions.
This will add caption_tokens in each entry of the dataset.
-1 represents nil, and should be treated as padding_idx in embedding.
"""
for entry in self._entries:
tokens = self._tokenizer.encode(entry["caption"])
tokens = [tokens[0]] + tokens[1:-1][: self._max_seq_length - 2] + [tokens[-1]]
segment_ids = [0] * len(tokens)
input_mask = [1] * len(tokens)
if len(tokens) < self._max_seq_length:
# Note: padding tokens are appended to the end of the sentence
padding = [self._padding_index] * (self._max_seq_length - len(tokens))
tokens = tokens + padding
input_mask += [0] * len(padding)
segment_ids += [0] * len(padding)
assert_eq(len(tokens), self._max_seq_length)
entry["token"] = tokens
entry["input_mask"] = input_mask
entry["segment_ids"] = segment_ids
def tensorize(self):
for entry in self._entries:
token = torch.from_numpy(np.array(entry["token"]))
entry["token"] = token
input_mask = torch.from_numpy(np.array(entry["input_mask"]))
entry["input_mask"] = input_mask
segment_ids = torch.from_numpy(np.array(entry["segment_ids"]))
entry["segment_ids"] = segment_ids
def __getitem__(self, index):
entry = self._entries[index]
image_id = entry["image_id"]
features, num_boxes, boxes, _ = self._image_features_reader[image_id]
mix_num_boxes = min(int(num_boxes), self._max_region_num)
mix_boxes_pad = np.zeros((self._max_region_num, self._num_locs))
mix_features_pad = np.zeros((self._max_region_num, 2048))
image_mask = [1] * (int(mix_num_boxes))
while len(image_mask) < self._max_region_num:
image_mask.append(0)
mix_boxes_pad[:mix_num_boxes] = boxes[:mix_num_boxes]
mix_features_pad[:mix_num_boxes] = features[:mix_num_boxes]
features1 = torch.tensor(mix_features_pad).float()
image_mask1 = torch.tensor(image_mask).long()
spatials1 = torch.tensor(mix_boxes_pad).float()
caption1 = entry["token"]
input_mask1 = entry["input_mask"]
segment_ids1 = entry["segment_ids"]
# negative samples.
# 1: correct one, 2: random caption wrong, 3: random image wrong. 4: hard image wrong.
while True:
# sample a random image:
img_id2 = random.choice(self.image_id_list)
if img_id2 != image_id:
entry2 = self._entries[random.choice(self.imgid2entry[img_id2])]
break
elif len(self.image_id_list) == 1:
tokens = self._tokenizer.encode("[MASK]")
segment_ids = [0] * len(tokens)
input_mask = [1] * len(tokens)
padding = [self._padding_index] * (self._max_seq_length - len(tokens))
tokens = torch.from_numpy(np.array(tokens + padding))
input_mask = torch.from_numpy(np.array(input_mask + [0]*len(padding)))
segment_ids = torch.from_numpy(np.array(segment_ids + [0]*len(padding)))
entry2 = {"token": tokens, "input_mask": input_mask, "segment_ids": segment_ids}
break
features2 = features1
image_mask2 = image_mask1
spatials2 = spatials1
caption2 = entry2["token"]
input_mask2 = entry2["input_mask"]
segment_ids2 = entry2["segment_ids"]
# random image wrong
while True:
# sample a random image:
img_id3 = random.choice(self.image_id_list)
if img_id3 != image_id:
break
elif len(self.image_id_list) == 1:
img_id3 = random.choice(self._image_features_reader._image_ids).decode()
break
features3, num_boxes3, boxes3, _ = self._image_features_reader[img_id3]
image_mask3 = [1] * (int(num_boxes3))
mix_num_boxes3 = min(int(num_boxes3), self._max_region_num)
while len(image_mask3) < self._max_region_num:
image_mask3.append(0)
mix_boxes_pad[:mix_num_boxes3] = boxes3[:mix_num_boxes3]
mix_features_pad[:mix_num_boxes3] = features3[:mix_num_boxes3]
features3 = torch.tensor(mix_features_pad).float()
image_mask3 = torch.tensor(image_mask3).long()
spatials3 = torch.tensor(mix_boxes_pad).float()
caption3 = caption1
input_mask3 = input_mask1
segment_ids3 = segment_ids1
if self._split == "train":
# random hard caption.
rand_img_id_pool = self.train_hard_pool[self.train_imgId2pool[image_id]]
pool_img_idx = int(rand_img_id_pool[np.random.randint(1, len(rand_img_id_pool))])
img_id4 = self.train_image_list[pool_img_idx]
entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]
else:
while True:
# sample a random image:
img_id4 = random.choice(self.image_id_list)
if img_id4 != image_id:
entry4 = self._entries[random.choice(self.imgid2entry[img_id4])]
break
elif len(self.image_id_list) == 1:
tokens = self._tokenizer.encode("[MASK]")
segment_ids = [0] * len(tokens)
input_mask = [1] * len(tokens)
padding = [self._padding_index] * (self._max_seq_length - len(tokens))
tokens = torch.from_numpy(np.array(tokens + padding))
input_mask = torch.from_numpy(np.array(input_mask + [0]*len(padding)))
segment_ids = torch.from_numpy(np.array(segment_ids + [0]*len(padding)))
entry4 = {"token": tokens, "input_mask": input_mask, "segment_ids": segment_ids}
break
features4 = features1
image_mask4 = image_mask1
spatials4 = spatials1
caption4 = entry4["token"]
input_mask4 = entry4["input_mask"]
segment_ids4 = entry4["segment_ids"]
features = torch.stack([features1, features2, features3, features4], dim=0)
spatials = torch.stack([spatials1, spatials2, spatials3, spatials4], dim=0)
image_mask = torch.stack([image_mask1, image_mask2, image_mask3, image_mask4], dim=0)
caption = torch.stack([caption1, caption2, caption3, caption4], dim=0)
input_mask = torch.stack([input_mask1, input_mask2, input_mask3, input_mask4], dim=0)
segment_ids = torch.stack([segment_ids1, segment_ids2, segment_ids3, segment_ids4], dim=0)
target = 0
return features, spatials, image_mask, caption, target, input_mask, segment_ids, image_id, index
def __len__(self):
return len(self._entries)
def _load_annotationsVal(annotations_jsonpath, task):
with jsonlines.open(annotations_jsonpath) as reader:
# Build an index which maps image id with a list of caption annotations.
image_entries = {}
caption_entries = []
for annotation in reader:
if task == "RetrievalCOCO":
image_id = annotation["id"]
elif task == "RetrievalFlickr30k":
image_id = int(annotation["img_path"].split(".")[0])
elif task == "RetrievalxFlickrCO":
image_id = annotation["img_path"]
elif task == "RetrievalWIT":
image_id = annotation["wit_ix"]
image_entries[image_id] = 1
if task == "RetrievalWIT":
caption_entries.append({"caption": annotation["caption_reference_description"], "image_id": image_id})
else:
for sentences in annotation["sentences"]:
caption_entries.append({"caption": sentences, "image_id": image_id})
image_entries = [*image_entries]
return image_entries, caption_entries
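# A minimal, self-contained sketch (the records below are invented for illustration)
# of the JSON-lines format _load_annotationsVal expects for the RetrievalCOCO task:
# each line is an object with an "id" field and a list of "sentences".
def _example_load_annotations():
    import json, tempfile
    records = [
        {"id": 1, "sentences": ["a dog runs on the beach", "a dog on sand"]},
        {"id": 2, "sentences": ["a red bus parked on the street"]},
    ]
    with tempfile.NamedTemporaryFile("w", suffix=".jsonl", delete=False) as f:
        f.write("\n".join(json.dumps(r) for r in records))
        path = f.name
    image_entries, caption_entries = _load_annotationsVal(path, "RetrievalCOCO")
    return len(image_entries), len(caption_entries)  # (2, 3)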
class RetrievalDatasetVal(Dataset):
def __init__(
self,
task: str,
dataroot: str,
annotations_jsonpath: str,
split: str,
image_features_reader: ImageFeaturesH5Reader,
gt_image_features_reader: ImageFeaturesH5Reader,
tokenizer,
bert_model,
padding_index: int = 0,
max_seq_length: int = 20,
max_region_num: int = 36,
num_locs=5,
add_global_imgfeat=None,
append_mask_sep=False,
num_subiters=2,
):
# All the keys in `self._entries` would be present in `self._image_features_reader`
self._image_entries, self._caption_entries = _load_annotationsVal(annotations_jsonpath, task)
self._image_features_reader = image_features_reader
self._tokenizer = tokenizer
self._split = split
self._padding_index = padding_index
self._max_region_num = max_region_num + int(add_global_imgfeat is not None)
self._max_seq_length = max_seq_length
self._num_locs = num_locs
self._add_global_imgfeat = add_global_imgfeat
self.num_labels = 1
self.num_subiters = num_subiters
self.num_images = len(self._image_entries)
self.num_entries = len(self._caption_entries)
self.max_num_images = self.num_images // self.num_subiters + int(self.num_images % self.num_subiters > 0)
os.makedirs(os.path.join("/".join(annotations_jsonpath.split("/")[:-1]), "cache"), exist_ok=True)
cache_path = os.path.join(
"/".join(annotations_jsonpath.split("/")[:-1]),
"cache",
task
+ "_"
+ split
+ "_"
+ bert_model.split("/")[-1]
+ "_"
+ str(max_seq_length)
+ ".pkl",
)
if not os.path.exists(cache_path):
self.tokenize()
self.tensorize()
cPickle.dump(self._caption_entries, open(cache_path, "wb"))
else:
print("loading entries from %s" % cache_path)
self._caption_entries = cPickle.load(open(cache_path, "rb"))
self.features_all = np.zeros((len(self._image_entries), self._max_region_num, 2048))
self.spatials_all = np.zeros((len(self._image_entries), self._max_region_num, self._num_locs))
self.image_mask_all = np.zeros((len(self._image_entries), self._max_region_num))
for i, image_id in enumerate(self._image_entries):
features, num_boxes, boxes, _ = self._image_features_reader[image_id]
mix_num_boxes = min(int(num_boxes), self._max_region_num)
mix_boxes_pad = np.zeros((self._max_region_num, self._num_locs))
mix_features_pad = np.zeros((self._max_region_num, 2048))
image_mask = [1] * (int(mix_num_boxes))
while len(image_mask) < self._max_region_num:
image_mask.append(0)
mix_boxes_pad[:mix_num_boxes] = boxes[:mix_num_boxes]
mix_features_pad[:mix_num_boxes] = features[:mix_num_boxes]
self.features_all[i] = mix_features_pad
self.image_mask_all[i] = np.array(image_mask)
self.spatials_all[i] = mix_boxes_pad
sys.stdout.write("%d/%d\r" % (i, len(self._image_entries)))
sys.stdout.flush()
self.features_all = torch.Tensor(self.features_all).float()
self.image_mask_all = torch.Tensor(self.image_mask_all).long()
self.spatials_all = torch.Tensor(self.spatials_all).float()
def tokenize(self):
"""Tokenizes the captions.
This will add caption_tokens in each entry of the dataset.
-1 represents nil, and should be treated as padding_idx in embedding.
"""
for entry in self._caption_entries:
tokens = self._tokenizer.encode(entry["caption"])
tokens = [tokens[0]] + tokens[1:-1][: self._max_seq_length - 2] + [tokens[-1]]
segment_ids = [0] * len(tokens)
input_mask = [1] * len(tokens)
if len(tokens) < self._max_seq_length:
# Note: padding is appended to the end of the sentence
padding = [self._padding_index] * (self._max_seq_length - len(tokens))
tokens = tokens + padding
input_mask += [0] * len(padding)
segment_ids += [0] * len(padding)
assert_eq(len(tokens), self._max_seq_length)
entry["token"] = tokens
entry["input_mask"] = input_mask
entry["segment_ids"] = segment_ids
def tensorize(self):
for entry in self._caption_entries:
token = torch.from_numpy(np.array(entry["token"])).long()
entry["token"] = token
input_mask = torch.from_numpy(np.array(entry["input_mask"]))
entry["input_mask"] = input_mask
segment_ids = torch.from_numpy(np.array(entry["segment_ids"])).long()
entry["segment_ids"] = segment_ids
def __getitem__(self, index):
# we iterate through every caption here.
caption_idx = int(index / self.num_subiters)
image_idx = index % self.num_subiters
image_entries = self._image_entries[self.max_num_images * (image_idx):self.max_num_images * (image_idx + 1)]
features_all = self.features_all[self.max_num_images * (image_idx):self.max_num_images * (image_idx + 1)]
spatials_all = self.spatials_all[self.max_num_images * (image_idx):self.max_num_images * (image_idx + 1)]
image_mask_all = self.image_mask_all[self.max_num_images * (image_idx):self.max_num_images * (image_idx + 1)]
entry = self._caption_entries[caption_idx]
caption = entry["token"]
input_mask = entry["input_mask"]
segment_ids = entry["segment_ids"]
target_all = torch.zeros(len(image_entries))
for i, image_id in enumerate(image_entries):
if image_id == entry["image_id"]:
target_all[i] = 1
return (
features_all,
spatials_all,
image_mask_all,
caption,
input_mask,
segment_ids,
target_all,
caption_idx,
image_idx,
)
def __len__(self):
return len(self._caption_entries) * self.num_subiters
class RetrievalLoader(object):
def __init__(
self,
task: str,
dataroot: str,
annotations_jsonpath: str,
split: str,
image_features_reader, # features_path
gt_image_features_reader,
tokenizer,
bert_model,
padding_index: int = 0,
max_seq_length: int = 16, # seq_len,
max_region_num: int = 36, # reg_len,
num_locs=5,
add_global_imgfeat=None,
append_mask_sep=False,
norm_embeddings=False,
batch_size=512,
num_workers=25,
cache=10000,
):
self.split = split
self._max_region_num = max_region_num
self._max_seq_length = max_seq_length
self._image_features_reader = image_features_reader
self._tokenizer = tokenizer
self._padding_index = padding_index
self._norm_embeddings = norm_embeddings
lmdb_file = image_features_reader
print("Loading from %s" % lmdb_file)
ds = td.LMDBSerializer.load(lmdb_file, shuffle=False)
self.num_dataset = len(ds)
if split == "train":
ds = td.LocallyShuffleData(ds, cache)
caption_path = annotations_jsonpath
preprocess_function = BertPreprocessBatch(
caption_path,
tokenizer,
bert_model,
max_seq_length,
max_region_num,
self.num_dataset,
num_locs=num_locs,
padding_index=padding_index,
norm_embeddings=self._norm_embeddings,
)
if split == "train":
ds = td.PrefetchData(ds, cache, 1)
ds = td.MapData(ds, preprocess_function)
if split == "train":
ds = td.PrefetchDataZMQ(ds, num_workers)
self.ds = td.BatchData(ds, batch_size)
self.ds.reset_state()
self.batch_size = batch_size
self.num_workers = num_workers
self.add_global_imgfeat = add_global_imgfeat
self.num_locs = num_locs
def __iter__(self):
for ix, batch in enumerate(self.ds.get_data()):
image_feats, image_locs, image_masks, \
input_ids1s, input_mask1s, segment_ids1s, \
input_ids3s, input_mask3s, segment_ids3s, \
input_ids4s, input_mask4s, segment_ids4s, \
image_ids = batch
batch_size = input_ids1s.shape[0]
if self.add_global_imgfeat == "first":
sum_count = np.sum(image_masks == 1, axis=1, keepdims=True)
g_image_feats = np.sum(image_feats, axis=1) / sum_count
image_feats = np.concatenate([np.expand_dims(g_image_feats, axis=1), image_feats], axis=1)
image_feats = np.array(image_feats, dtype=np.float32)
#######################################################
# ---------- Network Propagation Functions ---------- #
#######################################################
import networkx as nx
import time
import numpy as np
import pandas as pd
# Normalize network (or network subgraph) for random walk propagation
# If symmetric norm is used then the adjacency matrix is normalized as D^-0.5 * A * D^-0.5
# Otherwise the network is normalized as A*D^-1
# Where D is the diagonalized degree (default is colsum) of the adjacency matrix A
def normalize_network(network, symmetric_norm=False):
adj_mat = nx.adjacency_matrix(network)
adj_array = np.array(adj_mat.todense())
if symmetric_norm:
D = np.diag(1/np.sqrt(sum(adj_array)))
adj_array_norm = np.dot(np.dot(D, adj_array), D)
else:
degree = sum(adj_array)
adj_array_norm = (adj_array*1.0/degree).T
return adj_array_norm
# Closed form random-walk propagation (as seen in HotNet2) for each subgraph: Ft = (1-alpha)*Fo * (I-alpha*norm_adj_mat)^-1
# Concatenate to previous set of subgraphs
def fast_random_walk(alpha, binary_mat, subgraph_norm, prop_data_prev):
term1=(1-alpha)*binary_mat
term2=np.identity(binary_mat.shape[1])-alpha*subgraph_norm
term2_inv = np.linalg.inv(term2)
subgraph_prop = np.dot(term1, term2_inv)
prop_data_add = np.concatenate((prop_data_prev, subgraph_prop), axis=1)
return prop_data_add
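# A minimal sketch (toy graph and toy binary matrix, not from the original pipeline)
# combining the two helpers above: normalize a small network, then run one
# closed-form random-walk propagation with propagation constant alpha=0.7.
def _example_fast_random_walk():
    toy_network = nx.path_graph(4)  # simple chain 0-1-2-3
    norm_adj = normalize_network(toy_network, symmetric_norm=True)
    binary_mat = np.array([[1., 0., 0., 0.],   # "sample" seeded at node 0
                           [0., 0., 1., 0.]])  # "sample" seeded at node 2
    seed_col = np.zeros((binary_mat.shape[0], 1))  # empty column to start the concatenation
    prop = fast_random_walk(0.7, binary_mat, norm_adj, seed_col)
    return prop[:, 1:]  # drop the seed column; shape (2, 4) of propagated heat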
# Wrapper for random walk propagation of full network by subgraphs
# Implementation is based on the closed form of the random walk model over networks presented by the HotNet2 paper
def network_propagation(network, binary_matrix, alpha=0.7, symmetric_norm=False, verbose=True, **save_args):
# Parameter error check
alpha = float(alpha)
if alpha <= 0.0 or alpha >= 1.0:
raise ValueError('Alpha must be a value between 0 and 1')
# Begin network propagation
starttime=time.time()
if verbose:
print('Performing network propagation with alpha:', alpha)
# Separate network into connected components and calculate propagation values of each sub-sample on each connected component
subgraphs = list(nx.connected_component_subgraphs(network))
# Initialize propagation results by propagating first subgraph
subgraph = subgraphs[0]
subgraph_nodes = list(subgraph.nodes)
prop_data_node_order = list(subgraph_nodes)
binary_matrix_filt = np.array(binary_matrix.T.ix[subgraph_nodes].fillna(0).T)
subgraph_norm = normalize_network(subgraph, symmetric_norm=symmetric_norm)
prop_data_empty = np.zeros((binary_matrix_filt.shape[0], 1))
prop_data = fast_random_walk(alpha, binary_matrix_filt, subgraph_norm, prop_data_empty)
# Get propagated results for remaining subgraphs
for subgraph in subgraphs[1:]:
subgraph_nodes = list(subgraph.nodes)
prop_data_node_order = prop_data_node_order + subgraph_nodes
binary_matrix_filt = np.array(binary_matrix.T.ix[subgraph_nodes].fillna(0).T)
subgraph_norm = normalize_network(subgraph, symmetric_norm=symmetric_norm)
prop_data = fast_random_walk(alpha, binary_matrix_filt, subgraph_norm, prop_data)
# Return propagated result as dataframe
prop_data_df = pd.DataFrame(data=prop_data[:,1:], index = binary_matrix.index, columns=prop_data_node_order)
# Saving the propagation result
if 'outdir' in save_args:
if 'job_name' in save_args:
if 'iteration_label' in save_args:
save_path = save_args['outdir']+str(save_args['job_name'])+'_prop_'+str(save_args['iteration_label'])+'.csv'
else:
save_path = save_args['outdir']+str(save_args['job_name'])+'_prop.csv'
else:
if 'iteration_label' in save_args:
save_path = save_args['outdir']+'prop_'+str(save_args['iteration_label'])+'.csv'
else:
save_path = save_args['outdir']+'prop.csv'
prop_data_df.to_csv(save_path)
if verbose:
print('Network Propagation Result Saved:', save_path)
else:
pass
if verbose:
print('Network Propagation Complete:', time.time()-starttime, 'seconds')
return prop_data_df
# Wrapper for propagating binary mutation matrix over network by subgraph given network propagation kernel
# The network propagation kernel can be pre-computed using the network_propagation function and an identity matrix data frame of the network
# Pre-calculating the kernel for many runs of NBS saves a significant amount of time
def network_kernel_propagation(network, network_kernel, binary_matrix, verbose=False, **save_args):
starttime=time.time()
if verbose:
print('Performing network propagation with network kernel')
# Separate network into connected components and calculate propagation values of each sub-sample on each connected component
subgraph_nodelists = list(nx.connected_components(network))
# Initialize propagation results by propagating first subgraph
prop_nodelist = list(subgraph_nodelists[0])
prop_data = np.dot(binary_matrix.T.ix[prop_nodelist].fillna(0).T,
network_kernel.ix[prop_nodelist][prop_nodelist])
# Get propagated results for remaining subgraphs
for nodelist in subgraph_nodelists[1:]:
subgraph_nodes = list(nodelist)
prop_nodelist = prop_nodelist + subgraph_nodes
subgraph_prop_data = np.dot(binary_matrix.T.ix[subgraph_nodes].fillna(0).T,
network_kernel.ix[subgraph_nodes][subgraph_nodes])
prop_data = np.concatenate((prop_data, subgraph_prop_data), axis=1)
#!/usr/bin/env python
# coding: utf-8
# Solve differential flatness and check feasibility of control command
# Use NED coordinate
import os, sys, time, copy, yaml
import numpy as np
from .utils import *
# import cupy as cp
class QuadModel:
def __init__(self, cfg_path=None, drone_model=None):
if cfg_path == None:
curr_path = os.path.dirname(os.path.abspath(__file__))
cfg_path = curr_path+"/../config/multicopter_model.yaml"
if drone_model == None:
drone_model="default"
with open(cfg_path, 'r') as stream:
try:
cfg = yaml.safe_load(stream)
self.thrustCoef = np.double(cfg['motor_model']['thrust_coefficient'])
self.torqueCoef = np.double(cfg['motor_model']['torque_coefficient'])
self.armLength = np.double(cfg['motor_model']['moment_arm'])
self.mass = np.double(cfg['uav_model'][drone_model]['vehicle_mass'])
self.Ixx = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_xx'])
self.Iyy = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_yy'])
self.Izz = np.double(cfg['uav_model'][drone_model]['vehicle_inertia_zz'])
self.w_max = np.double(cfg['motor_model']['max_prop_speed'])
self.w_min = np.double(cfg['motor_model']['min_prop_speed'])
self.gravity = np.double(cfg['simulation']['gravity'])
self.w_sta = np.sqrt(self.mass*self.gravity/self.thrustCoef/4.0)
except yaml.YAMLError as exc:
print(exc)
lt = self.armLength*self.thrustCoef
k0 = self.torqueCoef
k1 = self.thrustCoef
self.G1 = np.array([[lt,-lt,-lt,lt],\
[lt,lt,-lt,-lt],\
[-k0,k0,-k0,k0],\
[-k1,-k1,-k1,-k1]])
self.J = np.diag(np.array([self.Ixx,self.Iyy,self.Izz]))
return
def getWs(self, status):
pos = np.array(status[0:3])
vel = np.array(status[3:6])
acc = np.array(status[6:9])
jer = np.array(status[9:12])
sna = np.array(status[12:15])
yaw = status[15]
dyaw = status[16]
ddyaw = status[17]
# Total thrust
tau_v = acc - np.array([0,0,self.gravity])
tau = -np.linalg.norm(tau_v)
bz = tau_v/tau
Thrust = self.mass*tau
# roll & pitch
roll = np.arcsin(np.dot(bz,[np.sin(yaw),-np.cos(yaw),0]))
pitch = np.arctan(np.dot(bz,[np.cos(yaw),np.sin(yaw),0])/bz[2])
bx = np.array([np.cos(yaw)*np.cos(pitch),np.sin(yaw)*np.cos(pitch),-np.sin(pitch)])
by = np.array([-np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll),\
np.cos(yaw)*np.cos(roll)+np.sin(yaw)*np.sin(pitch)*np.sin(roll),\
np.cos(pitch)*np.sin(roll)])
# dzhi & Omega
dzhi = np.dot(np.array([-1*by,bx/np.cos(roll),np.zeros(3)]),jer)/tau \
+np.array([np.sin(pitch),-np.cos(pitch)*np.tan(roll),1])*dyaw
S_inv = np.array([[1,0,-np.sin(pitch)],\
[0,np.cos(roll),np.cos(pitch)*np.sin(roll)],\
[0,-np.sin(roll),np.cos(pitch)*np.cos(roll)]])
Omega = np.dot(S_inv,dzhi)
C_inv = np.array([-1*by/tau,bx/np.cos(roll)/tau,bz])
d = np.array([np.cos(yaw)*np.sin(roll)-np.cos(roll)*np.sin(yaw)*np.sin(pitch),\
np.sin(yaw)*np.sin(roll)+np.cos(roll)*np.cos(yaw)*np.sin(pitch),0])*tau
dtau = np.dot(bz,jer-dyaw*d)
# ddzhi & dOmega
dS = np.array([[0,np.cos(roll)*np.tan(pitch),-np.sin(roll)*np.tan(pitch)],\
[0,-np.sin(roll),-np.cos(roll)],\
[0,np.cos(roll)/np.cos(pitch),-np.sin(roll)/np.cos(pitch)]])*dzhi[0]\
+np.array([[0,np.sin(roll)/np.cos(pitch)/np.cos(pitch),np.cos(roll)/np.cos(pitch)/np.cos(pitch)],\
[0,0,0],\
[0,np.sin(roll)*np.tan(pitch)/np.cos(pitch),np.cos(roll)*np.tan(pitch)/np.cos(pitch)]])*dzhi[1]
e = 2*dtau*np.dot(np.array([-1*by,bx,0]).T,Omega)\
+tau*np.dot(np.array([bx,by,bz]).T,np.array([Omega[0]*Omega[2],Omega[1]*Omega[2],-Omega[0]*Omega[0]-Omega[1]*Omega[1]]))\
-tau*np.dot(np.array([-1*by,bx,0]).T,np.dot(S_inv,np.dot(dS,Omega)))
ddzhi = np.dot(C_inv,sna-ddyaw*d-e)
ddzhi[2] = ddyaw
dOmega = -np.dot(S_inv,np.dot(dS,Omega))+np.dot(S_inv,ddzhi)
Mu = np.dot(self.J,dOmega) + np.cross(Omega,np.dot(self.J,Omega))
MT = np.zeros(4)
MT[:3] = Mu
MT[3] = Thrust
G1_inv = np.linalg.inv(self.G1)
Ws2 = np.dot(G1_inv,MT)
# Ws2 = np.clip(Ws2, np.power(self.w_min,2), np.power(self.w_max,2))
# Ws = np.sqrt(Ws2)
Ws = np.copysign(np.sqrt(np.abs(Ws2)),Ws2)
rpy = np.array([roll, pitch, yaw])
rpy_q = Euler2quat(np.array([roll, pitch, yaw]))
state = {
'roll':roll,
'pitch':pitch,
'rpy':rpy,
'rpy_q':rpy_q,
'dzhi':dzhi,
'ddzhi':ddzhi,
'ut':MT
}
return Ws, state
def getWs_vector(self, status):
pos = np.array(status[:,0:3])
vel = np.array(status[:,3:6])
acc = np.array(status[:,6:9])
jer = np.array(status[:,9:12])
sna = np.array(status[:,12:15])
yaw = np.array(status[:,15:16])
dyaw = np.array(status[:,16:17])
ddyaw = np.array(status[:,17:18])
# Total thrust
tau_v = acc - np.array([0,0,self.gravity])
tau = -np.linalg.norm(tau_v,axis=1)[:,np.newaxis]
bz = tau_v/tau
Thrust = self.mass*tau
# roll & pitch
roll = np.arcsin(np.einsum('ij,ij->i', bz,
np.concatenate((
np.sin(yaw),
-np.cos(yaw),
np.zeros_like(yaw)),axis=1)))[:,np.newaxis]
pitch = np.arctan(np.einsum('ij,ij->i', bz,
np.concatenate((
np.cos(yaw)/bz[:,2:3],
np.sin(yaw)/bz[:,2:3],
np.zeros_like(yaw)),axis=1)))[:,np.newaxis]
bx = np.concatenate((
np.cos(yaw)*np.cos(pitch),
np.sin(yaw)*np.cos(pitch),
-np.sin(pitch)),axis=1)
by = np.concatenate((
-np.sin(yaw)*np.cos(roll)+np.cos(yaw)*np.sin(pitch)*np.sin(roll),
np.cos(yaw)*np.cos(roll)+np.sin(yaw)*np.sin(pitch)*np.sin(roll),
np.cos(pitch)*np.sin(roll)),axis=1)
from random import sample
from reinforces.reinforce_utils import *
from reinforces import rephraser
from reinforces.ReplayBuffer import SharedReplayBuffer, ReplayBuffer
from reinforces.trans_env import Translate_Env
from src.utils.logging import *
from src.utils.common_utils import *
from src.data.dataset import Dataset, TextLineDataset, ZipDataset
from src.data.data_iterator import DataIterator
from torch.utils.tensorboard import SummaryWriter
import nltk.translate.bleu_score as bleu
import argparse
import math
import torch
import numpy as np
import torch.multiprocessing as _mp
os.system('export PYTHONWARNINGS="ignore:semaphore_tracker:UserWarning" ')
parser = argparse.ArgumentParser()
parser.add_argument("--n", type=int, default=1,
help="parallel attacker process (default as 1)")
parser.add_argument("--config_path", type=str,
default="./configs/cwmt_zh2en_reinforce.yaml",
help="the path to reinforcement config file.")
parser.add_argument("--save_to", type=str, default="./reinforces/reinforce_tf_zh2en_log",
help="the path for model-saving and log saving.")
parser.add_argument("--action_roll_steps", type=int, default=35,
help="training rolling steps (default as 35)")
parser.add_argument("--max_episode_lengths", type=int, default=200,
help="maximum steps for attack (default as 200)")
parser.add_argument("--max_episodes", type=int, default=5000000,
help="maximum environment episode for learning (default as 500k)")
parser.add_argument("--use_gpu", action="store_true", default=False,
help="Whether to use GPU.(default as false)")
parser.add_argument("--seed", type=int, default=1,
help="random seed (default as 1)")
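# Example invocation (illustrative only; the script name below is a placeholder for
# this module's actual file name, and the flags are the ones defined above):
#   python run_reinforce.py --n 2 --use_gpu --config_path ./configs/cwmt_zh2en_reinforce.yaml --save_to ./reinforces/reinforce_tf_zh2en_log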
def run():
# default actor threads as 1
os.environ["OMP_NUM_THREADS"] = "1"
mp = _mp.get_context('spawn')
args = parser.parse_args()
if not os.path.exists(args.save_to):
os.mkdir(args.save_to)
# load reinforce configs
with open(args.config_path, "r") as f, \
open(os.path.join(args.save_to, "current_reinforce.yaml"), "w") as current_configs:
INFO("load reinforce configures")
configs = yaml.load(f, Loader=yaml.FullLoader)
yaml.dump(configs, current_configs)
reinforce_configs = configs["reinforce_configs"]
agent_configs = configs["agent_configs"]
rephraser_model_configs = agent_configs["rephraser_model_configs"]
rephraser_optimizer_configs = agent_configs["rephraser_optimizer_configs"]
annunciator_configs = configs["annunciator_configs"]
# the Global variable of USE_GPU is mainly used for environments
GlobalNames.SEED = reinforce_configs["seed"]
GlobalNames.USE_GPU = args.use_gpu
torch.manual_seed(GlobalNames.SEED)
# build vocabulary and data iterator for env
with open(reinforce_configs["victim_configs"], "r") as victim_f:
victim_configs = yaml.load(victim_f, Loader=yaml.FullLoader)
data_configs = victim_configs["data_configs"]
src_vocab = Vocabulary(**data_configs["vocabularies"][0])
trg_vocab = Vocabulary(**data_configs["vocabularies"][1])
data_set = ZipDataset(
TextLineDataset(data_path=data_configs["train_data"][0],
vocabulary=src_vocab, max_len=data_configs["max_len"][0]),
TextLineDataset(data_path=data_configs["train_data"][1],
vocabulary=trg_vocab, max_len=data_configs["max_len"][1]),
shuffle=reinforce_configs["shuffle"]
) # we build the parallel data sets and iterate inside a thread
# collect range of action space:
_, _, limit_dist = load_or_extract_near_vocab(
config_path=reinforce_configs["victim_configs"],
model_path=reinforce_configs["victim_model"],
init_perturb_rate=reinforce_configs["init_perturb_rate"],
save_to=os.path.join(args.save_to, "near_vocab"),
save_to_full=os.path.join(args.save_to, "full_near_vocab"),
top_reserve=12,
emit_as_id=True, use_max_dist=True)
# build global SACAgent for the final policy (on cpu)
global_agent = rephraser.SACAgent(
device="cpu",
d_word_vec=victim_configs["model_configs"]["d_word_vec"],
d_model=rephraser_model_configs["d_model"],
limit_dist=limit_dist,
dropout=rephraser_model_configs["dropout"],
learnable_temperature=rephraser_model_configs["learnable_temperature"],
init_temperature=rephraser_model_configs["init_temperature"],
rephraser_optimizer_configs=rephraser_optimizer_configs,
save_to=args.save_to,
num_kept_checkpoints=reinforce_configs["num_kept_checkpoints"]
)
# load global ckp (only for the AC parameters) if needed
global_step = global_agent.load_model()
print("global_step:", global_step)
if global_step != 0:
INFO("restarting at step %d"%global_step)
else: # save the initial model
global_agent.save_model(global_step)
global_summary_writer = SummaryWriter(
log_dir=os.path.join(args.save_to, "global_summary"))
global_replay_buffer = SharedReplayBuffer(
max_sen_len=data_configs["max_len"][0],
state_dim=victim_configs["model_configs"]["d_word_vec"],
action_dim=victim_configs["model_configs"]["d_word_vec"],
capacity=reinforce_configs["replay_buffer_capacity"])
# test_for_throughput(global_replay_buffer)
# make global objects shared memory
global_agent.share_memory()
global_replay_buffer.share_memory()
# collect available devices and distribute env on the available gpu
if args.use_gpu:
device = "cuda"
devices = []
for i in range(torch.cuda.device_count()):
devices += ["cuda:%d" % i]
print("available gpus:", devices)
else:
device = "cpu"
devices = [device]
# initialize global parameters for the current training trial
global_step_lock = mp.Lock()
global_step_counter = mp.Value("i", global_step)  # "i" is the type code for c_int
# train_thread(0, device, args,
# reinforce_configs, annunciator_configs,
# src_vocab, trg_vocab, data_set,
# global_agent, global_replay_buffer,
# global_step_counter, global_step_lock,
# agent_configs)
# valid_thread(device, args,
# reinforce_configs, annunciator_configs,
# src_vocab, trg_vocab, data_set,
# global_agent, global_replay_buffer,
# global_step_counter, global_step_lock,
# agent_configs)
# build multi thread for learning and validation
process = []
for rank in range(args.n):
print("initialize training thread on cuda:%d" % (rank+1))
p=mp.Process(
target=train_thread,
args=(rank, "cuda:%d"%(rank+1), args,
reinforce_configs, annunciator_configs,
src_vocab, trg_vocab, data_set,
global_agent, global_replay_buffer,
global_step_counter, global_step_lock,
agent_configs)
)
p.start()
process.append(p)
# run the dev thread for initiation
print("initialize dev thread on cuda:0")
p = mp.Process(
target=valid_thread,
args=("cuda:0", args,
reinforce_configs, annunciator_configs,
src_vocab, trg_vocab, data_set,
global_agent, global_replay_buffer,
global_step_counter, global_step_lock,
agent_configs)
)
p.start()
process.append(p)
for p in process:
p.join()
def train_thread(rank, device, args,
reinforce_configs, annunciator_configs,
src_vocab, trg_vocab, data_set,
global_SACAgent, global_replay_buffer,
global_step_counter, global_step_lock,
local_agent_configs):
"""
build training thread for a local SACAgent on gpu device
provide parameter soft-updates for the global_models.
"""
GlobalNames.USE_GPU = args.use_gpu
GlobalNames.SEED = reinforce_configs["seed"]
torch.manual_seed(GlobalNames.SEED + rank)
# build local SACAgent
rephraser_model_configs = local_agent_configs["rephraser_model_configs"]
rephraser_optimizer_configs = local_agent_configs["rephraser_optimizer_configs"]
with open(reinforce_configs["victim_configs"], "r") as victim_f:
victim_configs = yaml.load(victim_f, Loader=yaml.FullLoader)
_, _, limit_dist = load_or_extract_near_vocab(
config_path=reinforce_configs["victim_configs"],
model_path=reinforce_configs["victim_model"],
init_perturb_rate=reinforce_configs["init_perturb_rate"],
save_to=os.path.join(args.save_to, "near_vocab"),
save_to_full=os.path.join(args.save_to, "full_near_vocab"),
top_reserve=12,
emit_as_id=True)
local_agent = rephraser.SACAgent(
device=device,
d_word_vec=victim_configs["model_configs"]["d_word_vec"],
d_model=rephraser_model_configs["d_model"],
limit_dist=limit_dist,
dropout=rephraser_model_configs["dropout"],
learnable_temperature=rephraser_model_configs["learnable_temperature"],
init_temperature=rephraser_model_configs["init_temperature"],
rephraser_optimizer_configs=rephraser_optimizer_configs,
rank=rank,
save_to=os.path.join(args.save_to),
num_kept_checkpoints=reinforce_configs["num_kept_checkpoints"]
)
local_summary_writer = SummaryWriter(
log_dir=os.path.join(args.save_to, "train_env%d" % rank))
# # this is the secondary buffer
# local_replay_buffer = ReplayBuffer(
# max_sen_len=global_replay_buffer.max_sen_len,
# state_dim=victim_configs["model_configs"]["d_word_vec"],
# action_dim=victim_configs["model_configs"]["d_word_vec"],
# capacity=max(reinforce_configs["replay_buffer_capacity"]/20, 10000)
# )
# build environment (include annunciator update settings)
reinforce_iterator = DataIterator(
dataset=data_set, batch_size=reinforce_configs["batch_size"],
use_bucket=True, buffer_size=reinforce_configs["buffer_size"],
numbering=True)
local_data_iterator = reinforce_iterator.build_generator()
local_env = Translate_Env(
reinforce_configs=reinforce_configs,
annunciator_configs=annunciator_configs,
src_vocab=src_vocab, trg_vocab=trg_vocab,
data_iterator=local_data_iterator,
save_to=args.save_to, device=device)
episode_count = 0 # a batch of sentences as an episode & learning episodes
local_step = global_step_counter.value # initiate local agent update steps
patience_t = annunciator_configs["patience"]
trust_acc = 0.5
while True: # infinite loop of data set iterator (epoch)
# we will continue with a new iterator with refreshed environments
# whenever the last iterator breaks with "StopIteration"
rephraser_iterator = reinforce_iterator.build_generator()
local_env.reset_data_iter(rephraser_iterator)
try:
while True: # loop training episodes
# the environment will be initiated by an actor as self learning
local_env.reset()
states, _, _, _ = local_env.get_state()
# x_emb = local_env.reset(local_agent.actor)
# x_emb = torch.from_numpy(x_emb)
# if device != "cpu":
# x_emb = x_emb.to(device)
annunciator_base_step = local_step
episode_length = 0 # the rollout steps
episode_rewards = np.array([0.] * states.shape[0])
done = True # whether the episode is finished & should reset episode with new batch
while episode_length <= args.max_episode_lengths: # loop TD learning rollouts
# check for the environment updates using current **global** agent
if episode_count % local_agent_configs["rephraser_update_steps"] == 0:
""" stop criterion:
until mse reaches the bound within patience.
otherwise the training thread stops
"""
try: # update environment
INFO("Update environment")
# sync from the global agent to avoid unnecessary locking.
local_agent.sync_from(global_SACAgent)
annunciator_base_step, trust_acc = local_env.update_annunciator(
local_agent,
annunciator_base_step,
min_update_steps=annunciator_configs["valid_freq"],
max_update_steps=annunciator_configs["annunciator_update_steps"],
accuracy_bound=annunciator_configs["acc_bound"],
overall_update_weight=1-trust_acc,
summary_writer=local_summary_writer)
# global_SACAgent.to("cpu").train()
# switch back to training mode
local_agent = local_agent.to(device).train()
except StopIteration:
INFO("finish one training epoch, reset data_iterator")
break
annunciator_base_step += 1 # a flag to label the scorer updates
if trust_acc < annunciator_configs["d_converged_bound"]:
# GAN target reached, scorer has reached its limit.
patience_t -= 1
INFO("scorer reached GAN convergence bound: %d times" % patience_t)
else: # reset patience if scorer is refreshed
patience_t = annunciator_configs["patience"]
if patience_t == 0 or episode_count > args.max_episodes:
WARN("maximum patience & training step reached. Thread stop")
break
local_agent = local_agent.to(device).train() # switch the agent to training mode
if done: # can't create SARSA with only one step, start a new episode
INFO("sync from global agent")
local_agent.sync_from(global_SACAgent)
local_agent = local_agent.to(device).train()
local_env.reset()
done = False
states, masks, rephrase_positions, _ = local_env.get_state()
roll_out_rewards = np.array([0] * states.shape[0])
import ast
import copy
import os
import base64
import time
import cloudpickle as pickle
import functools
import operator
import six
import collections
import weakref
from future.utils import with_metaclass
import numpy as np
import pandas as pd
import tabulate
import pyarrow as pa
import vaex.hash
import vaex.serialize
from vaex.utils import _ensure_strings_from_expressions, _ensure_string_from_expression
from vaex.column import ColumnString, _to_string_sequence
from .hash import counter_type_from_dtype
from vaex.datatype import DataType
from vaex.docstrings import docsubst
from . import expresso
try:
from StringIO import StringIO
except ImportError:
from io import BytesIO as StringIO
try:
collectionsAbc = collections.abc
except AttributeError:
collectionsAbc = collections
# TODO: repeated from dataframe.py
default_shape = 128
PRINT_MAX_COUNT = 10
expression_namespace = {}
expression_namespace['nan'] = np.nan
_binary_ops = [
dict(code="+", name='add', op=operator.add),
dict(code="in", name='contains', op=operator.contains),
dict(code="/", name='truediv', op=operator.truediv),
dict(code="//", name='floordiv', op=operator.floordiv),
dict(code="&", name='and', op=operator.and_),
dict(code="^", name='xor', op=operator.xor),
dict(code="|", name='or', op=operator.or_),
dict(code="**", name='pow', op=operator.pow),
dict(code="is", name='is', op=operator.is_),
dict(code="is not", name='is_not', op=operator.is_not),
dict(code="<<", name='lshift', op=operator.lshift),
dict(code="%", name='mod', op=operator.mod),
dict(code="*", name='mul', op=operator.mul),
dict(code=">>", name='rshift', op=operator.rshift),
dict(code="-", name='sub', op=operator.sub),
dict(code="<", name='lt', op=operator.lt),
dict(code="<=", name='le', op=operator.le),
dict(code="==", name='eq', op=operator.eq),
dict(code="!=", name='ne', op=operator.ne),
dict(code=">=", name='ge', op=operator.ge),
dict(code=">", name='gt', op=operator.gt),
]
if hasattr(operator, 'div'):
_binary_ops.append(dict(code="/", name='div', op=operator.div))
if hasattr(operator, 'matmul'):
_binary_ops.append(dict(code="@", name='matmul', op=operator.matmul))
reversable = 'add sub mul matmul truediv floordiv mod divmod pow lshift rshift and xor or'.split()
_unary_ops = [
dict(code="~", name='invert', op=operator.invert),
dict(code="-", name='neg', op=operator.neg),
dict(code="+", name='pos', op=operator.pos),
]
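# Illustrative sketch (not used by the metaclass below): each entry in the op tables
# pairs an operator symbol with its name and callable, so a wrapper method can be
# generated per entry. Looking up and applying the 'add' entry directly:
def _example_op_table_lookup():
    add_entry = next(entry for entry in _binary_ops if entry['name'] == 'add')
    return add_entry['code'], add_entry['op'](2, 3)  # ('+', 5) via operator.add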
class Meta(type):
def __new__(upperattr_metaclass, future_class_name,
future_class_parents, attrs):
# attrs = {}
for op in _binary_ops:
def wrap(op=op):
def f(a, b):
self = a
# print(op, a, b)
if isinstance(b, str) and self.dtype.is_datetime:
b = np.datetime64(b)
if self.df.is_category(self.expression) and self.df._future_behaviour and not isinstance(b, Expression):
labels = self.df.category_labels(self.expression)
if b not in labels:
raise ValueError(f'Value {b} not present in {labels}')
b = labels.index(b)
a = self.index_values()
try:
stringy = isinstance(b, str) or b.is_string()
except:
# this can happen when expression is a literal, like '1' (used in propagate_unc)
# which causes the dtype to fail
stringy = False
if stringy:
if isinstance(b, str):
b = repr(b)
if op['code'] == '==':
expression = 'str_equals({0}, {1})'.format(a.expression, b)
elif op['code'] == '!=':
expression = 'str_notequals({0}, {1})'.format(a.expression, b)
elif op['code'] == '+':
expression = 'str_cat({0}, {1})'.format(a.expression, b)
else:
raise ValueError('operand %r not supported for string comparison' % op['code'])
return Expression(self.ds, expression=expression)
else:
if isinstance(b, Expression):
assert b.ds == a.ds
b = b.expression
elif isinstance(b, (np.timedelta64)):
unit, step = np.datetime_data(b)
import os
import re
import time
from dataclasses import dataclass, field
from timeit import repeat
from typing import Optional,Dict, Union, Any, Tuple, List
import fitlog
import nltk
import numpy as np
import datasets
import torch
import torch.nn as nn
import torch.distributed as dist
from torch.utils.data import DataLoader, Dataset
from tqdm import tqdm
import transformers
from transformers import (
DataCollatorForLanguageModeling,
DataCollatorForSeq2Seq,
AutoConfig,
AutoTokenizer,
HfArgumentParser,
TrainingArguments,
Seq2SeqTrainingArguments,
set_seed,
)
from transformers import Trainer, Seq2SeqTrainer
from transformers import TrainingArguments
from transformers import trainer_utils, training_args
from transformers.trainer_pt_utils import nested_detach
from transformers import BertForMaskedLM
from transformers.file_utils import PaddingStrategy
from transformers.modeling_utils import PreTrainedModel
from transformers.tokenization_utils_base import BatchEncoding, PreTrainedTokenizerBase
from data.DatasetLoadingHelper import (
load_ctc2021,
load_sighan,
load_sighan_enchanted,
load_sighan_gector,
load_sighan_mask,
load_sighan_expand,
load_lattice_sighan,
load_abs_pos_sighan,
load_abs_pos_sighan_lang8,
load_abs_pos_sighan_plus,
load_abs_pos_and_spe_token_sighan,
load_sighan13_test,
load_sighan14_test,
load_sighan15_test,
load_sighan_chinesebert,
load_sighan_chinesebert_mask,
load_sighan_chinesebert_holy,
load_sighan_holy,
)
def ddp_exec(command):
"""Run `command` (a Python statement string) on the local rank-0 process only."""
if os.environ["LOCAL_RANK"] != '0':
return
else:
exec(command)
def ddp_print(*something):
"""
Outdated helper: print the given objects on the local rank-0 process only.
"""
if os.environ["LOCAL_RANK"] != '0':
return
else:
for thing in something:
print(thing)
return
def fitlogging(training_args):
for attr in dir(training_args):
if not re.match("__.*__", attr) and isinstance(getattr(training_args, attr), (int, str, bool, float)):
fitlog.add_hyper(value=getattr(training_args, attr), name=attr)
return
@dataclass
class MySeq2SeqTrainingArguments(Seq2SeqTrainingArguments):
model_name: str=field(default="MaskedLM", metadata={"help":"which bert model "})
dataset: str = field(default="sighan", metadata={"help":"dataset"})
eval_dataset:str = field(default="sighan", metadata={"help":"dataset for eval"})
max_length: int = field(default=128, metadata={"help": "max length"})
num_beams: int = field(default=4, metadata={"help": "num beams"})
use_extra_dataset:bool = field(default=False, metadata={"help":"Only work for ctc2021, using larger v2"})
fix_cls:bool = field(default=False, metadata={"help":"whether or not fix the cls layer of BertMaskedLM"})
cl_weight:float = field(default=0.2, metadata={"help": "contrastive learning loss weight"})
repeat_weight:float = field(default=0.2, metadata={"help": "distill repeat loss"})
copy_weight:float = field(default=0.5, metadata={"help":"copy weight"})
num_gpus:int = field(default=4, metadata={"help":"num_gpus"})
pretrained_name:str = field(default="roberta", metadata={"help":"pretrained_name"})
log_path:str = field(default="Recent_train.log", metadata={"help":"log path or name"})
class mydataset(Dataset):
def __init__(self, data):
self.data = data
def __getitem__(self, index):
return self.data[index]
def __len__(self):
return len(self.data)
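# A minimal sketch (toy, hypothetical token ids) of how the mydataset wrapper is used:
# it simply stores a list of per-example feature dicts and exposes them through
# __getitem__/__len__, matching the dict format produced by the loaders below.
def _example_mydataset_usage():
    toy_examples = [
        {"input_ids": [101, 2769, 102], "labels": [101, 2769, 102], "attention_mask": [1, 1, 1]},
        {"input_ids": [101, 3330, 102], "labels": [101, 3331, 102], "attention_mask": [1, 1, 1]},
    ]
    dataset = mydataset(toy_examples)
    assert len(dataset) == 2
    return dataset[0]["input_ids"]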
def argument_init(trainingarguments=Seq2SeqTrainingArguments):
"""
"""
parser = HfArgumentParser(trainingarguments)
training_args = parser.parse_args_into_dataclasses()[0]
return training_args
def get_model(model_name="MaskedLM", pretrained_model_name_or_path="hfl/chinese-roberta-wwm-ext", training_args=None):
"""
Just get model
MLP:
bert->mlp->loss
Dot:
bert->dot product with embeddings->loss
MaskedLM_v2:
bert with lexicon lattice (FLAT-style) -> loss
CL:
Model with Contrastive Learning Loss
MaskedLM:
bert->lmhead->loss
"""
model = None
print("Hint: Loading Model " + "*"*5 + model_name + "*"*5)
if model_name == "MLP":
from models.bert.modeling_bert_v3 import BertModelForCSC as ProtoModel
elif model_name == "Dot":
from models.bert.modeling_bert_v3 import BetterBertModelForCSC as ProtoModel
elif model_name == "MaskedLM_v2":
from models.bert.modeling_bert_v3 import BertForMaskedLM_v2 as ProtoModel
elif model_name == "CL":
from models.bert.modeling_bert_v4 import BertForMaskedLM_CL as ProtoModel
elif model_name == "CPT_NLG":
from models.bart.modeling_bart_v2 import BartForConditionalGeneration as ProtoModel
pretrained_model_name_or_path="fnlp/cpt-base" # '/remote-home/xtzhang/CTC/CTC2021/SpecialEdition/models/bart/bart-zh/arch12-2-new-iter8w'
#pretrained_model_name_or_path = '/remote-home/xtzhang/CTC/CTC2021/SpecialEdition/models/bart/bart-zh/arch12-2-new-iter8w'
elif model_name == "CPT_NLU":
from models.bart.modeling_bart_v2 import BartForMaskedLM as ProtoModel
pretrained_model_name_or_path="fnlp/cpt-large" # '/remote-home/xtzhang/CTC/CTC2021/SpecialEdition/models/bart/bart-zh/arch12-2-new-iter8w'
elif model_name == "BART-base":
from models.bart.modeling_bart_v2 import BartForConditionalGeneration as ProtoModel
pretrained_model_name_or_path="fnlp/bart-base-chinese"# '/remote-home/xtzhang/CTC/CTC2021/SpecialEdition/models/bart/bart-zh/arch12-2-new-iter8w'
elif model_name == "BART-large":
from models.bart.modeling_bart_v2 import BartForConditionalGeneration as ProtoModel
pretrained_model_name_or_path="fnlp/bart-large-chinese"# '/remote-home/xtzhang/CTC/CTC2021/SpecialEdition/models/bart/bart-zh/arch12-2-new-iter8w'
elif model_name == "Proto":
from models.bert.modeling_bert_v4 import ProtoModel as ProtoModel
elif model_name == "Gector":
from models.bert.modeling_bert_v3 import GectorModel as ProtoModel
elif model_name == "GPT":
from transformers import GPT2LMHeadModel as ProtoModel
elif model_name is None or model_name == "MaskedLM":
if training_args.pretrained_name == "chinesebert":
print("Hint: Load ChineseBert MaskedLM")
from chinesebert import ChineseBertConfig, ChineseBertForMaskedLM
config = ChineseBertConfig.from_pretrained(pretrained_model_name_or_path)
model = ChineseBertForMaskedLM.from_pretrained(pretrained_model_name_or_path, config=config)
return model
elif training_args.pretrained_name == "roformer":
from roformer import RoFormerForMaskedLM
model = RoFormerForMaskedLM.from_pretrained( pretrained_model_name_or_path )
return model
print("Hint: Load Default BertForMaskedLM.")
from transformers import BertForMaskedLM as ProtoModel
else:
print("Hint: No such " + model_name)
exit(0)
if model_name != "Proto":
model = ProtoModel.from_pretrained(pretrained_model_name_or_path=pretrained_model_name_or_path)
else:
model = ProtoModel(
pretrained_model_name_or_path=pretrained_model_name_or_path,
cl_weight=training_args.cl_weight,
repeat_weight=training_args.repeat_weight,
copy_weight=training_args.copy_weight
)
if not model:
print("Warning: wrong model name ! Check the core.py ")
exit()
return model
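# A minimal sketch of the default MaskedLM branch of get_model. Assumptions: network
# access to the Hugging Face hub for the checkpoint download, and an arbitrary
# output_dir chosen here just to satisfy the TrainingArguments constructor.
def _example_get_model():
    args = MySeq2SeqTrainingArguments(output_dir="./tmp_example")
    return get_model(model_name="MaskedLM",
                     pretrained_model_name_or_path="hfl/chinese-roberta-wwm-ext",
                     training_args=args)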
def get_dataset(training_args):
"""
preprocess wrapped in load_ctc2021
return : mydataset
torch.LongTensor
Good day!
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if training_args.dataset == "ctc2021":
train_data, eval_data, test_data = load_ctc2021(training_args.use_extra_dataset)
elif training_args.dataset == "sighan":
train_data, eval_data, test_data = load_sighan(path_head="")
else:
print("Error: No such dataset ")
print(training_args.dataset)
exit(0)
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def get_dataset_plus(training_args):
"""
preprocess wrapped in load_ctc2021
return : mydataset
torch.LongTensor
Good day!
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if training_args.dataset == "ctc2021":
train_data, eval_data, test_data = load_ctc2021(extra=training_args.use_extra_dataset)
elif "sighan" in training_args.dataset:
#train_data, eval_data, test_data = load_sighan(path_head)
if training_args.model_name == "Gector":
return _get_Gector_dataset()
if training_args.pretrained_name == "chinesebert":
if "mask" in training_args.dataset:
return _get_chinesebert_mask_dataset()
elif "holy" in training_args.dataset:
return _get_chinesebert_holy_dataset()
else:
return _get_chinesebert_dataset()
elif "mask" in training_args.dataset:
return _get_mask_dataset()
elif "holy" in training_args.dataset:
return _get_holy_dataset()
elif "enchanted" in training_args.dataset:
return _get_enchanted_dataset()
elif "raw" in training_args.dataset:
return _get_raw_dataset()
elif 'ReaLiSe' in training_args.dataset:
return _get_ReaLiSe_dataset()
elif 'expand' in training_args.dataset:
return _get_expand_dataset()
else:
print("Unclear data type, load default raw sighan")
return _get_raw_dataset()
else:
print("Error: No such dataset ")
print(training_args.dataset)
exit(0)
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_enchanted_dataset(which="15"):
"""
Gector for sighan
"""
print("Loading Enchanted Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_enchanted(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_raw_dataset(which="15"):
"""
Gector for sighan
"""
print("Loading Raw Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_holy_dataset(which="15"):
"""
Gector for sighan
"""
print("Loading Holy Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_holy(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_mask_dataset(which="15"):
"""
Gector for sighan
"""
print("Loading MASK Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_mask(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_Gector_dataset(which="15"):
"""
Gector for sighan
"""
print("Loading GECTOR Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_gector(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_chinesebert_holy_dataset(which="15"):
"""
ChineseBert for sighan
Mainly diff in no max_length and 'pinyin_idx' must be 8 * len('input_ids')
"""
print("Loading ChineseBert Holy Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_chinesebert_holy(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_chinesebert_dataset(which="15"):
"""
ChineseBert for sighan
Mainly diff in no max_length and 'pinyin_idx' must be 8 * len('input_ids')
"""
print("Loading ChineseBert Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_chinesebert(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_chinesebert_mask_dataset(which="15"):
"""
ChineseBert for sighan
Mainly diff in no max_length and 'pinyin_idx' must be 8 * len('input_ids')
"""
print("Loading Masked ChineseBert Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_chinesebert_mask(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_ReaLiSe_dataset(which="15"):
"""
Load the SIGHAN data preprocessed by ReaLiSe.
"""
print("Loading ReaLiSe Dataset !")
print("Hint: The data you are loading now is the preprocessed SIGHAN from ReaLiSe.")
ddp_exec("os.system('date')")
path = "../SE_tmp_back/milestone/ReaLiSe/data/"
import pickle
train_dataset = pickle.load(open(path + "trainall.times2.pkl", "rb"))
eval_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
test_dataset = pickle.load(open(path + "test.sighan" + which + ".pkl", "rb"))
print("Hint: Using **SIGHAN" + which + "** for eval & test !")
def trans2mydataset(features):
new = []
for feature in features:
tmp = {}
tmp["input_ids"] = feature["src_idx"][:128]
tmp["labels"] = feature["tgt_idx"][:128]
tmp["attention_mask"] = ([1] * len(tmp["input_ids"]))[:128]#feature["lengths"])[:128]
new.append(tmp)
return mydataset(new)
print("Loaded successfully !")
ddp_exec("os.system('date')")
print("over")
return trans2mydataset(train_dataset), trans2mydataset(eval_dataset), trans2mydataset(test_dataset)
def _get_expand_dataset(which="15"):
"""
NLPcc and HSK Expand for sighan
"""
print("Loading Expand Dataset !")
ddp_exec("os.system('date')")
train_data, eval_data, test_data = load_sighan_expand(path_head="")
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loaded successfully !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_sighan_test(which, path_head=""):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if which == "13":
test_data = load_sighan13_test(path_head)
elif which == "14":
test_data = load_sighan14_test(path_head)
elif which == "15":
test_data = load_sighan15_test(path_head)
else:
print("Error: No such dataset ")
print(which)
exit(0)
test_dataset = mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return test_dataset
def _get_lattice_dataset(dataset="sighan", path_head="."):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if dataset == "sighan":
datasets, vocabs, embeddings = load_lattice_sighan(path_head=path_head)
else:
exit()
datasets["train"], datasets["valid"], datasets["test"] = mydataset(datasets["train"]), mydataset(datasets["valid"]), mydataset(datasets["test"])
return datasets, vocabs, embeddings
def _get_magic_plus_dataset(dataset="sighan", path_head=""):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if dataset == "sighan":
train_data, eval_data, test_data = load_abs_pos_sighan_plus(path_head=path_head)
else:
exit()
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_magic_dataset(dataset="sighan", path_head=""):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if dataset == "sighan":
train_data, eval_data, test_data = load_abs_pos_sighan(path_head=path_head)
else:
exit()
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_magic_lang8_dataset(dataset="sighan", path_head=""):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if dataset == "sighan":
train_data, eval_data, test_data = load_abs_pos_sighan_lang8(path_head=path_head)
else:
exit()
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_magic_expand_dataset(dataset="sighan", path_head=""):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if dataset == "sighan":
train_data, eval_data, test_data = load_abs_pos_sighan_plus(path_head=path_head)
else:
exit()
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset
def _get_super_magic_dataset(dataset="sighan", path_head=""):
"""
"""
print("Loading Dataset !")
ddp_exec("os.system('date')")
if dataset == "sighan":
train_data, eval_data, test_data, tokenizer = load_abs_pos_and_spe_token_sighan(path_head=path_head)
else:
exit()
train_dataset, eval_dataset, test_dataset = mydataset(train_data), mydataset(eval_data), mydataset(test_data)
print("Loading Succeed !")
ddp_exec("os.system('date')")
return train_dataset, eval_dataset, test_dataset, tokenizer
def get_metrics(training_args):
if "sighan" in training_args.dataset:
print("Hint: Using aligned sighan F1_score as metric")
return _get_metrics(training_args)
if "ctc2021" in training_args.dataset :
print("Hint: Using Seq2Seq ctc2021 score as metric")
return _get_seq2seq_metrics(training_args)
else:
print("Error when getting metrics.")
exit(0)
def _get_metrics(training_args):
"""
#https://huggingface.co/metrics
#accuracy,bertscore, bleu, bleurt, coval, gleu, glue, meteor,
#rouge, sacrebleu, seqeval, squad, squad_v2, xnli
metric = load_metric()
"""
import numpy as np
from datasets import load_metric
def compute_metrics(eval_preds):
"""
reference: https://github.com/ACL2020SpellGCN/SpellGCN/blob/master/run_spellgcn.py
"""
Achilles = time.time()
sources, preds, labels = eval_preds# (num, length) np.array
tp, fp, fn = 0, 0, 0
sent_p, sent_n = 0, 0
for i in range(len(sources)):
#print(sources[i])
#print(preds[i])
#print(labels[i])
source, pred, label = sources[i], preds[i], labels[i]
source, label = source[ source != -100], label[label != -100]
source, label = source[source != 0], label[label != 0]#pad idx for input_ids
#we suspect the pretrained masked-language-model BERT gets no SIGHAN supervision for tokens 101 & 102 ([CLS] & [SEP]), so we simply map them to the same id and ignore the difference
source, pred, label = np.where(source == 102, 101, source), np.where(pred == 102, 101, pred), np.where(label == 102, 101, label)
# -*- coding: utf-8 -*-
import numpy as np
from scipy.stats import norm
import matplotlib.pyplot as plt
def myrand_gmm(n, mu, sigma, fill=0.0):
x = np.zeros(n)
g = np.random.randn(n)
u = np.random.rand(n)
#mu = np.array([1.0, 2.0, 3.0])
#sigma = np.array([0.1, 0.3, 0.5])
flag = (0 <= u) & (u < 1/2) # this & is an element-wise logical AND (bitwise operator)
x = (mu[0] + sigma[0]*g)*flag
flag = (1/2 <= u) & (u < 1)
x += (mu[1] + sigma[1]*g)*flag
#flag = (2/3 <= u) & (u <= 1)
#x += (mu[2] + sigma[2]*g)*flag
return x
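# A quick empirical check (illustrative, arbitrary parameters): with equal mixture
# weights the sample mean of myrand_gmm should be close to the average of the two
# component means, here 0.5*(1.0+3.0) = 2.0.
def _example_myrand_gmm():
    mu = np.array([1.0, 3.0])
    sigma = np.array([0.5, 0.5])
    x = myrand_gmm(100000, mu, sigma)
    return abs(np.mean(x) - 0.5*(mu[0] + mu[1]))  # should be small (~1e-2)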
def myrand(n, fill=0.0): # n is the number of samples to generate
x = np.zeros(n)
import numpy as np
import matplotlib.pyplot as plt
import pandas as pd
import math
import cmath
import seaborn
import scipy
import functools
import time
v=1 #intracell hopping in time period 1
w=1 #intercell hopping in time period 2
aalpha=0.25 #phase index 1
bbeta=1.5 #phase index 2
t1=(np.pi*(aalpha+bbeta))/(2*v) #time period 1
t2=(np.pi*(bbeta-aalpha))/(2*w) #time period 2
print(t1,t2)
T=t1+t2 #total time period
V=v*t1
W=w*t1
# V=np.pi-0.2
# W=np.linspace(0,np.pi,50)
# length=len(W)
sig0 = np.matrix([[1,0],[0,1]])
sig1 = np.matrix([[0,1],[1,0]])
sig2 = np.matrix([[0,-1j],[1j,0]])
sig3 = np.matrix([[1,0],[0,-1]])
def nU(k):
nU0 = np.cos(W/2)*np.cos(V/2)-np.sin(W/2)*np.sin(V/2)*np.cos(k)
nU1 = -1j*(np.cos(W/2)*np.sin(V/2)+np.sin(W/2)*np.cos(V/2)*np.cos(k))
nU2 = -1j*(np.sin(W/2)*np.cos(V/2)*np.sin(k))
nU3 = 1j*(np.sin(W/2)*np.sin(V/2)*np.sin(k))
return np.dot(nU0,sig0)+np.dot(nU1,sig1)+np.dot(nU2,sig2)+np.dot(nU3,sig3)
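# Sanity check (illustrative, not part of the original script): nU(k) is the product
# of two SU(2) rotations, so it should be unitary for any quasi-momentum k.
def _check_nU_unitary(k=0.7):
    U = nU(k)
    return np.allclose(U @ U.conj().T, np.eye(2))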
# def aU(k):
# t0 = np.cos(V)*np.cos(W)-np.sin(V)*np.sin(W)*np.cos(k)
# t1 = -np.sin(V)*np.cos(W)-np.cos(V)*np.sin(W)*np.cos(k)
# t2 = -np.sin(W)*np.sin(k)
# aU0 = np.sqrt((1+t0)/2)
# aU1 = -1j*np.sqrt(1/(2*(1+t0)))*t1
# aU2 = -1j*np.sqrt(1/(2*(1+t0)))*t2
# return np.dot(aU0,sig0)+np.dot(aU1,sig1)+np.dot(aU2,sig2)
def aU(k):
t0 = np.cos(V)*np.cos(W)-np.sin(V)*np.sin(W)*np.cos(k)
t1 = -np.sin(V)*np.cos(W)-np.cos(V)*np.sin(W)*np.cos(k)
t2 = -np.sin(W)*np.sin(k)
aU0 = -1j*np.sqrt((1-t0)/2)
aU1 = np.sqrt(1/(2*(1-t0)))*t1
aU2 = np.sqrt(1/(2*(1+t0)))*t2
return np.dot(aU0,sig0)+np.dot(aU1,sig1)+np.dot(aU2,sig2)
from __future__ import division, print_function
import numpy as np
import os
from scipy.stats import multivariate_normal
import sys
import struct
try:
import sounddevice as sd
have_sounddevice = True
except:
have_sounddevice = False
from .stft import stft
from .acoustics import mfcc
class CircularGaussianEmission:
def __init__(self, nstates, odim=1, examples=None):
''' Initialize the Gaussian emission object '''
# The emissions parameters
self.K = nstates
if examples is None:
# Initialize to random components
self.O = odim
self.mu = np.random.normal(size=(self.K, self.O))
self.Sigma = np.ones((self.K, self.O))*10
else:
# Initialize all components to the same mean and variance of the data
self.O = examples[0].shape[1]
X = np.concatenate(examples, axis=0)
self.mu = np.array([np.mean(X, axis=0)]*self.K)
centered = X - self.mu[0]
self.Sigma = np.array([np.mean(centered**2, axis=0)]*self.K)
def update_parameters(self, examples, gamma):
g = np.concatenate(gamma, axis=0)
X = np.concatenate(examples, axis=0)
Z = g.sum(axis=0)
for k in range(self.K):
self.mu[k] = np.sum(X.T * g[:,k], axis=1)/Z[k]
centered = (X - self.mu[k])**2
self.Sigma[k] = np.sum(centered.T * g[:,k], axis=1)/Z[k]
def get_pdfs(self):
''' Return the pdf of all the emission probabilities '''
return [multivariate_normal(self.mu[k], np.diag(self.Sigma[k])) for k in range(self.K)]
def prob_x_given_state(self, examples):
'''
Recompute the probability of the observation given the state of the
latent variables
'''
distribution = [multivariate_normal(self.mu[k], np.diag(self.Sigma[k])) for k in range(self.K)]
p_x_given_z = []
for X in examples:
p_x_given_z.append(np.zeros((X.shape[0], self.K)))
for k in range(self.K):
p_x_given_z[-1][:,k] = distribution[k].pdf(X)
return p_x_given_z
class GaussianEmission:
def __init__(self, nstates, odim=1, examples=None):
''' Initialize the Gaussian emission object '''
# The emissions parameters
self.K = nstates
if examples is None:
# initialize to random mean unit variance
self.O = odim
self.mu = np.random.normal(size=(self.K, self.O))
self.Sigma = np.random.normal(size=(self.K, self.O, self.O))
for k in range(self.K):
self.Sigma[k] = np.dot(self.Sigma[k].T, self.Sigma[k]) + np.eye(self.O)
else:
# Initialize using mean and covariance of dataset
self.O = examples[0].shape[1]
X = np.concatenate(examples, axis=0)
self.mu = np.array([np.mean(X, axis=0)]*self.K)
centered = X - self.mu[0]
self.Sigma = np.array([np.diag(np.mean(centered**2, axis=0))]*self.K)
def update_parameters(self, examples, gamma):
g = np.concatenate(gamma, axis=0)
X = np.concatenate(examples, axis=0)
Z = g.sum(axis=0)
for k in range(self.K):
self.mu[k] = np.sum(X.T * g[:,k], axis=1)/Z[k]
centered = X - self.mu[k]
self.Sigma[k] = np.dot(centered.T*g[:,k], centered/Z[k])
def get_pdfs(self):
''' Return the pdf of all the emission probabilities '''
return [multivariate_normal(self.mu[k], self.Sigma[k]) for k in range(self.K)]
def prob_x_given_state(self, examples):
'''
Recompute the probability of the observation given the state of the
latent variables
'''
distribution = [ multivariate_normal(self.mu[k], self.Sigma[k]) for k in range(self.K)]
p_x_given_z = []
for X in examples:
p_x_given_z.append(np.zeros((X.shape[0], self.K)))
for k in range(self.K):
p_x_given_z[-1][:,k] = distribution[k].pdf(X)
return p_x_given_z
class HMM:
'''
Hidden Markov Model with Gaussian emissions
Attributes
----------
K : int
Number of states in the model
O : int
Number of dimensions of the Gaussian emission distribution
A : ndarray
KxK transition matrix of the Markov chain
pi : ndarray
K dim vector of the initial probabilities of the Markov chain
emission : (GaussianEmission or CircularGaussianEmission)
An instance of emission_class
model : string, optional
The model used for the chain, can be 'full' or 'left-right'
    leftright_jump_max : int, optional
The number of non-zero upper diagonals in a 'left-right' model
'''
def __init__(self, nstates, emission, model='full', leftright_jump_max=3):
'''
Initialize a Hidden Markov Model with nstates and Gaussian observations
nstates: int
The number of states in the Markov chain
        emission : emission object
            The emission object (CircularGaussianEmission or GaussianEmission)
model : string, optional
The model used for the chain, can be 'full' or 'left-right'
leftright_jump_max : int
The maximum jump length in the Left-Right chain model
'''
self.K = nstates # number of states
self.emission = emission # The observation parameters
# The Markov chain parameters
self.model = model
self.leftright_jump_max = leftright_jump_max
self.A = np.zeros((self.K, self.K)) # the state transition matrix
self.pi = np.zeros((self.K)) # the initial distribution
# Initialize the HMM parameters to some random values
if self.model == 'full':
self.A = np.random.uniform(size=(self.K,self.K))
self.pi = np.random.uniform(size=(self.K))
elif self.model == 'left-right':
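            # Left-right (Bakis) structure: keep the main diagonal plus the first
            # `leftright_jump_max` upper diagonals, then boost the self-transition
            # weight before the rows are normalized below.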
self.A = np.triu(np.tril(np.random.uniform(size=(self.K,self.K)), k=self.leftright_jump_max))
self.A += np.diag(np.sum(self.A[:,:], axis=1)*2)
self.pi = np.zeros(self.K)
self.pi[0] = 1
# Normalize the distributions
for row in self.A:
row /= row.sum()
self.pi /= self.pi.sum()
def fit(self, examples, tol=0.1, max_iter=10, verbose=False):
'''
Training of the HMM using the EM algorithm
Parameters
----------
examples : (list)
A list of examples used to train the model. Each example is
an array of feature vectors, each row is a feature vector,
the sequence runs on axis 0
tol : (float)
            The training stops when the progress between two steps is less than
this number (default 0.1)
max_iter : (int)
Alternatively the algorithm stops when a maximum number of
iterations is reached (default 10)
verbose : bool, optional
When True, prints extra information about convergence
'''
# Make sure to normalize parameters that should be...
for row in self.A:
row[:] /= row.sum()
self.pi[:] /= self.pi.sum()
# Run the EM algorithm
loglikelihood_old = -np.inf # log-likelihood
n_iter = 0
while True:
# Initialize new parameters value for accumulation
loglikelihood = 0.
            # We need to run the forward/backward algorithm for each example
            # and combine the results to form the new estimates
gamma = []
xhi = []
p_x_given_z = self.emission.prob_x_given_state(examples)
# Expectation-step
#-----------------
for X,pxz in zip(examples, p_x_given_z):
# check dimension of emission
if X.shape[1] != self.emission.O:
raise ValueError("Error: Emission vectors of all examples should have the same size")
# First compute alpha and beta using forward/backward algo
alpha, c = self.forward(X, pxz)
beta = self.backward(X, pxz, c)
# Recompute the likelihood of the sequence
# (Bishop 13.63)
loglikelihood += np.sum(np.log(c))
# Now the more interesting quantities
# gamma(z_n) = p(z_n | X, theta_old)
# xhi(z_{n-1}, z_n) = p(z_{n-1}, z_n | X, theta_old)
gamma.append(alpha * beta)
xhi.append(np.zeros((X.shape[0]-1, self.K, self.K)))
for n in range(1,X.shape[0]):
xhi[-1][n-1] = np.outer(alpha[n-1], beta[n]*pxz[n])*self.A/c[n]
# Maximization-step
#------------------
# update the Markov Chain parameters
self.update_parameters(examples, gamma, xhi)
# Update the emission distribution parameters
self.emission.update_parameters(examples, gamma)
# Now check for convergence
#--------------------------
n_iter += 1
epsilon = loglikelihood - loglikelihood_old
if verbose:
print('Iterations:', n_iter, 'epsilon:', epsilon, 'LL_new:', loglikelihood)
# some checks here
if epsilon < tol:
if verbose:
print('Tolerance reached: stopping.')
break
if n_iter == max_iter:
if verbose:
print('Maximum iterations reached: stopping.')
break
loglikelihood_old = loglikelihood
# return the number of iterations performed
return n_iter
def update_parameters(self, examples, gamma, xhi):
''' Update the parameters of the Markov Chain '''
X = np.concatenate(examples, axis=0)
x = np.concatenate(xhi, axis=0)
self.pi[:] = np.sum([g[0,:] for g in gamma], axis=0)
self.A = x.sum(axis=0)
# normalize to enforce distribution constraints
self.pi /= np.sum(self.pi)
for k in range(self.K):
            den = np.sum(self.A[k,:])
            self.A[k,:] /= den
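if __name__ == '__main__':
    # Minimal usage sketch on synthetic 2-D sequences. This assumes the remaining
    # methods of HMM (e.g. forward/backward) from the full source are present.
    rng = np.random.RandomState(0)
    examples = [rng.normal(size=(50, 2)), rng.normal(loc=1.0, size=(40, 2))]
    emission = GaussianEmission(nstates=3, examples=examples)
    hmm = HMM(nstates=3, emission=emission, model='left-right')
    n_iter = hmm.fit(examples, tol=0.1, max_iter=10, verbose=True)
    print('EM finished after', n_iter, 'iterations')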
#!/usr/bin/env python3
import numpy as np
def svmPredict(model, X):
#SVMPREDICT returns a vector of predictions using a trained SVM model
#(svmTrain).
# pred = SVMPREDICT(model, X) returns a vector of predictions using a
    # trained SVM model (svmTrain). X is an m x n matrix where each
    # example is a row. model is an svm model returned from svmTrain.
    # pred is an m x 1 column vector of predictions of {0, 1} values.
#
# check if we are getting a vector. If so, then assume we only need to do predictions
# for a single example
if X.ndim == 1:
X = X[np.newaxis, :]
m = X.shape[0]
p = np.zeros(m)
pred = np.zeros(m)
if model['kernelFunction'].__name__ == 'linearKernel':
# we can use the weights and bias directly if working with the linear kernel
p = np.dot(X, model['w']) + model['b']
elif model['kernelFunction'].__name__ == 'gaussianKernel':
# vectorized RBF Kernel
# This is equivalent to computing the kernel on every pair of examples
X1 = np.sum(X**2, 1)
X2 = np.sum(model['X']**2, 1)
K = X2 + X1[:, None] - 2 * np.dot(X, model['X'].T)
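        # Pairwise squared distances via ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b;
        # e.g. a = [1, 0], b = [0, 1] gives 1 + 1 - 2*0 = 2. So K[i, j] holds
        # ||X[i] - model['X'][j]||^2 before the scaling and exponential below.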
if len(model['args']) > 0:
K /= 2*model['args'][0]**2
        K = np.exp(-K)
def fooofmodel():
import sys
import numpy as np
from scipy.io import loadmat, savemat
from fooof import FOOOFGroup
import matplotlib.pyplot as plt
data = loadmat('ModelPowSpctraForFOOOF.mat')
# Unpack data from dictionary, and squeeze numpy arrays
freqs = np.squeeze(data['fx']).astype('float')
    psds = np.squeeze(data['avgpwr'])
from operator import itemgetter
import math
pil_available = True
try:
from PIL import Image
except:
pil_available = False
import random
import numpy as np
from dezero import cuda
class Dataset:
def __getitem__(self, index):
raise NotImplementedError
def __len__(self):
raise NotImplementedError
class TupleDataset:
"""Dataset of tuples from multiple equal-length datasets.
"""
def __init__(self, *datasets):
self._datasets = datasets
self._length = len(datasets[0])
def __getitem__(self, index):
batches = [dataset[index] for dataset in self._datasets]
if isinstance(index, slice):
L = len(batches[0])
return [tuple([batch[i] for batch in batches]) for i in range(L)]
else:
return tuple(batches)
def __len__(self):
return self._length
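# Example of TupleDataset indexing (illustrative):
#   td = TupleDataset(np.arange(5), np.arange(5) * 10)
#   td[1]   -> (1, 10)
#   td[1:3] -> [(1, 10), (2, 20)]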
class DatasetLoader:
def __init__(self, dataset, batch_size, shuffle=True, gpu=False):
self.dataset = dataset
self.batch_size = batch_size
self.shuffle = shuffle
self.data_size = len(dataset)
self.max_iter = math.ceil(self.data_size / batch_size)
self.gpu = gpu
self.reset()
def reset(self):
self.iteration = 0
if self.shuffle:
random.shuffle(self.dataset)
def __iter__(self):
return self
def _get_batch(self):
i = self.iteration % self.max_iter
start_idx = i * self.batch_size
end_idx = (i + 1) * self.batch_size
batch = self.dataset[start_idx:end_idx]
return batch
def __next__(self):
if self.iteration >= self.max_iter:
self.reset()
raise StopIteration
batch = self._get_batch()
xp = cuda.cupy if self.gpu else np
x = xp.array([example[0] for example in batch])
t = xp.array([example[1] for example in batch])
self.iteration += 1
return x, t
def next(self):
return self.__next__()
def to_cpu(self):
self.gpu = False
def to_gpu(self):
self.gpu = True
class SeqDataLoader(DatasetLoader):
def __init__(self, dataset, batch_size, gpu=False):
super().__init__(dataset=dataset, batch_size=batch_size, shuffle=False,
gpu=gpu)
def _get_batch(self):
jump = self.data_size // self.batch_size
offsets = [(i * jump + self.iteration) % self.data_size for i in
range(self.batch_size)]
batch = itemgetter(*offsets)(self.dataset)
return batch
# =============================================================================
# Preprocess function
# =============================================================================
def preprocess_vgg(image, size=(224, 224), dtype=np.float32):
"""VGGで使用する画像に対して前処理を施しndarrayへと変換する
VGGのpre-trainedモデルでは、下記の前処理を行う
- 224x224サイズへのリサイズ
- BGR順にデータを
- すべての画素から固定値を差し引く
- 軸の順番を入れ替える
Parameters
----------
image : PIL.Image or numpy.ndarray
入力画像がndarrayの場合は、その形状は(height, width)、
(hegith, width, channels) もしくは (channels, hegith, width)のいずれか
(そのチャンネルの並びはRGB)
size : None or (int, int)
リサイズする画像サイズ。Noneの場合はリサイズしない
dtype : numpy.dtype
変換後のデータ型
Returns
-------
image : numpy.ndarray
前処理を行ったndarray
"""
if not pil_available:
raise ImportError('PIL cannot be loaded. Install Pillow!')
if isinstance(image, np.ndarray):
if image.ndim == 3:
if image.shape[0] == 1:
image = image[0, :, :]
elif image.shape[0] == 3:
image = image.transpose((1, 2, 0))
image = Image.fromarray(image.astype(np.uint8))
image = image.convert('RGB')
if size:
image = image.resize(size)
    image = np.asarray(image, dtype=dtype)
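    # The remaining steps from the docstring, sketched for completeness; the BGR
    # mean values below are the commonly used VGG/ImageNet means and are an
    # assumption here rather than taken from this file.
    image = image[:, :, ::-1]  # RGB -> BGR
    image -= np.array([103.939, 116.779, 123.68], dtype=dtype)  # subtract fixed per-channel values
    image = image.transpose((2, 0, 1))  # HWC -> CHW
    return image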
from typing import Any, Dict, Tuple, Union, Callable, Optional, Sequence
from typing_extensions import Literal
from copy import deepcopy
from types import MappingProxyType
from pathlib import Path
from anndata import AnnData
from cellrank import logging as logg
from cellrank._key import Key
from cellrank.ul._docs import d
from cellrank.tl._utils import save_fig, _unique_order_preserving
from cellrank.ul._utils import _read_graph_data
import numpy as np
import pandas as pd
from scipy.sparse import issparse, spmatrix
from pandas.api.types import is_categorical_dtype
import matplotlib as mpl
import matplotlib.pyplot as plt
from matplotlib import cm
from matplotlib.colors import ListedColormap
from matplotlib.patches import ArrowStyle, FancyArrowPatch
from matplotlib.collections import LineCollection
from mpl_toolkits.axes_grid1 import make_axes_locatable
@d.dedent
def graph(
data: Union[AnnData, np.ndarray, spmatrix],
graph_key: Optional[str] = None,
    ixs: Optional[Union[range, np.ndarray]] = None,
layout: Union[str, Dict[str, Any], Callable[..., np.ndarray]] = "umap",
keys: Sequence[
Union[str, Literal["incoming", "outgoing", "self_loops"]]
] = "incoming",
keylocs: Union[str, Sequence[str]] = "uns",
node_size: float = 400,
labels: Optional[Union[Sequence[str], Sequence[Sequence[str]]]] = None,
top_n_edges: Optional[Union[int, Tuple[int, bool, str]]] = None,
self_loops: bool = True,
self_loop_radius_frac: Optional[float] = None,
filter_edges: Optional[Tuple[float, float]] = None,
edge_reductions: Union[Callable, Sequence[Callable]] = np.sum,
edge_reductions_restrict_to_ixs: Optional[Union[range, np.ndarray]] = None,
edge_weight_scale: float = 10,
edge_width_limit: Optional[float] = None,
edge_alpha: float = 1.0,
edge_normalize: bool = False,
edge_use_curved: bool = True,
arrows: bool = True,
font_size: int = 12,
font_color: str = "black",
color_nodes: bool = True,
cat_cmap: ListedColormap = cm.Set3,
cont_cmap: ListedColormap = cm.viridis,
legend_loc: Optional[str] = "best",
title: Optional[Union[str, Sequence[Optional[str]]]] = None,
figsize: Optional[Tuple[float, float]] = None,
dpi: Optional[int] = None,
save: Optional[Union[str, Path]] = None,
layout_kwargs: Dict[str, Any] = MappingProxyType({}),
) -> None:
"""
Plot a graph, visualizing incoming and outgoing edges or self-transitions.
This is a utility function to look in more detail at the transition matrix in areas of interest, e.g. around an
endpoint of development. This function is meant to visualise a small subset of nodes (~100-500) and the most likely
transitions between them. Note that limiting edges visualized using ``top_n_edges`` will speed things up,
as well as reduce the visual clutter.
Parameters
----------
data
The graph data to be plotted.
graph_key
Key in :attr:`anndata.AnnData.obsp` where the graph is stored.
Only used when ``data`` is :class:`anndata.Anndata` object.
ixs
Subset of indices of the graph to visualize.
layout
Layout to use for graph drawing.
- If :class:`str`, search for embedding in :attr:`anndata.AnnData.obsm` ``['X_{layout}']``.
Use ``layout_kwargs={'components': [0, 1]}`` to select components.
- If :class:`dict`, keys should be values in interval `[0, len(ixs))`
and values `(x, y)` pairs corresponding to node positions.
keys
        Keys in :attr:`anndata.AnnData.obs` or :attr:`anndata.AnnData.obsm`
used to color the nodes.
If `'incoming'`, `'outgoing'` or `'self_loops'`, visualize reduction (see ``edge_reductions``)
for each node based on incoming or outgoing edges, respectively.
keylocs
Locations of ``keys``. Can be any attribute of ``data`` if it's :class:`anndata.AnnData` object.
node_size
Size of the nodes.
labels
Labels of the nodes.
top_n_edges
Either top N outgoing edges in descending order or a tuple
``(top_n_edges, in_ascending_order, {'incoming', 'outgoing'})``. If `None`, show all edges.
self_loops
        Whether to visualize self-transitions and also to consider them in ``top_n_edges``.
self_loop_radius_frac
Fraction of a unit circle to visualize self transitions. If `None`, use ``node_size / 1000``.
filter_edges
Whether to remove all edges not in `[min, max]` interval.
edge_reductions
Aggregation function to use when coloring nodes by edge weights.
edge_reductions_restrict_to_ixs
Whether to use the full graph when calculating the ``edge_reductions`` or just use the nodes
marked by the ``ixs`` and this parameter. If `None`, it's the same as ``ixs``.
edge_weight_scale
Number by which to scale the width of the edges. Useful when the weights are small.
edge_width_limit
Upper bound for the width of the edges. Useful when weights are unevenly distributed.
edge_alpha
Alpha channel value for edges and arrows.
edge_normalize
If `True`, normalize edges to `[0, 1]` interval prior to applying any scaling or truncation.
edge_use_curved
If `True`, use curved edges. This can improve visualization at a small performance cost.
arrows
Whether to show the arrows. Setting this to `False` may dramatically speed things up.
font_size
Font size for node labels.
font_color
Label color of the nodes.
color_nodes
        Whether to color the nodes.
cat_cmap
Categorical colormap used when ``keys`` contain categorical variables.
cont_cmap
Continuous colormap used when ``keys`` contain continuous variables.
legend_loc
Location of the legend.
title
Title of the figure(s), one for each ``key``.
%(plotting)s
layout_kwargs
Keyword arguments for ``layout``.
Returns
-------
%(just_plots)s
"""
import networkx as nx
def plot_arrows(curves, G, pos, ax, edge_weight_scale):
for line, (edge, val) in zip(curves, G.edges.items()):
if edge[0] == edge[1]:
continue
mask = (~np.isnan(line)).all(axis=1)
line = line[mask, :]
if not len(line): # can be all NaNs
continue
line = line.reshape((-1, 2))
X, Y = line[:, 0], line[:, 1]
node_start = pos[edge[0]]
# reverse
if np.where(np.isclose(node_start - line, [0, 0]).all(axis=1))[0][0]:
X, Y = X[::-1], Y[::-1]
mid = len(X) // 2
posA, posB = zip(X[mid : mid + 2], Y[mid : mid + 2])
arrow = FancyArrowPatch(
posA=posA,
posB=posB,
# we clip because too small values
# cause it to crash
arrowstyle=ArrowStyle.CurveFilledB(
head_length=np.clip(
val["weight"] * edge_weight_scale * 4,
_min_edge_weight,
edge_width_limit,
),
head_width=np.clip(
val["weight"] * edge_weight_scale * 2,
_min_edge_weight,
edge_width_limit,
),
),
color="k",
zorder=float("inf"),
alpha=edge_alpha,
linewidth=0,
)
ax.add_artist(arrow)
def normalize_weights():
weights = np.array([v["weight"] for v in G.edges.values()])
minn = np.min(weights)
        weights = (weights - minn) / (np.max(weights) - minn)
"""Interfaces to modified Helmholtz operators."""
from bempp.api.operators.boundary import common as _common
import numpy as _np
def single_layer(
domain,
range_,
dual_to_range,
omega,
parameters=None,
assembler="default_nonlocal",
device_interface=None,
precision=None,
):
"""Assemble the Helmholtz single-layer boundary operator."""
    if _np.imag(omega) != 0:
        raise ValueError("'omega' must be real.")
import unittest
from os.path import abspath, dirname, join, isfile, normpath, relpath
import os
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
import mhkit.river as river
import netCDF4
from numpy.testing import assert_array_almost_equal
from pandas.testing import assert_frame_equal
import scipy.interpolate as interp
testdir = dirname(abspath(__file__))
datadir = normpath(join(testdir,'..','..','examples','data','river'))
class TestPerformance(unittest.TestCase):
@classmethod
def setUpClass(self):
self.diameter = 1
self.height = 2
self.width = 3
self.diameters = [1,2,3,4]
@classmethod
def tearDownClass(self):
pass
def test_circular(self):
eq, ca = river.performance.circular(self.diameter)
self.assertEqual(eq, self.diameter)
self.assertEqual(ca, 4*np.pi*self.diameter**2.)
def test_ducted(self):
eq, ca =river.performance.ducted(self.diameter)
self.assertEqual(eq, self.diameter)
self.assertEqual(ca, 4*np.pi*self.diameter**2.)
def test_rectangular(self):
eq, ca = river.performance.rectangular(self.height, self.width)
self.assertAlmostEqual(eq, 2.76, places=2)
self.assertAlmostEqual(ca, self.height*self.width, places=2)
def test_multiple_circular(self):
eq, ca = river.performance.multiple_circular(self.diameters)
self.assertAlmostEqual(eq, 5.48, places=2)
self.assertAlmostEqual(ca, 23.56, places=2)
def test_tip_speed_ratio(self):
rotor_speed = [15,16,17,18] # create array of rotor speeds
rotor_diameter = 77 # diameter of rotor for GE 1.5
inflow_speed = [13,13,13,13] # array of wind speeds
TSR_answer = [4.7,5.0,5.3,5.6]
TSR = river.performance.tip_speed_ratio(np.asarray(rotor_speed)/60,rotor_diameter,inflow_speed)
for i,j in zip(TSR,TSR_answer):
self.assertAlmostEqual(i,j,delta=0.05)
def test_power_coefficient(self):
# data obtained from power performance report of wind turbine
inflow_speed = [4,6,8,10,12,14,16,18,20]
power_out = np.asarray([59,304,742,1200,1400,1482,1497,1497,1511])
capture_area = 4656.63
rho = 1.225
Cp_answer = [0.320,0.493,0.508,0.421,0.284,0.189,0.128,0.090,0.066]
Cp = river.performance.power_coefficient(power_out*1000,inflow_speed,capture_area,rho)
for i,j in zip(Cp,Cp_answer):
self.assertAlmostEqual(i,j,places=2)
class TestResource(unittest.TestCase):
@classmethod
def setUpClass(self):
self.data = pd.read_csv(join(datadir, 'tanana_discharge_data.csv'), index_col=0,
parse_dates=True)
self.data.columns = ['Q']
self.results = pd.read_csv(join(datadir, 'tanana_test_results.csv'), index_col=0,
parse_dates=True)
@classmethod
def tearDownClass(self):
pass
def test_Froude_number(self):
v = 2
h = 5
Fr = river.resource.Froude_number(v, h)
self.assertAlmostEqual(Fr, 0.286, places=3)
def test_exceedance_probability(self):
# Create arbitrary discharge between 0 and 8(N=9)
Q = pd.Series(np.arange(9))
# Rank order for non-repeating elements simply adds 1 to each element
#if N=9, max F = 100((max(Q)+1)/10) = 90%
#if N=9, min F = 100((min(Q)+1)/10) = 10%
f = river.resource.exceedance_probability(Q)
self.assertEqual(f.min().values , 10. )
self.assertEqual(f.max().values , 90. )
def test_polynomial_fit(self):
# Calculate a first order polynomial on an x=y line
p, r2 = river.resource.polynomial_fit(np.arange(8), np.arange(8),1)
# intercept should be 0
self.assertAlmostEqual(p[0], 0.0, places=2 )
# slope should be 1
self.assertAlmostEqual(p[1], 1.0, places=2 )
# r-squared should be perfect
self.assertAlmostEqual(r2, 1.0, places=2 )
def test_discharge_to_velocity(self):
# Create arbitrary discharge between 0 and 8(N=9)
Q = pd.Series(np.arange(9))
        # Calculate a first order polynomial on a DV_Curve x=y line 10 times greater than the Q values
        p, r2 = river.resource.polynomial_fit(np.arange(9), 10*np.arange(9),1)
        # Because the polynomial line fits perfectly, we expect V to equal 10*Q
V = river.resource.discharge_to_velocity(Q, p)
self.assertAlmostEqual(np.sum(10*Q - V['V']), 0.00, places=2 )
def test_velocity_to_power(self):
        # Calculate a first order polynomial on a DV_Curve x=y line 10 times greater than the Q values
        p, r2 = river.resource.polynomial_fit(np.arange(9), 10*np.arange(9),1)
        # Because the polynomial line fits perfectly, we expect V to equal 10*Q
        V = river.resource.discharge_to_velocity(pd.Series(np.arange(9)), p)
        # Calculate a first order polynomial on a VP_Curve x=y line 10 times greater than the V values
p2, r22 = river.resource.polynomial_fit(np.arange(9), 10*np.arange(9),1)
# Set cut in/out to exclude 1 bin on either end of V range
cut_in = V['V'][1]
cut_out = V['V'].iloc[-2]
# Power should be 10x greater and exclude the ends of V
P = river.resource.velocity_to_power(V['V'], p2, cut_in, cut_out)
#Cut in power zero
self.assertAlmostEqual(P['P'][0], 0.00, places=2 )
#Cut out power zero
self.assertAlmostEqual(P['P'].iloc[-1], 0.00, places=2 )
# Middle 10x greater than velocity
self.assertAlmostEqual((P['P'][1:-1] - 10*V['V'][1:-1] ).sum(), 0.00, places=2 )
def test_energy_produced(self):
# If power is always X then energy produced with be x*seconds
X=1
seconds=1
P = pd.Series(X*np.ones(10) )
EP = river.resource.energy_produced(P, seconds)
self.assertAlmostEqual(EP, X*seconds, places=1 )
# for a normal distribution of Power EP = mean *seconds
mu=5
sigma=1
power_dist = pd.Series(np.random.normal(mu, sigma, 10000))
EP2 = river.resource.energy_produced(power_dist, seconds)
# import ipdb; ipdb.set_trace()
self.assertAlmostEqual(EP2, mu*seconds, places=1 )
def test_plot_flow_duration_curve(self):
filename = abspath(join(testdir, 'river_plot_flow_duration_curve.png'))
if isfile(filename):
os.remove(filename)
f = river.resource.exceedance_probability(self.data.Q)
plt.figure()
river.graphics.plot_flow_duration_curve(self.data['Q'], f['F'])
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_power_duration_curve(self):
filename = abspath(join(testdir, 'river_plot_power_duration_curve.png'))
if isfile(filename):
os.remove(filename)
f = river.resource.exceedance_probability(self.data.Q)
plt.figure()
river.graphics.plot_flow_duration_curve(self.results['P_control'], f['F'])
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_velocity_duration_curve(self):
filename = abspath(join(testdir, 'river_plot_velocity_duration_curve.png'))
if isfile(filename):
os.remove(filename)
f = river.resource.exceedance_probability(self.data.Q)
plt.figure()
river.graphics.plot_velocity_duration_curve(self.results['V_control'], f['F'])
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_discharge_timeseries(self):
filename = abspath(join(testdir, 'river_plot_discharge_timeseries.png'))
if isfile(filename):
os.remove(filename)
plt.figure()
river.graphics.plot_discharge_timeseries(self.data['Q'])
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_discharge_vs_velocity(self):
filename = abspath(join(testdir, 'river_plot_discharge_vs_velocity.png'))
if isfile(filename):
os.remove(filename)
plt.figure()
river.graphics.plot_discharge_vs_velocity(self.data['Q'], self.results['V_control'])
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
def test_plot_velocity_vs_power(self):
filename = abspath(join(testdir, 'river_plot_velocity_vs_power.png'))
if isfile(filename):
os.remove(filename)
plt.figure()
river.graphics.plot_velocity_vs_power(self.results['V_control'], self.results['P_control'])
plt.savefig(filename, format='png')
plt.close()
self.assertTrue(isfile(filename))
class TestIO(unittest.TestCase):
@classmethod
def setUpClass(self):
d3ddatadir = normpath(join(datadir,'d3d'))
filename= 'turbineTest_map.nc'
self.d3d_flume_data = netCDF4.Dataset(join(d3ddatadir,filename))
@classmethod
def tearDownClass(self):
pass
def test_load_usgs_data_instantaneous(self):
file_name = join(datadir, 'USGS_08313000_Jan2019_instantaneous.json')
data = river.io.usgs.read_usgs_file(file_name)
self.assertEqual(data.columns, ['Discharge, cubic feet per second'])
self.assertEqual(data.shape, (2972, 1)) # 4 data points are missing
def test_load_usgs_data_daily(self):
file_name = join(datadir, 'USGS_08313000_Jan2019_daily.json')
data = river.io.usgs.read_usgs_file(file_name)
expected_index = pd.date_range('2019-01-01', '2019-01-31', freq='D')
self.assertEqual(data.columns, ['Discharge, cubic feet per second'])
self.assertEqual((data.index == expected_index.tz_localize('UTC')).all(), True)
self.assertEqual(data.shape, (31, 1))
def test_request_usgs_data_daily(self):
data=river.io.usgs.request_usgs_data(station="15515500",
parameter='00060',
start_date='2009-08-01',
end_date='2009-08-10',
data_type='Daily')
self.assertEqual(data.columns, ['Discharge, cubic feet per second'])
self.assertEqual(data.shape, (10, 1))
def test_request_usgs_data_instant(self):
data=river.io.usgs.request_usgs_data(station="15515500",
parameter='00060',
start_date='2009-08-01',
end_date='2009-08-10',
data_type='Instantaneous')
self.assertEqual(data.columns, ['Discharge, cubic feet per second'])
# Every 15 minutes or 4 times per hour
self.assertEqual(data.shape, (10*24*4, 1))
def test_layer_data(self):
data=self.d3d_flume_data
variable= 'ucx'
layer=2
time_index= 3
layer_data= river.io.d3d.get_layer_data(data, variable, layer, time_index)
layer_compare = 2
time_index_compare= 4
layer_data_expected= river.io.d3d.get_layer_data(data,
variable, layer_compare,
time_index_compare)
assert_array_almost_equal(layer_data.x,layer_data_expected.x, decimal = 2)
assert_array_almost_equal(layer_data.y,layer_data_expected.y, decimal = 2)
assert_array_almost_equal(layer_data.v,layer_data_expected.v, decimal= 2)
def test_create_points(self):
x=np.linspace(1, 3, num= 3)
y=np.linspace(1, 3, num= 3)
z=1
points= river.io.d3d.create_points(x,y,z)
x=[1,2,3,1,2,3,1,2,3]
y=[1,1,1,2,2,2,3,3,3]
z=[1,1,1,1,1,1,1,1,1]
points_array= np.array([ [x_i, y_i, z_i] for x_i, y_i, z_i in zip(x, y, z)])
points_expected= pd.DataFrame(points_array, columns=('x','y','z'))
assert_array_almost_equal(points, points_expected,decimal = 2)
def test_get_all_data_points(self):
data=self.d3d_flume_data
variable= 'ucx'
time_step= 3
output = river.io.d3d.get_all_data_points(data, variable, time_step)
size_output = np.size(output)
        time_step_compare = 4
        output_expected = river.io.d3d.get_all_data_points(data, variable, time_step_compare)
size_output_expected= np.size(output_expected)
self.assertEqual(size_output, size_output_expected)
def test_unorm(self):
x=np.linspace(1, 3, num= 3)
y=np.linspace(1, 3, num= 3)
z=np.linspace(1, 3, num= 3)
unorm = river.io.d3d.unorm(x,y,z)
unorm_expected= [np.sqrt(1**2+1**2+1**2),np.sqrt(2**2+2**2+2**2), np.sqrt(3**2+3**2+3**2)]
assert_array_almost_equal(unorm, unorm_expected, decimal = 2)
def test_turbulent_intensity(self):
data=self.d3d_flume_data
time_step= -1
x_test=np.linspace(1, 17, num= 10)
y_test=np.linspace(3, 3, num= 10)
z_test=np.linspace(1, 1, num= 10)
test_points = np.array([ [x, y, z] for x, y, z in zip(x_test, y_test, z_test)])
points= pd.DataFrame(test_points, columns=['x','y','z'])
TI= river.io.d3d.turbulent_intensity(data, points, time_step)
TI_vars= ['turkin1', 'ucx', 'ucy', 'ucz']
TI_data_raw = {}
for var in TI_vars:
#get all data
var_data_df = river.io.d3d.get_all_data_points(data, var,time_step)
TI_data_raw[var] = var_data_df
TI_data= points.copy(deep=True)
for var in TI_vars:
TI_data[var] = interp.griddata(TI_data_raw[var][['x','y','z']],
TI_data_raw[var][var], points[['x','y','z']])
u_mag=river.io.d3d.unorm(TI_data['ucx'],TI_data['ucy'], TI_data['ucz'])
        turbulent_intensity_expected = np.sqrt(2/3*TI_data['turkin1'])
import numpy as np
from PIL import Image
import torchvision
import torch
class TransformTwice:
def __init__(self, transform):
self.transform = transform
def __call__(self, inp):
out1 = self.transform(inp)
out2 = self.transform(inp)
return out1, out2
def get_svhn(root, n_labeled,
transform_train=None, transform_val=None,
download=True):
base_dataset = torchvision.datasets.SVHN(root, split='train', download=download)
test_dataset = torchvision.datasets.SVHN(root, split='test', download=download)
train_labeled_idxs, train_unlabeled_idxs, val_idxs = train_val_split(test_dataset.labels, int(n_labeled/10))
train_labeled_idxs2, train_unlabeled_idxs2, val_idxs2 = train_val_split(
base_dataset.labels, int(n_labeled / 10), False)
train_labeled_dataset = SVHN_labeledmod(root, train_labeled_idxs, train=False, transform=transform_train)
train_unlabeled_dataset = SVHN_unlabeled(root, train_unlabeled_idxs, train=False, transform=TransformTwice(transform_train))
val_dataset = SVHN_labeled(root, val_idxs2, train=True, transform=transform_val, download=True)
test_dataset = SVHN_labeled(root, train=False, transform=transform_val, download=True)
#print("Test shape", train_labeled_dataset[0][0].shape)
print (f"#Labeled: {len(train_labeled_idxs)} #Unlabeled: {len(train_unlabeled_idxs)} #Val: {len(val_idxs)}")
return train_labeled_dataset, train_unlabeled_dataset, val_dataset, test_dataset, val_idxs
def train_val_split(labels, n_labeled_per_class, write=True):
labels = np.array(labels)
train_labeled_idxs = []
train_unlabeled_idxs = []
val_idxs = []
ent = 0
gap = 0
temp1 = np.load("svhnent.npy")
temp2 = np.load("svhngap.npy")
#pknn = 0
total = n_labeled_per_class*10
# To get an equal number of samples per class.
# for i in range(10):
# idxs = np.where(labels == i)[0]
# np.random.shuffle(idxs)
# train_labeled_idxs.extend(idxs[:n_labeled_per_class])
# train_unlabeled_idxs.extend(idxs[n_labeled_per_class:-500])
# val_idxs.extend(idxs[-500:])
# Random selection for point:
n_labeled = n_labeled_per_class * 10
idxs = np.where(labels < 10)[0] # All points
np.random.shuffle(idxs)
train_labeled_idxs.extend(idxs[:n_labeled])
train_unlabeled_idxs.extend(idxs[n_labeled: -1000]) # was 500
val_idxs.extend(idxs[-1000:])
if write == True:
for i in train_labeled_idxs:
ent += temp1[i]
gap += temp2[i]
file = f"svhn@{total}new/stats.txt"
f = open(file, "w")
f.write("Entropy: " + str(ent) + "\n")
f.write("Gap: " + str(gap) + "\n")
f.close()
np.random.shuffle(train_labeled_idxs)
np.random.shuffle(train_unlabeled_idxs)
np.random.shuffle(val_idxs)
return train_labeled_idxs, train_unlabeled_idxs, val_idxs
svhn_mean = (0.430, 0.428, 0.443) # equals np.mean(train_set.train_data, axis=(0,1,2))/255
svhn_std = (0.196, 0.198, 0.199) # equals np.std(train_set.train_data, axis=(0,1,2))/255
def normalise(x, mean=svhn_mean, std=svhn_std):
    x, mean, std = [np.array(a, np.float32) for a in (x, mean, std)]
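    # Sketch of the usual completion (an assumption: pixel values are in [0, 255]):
    x -= mean * 255
    x *= 1.0 / (255 * std)
    return x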
# Copyright 2020 Google LLC
#
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for qrecurrent.py"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import os
import copy
import json
import numpy as np
import tensorflow as tf
from numpy.testing import assert_allclose
import pytest
import tempfile
from tensorflow.keras import backend as K
from tensorflow.keras.layers import Activation
from tensorflow.keras.layers import Flatten
from tensorflow.keras.layers import Input
from tensorflow.keras.models import Model
from tensorflow.keras.backend import clear_session
from tensorflow.keras.layers import SimpleRNN
from tensorflow.keras.layers import LSTM
from tensorflow.keras.layers import GRU
from tensorflow.keras.layers import Dense
from tensorflow.keras.layers import Bidirectional
from tensorflow.keras.models import Sequential
from qkeras import QActivation
from qkeras import QSimpleRNN
from qkeras import QLSTM
from qkeras import QGRU
from qkeras import QBidirectional
from qkeras import QDense
from qkeras import quantized_bits
from qkeras import quantized_tanh
from qkeras.utils import model_save_quantized_weights
from qkeras.utils import quantized_model_from_json
from qkeras.utils import load_qmodel
from qkeras.utils import model_quantize
@pytest.mark.parametrize(
'rnn, all_weights_signature, expected_output',
[
(
QSimpleRNN,
np.array([5.109375, -1.8828125, 0.0, -0.5, 0.0],
dtype=np.float32),
np.array(
[[0.2812 , 0.4949 , 0.10254 , 0.1215 ],
[0.1874 , 0.6055 , 0.09 , 0.1173 ],
[0.3965 , 0.4778 , 0.02974 , 0.0962 ],
[0.4158 , 0.5005 , 0.0172 , 0.06665 ],
[0.3367 , 0.537 , 0.02444 , 0.1018 ],
[0.2125 , 0.584 , 0.03937 , 0.164 ],
[0.2368 , 0.639 , 0.04245 , 0.0815 ],
[0.4468 , 0.4436 , 0.01942 , 0.0902 ],
[0.622 , 0.257 , 0.03293 , 0.0878 ],
[0.4814 , 0.3923 , 0.011215, 0.11505 ]], dtype=np.float16)
),
(
QLSTM,
np.array([3.7421875, 2.1328125, 15.875, -0.5, 0.0],
dtype=np.float32),
np.array(
[[0.265 , 0.1775, 0.319 , 0.2384],
[0.2896, 0.2417, 0.2563, 0.2124],
[0.309 , 0.193 , 0.2734, 0.2246],
[0.322 , 0.17 , 0.2668, 0.2412],
[0.267 , 0.174 , 0.301 , 0.2578],
[0.311 , 0.1774, 0.2566, 0.255 ],
[0.2854, 0.174 , 0.2927, 0.248 ],
[0.2668, 0.2268, 0.2585, 0.2479],
[0.2795, 0.2113, 0.2659, 0.2434],
[0.275 , 0.2333, 0.2505, 0.2415]], dtype=np.float16)
),
(
QGRU,
np.array([4.6875, 4.3984375, 0.0, -0.5, 0.0],
dtype=np.float32),
np.array(
[[0.203 , 0.3547, 0.2854, 0.1567],
[0.294 , 0.334 , 0.1985, 0.1736],
[0.2096, 0.4392, 0.1812, 0.1702],
[0.1974, 0.4927, 0.1506, 0.1593],
[0.1582, 0.4788, 0.1968, 0.1661],
[0.2028, 0.4421, 0.1678, 0.1871],
[0.1583, 0.5464, 0.1705, 0.125 ],
[0.1956, 0.4407, 0.1703, 0.1935],
[0.1638, 0.511 , 0.1725, 0.1527],
[0.2074, 0.3862, 0.208 , 0.1982]], dtype=np.float16)
)
])
def test_qrnn(rnn, all_weights_signature, expected_output):
K.set_learning_phase(0)
np.random.seed(22)
tf.random.set_seed(22)
x = x_in = Input((2, 4), name='input')
x = rnn(
16,
activation=quantized_tanh(bits=8),
kernel_quantizer=quantized_bits(8, 0, 1, alpha=1.0),
recurrent_quantizer=quantized_bits(8, 0, 1, alpha=1.0),
bias_quantizer=quantized_bits(8, 0, 1, alpha=1.0),
name='qrnn_0')(
x)
x = QDense(
4,
kernel_quantizer=quantized_bits(6, 2, 1, alpha=1.0),
bias_quantizer=quantized_bits(4, 0, 1),
name='dense')(
x)
x = Activation('softmax', name='softmax')(x)
model = Model(inputs=[x_in], outputs=[x])
# reload the model to ensure saving/loading works
json_string = model.to_json()
clear_session()
model = quantized_model_from_json(json_string)
# Save the model as an h5 file using Keras's model.save()
fd, fname = tempfile.mkstemp('.h5')
model.save(fname)
del model # Delete the existing model
# Return a compiled model identical to the previous one
model = load_qmodel(fname)
# Clean the created h5 file after loading the model
os.close(fd)
os.remove(fname)
# apply quantizer to weights
model_save_quantized_weights(model)
all_weights = []
for layer in model.layers:
for i, weights in enumerate(layer.get_weights()):
w = np.sum(weights)
all_weights.append(w)
all_weights = np.array(all_weights)
assert all_weights.size == all_weights_signature.size
assert np.all(all_weights == all_weights_signature)
# test forward:
inputs = 2 * np.random.rand(10, 2, 4)
actual_output = model.predict(inputs).astype(np.float16)
assert_allclose(actual_output, expected_output, rtol=1e-4)
@pytest.mark.parametrize(
'rnn, all_weights_signature, expected_output',
[
(
QSimpleRNN,
np.array([
-2.6562500e+00, -4.3466797e+00, 8.6736174e-19, 6.2548828e-01,
-6.0751953e+00, 8.6736174e-19, -7.5000000e-01, 0.0], dtype=np.float32),
np.array([
[0.0851 , 0.1288 , 0.586 , 0.2002 ],
[0.1044 , 0.1643 , 0.7217 , 0.00978],
[0.04135, 0.0537 , 0.8706 , 0.03455],
[0.03354, 0.0489 , 0.889 , 0.02852],
[0.04358, 0.05246, 0.7563 , 0.1478 ],
[0.03403, 0.0743 , 0.4177 , 0.4739 ],
[0.0859 , 0.1567 , 0.3972 , 0.36 ],
[0.27 , 0.1945 , 0.4841 , 0.05124],
[0.12115, 0.05722, 0.728 , 0.0938 ],
[0.2864 , 0.1262 , 0.339 , 0.2484 ]], dtype=np.float16)
),
(
QLSTM,
np.array([
-4.1406555, 3.2921143, 16. , 7.0236816, 4.1237793,
16. , -0.75 , 0. ], dtype=np.float32),
np.array([
[0.3066, 0.2026, 0.2335, 0.2573],
[0.1796, 0.283 , 0.27 , 0.2673],
[0.1702, 0.2144, 0.308 , 0.3074],
[0.2216, 0.2153, 0.286 , 0.277 ],
[0.3533, 0.1725, 0.2322, 0.2421],
[0.2764, 0.2153, 0.227 , 0.2812],
[0.2786, 0.1711, 0.2861, 0.2642],
[0.2493, 0.1882, 0.3098, 0.2527],
[0.1926, 0.1779, 0.3137, 0.316 ],
[0.263 , 0.1783, 0.3086, 0.2502]], dtype=np.float16)
),
(
QGRU,
np.array([
-6.7578125e-01, 3.6837769e-01, 2.6020852e-18, 4.1682129e+00,
-7.5769043e-01, 2.6020852e-18, -7.5000000e-01, 0.0], dtype=np.float32),
np.array([
[0.2764, 0.1531, 0.3047, 0.2659],
[0.2012, 0.1885, 0.3638, 0.2466],
[0.2024, 0.1703, 0.3718, 0.2554],
[0.2451, 0.1581, 0.294 , 0.3027],
[0.3987, 0.117 , 0.2343, 0.25 ],
[0.2834, 0.1829, 0.2734, 0.2603],
[0.2905, 0.1345, 0.3003, 0.2747],
[0.2954, 0.1481, 0.2744, 0.2822],
[0.2336, 0.1282, 0.334 , 0.3042],
[0.2396, 0.1595, 0.3093, 0.2915]], dtype=np.float16)
)
])
def test_qbidirectional(rnn, all_weights_signature, expected_output):
K.set_learning_phase(0)
  np.random.seed(22)
# -*- coding: utf-8 -*-
"""
Authors
-------
<NAME> <<EMAIL>>
About
-----
Functions to handle command-line input
Known Issues
------------
None
"""
# General imports
import os
import sys
import time
from functools import partial
import shutil
if os.path.exists(os.path.join(os.getcwd(), 'config')): # You're 1 up from config?
sys.path.insert(0, os.path.join(os.getcwd(), 'config'))
else: # You're working from a directory parallel with config?
sys.path.insert(0, os.path.abspath(os.path.join(os.getcwd(), '../config')))
import pickle
# Tractor imports
from tractor import NCircularGaussianPSF, PixelizedPSF, Image, Tractor, FluxesPhotoCal, NullWCS, ConstantSky, EllipseESoft, Fluxes, PixPos
from tractor.galaxy import ExpGalaxy, DevGalaxy, FixedCompositeGalaxy, SoftenedFracDev, GalaxyShape
from tractor.sersic import SersicIndex, SersicGalaxy
from tractor.sercore import SersicCoreGalaxy
from tractor.pointsource import PointSource
from tractor.psfex import PixelizedPsfEx, PsfExModel
from tractor.psf import HybridPixelizedPSF
# Miscellaneous science imports
from astropy.io import fits, ascii
from astropy.table import Table, Column, vstack, join
from astropy.wcs import WCS
import astropy.units as u
import numpy as np
from functools import partial
import matplotlib.pyplot as plt
import weakref
from scipy import stats
import pathos as pa
from astropy.coordinates import SkyCoord
# import sfdmap
# Local imports
from .brick import Brick
from .mosaic import Mosaic
from .utils import header_from_dict, SimpleGalaxy
from .visualization import plot_background, plot_blob, plot_blobmap, plot_brick, plot_mask
try:
import config as conf
except:
raise RuntimeError('Cannot find configuration file!')
# m = sfdmap.SFDMap(conf.SFDMAP_DIR)
# Make sure no interactive plotting is going on.
plt.ioff()
import warnings
warnings.filterwarnings("ignore")
print(
f"""
====================================================================
________ _ _______ ____ ____ ________ _______
|_ __ | / \ |_ __ \ |_ \ / _||_ __ ||_ __ \
| |_ \_| / _ \ | |__) | | \/ | | |_ \_| | |__) |
| _| / ___ \ | __ / | |\ /| | | _| _ | __ /
_| |_ _/ / \ \_ _| | \ \_ _| |_\/_| |_ _| |__/ | _| | \ \_
|_____||____| |____||____| |___||_____||_____||________||____| |___|
--------------------------------------------------------------------
M O D E L P H O T O M E T R Y W I T H T H E T R A C T O R
--------------------------------------------------------------------
(C) 2020 -- <NAME> (DAWN, University of Copenhagen)
====================================================================
CONSOLE_LOGGING_LEVEL ..... {conf.CONSOLE_LOGGING_LEVEL}
LOGFILE_LOGGING_LEVEL ..... {conf.LOGFILE_LOGGING_LEVEL}
PLOT ...................... {conf.PLOT}
NTHREADS .................. {conf.NTHREADS}
OVERWRITE ................. {conf.OVERWRITE}
"""
)
print('Starting up logging system...')
# Start the logging
import logging.config
logger = logging.getLogger('farmer')
if not len(logger.handlers):
if conf.LOGFILE_LOGGING_LEVEL is not None:
logging_level = logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL)
else:
logging_level = logging.DEBUG
logger.setLevel(logging_level)
formatter = logging.Formatter('[%(asctime)s] %(name)s :: %(levelname)s - %(message)s', '%H:%M:%S')
# Logging to the console at logging level
ch = logging.StreamHandler()
ch.setLevel(logging.getLevelName(conf.CONSOLE_LOGGING_LEVEL))
ch.setFormatter(formatter)
logger.addHandler(ch)
if (conf.LOGFILE_LOGGING_LEVEL is None) | (not os.path.exists(conf.LOGGING_DIR)):
        print('Logging information will stream only to console.\n')
else:
# create file handler which logs even debug messages
logging_path = os.path.join(conf.LOGGING_DIR, 'logfile.log')
print(f'Logging information will stream to console and {logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(logging_path):
print('WARNING -- Existing logfile will be overwritten.')
os.remove(logging_path)
fh = logging.FileHandler(logging_path)
fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
fh.setFormatter(formatter)
logger.addHandler(fh)
# When a user invokes the interface, first check the translation file
# Optionally, tell the user.
# Try to import the translate file from it's usual spot first.
try:
from translate import translate
logger.info(f'interface.translation :: Imported translate file with {len(translate.keys())} entries.')
if len(conf.BANDS) != len(translate.keys()):
logger.warning(f'Configuration file only includes {len(conf.BANDS)} entries!')
# I have nicknames in the config, I need the raw names for file I/O
mask = np.ones_like(conf.BANDS, dtype=bool)
for i, band in enumerate(conf.BANDS):
if band not in translate.keys():
            logger.warning(f'Could not find {band} in translate file!')
mask[i] = False
# Re-assign bands and rawbands in config object
logger.debug(f'Assigning nicknames to raw image names:')
conf.BANDS = list(np.array(conf.BANDS)[mask])
conf.RAWBANDS = conf.BANDS.copy()
for i, band in enumerate(conf.RAWBANDS):
conf.RAWBANDS[i] = translate[band]
logger.debug(f' {i+1} :: {conf.RAWBANDS[i]} --> {conf.BANDS[i]}')
# The translation file could not be found, so make a scene.
except:
logger.warning('interface.translation :: WARNING - Could not import translate file! Will use config instead.')
logger.info('interface.translation :: Image names must be < 50 characters (FITS standard) - checking...')
# I have raw names, I need shortened raw names (i.e. nicknames)
conf.RAWBANDS = conf.BANDS.copy()
count_short = 0
for i, band in enumerate(conf.RAWBANDS):
if len(band) > 50:
conf.BANDS[i] = band[:50]
logger.debug(f' {i+1} :: {band} --> {conf.BANDS[i]}')
count_short += 1
logger.info(f'interface.translation :: Done checking. Shortened {count_short} image names.')
def make_directories():
"""Uses the existing config file to set up the directories. Must call from config.py directory!
"""
import pathlib
logger.info('Making directories!')
dir_dict = {'IMAGE_DIR': conf.IMAGE_DIR,
'PSF_DIR': conf.PSF_DIR,
'BRICK_DIR': conf.BRICK_DIR,
'INTERIM_DIR': conf.INTERIM_DIR,
'PLOT_DIR': conf.PLOT_DIR,
'CATALOG_DIR': conf.CATALOG_DIR,
'LOGGING_DIR': conf.LOGGING_DIR
}
for key in dir_dict.keys():
path = dir_dict[key]
if os.path.exists(path): # too important to allow overwrite...
logger.warning(f'{key} already exists under {path}!')
for i in dir_dict.keys():
if path == dir_dict[i]:
logger.info(f'{key} was already created for {i}...OK')
break
else:
logger.info(f'{key} --> {path}')
pathlib.Path(path).mkdir(parents=True, exist_ok=True)
def make_psf(image_type=conf.MULTIBAND_NICKNAME, band=None, sextractor_only=False, psfex_only=False, override=conf.OVERWRITE):
""" This is where we automatically construct the PSFs for Farmer.
Step 1. Run sextractor_only=True to obtain the PSF candidates
Step 2. Using the output plot, determine the selection box for the stars
Step 3. Run psfex_only=True to construct the PSF.
See config file to set box dimensions, psf spatial sampling, etc.
"""
# If the user asked to make a PSF for the detection image, tell them we don't do that
if image_type is conf.DETECTION_NICKNAME:
raise ValueError('Farmer does not use a PSF to perform detection!')
# Else if the user asks for a PSF to be made for the modeling band
elif image_type is conf.MODELING_NICKNAME:
# Make the Mosaic
logger.info(f'Making PSF for {conf.MODELING_NICKNAME}')
modmosaic = Mosaic(conf.MODELING_NICKNAME, modeling=True, mag_zeropoint=conf.MODELING_ZPT, skip_build=True)
# Make the PSF
logger.info(f'Mosaic loaded for {conf.MODELING_NICKNAME}')
modmosaic._make_psf(xlims=conf.MOD_REFF_LIMITS, ylims=conf.MOD_VAL_LIMITS, override=override, sextractor_only=sextractor_only, psfex_only=psfex_only)
logger.info(f'PSF made successfully for {conf.MODELING_NICKNAME}')
# Else if the user asks for a PSF in one of the bands
elif image_type is conf.MULTIBAND_NICKNAME:
# Sanity check
if band not in conf.BANDS:
raise ValueError(f'{band} is not a valid band nickname!')
# Use all bands or just one?
if band is not None:
sbands = [band,]
else:
sbands = conf.BANDS
# Loop over bands
for i, band in enumerate(sbands):
# Figure out PS selection box position and zeropoint
idx_band = np.array(conf.BANDS) == band
multi_xlims = np.array(conf.MULTIBAND_REFF_LIMITS)[idx_band][0]
multi_ylims = np.array(conf.MULTIBAND_VAL_LIMITS)[idx_band][0]
mag_zpt = np.array(conf.MULTIBAND_ZPT)[idx_band][0]
# Make the Mosaic
logger.info(f'Making PSF for {band}')
bandmosaic = Mosaic(band, mag_zeropoint = mag_zpt, skip_build=True)
# Make the PSF
logger.info(f'Mosaic loaded for {band}')
bandmosaic._make_psf(xlims=multi_xlims, ylims=multi_ylims, override=override, sextractor_only=sextractor_only, psfex_only=psfex_only)
if not sextractor_only:
logger.info(f'PSF made successfully for {band}')
else:
logger.info(f'interface.make_psf :: SExtraction complete for {band}')
return
def make_bricks(image_type=conf.MULTIBAND_NICKNAME, band=None, brick_id=None, insert=False, skip_psf=True, max_bricks=None, make_new_bricks=False):
""" Stage 1. Here we collect the detection, modelling, and multiband images for processing. We may also cut them up!
NB: PSFs can be automatically made at this stage too, assuming you've determined your PSF selection a priori.
"""
# Make bricks for the detection image
if (image_type==conf.DETECTION_NICKNAME) | (image_type is None):
# Detection
logger.info('Making mosaic for detection...')
detmosaic = Mosaic(conf.DETECTION_NICKNAME, detection=True)
if conf.NTHREADS > 1:
logger.warning('Parallelization of brick making is currently not supported. Continuing anyways...')
# BUGGY DUE TO MEM ALLOC
# logger.info('Making bricks for detection (in parallel)')
# pool = mp.ProcessingPool(processes=conf.NTHREADS)
# pool.map(partial(detmosaic._make_brick, detection=True, overwrite=True), np.arange(0, detmosaic.n_bricks()))
logger.info('Making bricks for detection (in serial)')
for bid in np.arange(1, detmosaic.n_bricks()+1):
detmosaic._make_brick(bid, detection=True, overwrite=True)
# Make bricks for the modeling image
elif (image_type==conf.MODELING_NICKNAME) | (image_type is None):
# Modeling
logger.info('Making mosaic for modeling...')
modmosaic = Mosaic(conf.MODELING_NICKNAME, modeling=True)
# The user wants PSFs on the fly
if not skip_psf:
mod_xlims = np.array(conf.MOD_REFF_LIMITS)
mod_ylims = np.array(conf.MOD_VAL_LIMITS)
modmosaic._make_psf(xlims=mod_xlims, ylims=mod_ylims)
# Make bricks in parallel
if (conf.NTHREADS > 1) & (brick_id is None):
logger.warning('Parallelization of brick making is currently not supported. Continuing anyways...')
# BUGGY DUE TO MEM ALLOC
# if conf.VERBOSE: print('Making bricks for detection (in parallel)')
# pool = mp.ProcessingPool(processes=conf.NTHREADS)
# pool.map(partial(modmosaic._make_brick, detection=True, overwrite=True), np.arange(0, modmosaic.n_bricks()))
# # Make bricks in serial
# else:
if brick_id is not None:
logger.info(f'Making brick #{brick_id} for modeling (in serial)')
modmosaic._make_brick(brick_id, modeling=True, overwrite=True)
else:
logger.info('Making bricks for modeling (in serial)')
if max_bricks is None:
max_bricks = modmosaic.n_bricks()
for bid in np.arange(1, max_bricks+1):
modmosaic._make_brick(bid, modeling=True, overwrite=True)
# Make bricks for one or more multiband images
elif (image_type==conf.MULTIBAND_NICKNAME) | (image_type is None):
# One variable list
if band is not None:
try:
if len(band) > 0:
sbands = band
else:
sbands = conf.BANDS
except:
sbands = [band,]
else:
sbands = conf.BANDS
# In serial, loop over images
for i, sband in enumerate(sbands):
# Assume we can overwrite files unless insertion is explicit
# First image w/o insertion will make new file anyways
if make_new_bricks:
overwrite = True
if insert | (i > 0):
overwrite = False
else:
overwrite=False
# Build the mosaic
logger.info(f'Making mosaic for image {sband}...')
bandmosaic = Mosaic(sband)
# The user wants PSFs made on the fly
if not skip_psf:
idx_band = np.array(conf.BANDS) == sband
multi_xlims = np.array(conf.MULTIBAND_REFF_LIMITS)[idx_band][0]
multi_ylims = np.array(conf.MULTIBAND_VAL_LIMITS)[idx_band][0]
bandmosaic._make_psf(xlims=multi_xlims, ylims=multi_ylims)
# Make bricks in parallel
if (conf.NTHREADS > 1) & (brick_id is None):
logger.warning('Parallelization of brick making is currently not supported. Continuing anyways...')
# logger.info(f'Making bricks for band {sband} (in parallel)')
# with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
# logger.info(f'Parallel processing pool initalized with {conf.NTHREADS} threads.')
# pool.uimap(partial(bandmosaic._make_brick, detection=False, overwrite=overwrite), np.arange(0, bandmosaic.n_bricks()))
# logger.info('Parallel processing complete.')
# Make bricks in serial
# else:
if brick_id is not None:
logger.info(f'Making brick #{brick_id} for multiband (in serial)')
bandmosaic._make_brick(brick_id, detection=False, overwrite=overwrite)
else:
logger.info(f'Making bricks for band {sband} (in serial)')
if max_bricks is None:
max_bricks = bandmosaic.n_bricks()
for bid in np.arange(1, max_bricks+1):
bandmosaic._make_brick(bid, detection=False, overwrite=overwrite)
# image type is invalid
else:
raise RuntimeError(f'{image_type} is an unrecognized nickname (see {conf.DETECTION_NICKNAME}, {conf.MODELING_NICKNAME}, {conf.MULTIBAND_NICKNAME})')
return
def runblob(blob_id, blobs, modeling=None, catalog=None, plotting=0, source_id=None, source_only=False, blob_only=False):
""" Essentially a private function. Runs each individual blob and handles the bulk of the work. """
# if conf.NTHREADS != 0:
# fh = logging.FileHandler(f'B{blob_id}.log')
# fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
# formatter = logging.Formatter('[%(asctime)s] %(name)s :: %(levelname)s - %(message)s', '%H:%M:%S')
# fh.setFormatter(formatter)
# logger = pathos.logger(level=logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL), handler=fh)
logger = logging.getLogger(f'farmer.blob.{blob_id}')
logger.info(f'Starting on Blob #{blob_id}')
modblob = None
fblob = None
tstart = time.time()
logger.debug('Making weakref proxies of blobs')
if modeling is None:
modblob, fblob = weakref.proxy(blobs[0]), weakref.proxy(blobs[1])
elif modeling:
modblob = weakref.proxy(blobs)
else:
fblob = weakref.proxy(blobs)
logger.debug(f'Weakref made ({time.time() - tstart:3.3f})s')
# Make blob with modeling image
if modblob is not None:
logger.debug(f'Making blob with {conf.MODELING_NICKNAME}')
modblob.logger = logger
if modblob.rejected:
logger.info('Blob has been rejected!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = modblob.bcatalog.copy()
del modblob
return catout
# If the user wants to just model a specific source...
if source_only & (source_id is not None):
logger.info(f'Preparing to model single source: {source_id}')
sid = modblob.bcatalog['source_id']
modblob.bcatalog = modblob.bcatalog[sid == source_id]
modblob.n_sources = len(modblob.bcatalog)
modblob.mids = np.ones(modblob.n_sources, dtype=int)
modblob.model_catalog = np.zeros(modblob.n_sources, dtype=object)
modblob.solution_catalog = np.zeros(modblob.n_sources, dtype=object)
modblob.solved_chisq = np.zeros(modblob.n_sources)
modblob.solved_bic = np.zeros(modblob.n_sources)
modblob.solution_chisq = np.zeros(modblob.n_sources)
modblob.tr_catalogs = np.zeros((modblob.n_sources, 3, 2), dtype=object)
modblob.chisq = np.zeros((modblob.n_sources, 3, 2))
modblob.rchisq = np.zeros((modblob.n_sources, 3, 2))
modblob.bic = np.zeros((modblob.n_sources, 3, 2))
assert(len(modblob.bcatalog) > 0)
if not blob_only:
if (conf.MODEL_PHOT_MAX_NBLOB > 0) & (modblob.n_sources > conf.MODEL_PHOT_MAX_NBLOB):
logger.info('Number of sources exceeds set limit. Skipping!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = modblob.bcatalog.copy()
catout['x'] += modblob.subvector[1]
catout['y'] += modblob.subvector[0]
del modblob
return catout
# Run models
if conf.ITERATIVE_SUBTRACTION_THRESH is None:
iter_thresh = 1E31
else:
iter_thresh = conf.ITERATIVE_SUBTRACTION_THRESH
if (conf.ITERATIVE_SUBTRACTION_THRESH is not None) & (modblob.n_sources >= iter_thresh):
logger.debug(f'Performing iterative subtraction for {conf.MODELING_NICKNAME}')
astart = time.time()
for i, band in enumerate(modblob.bands):
band_name = band[len(conf.MODELING_NICKNAME)+1:]
zpt = conf.MULTIBAND_ZPT[modblob._band2idx(band_name)]
# sorting order
avg_flux = np.zeros(modblob.n_sources)
for i, item in enumerate(modblob.bcatalog):
rawfluxes = np.array([np.sum(img[modblob.segmap == item['source_id']]) for img in modblob.images])
fluxes = rawfluxes * 10**(-0.4 * (zpt - 23.9))
avg_flux[i] = np.mean(fluxes, 0)
index = np.argsort(avg_flux)[::-1] # sort by brightness
copy_images = modblob.images.copy()
import copy
modblob.solution_model_images = np.zeros_like(modblob.images)
for i, idx in enumerate(index):
logger.debug(f" ({i+1}/{modblob.n_sources}) Attemping to model source #{item['source_id']}")
itemblob = copy.deepcopy(modblob)
itemblob.bcatalog = Table(modblob.bcatalog[idx])
itemblob.n_sources = 1
itemblob.mids = np.ones(itemblob.n_sources, dtype=int)
itemblob.model_catalog = np.zeros(itemblob.n_sources, dtype=object)
itemblob.solution_catalog = np.zeros(itemblob.n_sources, dtype=object)
itemblob.solved_chisq = np.zeros(itemblob.n_sources)
itemblob.solved_bic = np.zeros(itemblob.n_sources)
itemblob.solution_chisq = np.zeros(itemblob.n_sources)
itemblob.tr_catalogs = np.zeros((itemblob.n_sources, 3, 2), dtype=object)
itemblob.chisq = np.zeros((itemblob.n_sources, 3, 2))
itemblob.rchisq = np.zeros((itemblob.n_sources, 3, 2))
itemblob.bic = np.zeros((itemblob.n_sources, 3, 2))
itemblob.images = copy_images
itemblob._is_itemblob = True
logger.debug(f'Staging images for {conf.MODELING_NICKNAME} -- blob #{modblob.blob_id}')
itemblob.stage_images()
logger.debug(f'Images staged. ({time.time() - astart:3.3f})s')
astart = time.time()
logger.debug(f'Modeling images for {conf.MODELING_NICKNAME} -- blob #{modblob.blob_id}')
status = itemblob.tractor_phot()
if status:
logger.debug(f'Morphology determined. ({time.time() - astart:3.3f})s')
logger.debug(f'Transferring results back to parent blob...')
#transfer back
modblob.bcatalog[idx] = itemblob.bcatalog[0]
modblob.solution_model_images += itemblob.solution_model_images
# subtract model from image
copy_images -= itemblob.solution_model_images
else:
logger.warning(f'Morphology failed! ({time.time() - astart:3.3f})s')
# # if conf.NTHREADS != 0:
# # logger.removeHandler(fh)
# catout = modblob.bcatalog.copy()
# catout['x'] += modblob.subvector[1]
# catout['y'] += modblob.subvector[0]
# del modblob
# return catout
else:
astart = time.time()
logger.debug(f'Staging images for {conf.MODELING_NICKNAME}')
modblob.stage_images()
logger.debug(f'Images staged. ({time.time() - astart:3.3f})s')
astart = time.time()
logger.debug(f'Modeling images for {conf.MODELING_NICKNAME}')
status = modblob.tractor_phot()
if not status:
logger.warning(f'Morphology failed! ({time.time() - astart:3.3f})s')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = modblob.bcatalog.copy()
catout['x'] += modblob.subvector[1]
catout['y'] += modblob.subvector[0]
del modblob
return catout
logger.debug(f'Morphology determined. ({time.time() - astart:3.3f})s')
# Run follow-up phot
if conf.DO_APPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual', 'weight', 'chisq',):
for band in modblob.bands:
try:
    modblob.aperture_phot(band, img_type, sub_background=conf.SUBTRACT_BACKGROUND)
except:
    logger.warning(f'Aperture photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.DO_SEPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual'):
for band in modblob.bands:
try:
modblob.sep_phot(band, img_type, centroid='MODEL', sub_background=conf.SUBTRACT_BACKGROUND)
modblob.sep_phot(band, img_type, centroid='DETECTION', sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.DO_SEXPHOT:
for band in modblob.bands:
try:
modblob.residual_phot(band, sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP residual photometry FAILED. Likely a bad blob.')
duration = time.time() - tstart
logger.info(f'Solution for Blob #{modblob.blob_id} (N={modblob.n_sources}) arrived at in {duration:3.3f}s ({duration/modblob.n_sources:2.2f}s per src)')
catout = modblob.bcatalog.copy()
del modblob
#################### FORCED PHOTOMETRY ################################
if fblob is not None:
# make new blob with band information
logger.debug(f'Making blob with {conf.MULTIBAND_NICKNAME}')
fblob.logger = logger
if fblob.rejected:
logger.info('Blob has been rejected!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
astart = time.time()
status = fblob.stage_images()
if not status:
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
logger.info(f'{len(fblob.bands)} images staged. ({time.time() - astart:3.3f})s')
astart = time.time()
if modblob is not None:
fblob.model_catalog = modblob.solution_catalog.copy()
fblob.position_variance = modblob.position_variance.copy()
fblob.parameter_variance = modblob.parameter_variance.copy()
logger.info(f'Solution parameters transferred. ({time.time() - astart:3.3f})s')
else:
if catalog is None:
raise ValueError('Input catalog not supplied!')
else:
blobmask = np.ones(len(catalog))
if source_id is not None:
# If the user wants to just model a specific source...
logger.info(f'Preparing to force single source: {source_id}')
sid = catalog['source_id']
bid = catalog['blob_id']
fblob.bcatalog = catalog[(sid == source_id) & (bid == blob_id)]
fblob.n_sources = len(fblob.bcatalog)
fblob.mids = np.ones(fblob.n_sources, dtype=int)
fblob.model_catalog = np.zeros(fblob.n_sources, dtype=object)
fblob.solution_catalog = np.zeros(fblob.n_sources, dtype=object)
fblob.solved_chisq = np.zeros(fblob.n_sources)
fblob.solved_bic = np.zeros(fblob.n_sources)
fblob.solution_chisq = np.zeros(fblob.n_sources)
fblob.tr_catalogs = np.zeros((fblob.n_sources, 3, 2), dtype=object)
fblob.chisq = np.zeros((fblob.n_sources, 3, 2))
fblob.rchisq = np.zeros((fblob.n_sources, 3, 2))
fblob.bic = np.zeros((fblob.n_sources, 3, 2))
assert(len(fblob.bcatalog) > 0)
else:
if blob_id is not None:
blobmask = catalog['blob_id'] == blob_id
fblob.bcatalog = catalog[blobmask]
fblob.n_sources = len(fblob.bcatalog)
catalog = catalog[blobmask]
catalog['X_MODEL'] -= fblob.subvector[1] + fblob.mosaic_origin[1] - conf.BRICK_BUFFER + 1
catalog['Y_MODEL'] -= fblob.subvector[0] + fblob.mosaic_origin[0] - conf.BRICK_BUFFER + 1
fblob.model_catalog, good_sources = models_from_catalog(catalog, fblob)
if (good_sources == False).all():
logger.warning('All sources are invalid!')
catalog['X_MODEL'] += fblob.subvector[1] + fblob.mosaic_origin[1] - conf.BRICK_BUFFER + 1
catalog['Y_MODEL'] += fblob.subvector[0] + fblob.mosaic_origin[0] - conf.BRICK_BUFFER + 1
return catalog
fblob.position_variance = None
fblob.parameter_variance = None
fblob.bcatalog = catalog[good_sources]
fblob.n_sources = len(fblob.bcatalog)
if fblob.rejected:
logger.info('Blob has been rejected!')
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
# Forced phot
astart = time.time()
logger.info(f'Starting forced photometry...')
status = fblob.forced_phot()
if not status:
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
catout = fblob.bcatalog.copy()
del fblob
return catout
logger.info(f'Force photometry complete. ({time.time() - astart:3.3f})s')
# Run follow-up phot
if conf.DO_APPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual', 'weight', 'chisq',):
for band in fblob.bands:
# try:
fblob.aperture_phot(band, img_type, sub_background=conf.SUBTRACT_BACKGROUND)
# except:
# logger.warning(f'Aperture photmetry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.PLOT > 0:
for i, sid in enumerate(fblob.bcatalog['source_id']):
for band in fblob.bands:
fig, ax = plt.subplots()
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_image'][i], c='k', ls='dashed')
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_model'][i], c='b')
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_isomodel'][i], c='g')
ax.plot(conf.APER_PHOT, fblob.bcatalog[f'FLUX_APER_{band}_residual'][i], c='r')
fig.savefig(os.path.join(conf.PLOT_DIR, f'aper_{band}_{sid}.pdf'))
plt.close(fig)  # free the figure so looping over many sources does not exhaust memory
if conf.DO_SEPHOT:
for img_type in ('image', 'model', 'isomodel', 'residual',):
for band in fblob.bands:
try:
fblob.sep_phot(band, img_type, centroid='MODEL', sub_background=conf.SUBTRACT_BACKGROUND)
fblob.sep_phot(band, img_type, centroid='DETECTION', sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP photometry FAILED for {band} {img_type}. Likely a bad blob.')
if conf.DO_SEXPHOT:
for band in fblob.bands:
try:
fblob.residual_phot(band, sub_background=conf.SUBTRACT_BACKGROUND)
except:
logger.warning(f'SEP residual photometry FAILED. Likely a bad blob.')
duration = time.time() - tstart
logger.info(f'Solution for blob {fblob.blob_id} (N={fblob.n_sources}) arrived at in {duration:3.3f}s ({duration/fblob.n_sources:2.2f}s per src)')
catout = fblob.bcatalog.copy()
del fblob
# if conf.NTHREADS != 0:
# logger.removeHandler(fh)
return catout
def detect_sources(brick_id, catalog=None, segmap=None, blobmap=None, use_mask=True):
"""Now we can detect stuff and be rid of it!
Parameters
----------
brick_id : [type]
[description]
catalog : [type], optional
[description], by default None
segmap : [type], optional
[description], by default None
blobmap : [type], optional
[description], by default None
catalog : [type], optional
[description], by default None
use_mask : bool, optional
[description], by default True
Returns
-------
[type]
[description]
Raises
------
RuntimeError
[description]
ValueError
[description]
ValueError
[description]
ValueError
[description]
"""
if conf.LOGFILE_LOGGING_LEVEL is not None:
brick_logging_path = os.path.join(conf.LOGGING_DIR, f"B{brick_id}_logfile.log")
logging.info(f'Logging information will be streamed to console and to {brick_logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(brick_logging_path):
logging.warning('Existing logfile will be overwritten.')
os.remove(brick_logging_path)
# close and remove the old file handler
#fh.close()
#logger.removeHandler(fh)
# we will add an additional file handler to keep track of brick_id specific information
# set up the new file handler
shutil.copy(logging_path, brick_logging_path)
new_fh = logging.FileHandler(brick_logging_path,mode='a')
new_fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
new_fh.setFormatter(formatter)
logger.addHandler(new_fh)
# Create detection brick
tstart = time.time()
detbrick = stage_brickfiles(brick_id, nickname=conf.DETECTION_NICKNAME, modeling=True, is_detection=True)
if detbrick is None:
return
logger.info(f'Detection brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
# Sextract sources
tstart = time.time()
if (segmap is None) & (catalog is None):
try:
detbrick.sextract(conf.DETECTION_NICKNAME, sub_background=conf.DETECTION_SUBTRACT_BACKGROUND, use_mask=use_mask, incl_apphot=conf.DO_APPHOT)
logger.info(f'Detection brick #{brick_id} sextracted {detbrick.n_sources} objects ({time.time() - tstart:3.3f}s)')
detbrick.is_borrowed = False
except:
raise RuntimeError(f'Detection brick #{brick_id} sextraction FAILED. ({time.time() - tstart:3.3f}s)')
return
# or find existing catalog/segmap info
elif (catalog == 'auto') | ((segmap is not None) & (catalog is not None) & (blobmap is not None)):
if (catalog == 'auto'):
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
if os.path.exists(search_fn):
catalog = Table(fits.open(search_fn)[1].data)
else:
raise ValueError(f'No valid catalog was found for {brick_id}')
logger.info(f'Overriding SExtraction with external catalog. ({search_fn})')
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
segmap = hdul_seg['SEGMAP'].data
blobmap = hdul_seg['BLOBMAP'].data
else:
raise ValueError(f'No valid segmentation map was found for {brick_id}')
if conf.X_COLNAME != 'x':
if 'x' in catalog.colnames:
if 'x_borrowed' in catalog.colnames:
catalog.remove_column('x_borrowed')
catalog['x'].name = 'x_borrowed'
catalog[conf.X_COLNAME].name = 'x'
if conf.Y_COLNAME != 'y':
if 'y' in catalog.colnames:
if 'y_borrowed' in catalog.colnames:
catalog.remove_column('y_borrowed')
catalog['y'].name = 'y_borrowed'
catalog[conf.Y_COLNAME].name = 'y'
# catalog['x'] = catalog['x'] - detbrick.mosaic_origin[1] + conf.BRICK_BUFFER - 1
# catalog['y'] = catalog['y'] - detbrick.mosaic_origin[0] + conf.BRICK_BUFFER - 1
detbrick.catalog = catalog
detbrick.n_sources = len(catalog)
detbrick.n_blobs = len(np.unique(catalog['blob_id']))
detbrick.is_borrowed = True
detbrick.segmap = segmap
detbrick.segmask = segmap.copy()
detbrick.segmask[segmap!=0] = 1
detbrick.blobmap = blobmap
else:
raise ValueError('No valid segmap, blobmap, and catalog provided to override SExtraction!')
return
if not detbrick.is_borrowed:
detbrick.cleanup()
if conf.PLOT > 2:
plot_blobmap(detbrick, image=detbrick.images[0], band=conf.DETECTION_NICKNAME, mode='log')
plot_blobmap(detbrick, image=detbrick.images[0], band=conf.DETECTION_NICKNAME, mode='rms')
logger.info('Saving detection catalog...')
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_{conf.DETECTION_NICKNAME}.fits')
tstart = time.time()
if os.path.exists(outpath) and not conf.OVERWRITE:
logger.warning('Catalog file exists and I will not overwrite it!')
else:
detbrick.catalog.write(outpath, overwrite=conf.OVERWRITE)
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
# Save segmap and blobmaps
# if (~detbrick.is_borrowed):
tstart = time.time()
logger.info('Saving segmentation and blob maps...')
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(outpath) and not conf.OVERWRITE:
logger.warning('Segmentation file exists and I will not overwrite it!')
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
hdul.append(fits.ImageHDU(data=detbrick.segmap, name='SEGMAP', header=detbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=detbrick.blobmap, name='BLOBMAP', header=detbrick.wcs.to_header()))
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
tstart = time.time()
# else:
# logger.info(f'You gave me a catalog and segmap, so I am not saving it again.')
# filen = open(os.path.join(conf.INTERIM_DIR, f'detbrick_N{brick_id}.pkl'), 'wb')
# dill.dump(detbrick, filen)
return detbrick
def make_models(brick_id, detbrick='auto', band=None, source_id=None, blob_id=None, multiband_model=len(conf.MODELING_BANDS)>1, source_only=False):
""" Stage 2. Detect your sources and determine the best model parameters for them """
if (band is None) & (len(conf.MODELING_BANDS) > 0):
modband = conf.MODELING_BANDS
addName = conf.MULTIBAND_NICKNAME
multiband_model = True
if (type(modband) == str) | (type(modband) == np.str_):
modband = [modband,]
else:
logger.warning(f'Disregarding MODELING_BANDS config parameter. Using {band} for modelling instead!')
if (type(band) == list) | (type(band) == np.ndarray):
multiband_model = True
modband = band
elif (type(band) == str) | (type(band) == np.str_):
multiband_model = False
modband = [band,]
else:
sys.exit('ERROR -- Input band is not a list, array, or string!')
addName = '_'.join(modband)
# create new logging file
if conf.LOGFILE_LOGGING_LEVEL is not None:
brick_logging_path = os.path.join(conf.LOGGING_DIR, f"B{brick_id}_{addName}_logfile.log")
logger.info(f'Logging information will be streamed to console and to {brick_logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(brick_logging_path):
logger.warning('Existing logfile will be overwritten.')
os.remove(brick_logging_path)
# close and remove the old file handler
#fh.close()
#logger.removeHandler(fh)
# we will add an additional file handler to keep track of brick_id specific information
# set up the new file handler
shutil.copy(logging_path, brick_logging_path)
new_fh = logging.FileHandler(brick_logging_path,mode='a')
new_fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
new_fh.setFormatter(formatter)
logger.addHandler(new_fh)
# Warn user that you cannot plot while running multiprocessing...
if (source_id is None) & (blob_id is None):
if (conf.NBLOBS == 0) & (conf.NTHREADS > 1) & ((conf.PLOT > 0)):
conf.PLOT = 0
logger.warning('Plotting not supported while modeling in parallel!')
if detbrick=='auto':
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_{conf.DETECTION_NICKNAME}.fits')
if os.path.exists(outpath):
logger.info(f'Loading in catalog from {outpath}')
catalog = Table.read(outpath)
n_blobs = len(np.unique(catalog['blob_id']))
n_sources = len(catalog)
else:
raise RuntimeError(f'Catalog was not found at {outpath}')
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(outpath):
logger.info(f'Loading in segmaps from {outpath}')
hdul = fits.open(outpath)
segmap = hdul['SEGMAP'].data
segmask = segmap.copy()
segmask[segmap>0] = 1
blobmap = hdul['BLOBMAP'].data
else:
raise RuntimeError(f'Segmaps were not found at {outpath}')
# filen = open(os.path.join(conf.INTERIM_DIR, f'detbrick_N{brick_id}.pkl'), 'rb')
# detbrick = dill.load(filen)
# Create modbrick
if band is None:
if not multiband_model:
img_names = [conf.MODELING_NICKNAME,]
mod_nickname = conf.MODELING_NICKNAME
elif multiband_model:
img_names = conf.MODELING_BANDS
for iname in img_names:
if iname not in conf.BANDS:
raise ValueError(f'{iname} is listed as a band to model, but is not found in conf.BANDS!')
mod_nickname = conf.MULTIBAND_NICKNAME
else:
if type(band) == list:
img_names = band
else:
img_names = [band,]
mod_nickname = conf.MULTIBAND_NICKNAME
# Loop over bands to do the modelling on -- if model in series!
eff_area = None
if not multiband_model:
for band_num, mod_band in enumerate(img_names):
tstart = time.time()
modbrick = stage_brickfiles(brick_id, band=mod_band, nickname=mod_nickname, modeling=(modband[band_num] not in conf.BANDS))
# catalog['x'] = catalog['x'] - modbrick.mosaic_origin[1] + conf.BRICK_BUFFER - 1
# catalog['y'] = catalog['y'] - modbrick.mosaic_origin[0] + conf.BRICK_BUFFER - 1
if modbrick is None:
return
if (band is not None) & (band != conf.MODELING_NICKNAME):
modbrick.bands = [f'{conf.MODELING_NICKNAME}_{mod_band}',]
modbrick.n_bands = len(modbrick.bands)
else:
mod_band = conf.MODELING_NICKNAME
logger.info(f'Modeling brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
# Inform the user about the blob occupation distribution
logger.info('Blob Occupation Distribution')
for i in np.arange(5)+1:
n_blob = np.sum(catalog['N_BLOB'] == i)
logger.info(f' {i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
n_blob = np.sum(catalog['N_BLOB'] > i)
logger.info(f' >{i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
if conf.PLOT > 3:
plot_brick(modbrick, 0, band=mod_band)
plot_background(modbrick, 0, band=mod_band)
plot_mask(modbrick, 0, band=mod_band)
if conf.SAVE_BACKGROUND:
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_BACKGROUNDS.fits')
logger.info('Saving background and RMS maps...')
if os.path.exists(outpath):
hdul = fits.open(outpath)
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
for m, mband in enumerate(modbrick.bands):
hdul.append(fits.ImageHDU(data=modbrick.background_images[m], name=f'BACKGROUND_{mband}', header=modbrick.wcs.to_header()))
hdul[f'BACKGROUND_{mband}'].header['BACK_GLOBAL'] = modbrick.backgrounds[m,0]
hdul[f'BACKGROUND_{mband}'].header['BACK_RMS'] = modbrick.backgrounds[m,1]
if (conf.SUBTRACT_BACKGROUND_WITH_MASK|conf.SUBTRACT_BACKGROUND_WITH_DIRECT_MEDIAN):
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_GLOBAL'] = modbrick.masked_median[m]
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_RMS'] = modbrick.masked_std[m]
hdul.append(fits.ImageHDU(data=modbrick.background_rms_images[m], name=f'RMS_{mband}', header=modbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=1/np.sqrt(modbrick.weights[m]), name=f'UNC_{mband}', header=modbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
logger.debug(f'Brick #{brick_id} -- Image statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.images[0], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Weight statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.weights[0], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Error statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(1/np.sqrt(modbrick.weights[0][modbrick.weights[0] > 0].flatten()), axis=None)[:4]  # use the nonzero weight values, not their indices
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Background statistics for {mod_band}')
logger.debug(f' Global: {modbrick.backgrounds[0, 0]:6.6f}')
logger.debug(f' RMS: {modbrick.backgrounds[0, 1]:6.6f}\n')
modbrick.catalog = catalog.copy()
modbrick.segmap = segmap
modbrick.n_sources = n_sources
modbrick.is_modeling = True
modbrick.blobmap = blobmap
modbrick.n_blobs = n_blobs
modbrick.segmask = segmask
# Transfer to MODBRICK
tstart = time.time()
if band_num > 0:
modbrick.n_blobs, modbrick.n_sources, modbrick.segmap, modbrick.segmask, modbrick.blobmap, modbrick.catalog = n_blobs, n_sources, segmap, segmask, blobmap, catalog
if modbrick.n_blobs <= 0:
logger.critical(f'Modeling brick #{brick_id} gained {modbrick.n_blobs} blobs! Quitting.')
return
modbrick.run_weights()
modbrick.run_background()
modbrick.add_columns(modbrick_name=mod_band, multiband_model = False) # doing on detbrick gets column names wrong
logger.info(f'Modeling brick #{brick_id} gained {modbrick.n_blobs} blobs with {modbrick.n_sources} objects ({time.time() - tstart:3.3f}s)')
if source_only:
if source_id is None:
raise ValueError('Source only is set True, but no source has been provided!')
# Run a specific source or blob
if (source_id is not None) | (blob_id is not None):
# conf.PLOT = True
outcatalog = modbrick.catalog.copy()
# print('AHHHHH ', outcatalog['x', 'y'])
mosaic_origin = modbrick.mosaic_origin
# print('MOSAIC ORIGIN ', mosaic_origin)
brick_id = modbrick.brick_id
if source_id is not None:
blob_id = np.unique(modbrick.blobmap[modbrick.segmap == source_id])
if len(blob_id) == 1:
blob_id = blob_id[0]
else:
raise ValueError('Requested source is not in brick!')
if blob_id is not None:
if blob_id not in outcatalog['blob_id']:
raise ValueError(f'No blobs exist for requested blob id {blob_id}')
logger.info(f'Running single blob {blob_id}')
modblob = modbrick.make_blob(blob_id)
modblob.is_modeling=True
# if source_id is set, then look at only that source
if modblob.rejected:
raise ValueError('Requested blob is invalid')
if source_only & (source_id not in modblob.bcatalog['source_id']):
logger.warning('Requested source is not in blob!')
for source in modblob.bcatalog:
logger.warning(f"{source['source_id']} {source['cflux']}")
raise ValueError('Requested source is not in blob!')
output_rows = runblob(blob_id, modblob, modeling=True, plotting=conf.PLOT, source_id=source_id, source_only=source_only)
output_cat = vstack(output_rows)
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
shape = np.shape(output_cat[colname][0])
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=shape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# Else, production mode -- all objects in brick are to be run.
else:
if conf.NBLOBS > 0:
run_n_blobs = conf.NBLOBS
else:
run_n_blobs = modbrick.n_blobs
logger.info(f'Preparing to run {run_n_blobs} blobs.')
outcatalog = modbrick.catalog.copy()
mosaic_origin = modbrick.mosaic_origin
brick_id = modbrick.brick_id
logger.info('Generating blobs...')
astart = time.time()
modblobs = (modbrick.make_blob(i) for i in np.arange(1, run_n_blobs+1))
logger.info(f'{run_n_blobs} blobs generated ({time.time() - astart:3.3f}s)')
#del modbrick
tstart = time.time()
if conf.NTHREADS > 1:
with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
result = pool.uimap(partial(runblob, modeling=True, plotting=conf.PLOT, source_only=source_only), np.arange(1, run_n_blobs+1), modblobs)
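# pool.uimap pairs the blob-id array and the blob generator element-wise, so each worker
# receives (blob_id, blob) as the positional arguments of runblob; results arrive in
# completion order, which is fine since rows are matched back to the catalog by source_id.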
output_rows = list(result)
logger.info('Parallel processing complete.')
else:
logger.info('Serial processing initialized.')
output_rows = [runblob(kblob_id+1, kblob, modeling=True, plotting=conf.PLOT, source_only=source_only) for kblob_id, kblob in enumerate(modblobs)]
output_cat = vstack(output_rows)
# Estimate covariance
modbrick.bcatalog = output_cat
astart = time.time()
logger.info(f'Starting covariance estimation...')
status = modbrick.estimate_error_corr(use_band_position=False, use_band_shape=False, modeling=True)  # band-wise positions/shapes are not unfixed at the modeling stage
logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
# estimate effective area
if conf.ESTIMATE_EFF_AREA:
eff_area = np.zeros(len(img_names))
for b, bname in enumerate(img_names):
eff_area[b] = modbrick.estimate_effective_area(output_cat, bname, modeling=True)[0]
ttotal = time.time() - tstart
logger.info(f'Completed {run_n_blobs} blobs with {len(output_cat)} sources in {ttotal:3.3f}s (avg. {ttotal/len(output_cat):2.2f}s per source)')
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
shape = np.shape(output_cat[colname][0])
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=shape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# open again and add
# If user wants model and/or residual images made:
if conf.MAKE_RESIDUAL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{modbrick.bands[0]}']]
modbrick.make_residual_image(catalog=cleancatalog, use_band_position=False, modeling=True)
elif conf.MAKE_MODEL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{modbrick.bands[0]}']]
modbrick.make_model_image(catalog=cleancatalog, use_band_position=False, modeling=True)
# Reconstruct mosaic positions of invalid sources
invalid = ~modbrick.catalog[f'VALID_SOURCE_{modbrick.bands[0]}']
# modbrick.catalog[invalid][f'X_MODEL_{modbrick.bands[0]}'] = modbrick.catalog[invalid]['x_orig'] + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER
# modbrick.catalog[invalid][f'Y_MODEL_{modbrick.bands[0]}'] = modbrick.catalog[invalid]['y_orig'] + modbrick.mosaic_origin[0] - conf.BRICK_BUFFER
# print(np.sum(invalid), len(invalid))
# plt.pause(10)
# idx = np.argwhere(invalid)[:20]
# print(modbrick.catalog[idx][f'X_MODEL_{modbrick.bands[0]}'], np.array(modbrick.catalog[idx]['x_orig']) + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER)
# if multiband model is enabled...
elif multiband_model:
tstart = time.time()
modbrick = stage_brickfiles(brick_id, band=img_names, nickname=mod_nickname, modeling=True)
if modbrick is None:
return
# if detbrick.is_borrowed:
# catalog['x'] = catalog['x'] - modbrick.mosaic_origin[1] + conf.BRICK_BUFFER - 1
# catalog['y'] = catalog['y'] - modbrick.mosaic_origin[0] + conf.BRICK_BUFFER - 1
modbrick.bands = [f'{conf.MODELING_NICKNAME}_{b}' for b in img_names]
modbrick.n_bands = len(modbrick.bands)
logger.info(f'Multi-band Modeling brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
# Inform the user about the blob occupation distribution
logger.info('Blob Occupation Distribution')
__, idx = np.unique(catalog['blob_id'], return_index=True)
for i in np.arange(5)+1:
n_blob = np.sum(catalog['N_BLOB'][idx] == i)
logger.info(f' {i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
n_blob = np.sum(catalog['N_BLOB'][idx] > i)
logger.info(f' >{i}: {n_blob}/{n_blobs} ({n_blob/n_blobs*100:2.2f}%)')
for i, mod_band in enumerate(modbrick.bands):
if conf.PLOT > 3:
plot_brick(modbrick, 0, band=mod_band)
plot_background(modbrick, 0, band=mod_band)
plot_mask(modbrick, 0, band=mod_band)
logger.debug(f'Brick #{brick_id} -- Image statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.images[i], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Weight statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(modbrick.weights[i], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Error statistics for {mod_band}')
shape, minmax, mean, var = stats.describe(1/np.sqrt(modbrick.weights[i][modbrick.weights[i] > 0].flatten()), axis=None)[:4]  # use the nonzero weight values, not their indices
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Background statistics for {mod_band}')
logger.debug(f' Global: {modbrick.backgrounds[i, 0]:6.6f}')
logger.debug(f' RMS: {modbrick.backgrounds[i, 1]:6.6f}\n')
modbrick.catalog = catalog.copy()
modbrick.segmap = segmap
modbrick.n_sources = n_sources
modbrick.is_modeling = True
modbrick.blobmap = blobmap
modbrick.n_blobs = n_blobs
modbrick.segmask = segmask
# Cleanup on MODBRICK
tstart = time.time()
modbrick.shared_params = True ## CRITICAL THING TO DO HERE!
modbrick.add_columns(multiband_model=True) # doing on detbrick gets column names wrong
logger.info(f'Modeling brick #{brick_id} has {modbrick.n_blobs} blobs with {modbrick.n_sources} objects ({time.time() - tstart:3.3f}s)')
modbrick.run_weights()
modbrick.run_background()
if conf.PLOT > 3:
plot_blobmap(modbrick)
if conf.SAVE_BACKGROUND:
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_BACKGROUNDS.fits')
logger.info('Saving background and RMS maps...')
if os.path.exists(outpath):
hdul = fits.open(outpath)
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
for m, mband in enumerate(modbrick.bands):
hdul.append(fits.ImageHDU(data=modbrick.background_images[m], name=f'BACKGROUND_{mband}', header=modbrick.wcs.to_header()))
hdul[f'BACKGROUND_{mband}'].header['BACK_GLOBAL'] = modbrick.backgrounds[m,0]
hdul[f'BACKGROUND_{mband}'].header['BACK_RMS'] = modbrick.backgrounds[m,1]
if (conf.SUBTRACT_BACKGROUND_WITH_MASK|conf.SUBTRACT_BACKGROUND_WITH_DIRECT_MEDIAN):
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_GLOBAL'] = modbrick.masked_median[m]
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_RMS'] = modbrick.masked_std[m]
hdul.append(fits.ImageHDU(data=modbrick.background_rms_images[m], name=f'RMS_{mband}', header=modbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=1/np.sqrt(modbrick.weights[m]), name=f'UNC_{mband}', header=modbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
if source_only:
if source_id is None:
raise ValueError('Source only is set True, but no source has been provided!')
# Run a specific source or blob
blob_only=False
if (source_id is not None) | (blob_id is not None):
# conf.PLOT = True
outcatalog = modbrick.catalog.copy()
# print('AHHHHH ', outcatalog['x', 'y'])
mosaic_origin = modbrick.mosaic_origin
# print('MOSAIC ORIGIN ', mosaic_origin)
brick_id = modbrick.brick_id
if source_id is not None:
blob_id = np.unique(modbrick.blobmap[modbrick.segmap == source_id])
if len(blob_id) == 1:
blob_id = blob_id[0]
else:
raise ValueError('Requested source is not in brick!')
if blob_id is not None:
if blob_id not in outcatalog['blob_id']:
raise ValueError(f'No blobs exist for requested blob id {blob_id}')
blob_only=True
logger.info(f'Running single blob {blob_id}')
modblob = modbrick.make_blob(blob_id)
if modblob.rejected:
raise ValueError('Requested blob is invalid')
output_rows = runblob(blob_id, modblob, modeling=True, plotting=conf.PLOT, source_id=source_id, blob_only=blob_only, source_only=source_only)
output_cat = vstack(output_rows)
# Estimate covariance
modbrick.bcatalog = output_cat
# astart = time.time()
# logger.info(f'Starting covariance estimation...')
# status = modbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=True)
# logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
colshape = output_cat[colname].shape
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# Else, production mode -- all objects in brick are to be run.
else:
if conf.NBLOBS > 0:
run_n_blobs = conf.NBLOBS
bid_arr = np.arange(1, run_n_blobs+1)
elif conf.MODEL_PHOT_MAX_NBLOB > 0:
bid_arr = np.unique(modbrick.catalog['blob_id'][modbrick.catalog['N_BLOB'] <= conf.MODEL_PHOT_MAX_NBLOB])
run_n_blobs = len(bid_arr)
if conf.NBLOBS > 0:
bid_arr = bid_arr[:conf.NBLOBS]
run_n_blobs = len(bid_arr)
else:
run_n_blobs = modbrick.n_blobs
bid_arr = np.arange(1, run_n_blobs+1)
logger.info(f'Preparing to run {run_n_blobs}/{modbrick.n_blobs} blobs.')
outcatalog = modbrick.catalog.copy()
mosaic_origin = modbrick.mosaic_origin
brick_id = modbrick.brick_id
logger.info('Generating blobs...')
astart = time.time()
modblobs = (modbrick.make_blob(i) for i in bid_arr)
logger.info(f'{run_n_blobs} blobs generated ({time.time() - astart:3.3f}s)')
#del modbrick
tstart = time.time()
if conf.NTHREADS > 1:
with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
result = pool.uimap(partial(runblob, modeling=True, plotting=conf.PLOT), bid_arr, modblobs)
output_rows = list(result)
logger.info('Parallel processing complete.')
else:
logger.info('Serial processing initialized.')
output_rows = [runblob(kblob_id+1, kblob, modeling=True, plotting=conf.PLOT) for kblob_id, kblob in enumerate(modblobs)]
output_cat = vstack(output_rows)
ttotal = time.time() - tstart
logger.info(f'Completed {run_n_blobs} blobs with {len(output_cat)} sources in {ttotal:3.3f}s (avg. {ttotal/len(output_cat):2.2f}s per source)')
# Estimate covariance
modbrick.bcatalog = output_cat
# astart = time.time()
# logger.info(f'Starting covariance estimation...')
# status = modbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=True)
# logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
# estimate effective area
if conf.ESTIMATE_EFF_AREA:
eff_area = dict(zip(img_names, np.zeros(len(img_names))))
for b, bname in enumerate(img_names):
eff_area[bname] = modbrick.estimate_effective_area(output_cat, bname, modeling=True)[0]
else:
eff_area = None
for colname in output_cat.colnames:
if colname not in outcatalog.colnames:
colshape = np.shape(output_cat[colname])
if len(colshape) == 2:
colshape = (colshape[1],)
else:
colshape = (1,)
outcatalog.add_column(Column(length=len(outcatalog), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
#outcatalog = join(outcatalog, output_cat, join_type='left', )
for row in output_cat:
outcatalog[np.where(outcatalog['source_id'] == row['source_id'])[0]] = row
# vs = outcatalog['VALID_SOURCE']
# scoords = SkyCoord(ra=outcatalog[vs]['RA'], dec=outcatalog[vs]['DEC'], unit='degree')
# ebmv = m.ebv(scoords)
# col_ebmv = Column(np.zeros_like(outcatalog['RA']), name='EBV')
# col_ebmv[vs] = ebmv
# outcatalog.add_column(col_ebmv)
modbrick.catalog = outcatalog
# Reconstruct mosaic positions of invalid sources
invalid = ~modbrick.catalog['VALID_SOURCE']
modbrick.catalog['x'] = modbrick.catalog['x'] + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER
modbrick.catalog['y'] = modbrick.catalog['y'] + modbrick.mosaic_origin[0] - conf.BRICK_BUFFER
modbrick.catalog['x_orig'] = modbrick.catalog['x_orig'] + modbrick.mosaic_origin[1] - conf.BRICK_BUFFER
modbrick.catalog['y_orig'] = modbrick.catalog['y_orig'] + modbrick.mosaic_origin[0] - conf.BRICK_BUFFER
# If model bands is more than one, choose best one
# Choose based on min chisq
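# A toy sketch of the per-source selection below (hypothetical chisq values):
#     score_arr = np.array([[2.1, 1.4],    # source 0: band 1 wins
#                           [0.9, 3.3]])   # source 1: band 0 wins
#     np.argmin(score_arr, 1)              # -> array([1, 0])
# Each source then inherits X_MODEL, Y_MODEL, and VALID_SOURCE from its winning band.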
if (len(img_names) > 1) & ~multiband_model:
logger.info(f'Selecting best-fit models within {len(img_names)} bands')
name_arr = np.ones(shape=(len(modbrick.catalog), len(img_names)), dtype='U11')
score_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
valid_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
xmodel_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
ymodel_arr = np.zeros(shape=(len(modbrick.catalog), len(img_names)))
for i, mod_band in enumerate(img_names):
name_arr[:, i] = mod_band
score_arr[:, i] = modbrick.catalog[f'CHISQ_{conf.MODELING_NICKNAME}_{mod_band}']
xmodel_arr[:, i] = modbrick.catalog[f'X_MODEL_{conf.MODELING_NICKNAME}_{mod_band}']
ymodel_arr[:, i] = modbrick.catalog[f'Y_MODEL_{conf.MODELING_NICKNAME}_{mod_band}']
valid_arr[:, i] = modbrick.catalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}_{mod_band}']
score_arr[np.logical_not(valid_arr[:,i]), i] = 1E31
argmin_score = np.argmin(score_arr, 1)
argmin_zero = np.min(score_arr, 1) == 1E31
argmin_zero = np.zeros_like(argmin_zero)
modbrick.catalog['BEST_MODEL_BAND'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(name_arr[~argmin_zero], argmin_score[~argmin_zero])]
modbrick.catalog['X_MODEL'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(xmodel_arr[~argmin_zero], argmin_score[~argmin_zero])]
modbrick.catalog['Y_MODEL'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(ymodel_arr[~argmin_zero], argmin_score[~argmin_zero])]
modbrick.catalog['VALID_SOURCE'][~argmin_zero] = [modband_opt[k] for modband_opt, k in zip(valid_arr[~argmin_zero], argmin_score[~argmin_zero])]
# if modbrick.wcs is not None:
# skyc = self.brick_wcs.all_pix2world(modbrick.catalog[f'X_MODEL'] - modbrick.mosaic_origin[0] + conf.BRICK_BUFFER, modbrick.catalog[f'Y_MODEL'] - modbrick.mosaic_origin[1] + conf.BRICK_BUFFER, 0)
# modbrick.bcatalog[row][f'RA'] = skyc[0]
# modbrick.bcatalog[row][f'DEC'] = skyc[1]
# logger.info(f" Sky Model RA, Dec: {skyc[0]:6.6f} deg, {skyc[1]:6.6f} deg")
elif (len(img_names) > 1) & multiband_model:
modbrick.catalog['BEST_MODEL_BAND'] = conf.MODELING_NICKNAME
# modbrick.catalog['X_MODEL']
# modbrick.catalog['Y_MODEL'] # ???? WHAT
# modbrick.catalog['VALID_SOURCE']
elif img_names[0] != conf.MODELING_NICKNAME:
modbrick.catalog['BEST_MODEL_BAND'] = f'{conf.MODELING_NICKNAME}_{img_names[0]}'
modbrick.catalog['X_MODEL'] = modbrick.catalog[f'X_MODEL_{conf.MODELING_NICKNAME}_{img_names[0]}']
modbrick.catalog['Y_MODEL'] = modbrick.catalog[f'Y_MODEL_{conf.MODELING_NICKNAME}_{img_names[0]}']
modbrick.catalog['VALID_SOURCE'] = modbrick.catalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}_{img_names[0]}']
else:
modbrick.catalog['BEST_MODEL_BAND'] = f'{conf.MODELING_NICKNAME}'
modbrick.catalog['X_MODEL'] = modbrick.catalog[f'X_MODEL_{conf.MODELING_NICKNAME}']
modbrick.catalog['Y_MODEL'] = modbrick.catalog[f'Y_MODEL_{conf.MODELING_NICKNAME}']
modbrick.catalog['VALID_SOURCE'] = modbrick.catalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}']
# write out cat
if conf.OUTPUT:
hdr = header_from_dict(conf.__dict__)
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in img_names:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b}', eff_area_deg, f'{conf.MODELING_NICKNAME} {band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(modbrick.catalog)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
outpath = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
hdul.writeto(outpath, output_verify='ignore', overwrite=conf.OVERWRITE)
logger.info(f'Wrote out catalog to {outpath}')
# If user wants model and/or residual images made:
if conf.MAKE_RESIDUAL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}']]
modbrick.make_residual_image(catalog=cleancatalog, use_band_position=False, modeling=True)
elif conf.MAKE_MODEL_IMAGE:
cleancatalog = outcatalog[outcatalog[f'VALID_SOURCE_{conf.MODELING_NICKNAME}']]
modbrick.make_model_image(catalog=cleancatalog, use_band_position=False, modeling=True)
# close the brick_id specific file handlers
if conf.LOGFILE_LOGGING_LEVEL is not None:
new_fh.close()
logger.removeHandler(new_fh)
def force_photometry(brick_id, band=None, source_id=None, blob_id=None, insert=False, source_only=False, unfix_bandwise_positions=(not conf.FREEZE_FORCED_POSITION), unfix_bandwise_shapes=(not conf.FREEZE_FORCED_SHAPE), rao_cramer_only=False):
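""" Stage 3 driver. Force the Stage 2 models onto the requested bands. If band-wise positions
and shapes stay frozen (or only one band is requested), a single call to force_models handles
everything; otherwise each band is forced separately with unfixed positions. """
# Hypothetical usage (brick ID and band name are placeholders):
#     force_photometry(1)                                # force all conf.BANDS on brick 1
#     force_photometry(1, band='irac_ch1', insert=True)  # force one band and insert into B1.cat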
if band is None:
fband = conf.BANDS
addName = conf.MULTIBAND_NICKNAME
else:
if (type(band) == list) | (type(band) == np.ndarray):
fband = band
elif (type(band) == str) | (type(band) == np.str_):
fband = [band,]
else:
sys.exit('ERROR -- Input band is not a list, array, or string!')
addName = '_'.join(fband)
# create new logging file
if conf.LOGFILE_LOGGING_LEVEL is not None:
brick_logging_path = os.path.join(conf.LOGGING_DIR, f"B{brick_id}_{addName}_logfile.log")
logger.info(f'Logging information will be streamed to console and to {brick_logging_path}\n')
# If overwrite is on, remove old logger
if conf.OVERWRITE & os.path.exists(brick_logging_path):
logger.warning('Existing logfile will be overwritten.')
os.remove(brick_logging_path)
# close and remove the old file handler
#fh.close()
#logger.removeHandler(fh)
# we will add an additional file handler to keep track of brick_id specific information
# set up the new file handler
shutil.copy(logging_path, brick_logging_path)
new_fh = logging.FileHandler(brick_logging_path,mode='a')
new_fh.setLevel(logging.getLevelName(conf.LOGFILE_LOGGING_LEVEL))
new_fh.setFormatter(formatter)
logger.addHandler(new_fh)
# TODO Check if the catalog will be too big...
if ((not unfix_bandwise_positions) & (not unfix_bandwise_shapes)) | (len(fband) == 1):
force_models(brick_id=brick_id, band=band, source_id=source_id, blob_id=blob_id, insert=insert, source_only=source_only, force_unfixed_pos=False, use_band_shape=unfix_bandwise_shapes, rao_cramer_only=rao_cramer_only)
else:
if conf.FREEZE_FORCED_POSITION:
logger.warning('Setting FREEZE_FORCED_POSITION to False!')
conf.FREEZE_FORCED_POSITION = False
for b in fband:
tstart = time.time()
logger.critical(f'Running Forced Photometry on {b}')
if rao_cramer_only:
logger.critical('WARNING -- ONLY COMPUTING RAO-CRAMER FLUX ERRORS! THIS IS NOT A NORMAL MODE!')
logger.critical('ENSURE PLOTTING IS TURNED OFF!!!')
force_models(brick_id=brick_id, band=b, source_id=source_id, blob_id=blob_id, insert=insert, source_only=source_only, force_unfixed_pos=True, use_band_shape=unfix_bandwise_shapes, rao_cramer_only=rao_cramer_only)
logger.critical(f'Forced Photometry for {b} finished in {time.time() - tstart:3.3f}s')
# TODO -- compare valid source_band and add to catalog!
if conf.PLOT > 0: # COLLECT SRCPROFILES
logger.info('Collecting srcprofile diagnostic plots...')
if (blob_id is None) & (source_id is None):
import glob
# find sids
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B*_S*_*_srcprofile.pdf'))
sids= []
for f in files:
tsid = int(f[len(conf.PLOT_DIR):].split('S')[1].split('_')[0])
if tsid not in sids:
sids.append(tsid)
for sid in sids:
logger.debug(f' * source {sid}')
fnames = []
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B*_S{sid}_*_srcprofile.pdf'))
if len(files) == 0:
logger.error(f'Source {sid} does not have any srcprofile plots to collect!')
return
bid = int(files[0][len(conf.PLOT_DIR):].split('B')[1].split('_')[0])
for b in fband:
logger.debug(f' *** adding {b}')
fname = os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_{b}_srcprofile.pdf')
if os.path.exists(fname):
fnames.append(fname)
else:
logger.warning(f' *** {b} does not exist at {fname}')
# collect
from PyPDF2 import PdfFileMerger
merger = PdfFileMerger()
for pdf in fnames:
merger.append(pdf)
logger.debug(f'Writing out combined srcprofile...')
merger.write(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_srcprofile.pdf'))
merger.close()
# remove
logger.debug(f'Removing individual srcprofiles...')
for fname in fnames:
    os.remove(fname)
else:
import glob
# find sids
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{blob_id}_S*_*_srcprofile.pdf'))
sids= []
for f in files:
tsid = int(f[len(conf.PLOT_DIR):].split('S')[1].split('_')[0])
if tsid not in sids:
sids.append(tsid)
for sid in sids:
logger.debug(f' * source {sid}')
fnames = []
files = glob.glob(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{blob_id}_S{sid}_*_srcprofile.pdf'))
if len(files) == 0:
logger.error(f'Source {sid} does not have any srcprofile plots to collect!')
return
bid = int(files[0][len(conf.PLOT_DIR):].split('B')[1].split('_')[0])
for b in fband:
logger.debug(f' *** adding {b}')
fname = os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_{b}_srcprofile.pdf')
if os.path.exists(fname):
fnames.append(fname)
else:
logger.warning(f' *** {b} does not exist at {fname}')
# collect
from PyPDF2 import PdfFileMerger
merger = PdfFileMerger()
for pdf in fnames:
merger.append(pdf)
logger.debug(f'Writing out combined srcprofile...')
merger.write(os.path.join(conf.PLOT_DIR, f'T{brick_id}_B{bid}_S{sid}_srcprofile.pdf'))
merger.close()
# remove
logger.debug(f'Removing individual srcprofiles...')
for fname in fnames:
    os.remove(fname)
def force_models(brick_id, band=None, source_id=None, blob_id=None, insert=True, source_only=False, force_unfixed_pos=(not conf.FREEZE_FORCED_POSITION), use_band_shape=(not conf.FREEZE_FORCED_SHAPE), rao_cramer_only=False):
""" Stage 3. Force the models on the other images and solve only for flux. """
# Create and update multiband brick
tstart = time.time()
eff_area = None
if source_only:
if source_id is None:
raise ValueError('Source only is set True, but no source has been provided!')
if (source_id is None) & (blob_id is None):
if (conf.NBLOBS == 0) & (conf.NTHREADS > 1) & (conf.PLOT > 0):
conf.PLOT = 0
logger.warning('Plotting not supported while forcing models in parallel!')
if band is None:
fband = conf.BANDS
else:
if (type(band) == list) | (type(band) == np.ndarray):
fband = band
elif (type(band) == str) | (type(band) == np.str_):
fband = [band,]
else:
sys.exit('ERROR -- Input band is not a list, array, or string!')
fbrick = stage_brickfiles(brick_id, nickname=conf.MULTIBAND_NICKNAME, band=fband, modeling=False)
if fbrick is None:
return
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
if os.path.exists(search_fn):
fbrick.catalog = Table(fits.open(search_fn)[1].data)
fbrick.n_sources = len(fbrick.catalog)
fbrick.n_blobs = len(np.unique(fbrick.catalog['blob_id']))
else:
logger.critical(f'No valid catalog was found for {brick_id}')
return
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
fbrick.segmap = hdul_seg['SEGMAP'].data
fbrick.blobmap = hdul_seg['BLOBMAP'].data
fbrick.segmask = fbrick.segmap.copy()
fbrick.segmask[fbrick.segmap>0] = 1
else:
logger.critical(f'No valid segmentation map was found for {brick_id}')
return
if (~fbrick.catalog['VALID_SOURCE_MODELING']).all():
logger.critical(f'All sources in brick #{brick_id} are invalid. Quitting!')
return
uniq_src, index_src = np.unique(fbrick.catalog['source_id'], return_index=True)
if len(uniq_src) != len(fbrick.catalog):
n_nonuniq = len(fbrick.catalog) - len(uniq_src)
logger.warning(f'Removing {n_nonuniq} non-unique sources from catalog!')
fbrick.catalog = fbrick.catalog[index_src]
if not rao_cramer_only:
fbrick.add_columns(modeling=False)
else:
filler = np.zeros(len(fbrick.catalog))
for colname in fbrick.bands:
colname = colname.replace(' ', '_')
fbrick.catalog.add_column(Column(filler, name=f'RAW_DIRECTFLUX_{colname}'))
fbrick.catalog.add_column(Column(filler, name=f'RAW_DIRECTFLUXERR_{colname}'))
fbrick.catalog.add_column(Column(filler, name=f'DIRECTFLUX_{colname}'))
fbrick.catalog.add_column(Column(filler, name=f'DIRECTFLUXERR_{colname}'))
fbrick.run_background()
fbrick.run_weights()
logger.info(f'{conf.MULTIBAND_NICKNAME} brick #{brick_id} created ({time.time() - tstart:3.3f}s)')
if conf.PLOT > 3:
for plt_band in fband:
if (len(fband) == 1) | force_unfixed_pos:
idx = 0
else:
idx = np.argwhere(np.array(fband)==plt_band)[0][0]
plot_brick(fbrick, idx, band=plt_band)
plot_background(fbrick, idx, band=plt_band)
plot_mask(fbrick, idx, band=plt_band)
fcat = fbrick.catalog.copy()
fcat['x'] -= fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1
fcat['y'] -= fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1
plot_blobmap(fbrick, image=fbrick.images[idx], band=plt_band, catalog=fcat)
for i, vb_band in enumerate(fband):
logger.debug(f'Brick #{brick_id} -- Image statistics for {vb_band}')
shape, minmax, mean, var = stats.describe(fbrick.images[i], axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Weight statistics for {vb_band}')
ok = fbrick.weights[i] > 0
shape, minmax, mean, var = stats.describe(fbrick.weights[i][ok].flatten(), axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Error statistics for {vb_band}')
shape, minmax, mean, var = stats.describe(1/np.sqrt(fbrick.weights[i][ok].flatten()), axis=None)[:4]
logger.debug(f' Limits: {minmax[0]:6.6f} - {minmax[1]:6.6f}')
logger.debug(f' Mean: {mean:6.6f}+/-{np.sqrt(var):6.6f}\n')
logger.debug(f'Brick #{brick_id} -- Background statistics for {vb_band}')
logger.debug(f' Global: {fbrick.backgrounds[i, 0]:6.6f}')
logger.debug(f' RMS: {fbrick.backgrounds[i, 1]:6.6f}')
if conf.SAVE_BACKGROUND:
outpath = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_BACKGROUNDS.fits')
logger.info('Saving background and RMS maps...')
if os.path.exists(outpath):
hdul = fits.open(outpath)
else:
hdul = fits.HDUList()
hdul.append(fits.PrimaryHDU())
for m, mband in enumerate(fbrick.bands):
hdul.append(fits.ImageHDU(data=fbrick.background_images[m], name=f'BACKGROUND_{mband}', header=fbrick.wcs.to_header()))
hdul[f'BACKGROUND_{mband}'].header['BACK_GLOBAL'] = fbrick.backgrounds[m,0]
hdul[f'BACKGROUND_{mband}'].header['BACK_RMS'] = fbrick.backgrounds[m,1]
if (conf.SUBTRACT_BACKGROUND_WITH_MASK|conf.SUBTRACT_BACKGROUND_WITH_DIRECT_MEDIAN):
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_GLOBAL'] = fbrick.masked_median[m]
hdul[f'BACKGROUND_{mband}'].header['MASKEDIMAGE_RMS'] = fbrick.masked_std[m]
hdul.append(fits.ImageHDU(data=fbrick.background_rms_images[m], name=f'RMS_{mband}', header=fbrick.wcs.to_header()))
hdul.append(fits.ImageHDU(data=1/np.sqrt(fbrick.weights[m]), name=f'UNC_{mband}', header=fbrick.wcs.to_header()))
hdul.writeto(outpath, overwrite=conf.OVERWRITE)
hdul.close()
logger.info(f'Saved to {outpath} ({time.time() - tstart:3.3f}s)')
logger.info(f'Forcing models on {len(fband)} {conf.MULTIBAND_NICKNAME} bands')
# if conf.FORCE_SHARE_PARAMS:
# fbrick.shared_params = True
tstart = time.time()
if (source_id is not None) | (blob_id is not None):
# conf.PLOT = True
if source_id is not None:
blob_id = np.unique(fbrick.blobmap[fbrick.segmap == source_id])
assert(len(blob_id) == 1)
blob_id = blob_id[0]
fblob = fbrick.make_blob(blob_id)
if source_only & (source_id not in fbrick.catalog['source_id']):
logger.warning('Requested source is not in blob!')
for source in fbrick.catalog:
logger.debug(f"{source['source_id']} {source['cflux']}")
raise ValueError('Requested source is not in blob!')
if rao_cramer_only:
output_rows = runblob_rc(blob_id, fblob, catalog=fbrick.catalog, source_id=source_id)
else:
output_rows = runblob(blob_id, fblob, modeling=False, catalog=fbrick.catalog, plotting=conf.PLOT, source_id=source_id)
output_cat = vstack(output_rows)
fbrick.bcatalog = output_cat
# Estimate covariance
astart = time.time()
logger.info(f'Starting covariance estimation...')
status = fbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=False)
logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
if not conf.OUTPUT:
logger.warning('OUTPUT is DISABLED! Quitting...')
else:
if insert & conf.OVERWRITE & (conf.NBLOBS==0):
# open old cat
path_mastercat = os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat')
if os.path.exists(path_mastercat):
mastercat = Table.read(path_mastercat, format='fits')
# find new columns
newcols = np.in1d(output_cat.colnames, mastercat.colnames, invert=True)
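# np.in1d(..., invert=True) flags the output columns that the master catalog does not have
# yet; empty filler columns are added for those first, then rows are copied in by source_id.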
# make fillers
for colname in np.array(output_cat.colnames)[newcols]:
#mastercat.add_column(output_cat[colname])
if colname not in mastercat.colnames:
if np.ndim(output_cat[colname]) > 1:
    shape = np.shape(output_cat[colname][0])
else:
    shape = (1,)
mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=shape, name=colname))
for row in output_cat:
mastercat[np.where(mastercat['source_id'] == row['source_id'])[0]] = row
# coordinate correction
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
# save
mastercat.write(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat'), format='fits', overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to existing catalog file.')
else:
for colname in output_cat.colnames:
if colname not in fbrick.catalog.colnames:
if np.ndim(output_cat[colname]) > 1:
    shape = np.shape(output_cat[colname][0])
else:
    shape = (1,)
fbrick.catalog.add_column(Column(length=len(fbrick.catalog), dtype=output_cat[colname].dtype, shape=shape, name=colname))
#fbrick.catalog = join(fbrick.catalog, output_cat, join_type='left', )
for row in output_cat:
fbrick.catalog[np.where(fbrick.catalog['source_id'] == row['source_id'])[0]] = row
mode_ext = conf.MULTIBAND_NICKNAME
if fband is not None:
if len(fband) == 1:
mode_ext = fband[0].replace(' ', '_')
else:
mode_ext = conf.MULTIBAND_NICKNAME
# write out cat
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
if conf.OUTPUT:
fbrick.catalog.write(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{mode_ext}.cat'), format='fits', overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new {mode_ext} catalog file.')
else:
if conf.NBLOBS > 0:
run_n_blobs = conf.NBLOBS
else:
run_n_blobs = fbrick.n_blobs
fblobs = (fbrick.make_blob(i) for i in np.unique(fbrick.catalog['blob_id'].data))
if conf.NTHREADS > 1:
with pa.pools.ProcessPool(ncpus=conf.NTHREADS) as pool:
logger.info(f'Parallel processing pool initialized with {conf.NTHREADS} threads.')
if rao_cramer_only:
result = pool.uimap(partial(runblob_rc, catalog=fbrick.catalog), np.arange(1, run_n_blobs+1), fblobs)
else:
result = pool.uimap(partial(runblob, modeling=False, catalog=fbrick.catalog, plotting=conf.PLOT), np.arange(1, run_n_blobs+1), fblobs)
output_rows = list(result)
logger.info('Parallel processing complete.')
else:
if rao_cramer_only:
output_rows = [runblob_rc(kblob_id, fbrick.make_blob(kblob_id), catalog=fbrick.catalog) for kblob_id in np.arange(1, run_n_blobs+1)]
else:
output_rows = [runblob(kblob_id, fbrick.make_blob(kblob_id), modeling=False, catalog=fbrick.catalog, plotting=conf.PLOT) for kblob_id in np.arange(1, run_n_blobs+1)]
logger.info(f'Completed {run_n_blobs} blobs in {time.time() - tstart:3.3f}s')
#output_rows = [x for x in output_rows if x is not None]
output_cat = vstack(output_rows) # HACK -- at some point this should just UPDATE the bcatalog with the new photoms. IF the user sets NBLOBS > 0, the catalog is truncated!
uniq_src, idx_src = np.unique(output_cat['source_id'], return_index=True)
# if len(idx_src) != len(fbrick.catalog):
# raise RuntimeError(f'Output catalog is truncated! {len(idx_src)} out of {len(fbrick.catalog)}')
if len(uniq_src) < len(output_cat):
logger.warning(f'Found {len(uniq_src)} unique sources, out of {len(output_cat)} -- CLEANING!')
output_cat = output_cat[idx_src]
else:
logger.debug(f'Found {len(uniq_src)} unique sources, out of {len(output_cat)}')
# Estimate covariance
fbrick.bcatalog = output_cat
astart = time.time()
logger.info(f'Starting covariance estimation...')
status = fbrick.estimate_error_corr(use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=False)
logger.info(f'Covariance estimation complete. ({time.time() - astart:3.3f})s')
# estimate effective area
if conf.ESTIMATE_EFF_AREA:
eff_area = dict(zip(fband, np.zeros(len(fband))))
for b, bname in enumerate(fband):
eff_area[bname] = fbrick.estimate_effective_area(output_cat, bname, modeling=False)[0]
else:
eff_area = None
if not conf.OUTPUT:
logger.warning('OUTPUT is DISABLED! Quitting...')
else:
if insert & conf.OVERWRITE & (conf.NBLOBS==0) & (not force_unfixed_pos):
# open old cat
path_mastercat = os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat')
if os.path.exists(path_mastercat):
mastercat = Table.read(path_mastercat, format='fits')
# find new columns
newcols = np.in1d(output_cat.colnames, mastercat.colnames, invert=True)
# make fillers
for colname in np.array(output_cat.colnames)[newcols]:
if colname not in mastercat.colnames:
if np.ndim(output_cat[colname]) > 1:
    colshape = np.shape(output_cat[colname][0])
else:
    colshape = (1,)
mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
for row in output_cat:
mastercat[np.where(mastercat['source_id'] == row['source_id'])[0]] = row
# coordinate correction
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
# save
hdr = fits.open(path_mastercat)['CONFIG'].header
lastb = 0
for b in np.arange(99):
if f'AREA{b}' not in hdr.keys():
    lastb = b
    break
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}.cat'), overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to existing catalog file.')
outcatalog = mastercat
else:
logger.critical(f'Catalog file for brick #{fbrick.brick_id} could not be found!')
return
elif (not insert) & force_unfixed_pos:
# make a new MULTIBAND catalog or add to it!
path_mastercat = os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{conf.MULTIBAND_NICKNAME}.cat')
if os.path.exists(path_mastercat):
mastercat = Table.read(path_mastercat, format='fits')
# find new columns
newcols = np.in1d(output_cat.colnames, mastercat.colnames, invert=True)
if np.sum(newcols) == 0:
logger.warning('Columns exist in catalog -- defaulting to separate file output!')
hdr = fits.open(path_mastercat)['CONFIG'].header
lastb = 0
for b in np.arange(99):
if f'AREA{b}' in hdr.keys():
lastb = b
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{conf.MULTIBAND_NICKNAME}.cat'), overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new catalog file.')
else:
join_cat = output_cat[list(np.array(output_cat.colnames)[newcols])]
join_cat.add_column(output_cat['source_id'])
mastercat = join(mastercat, join_cat, keys='source_id', join_type='left')
# # add new columns, filled.
# newcolnames = []
# for colname in np.array(output_cat.colnames)[newcols]:
# if colname not in mastercat.colnames:
# if colname.startswith('FLUX_APER') | colname.startswith('MAG_APER'):
# mastercat.add_column(Column(length=len(mastercat), dtype=float, shape=(len(conf.APER_PHOT),), name=colname))
# else:
# mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=(1,), name=colname))
# newcolnames.append(colname)
# # if colname.startswith('FLUX_APER') | colname.startswith('MAG_APER'):
# # mastercat.add_column(Column(length=len(mastercat), dtype=float, shape=(len(conf.APER_PHOT),), name=colname))
# # else:
# # mastercat.add_column(Column(length=len(mastercat), dtype=output_cat[colname].dtype, shape=(1,), name=colname))
# # [print(j) for j in mastercat.colnames]
# # [print(j) for j in output_cat.colnames]
# # count = 0
# # for row in output_cat:
# # idx = np.where(mastercat['source_id'] == row['source_id'])[0]
# for colname in newcolnames:
# mastercat[colname][idx] = output_cat[colname]
# # print(mastercat[np.where(mastercat['source_id'] == row['source_id'])[0]][newcolnames])
# # print(newcolnames)
# # print(row[newcolnames])
# # print(np.where(mastercat['source_id'] == row['source_id'])[0])
# mastercat[newcolnames][idx] = row[newcolnames]
# count+=1
hdr = fits.open(path_mastercat)['CONFIG'].header
lastb = 0
for b in np.arange(99):
if f'AREA{b}' not in hdr.keys():
lastb = b
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(path_mastercat, overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to existing catalog file.')
else:
mastercat = output_cat
hdr = header_from_dict(conf.__dict__)
# hdr = fits.open(path_mastercat)['CONFIG'].header
# lastb = 0
# for b in np.arange(99):
# if 'AREA{b}' not in hdr.keys():
lastb = 0
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(mastercat)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(path_mastercat, overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new catalog file.')
outcatalog = mastercat
else:
for colname in output_cat.colnames:
if colname not in fbrick.catalog.colnames:
colshape = np.shape(output_cat[colname])
if len(colshape) == 2:
colshape = (colshape[1],)
else:
colshape = (1,)
fbrick.catalog.add_column(Column(length=len(fbrick.catalog), dtype=output_cat[colname].dtype, shape=colshape, name=colname))
#fbrick.catalog = join(fbrick.catalog, output_cat, join_type='left', )
for row in output_cat:
fbrick.catalog[np.where(fbrick.catalog['source_id'] == row['source_id'])[0]] = row
mode_ext = conf.MULTIBAND_NICKNAME
if fband is not None:
if len(fband) == 1:
mode_ext = fband[0].replace(' ', '_')
# write out cat
mastercat = fbrick.catalog
# fbrick.catalog['x'] = fbrick.catalog['x'] + fbrick.mosaic_origin[1] - conf.BRICK_BUFFER + 1.
# fbrick.catalog['y'] = fbrick.catalog['y'] + fbrick.mosaic_origin[0] - conf.BRICK_BUFFER + 1.
hdr = header_from_dict(conf.__dict__)
lastb = 0
for b in np.arange(99):
if f'AREA{b}' not in hdr.keys():
lastb = b
if eff_area is not None:
for b, band in enumerate(conf.BANDS):
if band in fband:
eff_area_deg = eff_area[band] * (conf.PIXEL_SCALE / 3600)**2
hdr.set(f'AREA{b+lastb}', eff_area_deg, f'{band} EFF_AREA (deg2)')
hdu_info = fits.ImageHDU(header=hdr, name='CONFIG')
hdu_table = fits.table_to_hdu(fbrick.catalog)
hdul = fits.HDUList([fits.PrimaryHDU(), hdu_table, hdu_info])
hdul.writeto(os.path.join(conf.CATALOG_DIR, f'B{fbrick.brick_id}_{mode_ext}.cat'), overwrite=conf.OVERWRITE)
logger.info(f'Saving results for brick #{fbrick.brick_id} to new {mode_ext} catalog file.')
outcatalog = fbrick.catalog
# If user wants model and/or residual images made:
if conf.MAKE_RESIDUAL_IMAGE:
fbrick.make_residual_image(catalog=outcatalog, use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=False)
elif conf.MAKE_MODEL_IMAGE:
fbrick.make_model_image(catalog=outcatalog, use_band_position=force_unfixed_pos, use_band_shape=use_band_shape, modeling=False)
del fbrick
return
def make_model_image(brick_id, band, catalog=None, use_band_position=(not conf.FREEZE_FORCED_POSITION), use_band_shape=(not conf.FREEZE_FORCED_SHAPE), modeling=False):
# USE BAND w/ MODELING NICKNAME FOR MODELING RESULTS!
if band.startswith(conf.MODELING_NICKNAME):
nickname = conf.MULTIBAND_NICKNAME
sband = band[len(conf.MODELING_NICKNAME)+1:]
modeling=True
elif band == conf.MODELING_NICKNAME:
nickname = conf.MODELING_NICKNAME
sband = conf.MODELING_NICKNAME
modeling=True
else:
nickname = conf.MULTIBAND_NICKNAME
sband = band
modeling=False
brick = stage_brickfiles(brick_id, nickname=nickname, band=sband)
# print(brick.bands)
if catalog is not None:
brick.catalog = catalog
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
if use_band_position | use_band_shape:  # NOTE: the original referenced an undefined 'use_single_band_run' flag
use_band_position=True
else:
use_band_position=False
else:
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
search_fn2 = os.path.join(conf.CATALOG_DIR, f'B{brick_id}_{band}.cat') # this means the band was run by itself!
if os.path.exists(search_fn) & ~(use_band_position | use_band_shape):  # NOTE: see note above on 'use_single_band_run'
brick.logger.info(f'Adopting catalog from {search_fn}')
brick.catalog = Table(fits.open(search_fn)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=False
elif os.path.exists(search_fn2) & (use_band_position | use_band_shape):
brick.logger.info(f'Adopting catalog from {search_fn2}')
brick.catalog = Table(fits.open(search_fn2)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=True
else:
raise ValueError(f'No valid catalog was found for {brick_id}')
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
brick.segmap = hdul_seg['SEGMAP'].data
brick.blobmap = hdul_seg['BLOBMAP'].data
brick.segmask = brick.segmap.copy()
brick.segmask[brick.segmap>0] = 1
else:
raise ValueError(f'No valid segmentation map was found for {brick_id}')
brick.run_background()
brick.make_model_image(brick.catalog, use_band_position=use_band_position, modeling=modeling)
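# Example (hypothetical call; assumes the brick file, segmentation maps and catalog
# for this brick already exist in conf.BRICK_DIR, conf.INTERIM_DIR and conf.CATALOG_DIR;
# 'hsc_i' is an illustrative band name):
#   make_model_image(brick_id=1, band='hsc_i')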
def make_residual_image(brick_id, band, catalog=None, use_band_position=(not conf.FREEZE_FORCED_POSITION), use_band_shape=(not conf.FREEZE_FORCED_SHAPE), modeling=False):
# USE BAND w/ MODELING NICKNAME FOR MODELING RESULTS!
if band.startswith(conf.MODELING_NICKNAME) | ((modeling==True) & (band != conf.MODELING_NICKNAME)):
nickname = conf.MULTIBAND_NICKNAME
if band.startswith(conf.MODELING_NICKNAME):
sband = band[len(conf.MODELING_NICKNAME)+1:]
else:
sband = band
modeling=True
elif band == conf.MODELING_NICKNAME:
nickname = conf.MODELING_NICKNAME
sband = conf.MODELING_NICKNAME
modeling=True
else:
nickname = conf.MULTIBAND_NICKNAME
sband = band
modeling=False
brick = stage_brickfiles(brick_id, nickname=nickname, band=sband)
# if modeling:
# brick.bands = np.array([f'{conf.MODELING_NICKNAME}_{s}' for s in [band,]])
if catalog is not None:
brick.catalog = catalog
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
else:
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
search_fn2 = os.path.join(conf.CATALOG_DIR, f'B{brick_id}_{conf.MULTIBAND_NICKNAME}.cat') # this means the band was run by itself!
search_fn3 = os.path.join(conf.CATALOG_DIR, f'B{brick_id}_{band}.cat')
if os.path.exists(search_fn) & ~(use_band_position | use_band_shape) & (band in conf.MODELING_BANDS):
brick.logger.info(f'Adopting catalog from {search_fn}')
brick.catalog = Table(fits.open(search_fn)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=False
elif os.path.exists(search_fn2) & ((band not in conf.MODELING_BANDS) | (use_band_position | use_band_shape)):
brick.logger.info(f'Adopting catalog from {search_fn2}') # Tries to find BXXX_MULTIBAND.fits
brick.catalog = Table(fits.open(search_fn2)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=(f'X_MODEL_{band}' in brick.catalog.colnames)
elif os.path.exists(search_fn3) & (use_band_position | use_band_shape):
brick.logger.info(f'Adopting catalog from {search_fn3}') # Tries to find BXXX_BAND.fits
brick.catalog = Table(fits.open(search_fn3)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=True
else:
raise ValueError(f'No valid catalog was found for {brick_id}')
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
brick.segmap = hdul_seg['SEGMAP'].data
brick.blobmap = hdul_seg['BLOBMAP'].data
brick.segmask = brick.segmap.copy()
brick.segmask[brick.segmap>0] = 1
else:
raise ValueError(f'No valid segmentation map was found for {brick_id}')
brick.run_background()
brick.make_residual_image(brick.catalog, use_band_position=use_band_position, use_band_shape=use_band_shape, modeling=modeling)
def estimate_effective_area(brick_id, band, catalog=None, save=False, use_band_position=(not conf.FREEZE_FORCED_POSITION), use_band_shape=(not conf.FREEZE_FORCED_SHAPE), modeling=False):
if band.startswith(conf.MODELING_NICKNAME) | ((modeling==True) & (band != conf.MODELING_NICKNAME)):
nickname = conf.MULTIBAND_NICKNAME
if band.startswith(conf.MODELING_NICKNAME):
sband = band[len(conf.MODELING_NICKNAME)+1:]
else:
sband = band
modeling=True
elif band == conf.MODELING_NICKNAME:
nickname = conf.MODELING_NICKNAME
sband = conf.MODELING_NICKNAME
modeling=True
else:
nickname = conf.MULTIBAND_NICKNAME
sband = band
modeling=False
brick = stage_brickfiles(brick_id, nickname=nickname, band=sband)
if catalog is not None:
brick.catalog = catalog[catalog['brick_id']==brick_id]
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
else:
search_fn = os.path.join(conf.CATALOG_DIR, f'B{brick_id}.cat')
search_fn2 = os.path.join(conf.CATALOG_DIR, f'B{brick_id}_{conf.MULTIBAND_NICKNAME}.cat') # this means the band was run by itself!
search_fn3 = os.path.join(conf.CATALOG_DIR, f'B{brick_id}_{band}.cat')
if os.path.exists(search_fn) & ~(use_band_position | use_band_shape):
brick.logger.info(f'Adopting catalog from {search_fn}')
brick.catalog = Table(fits.open(search_fn)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=False
elif os.path.exists(search_fn2) & (use_band_position | use_band_shape):
brick.logger.info(f'Adopting catalog from {search_fn2}') # Tries to find BXXX_MULTIBAND.fits
brick.catalog = Table(fits.open(search_fn2)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=True
elif os.path.exists(search_fn3) & (use_band_position | use_band_shape):
brick.logger.info(f'Adopting catalog from {search_fn3}') # Tries to find BXXX_BAND.fits
brick.catalog = Table(fits.open(search_fn3)[1].data)
brick.n_sources = len(brick.catalog)
brick.n_blobs = brick.catalog['blob_id'].max()
use_band_position=True
else:
raise ValueError(f'No valid catalog was found for {brick_id}')
import os
search_fn = os.path.join(conf.INTERIM_DIR, f'B{brick_id}_SEGMAPS.fits')
if os.path.exists(search_fn):
hdul_seg = fits.open(search_fn)
brick.segmap = hdul_seg['SEGMAP'].data
brick.blobmap = hdul_seg['BLOBMAP'].data
brick.segmask = brick.segmap.copy()
brick.segmask[brick.segmap>0] = 1
else:
raise ValueError(f'No valid segmentation map was found for {brick_id}')
# brick.run_background()
good_area_pix, inner_area_pix = brick.estimate_effective_area(brick.catalog, sband, modeling=modeling)
if save:
import os
outF = open(os.path.join(conf.INTERIM_DIR, f"effarea_{band}_{brick_id}.dat"), "w")
outF.write(f'{good_area_pix}\n{inner_area_pix}')
outF.close()
return good_area_pix, inner_area_pix
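# Example (hypothetical call; 'hsc_i' is an illustrative band name):
#   good_pix, inner_pix = estimate_effective_area(brick_id=1, band='hsc_i', save=True)
#   # pixels can be converted to deg2 the same way as in the forced-photometry stage:
#   # area_deg2 = good_pix * (conf.PIXEL_SCALE / 3600)**2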
def stage_brickfiles(brick_id, nickname='MISCBRICK', band=None, modeling=False, is_detection=False):
""" Essentially a private function. Pre-processes brick files and relevant catalogs """
# Wraps Brick with a single parameter call
# THIS ASSUMES YOU HAVE IMG, WGT, and MSK FOR ALL BANDS!
path_brickfile = os.path.join(conf.BRICK_DIR, f'B{brick_id}_N{nickname}_W{conf.BRICK_WIDTH}_H{conf.BRICK_HEIGHT}.fits')
logger.info(f'Staging brickfile ({path_brickfile})')
if modeling & (band is None):
sbands = [nickname,]
elif band is None:
sbands = conf.BANDS
else:
if type(band) == list:
sbands = band
else:
sbands = [band,]
# conf.BANDS = sbands
[logger.debug(f' *** {i}') for i in sbands]
if os.path.exists(path_brickfile):
# Stage things
images = np.zeros((len(sbands), conf.BRICK_WIDTH + 2 * conf.BRICK_BUFFER, conf.BRICK_HEIGHT + 2 * conf.BRICK_BUFFER))
weights = np.zeros_like(images)
masks = np.zeros_like(images, dtype=bool)
# -*- coding: utf-8 -*-
"""
Created on Tue May 19 17:54:12 2020
@author: Shaji,Charu,Selva
"""
import scipy
import numpy as np
import pandas as pd
from sklearn.impute import KNNImputer
from . import helper
from . import exceptions
pd.set_option('mode.chained_assignment', None)
def get_distance(dataset,
start_latitude,
start_longitude,
end_latitude,
end_longitude):
"""
Usage: [arg1]:[Pandas DataFrame],[arg2]:[column-start_latitude],[arg3]:[column-start_longitude],[arg4]:[column-end_latitude],[arg5]:[column-end_longitude]
Returns: DataFrame with additional column [Distance in kilometers]
"""
print(
"This module (ctrl4ai.preprocessing) will be deprecated by the end of 2021. Please plan to switch to the same functions in ")
dataset['kms_' + start_latitude + '_' + end_latitude] = dataset.apply(
lambda row: helper.distance_calculator(row[start_latitude], row[start_longitude], row[end_latitude],
row[end_longitude]), axis=1)
return dataset
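# Example (hypothetical data; the column names are illustrative):
#   trips = pd.DataFrame({'start_lat': [12.97], 'start_lon': [77.59],
#                         'end_lat': [13.08], 'end_lon': [80.27]})
#   trips = get_distance(trips, 'start_lat', 'start_lon', 'end_lat', 'end_lon')
#   # adds the column 'kms_start_lat_end_lat' holding the distance in kilometers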
def get_timediff(dataset,
start_time,
end_time):
"""
Usage: [arg1]:[Pandas DataFrame],[arg2]:[column-start_time],[arg3]:[column-end_time]
Returns: DataFrame with additional column [Duration in seconds]
"""
dataset['secs_diff_' + start_time + '_' + end_time] = (dataset[end_time] - dataset[start_time]).dt.total_seconds()
return dataset
def derive_from_datetime(dataset):
"""
Usage: [arg1]:[pandas dataframe]
Prerequisite: Type for datetime columns to be defined correctly
Description: Derives the hour, weekday, year and month from a datetime column
Returns: Dataframe [with new columns derived from datetime columns]
"""
columns = []
for column, dtype in dataset.dtypes.items():
if 'datetime' in str(dtype):
columns.append(column)
dataset['hour_of_' + column] = dataset[column].apply(lambda x: x.hour)
dataset['weekday_of_' + column] = dataset[column].apply(lambda x: x.weekday())
dataset['year_of_' + column] = dataset[column].apply(lambda x: x.year)
dataset['month_of_' + column] = dataset[column].apply(lambda x: x.month)
return dataset, columns
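# Example (hypothetical data; 'order_time' must already be a datetime column):
#   df = pd.DataFrame({'order_time': pd.to_datetime(['2021-01-05 10:30', '2021-02-07 22:15'])})
#   df, datetime_cols = derive_from_datetime(df)
#   # adds hour_of_order_time, weekday_of_order_time, year_of_order_time, month_of_order_time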
def log_transform(dataset, method='yeojohnson', define_continuous_cols=[], ignore_cols=[], categorical_threshold=0.3):
"""
Usage: [arg1]:[pandas dataframe],[method]=['yeojohnson'/'added_constant']
Description: Checks if the a continuous column is skewed and does log transformation
Returns: Dataframe [with all skewed columns normalized using appropriate approach]
"""
continuous_columns = []
for col in define_continuous_cols:
if col not in ignore_cols:
continuous_columns.append(col)
for col in dataset.columns:
if col not in continuous_columns+ignore_cols:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold) == False and helper.check_numeric_col(dataset[col]):
continuous_columns.append(col)
for col in dataset.columns:
if col in continuous_columns and np.abs(scipy.stats.skew(dataset[col])) > 1:
print('Log Normalization(' + method + ') applied for ' + col)
if method == 'yeojohnson':
dataset[col] = dataset[col].apply(lambda x: helper.yeojohnsonlog(x))
elif method == 'added_constant':
dataset = helper.added_constant_log(dataset, col)
return dataset
def drop_null_fields(dataset,
dropna_threshold=0.7, ignore_cols=[]):
"""
Usage: [arg1]:[pandas dataframe],[dropna_threshold(default=0.7)]:[What percentage of nulls should account for the column to be removed],[ignore_cols]:[columns that shouldn't be dropped]
Description: Drop columns that has more null values
Returns: Dataframe [with null dominated columns removed]
"""
no_of_records = dataset.shape[0]
select_cols = []
dropped_cols = []
for index, val in dataset.isnull().sum().items():
if val / no_of_records < dropna_threshold or index in ignore_cols:
select_cols.append(index)
else:
dropped_cols.append(index)
print('Dropping null dominated column(s) ' + index)
return dataset[select_cols], dropped_cols
def drop_single_valued_cols(dataset):
"""
Usage: [arg1]:[pandas dataframe]
Description: Drop columns that has only one value in it
Returns: Dataframe [without single valued columns]
"""
single_valued_cols = []
for col in dataset.columns:
if helper.single_valued_col(dataset[col]):
single_valued_cols.append(col)
if len(single_valued_cols) > 0:
print('Dropping single valued column(s) ' + ','.join(single_valued_cols))
dataset = dataset.drop(single_valued_cols, axis=1)
return dataset
def get_ohe_df(dataset,
target_variable=None,
define_nominal_cols=[],
ignore_cols=[],
drop_first=True,
categorical_threshold=0.3):
"""
Usage: [arg1]:[pandas dataframe],[target_variable(default=None)]:[Dependent variable for Regression/Classification],[ignore_cols]:[categorical columns where one hot encoding need not be done],[categorical_threshold(default=0.3)]:[Threshold for determining categorical column based on the percentage of unique values(optional)]
Description: Auto identifies categorical features in the dataframe and does one hot encoding
Note: Consumes more system memory if the size of the dataset is huge
Returns: Dataframe [with separate column for each categorical values]
"""
nominal_cols = []
nominal_cols.extend(define_nominal_cols)
columns = []
for col in dataset:
if col not in nominal_cols+ignore_cols and col != target_variable:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold):
nominal_cols.append(col)
for col in dataset.columns:
if col in nominal_cols and col not in ignore_cols:
print('One hot encoding ' + col)
columns.append(col)
dataset = helper.one_hot_encoding(dataset, [col], drop_first=drop_first)
return dataset, columns
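# Example (hypothetical data; 'city' is declared nominal so it is one hot encoded):
#   df = pd.DataFrame({'city': ['NY', 'SF', 'NY', 'LA'], 'price': [10, 12, 9, 11]})
#   df, encoded_cols = get_ohe_df(df, target_variable='price', define_nominal_cols=['city'])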
def drop_non_numeric(dataset):
"""
Usage: [arg1]:[pandas dataframe]
Description: Drop columns that are not numeric
Returns: Dataframe [only numeric features]
"""
drop_cols = []
for col in dataset.columns:
if helper.check_numeric_col(dataset[col]) == False:
drop_cols.append(col)
if len(drop_cols) > 0:
print("Dropping non categorical/continuous column(s):" + ','.join(drop_cols))
dataset = dataset.drop(drop_cols, axis=1)
return dataset
def impute_nulls(dataset,
method='central_tendency',
define_continuous_cols=[],
define_nominal_cols=[],
define_ordinal_cols=[],
categorical_threshold=0.3):
"""
Usage: [arg1]:[pandas dataframe],[method(default=central_tendency)]:[Choose either central_tendency or KNN]
Description: Auto identifies the type of distribution in the column and imputes null values
Note: KNN consumes more system memory if the size of the dataset is huge
Returns: Dataframe [with null values imputed]
"""
nominal_cols = []
ordinal_cols = []
continuous_cols = []
nominal_cols.extend(define_nominal_cols)
continuous_cols.extend(define_continuous_cols)
ordinal_cols.extend(define_ordinal_cols)
for col in dataset:
if col not in nominal_cols + ordinal_cols + continuous_cols:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold):
nominal_cols.append(col)
elif helper.check_numeric_col(dataset[col]):
continuous_cols.append(col)
if str.lower(method) == 'knn':
for col, value in dataset.isnull().sum().items():
if value > 0:
if col in nominal_cols + ordinal_cols:
print("KNN (Only Categorical): Replaced nulls in " + col + " with mode")
mode_val = dataset[col].mode()[0]
dataset[col] = dataset[col].fillna(mode_val)
k_knn = int(np.ceil(np.sqrt(dataset.shape[0])))
if k_knn % 2 == 0:
k_knn += 1
imputer = KNNImputer(n_neighbors=k_knn)
knn_imputed_array = imputer.fit_transform(dataset)
dataset = pd.DataFrame(knn_imputed_array, columns=dataset.columns)
return dataset
elif method == 'central_tendency':
for col, value in dataset.isnull().sum().items():
if value > 0:
if col in nominal_cols + ordinal_cols:
print("Replaced nulls in " + col + " with mode")
mode_val = dataset[col].mode()[0]
dataset[col] = dataset[col].fillna(mode_val)
elif col in continuous_cols:
if np.abs(scipy.stats.skew(dataset[col])) > 1:
print("Replaced nulls in " + col + " with median")
median_val = dataset[col].median()
dataset[col] = dataset[col].fillna(median_val)
else:
print("Replaced nulls in " + col + " with mean")
mean_val = dataset[col].mean()
dataset[col] = dataset[col].fillna(mean_val)
return dataset
else:
raise exceptions.ParameterError('Method should be either central_tendency or knn')
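# Example (hypothetical data; 'grade' is imputed with the mode, 'age' with the mean/median):
#   df = pd.DataFrame({'age': [25, 30, None, 41], 'grade': ['A', None, 'B', 'A']})
#   df = impute_nulls(df, method='central_tendency',
#                     define_continuous_cols=['age'], define_nominal_cols=['grade'])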
def label_encode(dataset,
col):
"""
Usage: [arg1]:[pandas dataframe],[arg1]:[column to be encoded]
Description: Labelling categorical features with numbers from 0 to n categories
Returns: Label Dict , Dataframe
"""
mode_val = dataset[col].mode()[0]
# dataset[col] = dataset[col].apply(lambda x: str(x).strip()).astype(str).fillna(mode_val)
dataset[col] = dataset[col].fillna(mode_val)
label_dict = dict(zip(dataset[col].unique(), np.arange(dataset[col].unique().shape[0])))
dataset = dataset.replace({col: label_dict})
dataset[col] = dataset[col].astype('int')
dataset[col] = dataset[col].astype('category')
return label_dict, dataset
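# Example (hypothetical data):
#   labels, df = label_encode(df, 'grade')
#   # labels maps each category to an integer, e.g. {'A': 0, 'B': 1},
#   # and df['grade'] becomes an integer-coded categorical column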
def remove_outlier_df(dataset,
cols):
"""
Usage: [arg1]:[pandas dataframe],[arg2]:[list of columns to check and remove outliers]
Description: The column needs to be continuous
Returns: DataFrame with outliers removed for the specific columns
"""
for col in cols:
outlier_temp_dataset = pd.DataFrame(dataset[col])
outlier_temp_dataset = impute_nulls(outlier_temp_dataset)
Q1 = outlier_temp_dataset.quantile(0.25)
Q3 = outlier_temp_dataset.quantile(0.75)
IQR = Q3 - Q1
outlier_bool_dataset = ((outlier_temp_dataset > (Q1 - 1.5 * IQR)) & (outlier_temp_dataset < (Q3 + 1.5 * IQR)))
select_index = outlier_bool_dataset.index[outlier_bool_dataset[col] == True]
print('No. of outlier rows removed based on ' + col + ' is ' + str(
outlier_temp_dataset.shape[0] - len(select_index)))
dataset = dataset.iloc[select_index].reset_index(drop=True)
return dataset
def auto_remove_outliers(dataset,
ignore_cols=[],
categorical_threshold=0.3,
define_continuous_cols=[]):
"""
Usage: [arg1]:[pandas dataframe],[ignore_cols]:[list of columns to be ignored],[categorical_threshold(default=0.3)]:[Threshold for determining categorical column based on the percentage of unique values(optional)]
Description: Checks if the column is continuous and removes outliers
Returns: DataFrame with outliers removed
"""
continuous_columns = []
for col in define_continuous_cols:
if col not in ignore_cols:
continuous_columns.append(col)
for col in dataset.columns:
if col not in continuous_columns+ignore_cols:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold) == False and helper.check_numeric_col(dataset[col]) == True:
continuous_columns.append(col)
dataset = remove_outlier_df(dataset, continuous_columns)
return dataset
def get_label_encoded_df(dataset,
categorical_threshold=0.3,
define_nominal_cols=[],
ignore_cols=[]):
"""
Usage: [arg1]:[pandas dataframe],[categorical_threshold(default=0.3)]:[Threshold for determining categorical column based on the percentage of unique values(optional)]
Description: Auto identifies categorical features in the dataframe and does label encoding
Returns: Dictionary [Labels for columns],Dataframe [with categorical columns label encoded]
"""
nominal_cols = []
nominal_cols.extend(define_nominal_cols)
for col in dataset:
if col not in nominal_cols+ignore_cols:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold):
nominal_cols.append(col)
column_labels = dict()
for col in dataset.columns:
if col not in ignore_cols:
if helper.check_numeric_col(dataset[col]):
continue
elif col in nominal_cols:
labels, dataset = label_encode(dataset, col)
print('Labels for ' + col + ': ' + str(labels))
column_labels[col] = labels
return column_labels, dataset
def get_ordinal_encoded_df(dataset, custom_ordinal_dict=dict()):
"""
Usage: [arg1]:[pandas dataframe],[arg2]:[Pre-defined ordinal scale dictionary]
Description: Identifies ordinal columns and translate them to numbers
Returns: Dictionary [Labels for columns], Dataframe [with ordinal values converted to number]
"""
column_labels = dict()
for col in dataset:
if col in custom_ordinal_dict.keys():
dataset[col] = dataset[col].astype(str).map(custom_ordinal_dict[col])
mode_val = dataset[col].mode()[0]
dataset[col] = dataset[col].fillna(mode_val).astype('int')
else:
result, mapper = helper.check_ordinal_col(dataset[col])
if result:
dataset[col] = dataset[col].astype(str).map(mapper)
mode_val = dataset[col].mode()[0]
dataset[col] = dataset[col].fillna(mode_val).astype('int')
column_labels[col] = mapper
print('Labels for ' + col + ': ' + str(mapper))
return column_labels, dataset
def cramersv_corr(x, y):
"""
Usage: [arg1]:[categorical series],[arg2]:[categorical series]
Description: Cramer's V Correlation is a measure of association between two categorical variables
Returns: A value between 0 and +1
"""
confusion_matrix = pd.crosstab(x, y)
chi2 = scipy.stats.chi2_contingency(confusion_matrix)[0]
n = confusion_matrix.sum().sum()
phi2 = chi2 / n
r, k = confusion_matrix.shape
phi2corr = max(0, phi2 - ((k - 1) * (r - 1)) / (n - 1))
rcorr = r - ((r - 1) ** 2) / (n - 1)
kcorr = k - ((k - 1) ** 2) / (n - 1)
return np.sqrt(phi2corr / min((kcorr - 1), (rcorr - 1)))
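# Example (hypothetical data; both series are nominal/categorical):
#   gender = pd.Series(['M', 'F', 'F', 'M', 'F', 'M'])
#   churn = pd.Series(['yes', 'no', 'no', 'yes', 'no', 'no'])
#   assoc = cramersv_corr(gender, churn)  # value between 0 and 1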
def kendalltau_corr(x, y):
"""
Usage: [arg1]:[continuous series],[arg2]:[categorical series]
Description: Kendall Tau Correlation is a measure of association between an ordinal feature and an ordinal feature
Returns: A value between -1 and +1
"""
x_arr = np.array(impute_nulls(pd.DataFrame(x)))
y_arr = np.array(impute_nulls(pd.DataFrame(y)))
corr, _ = scipy.stats.kendalltau(x_arr, y_arr)
return corr
def spearmans_corr(x, y):
"""
Usage: [arg1]:[continuous series],[arg2]:[categorical series]
Description: Spearman Correlation is a measure of association between a continuous feature and an ordinal/continuous feature with a monotonic relationship
Returns: A value between -1 and +1
"""
x_arr = np.array(impute_nulls(pd.DataFrame(x)))
y_arr = np.array(impute_nulls(pd.DataFrame(y)))
corr, _ = scipy.stats.spearmanr(x_arr, y_arr)
return corr
def pearson_corr(x, y):
"""
Usage: [arg1]:[continuous series],[arg2]:[continuous series]
Description: Pearson Correlation is a measure of association between two continuous features
Returns: A value between -1 and +1
"""
x = pd.to_numeric(x)
y = pd.to_numeric(y)
return np.corrcoef(x, y)[0, 1]
def nominal_scale_corr(nominal_series, continuous_series):
"""
Usage: [arg1]:[nominal series],[arg2]:[continuous series]
Description: Ctrl4AI's Nominal Scale Correlation is a measure of association between a nominal feature and a continuous feature
Returns: A value between 0 and 1
"""
mean_val = continuous_series.mean()
continuous_series = continuous_series.fillna(mean_val)
len_nominal = len(nominal_series.unique())
best_corr = 0
for bin_size in ['even', 'distributed']:
for bins in [None, len_nominal]:
binned_series = binning(continuous_series, bin_size=bin_size, bins=bins)
corr_val = cramersv_corr(nominal_series, binned_series)
if corr_val > best_corr:
best_corr = corr_val
return best_corr
def get_correlated_features(dataset,
target_col,
target_type,
correlation_threshold=None,
categorical_threshold=0.3,
define_continuous_cols=[],
define_nominal_cols=[],
define_ordinal_cols=[]):
"""
Usage: [arg1]:[pandas dataframe],[arg2]:[target/dependent variable],[arg3]:['continuous'/'categorical'],[correlation_threshold(default=2/sqrt(dataset.shape[0]))]:[The threshold value for a good correlation],[categorical_threshold(default=0.3)]:[Threshold for determining categorical column based on the percentage of unique values(optional)]
Description: Only for supervised learning to select independent variables that have some correlation with the target/dependent variable (Uses Pearson correlation between two continuous variables, CramersV correlation between two categorical variables, Kendalls Tau correlation between a categorical and a continuous variable)
Returns: Dictionary of correlation coefficients, List of columns that have considerable correlation
"""
nominal_cols = []
ordinal_cols = []
continuous_cols = []
nominal_cols.extend(define_nominal_cols)
ordinal_cols.extend(define_ordinal_cols)
continuous_cols.extend(define_continuous_cols)
col_corr = dict()
if correlation_threshold is None:
correlation_threshold = 2 / np.sqrt(dataset.shape[0])
for col in dataset:
if col not in nominal_cols + continuous_cols + ordinal_cols:
if col != target_col:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold):
nominal_cols.append(col)
elif helper.check_numeric_col(dataset[col]):
continuous_cols.append(col)
if target_type == 'continuous':
for col in continuous_cols:
coeff = pearson_corr(dataset[col], dataset[target_col])
col_corr[col] = coeff
for col in ordinal_cols:
coeff = kendalltau_corr(dataset[col], dataset[target_col])
col_corr[col] = coeff
for col in nominal_cols:
coeff = nominal_scale_corr(dataset[col], dataset[target_col])
col_corr[col] = coeff
if target_type == 'categorical':
for col in continuous_cols:
coeff = kendalltau_corr(dataset[col], dataset[target_col])
col_corr[col] = coeff
for col in ordinal_cols + nominal_cols:
coeff = cramersv_corr(dataset[col], dataset[target_col])
col_corr[col] = coeff
selected_features = []
for col in col_corr.keys():
if np.abs(float(col_corr[col])) > np.abs(correlation_threshold):
selected_features.append(col)
return col_corr, selected_features
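# Example (hypothetical data; keeps features whose correlation with the target exceeds
# the default threshold 2/sqrt(number of rows)):
#   corr_dict, selected = get_correlated_features(df, target_col='price',
#                                                 target_type='continuous')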
def binning(pdSeries, bin_size='even', bins=None):
"""
Usage: [arg1]:[Pandas Series],[bin_size(default=even)]:[even/distributed]
Description: Will split into intervals of equal width if bin_size is 'even'. Otherwise, data will be distributed into variable-width bins with more or less the same frequency of data
Returns: Pandas Series with Values converted to Intervals
"""
if bins is None:
bins = helper.freedman_diaconis(pdSeries, returnas='bins')
if str.lower(bin_size) == 'even':
new_pdSeries = pd.cut(pdSeries, bins=bins)
else:
new_pdSeries = pd.qcut(pdSeries, q=bins, duplicates='drop')
return new_pdSeries
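# Example (hypothetical data):
#   ages = pd.Series([21, 25, 33, 41, 52, 60, 64, 70])
#   equal_width = binning(ages, bin_size='even', bins=4)          # equal-width intervals
#   equal_freq = binning(ages, bin_size='distributed', bins=4)    # roughly equal-frequency intervals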
def multicollinearity_check(corr_df, threshold=0.7):
"""
Usage: [arg1]:[Correlation Result DataFrame],[threshold(default=0.7)]:[Value in the range of 0-1]
Description: Finds pairs of features whose absolute pairwise correlation is greater than or equal to the threshold
Returns: List of ([feature_1, feature_2], correlation) tuples
"""
result_set = []
for col in corr_df.columns:
for row in corr_df[col].index:
if col != row:
val = corr_df[col][row]
if helper.get_absolute(val) >= threshold:
cols = [col, row]
cols.sort()
if (cols, val) not in result_set:
result_set.append((cols, val))
return result_set
def get_multicollinearity_removals(corr_df, target_variable, threshold=0.7):
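    # Usage: [arg1]:[Correlation Result DataFrame],[arg2]:[target variable],[threshold(default=0.7)]:[Value in the range of 0-1]
    # Description: Suggests columns to drop so that no pair of remaining features (excluding the target variable) stays correlated above the threshold
    # Returns: List of column names that can be removed to reduce multicollinearity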
res = multicollinearity_check(corr_df, threshold=threshold)
corr = list(set([helper.get_absolute(item[1]) for item in res]))
corr.sort(reverse=True)
tgt_corr = corr_df[target_variable].to_dict()
remove_list = []
for val in corr:
for item in res:
if helper.get_absolute(item[1]) == val:
cols = item[0]
if target_variable not in cols:
if len(helper.intersection(cols, remove_list)) == 0:
if cols[0] < cols[1]:
remove_list.append(cols[0])
else:
remove_list.append(cols[1])
return remove_list
def dataset_summary(dataset,
define_continuous_cols=[],
define_nominal_cols=[],
define_ordinal_cols=[],
categorical_threshold=0.3):
"""
Usage: [arg1]:[pandas dataframe]
Description: Returns summary of DataFrame
Returns: [Summary Dict]
"""
nominal_cols = []
ordinal_cols = []
continuous_cols = []
nominal_cols.extend(define_nominal_cols)
continuous_cols.extend(define_continuous_cols)
ordinal_cols.extend(define_ordinal_cols)
for col in dataset:
if col not in nominal_cols + ordinal_cols + continuous_cols:
if helper.check_categorical_col(dataset[col], categorical_threshold=categorical_threshold):
nominal_cols.append(col)
elif helper.check_numeric_col(dataset[col]):
continuous_cols.append(col)
dataset_summary = dict()
for col in ordinal_cols:
dataset_summary[col] = dict()
dataset_summary[col]['type'] = 'ordinal'
col_summary = dataset[col].describe().to_dict()
dataset_summary[col].update(col_summary)
dataset_summary[col]['mode'] = dataset[col].mode()[0]
dataset_summary[col]['min'] = dataset[col].min()
dataset_summary[col]['max'] = dataset[col].max()
for col in nominal_cols:
dataset_summary[col] = dict()
dataset_summary[col]['type'] = 'nominal'
col_summary = dataset[col].describe().to_dict()
dataset_summary[col].update(col_summary)
dataset_summary[col]['mode'] = dataset[col].mode()[0]
for col in continuous_cols:
dataset_summary[col] = dict()
dataset_summary[col]['type'] = 'continuous'
col_summary = dataset[col].describe().to_dict()
dataset_summary[col].update(col_summary)
dataset_summary[col]['mean'] = dataset[col].mean()
dataset_summary[col]['median'] = dataset[col].median()
dataset_summary[col]['min'] = dataset[col].min()
dataset_summary[col]['max'] = dataset[col].max()
if np.abs(scipy.stats.skew(dataset[col])) > 1:
dataset_summary[col]['Skewed'] = 'Y'
else:
dataset_summary[col]['Skewed'] = 'N'
return dataset_summary
def split_dataset(dataset, n_splits, proportion=None, mode=None, shuffle=False):
if mode == 'equal':
each_proportion = int((1/n_splits)*100)
proportion = [each_proportion for i in range(n_splits-1)]
final_val = 100 - sum(proportion)
proportion.append(final_val)
proportion = [val/100 for val in proportion]
if len(proportion) != n_splits:
raise exceptions.ParameterError('n_splits should be equal to the number of values in proportion')
if sum(proportion) != 1:
raise exceptions.ParameterError('The sum of values in proportion should be 1')
indices = list(dataset.index)
if shuffle:
np.random.shuffle(indices)
df_list = []
indices_split = []
prev = 0
length = len(indices)
for ctr in range(n_splits):
max_records = int(np.floor(proportion[ctr] * length))
#!/usr/bin/env python
## Copyright (c) 2009, <NAME>
## Original Matlab version of GC2D, <NAME>
## GC2D first converted to Python/NumPy in 2009 by <NAME>
################################################################
# NOTE: TopoFlow can provide "mass balance" for GC2D, but
# the timescales are very different. TopoFlow should
# pass some kind of "net" or cumulative "mass balance"
# to GC2D at its large timestep.
#
# NOTE: There is no "load_mask()" function yet, but it is
# called in a "try" block.
#
# NOTE: THERMAL_TOGGLE option does not work yet.
# See notes below regarding undefined vars.
#
# NOTE: Should carefully test update_vars() due to
# a bug fix and other changes to the code.
# Compare to update_vars_OLD().
#
# NOTE: Check that all "numpy" function calls include "numpy.".
# Fixed calls to "mean()", "nonzero()", "ravel()",
# abs() vs. absolute(), max(A,B) vs. maximum(A,B), etc.
#
################################################################
import numpy
import time
import sys
import logging
# import getopt
import scipy # scipy.signal.convolve, scipy.io.loadmat
from scipy import interpolate
from scipy import signal
# SDP. 10/24/11. No longer available. Deprecated?
# from scipy.io.numpyio import fwrite # used by print_watch_point()
#--------------------------------------------------------------------------------------------------
# run_model() # (for testing)
# ------------------------------
# Classes (used as structures)
# ------------------------------
# MassBalance
# BoundaryCond
# Parameters
# InputParams
# OutputParams
# Toggles
#
# -----------
# Functions
# -----------
# compress_grid()
# filter2d()
# add_halo()
# set_bc()
# difference_grid()
# basal_shear_stress()
# iceflow()
# ice_sliding()
# sum_ice_motion()
# avalanche()
# calve()
# mass_balance()
# mass_conservation()
# load_dem()
# load_dem_var()
# load_mask() ###### Not written, but called. #####
# get_timestep()
# update_vars()
# print_watch_point()
# update()
# init_valley_glacier()
# init_ice_sheet()
# resample_dem()
# init_ice_surface()
# load_state()
# #### load_state_old()
# #### run_for()
#--------------------------------------------------------------------------------------------------
def run_model(t_max=10.0, DEM_file='Animas_200.mat', SILENT=False):
Toggles.VARIABLE_DT_TOGGLE = 0 # (or change to 1)
###################################
print('Starting GC2D test run...')
print('Reading input file...')
( H, Zb, Zi, dx, dy ) = load_state(DEM_file=DEM_file,
RESTART_TOGGLE = 0,
INIT_COND_TOGGLE=1 )
ny, nx = Zb.shape
#------------------
# Initialize vars
#------------------
t = numpy.float64(0)
conserveIce = numpy.float64(0) # (total ice mass ??)
meltrate = numpy.zeros( (ny, nx), dtype='float64' )
## fd_watch = {}
## fd_watch['thick'] = open( 'thickness_py.bin' , 'wb' )
## counter = 0
while (t < t_max):
(dt, t, H, Zi, meltrate, conserveIce) = update( t, H, Zb, dx, dy,
meltrate, conserveIce,
SILENT=SILENT)
## COMPRESS_TOGGLE = Toggles.COMPRESS_TOGGLE,
## ICEFLOW_TOGGLE = Toggles.ICEFLOW_TOGGLE,
## ICESLIDE_TOGGLE = Toggles.ICESLIDE_TOGGLE,
## VARIABLE_DT_TOGGLE = Toggles.VARIABLE_DT_TOGGLE,
## dtDefault=Parameters.dtDefault,
## dtMax=Parameters.dtMax)
#-----------------------
# Print a short report
#-----------------------
print(' ')
print('(nx, ny) =', nx, ny)
print('(dx, dy) =', dx, dy)
print('(Hmin, Hmax) =', H.min(), H.max())
print('(Zbmin, Zbmax) =', Zb.min(), Zb.max())
print('(Zimin, Zimax) =', Zi.min(), Zi.max())
print('(MRmin, MRmax) =', meltrate.min(), meltrate.max())
print('conserveIce =', conserveIce)
print('Finished.')
print(' ')
# run_model()
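# Example (requires the default DEM file 'Animas_200.mat' in the working directory):
#   run_model(t_max=10.0, DEM_file='Animas_200.mat')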
#--------------------------------------------------------------------------------------------------
class MassBalance: # (enumeration)
( BAD_VAL ,
ZERO_BALANCE ,
CONSTANT_ELA ,
ELA_LOWERING ,
ELA_TIME_SERIES ,
EXTERNAL_FUNC ,
ELA_LOWERING2 ,
BALANCE_FILE ,
D180_TIME_SERIES ) = list(range( 9))
# class MassBalance
#--------------------------------------------------------------------------------------------------
class BoundaryCond: # (enumeration)
( BAD_VAL ,
ICE_FREE_BOUND ,
ZERO_FLUX_BOUND ,
CONST_FLUX_BOUND ,
SURF_ELEV_BOUND ,
SURF_SLOPE_BOUND ) = list(range( 6))
# class BoundaryCond
#--------------------------------------------------------------------------------------------------
class Parameters: # (structure)
# Constants
g = numpy.float64(9.81) # gravitional acceleration [m/s**2]
rhoI = numpy.float64(917) # density of ice [kg/m**3]
rhoW = numpy.float64(1000) # density of water [kg/m**3]
day = numpy.float64(0.00274) # length of a day in years [years]
# Time
t = numpy.float64(0) # set time to zero
tMax = numpy.float64(100000) # maximum simulation time in years
dtMax = numpy.float64(0.4 * 365*day) # maximum timestep in years
dtDefault = dtMax # timestep if VARIABLE_DT_TOGGLE==0
sec_per_year = numpy.float64(3600) * 24 * 365 # (SDP, 9/30/09)
# Glacier Properties
MinGlacThick = numpy.float64(1)
# Ice Deformation
glensA = numpy.float64( (6.8e-15)*3.15e7/(1e9) ) # Patterson, 1994; MacGregor, 2000
## glensA = numpy.float64( 6.8 * 3.15 * 1e-17)
# Attractor Sliding -- only if ICESLIDE_TOGGLE==1 (generally used)
UsChar = numpy.float64(10)
taubChar = numpy.float64(100000)
# Standard Sliding -- used if ICESLIDE_TOGGLE==2 (generally not used)
B = numpy.float64(0.0012) # m/(Pa*yr) -- MacGregor, 2000
DepthToWaterTable = numpy.float64(20) # distance from ice surface to water table
MaxFloatFraction = numpy.float64(80) # limits water level in ice
Hpeff = numpy.float64(20) # effective pressure (meters of water)
# Mass Balance
initELA = numpy.float64(3350) # (valley glaciers, try 3500 ice sheets)
ELAStepSize = numpy.float64(-50)
ELAStepInterval = numpy.float64(500)
gradBz = numpy.float64(0.01)
maxBz = numpy.float64(2)
tmin = numpy.float64(200) # Years, spin-up time
# Avalanching
angleOfRepose = numpy.float64(30)
avalanchFreq = numpy.float64(3) # average number per year
# Calving
seaLevel = numpy.float64(-100) # meters
calvingCoef = numpy.float64(2) # year^-1
# Thermal
c = numpy.float64(2060) # specific heat capacity (J/(kg*K))
Qg = numpy.float64(0.05 * 3.15e7) # Geothermal heat flux (W/m^2)*seconds/year = (J/year)/(m^2)
gradTz = numpy.float64(-0.0255) # Geothermal Gradient
# Only for Ice Sheets ???
Hbound = numpy.float64(2000)
Elev0 = numpy.float64(0) # reference elevation
To = numpy.float64(2.6) # temperature at Elev0
lapseRate = numpy.float64(-0.0065) # degrees per meter
# class Parameters
#--------------------------------------------------------------------------------------------------
class InputParams: # (structure)
CLEAR_FIGURE = 1
CONTOUR_INTERVAL = 50.
DEBUG_TOGGLE = 0
DT_LIMIT = 0
ELA_CONTOUR = 1.
ICE_CONTOUR = 1.
NEW_FIGURE = 0
QUIVER_VECS = 0
RECONSTRUCT = 0
SUBFIGURE = 0
THERMAL_CONTOUR = 0
# class InputParams
#--------------------------------------------------------------------------------------------------
class OutputParams: # (structure)
plotInterval = 60 * 120 # seconds
saveInterval = 100 # whole years
reportInterval = 30 # seconds
nextPlot = 0 # initialize to plot on first timestep
nextSave = 0 # initialize to save on first timestep
nextReport = 0 # initialize to report on first timestep
outputFile = 'savetmp'
# class OutputParams
#--------------------------------------------------------------------------------------------------
class Toggles: # (structure)
#------------------------
# Code behavior toggles
#-----------------------------------------------------------
# Toggle or turn on/off segments of the code or select
# between multiple possibilities for a given process.
# Values can be reset in INIT_COND segment.
# Note that many of these are unused in current version.
#-----------------------------------------------------------
GUISTART_TOGGLE = 0 # started simulation with the gui (off|on)
SAVE_TOGGLE = 1 # saving (off|on)
PLOT_TOGGLE = 1 # plotting (off|on)
REPORT_TOGGLE = 1 # reporting (off|on)
COMPRESS_TOGGLE = 0 # only simulate area with ice (off|on)
VARIABLE_DT_TOGGLE = 0 # state dependent time step (off|on)
INIT_COND_TOGGLE = 1 # load DEM and climate (synth|valley|sheet)
GENERIC_ICE_TOGGLE = 0 # start with generic ice surface (off|on)
ICEFLOW_TOGGLE = 1 # ice motion by deformation (off|on)
ICESLIDE_TOGGLE = 0 # ice motion by sliding (off|on|select)
THERMAL_TOGGLE = 0 # temp dependence of flow (off|on)
FREEZEON_TOGGLE = 0 # basal ice freeze to bed (off|on)
AVALANCHE_TOGGLE = 0 # avalanche off steep surfaces (off|on)
CALVING_TOGGLE = 0 # calving front (off|on)
ERODE_TOGGLE = 0 # erode the bed (off|on|select)
## CRN_TOGGLE = 0 # CRN accumulation (off|on)
# MASS_BALANCE_TOGGLE = MassBalance.ELA_LOWERING # select climate scenario (off|on|select)
MASS_BALANCE_TOGGLE = MassBalance.CONSTANT_ELA # select climate scenario (off|on|select)
WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND # boundary condition (no ice|reflect|no flow)
# class Toggles
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def compress_grid( H , Zb , COMPRESS_TOGGLE=False , RESTART_TOGGLE=0,
THERMAL_TOGGLE=False ):
# COMPRESS - ONLY SIMULATE SUB-RECTANGLE THAT CONTAINS ICE
if (COMPRESS_TOGGLE) and (H.max() > 1) and (RESTART_TOGGLE != 2):
H_FullSpace = H.copy()
Zb_FullSpace = Zb.copy()
if (THERMAL_TOGGLE):
Ts_FullSpace = Ts.copy()
Tb_FullSpace = Tb.copy()
Tm_FullSpace = Tm.copy()
#[indrw,indcl] = find(H ~= 0);
indrw, indcl = numpy.where( H != 0 )
mxrw, mxcl = Zb.shape
mnrw = max( 0 , min(indrw) - 2 )
mxrw = min( mxrw , max(indrw) + 2 )
mncl = max( 0 , min(indcl) - 2 )
mxcl = min( mxcl , max(indcl) + 2 )
H = H [ mnrw:mxrw , mncl:mxcl ]
Zb = Zb[ mnrw:mxrw , mncl:mxcl ]
## Zi = Zb + max( H, 0 )
## Zi = Zb + numpy.choose( H<0 , (H,0) )
Zi = Zb + numpy.maximum(H, 0)
if (THERMAL_TOGGLE):
Ts = Ts[ mnrw:mxrw , mncl:mxcl ]
Tb = Tb[ mnrw:mxrw , mncl:mxcl ]
Tm = Tm[ mnrw:mxrw , mncl:mxcl ]
ny, nx = H.shape
mx_ny, mx_nx = Zb_FullSpace.shape
ny, nx = Zb.shape
compression_ratio = (mx_nx * mx_ny) / (nx * ny)
COMPRESSED_FLAG = 1
else:
## Zi = Zb + max( H, 0 ) # included for restarts
## Zi = Zb + numpy.choose( H<0 , (H,0) )
Zi = Zb + numpy.maximum(H, 0)
compression_ratio = 1.
COMPRESSED_FLAG = 0
return ( Zi , compression_ratio , COMPRESSED_FLAG )
# compress_grid()
#--------------------------------------------------------------------------------------------------
def filter2d( b , x , shape='same' ):
return scipy.signal.convolve( b , x , mode=shape )
# filter2d()
#--------------------------------------------------------------------------------------------------
def add_halo( x ):
x_ext = numpy.concatenate( ( x[:,0,numpy.newaxis] , x , x[:,-1,numpy.newaxis] ) , axis=1 )
x_ext = numpy.concatenate( ( [x_ext[0,:]] , x_ext , [x_ext[-1,:]] ) )
return x_ext
# add_halo()
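# Example: the halo duplicates the outermost rows and columns, so an (ny, nx)
# grid becomes (ny+2, nx+2):
#   A = numpy.arange(6.).reshape(2, 3)
#   add_halo(A).shape   # -> (4, 5)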
#--------------------------------------------------------------------------------------------------
def set_bc( H , Zb , Zi ,
THERMAL_TOGGLE = Toggles.THERMAL_TOGGLE,
WEST_BC_TOGGLE = Toggles.WEST_BC_TOGGLE,
EAST_BC_TOGGLE = Toggles.EAST_BC_TOGGLE,
SOUTH_BC_TOGGLE = Toggles.SOUTH_BC_TOGGLE,
NORTH_BC_TOGGLE = Toggles.NORTH_BC_TOGGLE ):
## WEST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## EAST_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## SOUTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ,
## NORTH_BC_TOGGLE = BoundaryCond.ICE_FREE_BOUND ):
#-------------------------------------------------------
# MODIFY BOUNDARY CELLS TO ENFORCE BOUNDARY CONDITIONS
#-------------------------------------------------------
# DEFAULT BOUNDARY CONDITION IS ZERO FLUX
#-------------------------------------------------------
H_ext = add_halo( H )
Zb_ext = add_halo( Zb )
Zi_ext = add_halo( Zi )
if (THERMAL_TOGGLE):
Ts_ext = add_halo( Ts )
Tb_ext = add_halo( Tb )
Tm_ext = add_halo( Tm )
# WESTERN BOUNDARY CONDITION
if WEST_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
ZiBound = numpy.mean(Zb[:,0]) + Hbound
H_ext[:,0] = ZiBound - Zb_ext[:,0]
elif WEST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif WEST_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,0] = 2*Zi_ext[:,1] - Zi_ext[:,2]
H_ext [:,0] = Zi_ext[:,0] - Zb_ext[:,0]
H_ext [:,0] = numpy.maximum( H_ext[:,0], 0 )
elif WEST_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,0] = 0
# EASTERN BOUNDARY CONDITION
if EAST_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
ZiBound = numpy.mean(Zb[:,-1]) + Hbound
H_ext[:,-1] = ZiBound - Zb_ext[:,-1]
elif EAST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif EAST_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[:,-1] = 2*Zi_ext[:,-2] - Zi_ext[:,-3]
H_ext [:,-1] = Zi_ext[:,-1] - Zb_ext[:,-1]
H_ext [:,-1] = numpy.maximum( H_ext[:,-1], 0)
elif EAST_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[:,-1] = 0
# SOUTHERN BOUNDARY CONDITION
if SOUTH_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
ZiBound = numpy.mean(Zb[0,:]) + Hbound
H_ext[0,:] = ZiBound - Zb_ext[0,:]
elif SOUTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif SOUTH_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[0,:] = 2*Zi_ext[1,:] - Zi_ext[2,:]
H_ext [0,:] = Zi_ext[0,:] - Zb_ext[0,:]
H_ext [0,:] = numpy.maximum( H_ext[0,:], 0 )
elif SOUTH_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[0,:] = 0
# NORTHERN BOUNDARY CONDITION
if NORTH_BC_TOGGLE == BoundaryCond.SURF_ELEV_BOUND: # Constant Ice Surface Height
ZiBound = numpy.mean(Zb[-1,:]) + Hbound
H_ext[-1,:] = ZiBound - Zb_ext[-1,:]
elif NORTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND: # Constant Ice Flux B.C.
pass
elif NORTH_BC_TOGGLE == BoundaryCond.SURF_SLOPE_BOUND: # Constant Ice Surface Slope
Zi_ext[-1,:] = 2*Zi_ext[-2,:] - Zi_ext[-3,:]
H_ext [-1,:] = Zi_ext[-1,:] - Zb_ext[-1,:]
H_ext [-1,:] = numpy.maximum( H_ext[-1,:], 0 )
elif NORTH_BC_TOGGLE == BoundaryCond.ICE_FREE_BOUND: # Ice Free Boundary
H_ext[-1,:] = 0
Zi_ext = Zb_ext + H_ext
return ( H_ext , Zb_ext , Zi_ext )
# set_bc()
#--------------------------------------------------------------------------------------------------
def difference_grid( A , dx , dy ):
dAdx_ext = ( A[:,1:] - A[:,:-1] ) / dx
dAdy_ext = ( A[1:,:] - A[:-1,:] ) / dy
dAdx = dAdx_ext[1:-1,:]
dAdy = dAdy_ext[:,1:-1]
return ( dAdx , dAdy )
# difference_grid()
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def basal_shear_stress( H_ext , Zi_ext , dx=1. , dy=1. ,
g=Parameters.g , rhoI=Parameters.rhoI ):
#------------------------------------
# CALCULATE THE BASAL SHEAR STRESS
#------------------------------------
# forward differences (could use difference_grid())
dZidxX_ext = ( Zi_ext[:,1:] - Zi_ext[:,:-1] ) / dx
dZidyY_ext = ( Zi_ext[1:,:] - Zi_ext[:-1,:] ) / dy
dZidxX = dZidxX_ext[1:-1,:]
dZidyY = dZidyY_ext[:,1:-1]
HX_ext = ( H_ext[:,1:] + H_ext[:,:-1] ) / 2.
HY_ext = ( H_ext[1:,:] + H_ext[:-1,:] ) / 2.
HX = HX_ext[1:-1,:]
HY = HY_ext[:,1:-1]
taubxX_ext = -rhoI * g * HX_ext * dZidxX_ext
taubyY_ext = -rhoI * g * HY_ext * dZidyY_ext
taubxX = taubxX_ext[1:-1,:]
taubyY = taubyY_ext[:,1:-1]
taubxY = ( taubxX_ext[:-1,:-1] + taubxX_ext[:-1,1:] +
taubxX_ext[1: ,:-1] + taubxX_ext[1: ,1:] ) / 4.
taubyX = ( taubyY_ext[:-1,:-1] + taubyY_ext[:-1,1:] +
taubyY_ext[1: ,:-1] + taubyY_ext[1: ,1:] ) / 4.
taubX = numpy.sqrt( taubxX**2 + taubyX**2 )
taubY = numpy.sqrt( taubxY**2 + taubyY**2 )
taubX = numpy.choose( HX>0 , (0,taubX) )
taubY = numpy.choose( HY>0 , (0,taubY) )
# Zero out the direction components where the shear stress is negligible (avoids division by ~0)
xcmpnt = numpy.choose( numpy.abs(taubX)<1e-5 , ( taubxX / taubX , 0. ) )
ycmpnt = numpy.choose( numpy.abs(taubY)<1e-5 , ( taubyY / taubY , 0. ) )
return ( ( xcmpnt , ycmpnt ) , ( taubX , taubY ) , ( HX , HY ) )
# basal_shear_stress()
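# Note: the basal shear (driving) stress computed above is the standard
# shallow-ice expression tau_b = -rhoI * g * H * grad(Zi), evaluated on the
# staggered X and Y faces of the grid.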
#--------------------------------------------------------------------------------------------------
def iceflow( taubX , taubY , HX , HY , xcmpnt , ycmpnt ,
THERMAL_TOGGLE = Toggles.THERMAL_TOGGLE,
## THERMAL_TOGGLE=False,
glensA = Parameters.glensA,
#----------------------------------------------
# Remaining values for THERMAL_TOGGLE = True
#----------------------------------------------
MinGlacThick = Parameters.MinGlacThick,
lapseRate = Parameters.lapseRate ): # (value for ice sheets ???)
## MinGlacThick = 1.0,
## lapseRate = numpy.float64(-0.0065)): # (value for ice sheets ???)
#--------------------------------------------
# CALCULATE ICE VELOCITY DUE TO DEFORMATION
#--------------------------------------------
if (THERMAL_TOGGLE):
##################################################################
# NOTE! Many of the vars needed by this segment are undefined,
# such as: lapseRate (added above), eHs, eTs, eTm, To,
# H_ext, Ts_ext and Tm_ext. (SDP, 9/21/09)
##################################################################
A_ext = numpy.zeros(H_ext.shape , dtype='float64' )
ind = numpy.nonzero( numpy.ravel(H_ext) >= MinGlacThick )
Ts_ext = To + lapseRate*( Zi_ext - Elev0 )
#A_ext(ind) = interp3( eHs, eTs, eTm, eA, H_ext(ind), Ts_ext(ind), Tm_ext(ind) ) ;
try:
numpy.put( A_ext , ind , interpolate.interp3d( eHs , eTs , eTm )( numpy.take(H_ext,ind) , numpy.take(Ts_ext,ind) , numpy.take(Tm_ext,ind) ) )
except:
logging.error( "NaN in A, likely H_node exceeds H_glens limits" )
return -1
AX = ( A_ext[1:-1, :-1] + A_ext[1:-1,1: ] ) / 2.
AY = ( A_ext[ :-1,1:-1] + A_ext[1: ,1:-1] ) / 2.
else:
AX = glensA
AY = glensA
# Here's the guts of calculating the depth averaged velocity
UdxX = numpy.abs( .4 * AX * taubX*taubX*taubX * HX ) * xcmpnt
UdyY = numpy.abs( .4 * AY * taubY*taubY*taubY * HY ) * ycmpnt
#UdxX = numpy.fix(UdxX*1e6)*1e-6
#UdyY = numpy.fix(UdyY*1e6)*1e-6
return ( UdxX , UdyY )
# iceflow()
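# Note: the 0.4 factor above is the depth-averaged deformation velocity for
# Glen's flow law with exponent n = 3: Ud = (2*A/(n+2)) * taub**n * H, and
# 2/(n+2) = 0.4.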
#--------------------------------------------------------------------------------------------------
def ice_sliding( taubX , taubY , xcmpnt , ycmpnt ,
THERMAL_TOGGLE=False,
FREEZEON_TOGGLE=False,
UsChar=Parameters.UsChar,
taubChar=Parameters.taubChar ):
#------------------------------
# CALCULATE SLIDING VELOCITY
#------------------------------
# Here's the guts of calculating the sliding velocity
UsxX = numpy.choose( numpy.abs(taubX)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubX) * xcmpnt ,
UsChar * numpy.exp(1 - taubChar ) * xcmpnt ) )
UsyY = numpy.choose( numpy.abs(taubY)<1e-5 , ( UsChar * numpy.exp(1 - taubChar / taubY) * ycmpnt ,
UsChar * numpy.exp(1 - taubChar ) * ycmpnt ) )
if (THERMAL_TOGGLE and FREEZEON_TOGGLE):
##################################################################
# NOTE! Many of the vars needed by this segment are undefined,
# such as: Tb_ext, Zb_ext, seaLevel. (SDP, 9/21/09)
##################################################################
## notFrozen = (Tb_ext > -.5) or (Zb_ext < seaLevel)
notFrozen = numpy.logical_or( Tb_ext > -0.5, Zb_ext < seaLevel )
notFrozenX = ( notFrozen[1:-1, :-1] + notFrozen[1:-1,1: ] ) / 2.
notFrozenY = ( notFrozen[ :-1,1:-1] + notFrozen[1: ,1:-1] ) / 2.
UsxX *= notFrozenX
UsyY *= notFrozenY
return ( UsxX , UsyY )
# ice_sliding()
#--------------------------------------------------------------------------------------------------
def sum_ice_motion( UdxX , UdyY , UsxX , UsyY ):
UxX = (UdxX + UsxX)
UyY = (UdyY + UsyY)
return ( UxX , UyY )
# sum_ice_motion()
#--------------------------------------------------------------------------------------------------
def avalanche( H , angleOfRepose=Parameters.angleOfRepose ):
#---------------------------------------
# AVALANCHE SNOW OFF OF STEEP SURFACES
#---------------------------------------------------------
# move ice downslope until the ice surface is everywhere
# less then or near the angle of repose
#---------------------------------------------------------
ny, nx = Zb.shape
dHRepose = dx * numpy.tan(angleOfRepose * numpy.pi / 180.)
Ho = numpy.maximum( H, 0 )
while True:
dZidx_down = numpy.zeros( (ny,nx) , dtype='float64' )
dZidx_up = numpy.zeros( (ny,nx) , dtype='float64' )
dZidx_down[:,1:] = numpy.choose( Zi[:,1:] < Zi[:,:-1] , ( Zi[:,1:] - Zi[:,:-1] , 0 ) )
dZidx_up [:,:-1] = numpy.choose( Zi[:,:-1] < Zi[:,1:] , ( Zi[:,:-1] - Zi[:,1:] , 0 ) )
dZidx = numpy.choose( dZidx_up > dZidx_down , ( dZidx_down , dZidx_up ) )
dZidy_left = numpy.zeros( (ny,nx) , dtype='float64' )
dZidy_right = numpy.zeros( (ny,nx) , dtype='float64' )
dZidy_left [1:,:] = numpy.choose( Zi[1:,:] < Zi[:-1,:] , ( Zi[1:,:] - Zi[:-1,:] , 0 ) )
dZidy_right[:-1,:] = numpy.choose( Zi[:-1,:] < Zi[1:,:] , ( Zi[:-1,:] - Zi[1:,:] , 0 ) )
dZidy = numpy.choose( dZidy_left > dZidy_right , ( dZidy_right , dZidy_left ) )
grad = numpy.sqrt( dZidx**2 + dZidy**2 )
gradT = dZidy_left + dZidy_right + dZidx_down + dZidx_up
gradT = numpy.choose( gradT == 0, (gradT,1) )
grad = numpy.choose( Ho < 0.1, (grad ,0) )
mxGrad = grad.max()
if (mxGrad <= 1.1*dHRepose):
break
delH = numpy.choose( grad < dHRepose , ( ( grad - dHRepose)/3. , 0 ) )
Htmp = Ho.copy()
Ho = numpy.choose( Htmp<delH , ( Htmp-delH , 0 ) )
delH = Htmp - Ho
delHdn = numpy.zeros( (ny,nx) , dtype='float64' )
delHup = numpy.zeros( (ny,nx) , dtype='float64' )
delHlt = numpy.zeros( (ny,nx) , dtype='float64' )
delHrt = numpy.zeros( (ny,nx) , dtype='float64' )
delHup[:,1: ] = delH[:, :-1] * dZidx_up [:, :-1] / gradT[:, :-1]
delHdn[:, :-1] = delH[:,1: ] * dZidx_down[:,1: ] / gradT[:,1: ]
delHrt[1: ,:] = delH[ :-1,:] * dZidy_right[ :-1,:] / gradT[ :-1,:]
delHlt[ :-1,:] = delH[1: ,:] * dZidy_left [1: ,:] / gradT[1: ,:]
Ho = Ho + delHdn + delHup + delHlt + delHrt
Ho = numpy.maximum( Ho, 0 )
Zi = Zb + Ho
#H = Ho + (H<0).*H ;
H = Ho + numpy.choose( H<0 , (0,H) ) ### DOUBLE-CHECK THIS
return H
# avalanche()
#--------------------------------------------------------------------------------------------------
def calve( H , dt , CALVING_TOGGLE=True ):
if not(CALVING_TOGGLE):
return
#-------------------------
# CALVING GLACIER FRONT
#-----------------------------------------------------------------------
# one reason this is difficult is that the height of ice in the cell
# is really just recording the volume of ice, the position of the
# margin in the cell not the actual ice height. Here floation
# height is assumed (or higher if necessary to account for ice volume)
#-----------------------------------------------------------------------
Hold = H.copy()
calvedIce = 0
    # Count time backwards with a shortened timestep until the whole
    # timestep used during this iteration has been simulated
dtTot = dt
while (dtTot > 0):
# Find the calving front, aka the wet glacier margin
G = H > 1
W = numpy.logical_and( G==0 , Zb <= seaLevel )
filt = numpy.array( [[0,1,0],[1,1,1],[0,1,0]] , dtype='float64' )
Wfilt = filter2d( filt , W )
Wfilt[:,(0,-1)] = Wfilt[:,(2,-3)]
Wfilt[(0,-1),:] = Wfilt[(2,-3),:]
        wetGmargin = G * Wfilt > 0
        indWGM = wetGmargin.ravel().nonzero()[0]
# If calving front exists, find water depth, ensure it's positive
if (indWGM.size > 0):
## WDmarg = seaLevel - Zb.flatten()[indWGM]
WDmarg = seaLevel - Zb.flat[indWGM]
WDmarg = numpy.maximum( WDmarg, 0 )
ind = (WDmarg != 0).nonzero()
indWGM = numpy.take( indWGM , ind )
WDmarg = numpy.take( WDmarg , ind )
#WDmarg = max( 0, seaLevel - Zb(indWGM) ) ;
#ind = find( WDmarg == 0 ) ;
#indWGM(ind) = [] ;
#WDmarg(ind) = [] ;
# If calving front exists, remove some ice
if (indWGM.size > 0):
# ice thickness in calving cells
Hmarg = H.flatten()[indWGM]
Hmarg = numpy.choose( Hmarg<WDmarg/0.917 , (Hmarg,WDmarg/0.917) )
# A new timestep is calculated such that the calving rate times the
            # timestep does not exceed the total contents of any calving cell.
dLinCalvdt = calvingCoef * WDmarg # front migration rate
dVolCalvdt = dx * dLinCalvdt * Hmarg # rate of volume calved
volAvailMarg = dx * dx * H.flatten()[indWGM] # ice volume available
calvDt = min( dtTot, ( volAvailMarg / dVolCalvdt ).min() ) # calving timestep
# Remove this calving timestep from total time to calve
dtTot = dtTot - calvDt
# Convert the volume calved to ice thickness and remove
calve = dVolCalvdt * calvDt / ( dx * dx )
H[indWGM] = H[indWGM] - calve
# Record total volume calved for posterity
            calvedIce = calvedIce + calve.sum(axis=0).sum() * dx * dx
else:
dtTot = 0
# Record ice removal by calving for conservation test
conserveIce = conserveIce + ( H - Hold ).sum(axis=0).sum()
# calve()
#--------------------------------------------------------------------------------------------------
def mass_balance( Zi, t,
MASS_BALANCE_TOGGLE=None,
initELA=None, ELAStepSize=None, ELAStepInterval=None,
tmin=None, gradBz=None, maxBz=None ):
## MASS_BALANCE_TOGGLE=Toggles.MASS_BALANCE_TOGGLE,
## initELA=Parameters.initELA,
## tmin=Parameters.tmin ,
## ELAStepSize=Parameters.ELAStepSize ,
## ELAStepInterval=Parameters.ELAStepInterval ,
## gradBz=Parameters.gradBz,
## maxBz=Parameters.maxBz ):
#------------------------------------------------------------
# (12/4/09) Experiment that worked. A function in another
# Python package can change variables stored in "classes"
# like Toggles and Parameters, but if given as defaults
# to the arguments of the update() function it will always
# use the original values in Toggles and Parameters.
#------------------------------------------------------------
    if (MASS_BALANCE_TOGGLE is None):
        MASS_BALANCE_TOGGLE = Toggles.MASS_BALANCE_TOGGLE
    if (initELA is None):
        initELA = Parameters.initELA
    if (ELAStepSize is None):
        ELAStepSize = Parameters.ELAStepSize
    if (ELAStepInterval is None):
        ELAStepInterval = Parameters.ELAStepInterval
    if (tmin is None):
        tmin = Parameters.tmin
    if (gradBz is None):
        gradBz = Parameters.gradBz
    if (maxBz is None):
        maxBz = Parameters.maxBz
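    #------------------------------------------------------------
    # (Editor's note, illustrative only.) Python evaluates default
    # argument values once, at function definition time, e.g.:
    #     def f(ela=Parameters.initELA): return ela
    #     Parameters.initELA = 5000   # f() still returns the old value
    # Passing None and resolving it inside the body, as done above,
    # always picks up the current Toggles/Parameters values.
    #------------------------------------------------------------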
## print 'MASS_BALANCE_TOGGLE =', MASS_BALANCE_TOGGLE
## print 'initELA =', initELA
## print 'ELAStepSize =', ELAStepSize
## print 'ELAStepInterval =', ELAStepInterval
## print 'tmin (spinup) =', tmin
#--------------------------
# CALCULATE MASS BALANCE
#---------------------------------------------------------
# The imposed mass balance is the imposed climate.
# There are many possibilities, here are only a few.
# All must populate the 2D matrix Bxy of size = size(Zb)
# with values of net precip/melt rate in m/yr. ###################################
# Define the scalar, ELA (m), for plotting.
#---------------------------------------------------------
if (MASS_BALANCE_TOGGLE == MassBalance.CONSTANT_ELA):
# Simple ELA, maxBz, gradBz
ELA = initELA
#Bxy = min( maxBz , gradBz * ( Zi - ELA ) )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy > maxBz , (Bxy, maxBz) )
elif (MASS_BALANCE_TOGGLE == MassBalance.ELA_LOWERING):
# ELA changing with time experiment
# ELAStepSize = -10 ; # positive/negative values raise/lower ELA
# ELAStepInterval = 500 ;
## ELA = initELA + ELAStepSize * max( 0 , numpy.floor( (t-tmin)/ELAStepInterval ) )
ELA = initELA + ELAStepSize * numpy.maximum(0, (t-tmin)/ELAStepInterval ) # (SDP, 12/4/09)
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy > maxBz , (Bxy, maxBz) )
#----------------
# For debugging
#----------------
DEBUG = False
if (DEBUG):
print('t, ELA =', t, ', ', ELA)
print('min(Bxy), max(Bxy) =', Bxy.min(), ', ', Bxy.max())
elif (MASS_BALANCE_TOGGLE == MassBalance.ELA_LOWERING2):
# ELA changing with time experiment
tau = numpy.float64(25) # intrinsic timescale of ice dynamics
tmin = numpy.float64(0) # time to begin ELA modification
initELA = numpy.float64(4200) # initial ELA
stepSize = numpy.float64(-10) # positive/negative values raise/lower ELA
dELAdt = numpy.float64(-0.1)
ELA = initELA + stepSize * max( 0, numpy.floor( (t-tmin) / (8*tau) ) )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy > maxBz , (Bxy, maxBz) )
elif (MASS_BALANCE_TOGGLE == MassBalance.EXTERNAL_FUNC):
# external mass balance function
try: Bxy
except NameError:
# Mass Balance 2D Must Return Bxy (2d Matrix)
Bxy = mass_balance_gc2d( t , cellsize , Zi )
nextGetBxy = t + getBxyInterval
else:
if (t >= nextGetBxy):
Bxy = mass_balance_gc2d( t , cellsize , Zi )
nextGetBxy = t + getBxyInterval
elif (MASS_BALANCE_TOGGLE == MassBalance.ELA_TIME_SERIES) or \
(MASS_BALANCE_TOGGLE == MassBalance.D18O_TIME_SERIES):
# ELA time series
ELA = interpolate.interp1d( trecord , ELArecord )( t )
Bxy = gradBz * ( Zi - ELA )
Bxy = numpy.choose( Bxy > maxBz , (Bxy, maxBz) )
elif (MASS_BALANCE_TOGGLE == MassBalance.BALANCE_FILE):
# external mass balance file
Bxy = load_dem_var( DEM_file, 'Bxy' )
ind = numpy.nonzero( numpy.ravel(numpy.abs(Bxy)==min(numpy.abs(Bxy))) )
ELA = numpy.mean( numpy.take( numpy.ravel(Zi) , ind ) )
elif (MASS_BALANCE_TOGGLE == MassBalance.ZERO_BALANCE):
ELA = 0
Bxy = numpy.zeros( Zb.shape , dtype='float64' )
else:
logging.error( "Unrecognized Mass Balance" )
return -1
return ( Bxy , ELA )
# mass_balance()
#--------------------------------------------------------------------------------------------------
def mass_conservation( H_ext , UxX , UyY , HX , HY , dZidxX , dZidyY ,
dx=1., dy=1.,
MinGlacThick=Parameters.MinGlacThick,
BoundaryFlux=0., #### WAS UNDEFINED BEFORE 9/21/09 ####
WEST_BC_TOGGLE =Toggles.WEST_BC_TOGGLE,
EAST_BC_TOGGLE =Toggles.EAST_BC_TOGGLE,
SOUTH_BC_TOGGLE=Toggles.SOUTH_BC_TOGGLE,
NORTH_BC_TOGGLE=Toggles.NORTH_BC_TOGGLE ):
## WEST_BC_TOGGLE =BoundaryCond.ICE_FREE_BOUND , # (Before 12/4/09)
## EAST_BC_TOGGLE =BoundaryCond.ICE_FREE_BOUND ,
## SOUTH_BC_TOGGLE=BoundaryCond.ICE_FREE_BOUND ,
## NORTH_BC_TOGGLE=BoundaryCond.ICE_FREE_BOUND ):
#-----------------------------------
# MASS CONSERVATION -- CONTINUITY
#--------------------------------------------
# Ensure that no ice is drawn from the rock
# CLASS = H_ext >= MinGlacThick
#--------------------------------------------
CLASS = numpy.choose( H_ext >= MinGlacThick , (0.,1.) )
DCLASSx = ( CLASS[1:-1,1: ] - CLASS[1:-1, :-1] ) * numpy.sign( dZidxX )
DCLASSy = ( CLASS[1: ,1:-1] - CLASS[ :-1,1:-1] ) * numpy.sign( dZidyY )
UxX = numpy.choose( numpy.abs(DCLASSx+1)<1e-5 , (UxX,0.) )
UyY = numpy.choose( numpy.abs(DCLASSy+1)<1e-5 , (UyY,0.) )
# Calculate both components of the ice flux
qxX = UxX * HX
qyY = UyY * HY
#-----------------------------------------------------------
# Note: What is appropriate value for BoundaryFlux ??
# Was undefined in versions prior to 9/21/09 (SDP).
#-----------------------------------------------------------
if (WEST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND): qxX[: , 0] = BoundaryFlux
if (EAST_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND): qxX[: ,-1] = BoundaryFlux
if (SOUTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND): qyY[0 , :] = BoundaryFlux
if (NORTH_BC_TOGGLE == BoundaryCond.CONST_FLUX_BOUND): qyY[-1, :] = BoundaryFlux
# Here's the guts of the continuity equation
dqdxX = ( qxX[ :,1:] - qxX[: ,:-1] ) / dx
dqdyY = ( qyY[1:, :] - qyY[:-1,: ] ) / dy
dHdt = -dqdxX - dqdyY
return ( dHdt , ( qxX , qyY ) )
# mass_conservation()
#--------------------------------------------------------------------------------------------------
#--------------------------------------------------------------------------------------------------
def load_dem( DEM_file ):
# Assume DEM_file is in MatLab format
vars = scipy.io.loadmat( DEM_file )
cellsize = numpy.float64(vars['cellsize'])
easting = numpy.float64(vars['easting'])
northing = numpy.float64(vars['northing'])
topo = numpy.float64(vars['topo'])
ny, nx = topo.shape
logging.info( 'Shape of topo is %d by %d' , ny , nx )
logging.info( 'Shape of easting is %d' , easting.size )
logging.info( 'Shape of northing is %d' , northing.size )
if (easting.size != nx):
sys.exit( 'Easting does not match dimension of topo (%d != %d)' % (easting.size, nx) )
if (northing.size != ny):
sys.exit( 'Northing does not match dimension of topo (%d != %d)' % (northing.size, ny) )
return ( topo , easting , northing , cellsize )
# load_dem()
#--------------------------------------------------------------------------------------------------
def load_dem_var( var_file , val_s ):
# Assume var_file is in MatLab format,
# & maybe contains DEM as well.
vars = scipy.io.loadmat( var_file )
if (val_s in vars):
var = vars[val_s]
else:
var = None
return var
# load_dem_var()
#--------------------------------------------------------------------------------------------------
def get_timestep( H, Zi_ext, Zi , dHdt, Bxy,
dtMax = Parameters.dtMax,
dtDefault = Parameters.dtDefault ):
#---------------------
# CALCULATE TIMESTEP
#-----------------------------------------------------------------------
# Now that we know the rate of change in ice surface heights due to
# ice motion and due to precipitation or melt we need to know over
# what period of time we can project forward with these rates and
# maintain stability of the ice surface. The basic idea here is that
    # we don't want to take a timestep any longer than it would take to
# reverse the ice surface slope between two cells, such that ice
# should be flowing in the other direction. In fact, let's make our
# timestep much less than that.
#
# This calculation sets the timestep such that the change
# in ice surface elevation nowhere exceeds a set fraction
# of the local standard deviation in ice surface elevations
#-----------------------------------------------------------------------
# include ice changes by precip and melt
dHdtTot = dHdt + Bxy
    adHdt = numpy.abs(dHdtTot)
'''
Show the effect of cycles and undecided labels in preference pairs in simple training datasets on:
- GPPL
- SVC
- Ranking using PageRank
We use these three because they have different ways of modelling preference pairs: as noisy observations at both points;
as classifications; as graphs.
Created on 20 Jul 2017
@author: simpson
'''
import os, sys
sys.path.append("./python")
sys.path.append("./python/analysis")
sys.path.append("./python/models")
sys.path.append("./python/analysis/habernal_comparison")
sys.path.append(os.path.expanduser("~/git/HeatMapBCC/python"))
sys.path.append(os.path.expanduser("~/git/pyIBCC/python"))
from gp_pref_learning import GPPrefLearning
import numpy as np
import matplotlib.pyplot as plt
from tests import get_noisy_fold_data, load_embeddings, \
compute_lengthscale_heuristic
from embeddings import get_mean_embeddings
from data_loader import load_train_test_data, load_ling_features
import networkx as nx
from sklearn.svm import SVC
import matplotlib
matplotlib.rcParams.update({'font.size': 16})
def run_pagerank(trainids_a1, trainids_a2, prefs_train):
G = nx.DiGraph()
for i in range(len(trainids_a1)):
if prefs_train[i] == 2:
G.add_edge(trainids_a1[i], trainids_a2[i])
elif prefs_train[i] == 0:
G.add_edge(trainids_a2[i], trainids_a1[i])
rank_dict = nx.pagerank_numpy(G)
rankscores = np.zeros(len(rank_dict))
rankscores[list(rank_dict.keys())] = list(rank_dict.values())
return rankscores
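# Example (editor's note, data assumed): a three-item preference cycle
# a > b, b > c, c > a, i.e.
#     run_pagerank([0, 1, 2], [1, 2, 0], [2, 2, 2])
# returns roughly equal scores (about 1/3 each), so PageRank alone cannot
# break the cycle -- which is exactly the behaviour this script probes.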
def run_svm(trainids_a1, trainids_a2, prefs_train, items_feat, testids_a1, testids_a2):
svc = SVC(probability=True)
prefs_train = np.copy(prefs_train)
#ignore the undecided labels
trainids_a1 = trainids_a1[prefs_train!=1]
trainids_a2 = trainids_a2[prefs_train!=1]
prefs_train = prefs_train[prefs_train!=1]
svc.fit(
#np.concatenate((items_feat[trainids_a1, :], items_feat[trainids_a2, :]), axis=1),
#np.array(prefs_train) / 2.0)
np.concatenate((np.concatenate((items_feat[trainids_a1, :], items_feat[trainids_a2, :]), axis=1),
np.concatenate((items_feat[trainids_a2, :], items_feat[trainids_a1, :]), axis=1)), axis=0),
np.concatenate((np.array(prefs_train) / 2.0, 1 - np.array(prefs_train) / 2.0)) )
#results['SVM'] = svc.decision_function(targets_single_arr)
#proba = svc.predict_proba(np.concatenate((items_feat[testids_a1, :], items_feat[testids_a2, :]), axis=1))
pred_svm = svc.predict(np.concatenate((items_feat[testids_a1, :], items_feat[testids_a2, :]), axis=1))
#return proba[:, np.argwhere(np.array(svc.classes_)==1)[0][0]]
return pred_svm
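# Note (editor's): each training pair is fed to the SVM twice, as
# (arg1, arg2) with label p/2 and as (arg2, arg1) with label 1 - p/2,
# so the learned classifier is encouraged to be antisymmetric in the
# two concatenated feature blocks.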
def plot_probas(total_p, label, outputdir, N, vmin=0, vmax=1):
mean_p = total_p / float(nrepeats)
# Plot classifications of all pairs as a coloured 3x3 table
plt.figure(figsize=(4,3))
data = mean_p.reshape(N, N) # do 1 - to get the preference for the argument along x axis over arg along y axis
im = plt.imshow(data, interpolation='nearest', vmin=vmin, vmax=vmax, cmap=plt.cm.get_cmap('hot'))
plt.grid('on')
plt.title('%s -- Predicted Preferences: p(arg_x > arg_y)' % label)
plt.xlabel('ID of arg_x')
plt.ylabel('ID of arg_y')
plt.xticks(list(range(N)))
plt.yticks(list(range(N)))
plt.colorbar(im, fraction=0.046, pad=0.04, shrink=0.9)
plt.savefig(outputdir + '/' + label + '_probas.pdf')
def plot_scores(total_f, var_f, label, outputdir, sample_objs, obj_labels, methodnum, max_normalize_val, fig=None):
# normalise it
total_f /= max_normalize_val
mean_f = total_f / float(nrepeats)
# Plot the latent function values for a, b, and c as a bar chart
if fig is None:
fig = plt.figure(figsize=(3,3))
else:
plt.figure(fig.number)
cols = ['steelblue', 'maroon', 'lightblue']
if methodnum == 0:
plt.plot([-0.5, len(sample_objs)-0.5], [0, 0], color='black')
if var_f is not None:
var_f /= max_normalize_val**2
var_f /= float(nrepeats)**2
plt.bar(sample_objs - (0.47*methodnum) + 0.235, mean_f.flatten(), 0.45, color=cols[methodnum],
            yerr=np.sqrt(var_f))
import cv2
import numpy as np
import scipy.optimize
import recordreader
WHEELTICK_SCALE = 0.066
CAM_TILT = np.array([0, 22.*np.pi/180., 0])
K = np.load("../../tools/camcal/camera_matrix.npy")
dist = np.load("../../tools/camcal/dist_coeffs.npy")
K[:2] /= 4.05
fx, fy = np.diag(K)[:2]
cx, cy = K[:2, 2]
mapsz = 300 # map size
Z = 14 # map zoom factor
uv = np.mgrid[:480, :640][[1, 0]].transpose(1, 2, 0).astype(np.float32)
ceilmask = ((uv[:, :, 1] - cy)**2 + (uv[:, :, 0] - cx + 60)**2) < (np.pi/2.4 * fx)**2
R = cv2.Rodrigues(CAM_TILT)[0]
pts = cv2.fisheye.undistortPoints(uv[None, ceilmask], K, dist, R=R)
ceilmap = np.zeros((mapsz, mapsz), np.float32)
ceilN = np.ones((mapsz, mapsz), np.float32)
ceilmean = ceilmap / ceilN
def pix2floormap():
''' undistortPoints doesn't support points behind the image plane, but we can solve for them '''
def solvetheta(thetad, k1):
theta = thetad
theta += (theta*(k1*theta**2 + 1) - thetad)/(-3*k1*theta**2 - 1)
theta += (theta*(k1*theta**2 + 1) - thetad)/(-3*k1*theta**2 - 1)
return theta
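    # (Editor's note) the two identical updates in solvetheta() are two
    # Newton steps on f(theta) = theta*(1 + k1*theta**2) - thetad, i.e.
    # they numerically invert the one-term fisheye distortion model.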
mg = np.mgrid[:480, :640]
u, v = (mg[1] - cx)/fx, (mg[0] - cy)/fy
r = np.sqrt(u**2 + v**2)
a, b = u/r, -v/r
theta = solvetheta(r, dist[0])
mask = (theta > np.pi/2) & (theta < np.pi/1.9)
t = 1.0 / np.tan(theta[mask] - np.pi/2)
return mask, np.stack([a[mask] * t, b[mask] * t]).T
floormap = np.zeros((mapsz, mapsz, 3), np.float32)
floorN = np.ones((mapsz, mapsz), np.float32) * 1e-3
floormean = floormap / floorN[:, :, None]
floormask, floorpts = pix2floormap()
def Maplookup(x, y, theta):
S, C = np.sin(theta), np.cos(theta)
R = np.array([[C, S], [-S, C]])*Z
p = np.dot(pts[0], R.T) + np.array([x, y])
pi = p.astype(np.int)
pt = p - pi
t00 = (1-pt[:, 1])*(1-pt[:, 0])
t01 = (1-pt[:, 1])*(pt[:, 0])
t10 = (pt[:, 1])*(1-pt[:, 0])
t11 = (pt[:, 1])*(pt[:, 0])
    # bilinear interpolation over the four map cells surrounding each point
    m = (t00*ceilmean[pi[:, 1], pi[:, 0]] +
         t01*ceilmean[pi[:, 1], pi[:, 0]+1] +
         t10*ceilmean[pi[:, 1]+1, pi[:, 0]] +
         t11*ceilmean[pi[:, 1]+1, pi[:, 0]+1])
return m
def Mapupdate(xi, yi, theta, gray):
S, C = np.sin(theta), np.cos(theta)
R = np.array([[C, S], [-S, C]])*Z
p = np.dot(pts[0], R.T) + np.array([xi, yi])
pi = p.astype(np.int)
pt = p - pi
t00 = (1-pt[:, 1])*(1-pt[:, 0])
t01 = (1-pt[:, 1])*(pt[:, 0])
t10 = (pt[:, 1])*(1-pt[:, 0])
t11 = (pt[:, 1])*(pt[:, 0])
idxs = pi[:, 1] * mapsz + pi[:, 0]
ceilN[:] += np.bincount(idxs, t00.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
ceilN[:] += np.bincount(idxs+1, t01.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
ceilN[:] += np.bincount(idxs+mapsz, t10.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
ceilN[:] += np.bincount(idxs+mapsz+1, t11.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
mask = ceilmask
ceilmap[:] += np.bincount(idxs, t00*gray[mask], mapsz*mapsz).reshape(mapsz, mapsz)
ceilmap[:] += np.bincount(idxs+1, t01*gray[mask], mapsz*mapsz).reshape(mapsz, mapsz)
ceilmap[:] += np.bincount(idxs+mapsz, t10*gray[mask], mapsz*mapsz).reshape(mapsz, mapsz)
ceilmap[:] += np.bincount(idxs+mapsz+1, t11*gray[mask], mapsz*mapsz).reshape(mapsz, mapsz)
ceilmean[:] = ceilmap / ceilN
def Floorupdate(xi, yi, theta, bgr):
S, C = np.sin(-theta), np.cos(-theta)
R = np.array([[C, S], [-S, C]])
p = np.dot(floorpts, R.T) + np.array([xi, yi])
mask2 = (p[:, 0] >= 0) & (p[:, 1] >= 0) & (p[:, 0] < mapsz-1) & (p[:, 1] < mapsz-1)
p = p[mask2]
pi = p.astype(np.int)
pt = p - pi
t00 = (1-pt[:, 1])*(1-pt[:, 0])
t01 = (1-pt[:, 1])*(pt[:, 0])
t10 = (pt[:, 1])*(1-pt[:, 0])
t11 = (pt[:, 1])*(pt[:, 0])
idxs = pi[:, 1] * mapsz + pi[:, 0]
floorN[:] += np.bincount(idxs, t00.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
floorN[:] += np.bincount(idxs+1, t01.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
floorN[:] += np.bincount(idxs+mapsz, t10.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
floorN[:] += np.bincount(idxs+mapsz+1, t11.reshape(-1), mapsz*mapsz).reshape(mapsz, mapsz)
mask = floormask
for i in range(3):
floormap[:, :, i] += np.bincount(idxs, t00*bgr[mask, i][mask2], mapsz*mapsz).reshape(mapsz, mapsz)
floormap[:, :, i] += np.bincount(idxs+1, t01*bgr[mask, i][mask2], mapsz*mapsz).reshape(mapsz, mapsz)
floormap[:, :, i] += np.bincount(idxs+mapsz, t10*bgr[mask, i][mask2], mapsz*mapsz).reshape(mapsz, mapsz)
floormap[:, :, i] += np.bincount(idxs+mapsz+1, t11*bgr[mask, i][mask2], mapsz*mapsz).reshape(mapsz, mapsz)
floormean[:] = floormap / floorN[:, :, None]
def main(fname, skips=0):
f = open(fname, 'rb')
ts0 = None
theta = -0.09
ri = recordreader.RecordIterator(f)
for _ in range(skips):
ri.__next__()
xi, yi = mapsz/2, mapsz/2
ptrange = np.max(np.linalg.norm(pts[0], axis=1))
firstiter = False
if firstiter:
poses = []
else:
poses = np.load("poses.npy")
frameno = 0
vidout = None
if False:
vidout = cv2.VideoWriter("ba.mp4", cv2.VideoWriter_fourcc(
'X', '2', '6', '4'), 30, (640, 480), True)
lastw = None
for frame in ri:
gray = frame['yuv420'][:480]
bgr = cv2.cvtColor(frame['yuv420'], cv2.COLOR_YUV2BGR_I420)
if ts0 is None:
ts0 = frame['tstamp'] - 1.0/30
ts = frame['tstamp']
dt = ts - ts0
ts0 = ts
# throttle, steering, accel, gyro, servo, wheels, periods
gz = frame['carstate'][3][2]
wheels = frame['carstate'][5]
ds = 0
if lastw is not None:
ds = (wheels - lastw)[0]
lastw = wheels
theta -= gz*dt
S, C = np.sin(theta), np.cos(theta)
# now solve for x, y, theta
xi -= ds*WHEELTICK_SCALE*S
yi -= ds*WHEELTICK_SCALE*C
gm = gray[ceilmask]
def L(X):
x, y, theta = X
# m, n = Maplookup(x, y, theta)
# i really thought that this would work better,
# but it totally doesn't
# return (gray[mask].astype(np.float32)*n - m)
# odometry constraint
return np.concatenate([(gm - Maplookup(x, y, theta)) / 170.0,
1e-3*np.array([xi - x, yi - y])])
if False:
L0 = np.sum(L([xi, yi, theta])**2)
Lx = np.sum(L([xi+1, yi, theta])**2)
Ly = np.sum(L([xi, yi+1, theta])**2)
Lt = np.sum(L([xi, yi, theta+0.01])**2)
print("jac: ", (Lx - L0), (Ly - L0), (Lt - L0)/0.01)
if firstiter:
lb = ptrange*Z
ub = mapsz - 2 - ptrange*Z
soln = scipy.optimize.least_squares(
                L, x0=np.array([xi, yi, theta]))
import numpy as np
import numba as nb
from numba import types, typed, typeof
from numba import jit
from numba.experimental import jitclass
from nrc_spifpy.spif import TIME_CHUNK
# The size of the metadata in a particle record
# Word 1 = Flag 2S
# Word 2 = word for h image metadata
# Word 3 = word for v image metadata
# Word 4 = word for particle count
# Word 5 = word for number of slices
METADATA_LENGTH = 5
# Offsets to find specific metadata in an image record
WORD_H_OFFSET = 1
WORD_V_OFFSET = 2
WORD_PC_OFFSET = 3
WORD_NUM_SLICE_OFFSET = 4
# Easier to define here than to have to flip 1 and 0 in the code
SHADED_VAL = 0
CLEAR_VAL = 1
# Useful datatypes
decoded_word_type = np.dtype([
("is_image_slice", "u2"),
("is_start_slice", "u2"),
("num_shaded", "u2"),
("num_clear", "u2")
])
class ImageMetadataContainer:
def __init__(self):
self.buffer_idx = 0
self.n_h = 0
self.timing_h = 0
self.mismatch_h = 0
self.fifo_h = 0
self.overload_h = 0
self.n_v = 0
self.timing_v = 0
self.mismatch_v = 0
self.fifo_v = 0
self.overload_v = 0
self.particle_count = 0
self.num_slices = 0
self.h_start = 0
self.h_end = 0
self.v_start = 0
self.v_end = 0
self.frame_len = 0
self.image_in_buffer = 0
class ImageMetadataProcessor:
"""
    This is for words 2 and 3 in a particle frame
NH (Word 2)
-----------------------------------------------------------
Bits 0–11 Number of horizontal words–Includes Timing Words if present
Bit 12 – 1 = Timing Words not found
Bit 13 – Timing Word mismatch
Bit 14 — FIFO Empty (means the next particle was cut off)
Bit 15 – The last two words of the horizontal data record are overload timing words
NV (Word 3)
-------------------------------------------------------------
Bits 0 –11 Number of vertical words–Includes Timing Words if not same as the horizontal Timing Word and the TW were found.
Bit 12 –1 = Timing Words not found
Bit 13 –Timing Word mismatch
Bit 14-FIFO Empty before timing word found
Bit 15 –The last two words of the vertical data record are overload timing words
"""
def __init__(self) -> None:
pass
def process_metadata(self, buffer_idx, buffer):
metadata = ImageMetadataContainer()
metadata.buffer_idx = buffer_idx
metadata.n_h = num_words(buffer[buffer_idx + WORD_H_OFFSET])
metadata.timing_h = timing_words_not_found(buffer[buffer_idx + WORD_H_OFFSET])
metadata.mismatch_h = timing_word_mismatch(buffer[buffer_idx + WORD_H_OFFSET])
metadata.fifo_h = fifo_empty(buffer[buffer_idx + WORD_H_OFFSET])
metadata.overload_h = overload_timing_words_exist(buffer[buffer_idx + WORD_H_OFFSET])
metadata.n_v = num_words(buffer[buffer_idx + WORD_V_OFFSET])
metadata.timing_v = timing_words_not_found(buffer[buffer_idx + WORD_V_OFFSET])
metadata.mismatch_v = timing_word_mismatch(buffer[buffer_idx + WORD_V_OFFSET])
metadata.fifo_v = fifo_empty(buffer[buffer_idx + WORD_V_OFFSET])
metadata.overload_v = overload_timing_words_exist(buffer[buffer_idx + WORD_V_OFFSET])
metadata.particle_count = buffer[buffer_idx + WORD_PC_OFFSET]
metadata.num_slices = buffer[buffer_idx + WORD_NUM_SLICE_OFFSET]
metadata.h_start = metadata.buffer_idx + METADATA_LENGTH
metadata.h_end = metadata.h_start + metadata.n_h
metadata.v_start = metadata.buffer_idx + METADATA_LENGTH + metadata.n_h
metadata.v_end = metadata.v_start + metadata.n_v
metadata.frame_len = METADATA_LENGTH + metadata.n_h + metadata.n_v
metadata.image_in_buffer = (metadata.buffer_idx + metadata.frame_len) < 2048
return metadata
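# Usage sketch (editor's note, not part of the original module; flag_idx and
# buffer are placeholder names): given a raw record buffer of 16-bit words and
# the index of a particle-frame flag word,
#     meta = ImageMetadataProcessor().process_metadata(flag_idx, buffer)
#     raw  = RawImageExtractor().extract_raw_images(meta, buffer)
# meta.frame_len is the stride to the next frame flag, and
# meta.image_in_buffer is False when the frame would extend past the
# 2048-word record buffer.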
@jit(nopython = True)
def num_words(word):
# Bit masking out of a 16-bit number
# to only get the 12 bit component
return word & 0b0000111111111111
@jit(nopython = True)
def timing_words_not_found(word):
# Bitmask to get 12th bit only, then bit shift right
# 12 spots to keep only that bit
return (word & 0b0001000000000000) >> 12
@jit(nopython = True)
def timing_word_mismatch(word):
# Bitmask to get 13th bit only, then bit shift right
# 13 spots to keep only that bit
return (word & 0b0010000000000000) >> 13
@jit(nopython = True)
def fifo_empty(word):
# Bitmask to get 14th bit only, then bit shift right
# 14 spots to keep only that bit
return (word & 0b0100000000000000) >> 14
@jit(nopython = True)
def overload_timing_words_exist(word):
# Bitmask to get 15th bit only, then bit shift right
# 15 spots to keep only that bit
return (word & 0b1000000000000000) >> 15
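# Worked example (editor's note, word value assumed): for a metadata word of
# 0x9005 = 0b1001_0000_0000_0101,
#     num_words(0x9005)                   -> 5  (bits 0-11)
#     timing_words_not_found(0x9005)      -> 1  (bit 12)
#     timing_word_mismatch(0x9005)        -> 0  (bit 13)
#     fifo_empty(0x9005)                  -> 0  (bit 14)
#     overload_timing_words_exist(0x9005) -> 1  (bit 15)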
class RawImageContainer:
def __init__(self) -> None:
self.raw_image_h = np.array([], dtype=np.uint16)
self.raw_image_v = np.array([], dtype=np.uint16)
class ImageTimewordContainer:
def __init__(self) -> None:
self.timeword_h_upper = 0
self.timeword_h_lower = 0
self.timeword_v_upper = 0
self.timeword_v_lower = 0
class RawImageExtractor:
def __init__(self) -> None:
self.raw_image_container = RawImageContainer()
self.image_timeword_container = ImageTimewordContainer()
def extract_raw_images(self, metadata, buffer):
self.raw_image_container = RawImageContainer()
raw_image_h = buffer[metadata.h_start:metadata.h_end]
raw_image_v = buffer[metadata.v_start:metadata.v_end]
if metadata.timing_h == 0:
raw_image_h = raw_image_h[:-2]
if metadata.timing_v == 0:
raw_image_v = raw_image_v[:-2]
self.raw_image_container.raw_image_h = raw_image_h
self.raw_image_container.raw_image_v = raw_image_v
return self.raw_image_container
def extract_image_timewords(self, metadata, buffer):
self.image_timeword_container = ImageTimewordContainer()
raw_image_h = buffer[metadata.h_start:metadata.h_end]
raw_image_v = buffer[metadata.v_start:metadata.v_end]
if (metadata.timing_h == 0) and (len(raw_image_h) >= 2):
self.image_timeword_container.timeword_h_upper = raw_image_h[-2]
self.image_timeword_container.timeword_h_lower = raw_image_h[-1]
if (metadata.timing_v == 0) and (len(raw_image_v) >= 2):
self.image_timeword_container.timeword_v_upper = raw_image_v[-2]
self.image_timeword_container.timeword_v_lower = raw_image_v[-1]
return self.image_timeword_container
class DecodedImageContainer:
def __init__(self) -> None:
self.decoded_image_h = np.empty(0, dtype = decoded_word_type)
self.decoded_image_v = np.empty(0, dtype = decoded_word_type)
class RawImageDecoder:
def __init__(self) -> None:
pass
def decode_dual_channel_images(self, raw_image_container):
decoded_image_container = DecodedImageContainer()
decoded_image_container.decoded_image_h = decode_image(raw_image_container.raw_image_h)
decoded_image_container.decoded_image_v = decode_image(raw_image_container.raw_image_v)
return decoded_image_container
@jit
def decode_image(encoded_image):
decoded_image = np.zeros(len(encoded_image), dtype = decoded_word_type)
for i, word in enumerate(encoded_image):
if word == 0x7fff:
decoded_image['is_image_slice'][i] = 1
decoded_image['is_start_slice'][i] = 1
decoded_image['num_clear'][i] = 128
decoded_image['num_shaded'][i] = 0
elif word == 0x4000:
decoded_image['is_image_slice'][i] = 1
decoded_image['is_start_slice'][i] = 1
decoded_image['num_clear'][i] = 0
decoded_image['num_shaded'][i] = 128
else:
decoded_image['is_image_slice'][i] = ((word & 2**15) >> 15) == 0
decoded_image['is_start_slice'][i] = (word & 2**14) >> 14
decoded_image['num_shaded'][i] = (word & 0b0011111110000000) >> 7
decoded_image['num_clear'][i] = (word & 0b0000000001111111)
valid_image_words = decoded_image['is_image_slice'] == True
return decoded_image[valid_image_words]
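# Illustrative decode (editor's note, word value assumed): the compressed word
# 0x4183 = 0b0100_0001_1000_0011 is an image slice (bit 15 clear) that starts a
# new slice (bit 14 set) with num_shaded = 3 (bits 7-13) and num_clear = 3
# (bits 0-6).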
class DecompressedImageContainer:
def __init__(self) -> None:
self.decompressed_image_h = np.array([], np.uint8)
self.decompressed_image_v = np.array([], np.uint8)
class DecodedImageDecompressor:
def __init__(self) -> None:
pass
def decompress_image(self, decoded_image_container):
decompressed_image_container = DecompressedImageContainer()
decompressed_image_container.decompressed_image_h = self.decompress_single_channel_image(decoded_image_container.decoded_image_h)
decompressed_image_container.decompressed_image_v = self.decompress_single_channel_image(decoded_image_container.decoded_image_v)
return decompressed_image_container
def decompress_single_channel_image(self, decoded_image):
if len(decoded_image) == 0:
return []
else:
return decompress_complete_image(decoded_image)
@jit(nopython = True)
def get_complete_image_slice_inds(start_slice_flags):
image_slice_id = np.cumsum(start_slice_flags)
image_slice_inds = []
for i in np.unique(image_slice_id):
image_slice_inds.append(
np.ravel(
                np.argwhere(image_slice_id == i)
            )
        )
# Allen Institute Software License - This software license is the 2-clause BSD
# license plus a third clause that prohibits redistribution for commercial
# purposes without further permission.
#
# Copyright 2015-2016. Allen Institute. All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice,
# this list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Redistributions for commercial purposes are not permitted without the
# Allen Institute's written permission.
# For purposes of this license, commercial purposes is the incorporation of the
# Allen Institute's software into anything for which you will charge fees or
# other compensation. Contact <EMAIL> for commercial licensing
# opportunities.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
# ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE
# LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
# CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
# SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
# INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
# CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
# ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
# POSSIBILITY OF SUCH DAMAGE.
#
import sys
import math
import numpy as np
import scipy.signal as signal
import logging
# Design notes:
# to generate an average feature file, all sweeps must have all features
# to generate a fitness score of a sweep to a feature file, the sweep
# must have all features in the file. If one is absent, a penalty
# of TODO ??? will be assessed
# set of features
class EphysFeatures( object ):
def __init__(self, name):
# feature mean and standard deviations
self.mean = {}
self.stdev = {}
# human-readable names for features
self.glossary = {}
# table indicating how to score feature
# 'hit' feature exists:
# 'ignore' do nothing
# 'stdev' score is # stdevs from target mean
# 'miss' feature absent:
# 'constant' score = scoring['constant']
# 'mean_mult' score = mean * scoring['mean_mult']
#
self.scoring = {}
self.name = name
################################################################
# ignore scores
ignore_score = { "hit": "ignore" }
self.glossary["n_spikes"] = "Number of spikes"
self.scoring["n_spikes"] = ignore_score
################################################################
# ignore misses
ignore_miss = { "hit":"stdev", "miss":"const", "const":0 }
self.glossary["adapt"] = "Adaptation index"
self.scoring["adapt"] = ignore_miss
self.glossary["latency"] = "Time to first spike (ms)"
self.scoring["latency"] = ignore_miss
################################################################
# base miss off mean
mean_score = { "hit":"stdev", "miss":"mean_mult", "mean_mult":2 }
self.glossary["ISICV"] = "ISI-CV"
self.scoring["ISICV"] = mean_score
################################################################
# normal scoring
normal_score = { "hit":"stdev", "miss":"const", "const":20 }
self.glossary["isi_avg"] = "Average ISI (ms)"
self.scoring["isi_avg"] = ignore_score
self.glossary["doublet"] = "Doublet ISI (ms)"
self.scoring["doublet"] = normal_score
self.glossary["f_fast_ahp"] = "Fast AHP (mV)"
self.scoring["f_fast_ahp"] = normal_score
self.glossary["f_slow_ahp"] = "Slow AHP (mV)"
self.scoring["f_slow_ahp"] = normal_score
self.glossary["f_slow_ahp_time"] = "Slow AHP time"
self.scoring["f_slow_ahp_time"] = normal_score
self.glossary["base_v"] = "Baseline voltage (mV)"
self.scoring["base_v"] = normal_score
#self.glossary["base_v2"] = "Baseline voltage 2 (mV)"
#self.scoring["base_v2"] = normal_score
#self.glossary["base_v3"] = "Baseline voltage 3 (mV)"
#self.scoring["base_v3"] = normal_score
################################################################
# per spike scoring
perspike_score = { "hit":"perspike", "miss":"const", "const":20, "skip_last_n":0 }
self.glossary["f_peak"] = "Spike height (mV)"
self.scoring["f_peak"] = perspike_score.copy()
self.glossary["f_trough"] = "Spike depth (mV)"
self.scoring["f_trough"] = perspike_score.copy()
self.scoring["f_trough"]["skip_last_n"] = 1
# self.glossary["f_w"] = "Spike width at -30 mV (ms)"
# self.scoring["f_w"] = perspike_score.copy()
self.glossary["upstroke"] = "Peak upstroke (mV/ms)"
self.scoring["upstroke"] = perspike_score.copy()
self.glossary["upstroke_v"] = "Vm of peak upstroke (mV)"
self.scoring["upstroke_v"] = perspike_score.copy()
self.glossary["downstroke"] = "Peak downstroke (mV/ms)"
self.scoring["downstroke"] = perspike_score.copy()
self.glossary["downstroke_v"] = "Vm of peak downstroke (mV)"
self.scoring["downstroke_v"] = perspike_score.copy()
self.glossary["threshold"] = "Threshold voltage (mV)"
self.scoring["threshold"] = perspike_score.copy()
self.glossary["width"] = "Spike width at half-max (ms)"
self.scoring["width"] = perspike_score.copy()
self.scoring["width"]["skip_last_n"] = 1
self.glossary["thresh_ramp"] = "Change in dv/dt over first 5 mV past threshold (mV/ms)"
self.scoring["thresh_ramp"] = perspike_score.copy()
################################################################
# heavily penalize when there are no spikes
spike_score = { "hit":"stdev", "miss":"const", "const":250 }
self.glossary["rate"] = "Firing rate (Hz)"
self.scoring["rate"] = spike_score
def print_out(self):
print(("Features from " + self.name))
for k in list(self.mean.keys()):
if k in self.glossary:
st = "%30s = " % self.glossary[k]
if self.mean[k] is not None:
st += "%g" % self.mean[k]
else:
st += "--------"
if k in self.stdev and self.stdev[k] is not None:
st += " +/- %g" % self.stdev[k]
print(st)
# initialize summary feature set from file
def clone(self, param_dict):
for k in list(param_dict.keys()):
self.mean[k] = param_dict[k]["mean"]
self.stdev[k] = param_dict[k]["stdev"]
class EphysFeatureExtractor( object ):
def __init__(self):
# list of feature set instances
self.feature_list = []
# names of each element in feature list
self.feature_source = []
# feature set object representing combination of all instances
self.summary = None
# adds new feature set instance to feature_list
def process_instance(self, name, v, curr, t, onset, dur, stim_name):
feature = EphysFeatures(name)
################################################################
# set stop time -- run until end of stimulus or end of sweep
# comment-out the one of the two approaches
# detect spikes only during stimulus
start = onset
stop = onset + dur
# detect spikes for all of sweep
#start = 0
#stop = t[-1]
################################################################
# pull out spike times
# calculate the derivative only within target window
# otherwise get spurious detection at ends of stimuli
# filter with 10kHz cutoff if constant 200kHz sample rate (ie experimental trace)
start_idx = np.where(t >= start)[0][0]
stop_idx = np.where(t >= stop)[0][0]
v_target = v[start_idx:stop_idx]
if np.abs(t[1] - t[0] - 5e-6) < 1e-7 and np.var(np.diff(t)) < 1e-6:
b, a = signal.bessel(4, 0.1, "low")
smooth_v = signal.filtfilt(b, a, v_target, axis=0)
dv = np.diff(smooth_v)
else:
dv = np.diff(v_target)
dvdt = dv / (np.diff(t[start_idx:stop_idx]) * 1e3) # in mV/ms
dv_cutoff = 20
thresh_pct = 0.05
spikes = []
        temp_spk_idxs = np.where(np.diff(np.greater_equal(dvdt, dv_cutoff).astype(int)) == 1)[0] # find positive-going crossings of dv_cutoff (20 mV/ms)
spk_idxs = []
for i, temp in enumerate(temp_spk_idxs):
if i == 0:
spk_idxs.append(temp)
elif np.any(dvdt[temp_spk_idxs[i - 1]:temp] < 0):
# check if the dvdt has gone back down below zero between presumed spike times
# sometimes the dvdt bobbles around detection threshold and produces spurious guesses at spike times
spk_idxs.append(temp)
spk_idxs += start_idx # set back to the "index space" of the original trace
# recalculate full dv/dt for feature analysis (vs spike detection)
if np.abs(t[1] - t[0] - 5e-6) < 1e-7 and np.var(np.diff(t)) < 1e-6:
b, a = signal.bessel(4, 0.1, "low")
smooth_v = signal.filtfilt(b, a, v, axis=0)
dv = np.diff(smooth_v)
else:
dv = np.diff(v)
dvdt = dv / (np.diff(t) * 1e3) # in mV/ms
# First time through, accumulate upstrokes to calculate average threshold target
for spk_n, spk_idx in enumerate(spk_idxs):
# Etay defines spike as time of threshold crossing
spk = {}
if spk_n < len(spk_idxs) - 1:
next_idx = spk_idxs[spk_n + 1]
else:
next_idx = stop_idx
if spk_n > 0:
prev_idx = spk_idxs[spk_n - 1]
else:
prev_idx = start_idx
# Find the peak
peak_idx = np.argmax(v[spk_idx:next_idx]) + spk_idx
spk["peak_idx"] = peak_idx
spk["f_peak"] = v[peak_idx]
spk["f_peak_i"] = curr[peak_idx]
spk["f_peak_t"] = t[peak_idx]
# Check if end of stimulus interval cuts off spike - if so, don't process spike
if spk_n == len(spk_idxs) - 1 and peak_idx == next_idx-1:
continue
if spk_idx == peak_idx:
continue # this was bugfix, but why? ramp?
# Determine maximum upstroke of spike
upstroke_idx = np.argmax(dvdt[spk_idx:peak_idx]) + spk_idx
spk["upstroke"] = dvdt[upstroke_idx]
if np.isnan(spk["upstroke"]): # sometimes dvdt will be NaN because of multiple cvode points at same time step
close_idx = upstroke_idx + 1
while (np.isnan(dvdt[close_idx])):
close_idx += 1
spk["upstroke_idx"] = close_idx
spk["upstroke"] = dvdt[close_idx]
spk["upstroke_v"] = v[close_idx]
spk["upstroke_i"] = curr[close_idx]
spk["upstroke_t"] = t[close_idx]
else:
spk["upstroke_idx"] = upstroke_idx
spk["upstroke_v"] = v[upstroke_idx]
spk["upstroke_i"] = curr[upstroke_idx]
spk["upstroke_t"] = t[upstroke_idx]
# Preliminarily define threshold where dvdt = 5% * max upstroke
thresh_pct = 0.05
find_thresh_idxs = np.where(dvdt[prev_idx:upstroke_idx] <= thresh_pct * spk["upstroke"])[0]
if len(find_thresh_idxs) < 1: # Can't find a good threshold value - probably a bad simulation case
# Fall back to the upstroke value
threshold_idx = upstroke_idx
else:
threshold_idx = find_thresh_idxs[-1] + prev_idx
spk["threshold_idx"] = threshold_idx
spk["threshold"] = v[threshold_idx]
spk["threshold_v"] = v[threshold_idx]
spk["threshold_i"] = curr[threshold_idx]
spk["threshold_t"] = t[threshold_idx]
spk["rise_time"] = spk["f_peak_t"] - spk["threshold_t"]
PERIOD = t[1] - t[0]
width_volts = (v[peak_idx] + v[threshold_idx]) / 2
recording_width = False
for i in range(threshold_idx, min(len(v), threshold_idx + int(0.001 / PERIOD))):
if not recording_width and v[i] >= width_volts:
recording_width = True
idx0 = i
elif recording_width and v[i] < width_volts:
spk["half_height_width"] = t[i] - t[idx0]
break
# </KEITH>
# Check for things that are probably not spikes:
# if there is more than 2 ms between the detection event and the peak, don't count it
if t[peak_idx] - t[threshold_idx] > 0.002:
continue
# if the "spike" is less than 2 mV, don't count it
if v[peak_idx] - v[threshold_idx] < 2.0:
continue
# if the absolute value of the peak is less than -30 mV, don't count it
if v[peak_idx] < -30.0:
continue
spikes.append(spk)
# Refine threshold target based on average of all spikes
if len(spikes) > 0:
threshold_target = np.array([spk["upstroke"] for spk in spikes]).mean() * thresh_pct
for spk_n, spk in enumerate(spikes):
if spk_n < len(spikes) - 1:
next_idx = spikes[spk_n + 1]["threshold_idx"]
else:
next_idx = stop_idx
if spk_n > 0:
prev_idx = spikes[spk_n - 1]["peak_idx"]
else:
prev_idx = start_idx
# Restore variables from before
# peak_idx = spk['peak_idx']
peak_idx = np.argmax(v[spk['threshold_idx']:next_idx]) + spk['threshold_idx']
spk["peak_idx"] = peak_idx
spk["f_peak"] = v[peak_idx]
spk["f_peak_i"] = curr[peak_idx]
spk["f_peak_t"] = t[peak_idx]
# Determine maximum upstroke of spike
# upstroke_idx = spk['upstroke_idx']
upstroke_idx = np.argmax(dvdt[spk['threshold_idx']:peak_idx]) + spk['threshold_idx']
spk["upstroke"] = dvdt[upstroke_idx]
if np.isnan(spk["upstroke"]): # sometimes dvdt will be NaN because of multiple cvode points at same time step
close_idx = upstroke_idx + 1
while (np.isnan(dvdt[close_idx])):
close_idx += 1
spk["upstroke_idx"] = close_idx
spk["upstroke"] = dvdt[close_idx]
spk["upstroke_v"] = v[close_idx]
spk["upstroke_i"] = curr[close_idx]
spk["upstroke_t"] = t[close_idx]
else:
spk["upstroke_idx"] = upstroke_idx
spk["upstroke_v"] = v[upstroke_idx]
spk["upstroke_i"] = curr[upstroke_idx]
spk["upstroke_t"] = t[upstroke_idx]
# Find threshold based on average target
find_thresh_idxs = np.where(dvdt[prev_idx:upstroke_idx] <= threshold_target)[0]
if len(find_thresh_idxs) < 1: # Can't find a good threshold value - probably a bad simulation case
# Fall back to the upstroke value
threshold_idx = upstroke_idx
else:
threshold_idx = find_thresh_idxs[-1] + prev_idx
spk["threshold_idx"] = threshold_idx
spk["threshold"] = v[threshold_idx]
spk["threshold_v"] = v[threshold_idx]
spk["threshold_i"] = curr[threshold_idx]
spk["threshold_t"] = t[threshold_idx]
# Define the spike time as threshold time
spk["t_idx"] = threshold_idx
spk["t"] = t[threshold_idx]
# Save the -30 mV crossing time for backward compatibility with Etay code
overn30_idxs = np.where(v[threshold_idx:peak_idx] >= -30)[0]
if len(overn30_idxs) > 0:
spk["t_idx_n30"] = overn30_idxs[0] + threshold_idx
else: # fall back to threshold definition if spike doesn't cross -30 mV
spk["t_idx_n30"] = threshold_idx
spk["t_n30"] = t[spk["t_idx_n30"]]
# Figure out initial "slope" of phase plot post-threshold
plus_5_vec = np.where(v[threshold_idx:upstroke_idx] >= spk["threshold"] + 5)[0]
if len(plus_5_vec) > 0:
thresh_plus_5_idx = plus_5_vec[0] + threshold_idx
spk["thresh_ramp"] = dvdt[thresh_plus_5_idx] - dvdt[threshold_idx]
else:
spk["thresh_ramp"] = dvdt[upstroke_idx] - dvdt[threshold_idx]
# go forward to determine peak downstroke of spike
downstroke_idx = np.argmin(dvdt[peak_idx:next_idx]) + peak_idx
spk["downstroke_idx"] = downstroke_idx
spk["downstroke_v"] = v[downstroke_idx]
spk["downstroke_i"] = curr[downstroke_idx]
spk["downstroke_t"] = t[downstroke_idx]
spk["downstroke"] = dvdt[downstroke_idx]
if np.isnan(spk["downstroke"]): # sometimes dvdt will be NaN because of multiple cvode points at same time step
close_idx = downstroke_idx + 1
while (np.isnan(dvdt[close_idx])):
close_idx += 1
spk["downstroke"] = dvdt[close_idx]
features = {}
feature.mean["base_v"] = v[ | np.where((t > onset - 0.1) & (t < onset - 0.001)) | numpy.where |
try:
import importlib.resources as pkg_resources
except ImportError:
# Try backported to PY<37 `importlib_resources`.
import importlib_resources as pkg_resources
from . import images
from gym import Env, spaces
from time import time
import numpy as np
from copy import copy
import colorsys
import pygame
from pygame.transform import scale
class MinesweeperEnv(Env):
def __init__(self, grid_shape=(10, 15), bombs_density=0.1, n_bombs=None, impact_size=3, max_time=999, chicken=False):
self.grid_shape = grid_shape
self.grid_size = np.prod(grid_shape)
self.n_bombs = max(1, int(bombs_density * self.grid_size)) if n_bombs is None else n_bombs
self.n_bombs = min(self.grid_size - 1, self.n_bombs)
self.flaged_bombs = 0
self.flaged_empty = 0
self.max_time = max_time
if impact_size % 2 == 0:
raise ValueError('Impact_size must be an odd number !')
self.impact_size = impact_size
# Define constants
self.HIDDEN = 0
self.REVEAL = 1
self.FLAG = 2
self.BOMB = self.impact_size ** 2
# Setting up gym Env conventions
nvec_observation = (self.BOMB + 2) * np.ones(self.grid_shape)
self.observation_space = spaces.MultiDiscrete(nvec_observation)
nvec_action = np.array(self.grid_shape + (2,))
self.action_space = spaces.MultiDiscrete(nvec_action)
# Initalize state
self.state = np.zeros(self.grid_shape + (2,), dtype=np.uint8)
## Setup bombs places
idx = np.indices(self.grid_shape).reshape(2, -1)
bombs_ids = np.random.choice(range(self.grid_size), size=self.n_bombs, replace=False)
self.bombs_positions = idx[0][bombs_ids], idx[1][bombs_ids]
## Place numbers
self.semi_impact_size = (self.impact_size-1)//2
bomb_impact = np.ones((self.impact_size, self.impact_size), dtype=np.uint8)
for bombs_id in bombs_ids:
bomb_x, bomb_y = idx[0][bombs_id], idx[1][bombs_id]
x_min, x_max, dx_min, dx_max = self.clip_index(bomb_x, 0)
y_min, y_max, dy_min, dy_max = self.clip_index(bomb_y, 1)
bomb_region = self.state[x_min:x_max, y_min:y_max, 0]
bomb_region += bomb_impact[dx_min:dx_max, dy_min:dy_max]
## Place bombs
self.state[self.bombs_positions + (0,)] = self.BOMB
self.start_time = time()
self.time_left = int(time() - self.start_time)
# Setup rendering
self.pygame_is_init = False
self.chicken = chicken
self.done = False
self.score = 0
def get_observation(self):
observation = copy(self.state[:, :, 1])
revealed = observation == 1
flaged = observation == 2
observation += self.impact_size ** 2 + 1
observation[revealed] = copy(self.state[:, :, 0][revealed])
observation[flaged] -= 1
return observation
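        # Encoding note (editor's): with the default impact_size of 3 the grid
        # returned above holds 0-9 for a revealed cell's bomb count (9 = bomb),
        # 10 for a hidden cell and 11 for a flagged one.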
def reveal_around(self, coords, reward, done, without_loss=False):
if not done:
x_min, x_max, _, _ = self.clip_index(coords[0], 0)
y_min, y_max, _, _ = self.clip_index(coords[1], 1)
region = self.state[x_min:x_max, y_min:y_max, :]
unseen_around = np.sum(region[..., 1] == 0)
if unseen_around == 0:
if not without_loss:
reward -= 0.001
return
flags_around = np.sum(region[..., 1] == 2)
if flags_around == self.state[coords + (0,)]:
unrevealed_zeros_around = np.logical_and(region[..., 0] == 0, region[..., 1] == self.HIDDEN)
if np.any(unrevealed_zeros_around):
zeros_coords = np.argwhere(unrevealed_zeros_around)
for zero in zeros_coords:
coord = (x_min + zero[0], y_min + zero[1])
self.state[coord + (1,)] = 1
self.reveal_around(coord, reward, done, without_loss=True)
self.state[x_min:x_max, y_min:y_max, 1][self.state[x_min:x_max, y_min:y_max, 1] != self.FLAG] = 1
unflagged_bombs_around = np.logical_and(region[..., 0] == self.BOMB, region[..., 1] != self.FLAG)
if np.any(unflagged_bombs_around):
self.done = True
reward, done = -1, True
else:
if not without_loss:
reward -= 0.001
def clip_index(self, x, axis):
max_idx = self.grid_shape[axis]
x_min, x_max = max(0, x-self.semi_impact_size), min(max_idx, x + self.semi_impact_size + 1)
dx_min, dx_max = x_min - (x - self.semi_impact_size), x_max - (x + self.semi_impact_size + 1) + self.impact_size
return x_min, x_max, dx_min, dx_max
def step(self, action):
coords = action[:2]
action_type = action[2] + 1 # 0 -> 1 = reveal; 1 -> 2 = toggle_flag
case_state = self.state[coords + (1,)]
case_content = self.state[coords + (0,)]
NO_BOMBS_AROUND = 0
reward, done = 0, False
self.time_left = self.max_time - time() + self.start_time
if self.time_left <= 0:
score = -(self.n_bombs - self.flaged_bombs + self.flaged_empty)/self.n_bombs
reward, done = score, True
return self.get_observation(), reward, done, {'passed':False}
if action_type == self.REVEAL:
if case_state == self.HIDDEN:
self.state[coords + (1,)] = action_type
if case_content == self.BOMB:
if self.pygame_is_init: self.done = True
reward, done = -1, True
return self.get_observation(), reward, done, {'passed':False}
elif case_content == NO_BOMBS_AROUND:
self.reveal_around(coords, reward, done)
elif case_state == self.REVEAL:
self.reveal_around(coords, reward, done)
reward -= 0.01
else:
reward -= 0.001
self.score += reward
return self.get_observation(), reward, done, {'passed':True}
elif action_type == self.FLAG:
if case_state == self.REVEAL:
reward -= 0.001
else:
flaging = 1
if case_state == self.FLAG:
flaging = -1
self.state[coords + (1,)] = self.HIDDEN
else:
self.state[coords + (1,)] = self.FLAG
if case_content == self.BOMB:
self.flaged_bombs += flaging
else:
self.flaged_empty += flaging
if self.flaged_bombs == self.n_bombs and self.flaged_empty == 0:
reward, done = 2 + self.time_left/self.max_time, True
if np.any(np.logical_and(self.state[..., 0]==9, self.state[..., 1]==1)) or self.done:
reward, done = -1 + self.time_left/self.max_time + (self.flaged_bombs - self.flaged_empty)/self.n_bombs, True
self.score += reward
return self.get_observation(), reward, done, {'passed':False}
def reset(self):
self.__init__(self.grid_shape, n_bombs=self.n_bombs, impact_size=self.impact_size, max_time=self.max_time, chicken=self.chicken)
return self.get_observation()
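    # Minimal usage sketch (editor's note, not part of the original file):
    #     env = MinesweeperEnv(grid_shape=(6, 6), n_bombs=4)
    #     obs = env.reset()
    #     obs, reward, done, info = env.step([2, 3, 0])   # reveal cell (2, 3)
    #     obs, reward, done, info = env.step([0, 0, 1])   # toggle a flag at (0, 0)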
def render(self):
if not self.pygame_is_init:
self._init_pygame()
self.pygame_is_init = True
for event in pygame.event.get():
if event.type == pygame.QUIT: # pylint: disable=E1101
pygame.quit() # pylint: disable=E1101
# Plot background
pygame.draw.rect(self.window, (60, 56, 53), (0, 0, self.height, self.width))
# Plot grid
for index, state in np.ndenumerate(self.state[..., 1]):
self._plot_block(index, state)
# Plot infos
## Score
score_text = self.score_font.render("SCORE", 1, (255, 10, 10))
score = self.score_font.render(str(round(self.score, 4)), 1, (255, 10, 10))
self.window.blit(score_text, (0.1*self.header_size, 0.75*self.width))
self.window.blit(score, (0.1*self.header_size, 0.8*self.width))
## Time left
time_text = self.num_font.render("TIME", 1, (255, 10, 10))
self.time_left = self.max_time - time() + self.start_time
time_left = self.num_font.render(str(int(self.time_left+1)), 1, (255, 10, 10))
self.window.blit(time_text, (0.1*self.header_size, 0.03*self.width))
self.window.blit(time_left, (0.1*self.header_size, 0.1*self.width))
## Bombs left
bombs_text = self.num_font.render("BOMBS", 1, (255, 255, 10))
left_text = self.num_font.render("LEFT", 1, (255, 255, 10))
potential_bombs_left = self.n_bombs - self.flaged_bombs - self.flaged_empty
potential_bombs_left = self.num_font.render(str(int(potential_bombs_left)), 1, (255, 255, 10))
self.window.blit(bombs_text, (0.1*self.header_size, 0.4*self.width))
self.window.blit(left_text, (0.1*self.header_size, 0.45*self.width))
self.window.blit(potential_bombs_left, (0.1*self.header_size, 0.5*self.width))
pygame.display.flip()
pygame.time.wait(10)
if self.done:
pygame.time.wait(3000)
@staticmethod
def _get_color(n, max_n):
BLUE_HUE = 0.6
RED_HUE = 0.0
HUE = RED_HUE + (BLUE_HUE - RED_HUE) * ((max_n - n) / max_n)**3
color = 255 * np.array(colorsys.hsv_to_rgb(HUE, 1, 0.7))
return color
def _plot_block(self, index, state):
position = tuple(self.origin + self.scale_factor * self.BLOCK_SIZE * np.array((index[1], index[0])))
label = None
if state == self.HIDDEN and not self.done:
img_key = 'hidden'
elif state == self.FLAG:
if not self.done:
img_key = 'flag'
else:
content = self.state[index][0]
if content == self.BOMB:
img_key = 'disabled_mine' if not self.chicken else 'disabled_chicken'
else:
img_key = 'misplaced_flag'
else:
content = self.state[index][0]
if content == self.BOMB:
if state == self.HIDDEN:
img_key = 'mine' if not self.chicken else 'chicken'
else:
img_key = 'exploded_mine' if not self.chicken else 'exploded_chicken'
else:
img_key = 'revealed'
label = self.num_font.render(str(content), 1, self._get_color(content, self.BOMB))
self.window.blit(self.images[img_key], position)
if label: self.window.blit(label, position + self.font_offset - (content > 9) * self.decimal_font_offset)
def _init_pygame(self):
pygame.init() # pylint: disable=E1101
# Open Pygame window
self.scale_factor = 2 * min(12 / self.grid_shape[0], 25 / self.grid_shape[1])
self.BLOCK_SIZE = 32
self.header_size = self.scale_factor * 100
self.origin = np.array([self.header_size, 0])
self.width = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[0])
self.height = int(self.scale_factor * self.BLOCK_SIZE * self.grid_shape[1] + self.header_size)
self.window = pygame.display.set_mode((self.height, self.width))
# Setup font for numbers
num_font_size = 20
self.num_font = pygame.font.SysFont("monospace", int(self.scale_factor * num_font_size))
        self.font_offset = self.scale_factor * self.BLOCK_SIZE * np.array([0.325, 0.15])
import numpy as np
from matplotlib import pyplot as plt
from matplotlib import cm as cm
import click
import re
re_time = re.compile(r'^\d+.?\d*e?-?\d*')
re_point = re.compile(
r'([A-Z])\((-?\d+.?\d*e?-?\d*), (-?\d+.?\d*e?-?\d*)\)'
)
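# Example (editor's note): re_point matches strings such as "A(1.5, -2.0)",
# capturing the groups ('A', '1.5', '-2.0').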
H_RANGE = tuple( np.array([-0.5, 0.5]) * 32 )
V_RANGE = tuple( np.array([-0.5, 0.5]) * 18 )
SPACE_FACTOR = 20
SPACE_BIAS = np.array([0.25, 0.25])
from plyfile import PlyData, PlyElement
import open3d as o3d
from pyobb.obb import OBB
import numpy as np
import os
from scipy.spatial import ConvexHull, convex_hull_plot_2d
from scipy.spatial.transform import Rotation as R
import matplotlib.pyplot as plt
import argparse
import utils
def obb_calc(filename, gravity=np.array((0.0,1.0,0.0)), align_axis=np.array((0.0,0.0,-1.0))):
o3d_mesh = o3d.io.read_triangle_mesh(filename)
points = np.asarray(o3d_mesh.vertices)
obb_center, obb_size, trans_inv = gravity_aligned_mobb(points, gravity, align_axis)
obb = o3d.geometry.OrientedBoundingBox(obb_center, trans_inv, obb_size)
return obb
def gravity_aligned_mobb(points, gravity, align_axis):
def from2vectors(gravity, axis=align_axis):
gravity = gravity/np.linalg.norm(gravity)
axis = axis/np.linalg.norm(axis)
vec = np.cross(gravity, axis)
rot = np.arccos(np.dot(gravity, axis))
r = R.from_rotvec(rot * vec)
return r.as_matrix()
# trigonometry, law of sines: a/sin(A) = b/sin(B)
def intersect_lines(s0, d0, s1, d1):
"""
s0, s1: 2D coordinates of a point on each line
d0, d1: 2D direction vectors of the two lines
"""
sin_a = np.cross(d0, d1)
vec_s = s1-s0
t = np.cross(vec_s, d1)/sin_a
return s0 + t*d0
def mobb_area(left_start, left_dir, right_start, right_dir,
top_start, top_dir, bottom_start, bottom_dir):
upper_left = intersect_lines(left_start, left_dir, top_start, top_dir)
upper_right = intersect_lines(right_start, right_dir, top_start, top_dir)
bottom_left = intersect_lines(bottom_start, bottom_dir, left_start, left_dir)
return np.linalg.norm(upper_left-upper_right) * np.linalg.norm(upper_left-bottom_left)
align_gravity = from2vectors(gravity, align_axis)
tmp_points = np.matmul(align_gravity, points.transpose()).transpose()
points_2d = tmp_points[:, 0:2]
hull = ConvexHull(points_2d)
# plot conver hull
# print(len(hull.vertices))
# plt.plot(points_2d[:,0], points_2d[:,1], '.')
# plt.plot(points_2d[hull.vertices,0], points_2d[hull.vertices,1], 'r--', lw=2)
# plt.plot(points_2d[(hull.vertices[-1], hull.vertices[0]),0], points_2d[(hull.vertices[-1], hull.vertices[0]),1], 'r--', lw=2)
# plt.plot(points_2d[hull.vertices[0],0], points_2d[hull.vertices[0],1], 'ro')
# plt.show()
assert len(hull.vertices) > 0, 'convex hull vertices number must be positive'
# the vertices are in counterclockwise order
hull_points = points_2d[hull.vertices]
edge_dirs = np.roll(hull_points, 1, axis=0) - hull_points
edge_norm = np.linalg.norm(edge_dirs, axis=1)
edge_dirs /= edge_norm[:, None]
min_idx = np.argmin(hull_points, axis=0)
import numpy as np
import tensorflow as tf
from tensorflow.python.layers import core as layers_core
import RAKE, math, random
from zpar import ZPar
from data import array_data
import torch, sys,os
import pickle as pkl
from copy import copy
from bert.bertinterface import BertEncoding, BertSimilarity
from utils import get_corpus_bleu_scores, appendtext
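# output_p: run a PyTorch language model on one sentence of token ids and return the
# per-position vocabulary distribution as a numpy array of shape (num_steps, vocab).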
def output_p(sent, model):
# list
sent = torch.tensor(sent, dtype=torch.long).cuda()
output = model.predict(sent) # 1,15,300003
return output.squeeze(0).cpu().detach().numpy()
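# keyword_pos2sta_vec: turn RAKE keyword flags plus POS tags into a 0/1 vector over the
# first num_steps-1 positions, marking tokens that should be preserved during editing
# (capped by max_key_rate / max_key).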
def keyword_pos2sta_vec(option,keyword, pos):
key_ind=[]
# pos=pos[:option.num_steps-1]
pos=pos[:option.num_steps-1]
for i in range(len(pos)):
if pos[i]=='NNP':
key_ind.append(i)
elif pos[i] in ['NN', 'NNS'] and keyword[i]==1:
key_ind.append(i)
elif pos[i] in ['VBZ'] and keyword[i]==1:
key_ind.append(i)
elif keyword[i]==1:
key_ind.append(i)
elif pos[i] in ['NN', 'NNS','VBZ']:
key_ind.append(i)
key_ind=key_ind[:max(int(option.max_key_rate*len(pos)), option.max_key)]
sta_vec=[]
for i in range(len(keyword)):
if i in key_ind:
sta_vec.append(1)
else:
sta_vec.append(0)
return sta_vec
def read_data_use(option, sen2id):
file_name = option.use_data_path
max_length = option.num_steps
dict_size = option.dict_size
Rake = RAKE.Rake(RAKE.SmartStopList())
z=ZPar(option.pos_path)
tagger = z.get_tagger()
with open(file_name) as f:
data=[]
vector=[]
sta_vec_list=[]
j=0
for line in f:
sta_vec=list(np.zeros([option.num_steps-1]))
keyword=Rake.run(line.strip())
pos_list=tagger.tag_sentence(line.strip()).split()
# pos=zip(*[x.split('/') for x in pos_list])[0]
pos=list(zip(*[x.split('/') for x in pos_list]))[0]
if keyword!=[]:
keyword=list(list(zip(*keyword))[0])
keyword_new=[]
linewords = line.strip().split()
for i in range(len(linewords)):
for item in keyword:
length11 = len(item.split())
if ' '.join(linewords[i:i+length11])==item:
keyword_new.extend([i+k for k in range(length11)])
for i in range(len(keyword_new)):
ind=keyword_new[i]
if ind<=option.num_steps-2:
sta_vec[ind]=1
if option.keyword_pos==True:
sta_vec_list.append(keyword_pos2sta_vec(option,sta_vec,pos))
else:
sta_vec_list.append(list(np.zeros([option.num_steps-1])))
data.append(sen2id(line.strip().lower().split()))
data_new=array_data(data, max_length, dict_size)
return data_new, sta_vec_list # sentence, keyvector
def read_data_use1(option, sen2id):
file_name = option.use_data_path
max_length = option.num_steps
dict_size = option.dict_size
Rake = RAKE.Rake(RAKE.SmartStopList())
z=ZPar(option.pos_path)
tagger = z.get_tagger()
with open(file_name) as f:
data=[]
vector=[]
sta_vec_list=[]
j=0
for line in f:
print('sentence:'+line)
sta_vec=list(np.zeros([option.num_steps-1]))
keyword=Rake.run(line.strip())
pos_list=tagger.tag_sentence(line.strip()).split()
# pos=zip(*[x.split('/') for x in pos_list])[0]
pos=list(zip(*[x.split('/') for x in pos_list]))[0]
print(keyword)
if keyword!=[]:
keyword=list(list(zip(*keyword))[0])
keyword_new=[]
for item in keyword:
tem1=[line.strip().split().index(x) for x in item.split() if x in line.strip().split()]
print('id',tem1)
keyword_new.extend(tem1)
print(keyword_new)
for i in range(len(keyword_new)):
ind=keyword_new[i]
if ind<=option.num_steps-2:
sta_vec[ind]=1
if option.keyword_pos==True:
sta_vec_list.append(keyword_pos2sta_vec(option,sta_vec,pos))
else:
sta_vec_list.append(list(np.zeros([option.num_steps-1])))
print(keyword_pos2sta_vec(option,sta_vec, pos))
data.append(sen2id(line.strip().lower().split()))
data_new=array_data(data, max_length, dict_size)
return data_new, sta_vec_list # sentence, keyvector
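# choose_action: sample an index from a discrete distribution c by building the
# cumulative sum and inverting a uniform draw.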
def choose_action(c):
r=np.random.random()
c=np.array(c)
for i in range(1, len(c)):
c[i]=c[i]+c[i-1]
for i in range(len(c)):
if c[i]>=r:
return i
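# sigma_* helpers: the sigma_word variants map raw cosine similarities to sharper weights
# (zero below a threshold, a steep linear ramp, then identity); sigma_bleu is reversed,
# down-weighting candidates whose BLEU against the original is too high (too similar).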
def sigma_word(x):
if x>0.7:
return x
elif x>0.65:
return (x-0.65)*14
else:
return 0
#return max(0, 1-((x-1))**2)
#return (((np.abs(x)+x)*0.5-0.6)/0.4)**2
def sigma_word1(x):
if x>0.9:
return x
elif x>0.8:
return (x-0.8)*9
else:
return 0
#return max(0, 1-((x-1))**2)
#return (((np.abs(x)+x)*0.5-0.6)/0.4)**2
def sigma_word_bert(x):
# x:K,
x9 = torch.gt(x,0.9).float()
x8 = torch.gt(x,0.8).float()
return x*x9+(x-0.8)*9*x8
def sigma_bleu(x):
if x>0.9:
return 1-x+0.01 # 0.1-0
elif x>0.8:
return 1-(x-0.8)*9 # 0.1-1
else:
return 1
#return max(0, 1-((x-1))**2)
#return (((np.abs(x)+x)*0.5-0.6)/0.4)**2
def sigmoid(x):
s = 1 / (1 + np.exp(-x))
return s
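# sen2mat: map a sentence (list of token ids) to a matrix of word embeddings; start/pad
# tokens are skipped and out-of-vocabulary words fall back to a random vector.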
def sen2mat(s, id2sen, emb_word, option):
mat=[]
for item in s:
if item==option.dict_size+2:
continue
if item==option.dict_size+1:
break
word=id2sen([item])[0]
if word in emb_word:
mat.append(np.array(emb_word[word]))
else:
mat.append(np.random.random([option.hidden_size]))
return np.array(mat)
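# similarity_semantic*: sentence-level similarity from a BERT-style encoder -- cosine
# between candidate and original representations, penalized by the norm gap and sharpened
# by raising to the power K (optionally mixed with BLEU or keyword checks).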
def similarity_semantic(s1_list,s2, sta_vec, id2sen, emb_word, option, model):
K = 4
sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
sourcesent2 = [' '.join(id2sen(s2))] * len(s1_list)
rep1 = model.get_encoding(sourcesent, sourcesent)
rep2 = model.get_encoding(sourcesent,sourcesent2)
rep3 = model.get_encoding(sourcesent2,sourcesent2)
rep1 = (rep1+rep3)/2
norm1 = rep1.norm(2,1)
norm2 = rep2.norm(2,1)
semantic = torch.sum(rep1*rep2,1)/(norm1*norm2)
semantic = semantic*(1- (torch.abs(norm1-norm2)/torch.max(norm1,norm2)))
semantics = semantic.cpu().numpy()
res = np.power(semantics,K)
return res
def similarity_semantic_bleu(s1_list,s2, sta_vec, id2sen, emb_word, option, model):
K = 12
sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
sourcesent2 = [' '.join(id2sen(s2))] * len(s1_list)
rep1 = model.get_encoding(sourcesent, sourcesent)
rep2 = model.get_encoding(sourcesent,sourcesent2)
rep3 = model.get_encoding(sourcesent2,sourcesent2)
rep1 = (rep1+rep3)/2
norm1 = rep1.norm(2,1)
norm2 = rep2.norm(2,1)
semantic = torch.sum(rep1*rep2,1)/(norm1*norm2)
semantic = semantic*(1- (torch.abs(norm1-norm2)/torch.max(norm1,norm2)))
semantics = semantic.cpu().numpy()
bleus = []
for s1 in s1_list:
actual_word_lists = [[id2sen(s2)]*len(s1_list)]
generated_word_lists = [id2sen(s1)]
bleu_score = get_corpus_bleu_scores(actual_word_lists, generated_word_lists)[1]
bleus.append(bleu_score)
bleus = (1.0-sigmoid(np.minimum(bleus,0.999)))
semantics = np.power(semantics,K)
res = bleus*semantics
return res
def similarity_semantic_keyword(s1_list,s2, sta_vec, id2sen, emb_word, option, model):
C1 = 0.1
K = 4
sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
sourcesent2 = [' '.join(id2sen(s2))] * len(s1_list)
rep1 = model.get_encoding(sourcesent, sourcesent)
rep2 = model.get_encoding(sourcesent,sourcesent2)
rep3 = model.get_encoding(sourcesent2,sourcesent2)
rep1 = (rep1+rep3)/2
norm1 = rep1.norm(2,1)
norm2 = rep2.norm(2,1)
semantic = torch.sum(rep1*rep2,1)/(norm1*norm2)
semantic = semantic*(1- (torch.abs(norm1-norm2)/torch.max(norm1,norm2)))
semantics = semantic.cpu().numpy()
res = np.power(semantics,K)
semantics = []
for s, s1 in zip(res, s1_list):
tem = 1
for i,x in zip(sta_vec,s2):
if i==1 and x not in s1:
tem *= C1
semantics.append(s*tem)
res = np.array(semantics)
return res
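# similarity_keyword: for each candidate, align every original keyword (sta_vec==1) to its
# best-matching candidate word by cosine similarity and keep the worst such match, passed
# through sigma_word; candidates that drop a keyword are therefore heavily penalized.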
def similarity_keyword(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
e=1e-5
sims= []
for s1 in s1_list:
emb1=sen2mat(s1, id2sen, emb_word, option) # M*K
#wei2=normalize( np.array([-np.log(id2freq[x]) for x in s2 if x<=config.dict_size]))
emb2=sen2mat(s2, id2sen, emb_word, option) # N*k
wei2=np.array(sta_vec[:len(emb2)]).astype(np.float32) # N*1
#wei2=normalize(wei2)
emb_mat=np.dot(emb2,emb1.T) #N*M
norm1=np.diag(1/(np.linalg.norm(emb1,2,axis=1)+e)) # M*M
norm2=np.diag(1/(np.linalg.norm(emb2,2,axis=1)+e)) #N*N
sim_mat=np.dot(norm2,emb_mat).dot(norm1) #N*M
sim_vec=sim_mat.max(axis=1) #N
# debug
# print('sss',sim_vec)
# print(wei2)
# sim=min([x for x in list(sim_vec*wei2) if x>0]+[1])
sim=min([x for x,y in zip(list(sim_vec*wei2),list(wei2)) if y>0]+[1])
sim = sigma_word(sim)
sims.append(sim)
res = np.array(sims)
return res
def similarity_batch_word(s1, s2, sta_vec, option):
return np.array([ similarity_word(x,s2,sta_vec, option) for x in s1 ])
def similarity_keyword_batch(s1_lists, s2s, sta_vecs, id2sen, emb_word, option, model = None):
simss= []
for s1_list,s2, sta_vec in zip(s1_lists,s2s, sta_vecs):
sims = similarity_keyword(s1_list, s2, sta_vec, id2sen, emb_word, option, model)
simss.append(sims)
return simss
def similarity_keyword_tensor(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
e=1e-5
N_candidant = len(s1_list)
sims= []
embs = []
for s1 in s1_list:
emb1=sen2mat(s1, id2sen, emb_word, option) # M*K
embs.append(np.expand_dims(emb1,axis=0))
emb1 = np.concatenate(embs,0) # K,8,300
emb1 = torch.tensor(emb1, dtype=torch.float).permute(0,2,1).cuda()
emb2= sen2mat(s2, id2sen, emb_word, option) # N*k
emb2 = torch.tensor(emb2, dtype=torch.float).unsqueeze(0).repeat(N_candidant,1,1).cuda()
# print(emb1.size(), emb2.size()) #bs,300,7, bs,8,300
wei2= torch.tensor([0]+sta_vec[:emb2.size(1)-1],dtype=torch.uint8) #8
emb_mat = torch.bmm(emb2,emb1) # K,8,7
norm2 = 1/(torch.norm(emb2,p= 2,dim=2)+e) # K,8,8
norm1 = 1/(torch.norm(emb1,p= 2,dim=1)+e) # K,7,7
norm2 = torch.diag_embed(norm2) # K,15,15
norm1 = torch.diag_embed(norm1)
sim_mat = torch.bmm(torch.bmm(norm2, emb_mat), norm1) # K,8,7
sim_vec,_ = torch.max(sim_mat,2) # K,8
sim,_ = torch.min(sim_vec[:,wei2],1)
sim = sigma_word_bert(sim)
return sim.cpu().numpy()
def similarity_keyword_bleu(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
e=1e-5
sims= []
for s1 in s1_list:
emb1=sen2mat(s1, id2sen, emb_word, option) # M*K
#wei2=normalize( np.array([-np.log(id2freq[x]) for x in s2 if x<=config.dict_size]))
emb2=sen2mat(s2, id2sen, emb_word, option) # N*k
wei2=np.array(sta_vec[:len(emb2)]).astype(np.float32) # N*1
#wei2=normalize(wei2)
emb_mat=np.dot(emb2,emb1.T) #N*M
norm1=np.diag(1/(np.linalg.norm(emb1,2,axis=1)+e)) # M*M
norm2=np.diag(1/(np.linalg.norm(emb2,2,axis=1)+e)) #N*N
sim_mat=np.dot(norm2,emb_mat).dot(norm1) #N*M
sim_vec=sim_mat.max(axis=1) #N
# debug
# print('sss',sim_vec)
# print(wei2)
# sim=min([x for x in list(sim_vec*wei2) if x>0]+[1])
sim=min([x for x,y in zip(list(sim_vec*wei2),list(wei2)) if y>0]+[1])
sim = sigma_word(sim)
sims.append(sim)
bleus = []
for s1 in s1_list:
actual_word_lists = [[id2sen(s2)]*len(s1_list)]
generated_word_lists = [id2sen(s1)]
bleu_score = get_corpus_bleu_scores(actual_word_lists, generated_word_lists)[3]
bleus.append(bleu_score)
# bleus = (1.0-sigmoid(np.minimum(bleus,0.9999)))
bleus = (1.0-np.minimum(bleus,0.99))
res = np.array(sims)*bleus
return res
def similarity_keyword_bert(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
e=1e-5
sims= []
sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
sourcesent2 = [' '.join(id2sen(s2))]
sourcesent = sourcesent+sourcesent2
emb = model.get_representation(sourcesent)
N_candidant = len(s1_list)
emb2 = emb[-1,:,:].unsqueeze(0).repeat(N_candidant,1,1) # K,15*d
emb1 = emb[:-1,:,:].permute(0,2,1) #K,d,15
wei2= torch.tensor([0]+sta_vec,dtype=torch.uint8)
emb_mat = torch.bmm(emb2,emb1) # K,15,15
norm2 = 1/(torch.norm(emb2,p= 2,dim=2)+e) # K,15
norm1 = 1/(torch.norm(emb1,p= 2,dim=1)+e) # K,15
norm2 = torch.diag_embed(norm2) # K,15,15
norm1 = torch.diag_embed(norm1)
sim_mat = torch.bmm(torch.bmm(norm2, emb_mat), norm1) # K,15,15
sim_vec,_ = torch.max(sim_mat,2) # K,15
sim,_ = torch.min(sim_vec[:,wei2],1)
sim = sigma_word_bert(sim)
return sim.cpu().numpy()
def similarity_keyword_bert_bleu(s1_list, s2, sta_vec, id2sen, emb_word, option, model = None):
e=1e-5
sims= []
sourcesent = [' '.join(id2sen(s1)) for s1 in s1_list]
sourcesent2 = [' '.join(id2sen(s2))]
sourcesent = sourcesent+sourcesent2
emb = model.get_representation(sourcesent).numpy()
emb2 = emb[-1,:,:]
actual_word_lists = [[id2sen(s2)]]
bleus = []
for i,s1 in enumerate(s1_list):
emb1 = emb[i,:,:]
wei2=np.array([0]+sta_vec).astype(np.float32) # N*1
#wei2=normalize(wei2)
emb_mat=np.dot(emb2,emb1.T) #N*M
norm1=np.diag(1/(np.linalg.norm(emb1,2,axis=1)+e)) # M*M
norm2=np.diag(1/(np.linalg.norm(emb2,2,axis=1)+e)) #N*N
sim_mat=np.dot(norm2,emb_mat).dot(norm1) #N*M
sim_vec=sim_mat.max(axis=1) #N
# debug
# print('sss',sim_vec)
# print(wei2)
# sim=min([x for x in list(sim_vec*wei2) if x>0]+[1])
sim=min([x for x,y in zip(list(sim_vec*wei2),list(wei2)) if y>0]+[1])
sim = sigma_word1(sim)
sims.append(sim)
generated_word_lists = [id2sen(s1)]
bleu_score = get_corpus_bleu_scores(actual_word_lists, generated_word_lists)[3]
bleu_score = sigma_bleu(bleu_score)
bleus.append(bleu_score)
# bleus = (1.0-sigmoid(np.minimum(bleus,0.9999)))
res = np.array(sims)*np.array(bleus)
return res
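# cut_from_point: split each sentence at position ind into a forward prefix and a reversed
# backward suffix so the forward and backward LMs can score the proposal context.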
def cut_from_point(input, sequence_length, ind,option, mode=0):
batch_size=input.shape[0]
num_steps=input.shape[1]
input_forward=np.zeros([batch_size, num_steps])+option.dict_size+1
input_backward=np.zeros([batch_size, num_steps])+option.dict_size+1
sequence_length_forward=np.zeros([batch_size])
sequence_length_backward=np.zeros([batch_size])
for i in range(batch_size):
input_forward[i][0]=option.dict_size+2
input_backward[i][0]=option.dict_size+2
length=sequence_length[i]-1
for j in range(ind):
input_forward[i][j+1]=input[i][j+1]
sequence_length_forward[i]=ind+1
if mode==0:
for j in range(length-ind-1):
input_backward[i][j+1]=input[i][length-j]
sequence_length_backward[i]=length-ind
elif mode==1:
for j in range(length-ind):
input_backward[i][j+1]=input[i][length-j]
sequence_length_backward[i]=length-ind+1
return input_forward.astype(np.int32), input_backward.astype(np.int32), sequence_length_forward.astype(np.int32), sequence_length_backward.astype(np.int32)
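# generate_candidate_input: build the proposal batch for one edit -- copy the sentence
# search_size times and place the top-probability tokens at the edit position
# (replace/insert), or shift the tail left to delete the token there (mode 2).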
def generate_candidate_input(input, sequence_length, ind, prob, search_size, option, mode=0):
input_new=np.array([input[0]]*search_size)
sequence_length_new=np.array([sequence_length[0]]*search_size)
length=sequence_length[0]-1
if mode!=2:
ind_token=np.argsort(prob[: option.dict_size])[-search_size:]
if mode==2:
for i in range(sequence_length[0]-ind-2):
input_new[: , ind+i+1]=input_new[: , ind+i+2]
for i in range(sequence_length[0]-1, option.num_steps-1):
input_new[: , i]=input_new[: , i]*0+option.dict_size+1
sequence_length_new=sequence_length_new-1
return input_new[:1], sequence_length_new[:1]
if mode==1:
for i in range(0, sequence_length_new[0]-1-ind):
input_new[: , sequence_length_new[0]-i]=input_new[: , sequence_length_new[0]-1-i]
sequence_length_new=sequence_length_new+1
for i in range(search_size):
input_new[i][ind+1]=ind_token[i]
return input_new.astype(np.int32), sequence_length_new.astype(np.int32)
def generate_candidate_input_batch(input, sequence_length, ind, prob, search_size, option, mode=0,\
calibrated_set=None):
# input, K,L; prob, K,vocab
input_new=np.array([[inp]*search_size for inp in input]) # K,100,L
sequence_length_new=np.array([[length]*search_size for length in sequence_length]) #K,100
length=sequence_length[0]-1
if mode!=2:
ind_token=np.argsort(prob[:,: option.dict_size],1)[:,-search_size:] # K,search_size: top-probability token ids per sentence
print(ind_token.shape)
if mode==2:
for k in range(len(input)):
for i in range(sequence_length[k]-ind-2):
input_new[k,: , ind+i+1]=input_new[k,: , ind+i+2]
for i in range(sequence_length[k]-1, option.num_steps-1):
input_new[k,: , i]=input_new[k,:, i]*0+option.dict_size+1
sequence_length_new=sequence_length_new-1
return input_new, sequence_length_new
if mode==1:
for k in range(len(input)):
for i in range(0, sequence_length_new[k]-1-ind):
input_new[: , sequence_length_new[k]-i]=input_new[: , sequence_length_new[k]-1-i]
sequence_length_new=sequence_length_new+1
for i in range(search_size):
input_new[:,i,ind+1]=ind_token[:,i]
return input_new.astype(np.int32), sequence_length_new.astype(np.int32)
def generate_candidate_input_calibrated(input, sequence_length, ind, prob, searching_size, option,\
mode=0, calibrated_set = None):
search_size = searching_size
if mode!=2:
if calibrated_set is None:
ind_token=np.argsort(prob[: option.dict_size])[-search_size:]
else:
search_size = searching_size+len(calibrated_set)
ind_token=np.argsort(prob[: option.dict_size])[-search_size:]
ind_token = np.concatenate([ind_token,np.array(input[0])],0)
input_new=np.array([input[0]]*search_size)
sequence_length_new=np.array([sequence_length[0]]*search_size)
length=sequence_length[0]-1
if mode==2:
print(input_new, ind)
for i in range(sequence_length[0]-ind-2):
input_new[: , ind+i+1]=input_new[: , ind+i+2]
for i in range(sequence_length[0]-1, option.num_steps-1):
input_new[: , i]=input_new[: , i]*0+option.dict_size+1
print(input_new, ind)
sequence_length_new=sequence_length_new-1
return input_new[:1], sequence_length_new[:1]
if mode==1:
for i in range(0, sequence_length_new[0]-1-ind):
input_new[: , sequence_length_new[0]-i]=input_new[: , sequence_length_new[0]-1-i]
sequence_length_new=sequence_length_new+1
for i in range(search_size):
input_new[i][ind+1]=ind_token[i]
return input_new.astype(np.int32), sequence_length_new.astype(np.int32)
def normalize(x, e=0.05):
tem = copy(x)
return tem/tem.sum()
def sample_from_candidate(prob_candidate):
return choose_action(normalize(prob_candidate))
def samplep(probs):
N = probs.shape[1]
M = probs.shape[0]
samples = []
for i in range(M):
a = np.random.choice(range(N), 1, replace=True, p=probs[i])
samples.append(a[0])
return np.array(samples)
def just_acc(option):
r=np.random.random()
if r<option.just_acc_rate:
return 0
else:
return 1
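# getp: sentence score under the forward LM -- the product of per-token probabilities,
# including the end-of-sentence token, for each sentence in the batch.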
def getp(probabilities,input, lengths, option):
tems = []
for probs,inp, length in zip(probabilities,input,lengths):
tem = 1
for i in range(length-1):
tem*= probs[i][inp[i+1]]
tem*= probs[length-1][option.dict_size+1]
tems.append(tem)
return tems
class StrToBytes:
def __init__(self, fileobj):
self.fileobj = fileobj
def read(self, size):
return self.fileobj.read(size).encode()
def readline(self, size=-1):
return self.fileobj.readline(size).encode()
def data_type():
return tf.float32
class PTBModel(object):
#The language model.
def __init__(self, is_training, option,is_test_LM=False):
self._is_training = is_training
self.batch_size = option.batch_size
self.num_steps = option.num_steps
size = option.hidden_size
self.hidden_size = option.hidden_size
self.num_layers = option.num_layers
self.keep_prob = option.keep_prob
vocab_size = option.vocab_size
self._input=tf.placeholder(shape=[None, option.num_steps], dtype=tf.int32)
self._target=tf.placeholder(shape=[None, option.num_steps], dtype=tf.int32)
self._sequence_length=tf.placeholder(shape=[None], dtype=tf.int32)
with tf.device("/cpu:0"):
embedding = tf.get_variable("embedding", [vocab_size, size], dtype=data_type())
inputs = tf.nn.embedding_lookup(embedding, self._input)
softmax_w = tf.get_variable(
"softmax_w", [size, vocab_size], dtype=data_type())
softmax_b = tf.get_variable("softmax_b", [vocab_size], dtype=data_type())
if is_training and option.keep_prob < 1:
inputs = tf.nn.dropout(inputs, option.keep_prob)
output = self._build_rnn_graph(inputs, self._sequence_length, is_training)
output=tf.reshape(output, [-1, option.hidden_size])
logits = tf.nn.xw_plus_b(output, softmax_w, softmax_b)
# Reshape logits to be a 3-D tensor for sequence loss
logits = tf.reshape(logits, [-1, self.num_steps, vocab_size])
self._output_prob=tf.nn.softmax(logits)
# Use the contrib sequence loss and average over the batches
mask=tf.sequence_mask(lengths=self._sequence_length, maxlen=self.num_steps, dtype=data_type())
loss = tf.contrib.seq2seq.sequence_loss(
logits,
self._target,
mask,
average_across_timesteps=True,
average_across_batch=True)
# Update the cost
self._cost = loss
#self._lr = tf.Variable(0.0, trainable=False)
tvars = tf.trainable_variables()
grads, _ = tf.clip_by_global_norm(tf.gradients(self._cost, tvars),
option.max_grad_norm)
optimizer = tf.train.AdamOptimizer()
self._train_op = optimizer.apply_gradients(
zip(grads, tvars),
global_step=tf.train.get_or_create_global_step())
def _build_rnn_graph(self, inputs, sequence_length, is_training):
return self._build_rnn_graph_lstm(inputs, sequence_length, is_training)
def _get_lstm_cell(self, is_training):
return tf.contrib.rnn.BasicLSTMCell(
self.hidden_size, forget_bias=0.0, state_is_tuple=True,
reuse=not is_training)
def _build_rnn_graph_lstm(self, inputs, sequence_length, is_training):
"""Build the inference graph using canonical LSTM cells."""
# Slightly better results can be obtained with forget gate biases
# initialized to 1 but the hyperparameters of the model would need to be
# different than reported in the paper.
def make_cell():
cell = self._get_lstm_cell( is_training)
if is_training and self.keep_prob < 1:
cell = tf.contrib.rnn.DropoutWrapper(
cell, output_keep_prob=self.keep_prob)
return cell
cell = tf.contrib.rnn.MultiRNNCell(
[make_cell() for _ in range(self.num_layers)], state_is_tuple=True)
outputs, states=tf.nn.dynamic_rnn(cell=cell, inputs=inputs, sequence_length=sequence_length, dtype=data_type())
return outputs
def run_epoch(sess, model, input, sequence_length, target=None, mode='train'):
#Runs the model on the given data.
if mode=='train':
#train language model
_,cost = sess.run([model._train_op, model._cost], feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length})
return cost
elif mode=='test':
#test language model
cost = sess.run(model._cost, feed_dict={model._input: input, model._target:target, model._sequence_length:sequence_length})
return cost
else:
#use the language model to calculate sentence probability
output_prob = sess.run(model._output_prob, feed_dict={model._input: input, model._sequence_length:sequence_length})
return output_prob
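# metropolisHasting: Metropolis-Hastings sentence editor that proposes word replacement (0),
# insertion (1) and deletion (2) edits and accepts them using LM fluency times keyword similarity.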
def metropolisHasting(option, dataclass,forwardmodel, backwardmodel):
tfflag = True
if tfflag:
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True,option=option)
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False,option=option)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True,option=option)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False, option=option)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
saver_forward.restore(session, option.forward_save_path)
saver_backward.restore(session, option.backward_save_path)
similaritymodel = BertSimilarity()
similarity = similarity_keyword #similarity_semantic
fileobj = open(option.emb_path,'r')
emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
fileobj.close()
sim=option.sim
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
generateset = []
for sen_id in range(use_data.length):
#generate for each sentence
sta_vec=sta_vec_list[sen_id%len(sta_vec)]
input, sequence_length, _=use_data(1, sen_id)
input_original=input[0]
for i in range(1,option.num_steps):
if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
sta_vec[i-1]=1
pos=0
print(' '.join(id2sen(input[0])))
print(sta_vec)
for iter in range(option.sample_time):
#ind is the index of the selected word, regardless of the beginning token.
ind=pos%(sequence_length[0]-1)
action=choose_action(option.action_prob)
if action==0: # word replacement (action: 0)
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel)[0]
prob_old_prob*=similarity_old
else:
similarity_old=-1
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(option.search_size):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
if input_candidate[prob_candidate_ind][ind+1]<option.dict_size and\
(prob_candidate_prob>prob_old_prob*option.threshold or just_acc(option)==0):
input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
if np.sum(input1[0])==np.sum(input[0]):
pass
else:
input= input1
print(' '.join(id2sen(input[0])))
elif action==1: # word insert
if sequence_length[0]>=option.num_steps:
pos += 1
break
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(option.search_size):
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
if tfflag:
prob_old=run_epoch(session, mtest_forward, input,\
sequence_length,mode='use')[0]
else:
prob_old = output_p(input, forwardmodel) # 100,15,300003
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
#alpha is acceptance ratio of current proposal
alpha=min(1, prob_candidate_prob*option.action_prob[2]/(prob_old_prob*option.action_prob[1]*prob_candidate_norm[prob_candidate_ind]))
if choose_action([alpha, 1-alpha])==0 and \
input_candidate[prob_candidate_ind][ind]<option.dict_size and \
(prob_candidate_prob>prob_old_prob* option.threshold or just_acc(option)==0):
input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
sequence_length+=1
pos+=1
sta_vec.insert(ind, 0.0)
del(sta_vec[-1])
print(' '.join(id2sen(input[0])))
elif action==2: # word delete
if sequence_length[0]<=2:
pos += 1
break
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
input_candidate, sequence_length_candidate=generate_candidate_input(input,\
sequence_length, ind, None, option.search_size, option, mode=action)
# delete sentence
if tfflag:
prob_new=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')[0]
else:
prob_new = output_p(input_candidate, forwardmodel)
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_new[j][input_candidate[0][j+1]]
tem*=prob_new[j+1][option.dict_size+1]
prob_new_prob=tem
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_new_prob=prob_new_prob*similarity_candidate
# original sentence
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=0)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=0)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(option.search_size):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
#alpha is acceptance ratio of current proposal
if input[0] in input_candidate:
for candidate_ind in range(len(input_candidate)):
if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
break
pass
alpha=min(prob_candidate_norm[candidate_ind]*prob_new_prob*option.action_prob[1]/(option.action_prob[2]*prob_old_prob), 1)
else:
alpha=0
if choose_action([alpha, 1-alpha])==0 and (prob_new_prob> prob_old_prob*option.threshold or just_acc(option)==0):
input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
sequence_length-=1
del(sta_vec[ind])
sta_vec.append(0)
pos -= 1
print(' '.join(id2sen(input[0])))
pos += 1
generateset.append(id2sen(input[0]))
return generateset
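# simulatedAnnealing_bat: batched variant of the annealing editor -- processes batch_size
# sentences at once with a linearly decaying temperature; only the word-replacement action
# is exercised here (action is forced to 0).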
def simulatedAnnealing_bat(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
tfflag = True
print('xxxxxxxxxx')
if tfflag:
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True,option=option)
print('xxxxxxxxxx')
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False,option=option)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
print('xxxxxxxxxx')
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True,option=option)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False, option=option)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
print('xxxxxxxxxx')
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
# saver_forward.restore(session, option.forward_save_path)
# saver_backward.restore(session, option.backward_save_path)
print('xxxxxxxxxx')
generate_candidate = generate_candidate_input_batch
similaritymodel = None
if sim_mode == 'keyword':
similarity = similarity_keyword_batch
elif sim_mode =='keyword-bleu':
similarity = similarity_keyword_bleu
elif sim_mode =='keyword-bert':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert
elif sim_mode =='keyword-bert-bleu':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert_bleu
elif sim_mode =='semantic':
similaritymodel = BertSimilarity()
similarity = similarity_semantic
elif sim_mode =='semantic-bleu':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_bleu
elif sim_mode =='semantic-keyword':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_keyword
fileobj = open(option.emb_path,'r')
emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
fileobj.close()
sim=option.sim
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
generateset = []
C = 0.05
batch_size = 20
temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
print(temperatures, use_data.length)
print(use_data.length/batch_size)
for sen_id in range(int(use_data.length/batch_size)):
sta_vec=sta_vec_list[sen_id*batch_size:sen_id*batch_size+batch_size]
input, sequence_length, _=use_data(batch_size, sen_id)
input_original=input
N_input = len(input)
sta_vec_original = [x for x in sta_vec]
pos=0
for sta, sent in zip( sta_vec, input):
print(' '.join(id2sen(sent)))
print(sta)
calibrated_set = [x for x in input[0]]
for iter in range(option.sample_time):
temperature = temperatures[iter]
ind=pos%(np.max(sequence_length))
action=choose_action(option.action_prob)
action = 0
calibrated_set = list(set(calibrated_set))
if action==0: # word replacement (action: 0)
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use') # K,L,Vocab
prob_old_prob = getp(prob_old,input, sequence_length, option) # K,
input_ = [[x] for x in input]
similarity_old=similarity(input_, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel) #K,
V_old = prob_old_prob*np.concatenate(similarity_old,0)
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
prob_forward=run_epoch(session, mtest_forward, input_forward,\
sequence_length_forward, mode='use')[:, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward,\
sequence_length_backward, mode='use')[:, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward) #K,vocab
input_candidate, sequence_length_candidate=generate_candidate(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set) # K,100,15
input_candidate_flat = input_candidate.reshape(-1,option.num_steps)
sequence_length_candidate_flat = sequence_length_candidate.reshape(-1)
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate_flat,\
sequence_length_candidate_flat, mode='use') #K*100,15,vocab
prob_candidate = getp(prob_candidate_pre,
input_candidate_flat,sequence_length_candidate_flat, option) # K*100
prob_candidate = np.array(prob_candidate).reshape(N_input,-1) # K,100
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel) # K,100
similarity_candidate = np.concatenate(similarity_candidate,0).reshape(N_input,-1)
prob_candidate=prob_candidate*similarity_candidate # K,100
prob_candidate_norm= prob_candidate/prob_candidate.sum(1,keepdims=True)
prob_candidate_ind=samplep(prob_candidate_norm)
prob_candidate_prob= torch.gather(torch.tensor(prob_candidate),1,\
torch.tensor(prob_candidate_ind,dtype=torch.long).view(N_input,1)) # 5,1
prob_candidate_prob = prob_candidate_prob.squeeze().numpy()
V_new = np.log(np.maximum(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
V_old = np.log(np.maximum(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = np.minimum(1,np.exp(np.minimum((V_new-V_old)/temperature,100)))
for i,inp in enumerate(input):
alpha = alphat[i]
chooseind = prob_candidate_ind[i]
if choose_action([alpha, 1-alpha])==0:
input1=input_candidate[i][chooseind]
if np.sum(input1)==np.sum(inp):
pass
else:
input[i] = input1
# calibrated_set.append(input[i][ind])
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[i])))
elif action==1: # word insert
if sequence_length[0]>=option.num_steps:
pos += 1
continue
# break
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
#for i in range(option.search_size):
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
similarity_new = similarity_candidate[prob_candidate_ind]
if tfflag:
prob_old=run_epoch(session, mtest_forward, input,\
sequence_length,mode='use')[0]
else:
prob_old = output_p(input, forwardmodel) # 100,15,300003
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
sequence_length+=1
pos+=1
# sta_vec.insert(ind, 0.0)
# del(sta_vec[-1])
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_new)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==2: # word delete
if sequence_length[0]<=2 or ind==0:
pos += 1
continue
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, None, option.search_size, option,\
mode=action,calibrated_set=calibrated_set)
# delete sentence
if tfflag:
prob_new=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')[0]
else:
prob_new = output_p(input_candidate, forwardmodel)
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_new[j][input_candidate[0][j+1]]
tem*=prob_new[j+1][option.dict_size+1]
prob_new_prob=tem
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_new_prob=prob_new_prob*similarity_candidate
#alpha is acceptance ratio of current proposal
if input[0] in input_candidate:
for candidate_ind in range(len(input_candidate)):
if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
break
pass
V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp((V_new-V_old)/temperature))
else:
alphat=0
if choose_action([alphat, 1-alphat])==0:
calibrated_set.append(input[0][ind])
input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
sequence_length-=1
# del(sta_vec[ind])
# sta_vec.append(0)
pos -= 1
print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_candidate)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
pos += 1
generateset.append(id2sen(input[0]))
appendtext(id2sen(input[0]), option.save_path)
return generateset
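# simulatedAnnealing: single-sentence annealing editor -- same replace/insert/delete
# proposals as the MH sampler, but acceptance uses exp((V_new - V_old) / temperature)
# on length-normalized log-scores with a decaying temperature schedule.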
def simulatedAnnealing(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
tfflag = True
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True, option=option)
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False, option=option)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True, option=option)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False,option=option)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
print('line1295-------------------')
init = tf.global_variables_initializer()
with tf.Session() as session:
session.run(init)
saver_forward.restore(session, option.forward_save_path)
saver_backward.restore(session, option.backward_save_path)
print('line1295-------------------')
# if tfflag:
# with tf.name_scope("forward_train"):
# with tf.variable_scope("forward", reuse=None):
# m_forward = PTBModel(is_training=True,option=option)
# with tf.name_scope("forward_test"):
# with tf.variable_scope("forward", reuse=True):
# mtest_forward = PTBModel(is_training=False,option=option)
# var=tf.trainable_variables()
# var_forward=[x for x in var if x.name.startswith('forward')]
# saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
# with tf.name_scope("backward_train"):
# with tf.variable_scope("backward", reuse=None):
# m_backward = PTBModel(is_training=True,option=option)
# with tf.name_scope("backward_test"):
# with tf.variable_scope("backward", reuse=True):
# mtest_backward = PTBModel(is_training=False, option=option)
# var=tf.trainable_variables()
# var_backward=[x for x in var if x.name.startswith('backward')]
# saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
# init = tf.global_variables_initializer()
# session = tf.Session()
# session.run(init)
# saver_forward.restore(session, option.forward_save_path)
# saver_backward.restore(session, option.backward_save_path)
similaritymodel = None
if sim_mode == 'keyword':
similarity = similarity_keyword
elif sim_mode =='keyword-bleu':
similarity = similarity_keyword_bleu
elif sim_mode =='keyword-bert':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert
elif sim_mode =='keyword-bert-bleu':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert_bleu
elif sim_mode =='semantic':
similaritymodel = BertSimilarity()
similarity = similarity_semantic
elif sim_mode =='semantic-bleu':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_bleu
elif sim_mode =='semantic-keyword':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_keyword
fileobj = open(option.emb_path,'r')
emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
fileobj.close()
sim=option.sim
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
generateset = []
C = 0.05
temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
print(temperatures)
for sen_id in range(use_data.length):
sta_vec=sta_vec_list[sen_id]
input, sequence_length, _=use_data(1, sen_id)
input_original=input[0]
sta_vec_original = [x for x in sta_vec]
# for i in range(1,option.num_steps):
# if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
# sta_vec[i-1]=1
pos=0
print(' '.join(id2sen(input[0])))
print(sta_vec)
calibrated_set = [x for x in input[0]]
for iter in range(option.sample_time):
temperature = temperatures[iter]
ind=pos%(sequence_length[0]-1)
action=choose_action(option.action_prob)
calibrated_set = list(set(calibrated_set))
if action==0: # word replacement (action: 0)
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel)[0]
prob_old_prob*=similarity_old
else:
similarity_old=-1
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
if np.sum(input1[0])==np.sum(input[0]):
pass
else:
calibrated_set.append(input[0][ind])
input= input1
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_candidate[prob_candidate_ind])
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==1: # word insert
if sequence_length[0]>=option.num_steps:
pos += 1
continue
# break
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
#for i in range(option.search_size):
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
similarity_new = similarity_candidate[prob_candidate_ind]
if tfflag:
prob_old=run_epoch(session, mtest_forward, input,\
sequence_length,mode='use')[0]
else:
prob_old = output_p(input, forwardmodel) # 100,15,300003
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
sequence_length+=1
pos+=1
# sta_vec.insert(ind, 0.0)
# del(sta_vec[-1])
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_new)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==2: # word delete
if sequence_length[0]<=2 or ind==0:
pos += 1
continue
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, None, option.search_size, option,\
mode=action,calibrated_set=calibrated_set)
# delete sentence
if tfflag:
prob_new=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')[0]
else:
prob_new = output_p(input_candidate, forwardmodel)
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_new[j][input_candidate[0][j+1]]
tem*=prob_new[j+1][option.dict_size+1]
prob_new_prob=tem
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_new_prob=prob_new_prob*similarity_candidate
#alpha is acceptance ratio of current proposal
if input[0] in input_candidate:
for candidate_ind in range(len(input_candidate)):
if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
break
pass
V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp((V_new-V_old)/temperature))
else:
alphat=0
if choose_action([alphat, 1-alphat])==0:
calibrated_set.append(input[0][ind])
input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
sequence_length-=1
# del(sta_vec[ind])
# sta_vec.append(0)
pos -= 1
print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_candidate)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
pos += 1
generateset.append(id2sen(input[0]))
appendtext(id2sen(input[0]), option.save_path)
return generateset
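# ---------------------------------------------------------------------------
# Note on the accept/reject step shared by the samplers below: each proposed
# edit (replace/insert/delete) is scored by a length-normalised log score
# V = log(p^(1/L)), where p multiplies the language model's next-token
# probabilities along the sentence plus a factor for the end-of-sentence token
# (id option.dict_size+1), optionally weighted by a keyword/semantic similarity
# term, and L is the sentence length. A proposal is kept with probability
#   alpha = min(1, exp((V_new - V_old) / temperature)),
# i.e. the usual simulated-annealing acceptance rule. A minimal sketch,
# assuming scalar scores v_new/v_old and that choose_action([a, 1-a]) returns
# index 0 with probability a (an assumption based on how it is used here):
#   alpha = min(1.0, math.exp(min((v_new - v_old) / temperature, 200)))
#   accepted = (choose_action([alpha, 1.0 - alpha]) == 0)
# ---------------------------------------------------------------------------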
def simulatedAnnealing_std(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
tfflag = True
if tfflag:
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True,option=option)
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False,option=option)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True,option=option)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False, option=option)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
saver_forward.restore(session, option.forward_save_path)
saver_backward.restore(session, option.backward_save_path)
similaritymodel = None
if sim_mode == 'keyword':
similarity = similarity_keyword
elif sim_mode =='keyword-bleu':
similarity = similarity_keyword_bleu
elif sim_mode =='keyword-bert':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert
elif sim_mode =='keyword-bert-bleu':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert_bleu
elif sim_mode =='semantic':
similaritymodel = BertSimilarity()
similarity = similarity_semantic
elif sim_mode =='semantic-bleu':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_bleu
elif sim_mode =='semantic-keyword':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_keyword
fileobj = open(option.emb_path,'r')
emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
fileobj.close()
sim=option.sim
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
generateset = []
C = 0.05
temperatures = C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
print(temperatures)
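# For example, with option.sample_time = 100 this linear schedule runs from
# 0.05*1.01 ~= 0.05 down to 0.05*0.02 = 0.001, so later edits are accepted
# almost only when they improve the score (the numbers are illustrative; the
# actual values depend on option.sample_time).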
for sen_id in range(use_data.length):
sta_vec=sta_vec_list[sen_id]
input, sequence_length, _=use_data(1, sen_id)
input_original=input[0]
sta_vec_original = [x for x in sta_vec]
for i in range(1,option.num_steps):
if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
sta_vec[i-1]=1
pos=0
print(' '.join(id2sen(input[0])))
print(sta_vec)
calibrated_set = [x for x in input[0]]
for iter in range(option.sample_time):
temperature = temperatures[iter]
print(temperature)
ind=pos%(sequence_length[0]-1)
action=choose_action(option.action_prob)
calibrated_set = list(set(calibrated_set))
if action==0: # word replacement (action: 0)
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel)[0]
prob_old_prob*=similarity_old
else:
similarity_old=-1
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
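# V_new/V_old above are geometric-mean (per-token) log probabilities, i.e.
# (1/L)*log p(sentence), floored at 1e-200 to avoid log(0); the exponent is
# clipped at 100 so math.exp cannot overflow. Dividing the score difference by
# the current temperature gives the annealed acceptance probability alphat.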
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
if np.sum(input1[0])==np.sum(input[0]):
pass
else:
calibrated_set.append(input[0][ind])
input= input1
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_candidate[prob_candidate_ind])
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==1: # word insert
if sequence_length[0]>=option.num_steps:
pos += 1
continue
# break
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
#for i in range(option.search_size):
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
similarity_new = similarity_candidate[prob_candidate_ind]
if tfflag:
prob_old=run_epoch(session, mtest_forward, input,\
sequence_length,mode='use')[0]
else:
prob_old = output_p(input, forwardmodel) # 100,15,300003
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
V_new = math.log(max(np.power(prob_candidate_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
sequence_length+=1
pos+=1
# sta_vec.insert(ind, 0.0)
# del(sta_vec[-1])
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_new)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==2: # word delete
if sequence_length[0]<=2 or ind==0:
pos += 1
continue
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, None, option.search_size, option,\
mode=action,calibrated_set=calibrated_set)
# delete sentence
if tfflag:
prob_new=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')[0]
else:
prob_new = output_p(input_candidate, forwardmodel)
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_new[j][input_candidate[0][j+1]]
tem*=prob_new[j+1][option.dict_size+1]
prob_new_prob=tem
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_new_prob=prob_new_prob*similarity_candidate
#alpha is acceptance ratio of current proposal
if input[0] in input_candidate:
for candidate_ind in range(len(input_candidate)):
if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
break
pass
V_new = math.log(max(np.power(prob_new_prob,1.0/sequence_length_candidate[0]),1e-200))
V_old = math.log(max(np.power(prob_old_prob, 1.0/sequence_length),1e-200))
alphat = min(1,math.exp((V_new-V_old)/temperature))
else:
alphat=0
if choose_action([alphat, 1-alphat])==0:
calibrated_set.append(input[0][ind])
input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
sequence_length-=1
# del(sta_vec[ind])
# sta_vec.append(0)
pos -= 1
print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
V_new,alphat,similarity_old,similarity_candidate)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
pos += 1
generateset.append(id2sen(input[0]))
appendtext(id2sen(input[0]), option.save_path)
return generateset
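# A hypothetical call site (illustrative only; the option/dataclass objects are
# built elsewhere in this project, and because tfflag is hard-coded to True the
# pytorch model arguments are never used here, so placeholders would do):
#   paraphrases = simulatedAnnealing_std(option, dataclass, None, None,
#                                        sim_mode='keyword')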
def simulatedAnnealing_calibrated(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
tfflag = True
if tfflag:
with tf.name_scope("forward_train"):
with tf.variable_scope("forward", reuse=None):
m_forward = PTBModel(is_training=True,option=option)
with tf.name_scope("forward_test"):
with tf.variable_scope("forward", reuse=True):
mtest_forward = PTBModel(is_training=False,option=option)
var=tf.trainable_variables()
var_forward=[x for x in var if x.name.startswith('forward')]
saver_forward=tf.train.Saver(var_forward, max_to_keep=1)
with tf.name_scope("backward_train"):
with tf.variable_scope("backward", reuse=None):
m_backward = PTBModel(is_training=True,option=option)
with tf.name_scope("backward_test"):
with tf.variable_scope("backward", reuse=True):
mtest_backward = PTBModel(is_training=False, option=option)
var=tf.trainable_variables()
var_backward=[x for x in var if x.name.startswith('backward')]
saver_backward=tf.train.Saver(var_backward, max_to_keep=1)
init = tf.global_variables_initializer()
session = tf.Session()
session.run(init)
saver_forward.restore(session, option.forward_save_path)
saver_backward.restore(session, option.backward_save_path)
similaritymodel = None
if sim_mode == 'keyword':
similarity = similarity_keyword
elif sim_mode =='keyword-bleu':
similarity = similarity_keyword_bleu
elif sim_mode =='keyword-bert':
similaritymodel = BertEncoding()
similarity = similarity_keyword_bert
elif sim_mode =='semantic':
similaritymodel = BertSimilarity()
similarity = similarity_semantic
elif sim_mode =='semantic-bleu':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_bleu
elif sim_mode =='semantic-keyword':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_keyword
fileobj = open(option.emb_path,'r')
emb_word,emb_id=pkl.load(StrToBytes(fileobj), encoding='latin1')
fileobj.close()
sim=option.sim
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
generateset = []
C = 2
temperatures = 0.3+ C*(1.0/100)*np.array(list(range(option.sample_time+1,1,-1)))
print(temperatures)
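# Unlike the schedule in simulatedAnnealing_std, this one is offset by 0.3 and
# uses C = 2, so with option.sample_time = 100 it cools from roughly
# 0.3 + 0.02*101 ~= 2.3 down to 0.3 + 0.02*2 = 0.34 (illustrative numbers).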
for sen_id in range(use_data.length):
sta_vec=sta_vec_list[sen_id%len(sta_vec_list)] # index the per-sentence keyword vectors, wrapping with the list length
input, sequence_length, _=use_data(1, sen_id)
input_original=input[0]
sta_vec_original = [x for x in sta_vec]
for i in range(1,option.num_steps):
if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
sta_vec[i-1]=1
pos=0
print(' '.join(id2sen(input[0])))
print(sta_vec)
calibrated_set = [x for x in input[0]]
for iter in range(option.sample_time):
temperature = temperatures[iter]
ind=pos%(sequence_length[0]-1)
action=choose_action(option.action_prob)
calibrated_set = list(set(calibrated_set))
if action==0: # word replacement (action: 0)
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel)[0]
prob_old_prob*=similarity_old
else:
similarity_old=-1
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
V_new = math.log(max(prob_candidate_prob,1e-200))
V_old = math.log(max(prob_old_prob,1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,100)))
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input1=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
if np.sum(input1[0])==np.sum(input[0]):
pass
else:
calibrated_set.append(input[0][ind])
input= input1
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,0,0)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==1: # word insert
if sequence_length[0]>=option.num_steps:
pos += 1
break
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
#for i in range(option.search_size):
for i in range(len(input_candidate)):
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
prob_candidate_ind=sample_from_candidate(prob_candidate_norm)
prob_candidate_prob=prob_candidate[prob_candidate_ind]
if tfflag:
prob_old=run_epoch(session, mtest_forward, input,\
sequence_length,mode='use')[0]
else:
prob_old = output_p(input, forwardmodel) # 100,15,300003
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
V_new = math.log(max(prob_candidate_prob, 1e-200))
V_old = math.log(max(prob_old_prob*prob_candidate_norm[prob_candidate_ind],1e-200))
alphat = min(1,math.exp(min((V_new-V_old)/temperature,200)))
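# Here V_old additionally multiplies in prob_candidate_norm[prob_candidate_ind],
# the normalised probability of proposing this particular candidate; it appears
# to play the role of the proposal term q(x'|x) in a Metropolis-Hastings style
# correction (a reading of the code, not an exact derivation).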
if choose_action([alphat, 1-alphat])==0 and input_candidate[prob_candidate_ind][ind]<option.dict_size:
input=input_candidate[prob_candidate_ind:prob_candidate_ind+1]
sequence_length+=1
# debug
# print('xxxx', sequence_length, sta_vec)
# tem=1
# prob_old=run_epoch(session, mtest_forward, input,\
# sequence_length,mode='use')[0]
# for j in range(sequence_length[0]-1):
# tem*=prob_old[j][input[0][j+1]]
# print(tem,)
# tem*=prob_old[j+1][option.dict_size+1]
# print(tem)
# similarity_old=similarity(input, input_original,sta_vec,\
# id2sen, emb_word, option, similaritymodel)[0]
# print(similarity_old)
pos+=1
# sta_vec.insert(ind, 0.0)
# del(sta_vec[-1])
print('ind, action,oldprob,vold, vnew, alpha,simold, simnew', ind, action,prob_old_prob,V_old,\
V_new,alphat,0,0)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
elif action==2: # word delete
if sequence_length[0]<=2:
pos += 1
break
if tfflag:
prob_old=run_epoch(session, mtest_forward, input, sequence_length,\
mode='use')[0]
else:
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_old_prob=prob_old_prob*similarity_old
else:
similarity_old=-1
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, None, option.search_size, option,\
mode=action,calibrated_set=calibrated_set)
# delete sentence
if tfflag:
prob_new=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')[0]
else:
prob_new = output_p(input_candidate, forwardmodel)
tem=1
for j in range(sequence_length_candidate[0]-1):
tem*=prob_new[j][input_candidate[0][j+1]]
tem*=prob_new[j+1][option.dict_size+1]
prob_new_prob=tem
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_new_prob=prob_new_prob*similarity_candidate
# original sentence
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=0)
if tfflag:
prob_forward=run_epoch(session, mtest_forward, input_forward, sequence_length_forward, mode='use')[0, ind%(sequence_length[0]-1),:]
prob_backward=run_epoch(session, mtest_backward, input_backward, sequence_length_backward, mode='use')[0, sequence_length[0]-1-ind%(sequence_length[0]-1),:]
else:
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input_calibrated(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=0,\
calibrated_set=calibrated_set)
if tfflag:
prob_candidate_pre=run_epoch(session, mtest_forward, input_candidate,\
sequence_length_candidate,mode='use')
else:
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(len(input_candidate)): # iterate over the candidates actually generated (may differ from option.search_size when a calibrated_set is used)
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate=np.array(prob_candidate)
if sim!=None:
similarity_candidate=similarity(input_candidate, input_original,sta_vec,\
id2sen, emb_word, option, similaritymodel)[0]
prob_candidate=prob_candidate*similarity_candidate
prob_candidate_norm=normalize(prob_candidate)
#alpha is acceptance ratio of current proposal
if input[0] in input_candidate:
for candidate_ind in range(len(input_candidate)):
if input[0] in input_candidate[candidate_ind: candidate_ind+1]:
break
pass
V_new = math.log(max(prob_new_prob*prob_candidate_norm[candidate_ind],1e-300))
V_old = math.log(max(prob_old_prob,1e-300))
alphat = min(1,math.exp((V_new-V_old)/temperature))
else:
alphat=0
if choose_action([alphat, 1-alphat])==0:
calibrated_set.append(input[0][ind])
input=np.concatenate([input[:,:ind+1], input[:,ind+2:], input[:,:1]*0+option.dict_size+1], axis=1)
sequence_length-=1
# del(sta_vec[ind])
# sta_vec.append(0)
pos -= 1
print('oldprob,vold, vnew, alpha,simold, simnew',prob_old_prob,V_old,\
V_new,alphat,0,0)
print('Temperature:{:3.3f}: '.format(temperature)+' '.join(id2sen(input[0])))
pos += 1
generateset.append(id2sen(input[0]))
appendtext(id2sen(input[0]), option.save_path)
return generateset
def simulatedAnnealing_pytorch(option, dataclass,forwardmodel, backwardmodel, sim_mode = 'keyword'):
sim=option.sim
similaritymodel = None
if sim_mode == 'keyword':
similarity = similarity_keyword
elif sim_mode =='semantic':
similaritymodel = BertSimilarity()
similarity = similarity_semantic
elif sim_mode =='semantic-bleu':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_bleu
elif sim_mode =='semantic-keyword':
similaritymodel = BertSimilarity()
similarity = similarity_semantic_keyword
generated_sentence = []
fileemb = open(option.emb_path,'rb')
emb_word,emb_id=pkl.load(fileemb, encoding = 'latin1')
sta_vec=list(np.zeros([option.num_steps-1]))
use_data, sta_vec_list = read_data_use(option, dataclass.sen2id)
id2sen = dataclass.id2sen
C = 1 # 0.2
for sen_id in range(use_data.length):
#generate for each sentence
sta_vec=sta_vec_list[sen_id%len(sta_vec_list)] # index the per-sentence keyword vectors, wrapping with the list length
sta_vec.insert(0, 0.0)
del(sta_vec[-1])
input, sequence_length, _=use_data(1, sen_id)
input_original=input[0]
for i in range(1,option.num_steps):
if input[0][i]>option.rare_since and input[0][i]<option.dict_size:
sta_vec[i-1]=1
pos=0
print('Origin Sentence:')
print(' '.join(id2sen(input[0])))
print(sta_vec)
print('Paraphrase:')
for iter in range(option.sample_time):
#ind is the index of the selected word, regardless of the beginning token.
ind=pos%(sequence_length[0]-1)
action=choose_action(option.action_prob)
steps = float(iter/(sequence_length[0]-1))
temperature = C/(math.log(steps+2))
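# This pytorch variant uses a logarithmic schedule, temperature = C/log(steps+2)
# with steps = iter/(L-1): e.g. with C = 1 it starts near 1/log(2) ~= 1.44 and
# decays slowly, in contrast to the linear schedules of the tensorflow variants.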
if action==0: # word replacement (action: 0)
prob_old= output_p(input, forwardmodel) #15,K
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_old[j][input[0][j+1]]
tem*=prob_old[j+1][option.dict_size+1]
prob_old_prob=tem
if sim!=None:
similarity_old=similarity(input, input_original, sta_vec, id2sen, emb_word,
option, similaritymodel)[0]
prob_old_prob*=similarity_old
else:
similarity_old=-1
input_forward, input_backward, sequence_length_forward, sequence_length_backward =\
cut_from_point(input, sequence_length, ind, option, mode=action)
prob_forward = output_p(input_forward, forwardmodel)[ind%(sequence_length[0]-1),:]
prob_backward = output_p(input_backward,backwardmodel)[
sequence_length[0]-1-ind%(sequence_length[0]-1),:]
prob_mul=(prob_forward*prob_backward)
input_candidate, sequence_length_candidate=generate_candidate_input(input,\
sequence_length, ind, prob_mul, option.search_size, option, mode=action)
prob_candidate_pre = output_p(input_candidate, forwardmodel) # 100,15,300003
prob_candidate=[]
for i in range(option.search_size):
tem=1
for j in range(sequence_length[0]-1):
tem*=prob_candidate_pre[i][j][input_candidate[i][j+1]]
tem*=prob_candidate_pre[i][j+1][option.dict_size+1]
prob_candidate.append(tem)
prob_candidate= | np.array(prob_candidate) | numpy.array |
# ------------------
# This module, grid.py, deals with calculations of all microbe-related activities on a spatial grid with a class, Grid().
# by <NAME>
# ------------------
import numpy as np
import pandas as pd
from microbe import microbe_osmo_psi
from microbe import microbe_mortality_prob as MMP
from enzyme import Arrhenius, Allison
from monomer import monomer_leaching
from utility import expand
class Grid():
"""
This class holds all variables related to microbe, substrate, monomer, and enzyme over the spatial grid.
Accepts returns from the module 'initialization.py' and includes methods as follows:
1) degradation(): explicit substrate degradation
2) uptake(): explicit monomers uptake
3) metabolism(): cellular processes and emergent CUE and respiration
4) mortality(): determine mortality of microbial cells based on mass thresholds
5) reproduction(): compute cell division and dispersal
6) repopulation(): resample taxa from the microbial pool and place them on the grid
Coding philosophy:
Each method starts with passing some global variables to local ones and creating
some indices facilitating dataframe index/column processing and ends up with updating
state variables and passing them back to the global ones. All computation stays in between.
Reminder:
Keep a CLOSE EYE on the indexing throughout the matrix/dataframe operations
"""
def __init__(self,runtime,data_init):
"""
The constructor of Grid class.
Parameters:
runtime: user-specified parameters
data_init: dictionary;initialized data from the module 'initialization.py'
"""
self.cycle = int(runtime.loc['end_time',1])
self.gridsize = int(runtime.loc['gridsize',1])
self.n_taxa = int(runtime.loc["n_taxa",1])
self.n_substrates = int(runtime.loc["n_substrates",1])
self.n_enzymes = int(runtime.loc["n_enzymes",1])
self.n_monomers = self.n_substrates + 2
#Degradation
#self.Substrates_init = data_init['Substrates'] # Substrates initialized
self.Substrates = data_init['Substrates'].copy(deep=True) # Substrates;df; w/ .copy() avoiding mutation
self.SubInput = data_init['SubInput'] # Substrate inputs
#self.Enzymes_init = data_init['Enzymes'] # Initial pool of Enzymes
self.Enzymes = data_init['Enzymes'].copy(deep=True) # Enzymes
self.ReqEnz = data_init['ReqEnz'] # Enzymes required by each substrate
self.Ea = data_init['Ea'] # Enzyme activation energy
self.Vmax0 = data_init['Vmax0'] # Max. reaction speed
self.Km0 = data_init['Km0'] # Half-saturation constant
self.SubstrateRatios = np.float32('nan') # Substrate stoichiometry
self.DecayRates = np.float32('nan') # Substrate decay rate
#Uptake
#self.Microbes_init = data_init['Microbes_pp'] # microbial community before placement
self.Microbes = data_init['Microbes'].copy(deep=True) # microbial community after placement
#self.Monomers_init = data_init['Monomers'] # Monomers initialized
self.Monomers = data_init['Monomers'].copy(deep=True) # Monomers
self.MonInput = data_init['MonInput'] # Inputs of monomers
self.Uptake_Ea = data_init['Uptake_Ea'] # transporter enzyme Ea
self.Uptake_Vmax0 = data_init['Uptake_Vmax0'] # transporter Vmax
self.Uptake_Km0 = data_init['Uptake_Km0'] # transporter Km
self.Monomer_ratios = data_init['Monomer_ratio'].copy(deep=True) # monomer stoichiometry
self.Uptake_ReqEnz = data_init['Uptake_ReqEnz'] # Enzymes required by monomers
self.Uptake_Enz_Cost = data_init['UptakeGenesCost'] # Cost of encoding each uptake gene
self.Taxon_Uptake_C = np.float32('nan') # taxon uptake of C
self.Taxon_Uptake_N = np.float32('nan') # taxon uptake of N
self.Taxon_Uptake_P = np.float32('nan') # taxon uptake of P
#Metabolism
self.Consti_Enzyme_C = data_init["EnzProdConstit"] # C cost of encoding constitutive enzyme
self.Induci_Enzyme_C = data_init["EnzProdInduce"] # C Cost of encoding inducible enzyme
self.Consti_Osmo_C = data_init['OsmoProdConsti'] # C Cost of encoding constitutive osmolyte
self.Induci_Osmo_C = data_init['OsmoProdInduci'] # C Cost of encoding inducible osmolyte
self.Uptake_Maint_Cost = data_init['Uptake_Maint_cost'] # Respiration cost of uptake transporters: 0.01 mg C transporter-1 day-1
self.Enz_Attrib = data_init['EnzAttrib'] # Enzyme attributes; dataframe
self.AE_ref = data_init['AE_ref'] # Reference AE:constant of 0.5;scalar
self.AE_temp = data_init['AE_temp'] # AE sensitivity to temperature;scalar
self.Respiration = np.float32('nan') # Respiration
self.CUE_system = np.float32('nan') # emergent CUE
#self.Transporters = float('nan')
#self.Osmolyte_Con = float('nan')
#self.Osmolyte_Ind = float('nan')
#self.Enzyme_Con = float('nan')
#self.Enzyme_Ind = float('nan')
#self.CUE_Taxon = float('nan')
#self.Growth_Yield = float('nan')
#Mortality
self.MinRatios = data_init['MinRatios'] # minimal cell quotas
self.C_min = data_init['C_min'] # C threshold value of living cell
self.N_min = data_init['N_min'] # N threshold value of living cell
self.P_min = data_init['P_min'] # P threshold value of living cell
self.basal_death_prob = data_init['basal_death_prob'] # basal death probability of microbes
self.death_rate = data_init['death_rate'] # change rate of mortality with water potential
self.tolerance = data_init['TaxDroughtTol'] # taxon drought tolerance
self.wp_fc = data_init['wp_fc'] # scalar; max threshold value of water potential
self.wp_th = data_init['wp_th'] # scalar; min threshold value of water potential
self.alpha = data_init['alpha'] # scalar; moisture sensitivity; 1
self.Kill = np.float32('nan') # total number of cells stochastically killed
# Reproduction
self.fb = data_init['fb'] # index of fungal taxa (=1)
self.max_size_b = data_init['max_size_b'] # threshold of cell division
self.max_size_f = data_init['max_size_f'] # threshold of cell division
self.x = int(runtime.loc['x',1]) # x dimension of grid
self.y = int(runtime.loc['y',1]) # y dimension of grid
self.dist = int(runtime.loc['dist',1]) # maximum dispersal distance: 1 cell
self.direct = int(runtime.loc['direct',1]) # dispersal direction: 0.95
# Climate data
self.temp = data_init['Temp'] # series; temperature
self.psi = data_init['Psi'] # series; water potential
# Global constants
self.Km_Ea = np.float32(20) # kj mol-1;activation energy for both enzyme and transporter
self.Tref = np.float32(293) # reference temperature of 20 celcius
# tradeoff
self.Taxon_Enzyme_Cost_C = np.float32('nan')
self.Taxon_Osmo_Cost_C = np.float32('nan')
self.Microbe_C_Gain = np.float32('nan')
def degradation(self,day):
"""
Explicit degradation of different substrates.
Calculation procedure:
1. Determine substrates pool: incl. inputs
2. Compute Vmax & Km and make them follow the index of Substrates
3. Follow the Michaelis-Menten equation to compute full degradation rate
4. Impose the substrate-required enzymes upon the full degradation rate
5. Adjust cellulose rate with LCI(lignocellulose index)
"""
# constant of lignocellulose index--LCI
LCI_slope = np.float32(-0.8)
# Substrates index by subtrate names
Sub_index = self.Substrates.index
# Calculate total mass of each substrate (C+N+P) and derive substrate stoichiometry
rss = self.Substrates.sum(axis=1)
SubstrateRatios = self.Substrates.divide(rss,axis=0)
SubstrateRatios = SubstrateRatios.fillna(0) # NOTE:ensure NA(b/c of 0/0 in df) = 0
# Arrhenius equation for Vmax and Km multiplied by exponential decay for Psi sensitivity
Vmax = Arrhenius(self.Vmax0, self.Ea, self.temp[day]) * Allison(0.05, self.wp_fc, self.psi[day]) # Vmax: (enz*gridsize) * sub
Km = Arrhenius(self.Km0, self.Km_Ea, self.temp[day]) # Km: (sub*gridsize) * enz
# Multiply Vmax by enzyme concentration
tev_transition = Vmax.mul(self.Enzymes,axis=0) # (enz*gridsize) * sub
tev_transition.index = [np.arange(self.gridsize).repeat(self.n_enzymes),tev_transition.index] # create a MultiIndex
tev = tev_transition.stack().unstack(1).reset_index(level=0,drop=True) # (sub*gridsize) * enz
tev = tev[Km.columns] # ensure to re-order the columns b/c of python's default alphabetical ordering
# Michaelis-Menten equation
Decay = tev.mul(rss,axis=0)/Km.add(rss,axis=0)
# Pull out each batch of required enzymes and sum across redundant enzymes
batch1 = (self.ReqEnz.loc['set1'].values * Decay).sum(axis=1)
#batch2 = (self.ReqEnz.loc['set2'].values * Decay).sum(axis=1)
# Assess the rate-limiting enzyme and set decay to that rate
#DecaySums = pd.concat([batch1, batch2],axis=1)
#DecayRates0 = DecaySums.min(axis=1, skipna=True)
# Compare to substrate available and take the min, allowing for a tolerance of 1e-9
DecayRates = pd.concat([batch1,rss],axis=1,sort=False).min(axis=1,skipna=True)
# Adjust cellulose rate by linking cellulose degradation to lignin concentration (LCI)
ss7 = self.Substrates.loc[Sub_index=='Lignin'].sum(axis=1).values
DecayRates.loc[Sub_index=='Cellulose'] *= np.float32(1) + (ss7/(ss7 + self.Substrates.loc[Sub_index=='Cellulose','C'])) * LCI_slope
# Update Substrates Pool by removing decayed C, N, & P. Depending on specific needs, adding inputs of substrates can be done here
self.Substrates -= SubstrateRatios.mul(DecayRates,axis=0) #+ self.SubInput
# Pass these two back to the global variables to be used in the next method
self.SubstrateRatios = SubstrateRatios
self.DecayRates = DecayRates
def uptake(self,day):
"""
Explicit uptake of different monomers by transporters following the Michaelis-Menten equation.
Calculation procedure:
1. Average monomers across the grid:
2. Determine pool of monomers: add degradation and input, update stoichiometry
3. Maximum uptake:
4. Uptake by Monomer:
5. Uptake by Taxon:
"""
# Every monomer averaged over the grid in each time step
self.Monomers = expand(self.Monomers.groupby(level=0,sort=False).sum()/self.gridsize,self.gridsize)
# Indices
is_org = (self.Monomers.index != "NH4") & (self.Monomers.index != "PO4") # organic monomers
#is_mineral = (Monomers.index == "NH4") | (Monomers.index == "PO4")
# Update monomer ratios in each time step with organic monomers following the substrates
self.Monomer_ratios[is_org] = self.SubstrateRatios.values
# Determine monomer pool from decay and input
# Organic monomers derived from substrate-decomposition
Decay_Org = self.Monomer_ratios[is_org].mul(self.DecayRates.values,axis=0)
# inputs of organic and mineral monomers
#Input_Org = MR_transition[is_org].mul(self.MonInput[is_org].tolist(),axis=0)
#Input_Mineral = MR_transition[is_mineral].mul((self.MonInput[is_mineral]).tolist(),axis=0)
# Monomer pool determined
self.Monomers.loc[is_org] += Decay_Org #+ Input_Org
#self.Monomers.loc[is_mineral] += Input_Mineral
# Get the total mass of each monomer: C+N+P
rsm = self.Monomers.sum(axis=1)
# Recalculate monomer ratios after updating monomer pool and before uptake calculation
self.Monomer_ratios.loc[is_org] = self.Monomers.loc[is_org].divide(rsm[is_org],axis=0)
self.Monomer_ratios = self.Monomer_ratios.fillna(0)
# Start calculating monomer uptake
# Calculate uptake enzyme kinetic parameters, multiplied by a moisture multiplier accounting for the diffusivity implications
Uptake_Vmax = Arrhenius(self.Uptake_Vmax0, self.Uptake_Ea, self.temp[day]) * Allison(0.1, self.wp_fc, self.psi[day])
Uptake_Km = Arrhenius(self.Uptake_Km0, self.Km_Ea, self.temp[day])
# Equation for hypothetical potential uptake (per unit of compatible uptake protein)
Potential_Uptake = (self.Uptake_ReqEnz * Uptake_Vmax).mul(rsm.values,axis=0)/Uptake_Km.add(rsm.values,axis=0)
# Derive the mass of each transporter of each taxon NOTE: transpose the df to Upt*(Taxa*grid)
MicCXGenes = (self.Uptake_Enz_Cost.mul(self.Microbes.sum(axis=1),axis=0)).T
# Define Max_Uptake: (Monomer*gridsize) * Taxon
Max_Uptake_array = np.zeros((self.n_monomers*self.gridsize,self.n_taxa), dtype='float32')
Max_Uptake = pd.DataFrame(data=Max_Uptake_array, index=self.Monomers.index, columns=self.Microbes.index[0:self.n_taxa])
# Matrix multiplication to get max possible uptake by monomer(extract each grid point separately for operation)
for i in range(self.gridsize):
i_monomer = np.arange(i * self.n_monomers, (i+1) * self.n_monomers)
i_taxa = np.arange(i * self.n_taxa, (i+1) * self.n_taxa)
Max_Uptake.iloc[i_monomer,:] = Potential_Uptake.iloc[i_monomer,:].values @ MicCXGenes.iloc[:,i_taxa].values
# Take the min of the monomer available and the max potential uptake, and scale the uptake to what's available
csmu = Max_Uptake.sum(axis=1) # total potential uptake of each monomer
Uptake = Max_Uptake.mul(pd.concat([csmu,rsm],axis=1).min(axis=1,skipna=True)/csmu,axis=0) #(Monomer*gridsize) * Taxon
Uptake.loc[csmu==0] = np.float32(0)
# End computing monomer uptake
# Update Monomers
# By monomer: total uptake (monomer*gridsize) * 3(C-N-P)
self.Monomers -= self.Monomer_ratios.mul(Uptake.sum(axis=1),axis=0)
# Derive Taxon-specific total uptake of C, N, & P
# By taxon: total uptake; (monomer*gridsize) * taxon
C_uptake_df = Uptake.mul(self.Monomer_ratios["C"],axis=0)
N_uptake_df = Uptake.mul(self.Monomer_ratios["N"],axis=0)
P_uptake_df = Uptake.mul(self.Monomer_ratios["P"],axis=0)
# generic multi-index
C_uptake_df.index = N_uptake_df.index = P_uptake_df.index = [np.arange(self.gridsize).repeat(self.n_monomers),C_uptake_df.index]
TUC_df = C_uptake_df.groupby(level=[0]).sum()
TUN_df = N_uptake_df.groupby(level=[0]).sum()
TUP_df = P_uptake_df.groupby(level=[0]).sum()
# Update these 3 global variables
self.Taxon_Uptake_C = TUC_df.stack().values # spatial C uptake: array
self.Taxon_Uptake_N = TUN_df.stack().values # spatial N uptake: array
self.Taxon_Uptake_P = TUP_df.stack().values # spatial P uptake: array
def metabolism(self,day):
"""
Explicitly calculate intra-cellular production of metabolites.
Handles both constitutive (standing biomass) and inducible (immediate monomers uptake) pathways following:
1. constitutive enzyme and osmolyte production
2. inducible enzyme and osmolyte production
3. emergent CUE & Respiration
4. update both Enzymes (with production & loss) and Substrates (with dead enzymes)
"""
# Constants
Osmo_N_cost = np.float32(0.3) # N cost per unit of osmo-C production
Osmo_Maint_cost = np.float32(5.0) # C loss per unit of osmo-C production
Enzyme_Loss_Rate = np.float32(0.04) # enzyme turnover rate(=0.04; Allison 2006)
# index of dead enzyme in Substrates
is_deadEnz = self.Substrates.index == "DeadEnz"
#---------------------------------------------------------------------#
#......................constitutive processes.........................#
#---------------------------------------------------------------------#
# Variable Acronyms:
# OECCN : Osmo_Enzyme_Consti_Cost_N
# ARROEC: Avail_Req_ratio_osmo_enzyme_consti
# MNAOEC: Min_N_Avail_Osmo_Enzyme_Consti
#...............................................
# Taxon-specific respiration cost of producing transporters: self.uptake_maint_cost = 0.01
# NOTE Microbes['C'],as opposed to Microbes.sum(axis=1) in DEMENT
Taxon_Transporter_Maint = self.Uptake_Enz_Cost.mul(self.Microbes['C'],axis=0).sum(axis=1) * self.Uptake_Maint_Cost
# Osmolyte before adjustment
Taxon_Osmo_Consti = self.Consti_Osmo_C.mul(self.Microbes['C'],axis=0)
Taxon_Osmo_Consti_Cost_N = (Taxon_Osmo_Consti * Osmo_N_cost).sum(axis=1)
# Enzyme before adjustment
Taxon_Enzyme_Consti = self.Consti_Enzyme_C.mul(self.Microbes['C'],axis=0)
Taxon_Enzyme_Consti_Cost_N = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['N_cost'],axis=1)).sum(axis=1)
# Adjust osmolyte & enzyme production based on available N in microbial biomass
OECCN = Taxon_Osmo_Consti_Cost_N + Taxon_Enzyme_Consti_Cost_N # Total N cost
MNAOEC = (pd.concat([OECCN[OECCN>0],self.Microbes['N'][OECCN>0]],axis=1)).min(axis=1,skipna=True) # get the minimum value
ARROEC = (MNAOEC/OECCN[OECCN>0]).fillna(0) # Derive ratio of available N to required N
# Osmolyte adjusted
Taxon_Osmo_Consti[OECCN>0] = Taxon_Osmo_Consti[OECCN>0].mul(ARROEC,axis=0) # adjusted osmolyte
Taxon_Osmo_Consti_Maint = (Taxon_Osmo_Consti * Osmo_Maint_cost).sum(axis=1) # maintenance
Taxon_Osmo_Consti_Cost_N = (Taxon_Osmo_Consti * Osmo_N_cost).sum(axis=1) # N cost (no P)
Taxon_Osmo_Consti_Cost_C = Taxon_Osmo_Consti.sum(axis=1) + Taxon_Osmo_Consti_Maint # total C consumption
# Enzyme adjusted
Taxon_Enzyme_Consti.loc[OECCN>0] = Taxon_Enzyme_Consti.loc[OECCN>0].mul(ARROEC,axis=0) # adjusted enzyme
Taxon_Enzyme_Consti_Maint = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['Maint_cost'],axis=1)).sum(axis=1) # maintenance
Taxon_Enzyme_Consti_Cost_N = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['N_cost'], axis=1)).sum(axis=1) # N cost
Taxon_Enzyme_Consti_Cost_P = (Taxon_Enzyme_Consti.mul(self.Enz_Attrib['P_cost'], axis=1)).sum(axis=1) # P cost
Taxon_Enzyme_Consti_Cost_C = Taxon_Enzyme_Consti.sum(axis=1) + Taxon_Enzyme_Consti_Maint # C cost (total)
#---------------------------------------------------------------------#
#.....Inducible processes.............................................#
#---------------------------------------------------------------------#
# Variable Acronyms:
# OEICN : Osmo_Enzyme_Induci_Cost_N
# OEIAN : Osmo_Enzyme_Induci_Avail_N
# ARROEI: Avail_Req_ratio_osmo_enzyme_induci
# MNAOEI: Min_N_Avail_Osmo_Enzyme_Induci
#..................................................
# Assimilation efficiency constrained by temperature
Taxon_AE = self.AE_ref + (self.temp[day] - (self.Tref - np.float32(273))) * self.AE_temp #scalar
# Taxon growth respiration
Taxon_Growth_Respiration = self.Taxon_Uptake_C * (np.float32(1) - Taxon_AE)
# derive the water potential modifier by calling the function microbe_osmo_psi()
f_psi = microbe_osmo_psi(self.alpha,self.wp_fc,self.psi[day])
# Inducible Osmolyte production only when psi reaches below wp_fc
Taxon_Osmo_Induci = self.Induci_Osmo_C.mul(self.Taxon_Uptake_C*Taxon_AE, axis=0) * f_psi
Taxon_Osmo_Induci_Cost_N = (Taxon_Osmo_Induci * Osmo_N_cost).sum(axis=1) # Total osmotic N cost of each taxon (.sum(axis=1))
# Inducible enzyme production
Taxon_Enzyme_Induci = self.Induci_Enzyme_C.mul(self.Taxon_Uptake_C*Taxon_AE, axis=0)
Taxon_Enzyme_Induci_Cost_N = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib['N_cost'],axis=1)).sum(axis=1) # Total enzyme N cost of each taxon (.sum(axis=1))
# Adjust production based on N available
OEICN = Taxon_Osmo_Induci_Cost_N + Taxon_Enzyme_Induci_Cost_N # Total N cost of osmolyte and enzymes
OEIAN = pd.Series(data=self.Taxon_Uptake_N, index=self.Microbes.index) # N available
MNAOEI = (pd.concat([OEICN[OEICN>0],OEIAN[OEICN>0]],axis=1)).min(axis=1,skipna=True) # Get the minimum value by comparing N cost to N available
ARROEI = (MNAOEI/OEICN[OEICN>0]).fillna(0) # Ratio of Available to Required
# Osmolyte adjusted: accompanying maintenence and N cost
Taxon_Osmo_Induci[OEICN>0] = Taxon_Osmo_Induci.loc[OEICN>0].mul(ARROEI,axis=0)
Taxon_Osmo_Induci_Maint = (Taxon_Osmo_Induci * Osmo_Maint_cost).sum(axis=1)
Taxon_Osmo_Induci_Cost_N = (Taxon_Osmo_Induci * Osmo_N_cost).sum(axis=1)
Taxon_Osmo_Induci_Cost_C = Taxon_Osmo_Induci.sum(axis=1) + Taxon_Osmo_Induci_Maint
# Enzyme adjusted: Total enzyme carbon cost (+ CO2 loss), N cost, and P cost for each taxon
Taxon_Enzyme_Induci[OEICN>0] = Taxon_Enzyme_Induci.loc[OEICN>0].mul(ARROEI,axis=0)
Taxon_Enzyme_Induci_Maint = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["Maint_cost"],axis=1)).sum(axis=1)
Taxon_Enzyme_Induci_Cost_N = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["N_cost"], axis=1)).sum(axis=1)
Taxon_Enzyme_Induci_Cost_P = (Taxon_Enzyme_Induci.mul(self.Enz_Attrib["P_cost"], axis=1)).sum(axis=1)
Taxon_Enzyme_Induci_Cost_C = Taxon_Enzyme_Induci.sum(axis=1) + Taxon_Enzyme_Induci_Maint
# Derive C, N, & P deposited as biomass from Uptake; ensure no negative values
Microbe_C_Gain = self.Taxon_Uptake_C - Taxon_Growth_Respiration - Taxon_Enzyme_Induci_Cost_C - Taxon_Osmo_Induci_Cost_C
Microbe_N_Gain = self.Taxon_Uptake_N - Taxon_Enzyme_Induci_Cost_N - Taxon_Osmo_Induci_Cost_N
Microbe_P_Gain = self.Taxon_Uptake_P - Taxon_Enzyme_Induci_Cost_P
self.Taxon_Enzyme_Cost_C = Taxon_Enzyme_Induci_Cost_C + Taxon_Enzyme_Consti_Cost_C
self.Taxon_Osmo_Cost_C = Taxon_Osmo_Induci_Cost_C + Taxon_Osmo_Consti_Cost_C
self.Microbe_C_Gain = Microbe_C_Gain - Taxon_Enzyme_Consti_Cost_C - Taxon_Osmo_Consti_Cost_C - Taxon_Transporter_Maint
#------------------------------------------------#
#...............Integration......................#
#------------------------------------------------#
# Update Microbial pools with GAINS (from uptake) and LOSSES (from constitutive production)
self.Microbes.loc[:,'C'] += Microbe_C_Gain - Taxon_Enzyme_Consti_Cost_C - Taxon_Osmo_Consti_Cost_C - Taxon_Transporter_Maint
self.Microbes.loc[:,'N'] += Microbe_N_Gain - Taxon_Enzyme_Consti_Cost_N - Taxon_Osmo_Consti_Cost_N
self.Microbes.loc[:,'P'] += Microbe_P_Gain - Taxon_Enzyme_Consti_Cost_P
self.Microbes[self.Microbes<0] = np.float32(0) # avoid negative values
# Taxon-specific emergent CUE
#CUE_taxon = Microbes['C'].copy() # create a dataframe and set all vals to 0
#CUE_taxon[:] = 0
#pos_uptake_index = self.Taxon_Uptake_C > 0
#CUE_taxon[pos_uptake_index] = Microbe_C_Gain[pos_uptake_index]/self.Taxon_Uptake_C[pos_uptake_index]
# System-level emergent CUE
Taxon_Uptake_C_grid = self.Taxon_Uptake_C.sum(axis=0) # Total C Uptake
if Taxon_Uptake_C_grid == 0:
self.CUE_system = np.float32(0)
else:
self.CUE_system = Microbe_C_Gain.sum(axis=0)/Taxon_Uptake_C_grid
# Respiration from Constitutive + Inducible(NOTE: missing sum(MicLoss[,"C"]) in the Mortality below)
self.Respiration = (
Taxon_Transporter_Maint + Taxon_Growth_Respiration + Taxon_Osmo_Consti_Maint +
Taxon_Osmo_Induci_Maint + Taxon_Enzyme_Consti_Maint + Taxon_Enzyme_Induci_Maint
).sum(axis=0)
# Derive Enzyme production
Taxon_Enzyme_Production = Taxon_Enzyme_Consti + Taxon_Enzyme_Induci # gene-specific prod of enzyme of each taxon: (taxon*gridsize) * enzyme
Taxon_Enzyme_Production.index = [np.arange(self.gridsize).repeat(self.n_taxa),Taxon_Enzyme_Production.index] # create a multi-index
EP_df = Taxon_Enzyme_Production.groupby(level=0).sum() # enzyme-specific production in each grid cell
Enzyme_Production = EP_df.stack().values # 1-D array
# Derive Enzyme turnover
Enzyme_Loss = self.Enzymes * Enzyme_Loss_Rate
# Update Enzyme pools by adding enzymes produced and subtracting the 'dead' enzymes
self.Enzymes += Enzyme_Production - Enzyme_Loss
# Update Substrates pools with dead enzymes
DeadEnz_df = pd.concat(
[Enzyme_Loss,
Enzyme_Loss.mul(self.Enz_Attrib['N_cost'].tolist()*self.gridsize,axis=0),
Enzyme_Loss.mul(self.Enz_Attrib['P_cost'].tolist()*self.gridsize,axis=0)],
axis=1
)
DeadEnz_df.index = [np.arange(self.gridsize).repeat(self.n_enzymes), DeadEnz_df.index] # create a multi-index
DeadEnz_gridcell = DeadEnz_df.groupby(level=0).sum() # total dead mass across taxa in each grid cell
self.Substrates.loc[is_deadEnz] += DeadEnz_gridcell.values
def mortality(self,day):
"""
Calculate microbial mortality and update the stoichiometry of the surviving microbial pools.
Kill starving microbes deterministically and drought-intolerant microbes stochastically.
Also update Substrates with dead microbial biomass, Monomers (with leaching losses), and Respiration.
"""
# Indices
Mic_index = self.Microbes.index
is_DeadMic = self.Substrates.index == 'DeadMic'
is_NH4 = self.Monomers.index == 'NH4'
is_PO4 = self.Monomers.index == 'PO4'
# Reset the index to arabic numerals from taxa series
self.Microbes = self.Microbes.reset_index(drop=True)
MinRatios = self.MinRatios.reset_index(drop=True)
# Create a blank dataframe, Death, having the same structure as Microbes
Death = self.Microbes.copy(deep=True)
Death[:] = np.float32(0)
# Create a series, kill, holding boolean value of False
kill = pd.Series([False]*self.n_taxa*self.gridsize)
# Start to calculate mortality
# --Kill microbes deterministically based on threshold values: C_min: 0.086; N_min:0.012; P_min: 0.002
starve_index = (self.Microbes['C']>0) & ((self.Microbes['C']<self.C_min)|(self.Microbes['N']<self.N_min)|(self.Microbes['P']<self.P_min))
# Index the dead, put them in Death, and set them to 0 in Microbes
Death.loc[starve_index] = self.Microbes[starve_index]
self.Microbes.loc[starve_index] = np.float32(0)
# Index the locations where microbial cells remain alive
mic_index = self.Microbes['C'] > 0
# --Kill microbes stochastically based on mortality prob as a function of water potential and drought tolerance
# call the function MMP:microbe_mortality_psi()
r_death = MMP(self.basal_death_prob,self.death_rate,self.tolerance,self.wp_fc,self.psi[day])
kill.loc[mic_index] = r_death[mic_index] > np.random.uniform(0,1,sum(mic_index)).astype('float32')
# Index the dead, put them in Death, and set them to 0 in Microbes
Death.loc[kill] = self.Microbes[kill]
self.Microbes.loc[kill] = np.float32(0)
# Index locations where microbes remain alive
mic_index = self.Microbes['C']>0
# Calculate the total dead mass (threshold & drought) across taxa in each grid cell
Death_gridcell = Death.groupby(Death.index//self.n_taxa).sum()
# Distinguish between conditions of complete death VS partial death
# All cells die
if sum(mic_index) == 0:
#...Update Substrates pool by adding dead microbial biomass
self.Substrates.loc[is_DeadMic] += Death_gridcell.values
# Partly die and adjust stoichiometry of those remaining alive
else:
# Index only those taxa in Microbes that have below-minimum quotas: Mic_subset
MicrobeRatios = self.Microbes[mic_index].divide(self.Microbes[mic_index].sum(axis=1),axis=0)
mic_index_sub = (MicrobeRatios["C"]<MinRatios[mic_index]["C"])|(MicrobeRatios["N"]<MinRatios[mic_index]["N"])|(MicrobeRatios["P"]<MinRatios[mic_index]["P"])
rat_index = self.Microbes.index.map(mic_index_sub).fillna(False)
# Derive the Microbes wanted
Mic_subset = self.Microbes[rat_index]
StartMicrobes = Mic_subset.copy(deep=True)
# Derive new ratios and Calculate difference between actual and min ratios
MicrobeRatios = Mic_subset.divide(Mic_subset.sum(axis=1),axis=0)
MinRat = MinRatios[rat_index]
Ratio_dif = MicrobeRatios - MinRat
# Create a df recording the ratio differences < 0
Ratio_dif_0 = Ratio_dif.copy(deep=True)
Ratio_dif_0[Ratio_dif>0] = np.float32(0)
# Create a df recording the ratio differences > 0
Excess = Ratio_dif.copy(deep=True)
Excess[Ratio_dif<0] = np.float32(0)
# Determine the limiting nutrient that will be conserved
Limiting = (-Ratio_dif/MinRat).idxmax(axis=1) # Series of index of the first occurrence of maximum in each row
# Set all deficient ratios to their minima
MicrobeRatios[Ratio_dif<0] = MinRat[Ratio_dif<0]
# Reduce the mass fractions for non-deficient elements in proportion to the distance from the minimum
# ....Partition the total deficit to the excess element(s) in proportion to their distances from their minima
MicrobeRatios[Ratio_dif>0] += Excess.mul((Ratio_dif_0.sum(axis=1)/Excess.sum(axis=1)),axis=0)[Ratio_dif>0]
# Construct hypothetical nutrient quotas for each possible minimum nutrient
MC = Mic_subset["C"]
MN = Mic_subset["N"]
MP = Mic_subset["P"]
MRC = MicrobeRatios["C"]
MRN = MicrobeRatios["N"]
MRP = MicrobeRatios["P"]
new_C = pd.concat([MC, MN*MRC/MRN, MP*MRC/MRP],axis=1)
new_C = new_C.fillna(0)
new_C[np.isinf(new_C)] = np.float32(0)
new_C.columns = ['C','N','P']
new_N = pd.concat([MC*MRN/MRC, MN, MP*MRN/MRP],axis=1)
new_N = new_N.fillna(0)
new_N[np.isinf(new_N)] = np.float32(0)
new_N.columns = ['C','N','P']
new_P = pd.concat([MC*MRP/MRC, MN*MRP/MRN, MP],axis=1)
new_P = new_P.fillna(0)
new_P[np.isinf(new_P)] = np.float32(0)
new_P.columns = ['C','N','P']
# Insert the appropriate set of nutrient quotas scaled to the minimum nutrient
C = [new_C.loc[i,Limiting[i]] for i in Limiting.index] #list
N = [new_N.loc[i,Limiting[i]] for i in Limiting.index] #list
P = [new_P.loc[i,Limiting[i]] for i in Limiting.index] #list
# Update Microbes
self.Microbes.loc[rat_index] = np.vstack((C,N,P)).transpose()
# Sum up the element losses from biomass across whole grid and calculate average loss
MicLoss = StartMicrobes - self.Microbes[rat_index]
# Update total respiration by adding ...
self.Respiration += sum(MicLoss['C'])
# Update monomer pools
self.Monomers.loc[is_NH4,"N"] += sum(MicLoss["N"])/self.gridsize
self.Monomers.loc[is_PO4,"P"] += sum(MicLoss["P"])/self.gridsize
# Update Substrates pool by adding dead microbial biomass
self.Substrates.loc[is_DeadMic] += Death_gridcell.values
# End of if else clause
# Calculate monomers' leaching and update Monomers
leaching_rate = monomer_leaching(self.psi[day])
self.Monomers.loc[is_NH4,"N"] -= self.Monomers.loc[is_NH4,"N"] * leaching_rate
self.Monomers.loc[is_PO4,"P"] -= self.Monomers.loc[is_PO4,"P"] * leaching_rate
# Restore the index to taxa series
self.Microbes.index = Mic_index
# Update the death toll of cells
self.Kill = kill.sum().astype('uint32')
def reproduction(self,day):
"""
Calculate reproduction and dispersal.
Update microbial composition/distribution on the spatial grid.
Relies on the following attributes:
fb : index of fungal taxa
max_size_b : threshold of cell division
max_size_f : threshold of cell division
x,y : x,y dimension of grid
dist : maximum dispersal distance: 1 cell
direct : dispersal direction: 0.95
"""
# index of Microbes
Mic_index = self.Microbes.index
# Set up the Colonization dataframe: taxon * 3(C,N,&P)
Colonization = self.Microbes.copy(deep=True)
Colonization = Colonization.reset_index(drop=True)
Colonization[:] = np.float32(0)
#STEP 1: Fungal translocation by calculating average biomass within fungal taxa
# Count the fungal taxa before cell division
Fungi_df = pd.Series(data=[0]*self.n_taxa*self.gridsize, index=Mic_index, name='Count', dtype='int8')
# Add one or two fungi to the count series based on size
Fungi_df.loc[(self.fb==1)&(self.Microbes['C']>0)] = np.int8(1)
Fungi_df.loc[(self.fb==1)&(self.Microbes['C']>self.max_size_f)] = np.int8(2)
Fungi_count = Fungi_df.groupby(level=0,sort=False).sum()
# Derive average biomass of fungal taxa
Microbes_grid = self.Microbes.groupby(level=0,sort=False).sum()
Mean_fungi = Microbes_grid.divide(Fungi_count,axis=0)
Mean_fungi[Fungi_count==0] = np.float32(0)
# Expand the fungal average across the grid
eMF = expand(Mean_fungi,self.gridsize)
#STEP 2: Cell division & translocate nutrients
MicrobesBeforeDivision = self.Microbes.copy(deep=True)
#bacterial cell division
bac_index = (self.fb==0) & (self.Microbes['C']>self.max_size_b)
self.Microbes[bac_index] = self.Microbes[bac_index]/2
#fungal cell division
fun_index = (self.fb==1) & (self.Microbes['C']>self.max_size_f)
self.Microbes[fun_index] = self.Microbes[fun_index]/2
# Put daughter cells into a separate dataframe, Reprod
Reprod = MicrobesBeforeDivision - self.Microbes
# Translocate nutrients within fungal taxa after reproduction
self.Microbes[(self.fb==1)&(self.Microbes['C']>0)] = eMF[(self.fb==1)&(self.Microbes['C']>0)]
# Index the daughter cells of fungi vs bacteria
daughters_b = (Reprod['C']>0) & (self.fb==0)
daughters_f = (Reprod['C']>0) & (self.fb==1)
# set all fungi equal to their grid averages for translocation before colonization
Reprod[daughters_f] = eMF[daughters_f]
#STEP 3: dispersal calculation
num_b = sum(daughters_b)
num_f = sum(daughters_f)
shift_x = pd.Series(data=[0] * self.gridsize*self.n_taxa, index=Mic_index, dtype='int8')
shift_y = pd.Series(data=[0] * self.gridsize*self.n_taxa, index=Mic_index, dtype='int8')
# Bacterial dispersal movements in X & Y direction
shift_x[daughters_b] = np.random.choice([i for i in range(-self.dist, self.dist+1)],num_b,replace=True).astype('int8')
shift_y[daughters_b] = np.random.choice([i for i in range(-self.dist, self.dist+1)],num_b,replace=True).astype('int8')
# Fungi always move positively in x direction, and in y direction constrained to one box away determined by probability "direct"
shift_x[daughters_f] = np.int8(1)
shift_y[daughters_f] = np.random.choice([-1,0,1], num_f, replace=True, p=[0.5*(1-self.direct),self.direct,0.5*(1-self.direct)]).astype('int8')
# Calculate x,y coordinates of dispersal destinations (% remainder of x/x) and substitute coordinates when there is no shift
new_x = (shift_x + list(np.repeat(range(1,self.x+1),self.n_taxa)) * self.y + self.x) % self.x
new_y = (shift_y + list(np.repeat(range(1,self.y+1),self.n_taxa*self.x)) + self.y) % self.y
new_x[new_x==0] = self.x
new_y[new_y==0] = self.y
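# the modulo wrap above implements periodic (torus) boundaries: cells dispersing past one edge re-enter from the opposite edge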
# Convert x,y coordinates to a Series of destination locations; NOTE: must -1
index_series = ((new_y-1)*self.x + (new_x-1)) * self.n_taxa + list(range(1,self.n_taxa+1)) * self.gridsize - 1
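# each grid box occupies n_taxa consecutive rows, so the destination row is (box index)*n_taxa + (taxon number) - 1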
#Step 4: colonization of dispersed microbes
# Transfer reproduced cells to new locations and sum when two or more of the same taxa go to same location
Colonization.iloc[index_series[daughters_b],:] = Reprod[daughters_b].values
Colonization.iloc[index_series[daughters_f],:] = Reprod[daughters_f].values
# Colonization of dispersing microbes
self.Microbes += Colonization.values
def reinitialization(self,initialization,microbes_pp,output,mode,pulse,switch):
"""
Reinitialize the system in a new pulse.
Reinitialize Substrates, Monomers, and Enzymes on the grid to the site-specific values they were given at the very beginning.
Parameters:
initialization: dictionary; site-specific initialization
microbes_pp: dataframe; taxon-specific mass in C, N, & P
output: an instance of the Output class, from which the var, MicrobesSeries_repop,
referring to taxon-specific total mass over the grid is retrieved
mode: int; 0 ('default') or 1 ('dispersal')
pulse: integer; the pulse index
switch: integer; pulse after which the temperature and water potential series advance year by year
Returns:
update temp, psi, Substrates, Monomers, Enzymes, and Microbes
"""
# reinitialize temperature and water potential
if (pulse < switch-1):
self.temp = initialization['Temp'].copy(deep=True)
self.psi = initialization['Psi'].copy(deep=True)
else:
self.temp = initialization['Temp'][(pulse-(switch-1))*365:(pulse-(switch-2))*365]
self.psi = initialization['Psi'][(pulse-(switch-1))*365:(pulse-(switch-2))*365]
# reinitialize site-based substrates, monomers, and enzymes in a new pulse
self.Substrates = initialization['Substrates'].copy(deep=True)
self.Monomers = initialization['Monomers'].copy(deep=True)
self.Enzymes = initialization['Enzymes'].copy(deep=True)
# reinitialize microbial community in a new pulse as per the mode in three steps
# first: retrieve the microbial pool; NOTE: copy()
#self.Microbes = self.Microbes_init.copy(deep=True)
#self.Microbes = initialization['Microbes_pp'].copy(deep=True)
self.Microbes = microbes_pp.copy(deep=True)
# then: derive cumulative abundance of each taxon over "a certain period" in the prior year/pulse
# default(mode==0): last day; dispersal(mode==1): whole previous year (NOTE: the column index)
if mode == 0:
index_l = (pulse+1)*self.cycle - 1
index_u = (pulse+1)*self.cycle + 1
cum_abundance = output.MicrobesSeries_repop.iloc[:,index_l:index_u].sum(axis=1)
else:
index_l = pulse*self.cycle + 1
index_u = (pulse+1)*self.cycle + 1
cum_abundance = output.MicrobesSeries_repop.iloc[:,index_l:index_u].sum(axis=1)
# account for the cell mass size difference of bacteria vs fungi
cum_abundance[self.fb[0:self.n_taxa]==1] *= self.max_size_b/self.max_size_f
# calculate frequency of every taxon
frequencies = cum_abundance/cum_abundance.sum()
frequencies = frequencies.fillna(0)
# last: assign microbes to each grid box randomly based on prior densities
choose_taxa = np.zeros((self.n_taxa,self.gridsize), dtype='int8')
for i in range(self.n_taxa):
choose_taxa[i,:] = np.random.choice([1,0], self.gridsize, replace=True, p=[frequencies[i], 1-frequencies[i]])
self.Microbes.loc[np.ravel(choose_taxa,order='F')==0] = np.float32(0)
import unittest
import numpy as np
import nglpy_cuda as ngl
# import objgraph
import os
f32 = np.float32
i32 = np.int32
def create_edge_set(edge_matrix):
""" A helper method for putting the output edge_matrices into a
format that is easy to test with the ground truth.
"""
edge_set = set()
for i, row in enumerate(edge_matrix):
for j in row:
if j != -1:
edge_set.add((min(i, j), max(i, j)))
return edge_set
class TestAPI(unittest.TestCase):
""" Class for testing the direct to CUDA API
"""
def setup(self):
"""
Setup repeatable test case with a known ground truth
"""
# User-editable variables
dir_path = os.path.dirname(os.path.realpath(__file__))
X = np.loadtxt(os.path.join(dir_path, 'data', 'points.txt'))
self.X = np.array(X, dtype=f32)
edges = np.loadtxt(os.path.join(dir_path, 'data', 'edges.txt'))
self.edges = np.array(edges, dtype=i32)
# Copyright (c) 2020 Cognitive & Perceptual Developmental Lab
# Washington University School of Medicine
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
# SOFTWARE.
# Primary Contact: <NAME>
# Email: <EMAIL>
# Secondary Contact: <NAME>
# Email: <EMAIL>
# The following functions have been vetted:
# percentThresh: (AT checked 05/11/2020)
# multipleRegressionShuffle: (MT checked)
# univariatePoissonScreen (MT checked 10/09/2020) - Max error = 9.96416e-07
# univariatePoissonScreenShuffle (MT checked 10/09/2020) - Max error = 9.96416e-07
# --------------------------------- #
# Import necessary built-in functions
import math
import numpy as np
from copy import copy
# --------------------------------- #
# Linear Regression (from Linear Models package - entire code not yet available to the public)
def multipleRegressionShuffle(nshuffles, Y, X, Fc):
if nshuffles > 0:
YS = np.array([Y, ]* (nshuffles + 1) ).transpose()
for i in range(1, nshuffles + 1):
np.random.shuffle(YS[:, i])
else:
YS = Y
npairs, nobs = Fc.shape
if X is not None:
df = nobs - 2 - X.shape[1]
X = np.hstack((np.ones((nobs, 1)),X))
else:
df = nobs - 2
X = np.ones((nobs, 1))
XpX = X.T.dot(X)
A = np.linalg.inv(XpX)
M = A.dot(X.T)
Q = np.sum(Fc**2, axis=1)
Z = (YS - X.dot(M.dot(YS)))
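# Z holds the residuals of the (shuffled) responses after regressing out the intercept and covariates in X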
sse0 = np.sum(Z ** 2, axis=0)
FX = Fc.dot(X)
FM = Fc.dot(M.T)
K = (Q - np.sum(FX*FM, axis=1)).reshape(-1,1)
G = Fc.dot(Z)
B = G/K
S = np.sqrt((sse0 - (G*B))/(df*K))
B /= S
return B.T
# --------------------------------- #
# Poisson (from GLM package - entire code not yet available to the public)
def univariatePoissonScreen(Y, Fc):
convtol = 1e-6
npairs, nobs = Fc.shape
sumY = np.sum(Y)
mu0 = sumY / nobs
b0 = np.log(mu0)
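# null (intercept-only) Poisson fit: with a log link, the MLE of the intercept is log(mean(Y))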
LL0 = (b0 - 1.) * sumY
D0 = Y - mu0
chi2 = np.zeros(npairs)
beta = np.zeros(npairs)
std = np.zeros(npairs)
B = np.zeros(2)
mu = np.zeros(nobs, dtype=float)
Z1 = np.hstack((np.ones((nobs, 1)), np.zeros((nobs, 1))))
Z2 = np.hstack((np.ones((nobs, 1)), np.zeros((nobs, 1)), np.zeros((nobs, 1))))
for pair in range(0, npairs):
Z1[:, 1] = Fc[pair, :]
Z2[:, 1] = Fc[pair, :]
Z2[:, 2] = Fc[pair, :]**2
B[0] = b0
B[1] = 0
dLdb0, dLdb1 = D0.dot(Z1)
d2Ld00, d2Ld10, d2Ld11 = mu0 * np.sum(Z2, axis=0)
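# Newton-Raphson updates (at most 10 iterations); the 2x2 information matrix is inverted in closed form via its determinant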
for iter in range(1, 11):
det = d2Ld00 * d2Ld11 - d2Ld10 ** 2
H00 = d2Ld11 / det
H10 = -d2Ld10 / det
H11 = d2Ld00 / det
delta0 = H00 * dLdb0 + H10 * dLdb1
delta1 = H10 * dLdb0 + H11 * dLdb1
if math.sqrt(delta0 ** 2 + delta1 ** 2) < convtol:
break
B[0] += delta0
B[1] += delta1
xb = Z1.dot(B)
np.exp(xb, out=mu)
D = Y - mu
dLdb0, dLdb1 = D.dot(Z1)
d2Ld00, d2Ld10, d2Ld11 = mu.dot(Z2)
chi2[pair] = Y.dot(xb) - np.sum(mu)
beta[pair] = B[1]
std[pair] = math.sqrt(H11)
chi2 = 2*(chi2 - LL0)
return beta, beta/std, chi2
def univariatePoissonScreenShuffle(nshuffles, Y, Fc):
npairs, nobs = Fc.shape
beta = np.zeros((nshuffles+1, npairs), dtype=float)
betastd = np.zeros((nshuffles+1, npairs), dtype=float)
chi2 = np.zeros((nshuffles + 1, npairs), dtype=float)
YS = copy(Y)
for i in range(0, nshuffles + 1):
beta[i,:], betastd[i, :], chi2[i,:] = univariatePoissonScreen(YS, Fc)
np.random.shuffle(YS)
return beta, betastd, chi2
import numpy as np
import ReactionNetworkClass as rxn
import tensorflow as tf
import itertools
from scipy.integrate import odeint
class independent_birth_death(rxn.ReactionNetworkDefinition):
"""independent birth death network"""
def __init__(self, num_species):
num_reactions = 2 * num_species
species_labels = ["X%d" % i for i in range(num_species)]
output_species_labels = [species_labels[-1]]
reactant_matrix = np.zeros([num_reactions, num_species], dtype=int)
product_matrix = np.zeros([num_reactions, num_species], dtype=int)
# 1. Birth of all the species
for i in np.arange(num_species):
product_matrix[i, i] = 1
# 2. degradation of all the species
for i in np.arange(num_species):
reactant_matrix[num_species + i, i] = 1
# define parameters
parameter_dict = {'production rate': 10, 'degradation rate': 1}
reaction_dict = {}
for i in np.arange(num_species):
reaction_dict[i] = ['mass action', 'production rate']
for i in np.arange(num_species):
reaction_dict[i + num_species] = ['mass action', 'degradation rate']
super(independent_birth_death, self).__init__(num_species, num_reactions, reactant_matrix,
product_matrix, parameter_dict, reaction_dict,
species_labels, output_species_labels)
self.set_propensity_vector()
self.set_propensity_sensitivity_matrix()
self.output_function_size = 2
self.initial_state = np.zeros(self.num_species)
# define output function
def output_function(self, state):
output_list = [state[:, i] for i in self.output_species_indices]
output_list_second_moment = [state[:, i] ** 2 for i in self.output_species_indices]
output_list_cross_moments = [state[:, subset[0]] * state[:, subset[1]] for subset
in itertools.combinations(self.output_species_indices, 2)]
for elem in output_list_second_moment + output_list_cross_moments:
output_list.append(elem)
return tf.stack(output_list, axis=1)
# here we compute the exact outputs and their sensitivities for this example
def moment_eqn_sens(self, y, t):
dydt = np.zeros(np.shape(y))
k = self.parameter_dict['production rate']
g = self.parameter_dict['degradation rate']
dydt[0] = k - g * y[0]
dydt[1] = -2 * g * y[1] + (2 * k + g) * y[0] + k
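# y[0] and y[1] are the first and second moments of the output species; the remaining entries of y are
# their sensitivities with respect to the two parameters, flattened row-major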
dydt_sens = np.zeros([len(self.parameter_dict.keys()), self.output_function_size])
y_sens = np.reshape(y[self.output_function_size:], np.shape(dydt_sens), order='C')
dydt_sens[0, 0] = 1 - g * y_sens[0, 0]
dydt_sens[1, 0] = - y[0] - g * y_sens[1, 0]
dydt_sens[0, 1] = - 2 * g * y_sens[0, 1] + 2 * y[0] + 2 * k * y_sens[0, 0] + 1
dydt_sens[1, 1] = -2 * y[1] - 2 * g * y_sens[1, 1] + y[0] + (2 * k + g) * y_sens[1, 0]
dydt[self.output_function_size:] = np.ndarray.flatten(dydt_sens, order='C')
return dydt
def exact_values(self, finaltime):
y0 = np.zeros([self.output_function_size + self.output_function_size * len(self.parameter_dict.keys())])
t = np.linspace(0, finaltime, 101)
# solve the moment equations
sol = odeint(self.moment_eqn_sens, y0, t)
exact_sens = sol[-1, :]
exact_function_vals = exact_sens[:self.output_function_size]
exact_sens_vals = np.reshape(exact_sens[self.output_function_size:], [len(self.parameter_dict.keys()),
self.output_function_size])
return exact_function_vals, exact_sens_vals
class linear_signalling_cascade(rxn.ReactionNetworkDefinition):
"""linear signalling cascade network"""
def __init__(self, num_species):
num_reactions = 2 * num_species
species_labels = ["X%d" % i for i in range(num_species)]
output_species_labels = [species_labels[-1]]
reactant_matrix = np.zeros([num_reactions, num_species], dtype=int)
product_matrix = np.zeros([num_reactions, num_species], dtype=int)
# 1. Constitutive production of the first species
product_matrix[0, 0] = 1
# 2. Catalytic production of the other species
for i in np.arange(num_species - 1):
reactant_matrix[i + 1, i] = 1
product_matrix[i + 1, i] = 1
product_matrix[i + 1, i + 1] = 1
# 3. Dilution of all the species
for i in np.arange(num_species):
reactant_matrix[num_species + i, i] = 1
# define parameters
parameter_dict = {'base production rate': 10.0, 'translation rate': 5.0, 'dilution rate': 1.0}
reaction_dict = {0: ['mass action', 'base production rate']}
for i in np.arange(num_species - 1):
reaction_dict[i + 1] = ['mass action', 'translation rate']
for i in np.arange(num_species):
reaction_dict[i + num_species] = ['mass action', 'dilution rate']
super(linear_signalling_cascade, self).__init__(num_species, num_reactions, reactant_matrix,
product_matrix, parameter_dict, reaction_dict,
species_labels, output_species_labels)
self.initial_state = np.zeros(self.num_species)
self.set_propensity_vector()
self.set_propensity_sensitivity_matrix()
self.output_function_size = 2
# define output function
def output_function(self, state):
output_list = [state[:, i] for i in self.output_species_indices]
output_list_second_moment = [state[:, i] ** 2 for i in self.output_species_indices]
output_list_cross_moments = [state[:, subset[0]] * state[:, subset[1]] for subset
in itertools.combinations(self.output_species_indices, 2)]
for elem in output_list_second_moment + output_list_cross_moments:
output_list.append(elem)
return tf.stack(output_list, axis=1)
# here we compute the exact outputs and their sensitivities for this example
def moment_eqn_sens(self, y, t):
dydt = np.zeros(np.shape(y))
beta = self.parameter_dict['base production rate']
k = self.parameter_dict['translation rate']
g = self.parameter_dict['dilution rate']
n = self.num_species
num_params = 3
W = np.zeros([2 * n, n], dtype=float)
w_0 = np.zeros(2 * n, dtype=float)
w_0[0] = beta
W[0:n, :] = k * np.diag(np.ones(n - 1), -1)
W[n: 2 * n, :] = g * np.diag(np.ones(n))
A = np.matmul(np.transpose(self.stoichiometry_matrix), W)
b = np.matmul(np.transpose(self.stoichiometry_matrix), w_0)
dydt[0:n] = np.matmul(A, y[0:n]) + b
Sigma = np.reshape(y[n:n * (n + 1)], [n, n], order='C')
dsigma_dt = np.matmul(A, Sigma) + np.matmul(Sigma, np.transpose(A))
dsigma_dt += np.matmul(np.matmul(np.transpose(self.stoichiometry_matrix), np.diag(np.matmul(W, y[0:n]) + w_0)),
self.stoichiometry_matrix)
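# i.e. the covariance (Lyapunov-type) equation dSigma/dt = A*Sigma + Sigma*A^T + S^T*diag(W*mu + w_0)*S,
# with S the (reactions x species) stoichiometry matrix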
dydt[n:n * (n + 1)] = np.ndarray.flatten(dsigma_dt, order='C')
W_sens = np.zeros([num_params, 2 * n, n], dtype=float)
A_sens = np.zeros([num_params, n, n], dtype=float)
w_0_sens = np.zeros([num_params, 2 * n], dtype=float)
b_sens = np.zeros([num_params, n], dtype=float)
temp_dydt = np.zeros([num_params, n], dtype=float)
temp2_dydt = np.zeros([num_params, n, n], dtype=float)
# der w.r.t. beta
w_0_sens[0, 0] = 1
# der w.r.t. k
W_sens[1, 0:n, :] = np.diag(np.ones(n - 1), -1)
# der w.r.t. gamma
W_sens[2, n:2 * n, :] = np.diag(np.ones(n))
y_sens = np.reshape(y[n * (n + 1):n * (n + 1) + num_params * n], [num_params, n], order='C')
Sigma_sens = np.reshape(y[n * (n + 1) + num_params * n:], [num_params, n, n], order='C')
for i in np.arange(num_params):
A_sens[i, :, :] = np.matmul(np.transpose(self.stoichiometry_matrix), W_sens[i, :, :])
b_sens[i, :] = np.matmul(np.transpose(self.stoichiometry_matrix), w_0_sens[i, :])
temp_dydt[i, :] = np.matmul(A_sens[i, :, :], y[0:n]) + np.matmul(A, y_sens[i, :]) + b_sens[i, :]
temp2_dydt[i, :, :] = np.matmul(A_sens[i, :, :], Sigma) + np.matmul(A, Sigma_sens[i, :, :]) \
+ np.matmul(Sigma, np.transpose(A_sens[i, :, :])) + np.matmul(Sigma_sens[i, :, :],
np.transpose(A))
temp2_dydt[i, :, :] += np.matmul(np.matmul(np.transpose(self.stoichiometry_matrix),
np.diag(np.matmul(W_sens[i, :, :], y[0: n])
+ np.matmul(W, y_sens[i, :]) + w_0_sens[i, :])),
self.stoichiometry_matrix)
dydt[n * (n + 1):n * (n + 1) + num_params * n] = np.ndarray.flatten(temp_dydt, order='C')
dydt[n * (n + 1) + num_params * n:] = np.ndarray.flatten(temp2_dydt, order='C')
return dydt
def exact_values(self, finaltime):
n = self.num_species
num_params = 3
y0 = np.zeros([n * (n + 1) + num_params * n * (n + 1)])
t = np.linspace(0, finaltime, 1001)
# solve the moment equations
sol = odeint(self.moment_eqn_sens, y0, t)
exact_vals = sol[-1, :]
Sigma = np.reshape(exact_vals[n:n * (n + 1)], [n, n], order='C')
y_sens = np.reshape(exact_vals[n * (n + 1):n * (n + 1) + num_params * n], [num_params, n], order='C')
Sigma_sens = np.reshape(exact_vals[n * (n + 1) + num_params * n:], [num_params, n, n], order='C')
exact_function_vals = np.array([exact_vals[n - 1], Sigma[n - 1, n - 1] + exact_vals[n - 1] ** 2])
exact_sens_vals = np.zeros([num_params, self.output_function_size])
exact_sens_vals[:, 0] = y_sens[:, n - 1]
exact_sens_vals[:, 1] = Sigma_sens[:, n - 1, n - 1] + 2 * exact_vals[n - 1] * exact_sens_vals[:, 0]
return exact_function_vals, exact_sens_vals
class nonlinear_signalling_cascade(rxn.ReactionNetworkDefinition):
"""nonlinear_signalling_cascade network"""
def __init__(self, num_species):
num_reactions = 2 * num_species
species_labels = ["X%d" % i for i in range(num_species)]
output_species_labels = [species_labels[-1]]
reactant_matrix = np.zeros([num_reactions, num_species], dtype=int)
product_matrix = np.zeros([num_reactions, num_species], dtype=int)
"""
A module for generic classification purpose.
Functionality includes:
normalize_l2norm: Normalize each row to have unit l_2 norm.
normalize_mean0std1: Normalize each feature to have mean 0 and std 1.
balance_sample_size: Balance sample size of a data set among classes.
change_class_labels: Change class labels to {0,1,2,3,...,C-1}.
change_class_labels_to_given: Change original class labels to given labels.
merge_class_labels: Merge class labels into several super groups/classes.
take_some_classes: Only take several classes, and remove the rest.
partition_train_valid_test: Partition the whole data into training, validation, and test sets.
reduce_sample_size: Reduce the sample size to 1/times of the original.
perform: Compute the classification performance.
write_feature_weight: Write the input layer weights to a file.
Only applicable to deep feature selection.
write_feature_weight2: Write the input layer weights and other information to a file.
Only applicable to deep feature selection.
<NAME>
CMMT, UBC, Vancouver
Sep 23, 2014
Contact: <EMAIL>
"""
from __future__ import division
import numpy as np
from sklearn import cross_validation
import math
def normalize_l2norm(data):
"""
Normalize each row to have unit l_2 norm.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
OUTPUTS:
data: numpy 2d array or matrix, normalized data.
Example:
data=[[3,5,7,9],[3.0,2,1.1,8.4],[5.9,9,8,10]]
data=np.array(data)
data_normalized=normalize_l2norm(data)
print data_normalized
"""
data_sqrt=np.sqrt(np.square(data).sum(axis=1))
data_sqrt.shape=(data_sqrt.shape[0],1)
tol=2**-30
data=(data+tol)/(data_sqrt+tol)
return data
def normalize_mean0std1(data,data_mean=None,data_std=None):
"""
Normalize each feature to have mean 0 and std 1.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
data_mean: numpy 1d array or vector, the given means of the features, useful for normalizing test data.
data_std: numpy 1d array or vector, the given standard deviations of the features, useful for normalizing test data.
OUTPUTS:
data: numpy 2d array or matrix, normalized data.
data_mean: numpy 1d array or vector, the means used for the normalization.
data_std: numpy 1d array or vector, the standard deviations used for the normalization.
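Example (illustrative):
data=np.random.random((5,3))
data_normalized,data_mean,data_std=normalize_mean0std1(data)
# reuse the training statistics to normalize new (test) data
test_normalized,_,_=normalize_mean0std1(np.random.random((2,3)),data_mean,data_std)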
"""
if data_mean is None:
data_mean=np.mean(data,axis=0)
data_mean.reshape((1,data_mean.shape[0]))
if data_std is None:
data_std=np.std(data,axis=0)
data_std.reshape((1,data_std.shape[0]))
tol=1e-16
return (data-data_mean)/(data_std+tol),data_mean,data_std
def balance_sample_size(data,classes,others=None,min_size_given=None,rng=np.random.RandomState(100)):
"""
Balance sample size of a data set among classes.
INPUTS:
data: numpy 2d array or matrix, each row should be a sample.
classes: numpy 1d array or vector, class labels.
others: numpy 2d array or matrix, extra information of samples if available,
each row should be associated with a row of data.
min_size_given: int, the size of each class wanted.
rng: numpy random state.
OUTPUTS:
data: numpy 2d array or matrix, each row should be a sample, balanced data.
classes: numpy 1d array or vector, balanced class labels.
others: numpy 2d array or matrix, balanced other information.
Example:
data=[[1,1,1],[2,2,2],[3,3,3],[4,4,4],[5,5,5],[6,6,6],[7,7,7]]
data=np.array(data)
classes=np.array(['zz','xx','xx','yy','zz','yy','xx'])
balance_sample_size(data,classes)
"""
u, indices = np.unique(classes,return_inverse=True)
indices=np.asarray(indices)
num_u=len(u)
sample_sizes=[]
# get sample size of each class
for i in xrange(num_u):
sample_size_this=np.sum(indices==i)
sample_sizes.append(sample_size_this)
size_min=np.amin(sample_sizes) # smallest sample size
if min_size_given and size_min>min_size_given:
size_min=min_size_given
indices_all=np.array([],dtype=indices.dtype)
indices_range=np.array(range(len(indices)))
for i in xrange(num_u):
ind_this_num=indices_range[indices==i]
ind_this_reduced=ind_this_num[rng.choice(len(ind_this_num),size=size_min,replace=False)]
indices_all=np.append(indices_all,ind_this_reduced)
# reduce the data
data=data[indices_all]
classes=classes[indices_all]
if others is not None:
others=others[indices_all]
return data,classes,others
def change_class_labels(classes):
"""
Change class labels to {0,1,2,3,...,C-1}.
INPUTS:
classes: numpy 1d array or vector, the original class labels.
OUTPUTS:
u: numpy 1d array or vector, the unique class labels of the original class labels.
indices: numpy 1d array or vector, the new class labels from {0,1,2,3,...,C-1}.
Example:
classes=['c2','c3','c2','c1','c2','c1','c3','c2']
change_class_labels(classes)
<NAME>, in UBC
Aug 22, 2014.
"""
u,indices=np.unique(classes,return_inverse=True)
return u,indices
def change_class_labels_to_given(classes,given):
"""
Change original class labels to given labels.
INPUTS:
classes: numpy 1 d array or vector, the original class labels.
given: dic, pairs of old and new labels.
OUTPUTS:
classes_new: numpy 1 d array or vector, changed class labels.
Example:
classes=[1,2,0,0,2,1,1,2]
given={1:"class1", 2:"class2", 0:"class0"}
change_class_labels_to_given(classes,given)
"""
classes=np.asarray(classes)
classes_new=np.zeros(classes.shape,dtype=object)
for i in given:
classes_new[classes==i]=given[i]
return classes_new
def merge_class_labels(classes,group):
"""
Merge class labels into several super groups/classes.
INPUTS:
classes: numpy 1 d array or vector, the original class labels.
group: tuple of tuples or lists,
group[i] indicates which original classes are to be merged into the i-th super class.
OUTPUTS:
classes_merged: numpy 1 d array or vector, the merged class labels.
If original labels are strings, they are concatenated by "+".
If original labels are numbers, they are renumbered starting from 0.
Example
classes=[0,3,4,2,1,3,3,2,4,1,1,0,0,1,2,3,4,1]
group=([0],[1,2],[3,4])
merge_class_labels(classes,group)
classes=['c2','c1','c0','c0','c1','c2','c1']
group=(['c0'],['c1','c2'])
merge_class_labels(classes,group)
"""
classes=np.asarray(classes)
if (classes.dtype != int) and (classes.dtype != 'int64') and (classes.dtype != 'int32'):
classes_merged=np.zeros(classes.shape,dtype=object)
for subgroup in group:
subgroup_label='+'.join(subgroup)
for member in subgroup:
classes_merged[classes==member]=subgroup_label
else: # int class labels
classes_merged=np.zeros(classes.shape,dtype=int)
for i in range(len(group)):
subgroup=group[i]
for member in subgroup:
classes_merged[classes==member]=i
return classes_merged
def take_some_classes(data,classes,given):
"""
Only take several classes, and remove the rest.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
classes: numpy 1d array or vector, class labels, the original labels.
given: numpy 1d array or vector, indicates which classes are to be taken.
OUTPUTS:
data: numpy 2d array or matrix, each row is a sample, the taken data.
classes: numpy 1d array or vector, class labels, the taken labels.
"""
classes=np.asarray(classes)
log_ind=np.zeros(classes.shape,dtype=bool)
for i in range(len(given)):
log_ind[classes==given[i]]=True
classes=classes[log_ind]
data=data[log_ind]
return data,classes
def partition_train_valid_test(data,classes,ratio=(1,1,1)):
"""
Partition the whole data into training, validation, and test sets.
INPUTS:
data: numpy 2d array or matrix, each row is a sample, the original data.
classes: numpy 1d array or vector, class labels, the original labels.
ratio: int tuple or list of length 3, (ratio_of_train_set, ratio_of_valid_set, ratio_of_test_set).
OUTPUTS:
train_set_x: data of training set.
train_set_y: class labels of training set.
valid_set_x: data of validation set.
valid_set_y: class labels of validation set.
test_set_x: data of test set.
test_set_y: class labels of test set.
Example:
data=np.random.random((20,3))
classes=np.array([0,2,2,2,0,0,1,1,0,0,0,2,2,2,0,0,1,1,0,0],dtype=int)
train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y \
=partition_train_valid_test(data,classes,ratio=(2,1,1))
<NAME>, in UBC.
August 22, 2014.
"""
k=sum(ratio) # ratio must be a vector of integers
skf = cross_validation.StratifiedKFold(classes, n_folds=k)
train_ind=np.array([],dtype=int)
valid_ind=np.array([],dtype=int)
test_ind=np.array([],dtype=int)
count=0
for (tr,te) in skf:
if count<ratio[0]:
train_ind=np.append(train_ind,te)
count=count+1
continue
if count>=ratio[0] and count <ratio[0]+ratio[1]:
valid_ind=np.append(valid_ind,[te])
count=count+1
continue
if count>=ratio[0]+ratio[1]:
test_ind=np.append(test_ind,[te])
count=count+1
continue
train_set_x=data[train_ind]
train_set_y=classes[train_ind]
valid_set_x=data[valid_ind]
valid_set_y=classes[valid_ind]
test_set_x=data[test_ind]
test_set_y=classes[test_ind]
return train_set_x,train_set_y,valid_set_x,valid_set_y,test_set_x,test_set_y
def perform(y,y_predicted,unique_classes):
"""
Compute the classification performance.
INPUTS:
y: numpy 1d array or vector, the actual class labels.
y_predicted: numpy 1d array or vector, the predicted class labels.
unique_classes: numpy 1d array or vector of length C (# classes), all unique actual class labels.
OUTPUTS:
perf: numpy 1d array or vector of length C+2,
[acc_0, acc_1, ..., acc_{C-1}, accuracy, balanced accuracy].
confusion_matrix: numpy 2d array of size C X C, confusion matrix.
Example:
y=[0,0,1,1,1,2,2,2,2]
y_predicted=[0,1,1,1,2,2,2,0,1]
perform(y,y_predicted,[0,1,2])
<NAME>, in UBC.
August 23, 2014.
"""
y=np.asarray(y,dtype=int)
y_predicted=np.asarray(y_predicted,dtype=int)
numcl=len(unique_classes)
confusion_matrix=np.zeros((numcl,numcl),dtype=float)
for i in xrange(len(y)):
confusion_matrix[y[i],y_predicted[i]]=confusion_matrix[y[i],y_predicted[i]]+1
perf=np.zeros((numcl+2,)) # acc_0,acc_1,...,acc_C-1, acc, BACC
perf[0:numcl]=confusion_matrix.diagonal()/confusion_matrix.sum(axis=1)
perf[numcl]=confusion_matrix.diagonal().sum()/confusion_matrix.sum(axis=1).sum()
perf[numcl+1]=np.mean(perf[0:numcl])
return perf,confusion_matrix
import os
import sys
import numpy as np
from pycocotools import mask as maskUtils
import imgaug
import skimage
from matplotlib import pyplot as plt
import cv2
import time
from pycocotools.cocoeval import COCOeval
from pycocotools.coco import COCO
ROOT_DIR = os.path.abspath("../")
# Import Mask RCNN
sys.path.append(ROOT_DIR)
print(ROOT_DIR)
COCO_MODEL_PATH = os.path.join(ROOT_DIR, "mask_rcnn_coco.h5")
from mrcnn.config import Config
from mrcnn import model as modellib, utils
from mrcnn import visualize
from angiodataset import AngioDataset
import json
import tensorflow as tf
gpus = tf.config.experimental.list_physical_devices('GPU')
if gpus:
try:
# Currently, memory growth needs to be the same across GPUs
for gpu in gpus:
tf.config.experimental.set_memory_growth(gpu, True)
logical_gpus = tf.config.experimental.list_logical_devices('GPU')
print(len(gpus), "Physical GPUs,", len(logical_gpus), "Logical GPUs")
except RuntimeError as e:
# Memory growth must be set before GPUs have been initialized
print(e)
"""Arrange resutls to match COCO specs in http://cocodataset.org/#format
"""
def build_coco_results(dataset, image_ids, rois, class_ids, scores, masks):
# If no results, return an empty list
if rois is None:
return []
results = []
for image_id in image_ids:
# Loop through detections
for i in range(rois.shape[0]):
class_id = class_ids[i]
score = scores[i]
bbox = np.around(rois[i], 1)
import multiprocessing
from py_wake.examples.data.hornsrev1 import Hornsrev1Site, wt_x, wt_y
from py_wake import IEA37SimpleBastankhahGaussian
from py_wake.tests.check_speed import timeit
import numpy as np
from py_wake.tests import npt
from py_wake.wind_turbines import WindTurbines
from py_wake.examples.data import wtg_path
import pytest
def get_wfm(grad=True):
wt = WindTurbines.from_WAsP_wtg(wtg_path + "Vestas-V80.wtg", )
site = Hornsrev1Site()
return IEA37SimpleBastankhahGaussian(site, wt)
wd_lst = np.arange(0, 360, 180)
def aep_wd(args):
x, y, wd = args
return get_wfm()(x, y, wd=wd, ws=None).aep().sum()
def aep_all_multiprocessing(pool, x, y):
return np.sum(pool.map(aep_wd, [(x, y, i) for i in wd_lst]))
def aep_wfm_xy(args):
wfm, x, y = args
return wfm(x, y, wd=wd_lst).aep().sum()
def aep_xy(args):
x, y = args
return get_wfm()(x, y, wd=wd_lst).aep().sum()
@pytest.fixture(scope='module')
def pool():
return multiprocessing.Pool(2)
debug = False
def test_multiprocessing_wd(pool):
# compare result of one vectorized call over all wd in wd_lst with result of multiprocessing 1wd/cpu
# Slow down is expected
aep1, t_lst1 = timeit(aep_wd, min_runs=1)((wt_x, wt_y, wd_lst))
aep2, t_lst2 = timeit(aep_all_multiprocessing, min_runs=1)(pool, wt_x, wt_y)
t1, t2 = np.mean(t_lst1), np.mean(t_lst2)
if debug:
print("1 CPU, 12wd/CPU: %.2fs, %d CPUs, 1wd/CPU: %.2fs, speedup: %d%%" %
(t1, pool._processes, t2, (t1 - t2) / t1 * 100))
npt.assert_almost_equal(aep1, aep2 / len(wd_lst))
def test_multiprocessing_wfm_xy():
pool = multiprocessing.Pool(2)
arg_lst = [(get_wfm(grad=False), np.array(wt_x) + i, wt_y) for i in range(4)]
aep1, t_lst1 = timeit(lambda arg_lst: [aep_wfm_xy(arg) for arg in arg_lst])(arg_lst)
aep2, t_lst2 = timeit(lambda arg_lst: pool.map(aep_wfm_xy, arg_lst))(arg_lst)
t1, t2 = np.mean(t_lst1), np.mean(t_lst2)
npt.assert_array_almost_equal(aep1, aep2)
import functools
import numpy
import numpy.testing
import pytest
import six.moves
import skimage.util
import tests.modules
import cellprofiler_core.image
import cellprofiler_core.measurement
import cellprofiler_core.module
import cellprofiler.modules.imagemath
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.preferences
import cellprofiler_core.workspace
cellprofiler_core.preferences.set_headless()
MEASUREMENT_NAME = "mymeasurement"
@pytest.fixture(scope="function")
def module():
return cellprofiler.modules.imagemath.ImageMath()
@pytest.fixture(scope="function")
def workspace(image_a, image_b, module):
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
workspace = cellprofiler_core.workspace.Workspace(
image_set=image_set,
image_set_list=image_set_list,
module=module,
pipeline=cellprofiler_core.pipeline.Pipeline(),
measurements=cellprofiler_core.measurement.Measurements(),
object_set=cellprofiler_core.object.ObjectSet(),
)
workspace.image_set.add("input_a", image_a)
workspace.image_set.add("input_b", image_b)
module.images[0].image_name.value = "input_a"
module.images[0].factor.value = 1.0
module.images[1].image_name.value = "input_b"
module.images[1].factor.value = 1.0
module.truncate_low.value = False
module.truncate_high.value = False
module.output_image_name.value = "output"
return workspace
def run_operation(operation, expected, module, workspace):
module.operation.value = operation
module.replace_nan.value = False
module.run(workspace)
output = workspace.image_set.get_image("output")
actual = output.pixel_data
numpy.testing.assert_array_equal(actual, expected)
class TestVolumes(object):
@staticmethod
@pytest.fixture(scope="function")
def image_a():
k, i, j = numpy.mgrid[-5:6, -5:6, -5:10]
data_a = numpy.zeros((11, 11, 15))
data_a[k ** 2 + i ** 2 + j ** 2 <= 25] = 1
image_a = cellprofiler_core.image.Image()
image_a.pixel_data = data_a
image_a.dimensions = 3
return image_a
@staticmethod
@pytest.fixture(scope="function")
def image_b():
k, i, j = numpy.mgrid[-5:6, -5:6, -10:5]
data_b = numpy.zeros((11, 11, 15))
data_b[k ** 2 + i ** 2 + j ** 2 <= 25] = 0.5
image_b = cellprofiler_core.image.Image()
image_b.pixel_data = data_b
image_b.dimensions = 3
return image_b
@staticmethod
def test_add(image_a, image_b, module, workspace):
operation = "Add"
expected = image_a.pixel_data + image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_subtract(image_a, image_b, module, workspace):
operation = "Subtract"
expected = image_a.pixel_data - image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_absolute_difference(image_a, image_b, module, workspace):
operation = "Absolute Difference"
expected = numpy.abs(image_a.pixel_data - image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_multiply(image_a, image_b, module, workspace):
operation = "Multiply"
expected = image_a.pixel_data * image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_divide(image_a, image_b, module, workspace):
operation = "Divide"
expected = image_a.pixel_data / image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_average(image_a, image_b, module, workspace):
operation = "Average"
expected = (image_a.pixel_data + image_b.pixel_data) / 2.0
run_operation(operation, expected, module, workspace)
@staticmethod
def test_minimum(image_a, image_b, module, workspace):
operation = "Minimum"
expected = numpy.minimum(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_maximum(image_a, image_b, module, workspace):
operation = "Maximum"
expected = numpy.maximum(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_invert(image_a, module, workspace):
operation = "Invert"
expected = skimage.util.invert(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_log_transform(image_a, module, workspace):
operation = "Log transform (base 2)"
expected = numpy.log2(image_a.pixel_data + 1)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_and(image_a, image_b, module, workspace):
operation = "And"
expected = 1.0 * numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_or(image_a, image_b, module, workspace):
operation = "Or"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_not(image_a, module, workspace):
operation = "Not"
expected = numpy.logical_not(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_equals(image_a, image_b, module, workspace):
operation = "Equals"
expected = image_a.pixel_data == image_b.pixel_data
run_operation(operation, expected, module, workspace)
class TestBinaryImages(object):
@staticmethod
@pytest.fixture()
def image_a():
data_a = numpy.random.rand(128, 128) > 0.5
image_a = cellprofiler_core.image.Image()
image_a.pixel_data = data_a
image_a.dimensions = 2
return image_a
@staticmethod
@pytest.fixture()
def image_b():
data_b = numpy.random.rand(128, 128) > 0.5
image_b = cellprofiler_core.image.Image()
image_b.pixel_data = data_b
image_b.dimensions = 2
return image_b
@staticmethod
def test_add(image_a, image_b, module, workspace):
operation = "Add"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_subtract(image_a, image_b, module, workspace):
operation = "Subtract"
expected = image_a.pixel_data.copy()
expected[image_b.pixel_data] = False
run_operation(operation, expected, module, workspace)
@staticmethod
def test_absolute_difference(image_a, image_b, module, workspace):
operation = "Absolute Difference"
expected = numpy.logical_xor(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_multiply(image_a, image_b, module, workspace):
operation = "Multiply"
expected = numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_divide(image_a, image_b, module, workspace):
operation = "Divide"
expected = image_a.pixel_data / image_b.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_average(image_a, image_b, module, workspace):
operation = "Average"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_minimum(image_a, image_b, module, workspace):
operation = "Minimum"
expected = numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_maximum(image_a, image_b, module, workspace):
operation = "Maximum"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_invert(image_a, module, workspace):
operation = "Invert"
expected = numpy.logical_not(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_log_transform(image_a, module, workspace):
operation = "Log transform (base 2)"
expected = image_a.pixel_data
run_operation(operation, expected, module, workspace)
@staticmethod
def test_and(image_a, image_b, module, workspace):
operation = "And"
expected = numpy.logical_and(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_or(image_a, image_b, module, workspace):
operation = "Or"
expected = numpy.logical_or(image_a.pixel_data, image_b.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_not(image_a, module, workspace):
operation = "Not"
expected = numpy.logical_not(image_a.pixel_data)
run_operation(operation, expected, module, workspace)
@staticmethod
def test_equals(image_a, image_b, module, workspace):
operation = "Equals"
expected = image_a.pixel_data == image_b.pixel_data
run_operation(operation, expected, module, workspace)
def test_load_v3():
file = tests.modules.get_test_resources_directory("imagemath/v3.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
module = pipeline.modules()[-1]
assert isinstance(module, cellprofiler.modules.imagemath.ImageMath)
assert module.operation == cellprofiler.modules.imagemath.O_LOG_TRANSFORM_LEGACY
assert module.exponent == 1.5
assert module.after_factor == 0.5
assert module.addend == 0.1
assert module.truncate_low
assert not module.truncate_high
assert module.ignore_mask
assert module.output_image_name == "LogTransformed"
assert (
module.images[0].image_or_measurement == cellprofiler.modules.imagemath.IM_IMAGE
)
assert module.images[0].image_name == "DNA"
assert module.images[0].factor == 1.2
assert (
module.images[1].image_or_measurement
== cellprofiler.modules.imagemath.IM_MEASUREMENT
)
assert module.images[1].measurement == "Count_Nuclei"
assert module.images[1].factor == 1.5
def test_load_v4():
file = tests.modules.get_test_resources_directory("imagemath/v4.pipeline")
with open(file, "r") as fd:
data = fd.read()
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(six.moves.StringIO(data))
module = pipeline.modules()[-1]
assert isinstance(module, cellprofiler.modules.imagemath.ImageMath)
assert module.operation == cellprofiler.modules.imagemath.O_LOG_TRANSFORM
assert module.exponent == 1.5
assert module.after_factor == 0.5
assert module.addend == 0.1
assert module.truncate_low
assert not module.truncate_high
assert module.ignore_mask
assert module.output_image_name == "LogTransformed"
assert (
module.images[0].image_or_measurement == cellprofiler.modules.imagemath.IM_IMAGE
)
assert module.images[0].image_name == "DNA"
assert module.images[0].factor == 1.2
assert (
module.images[1].image_or_measurement
== cellprofiler.modules.imagemath.IM_MEASUREMENT
)
assert module.images[1].measurement == "Count_Nuclei"
assert module.images[1].factor == 1.5
def run_imagemath(images, modify_module_fn=None, measurement=None):
"""Run the ImageMath module, returning the image created
images - a list of dictionaries. The dictionary has keys:
pixel_data - image pixel data
mask - mask for image
cropping - cropping mask for image
modify_module_fn - a function of the signature, fn(module)
that allows the test to modify the module.
measurement - an image measurement value
"""
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
module = cellprofiler.modules.imagemath.ImageMath()
module.set_module_num(1)
for i, image in enumerate(images):
pixel_data = image["pixel_data"]
mask = image.get("mask", None)
cropping = image.get("cropping", None)
if i >= 2:
module.add_image()
name = "inputimage%s" % i
module.images[i].image_name.value = name
img = cellprofiler_core.image.Image(pixel_data, mask=mask, crop_mask=cropping)
image_set.add(name, img)
module.output_image_name.value = "outputimage"
if modify_module_fn is not None:
modify_module_fn(module)
pipeline = cellprofiler_core.pipeline.Pipeline()
pipeline.add_module(module)
measurements = cellprofiler_core.measurement.Measurements()
if measurement is not None:
measurements.add_image_measurement(MEASUREMENT_NAME, str(measurement))
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
module,
image_set,
cellprofiler_core.object.ObjectSet(),
measurements,
image_set_list,
)
module.run(workspace)
return image_set.get_image("outputimage")
def check_expected(image, expected, mask=None, ignore=False):
if mask is None and not image.has_crop_mask:
numpy.testing.assert_array_almost_equal(image.pixel_data, expected)
assert not image.has_mask
elif mask is not None and ignore:
numpy.testing.assert_array_almost_equal(image.pixel_data, expected)
assert image.has_mask
elif mask is not None and not ignore:
assert image.has_mask
if not image.has_crop_mask:
assert numpy.all(mask == image.mask)
numpy.testing.assert_array_almost_equal(
image.pixel_data[image.mask], expected[image.mask]
)
def test_exponent():
"""Test exponentiation of an image"""
def fn(module):
module.exponent.value = 2
module.operation.value = cellprofiler.modules.imagemath.O_NONE
numpy.random.seed(0)
image = numpy.random.uniform(size=(10, 10)).astype(numpy.float32)
expected = image ** 2
output = run_imagemath([{"pixel_data": image}], fn)
check_expected(output, expected)
def test_factor():
"""Test multiplicative factor"""
def fn(module):
module.after_factor.value = 0.5
module.operation.value = cellprofiler.modules.imagemath.O_NONE
numpy.random.seed(0)
image = numpy.random.uniform(size=(10, 10))
expected = image * 0.5
output = run_imagemath([{"pixel_data": image}], fn)
check_expected(output, expected)
def test_addend():
"""Test adding a value to image"""
def fn(module):
module.addend.value = 0.5
module.operation.value = cellprofiler.modules.imagemath.O_NONE
numpy.random.seed(0)
image = numpy.random.uniform(size=(10, 10)) * 0.5
image = image.astype(numpy.float32)
expected = image + 0.5
output = run_imagemath([{"pixel_data": image}], fn)
check_expected(output, expected)
def test_mask():
"""Test a mask in the first image"""
def fn(module):
module.operation.value = cellprofiler.modules.imagemath.O_NONE
numpy.random.seed(0)
image = numpy.random.uniform(size=(10, 10)).astype(numpy.float32)
mask = numpy.random.uniform(size=(10, 10)) > 0.3
output = run_imagemath([{"pixel_data": image, "mask": mask}], fn)
check_expected(output, image, mask)
def test_add():
"""Test adding"""
def fn(module):
module.operation.value = cellprofiler.modules.imagemath.O_ADD
module.truncate_high.value = False
numpy.random.seed(0)
for n in range(2, 5):
images = [
{"pixel_data": numpy.random.uniform(size=(10, 10)).astype(numpy.float32)}
for i in range(n)
]
expected = functools.reduce(numpy.add, [x["pixel_data"] for x in images])
output = run_imagemath(images, fn)
check_expected(output, expected)
def test_add_mask():
"""Test adding masked images"""
"""Test adding"""
def fn(module):
module.operation.value = cellprofiler.modules.imagemath.O_ADD
module.truncate_high.value = False
numpy.random.seed(0)
for n in range(2, 5):
images = [
{
"pixel_data": numpy.random.uniform(size=(50, 50)).astype(numpy.float32),
"mask": (numpy.random.uniform(size=(50, 50)) > 0.1),
}
for i in range(n)
]
expected = functools.reduce(numpy.add, [x["pixel_data"] for x in images])
mask = functools.reduce(numpy.logical_and, [x["mask"] for x in images])
output = run_imagemath(images, fn)
check_expected(output, expected, mask)
def test_add_mask_truncate():
def fn(module):
module.operation.value = cellprofiler.modules.imagemath.O_ADD
module.truncate_high.value = True
numpy.random.seed(0)
for n in range(2, 5):
images = [
{
"pixel_data": numpy.random.uniform(size=(50, 50)).astype(numpy.float32),
"mask": (numpy.random.uniform(size=(50, 50)) > 0.1),
}
for i in range(n)
]
expected = functools.reduce(numpy.add, [x["pixel_data"] for x in images])
expected[expected > 1] = 1
mask = functools.reduce(numpy.logical_and, [x["mask"] for x in images])
output = run_imagemath(images, fn)
check_expected(output, expected, mask)
def test_add_crop():
"""Add images, cropping to border"""
def fn(module):
module.operation.value = cellprofiler.modules.imagemath.O_ADD
module.truncate_high.value = False
numpy.random.seed(0)
crop_mask = numpy.zeros((20, 20), bool)
crop_mask[5:15, 5:15] = True
for n in range(2, 3):
for m in range(n):
images = [
{
"pixel_data": | numpy.random.uniform(size=(20, 20)) | numpy.random.uniform |
# -*- coding: utf-8 -*-
"""
Interpies - a libray for the interpretation of gravity and magnetic data.
transforms.py:
Functions for applying derivatives, transforms and filters to grids.
@author: <NAME>
Geophysics Labs, 2017
"""
# Import numpy and scipy
import numpy as np
from scipy import signal
from scipy.ndimage import filters
#from scipy import interpolate
from scipy import ndimage as nd
# Import scikit-learn modules (used for the find_trend function)
from sklearn.linear_model import LinearRegression
from sklearn.preprocessing import PolynomialFeatures
from sklearn.pipeline import Pipeline
### definitions
pi = np.pi
# kernels for convolution filters
derfilt3 = np.array([-0.5, 0, 0.5], np.float32)
derfilt5 = np.array([1, -8, 0, 8, -1], np.float32)/12 # Five-point stencil vector
prewitt1d = np.array([-1, 0, 1], np.float32)/2
#===============================================================================
# miscellaneous functions
#===============================================================================
def replace_edges(data, ncells=1):
"""Replace the values at the edges of an array with the values calculated
with reflection padding. Useful to correct edge effects due to convolution
filters.
"""
return np.pad(data[ncells:-ncells, ncells:-ncells],
ncells, mode='reflect', reflect_type='odd')
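# e.g. (illustrative) replace_edges(derivative_grid, ncells=2) overwrites a 2-cell-wide border
# with values obtained by odd reflection of the interior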
def fill_nodata(data, invalid=None):
"""Replace the value of invalid 'data' cells (indicated by 'invalid')
by the value of the nearest valid data cell. Not very pretty but enough
for making sure the calculation works.
Parameters
----------
data: numpy array of any dimension
invalid: a binary array of same shape as 'data'. True cells set where data
value should be replaced.
If None (default), use: invalid = np.isnan(data)
Returns
-------
Return a filled array.
Credits
-------
http://stackoverflow.com/a/9262129
"""
if np.any(np.isnan(data)):
if invalid is None:
invalid = np.isnan(data)
ind = nd.distance_transform_edt(invalid,
return_distances=False,
return_indices=True)
return data[tuple(ind)]
else:
return data
def simple_resample(data, sampling=2):
'''
Resample grid by simply picking cells at a given sampling rate.
The starting point is the lower-left corner of grid so the location
of the grid is unchanged.
'''
return np.flipud(np.flipud(data)[::sampling, ::sampling])
def find_trend(X, data, degree=1, returnModel=False):
'''
Calculate trend in 2D data. The fit is made with a polynomial function of
chosen degree. A least-square method is used for the fit.
'''
nrows, ncols = data.shape
# get location of NaNs
mask = np.isnan(data)
# Fit data with a polynomial surface (or a plane if degree=1)
model = Pipeline([('poly', PolynomialFeatures(degree)),
('linear', LinearRegression())])
model.fit(X[~mask.flatten(), :], data[~mask])
# calculate resulting trend
trend = model.predict(X).reshape((nrows, ncols))
if returnModel:
return model
else:
return trend
def stats(data):
'''
Return a list of descriptive statistical values.
'''
mean = np.nanmean(data)
sigma = np.nanstd(data)
minimum = np.nanmin(data)
maximum = np.nanmax(data)
return (mean, sigma, minimum, maximum)
#==============================================================================
# Derivatives with Savitzky-Golay coeficients
#==============================================================================
#-------------------------------------------
# Pre-calculated Savitzky-Golay coeficients
#-------------------------------------------
# <NAME>, Microsoft Research, August 2001
#
# SavGolSize<m>Order<n>X<i>Y<j> is a filter in row-major order for one polynomial with:
# filter size m x m
# polynomial order n
# filter for coefficient of term (x^i)(y^j)
# These are grouped by size
# http://homepages.inf.ed.ac.uk/rbf/CVonline/LOCAL_COPIES/KRUMM1/SavGol.htm
# Size 2 Order 1
SavGolSize2Order1X0Y0 = np.array([0.25000000,0.25000000,
0.25000000,0.25000000]).reshape((2,2))
SavGolSize2Order1X1Y0 = np.array([-0.50000000,0.50000000,
-0.50000000,0.50000000]).reshape((2,2))
SavGolSize2Order1X0Y1 = np.array([-0.50000000,-0.50000000,
0.50000000,0.50000000]).reshape((2,2))
# Size 3 Order 1
SavGolSize3Order1X0Y0 = np.array([0.11111111,0.11111111,0.11111111,
0.11111111,0.11111111,0.11111111,
0.11111111,0.11111111,0.11111111]).reshape((3,3))
SavGolSize3Order1X1Y0 = np.array([-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667]).reshape((3,3))
SavGolSize3Order1X0Y1 = np.array([-0.16666667,-0.16666667,-0.16666667,
0.00000000,0.00000000,0.00000000,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
# Size 3 Order 2 ## can be used for quadratic polynomial fit
SavGolSize3Order2X0Y0 = np.array([-0.11111111,0.22222222,-0.11111111,
0.22222222,0.55555556,0.22222222,
-0.11111111,0.22222222,-0.11111111]).reshape((3,3))
SavGolSize3Order2X1Y0 = np.array([-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667,
-0.16666667,0.00000000,0.16666667]).reshape((3,3))
SavGolSize3Order2X2Y0 = np.array([0.16666667,-0.33333333,0.16666667,
0.16666667,-0.33333333,0.16666667,
0.16666667,-0.33333333,0.16666667]).reshape((3,3))
SavGolSize3Order2X0Y1 = np.array([-0.16666667,-0.16666667,-0.16666667,
0.00000000,0.00000000,0.00000000,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
SavGolSize3Order2X1Y1 = np.array([0.25000000,0.00000000,-0.25000000,
0.00000000,0.00000000,0.00000000,
-0.25000000,0.00000000,0.25000000]).reshape((3,3))
SavGolSize3Order2X0Y2 = np.array([0.16666667,0.16666667,0.16666667,
-0.33333333,-0.33333333,-0.33333333,
0.16666667,0.16666667,0.16666667]).reshape((3,3))
#----------------------------------------
def savgol2d(degree, window_size):
'''
Calculate coefficients of two-dimensional Savitzky-Golay filters.
Derived from https://github.com/whatasunnyday/Savitzky-Golay-Filter
Checked against Krumm's coefficients (see list above).
Parameters
----------
degree: positive integer
The degree of the polynomial that is fitted to the data points. The
greater the degree, the larger the fitting window must be.
window_size: positive odd integer
The size of the square window that is used to calculate the fitting
polynomial.
Returns
-------
coeffs : 2D array of shape (n, `window_size**2`), where n is the number of
coefficients in a polynomial of degree `degree` with 2 variables (x and y).
n is equal to (2+d)! / 2d!
Each of the n rows is a kernel of size `window_size` that can be used
to smooth 2D data (with the first one) or to calculate derivatives (with
the others).
'''
if not isinstance(degree, int) or degree < 0:
raise ValueError("Degree of polynomial must be a positive integer")
if not isinstance(window_size, int) or window_size % 2 == 0 or window_size < 0 :
raise ValueError("Window size must be a positive odd integer")
if window_size ** 2 < ((degree + 2) * (degree + 1)) / 2.0:
raise ValueError("Degree too high for window size")
# create dictionary of exponents
exps = [ {"x": k - n, "y": n } for k in range(degree + 1) for n in range(k + 1)]
# coordinates of points in window
n = np.arange(-(window_size - 1)//2, (window_size - 1)//2 + 1,
dtype = np.float64)
dx = np.tile(n, [window_size, 1]).reshape(window_size ** 2, )
dy = np.repeat(n, window_size)
# array
A = np.empty((window_size ** 2, len(exps)))
for i, exp in enumerate(exps):
A[:,i] = (dx ** exp["x"]) * (dy ** exp["y"])
return np.linalg.pinv(A)
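# Example (illustrative): the first row of the returned coefficients is the 2D smoothing
# kernel used by savgol_smooth below, e.g.
#     coeffs = savgol2d(degree=3, window_size=5)
#     smooth_kernel = coeffs[0].reshape((5, 5))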
#----------------------------------------
# Dictionary to associate types of derivative with Savitzky-Golay coeficients
# and parameters
sg_dicts = {}
sg_dicts['dx'] = {'index':1,'factor':1,'exponent':1,'flipfunc':np.fliplr}
sg_dicts['dy'] = {'index':2,'factor':-1,'exponent':1,'flipfunc':np.flipud}
sg_dicts['dx2'] = {'index':3,'factor':2,'exponent':2,'flipfunc':np.fliplr}
sg_dicts['dxdy'] = {'index':4,'factor':-1,'exponent':2,'flipfunc':lambda x: np.flipud(np.fliplr(x))}
sg_dicts['dy2'] = {'index':5,'factor':2,'exponent':2,'flipfunc':np.flipud}
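# 'index' selects the row of the savgol2d coefficient matrix, 'factor' combines the
# polynomial-term multiplier (e.g. 2 for pure second derivatives) with the sign convention
# of the y axis, 'exponent' is the power of cellsize used for scaling, and 'flipfunc'
# flips the kernel because convolution reverses it.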
def savgol_smooth(data, deg=3, win=5, doEdges=False):
'''
Smooth an array by 2D convolution with a Savitzky-Golay (SG) filter.
It works even if NaNs are present in the data.
The SG filter is controlled by two parameters, `deg` (degree) and `win` (window
size). The amount of smoothing will increase with `win` and decrease with
`deg`.
Parameters
----------
data: 2D array
Input data
deg: positive integer, default 3
The degree of the Savitzky-Golay filter. The greater the degree, the
larger the fitting window must be.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
doEdges: boolean, default False
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
'''
# retrieve Savitzky-Golay coeficients and make kernel
sg_coeffs = savgol2d(deg,win)
sg_kernel = sg_coeffs[0].reshape((win,win))
# calculate filtered result by convolution
convResult = signal.convolve2d(data,sg_kernel,mode='same',
boundary='symm')
# fill edges
if doEdges:
convResult = replace_edges(convResult, (win-1)//2)
return convResult
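# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Smooth a noisy synthetic surface with an order-3, 5x5 Savitzky-Golay filter.
# It relies on this module's own imports (numpy as np, scipy's signal) and on the
# replace_edges helper defined elsewhere in this file.
def _demo_savgol_smooth():
    x, y = np.meshgrid(np.linspace(-3, 3, 101), np.linspace(-3, 3, 101))
    clean = np.exp(-(x ** 2 + y ** 2))
    noisy = clean + 0.05 * np.random.randn(*clean.shape)
    smoothed = savgol_smooth(noisy, deg=3, win=5, doEdges=True)
    # The smoothed grid should sit closer to the clean surface than the noisy input does.
    return np.abs(smoothed - clean).mean() < np.abs(noisy - clean).mean()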
def savgol_deriv(data, cellsize, direction='dx', deg=3, win=5, doEdges=True):
'''
Calculate horizontal derivatives by convolution with a Savitzky-Golay (SG)
filter. It works even if NaNs are present in the data.
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
direction : {'dx','dy','dx2','dxdy','dy2'}, optional
Type of derivative. Default is 'dx', first horizontal derivative in the
x direction. The x axis is "West to East", i.e. along rows of the array.
The y axis is "South to North", i.e. along columns of the array.
deg: positive integer, default 3
The degree of the Savitzky-Golay filter. The greater the degree, the
larger the fitting window must be.
win: positive odd integer, default 5
The size of the fitting window that is used to calculate the SG
coefficients.
doEdges: boolean, default True
Replace the values at the edges of the output array with values calculated
by reflection padding. Useful to correct bad edge effects.
'''
sg_dict = sg_dicts[direction]
index = sg_dict['index']
factor = sg_dict['factor']
exponent = sg_dict['exponent']
flipfunc = sg_dict['flipfunc']
    # retrieve Savitzky-Golay coefficients and make kernel
sg_coeffs = savgol2d(deg, win)
sg_kernel = flipfunc(sg_coeffs[index].reshape((win, win))) # flip for convolution
# calculate derivative by convolution
convResult = factor*signal.convolve2d(data, sg_kernel, mode='same',
boundary='symm')/cellsize**exponent
# fill edges
if doEdges:
convResult = replace_edges(convResult, (win-1)//2)
return convResult
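# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# Recover the slopes of the plane z = 2*x + 3*y on a regular grid. Away from the
# edges the 'dx' and 'dy' outputs should be constant and equal to 2 and 3 up to
# sign, which depends on the row/column orientation convention documented above.
def _demo_savgol_deriv(cellsize=10.0, n=64):
    x, y = np.meshgrid(np.arange(n) * cellsize, np.arange(n) * cellsize)
    z = 2.0 * x + 3.0 * y
    dzdx = savgol_deriv(z, cellsize, direction='dx', deg=3, win=5)
    dzdy = savgol_deriv(z, cellsize, direction='dy', deg=3, win=5)
    return dzdx[5:-5, 5:-5].mean(), dzdy[5:-5, 5:-5].mean()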
#==============================================================================
# fs_deriv - 5-Tap and 7-tap 1st and 2nd discrete derivatives
#==============================================================================
# ** Adapted from Matlab code by <NAME> **
#
# These functions compute 1st and 2nd derivatives of an image using
# coefficients given by <NAME> Simoncelli (2004). The results are significantly
# more accurate than MATLAB's GRADIENT function on edges that are at angles
# other than vertical or horizontal. This in turn improves gradient orientation
# estimation enormously. If you are after extreme accuracy try using the 7-tap
# coefficients.
#
# Reference: <NAME> and <NAME> "Differentiation of Discrete
# Multi-Dimensional Signals" IEEE Trans. Image Processing. 13(4): 496-508 (2004)
#
# Copyright (c) 2010 <NAME>
# http://www.peterkovesi.com/matlabfns/index.html
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# The Software is provided "as is", without warranty of any kind.
# April 2010
def _conv1(a, h):
return np.convolve(a, h, mode='same')
def _conv2(h1, h2, A):
'''
Performs a 1D convolution down the columns using h1 then a 1D
convolution along the rows using h2.
'''
result = np.apply_along_axis(_conv1, 0, A, h1)
result = np.apply_along_axis(_conv1, 1, result, h2)
return result
def fs_coefficients(tap=5, direction='dx'):
'''
This function returns the 5-tap or 7-tap coefficients given by Farid
and Simoncelli (2004).
'''
if tap==5:
if direction in ['dx', 'dy', 'dxdy']:
            # 5-tap 1st derivative coefficients. These are optimal if you are just
            # seeking the 1st derivatives.
p = np.array([0.037659, 0.249153, 0.426375, 0.249153, 0.037659])
d1 = np.array([0.109604, 0.276691, 0.000000, -0.276691, -0.109604])
d2 = 0
elif direction in ['dx2', 'dy2', 'dxdy']:
# 5-tap 2nd derivative coefficients. The associated 1st derivative
# coefficients are not quite as optimal as the ones above but are
# consistent with the 2nd derivative interpolator p and thus are
# appropriate to use if you are after both 1st and 2nd derivatives.
p = np.array([0.030320, 0.249724, 0.439911, 0.249724, 0.030320])
d1 = np.array([0.104550, 0.292315, 0.000000, -0.292315, -0.104550])
d2 = np.array([0.232905, 0.002668, -0.471147, 0.002668, 0.232905])
elif tap==7:
# 7-tap interpolant and 1st and 2nd derivative coefficients
p = np.array([0.004711, 0.069321, 0.245410,
0.361117, 0.245410, 0.069321, 0.004711])
d1 = np.array([0.018708, 0.125376, 0.193091,
0.000000, -0.193091, -0.125376, -0.018708])
d2 = np.array([0.055336, 0.137778, -0.056554,
-0.273118, -0.056554, 0.137778, 0.055336])
else:
raise ValueError('The tap value must be either 5 or 7.')
return p, d1, d2
def fs_deriv(data, cellsize, direction='dx', tap=5):
'''
Compute 1st or 2nd derivative of an array using the method of Farid and
Simoncelli (2004).
Parameters
----------
data : 2D array
Input array
cellsize: float
Size of grid cells. Dimensions are assumed to be identical in both the
x and y directions.
direction : {'dx','dy','dx2','dxdy','dy2'}, optional
Type of derivative. Default is 'dx', first horizontal derivative in the
x direction. The x axis is "West to East", i.e. along rows of the array.
The y axis is "South to North", i.e. along columns of the array.
tap: {5, 7}, default 5
Size of the kernel that is used to calculate the derivative by
convolution.
'''
# Compute coefficients
p, d1, d2 = fs_coefficients(tap, direction)
# Compute derivatives
if direction=='dx':
result = _conv2(p,d1,data)/cellsize
elif direction=='dy':
result = -1 * _conv2(d1,p,data)/cellsize # origin is in lower left corner
elif direction=='dx2':
result = _conv2(p,d2,data)/cellsize/cellsize
elif direction=='dy2':
result = _conv2(d2,p,data)/cellsize/cellsize
elif direction=='dxdy':
result = _conv2(p,d1,data)/cellsize
result = -1 * _conv2(d1,p,result)/cellsize
return result
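# --- Hedged usage sketch (added for illustration; not part of the original code) ---
# Differentiate z = sin(x), with x varying along the rows, and compare the 'dx'
# output against the analytic derivative cos(x). Only interior points are checked
# because the plain 'same'-mode convolution leaves artefacts near the edges.
def _demo_fs_deriv(cellsize=0.1, n=100):
    x = np.arange(n) * cellsize
    z = np.tile(np.sin(x), (n, 1))        # every row is sin(x)
    dzdx = fs_deriv(z, cellsize, direction='dx', tap=5)
    err = np.abs(dzdx[10:-10, 10:-10] - np.cos(x)[None, 10:-10])
    return err.max()                      # expected to be small in the interior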
#==============================================================================
# Fourier functions
#==============================================================================
def getk(nx, ny, dx, dy):
'''
Given the size `nx` and `ny` of a FFT and the spacing `dx` and `dy`
of the space domain grid, this routine returns the spatial
frequency grid components `kx`, `ky` and `k = sqrt(kx.^2 + ky.^2)`
Makes use of numpy function `fftfreq`.
Returns
-------
[kx,ky,k]
'''
# Discrete Fourier Transform sample frequencies
kx = 2*np.pi*np.fft.fftfreq(nx,dx)
ky = 2*np.pi*np.fft.fftfreq(ny,dy)
# Create matrices for 2D case
    kx = np.tile(kx,(ny,1))
import cv2
import numpy as np
def abs_sobel_thresh(img, sobel_kernel=3, orient='x', thresh_min=0, thresh_max=255):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
if (orient == 'x'):
sobel = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
else:
sobel = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
sobel_absolute = np.absolute(sobel)
scaled = np.uint8(255 * sobel_absolute / np.max(sobel_absolute))
binary = np.zeros_like(scaled)
binary[(scaled >= thresh_min) & (scaled <= thresh_max)] = 255
return binary
def mag_thresh(img, sobel_kernel=3, thresh=(0, 255)):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sobel_x = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobel_y = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
sobel_mag = np.sqrt(np.multiply(sobel_x, sobel_x) + np.multiply(sobel_y, sobel_y))
scaled_mag = 255 * sobel_mag / np.max(sobel_mag)
binary_output = np.zeros_like(scaled_mag)
binary_output[(scaled_mag > thresh[0]) & (scaled_mag < thresh[1])] = 255
return binary_output
def dir_threshold(img, sobel_kernel=3, thresh=(0, np.pi / 2)):
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
sobel_x = np.abs(cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize=sobel_kernel))
sobel_y = np.abs(cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize=sobel_kernel))
direction = np.arctan2(sobel_y, sobel_x)
threshed_direction = np.zeros_like(direction)
threshed_direction[(direction > thresh[0]) & (direction < thresh[1])] = 255
return threshed_direction
def hls_s_threshold(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HLS)
s_channel = hls[:, :, 2]
binary = np.zeros_like(s_channel)
binary[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 255
return binary
def hsv_s_threshold(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
s_channel = hls[:, :, 1]
binary = np.zeros_like(s_channel)
binary[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 255
return binary
def hsv_v_threshold(img, thresh=(0, 255)):
hls = cv2.cvtColor(img, cv2.COLOR_BGR2HSV)
s_channel = hls[:, :, 2]
binary = np.zeros_like(s_channel)
binary[(s_channel > thresh[0]) & (s_channel <= thresh[1])] = 255
return binary
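# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Typical lane-detection style combination: a gradient-based mask and a colour-based
# mask are OR-ed together. The threshold values below are illustrative guesses, not
# tuned parameters from the original project.
def combined_binary_demo(img):
    grad_x = abs_sobel_thresh(img, sobel_kernel=3, orient='x', thresh_min=20, thresh_max=100)
    s_chan = hls_s_threshold(img, thresh=(170, 255))
    combined = np.zeros_like(grad_x)
    combined[(grad_x == 255) | (s_chan == 255)] = 255
    return combined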
def perspective_transfrom(img, src_points, dst_points):
img_size = img.shape[1::-1]
transformation = cv2.getPerspectiveTransform(src_points, dst_points)
perspective_transformed = cv2.warpPerspective(img, transformation, img_size)
Minv = cv2.getPerspectiveTransform(dst_points, src_points)
# aa = cv2.warpPerspective(perspective_transformed, Minv, img_size)
# cv2.imshow('aa', aa)
# cv2.waitKey(0)
return perspective_transformed, Minv
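# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# The source/destination quadrilaterals below are made-up placeholders for a
# 1280x720 frame; real values depend on the camera and mounting geometry.
def perspective_demo(img):
    src = np.float32([[580, 460], [700, 460], [1040, 680], [260, 680]])
    dst = np.float32([[260, 0], [1040, 0], [1040, 720], [260, 720]])
    warped, Minv = perspective_transfrom(img, src, dst)
    return warped, Minv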
def get_warpback_overlay_img(raw_img, fit_points, inverse_perspevtive_trans):
if fit_points[0] is None or fit_points[1] is None:
return raw_img
left_fit_x = fit_points[0]
right_fit_x = fit_points[1]
ploty = fit_points[2]
# Create an image to draw the lines on
    color_warp = np.zeros_like(raw_img)
"""CorEx Hierarchical Topic Models
Use the principle of Total Cor-relation Explanation (CorEx) to construct
hierarchical topic models. This module is specially designed for sparse count
data and implements semi-supervision via the information bottleneck.
<NAME> and <NAME>. "Maximally Informative Hierarchical
Representations of High-Dimensional Data." AISTATS, 2015.
Gallagher et al. "Anchored Correlation Explanation: Topic Modeling with Minimal
Domain Knowledge." TACL, 2017.
License: Apache V2
"""
import warnings
import numpy as np # Tested with 1.8.0
from os import makedirs
from os import path
try:
from scipy.special import logsumexp
except ImportError:
from scipy.misc import logsumexp # Tested with 0.13.0
import scipy.sparse as ss
from six import string_types # For Python 2&3 compatible string checking
import joblib
class Corex(object):
"""
Anchored CorEx hierarchical topic models
Code follows sklearn naming/style (e.g. fit(X) to train)
Parameters
----------
n_hidden : int, optional, default=2
Number of hidden units.
max_iter : int, optional
Maximum number of iterations before ending.
verbose : int, optional
The verbosity level. The default, zero, means silent mode. 1 outputs TC(X;Y) as you go
2 output alpha matrix and MIs as you go.
tree : bool, default=True
In a tree model, each word can only appear in one topic. tree=False is not yet implemented.
count : string, {'binarize', 'fraction'}
Whether to treat counts (>1) by directly binarizing them, or by constructing a fractional count in [0,1].
seed : integer or numpy.RandomState, optional
A random number generator instance to define the state of the
random permutations generator. If an integer is given, it fixes the
seed. Defaults to the global numpy random number generator.
Attributes
----------
labels : array, [n_samples, n_hidden]
Label for each hidden unit for each sample.
clusters : array, [n_visible]
Cluster label for each input variable.
p_y_given_x : array, [n_samples, n_hidden]
p(y_j=1|x) for each sample.
alpha : array-like, shape [n_hidden, n_visible]
Adjacency matrix between input variables and hidden units. In range [0,1].
mis : array, [n_hidden, n_visible]
Mutual information between each (visible/observed) variable and hidden unit
tcs : array, [n_hidden]
TC(X_Gj;Y_j) for each hidden unit
tc : float
Convenience variable = Sum_j tcs[j]
tc_history : array
Shows value of TC over the course of learning. Hopefully, it is converging.
words : list of strings
Feature names that label the corresponding columns of X
References
----------
[1] <NAME> and <NAME>. "Discovering Structure in
High-Dimensional Data Through Correlation Explanation."
NIPS, 2014. arXiv preprint arXiv:1406.1222.
[2] <NAME> and <NAME>. "Maximally Informative
Hierarchical Representations of High-Dimensional Data"
AISTATS, 2015. arXiv preprint arXiv:1410.7404.
"""
def __init__(self, n_hidden=2, max_iter=200, eps=1e-5, seed=None, verbose=False, count='binarize',
tree=True, **kwargs):
self.n_hidden = n_hidden # Number of hidden factors to use (Y_1,...Y_m) in paper
self.max_iter = max_iter # Maximum number of updates to run, regardless of convergence
self.eps = eps # Change to signal convergence
self.tree = tree
np.random.seed(seed) # Set seed for deterministic results
self.verbose = verbose
self.t = 20 # Initial softness of the soft-max function for alpha (see NIPS paper [1])
self.count = count # Which strategy, if necessary, for binarizing count data
if verbose > 0:
np.set_printoptions(precision=3, suppress=True, linewidth=200)
print('corex, rep size:', n_hidden)
if verbose:
np.seterr(all='warn')
# Can change to 'raise' if you are worried to see where the errors are
# Locally, I "ignore" underflow errors in logsumexp that appear innocuous (probabilities near 0)
else:
np.seterr(all='ignore')
def label(self, p_y_given_x):
"""Maximum likelihood labels for some distribution over y's"""
return (p_y_given_x > 0.5).astype(bool)
@property
def labels(self):
"""Maximum likelihood labels for training data. Can access with self.labels (no parens needed)"""
return self.label(self.p_y_given_x)
@property
def clusters(self):
"""Return cluster labels for variables"""
return np.argmax(self.alpha, axis=0)
@property
def sign(self):
"""Return the direction of correlation, positive or negative, for each variable-latent factor."""
return np.sign(self.theta[3] - self.theta[2]).T
@property
def tc(self):
"""The total correlation explained by all the Y's.
"""
return np.sum(self.tcs)
def fit(self, X, y=None, anchors=None, anchor_strength=1, words=None, docs=None):
"""
Fit CorEx on the data X. See fit_transform.
"""
self.fit_transform(X, anchors=anchors, anchor_strength=anchor_strength, words=words, docs=docs)
return self
def fit_transform(self, X, y=None, anchors=None, anchor_strength=1, words=None, docs=None):
"""Fit CorEx on the data
Parameters
----------
X : scipy sparse CSR or a numpy matrix, shape = [n_samples, n_visible]
Count data or some other sparse binary data.
anchors : A list of variables anchor each corresponding latent factor to.
anchor_strength : How strongly to weight the anchors.
words : list of strings that label the corresponding columns of X
docs : list of strings that label the corresponding rows of X
Returns
-------
Y: array-like, shape = [n_samples, n_hidden]
Learned values for each latent factor for each sample.
Y's are sorted so that Y_1 explains most correlation, etc.
"""
X = self.preprocess(X)
self.initialize_parameters(X, words, docs)
if anchors is not None:
anchors = self.preprocess_anchors(list(anchors))
p_y_given_x = np.random.random((self.n_samples, self.n_hidden))
if anchors is not None:
for j, a in enumerate(anchors):
p_y_given_x[:, j] = 0.5 * p_y_given_x[:, j] + 0.5 * X[:, a].mean(axis=1).A1 # Assumes X is a binary matrix
for nloop in range(self.max_iter):
if nloop > 1:
for j in range(self.n_hidden):
if self.sign[j, np.argmax(self.mis[j])] < 0:
# Switch label for Y_j so that it is correlated with the top word
p_y_given_x[:, j] = 1. - p_y_given_x[:, j]
self.log_p_y = self.calculate_p_y(p_y_given_x)
self.theta = self.calculate_theta(X, p_y_given_x, self.log_p_y) # log p(x_i=1|y) nv by m by k
if nloop > 0: # Structure learning step
self.alpha = self.calculate_alpha(X, p_y_given_x, self.theta, self.log_p_y, self.tcs)
if anchors is not None:
for a in flatten(anchors):
self.alpha[:, a] = 0
for ia, a in enumerate(anchors):
self.alpha[ia, a] = anchor_strength
p_y_given_x, _, log_z = self.calculate_latent(X, self.theta)
self.update_tc(log_z) # Calculate TC and record history to check convergence
self.print_verbose()
if self.convergence():
break
if self.verbose:
print('Overall tc:', self.tc)
if anchors is None:
self.sort_and_output(X)
self.p_y_given_x, self.log_p_y_given_x, self.log_z = self.calculate_latent(X, self.theta) # Needed to output labels
self.mis = self.calculate_mis(self.theta, self.log_p_y) # / self.h_x # could normalize MIs
return self.labels
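    # --- Hedged usage sketch (added for illustration; not part of the original class) ---
    # Typical call pattern, assuming `doc_word` is a sparse binary/count matrix of
    # shape (n_docs, n_words) and `words` is the matching vocabulary list:
    #
    #   topic_model = Corex(n_hidden=50, seed=1)
    #   topic_model.fit(doc_word, words=words,
    #                   anchors=[['dog', 'cat'], ['economy']], anchor_strength=3)
    #
    # `doc_word`, `words`, and the anchor words are placeholders, not values taken
    # from the original documentation.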
def transform(self, X, details=False):
"""
Label hidden factors for (possibly previously unseen) samples of data.
Parameters: samples of data, X, shape = [n_samples, n_visible]
Returns: , shape = [n_samples, n_hidden]
"""
X = self.preprocess(X)
p_y_given_x, _, log_z = self.calculate_latent(X, self.theta)
labels = self.label(p_y_given_x)
if details == 'surprise':
# TODO: update
# Totally experimental
n_samples = X.shape[0]
alpha = np.zeros((self.n_hidden, self.n_visible))
for i in range(self.n_visible):
alpha[np.argmax(self.alpha[:, i]), i] = 1
log_p = np.empty((2, n_samples, self.n_hidden))
            c0 = np.einsum('ji,ij->j', alpha, self.theta[0])
import pickle
import pandas as pd
import numpy as np
import pkg_resources
import scanpy as sc
from anndata import AnnData
import seaborn as sns
def plot_matrixplot(adata, groupby, cmap='coolwarm', ax=None):
# Get progeny data
X = np.array(adata.obsm['progeny'])
p_names = adata.obsm['progeny'].columns.tolist()
# Get group categroies
cats = adata.obs[groupby].cat.categories
# Compute mean for each group
arr = np.zeros((len(cats),X.shape[1]))
for i, cat in enumerate(cats):
msk = adata.obs[groupby] == cat
        mean_group = np.mean(X[msk,], axis=0)
        arr[i] = mean_group
"""
File: continuous.py
Author: <NAME>
Email: <EMAIL>
Github: https://github.com/ComeBertrand
Description: Classical continuous functions for performance evaluation of
metaheuristics. All theses functions were taken from the following website :
https://www.sfu.ca/~ssurjano/optimization.html
"""
import numpy as np
from ...models import Problem
from ...common.representation import RealEncoding, Boundaries
from ...common.fitness import Objective
from ...operators.neighborhood import NeighborhoodOperator, move_distance_continuous, ContinuousLogMoveRange
class ContinuousProblem(Problem):
"""Problems that are defined by a continuous function.
# TODO: Do it in a more abstract way and move it in abstract
Args:
n_dim (int): Number of dimensions.
min_vals (np.array): Minimum values for each dimension.
max_vals (np.array): Maximum values for each dimension.
move_range (MoveRange): Range of the move step.
known_min (float): Minimum of the continuous function. None means that
the minimum is not known.
"""
def __init__(self, n_dim, min_vals, max_vals, move_range, known_min):
nb_neighbors = n_dim * 100 # TODO: shall be an argument of the object
neighborhood = NeighborhoodOperator(move_distance_continuous, move_range, nb_neighbors)
boundaries = Boundaries(min_vals, max_vals, np.float)
encoding = RealEncoding(boundaries)
objective = Objective(self._eval_func)
super().__init__(objective, encoding, neighborhood=neighborhood, known_min=known_min)
def _eval_func(self, solution):
"""Actual evaluation of a solution by the continuous function.
Args:
solution (Solution): Solution to be evaluated.
Returns:
float: function value of the solution.
"""
raise NotImplementedError("Abstract Class")
# --------------------------------------------------------------------------- #
# Functions with many local minima #
# --------------------------------------------------------------------------- #
class Ackleys(ContinuousProblem):
"""Ackley's function.
Args:
n_dim (int): Number of dimensions.
"""
def __init__(self, n_dim):
min_vals = np.array([-32.768] * n_dim, np.float)
max_vals = np.array([32.768] * n_dim, np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
n = len(solution)
part1 = -0.2 * np.sqrt(1/n * np.sum(solution * solution))
part2 = 1/n * np.sum(np.cos(2 * np.pi * solution))
return 20 - 20 * np.exp(part1) + np.e - np.exp(part2)
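# --- Hedged usage sketch (added for illustration; not part of the original module) ---
# The Ackley function has its global minimum f(0, ..., 0) = 0. This sketch assumes
# the framework classes imported above construct without extra arguments and that
# the numpy version in use still provides the `np.float` alias used in this module.
def _demo_ackleys(n_dim=2):
    problem = Ackleys(n_dim)
    return problem._eval_func(np.zeros(n_dim))   # expected to be ~0 up to float error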
class Bukin6(ContinuousProblem):
"""Bukin funtion N.6."""
def __init__(self):
n_dim = 2
min_vals = np.array([-15.0, -3.0], np.float)
max_vals = np.array([-5.0, 3.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = 0.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(solution[1] - 0.01 * solution[0] * solution[0])
part2 = np.abs(solution[0] + 10)
return 100 * np.sqrt(part1) + 0.01 * part2
class CrossInTray(ContinuousProblem):
"""Cross-in-tray function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-10.0, -10.0], np.float)
max_vals = np.array([10.0, 10.0], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -2.06261
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.abs(100 - np.sqrt(np.sum(solution * solution)) / np.pi)
part2 = np.sin(solution[0]) * np.sin(solution[1])
final = np.abs(part2 * np.exp(part1)) + 1.0
return -0.0001 * np.power(final, 0.1)
class DropWave(ContinuousProblem):
"""Drop-Wave function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-5.12, -5.12], np.float)
max_vals = np.array([5.12, 5.12], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = -1.0
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
sum_sol_sq = np.sum(solution * solution)
part1 = 1.0 + np.cos(12 * np.sqrt(sum_sol_sq))
part2 = 0.5 * sum_sol_sq + 2.0
return -1.0 * (part1 / part2)
class Eggholder(ContinuousProblem):
"""Eggholder function."""
def __init__(self):
n_dim = 2
min_vals = np.array([-512, -512], np.float)
max_vals = np.array([512, 512], np.float)
move_range = ContinuousLogMoveRange(0.01, 10.0)
known_min = -959.6407
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sin(np.sqrt(np.abs(solution[1] + (solution[0]/2.) + 47)))
part2 = np.sin(np.sqrt(np.abs(solution[0] - (solution[1] + 47))))
        return -1.0 * (solution[1] + 47) * part1 - solution[0] * part2
class GramacyLee(ContinuousProblem):
"""Gramacy & Lee function."""
def __init__(self):
n_dim = 1
min_vals = np.array([0.5], np.float)
max_vals = np.array([2.5], np.float)
move_range = ContinuousLogMoveRange(0.01, 1.0)
known_min = None
super().__init__(n_dim, min_vals, max_vals, move_range, known_min)
def _eval_func(self, solution):
part1 = np.sin(10 * np.pi * solution[0]) / (2 * solution[0])
        part2 = np.power(solution[0] - 1.0, 4)
        return part1 + part2
"""
Copyright 2018 Johns Hopkins University (Author: <NAME>)
Apache 2.0 (http://www.apache.org/licenses/LICENSE-2.0)
"""
from __future__ import absolute_import
from __future__ import print_function
from __future__ import division
from six.moves import xrange
import pytest
import os
import copy
import numpy as np
from numpy.testing import assert_allclose
from hyperion.utils import Utt2Info
from hyperion.io import H5DataWriter
from hyperion.generators.sequence_batch_generator_v2 import SequenceBatchGeneratorV2 as SBG
output_dir = './tests/data_out/generators'
if not os.path.exists(output_dir):
os.makedirs(output_dir)
h5_file = output_dir + '/seqbgv2.h5'
key_file = output_dir + '/seqbgv2.scp'
num_seqs = 10
dim = 2
min_seq_length = 100
delta = 10
max_seq_length = min_seq_length + (num_seqs-1)*delta
seq_lengths = np.arange(100, max_seq_length+1, delta)
def create_dataset():
file_path = [str(k) for k in xrange(num_seqs)]
key=[]
i = 0
j = 0
while i < num_seqs:
key_i = (j+1)*str(j)
i += (i+1)
j += 1
key += key_i
key = key[:num_seqs]
u2c = Utt2Info.create(file_path, key)
if os.path.exists(h5_file):
return u2c
u2c.save(key_file, sep=' ')
h = H5DataWriter(h5_file)
rng = np.random.RandomState(seed=0)
for i in xrange(num_seqs):
x_i = rng.randn(seq_lengths[i], dim)
h.write(file_path[i], x_i)
return u2c
def test_num_seqs():
create_dataset()
sr = SBG(h5_file, key_file)
assert sr.num_seqs==num_seqs
def test_seq_lengths():
create_dataset()
sr = SBG(h5_file, key_file, shuffle_seqs=False)
assert np.all(sr.seq_lengths==seq_lengths)
    assert sr.total_length == np.sum(seq_lengths)
import warnings
import pickle
import sys
from decorated_options import optioned, Options
import numpy as np
from collections import defaultdict
import logging
import multiprocessing as mp
import pandas as pd
from utils import time_in_top_k, average_rank, int_r_2, logTime, find_opt_oracle, sweep_q, is_sorted
from opt_model import SimOpts
try:
import broadcast.opt.optimizer as Bopt
except ImportError:
warnings.warn('broadcast.opt.optimizer was NOT imported. '
'Comparison against method of Karimi et. al. method will '
'not be possible.')
# Workers for metrics
# Ks = [1, 5, 10]
# Ks = [1, 5]
Ks = [1]
perf_opts = Options(oracle_eps=1e-10, # This is how much after the event that the Oracle tweets.
Ks=Ks,
performance_fields=['seed', 'q', 'type'] +
['top_' + str(k) for k in Ks] +
['avg_rank', 'r_2', 'num_events', 'world_events'])
def add_perf(op, df, sim_opts):
for k in perf_opts.Ks:
op['top_' + str(k)] = time_in_top_k(df=df, K=k, sim_opts=sim_opts)
op['avg_rank'] = average_rank(df, sim_opts=sim_opts)
op['r_2'] = int_r_2(df, sim_opts=sim_opts)
op['world_events'] = len(df.event_id[df.src_id != sim_opts.src_id].unique())
op['num_events'] = len(df.event_id[df.src_id == sim_opts.src_id].unique())
def worker_opt(params):
try:
seed, sim_opts, num_segments, queue = params
except ValueError:
logging.warning('Setting num_segments=10 for world-rate in worker_opt.')
seed, sim_opts, queue = params
num_segments = 10
sim_mgr = sim_opts.create_manager_with_opt(seed=seed)
sim_mgr.run_dynamic()
df = sim_mgr.state.get_dataframe()
# If Capacity if calculated this way, then the Optimal Broadcaster
# May end up with number of tweets higher than the number of tweets
# produced by the rest of the world.
# capacity = u_int_opt(df=df, sim_opts=sim_opts)
# All tweets except by the optimal worker are wall tweets.
# this is valid only if other broadcasters do not react to the optimal
# broadcaster or do not deviate from their strategy (modulo variation due
# to different seeds of the random number generator).
wall_df = df[df.src_id != sim_opts.src_id]
T = sim_opts.end_time
seg_idx = (wall_df.t.values / T * num_segments).astype(int)
intensity_df = (
(wall_df.groupby(['sink_id', pd.Series(seg_idx, name='segment')]).size() / (T / num_segments))
.sort_index()
.reset_index(name='intensity')
)
# Order of walls is ambiguous here.
wall_intensities = (
intensity_df.pivot_table(values='intensity', index='sink_id', columns='segment')
        .loc[sim_opts.sink_ids]  # Sort the data according to the sink_ids in sim_opts.
.values
)
# Note: this works only if the optimal follower has exactly one follower. It is better to count the number
# of distinct times that the optimal broadcaster tweeted.
# capacity = (df.src_id == sim_opts.src_id).sum() * 1.0
num_events = len(df.event_id[df.src_id == sim_opts.src_id].unique())
capacity = num_events * 1.0
op = {
'type' : 'Opt',
'seed' : seed,
'capacity' : capacity,
'sim_opts' : sim_opts,
'q' : sim_opts.q,
'wall_intensities' : wall_intensities
}
add_perf(op, df, sim_opts)
if queue is not None:
queue.put(op)
return op
def worker_poisson(params):
seed, capacity, sim_opts, queue = params
sim_mgr = sim_opts.create_manager_with_poisson(seed=seed, capacity=capacity)
sim_mgr.run_dynamic()
df = sim_mgr.state.get_dataframe()
op = {
'type': 'Poisson',
'seed': seed,
'sim_opts': sim_opts,
'q': sim_opts.q
}
add_perf(op, df, sim_opts)
if queue is not None:
queue.put(op)
return op
def worker_oracle(params):
seed, capacity, max_events, sim_opts, queue = params
opt_oracle = find_opt_oracle(capacity, sim_opts, max_events=max_events)
oracle_df = opt_oracle['df']
# TODO: This method of extracting times works before oracle is always run only
# for one follower.
opt_oracle_mgr = sim_opts.create_manager_with_times(oracle_df.t[oracle_df.events == 1] +
perf_opts.oracle_eps)
opt_oracle_mgr.run_dynamic()
df = opt_oracle_mgr.state.get_dataframe()
op = {
'type' : 'Oracle',
'seed' : seed,
'sim_opts' : sim_opts,
'q' : sim_opts.q,
'r0_num_events' : np.sum(oracle_df.events == 1),
'num_events' : np.sum(df.src_id == sim_opts.src_id)
}
add_perf(op, df, sim_opts)
if queue is not None:
queue.put(op)
return op
def worker_kdd(params, window_start=0, verbose=False, Ks=None):
seed, capacity, num_segments, sim_opts, world_changing_rates, queue = params
T = sim_opts.end_time - window_start
seg_len = T / num_segments
if world_changing_rates is None:
wall_mgr = sim_opts.create_manager_for_wall()
wall_mgr.run_dynamic()
wall_df = wall_mgr.state.get_dataframe()
seg_idx = (wall_df.t.values / T * num_segments).astype(int)
intensity_df = (wall_df.groupby(['sink_id', pd.Series(seg_idx, name='segment')]).size() / (T / num_segments)).reset_index(name='intensity')
wall_intensities = intensity_df.pivot_table(values='intensity', index='sink_id', columns='segment').values
else:
wall_intensities = np.asarray(world_changing_rates)
follower_wall_intensities = wall_intensities
follower_conn_prob = np.asarray([[1.0] * num_segments] * len(sim_opts.sink_ids))
follower_weights = [1.0] * len(sim_opts.sink_ids)
upper_bounds = np.array([1e11] * num_segments)
threshold = 0.02
op = {
'type' : 'kdd',
'seed' : seed,
'sim_opts' : sim_opts,
'q' : sim_opts.q
}
best_avg_rank, best_avg_k = np.inf, -1
best_r_2, best_r_2_k = np.inf, -1
if Ks is None:
Ks = perf_opts.Ks
for k in Ks:
if k != 1:
def _util(x):
return Bopt.utils.weighted_top_k(x,
follower_wall_intensities,
follower_conn_prob,
follower_weights,
k)
def _util_grad(x):
return Bopt.utils.weighted_top_k_grad(x,
follower_wall_intensities,
follower_conn_prob,
follower_weights,
k)
else:
# For k = 1, special case of gradient calculation
def _util(x):
return Bopt.utils.weighted_top_one(x,
follower_wall_intensities,
follower_conn_prob,
follower_weights)
def _util_grad(x):
return Bopt.utils.weighted_top_one_grad(x,
follower_wall_intensities,
follower_conn_prob,
follower_weights)
# Initial guess is close to Poisson solution
x0 = np.ones(num_segments) * capacity / num_segments
kdd_opt, iters = Bopt.optimize(
util = _util,
util_grad = _util_grad,
budget = capacity,
upper_bounds = upper_bounds,
threshold = threshold,
x0 = x0,
verbose = verbose,
with_iter = True
)
op['kdd_opt_' + str(k)] = kdd_opt
op['kdd_opt_iters_' + str(k)] = iters
if iters > 49900:
            logging.warning('Setting {} took {} iters to converge.'.format(op, iters))
piecewise_const_mgr = sim_opts.create_manager_with_piecewise_const(
seed=seed,
change_times=window_start + np.arange(num_segments) * seg_len,
rates=kdd_opt / seg_len
)
piecewise_const_mgr.state.time = window_start
piecewise_const_mgr.run_dynamic()
df = piecewise_const_mgr.state.get_dataframe()
perf = time_in_top_k(df=df, K=k, sim_opts=sim_opts)
op['top_' + str(k)] = perf
op['top_' + str(k) + '_num_events'] = len(df.event_id[df.src_id == sim_opts.src_id].unique())
avg_rank = average_rank(df, sim_opts=sim_opts)
r_2 = int_r_2(df, sim_opts=sim_opts)
op['avg_rank_' + str(k)] = avg_rank
op['r_2_' + str(k)] = r_2
if avg_rank < best_avg_rank:
best_avg_rank = avg_rank
best_avg_k = k
if r_2 < best_r_2:
best_r_2 = r_2
best_r_2_k = k
op['avg_rank'] = best_avg_rank
op['avg_rank_k'] = best_avg_k
op['r_2'] = best_r_2
op['r_2_k'] = best_r_2_k
op['world_events'] = len(df.event_id[df.src_id != sim_opts.src_id].unique())
op['num_events'] = len(df.event_id[df.src_id == sim_opts.src_id].unique())
if queue is not None:
queue.put(op)
return op
# This is an approach using the multiprocessing module without the Pool and using a queue to accumulate the results.
# This will lead to a better utilization of the CPU resources (hopefully) because the previous method only allowed
# parallization of the number of seeds.
dilation = 100.0
simulation_opts = Options(world_rate=1000.0 / dilation, world_alpha=1.0, world_beta=10.0,
N=10, T=1.0 * dilation, num_segments=10,
log_q_low=-6 + np.log10(dilation), log_q_high=5 + np.log10(dilation))
@optioned(option_arg='opts')
def piecewise_sim_opt_factory(N, T, num_segments, world_rate, opts):
random_state = np.random.RandomState(42)
world_changing_rates = random_state.uniform(low=world_rate / 2.0, high=world_rate, size=num_segments)
world_change_times = np.arange(num_segments) * T / num_segments
def sim_opts_gen(seed):
return SimOpts.std_piecewise_const(world_rates=world_changing_rates,
world_change_times=world_change_times,
world_seed=seed + 42).update({'end_time': T})
return opts.set_new(N=N, T=T, num_segments=num_segments, sim_opts_gen=sim_opts_gen)
poisson_inf_opts = simulation_opts.set_new(
sim_opts_gen=lambda seed: SimOpts.std_poisson(world_rate=simulation_opts.world_rate,
world_seed=seed + 42)
.update({'end_time': simulation_opts.T}))
piecewise_inf_opts = piecewise_sim_opt_factory(opts=simulation_opts)
hawkes_inf_opts = simulation_opts.set_new(
sim_opts_gen=lambda seed: SimOpts.std_hawkes(world_seed=seed,
world_lambda_0=simulation_opts.world_rate,
world_alpha=simulation_opts.world_alpha,
world_beta=simulation_opts.world_beta)
.update({'end_time': simulation_opts.T}))
def extract_perf_fields(return_obj, exclude_fields=None, include_fields=None):
"""Extracts the relevant fields from the return object and returns them in a new dict."""
result_dict = {}
include_fields = include_fields if include_fields is not None else set()
exclude_fields = exclude_fields if exclude_fields is not None else set()
fields = set(perf_opts.performance_fields).union(include_fields) - exclude_fields
for field in fields:
result_dict[field] = return_obj[field]
return result_dict
real_performance_fields = [x for x in perf_opts.performance_fields if x != 'q'] + ['user_id']
def extract_real_perf_fields(return_obj, exclude_fields=None, include_fields=None):
"""Extracts the relevant fields from the return object and returns them in a new dict."""
result_dict = {}
include_fields = include_fields if include_fields is not None else set()
exclude_fields = exclude_fields if exclude_fields is not None else set()
fields = set(real_performance_fields).union(include_fields) - exclude_fields
for field in fields:
result_dict[field] = return_obj[field]
return result_dict
@optioned(option_arg='opts')
def run_inference(N, T, num_segments, sim_opts_gen, log_q_high, log_q_low):
"""Run inference for the given sim_opts_gen by sweeping over 'q' and
running the simulation for different seeds."""
processes = []
queue = mp.Queue()
results = []
capacities = {}
raw_results = []
try:
active_processes = 0
for q in np.logspace(log_q_low, log_q_high, num=10):
capacities[q] = []
for seed in range(N):
active_processes += 1
sim_opts = sim_opts_gen(seed).update({'q' : q})
p = mp.Process(target=worker_opt,
args=((seed, sim_opts, queue),))
processes.append(p)
p.daemon = True
p.start()
logTime('Started all processes: {}'.format(active_processes))
while active_processes > 0:
# logTime('active_processes = {}'.format(active_processes))
r = queue.get()
raw_results.append(r)
results.append(extract_perf_fields(r))
active_processes -= 1
if r['type'] == 'Opt':
seed = r['seed']
capacity = r['capacity']
                q = r['sim_opts'].q
                sim_opts = r['sim_opts']
                world_events = r['world_events']
                capacities[q].append((seed, capacity))
# Poisson
p = mp.Process(target=worker_poisson, args=((seed, capacity, sim_opts, queue),))
processes.append(p)
p.daemon = True
p.start()
active_processes += 1
# Oracle
oracle_args = (seed, capacity, world_events, sim_opts, queue)
p = mp.Process(target=worker_oracle, args=(oracle_args,))
processes.append(p)
p.daemon = True
p.start()
active_processes += 1
# KDD solution
# kdd_args = (seed, capacity, num_segments, sim_opts, world_changing_rates, queue)
# kdd_args = (seed, capacity, num_segments, sim_opts, None, queue)
# p = mp.Process(target=worker_kdd, args=(kdd_args,))
# processes.append(p)
# p.daemon = True
# p.start()
# active_processes += 1
elif r['type'] == 'Poisson':
if active_processes % 10 == 0:
logTime('Active processes = {}'.format(active_processes))
elif r['type'] == 'Oracle':
if active_processes % 10 == 0:
logTime('Active processes = {}'.format(active_processes))
elif r['type'] == 'kdd':
if active_processes % 10 == 0:
logTime('Active processes = {}'.format(active_processes))
else:
raise ValueError('Unknown type: {}'.format(r['type']))
finally:
# Attempt at cleanup
logging.info("Cleaning up {} processes".format(len(processes)))
for p in processes:
p.terminate()
p.join()
return Options(df=pd.DataFrame.from_records(results),
raw_results=raw_results,
capacities=capacities)
def worker_combined(input_queue, output_queue):
while True:
broadcaster_type, broadcaster_args = input_queue.get()
if broadcaster_type == 'Stop':
break
try:
all_args = broadcaster_args + (output_queue,)
if broadcaster_type == 'Opt':
worker_opt(all_args)
elif broadcaster_type == 'Poisson':
worker_poisson(all_args)
elif broadcaster_type == 'Oracle':
worker_oracle(all_args)
elif broadcaster_type == 'kdd':
worker_kdd(all_args)
else:
raise RuntimeError('Unknown broadcaster type: {}'.format(broadcaster_type))
except Exception as e:
output_queue.put({
'type' : 'Exception',
'error' : e,
'broadcaster_type' : broadcaster_type,
'broadcaster_args' : broadcaster_args
})
raise
@optioned(option_arg='opts')
def run_inference_queue_kdd(N, T, num_segments, sim_opts_gen, log_q_high, log_q_low, num_procs=None):
"""Run inference for the given sim_opts_gen by sweeping over 'q' and
running the simulation for different seeds."""
if num_procs is None:
num_procs = mp.cpu_count() - 1
in_queue = mp.Queue()
out_queue = mp.Queue()
results = []
raw_results = []
capacities = {}
# Start consumers
processes = [mp.Process(target=worker_combined, args=(in_queue, out_queue))
for _ in range(num_procs)]
for p in processes:
p.daemon = True # Terminate if the parent dies.
p.start()
active_procs = 0
type_procs = defaultdict(lambda: 0)
def add_task(task_type, args):
in_queue.put((task_type, args))
type_procs[task_type] += 1
try:
for q in np.logspace(log_q_low, log_q_high, num=10):
capacities[q] = []
for seed in range(N):
in_queue.put(('Opt', (seed, sim_opts_gen(seed).update({'q': q}))))
active_procs += 1
type_procs['Opt'] = active_procs
while active_procs > 0:
r = out_queue.get()
active_procs -= 1
type_procs[r['type']] -= 1
if active_procs % 10 == 0:
logTime('active_procs = {}, procs = {}'
.format(active_procs, list(type_procs.items())))
if r['type'] == 'Exception':
                logging.error('Exception while handling: %s', r)
else:
raw_results.append(r)
results.append(extract_perf_fields(r))
if r['type'] == 'Opt':
seed = r['seed']
capacity = r['capacity']
q = r['sim_opts'].q
sim_opts = r['sim_opts']
world_events = r['world_events']
capacities[q].append((seed, capacity))
# add_task('Poisson', (seed, capacity, sim_opts))
# active_procs += 1
# add_task('Oracle', (seed, capacity, world_events, sim_opts))
# active_procs += 1
add_task('kdd', (seed, capacity, num_segments, sim_opts, None))
active_procs += 1
for p in range(num_procs):
in_queue.put(('Stop', None))
except:
# In case of exceptions, do not block the parent thread and just
# discard all data on the queues.
in_queue.cancel_join_thread()
out_queue.cancel_join_thread()
raise
finally:
logging.info('Cleaning up {} processes'.format(len(processes)))
for p in processes:
p.terminate()
p.join()
return Options(df=pd.DataFrame.from_records(results),
raw_results=raw_results,
capacities=capacities)
@optioned(option_arg='opts')
def run_inference_queue(N, T, num_segments, sim_opts_gen, log_q_high, log_q_low, num_procs=None):
"""Run inference for the given sim_opts_gen by sweeping over 'q' and
running the simulation for different seeds."""
if num_procs is None:
num_procs = mp.cpu_count() - 1
in_queue = mp.Queue()
out_queue = mp.Queue()
results = []
raw_results = []
capacities = {}
# Start consumers
processes = [mp.Process(target=worker_combined, args=(in_queue, out_queue))
for _ in range(num_procs)]
for p in processes:
p.daemon = True # Terminate if the parent dies.
p.start()
active_procs = 0
type_procs = defaultdict(lambda: 0)
def add_task(task_type, args):
in_queue.put((task_type, args))
type_procs[task_type] += 1
try:
for q in np.logspace(log_q_low, log_q_high, num=10):
capacities[q] = []
for seed in range(N):
in_queue.put(('Opt', (seed, sim_opts_gen(seed).update({'q': q}), num_segments)))
active_procs += 1
type_procs['Opt'] = active_procs
while active_procs > 0:
r = out_queue.get()
active_procs -= 1
type_procs[r['type']] -= 1
if active_procs % 10 == 0:
logTime('active_procs = {}, procs = {}'
.format(active_procs, list(type_procs.items())))
if r['type'] == 'Exception':
                logging.error('Exception while handling: %s', r)
else:
raw_results.append(r)
results.append(extract_perf_fields(r))
if r['type'] == 'Opt':
seed = r['seed']
capacity = r['capacity']
q = r['sim_opts'].q
sim_opts = r['sim_opts']
world_events = r['world_events']
capacities[q].append((seed, capacity))
add_task('Poisson', (seed, capacity, sim_opts))
active_procs += 1
add_task('Oracle', (seed, capacity, world_events, sim_opts))
active_procs += 1
add_task('kdd', (seed, capacity, num_segments, sim_opts, r['wall_intensities']))
active_procs += 1
for p in range(num_procs):
in_queue.put(('Stop', None))
except:
# In case of exceptions, do not block the parent thread and just
# discard all data on the queues.
in_queue.cancel_join_thread()
out_queue.cancel_join_thread()
raise
finally:
logging.info('Cleaning up {} processes'.format(len(processes)))
for p in processes:
p.terminate()
p.join()
return Options(df=pd.DataFrame.from_records(results),
raw_results=raw_results,
capacities=capacities)
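# --- Hedged usage sketch (added for illustration; not part of the original file) ---
# Because of the @optioned decorator, the sweep is normally launched with one of the
# Options bundles defined above, e.g.:
#
#   results = run_inference_queue(opts=piecewise_inf_opts, num_procs=4)
#   perf_df = results.df              # per-run performance rows
#   capacities = results.capacities   # (seed, capacity) pairs keyed by q
#
# The keyword name (`opts=`) follows the decorator's `option_arg='opts'` setting;
# treat this as a sketch rather than documented API usage.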
## Experiment with multiple followers
def make_piecewise_const(num_segments):
"""Makes a piecewise constant semi-sinusoid curve with num_segments segments."""
true_values = np.sin(np.arange(0, np.pi, step=0.001))
    seg_idx = np.arange(true_values.shape[0])
import datetime
from collections import defaultdict
from decimal import Decimal, ROUND_HALF_UP
import numpy as np
from matplotlib import pyplot as plt
import utils.globals_variable as gv
from enums import ReportType
from history.capital import LocalCapital
from history.statistics import descriptiveStatistics, decilePercentage
from history.trade_record import LocalTradeRecord
from submodule.Xu3.utils import getLogger
# Trade record for a single Order (partial buys and sells are allowed)
class TradeRecord:
def __init__(self):
# record dict value:
# [buy_time, buy_price, buy_volumn, sell_time, sell_price, sell_volumn, revenue, buy_cost, sell_cost,
# stop_value_moving, income, return_rate, annual_return_rate]
self.record = None
def __getattr__(self, item):
if self.record.__contains__(item):
return self.record[item]
else:
return None
def __len__(self):
return len(self.income)
def __repr__(self):
return self.toString(guid=None)
__str__ = __repr__
def __add__(self, other):
pass
def toString(self, guid=None):
# TODO: self.record
if self.record is None:
return "self.record is None"
if guid is None:
description = f"TradeRecord({self.record['buy_time']} ~ {self.record['sell_time']})"
else:
description = f"TradeRecord(guid: {guid}, {self.record['buy_time']} ~ {self.record['sell_time']})"
for key, value in self.record.items():
if key != 'buy_time' or key != 'sell_time':
description += f"\n{key}: {value}"
return description
def add(self, buy_time: datetime.datetime, buy_price: Decimal, buy_volumn: int,
sell_time: datetime.datetime, sell_price: Decimal, sell_volumn: int,
revenue: Decimal, buy_cost: Decimal, sell_cost: Decimal, stop_value_moving: list,
first_add: bool = False):
if first_add:
income, return_rate, annual_return_rate = self.derivedData(buy_time=buy_time,
buy_cost=buy_cost,
sell_time=sell_time,
sell_cost=sell_cost,
revenue=revenue)
self.record = dict(buy_time=buy_time, buy_price=buy_price, buy_volumn=buy_volumn,
sell_time=sell_time, sell_price=sell_price, sell_volumn=sell_volumn,
revenue=revenue, buy_cost=buy_cost, sell_cost=sell_cost,
stop_value_moving=stop_value_moving,
income=income, return_rate=return_rate, annual_return_rate=annual_return_rate)
else:
self.record["sell_time"] = sell_time
self.getWeightedData(kind="buy", new_price=buy_price, new_volumn=buy_volumn)
self.getWeightedData(kind="sell", new_price=sell_price, new_volumn=sell_volumn)
self.record["revenue"] += revenue
self.record["buy_cost"] += buy_cost
self.record["sell_cost"] += sell_cost
self.record["stop_value_moving"] += stop_value_moving
# 更新衍生數據
income, return_rate, annual_return_rate = self.derivedData(buy_time=self.record["buy_time"],
buy_cost=self.record["buy_cost"],
sell_time=sell_time,
sell_cost=self.record["sell_cost"],
revenue=self.record["revenue"])
self.record["income"] = income
self.record["return_rate"] = return_rate
self.record["annual_return_rate"] = annual_return_rate
@staticmethod
def derivedData(buy_time: datetime.datetime, buy_cost: Decimal, sell_time: datetime.datetime, sell_cost: Decimal,
revenue: Decimal):
cost = buy_cost + sell_cost
income = revenue - cost
return_rate = (revenue / cost).quantize(Decimal('.0000'), ROUND_HALF_UP)
during_days = Decimal(str((sell_time - buy_time) / datetime.timedelta(days=1)))
during_days = max(during_days, Decimal("1"))
annual_index = Decimal("365.25") / during_days
annual_return_rate = np.power(return_rate, annual_index).quantize(Decimal('.0000'), ROUND_HALF_UP)
return income, return_rate, annual_return_rate
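    # --- Hedged worked example (added for illustration; not part of the original class) ---
    # With buy_cost=1000, sell_cost=1000, revenue=2200 and a 30-day holding period:
    #   cost = 2000, income = 200, return_rate = 2200 / 2000 = 1.1000
    #   annual_return_rate = 1.1 ** (365.25 / 30) ≈ 3.19
    # i.e. the per-trade return is compounded out to a one-year horizon.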
def getWeightedData(self, kind: str, new_price: Decimal, new_volumn: int):
price = f"{kind}_price"
volumn = f"{kind}_volumn"
        # Weight the purchase price by the volumes bought before and now (if nothing new is bought, new_weight is 0)
origin_price = self.record[price]
origin_volumn = self.record[volumn]
result_volumn = origin_volumn + new_volumn
origin_weight = Decimal(str(origin_volumn)) / result_volumn
new_weight = Decimal(str(new_volumn)) / result_volumn
self.record[price] = origin_weight * origin_price + new_weight * new_price
self.record[volumn] = result_volumn
class History:
"""
    revenue: turnover; revenue = income + cost
    income: net result; a positive value is a 'profit', a negative value is a 'loss'
    profit: profit (History does not break this item out separately)
    loss: loss (History does not break this item out separately)
    cost: cost (both buying and selling incur costs)
    falling_price: price drop (the interval runs from one purchase to the next purchase;
        the gap between the purchase price and the lowest price in that interval)
    Trade record fields:
    stock_id, buy_time, sell_time, buy_price, sell_price, volumn, buy_cost, sell_cost, revenue
"""
def __init__(self, stock_id,
logger_dir="strategy", logger_name=datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")):
self.stock_id = stock_id
self.logger_dir = logger_dir
self.logger_name = logger_name
        # Trade records (key: guid, value: TradeRecord)
        self.trade_record = dict()
        # History of the available funds over time
        self.funds_history = []
        # Income records
        self.income = []
        # Falling prices
        self.falling_price = []
self.report = None
def __len__(self):
return len(self.trade_record)
def __repr__(self):
description = f"===== History({self.stock_id}) ====="
for guid, trade_record in self.trade_record.items():
description += f"\n{trade_record.toString(guid=guid)}"
description += f"\nFallingPrice ~ {self.getFallingPriceParam()}"
        # stop_value_moving: average adjustment amount; n_stop_value_moving: average number of adjustments
        stop_value_moving, n_stop_value_moving = self.getStopValueParam()
        description += f"\nStopValue average adjustment: {stop_value_moving}, average number of adjustments: {n_stop_value_moving}, " \
                       f"expected total adjustment per order: {stop_value_moving * n_stop_value_moving}"
description += f"\n===== Total income: {self.getIncome()} ====="
return description
__str__ = __repr__
def add(self, *trade_records):
for trade_record in trade_records:
(guid, buy_time, buy_price, buy_volumn,
sell_time, sell_price, sell_volumn,
revenue, buy_cost, sell_cost, stop_value_moving) = trade_record
            # Not present yet = first time this guid is added
first_add = not self.trade_record.__contains__(guid)
if first_add:
self.trade_record[guid] = TradeRecord()
self.trade_record[guid].add(buy_time=buy_time,
buy_price=buy_price,
buy_volumn=buy_volumn,
sell_time=sell_time,
sell_price=sell_price,
sell_volumn=sell_volumn,
revenue=revenue,
buy_cost=buy_cost,
sell_cost=sell_cost,
stop_value_moving=stop_value_moving,
first_add=first_add)
    # Record the funds (after each completed trade) over time
def recordFunds(self, funds: Decimal):
self.funds_history.append(funds)
def getTradeRecord(self, guid):
if self.trade_record.__contains__(guid):
return self.trade_record[guid]
else:
return None
def iterTradeRecord(self):
for guid, trade_record in self.trade_record.items():
yield guid, trade_record
def display(self, *args):
if self.report is None:
self.report = Report(history=self, logger_dir=self.logger_dir, logger_name=self.logger_name)
self.report.display(*args)
def getIncome(self):
income = Decimal("0")
for trade_record in self.trade_record.values():
income += trade_record.income
return income
def recordFallingPrice(self, falling_price: Decimal):
"""
        The interval runs from one purchase to the next purchase; the falling price is the
        gap between the purchase price and the lowest price within that interval.
        :param falling_price: price drop
:return:
"""
self.falling_price.append(falling_price)
def getFallingPriceParam(self):
if len(self.falling_price) > 0:
self.falling_price.sort()
mean = Decimal(str(np.mean(self.falling_price)))
std = Decimal(str(np.std(self.falling_price)))
mean = mean.quantize(Decimal('.0000'), ROUND_HALF_UP)
std = std.quantize(Decimal('.0000'), ROUND_HALF_UP)
return mean, std
else:
return Decimal("0.0000"), Decimal("0.0000")
def resetFallingPrice(self):
self.falling_price = []
def getStopValueParam(self):
stop_value_movings = []
n_stop_value_movings = []
for trade_record in self.trade_record.values():
            # stop_value_moving: 1-D list of Decimal
            stop_value_moving = trade_record.stop_value_moving
            # Merge into stop_value_movings (also a 1-D list)
stop_value_movings += stop_value_moving
# trade_record.n_stop_value_moving: int
n_stop_value_movings.append(Decimal(str(len(stop_value_moving))))
if len(stop_value_movings) > 0:
stop_value_moving = np.mean(stop_value_movings)
stop_value_moving = Decimal(str(stop_value_moving)).quantize(Decimal('.00'), ROUND_HALF_UP)
n_stop_value_moving = np.mean(n_stop_value_movings)
n_stop_value_moving = Decimal(str(n_stop_value_moving)).quantize(Decimal('.00'), ROUND_HALF_UP)
return stop_value_moving, n_stop_value_moving
else:
# print("交易期間尚未發生 stop_value 的調整")
return Decimal("0.00"), Decimal("0.00")
def getTimeRange(self):
start_time = datetime.datetime.today()
stop_time = datetime.datetime(1970, 1, 1)
for trade_record in self.trade_record.values():
start_time = min(start_time, trade_record.buy_time)
stop_time = max(stop_time, trade_record.sell_time)
return stop_time - start_time
def reset(self):
        # Trade records (key: guid, value: TradeRecord)
        self.trade_record = dict()
        # History of the available funds over time
        self.funds_history = []
        # Income records
        self.income = []
        # Falling prices
        self.falling_price = []
self.report = None
def reportResult(self, *args):
if self.report is None:
self.report = Report(history=self, logger_dir=self.logger_dir, logger_name=self.logger_name)
self.report.report_(*args)
# TODO: every metric must handle the no-data case (the run may be too short to produce certain values)
class Report:
def __init__(self, history: History,
logger_dir="report", logger_name=datetime.datetime.now().strftime("%Y-%m-%d_%H-%M-%S")):
self.logger_dir = logger_dir
self.logger_name = logger_name
self.extra = {"className": self.__class__.__name__}
self.logger = getLogger(logger_name=self.logger_name,
to_file=True,
time_file=False,
file_dir=self.logger_dir,
instance=True)
self.history = history
self.stock_id = self.history.stock_id
self.n_trade = Decimal(str(len(self.history)))
        self.zero_trading_msg = f"({self.stock_id}) total number of trades is 0; for some reason not a single trade was completed"
gv.initialize()
        # region Initial data
        """
        The report is organized along two views: 1. trade-oriented, 2. time-oriented.
        1. Trade-oriented: each individual trade is the unit of computation and its payoff is
           measured; the raw history data is already trade-oriented.
        2. Time-oriented: trades that fall on the same day are merged into a single data point
           to measure time-based returns, i.e. the return obtainable per day, per year, etc.
        The time-oriented view does not break out profit and loss: the data is already aggregated
        by date, so splitting it further adds little.
"""
self.trading_time = None
self.trading_revenue = None
self.trading_income = None
self.trading_cost = None
self.trading_profit = None
self.trading_loss = None
self.date_times = None
self.date_incomes = None
self.date_costs = None
self.funds_history = self.history.funds_history
        # MDD (maximum drawdown): how far the account equity slides from its peak; it captures the worst case an entry at any point in time could face.
self.max_drawdown = self.computeMaxDrawdown()
# 年化風報比: 為了這些獲利須承擔多大的風險
self.annualized_risk_ratio = Decimal("0")
# 權益曲線(Equity Curve)反映的就是帳戶淨值的變化。 -> 剩餘資金 + cumDateIncome
self.date_cum_incomes = []
self.initTradingDict()
self.initDateDict()
# endregion
        # Falling-price records
self.falling_price = self.history.falling_price
def __str__(self):
description = descriptiveStatistics(self.trading_income, f"Report({self.stock_id})")
return description
__repr__ = __str__
def initTradingDict(self):
if self.n_trade > 0:
trading_time = []
trading_revenue = []
trading_income = []
trading_cost = []
for guid, trade_record in self.history.iterTradeRecord():
trading_time.append(trade_record.buy_time)
trading_revenue.append(trade_record.revenue)
trading_income.append(trade_record.income)
trading_cost.append(trade_record.buy_cost + trade_record.sell_cost)
            # TODO: if no trades were made, all of these arrays are empty
self.trading_time = np.array(trading_time)
self.trading_revenue = np.array(trading_revenue)
self.trading_income = np.array(trading_income)
self.trading_cost = np.array(trading_cost)
            # Risk/reward ratio: in plain terms, "how much risk must be taken on to obtain these profits".
            # Formula: risk/reward = net profit / MDD; this is not part of the performance report and has to be computed here.
            # It can also be annualized to compare strategies backtested over different lengths:
            # annualized risk/reward = (net profit / backtest years) / MDD.
if len(self.trading_income) > 0:
income = Decimal(str(self.trading_income.sum()))
during_years = Decimal(str(self.history.getTimeRange() / datetime.timedelta(days=365.25)))
                # If the funds never fell, max_drawdown is 0; this avoids dividing by 0 in annualized_risk_ratio
self.max_drawdown = max(self.max_drawdown, Decimal("1e-8"))
self.annualized_risk_ratio = ((income / during_years) / self.max_drawdown).quantize(
Decimal('.0000'), ROUND_HALF_UP)
self.logger.info(f"({self.stock_id}) 年化風報比: {self.annualized_risk_ratio}", extra=self.extra)
self.logger.info(f"({self.stock_id}) 最大交易回落: {self.max_drawdown}", extra=self.extra)
self.logger.debug(f"({self.stock_id}) funds_history: {self.funds_history}", extra=self.extra)
            # TODO: income = 0 is also counted as profit; check whether this causes any inconsistency
self.trading_profit = self.trading_income[np.where(self.trading_income >= 0)]
self.trading_loss = self.trading_income[np.where(self.trading_income < 0)]
else:
self.logger.info(self.zero_trading_msg, extra=self.extra)
def initDateDict(self):
def zero():
return 0
if self.n_trade > 0:
date_times = [trading_time.date() for trading_time in self.trading_time]
n_data = len(date_times)
            # Data grouped by date
date_income_dict = defaultdict(zero)
date_cost_dict = defaultdict(zero)
for i in range(n_data):
date_time = date_times[i]
date_income_dict[date_time] += self.trading_income[i]
date_cost_dict[date_time] += self.trading_cost[i]
            # Sorted array of unique dates
self.date_times = list(set(date_times))
self.date_times.sort()
date_incomes = []
date_costs = []
            # Walk the sorted unique dates in order and pull out the per-date data
for date_time in self.date_times:
date_incomes.append(date_income_dict[date_time])
date_costs.append(date_cost_dict[date_time])
            self.date_incomes = np.array(date_incomes)
            self.date_costs = np.array(date_costs)
""" Tests for functions in imaging module
Run at the project directory with:
nosetests code/utils/tests/test_imaging.py
"""
# Loading modules.
from __future__ import absolute_import, division, print_function
import numpy as np
import nibabel as nib
import os
import sys
from numpy.testing import assert_almost_equal, assert_array_equal, assert_equal
# Add path to functions to the system path.
sys.path.append(os.path.join(os.path.dirname(__file__), "../functions/"))
# Load our visualization functions.
from Image_Visualizing import present_3d, make_mask,present_3d_options
# all tests of present are looking at the output sizes of the 2d arrays
def test_present():
# Read in the image data.
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d(data)
assert full.shape == (400,300)
def test_present_options_2():
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d_options(data,axis=2)
first=np.ceil(np.sqrt(10))
second=np.ceil(10/first)
assert full.shape == (100*first,100*second)
def test_present_options_1():
data = np.arange(100000)
data = data.reshape((100,100,10))
full=present_3d_options(data,axis=1)
assert full.shape == (10*10,100*10)
def test_present_options_0():
    data = np.arange(100000)
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import aesara
import aesara.tensor as at
import numpy as np
import pytest
import scipy.stats as st
from aesara.graph.basic import ancestors
from aesara.tensor.random.op import RandomVariable
from aesara.tensor.random.var import (
RandomGeneratorSharedVariable,
RandomStateSharedVariable,
)
from aesara.tensor.sort import SortOp
from arviz.data.inference_data import InferenceData
import pymc as pm
from pymc.aesaraf import floatX
from pymc.backends.base import MultiTrace
from pymc.smc.smc import IMH
from pymc.tests.helpers import SeededTest, assert_random_state_equal
class TestSMC(SeededTest):
"""Tests for the default SMC kernel"""
def setup_class(self):
super().setup_class()
self.samples = 1000
n = 4
mu1 = np.ones(n) * 0.5
mu2 = -mu1
stdev = 0.1
sigma = np.power(stdev, 2) * np.eye(n)
isigma = np.linalg.inv(sigma)
dsigma = np.linalg.det(sigma)
w1 = stdev
w2 = 1 - stdev
def two_gaussians(x):
"""
Mixture of gaussians likelihood
"""
log_like1 = (
-0.5 * n * at.log(2 * np.pi)
- 0.5 * at.log(dsigma)
- 0.5 * (x - mu1).T.dot(isigma).dot(x - mu1)
)
log_like2 = (
-0.5 * n * at.log(2 * np.pi)
- 0.5 * at.log(dsigma)
- 0.5 * (x - mu2).T.dot(isigma).dot(x - mu2)
)
return at.log(w1 * at.exp(log_like1) + w2 * at.exp(log_like2))
with pm.Model() as self.SMC_test:
X = pm.Uniform("X", lower=-2, upper=2.0, shape=n)
llk = pm.Potential("muh", two_gaussians(X))
self.muref = mu1
with pm.Model() as self.fast_model:
x = pm.Normal("x", 0, 1)
y = pm.Normal("y", x, 1, observed=0)
def test_sample(self):
initial_rng_state = np.random.get_state()
with self.SMC_test:
mtrace = pm.sample_smc(draws=self.samples, return_inferencedata=False)
# Verify sampling was done with a non-global random generator
assert_random_state_equal(initial_rng_state, np.random.get_state())
x = mtrace["X"]
mu1d = np.abs(x).mean(axis=0)
np.testing.assert_allclose(self.muref, mu1d, rtol=0.0, atol=0.03)
def test_discrete_rounding_proposal(self):
"""
Test that discrete variable values are automatically rounded
in SMC logp functions
"""
with pm.Model() as m:
z = pm.Bernoulli("z", p=0.7)
like = pm.Potential("like", z * 1.0)
smc = IMH(model=m)
smc.initialize_population()
smc._initialize_kernel()
assert smc.prior_logp_func(floatX(np.array([-0.51]))) == -np.inf
assert np.isclose(smc.prior_logp_func(floatX(np.array([-0.49]))), np.log(0.3))
assert np.isclose(smc.prior_logp_func(floatX(np.array([0.49]))), np.log(0.3))
assert np.isclose(smc.prior_logp_func(floatX( | np.array([0.51]) | numpy.array |
## Copyright (c) 2017 <NAME> GmbH
## All rights reserved.
##
## This source code is licensed under the MIT license found in the
## LICENSE file in the root directory of this source tree.
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch import optim
import numpy as np
from torch.nn.utils import weight_norm
import pickle
import sys
from termcolor import colored
from modules.hierarchical_embedding import HierarchicalEmbedding
from modules.embeddings import LearnableEmbedding, SineEmbedding
def sqdist(A, B):
return (A**2).sum(dim=2)[:,:,None] + (B**2).sum(dim=2)[:,None,:] - 2 * torch.bmm(A, B.transpose(1,2))
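def _check_sqdist():
    # Illustrative sanity check, not part of the original module: for random batched
    # inputs, sqdist should match the squared pairwise Euclidean distances computed
    # by torch.cdist, up to floating-point error. The shapes below are made up.
    A = torch.randn(2, 5, 3)
    B = torch.randn(2, 7, 3)
    assert torch.allclose(sqdist(A, B), torch.cdist(A, B) ** 2, atol=1e-4)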
class ResidualBlock(nn.Module):
def __init__(self, d_in, d_out, groups=1, dropout=0.0):
super().__init__()
assert d_in % groups == 0, "Input dimension must be a multiple of groups"
assert d_out % groups == 0, "Output dimension must be a multiple of groups"
self.d_in = d_in
self.d_out = d_out
self.proj = nn.Sequential(nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups),
nn.ReLU(inplace=True),
nn.Dropout(dropout),
nn.Conv1d(d_out, d_out, kernel_size=1, groups=groups),
nn.Dropout(dropout))
if d_in != d_out:
self.downsample = nn.Conv1d(d_in, d_out, kernel_size=1, groups=groups)
def forward(self, x):
assert x.size(1) == self.d_in, "x dimension does not agree with d_in"
return x + self.proj(x) if self.d_in == self.d_out else self.downsample(x) + self.proj(x)
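def _example_residual_block():
    # Usage sketch with assumed shapes (not from the original code): ResidualBlock
    # acts on (batch, channels, length) tensors through grouped 1x1 convolutions,
    # so a (2, 8, 32) input with d_out=16 comes back as (2, 16, 32).
    block = ResidualBlock(d_in=8, d_out=16, groups=4, dropout=0.1)
    x = torch.randn(2, 8, 32)
    return block(x).shape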
class GraphLayer(nn.Module):
def __init__(self, d_model, d_inner, n_head, d_head, dropout=0.0, attn_dropout=0.0, wnorm=False, use_quad=False, lev=0):
super().__init__()
self.d_model = d_model
self.d_inner = d_inner
self.n_head = n_head
self.d_head = d_head
self.dropout = nn.Dropout(dropout)
self.attn_dropout = nn.Dropout(attn_dropout)
self.lev = lev
self.use_quad = use_quad
# To produce the query-key-value for the self-attention computation
self.qkv_net = nn.Linear(d_model, 3*d_model)
self.o_net = nn.Linear(n_head*d_head, d_model, bias=False)
self.norm1 = nn.LayerNorm(d_model)
self.norm2 = nn.LayerNorm(d_model)
self.proj1 = nn.Linear(d_model, d_inner)
self.proj2 = nn.Linear(d_inner, d_model)
self.gamma = nn.Parameter(torch.ones(4, 4)) # For different sub-matrices of D
self.sqrtd = np.sqrt(d_head)
if wnorm:
self.wnorm()
def wnorm(self):
self.qkv_net = weight_norm(self.qkv_net, name="weight")
self.o_net = weight_norm(self.o_net, name="weight")
self.proj1 = weight_norm(self.proj1, name="weight")
self.proj2 = weight_norm(self.proj2, name="weight")
def forward(self, Z, D, new_mask, mask, RA, RB, RT, RQ, store=False):
# RA = slice(0,N), RB = slice(N,N+M), RT = slice(N+M, N+M+P)
bsz, n_elem, nhid = Z.size()
n_head, d_head, d_model = self.n_head, self.d_head, self.d_model
assert nhid == d_model, "Hidden dimension of Z does not agree with d_model"
# create gamma mask
gamma_mask = torch.ones_like(D)
all_slices = [RA, RB, RT, RQ] if self.use_quad else [RA, RB, RT]
for i, slice_i in enumerate(all_slices):
for j, slice_j in enumerate(all_slices):
gamma_mask[:, slice_i, slice_j] = self.gamma[i, j]
# N+M+P+Q = 333
# d_model = 650
# n_head = 10
# Z.shape= torch.Size([48, 333, 650])
# D.shape= torch.Size([48, 333, 333])
# new_mask.shape= torch.Size([48, 333, 333]) torch.Size([48, 333, 333])
# V.shape= torch.Size([48, 333, 10, 65])
# Q.shape= torch.Size([48, 333, 10, 65])
# K.shape= torch.Size([48, 333, 10, 65])
# V.shape= torch.Size([48, 333, 10, 65])
# W.shape= torch.Size([48, 10, 333, 333])
# WV.shape= torch.Size([48, 333, 10, 65])
# attn_out.shape= torch.Size([48, 333, 650])
# ret.shape= torch.Size([48, 333, 650])
# Self-attention
inp = Z
Z = self.norm1(Z)
V, Q, K = self.qkv_net(Z).view(bsz, n_elem, n_head, 3*d_head).chunk(3, dim=3) # "V, Q, K"
W = -(gamma_mask*D)[:,None] + torch.einsum('bnij, bmij->binm', Q, K).type(D.dtype) / self.sqrtd + new_mask[:,None]
W = self.attn_dropout(F.softmax(W, dim=3).type(mask.dtype) * mask[:, None]) # softmax(-gamma*D + Q^T K)
if store:
pickle.dump(W.cpu().detach().numpy(), open(f'analysis/layer_{self.lev}_W.pkl', 'wb'))
attn_out = torch.einsum('binm,bmij->bnij', W, V.type(W.dtype)).contiguous().view(bsz, n_elem, d_model)
attn_out = self.dropout(self.o_net(F.leaky_relu(attn_out)))
Z = attn_out + inp
# Position-wise feed-forward
inp = Z
Z = self.norm2(Z)
# d_model -> d_inner -> d_model
return self.proj2(self.dropout(F.relu(self.proj1(Z)))) + inp
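def _example_graphlayer_shapes():
    # Shape sketch (illustrative only, simplified from the shape comments above):
    # GraphLayer maps Z of shape (bsz, n_elem, d_model) back to the same shape,
    # given a distance matrix D and masks of shape (bsz, n_elem, n_elem). All the
    # sizes below are made up; N + M + P must equal n_elem when use_quad is False.
    layer = GraphLayer(d_model=20, d_inner=40, n_head=4, d_head=5)
    bsz, n_elem = 2, 12
    N, M, P = 5, 4, 3
    RA, RB, RT, RQ = slice(0, N), slice(N, N + M), slice(N + M, N + M + P), slice(N + M + P, n_elem)
    Z = torch.randn(bsz, n_elem, 20)
    D = torch.rand(bsz, n_elem, n_elem)
    mask = torch.ones(bsz, n_elem, n_elem)
    new_mask = torch.zeros(bsz, n_elem, n_elem)
    return layer(Z, D, new_mask, mask, RA, RB, RT, RQ).shape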
class GraphTransformer(nn.Module):
def __init__(self,
dim, # model dim
n_layers,
final_dim,
d_inner,
fdim=30, # feature dim; embed_dim = dim - fdim
dropout=0.0,
dropatt=0.0,
final_dropout=0.0,
n_head=10,
num_atom_types=[5, 13, 27],
num_bond_types=[28, 53, 69],
num_triplet_types=[29, 118],
num_quad_types=[62],
#min_bond_dist=0.9586,
#max_bond_dist=3.9244,
dist_embedding="sine",
atom_angle_embedding="learnable",
trip_angle_embedding="learnable",
quad_angle_embedding="learnable",
wnorm=False,
use_quad=False
):
super().__init__()
num_atom_types = np.array(num_atom_types)
num_bond_types = np.array(num_bond_types)
num_triplet_types = np.array(num_triplet_types)
num_quad_types = np.array(num_quad_types)
if atom_angle_embedding == 'learnable':
# features = [closes atoms angle, partial charge]
self.atom_embedding = LearnableEmbedding(len(num_atom_types), num_atom_types+1,
d_embeds=dim-fdim, d_feature=fdim, n_feature=2)
else:
self.atom_embedding = SineEmbedding(len(num_atom_types), num_atom_types+1, dim, n_feature=2)
if dist_embedding == 'learnable':
# features: [bond_dist]
self.bond_embedding = LearnableEmbedding(len(num_bond_types), num_bond_types+1,
d_embeds=dim-fdim, d_feature=fdim, n_feature=1)
else:
self.bond_embedding = SineEmbedding(len(num_bond_types), num_bond_types+1, dim, n_feature=1)
if trip_angle_embedding == 'learnable':
# features: [angle]
self.triplet_embedding = LearnableEmbedding(len(num_triplet_types), num_triplet_types+1,
d_embeds=dim-fdim, d_feature=fdim, n_feature=1)
else:
self.triplet_embedding = SineEmbedding(len(num_triplet_types), num_triplet_types+1, dim)
if use_quad:
if quad_angle_embedding == 'learnable':
self.quad_embedding = LearnableEmbedding(len(num_quad_types), num_quad_types+1,
d_embeds=dim-fdim, d_feature=fdim, n_feature=1)
else:
self.quad_embedding = SineEmbedding(len(num_quad_types), num_quad_types+1, dim)
self.fdim = fdim
self.dim = dim
#self.min_bond_dist = min_bond_dist
#self.max_bond_dist = max_bond_dist
self.wnorm = wnorm
self.use_quad = use_quad
print(f"{'' if use_quad else colored('Not ', 'cyan')}Using Quadruplet Features")
self.n_head = n_head
assert dim % n_head == 0, "dim must be a multiple of n_head"
self.layers = nn.ModuleList([GraphLayer(d_model=dim, d_inner=d_inner, n_head=n_head, d_head=dim//n_head, dropout=dropout,
attn_dropout=dropatt, wnorm=wnorm, use_quad=use_quad, lev=i+1) for i in range(n_layers)])
self.final_norm = nn.LayerNorm(dim)
# TODO: Warning: we are predicting with the second-hierarchy bond (sub)types!!!!!
self.final_dropout = final_dropout
final_dim = num_bond_types[1] * final_dim
self.final_lin1 = nn.Conv1d(dim, final_dim, kernel_size=1)
self.final_res = nn.Sequential(
ResidualBlock(final_dim, final_dim, groups=int(num_bond_types[1]), dropout=final_dropout),
nn.Conv1d(final_dim, num_bond_types[1], kernel_size=1, groups=int(num_bond_types[1]))
)
self.apply(self.weights_init)
def forward(self, x_atom, x_atom_pos, x_bond, x_bond_dist, x_triplet, x_triplet_angle, x_quad, x_quad_angle):
# PART I: Form the embeddings and the distance matrix
bsz = x_atom.shape[0]
N = x_atom.shape[1]
M = x_bond.shape[1]
P = x_triplet.shape[1]
Q = x_quad.shape[1] if self.use_quad else 0
D = torch.zeros(x_atom.shape[0], N+M+P+Q, N+M+P+Q, device=x_atom.device)
RA = slice(0,N)
RB = slice(N,N+M)
RT = slice(N+M, N+M+P)
RQ = slice(N+M+P, N+M+P+Q)
D[:,RA,RA] = sqdist(x_atom_pos[:,:,:3], x_atom_pos[:,:,:3]) # Only the x,y,z information, not charge/angle
for i in range(D.shape[0]):
# bonds
a1,a2 = x_bond[i,:,3], x_bond[i,:,4]
D[i, RA, RB] = torch.min(D[i, RA, a1], D[i, RA, a2])
D[i, RB, RA] = D[i, RA, RB].transpose(0,1)
D[i, RB, RB] = (D[i,a1,RB] + D[i,a2,RB])/2
D[i, RB ,RB] = (D[i,RB,RB] + D[i,RB,RB].transpose(0,1))/2
# triplets
a1, a2, a3 = x_triplet[i,:,1], x_triplet[i,:,2], x_triplet[i,:,3]
b1, b2 = x_triplet[i,:,4], x_triplet[i,:,5]
D[i, RA, RT] = torch.min(torch.min(D[i,RA,a1], D[i,RA,a2]), D[i,RA, a3]) + D[i,RA,a1]
D[i, RT, RA] = D[i,RA,RT].transpose(0,1)
D[i, RB, RT] = torch.min(D[i,RB,b1], D[i,RB,b2])
D[i, RT, RB] = D[i,RB,RT].transpose(0,1)
D[i, RT, RT] = (D[i,b1,RT] + D[i,b2,RT]) / 2
D[i, RT, RT] = (D[i,RT,RT] + D[i,RT,RT].transpose(0,1)) / 2
if self.use_quad:
# quad
a1,a2,a3,a4 = x_quad[i,:,1], x_quad[i,:,2], x_quad[i,:,3], x_quad[i,:,4]
b1,b2,b3 = x_quad[i,:,5], x_quad[i,:,6], x_quad[i,:,7]
t1,t2 = x_quad[i,:,8], x_quad[i,:,9]
D[i,RA,RQ] = torch.min(torch.min(torch.min(D[i,RA,a1], D[i,RA,a2]), D[i,RA, a3]), D[i,RA,a4]) + \
torch.min(D[i,RA,a1], D[i,RA,a2])
D[i,RQ,RA] = D[i,RA,RQ].transpose(0,1)
D[i,RB,RQ] = torch.min(torch.min(D[i,RB,b1], D[i,RB,b2]), D[i,RB, b3]) + D[i,RB,b1]
D[i,RQ,RB] = D[i,RB,RQ].transpose(0,1)
D[i,RT,RQ] = torch.min(D[i,RT,t1], D[i,RT,t2])
D[i,RQ,RT] = D[i,RT,RQ].transpose(0,1)
D[i,RQ,RQ] = (D[i,t1,RQ] + D[i,t2,RQ]) / 2
D[i,RQ,RQ] = (D[i,RQ,RQ] + D[i,RQ,RQ].transpose(0,1))/2
# No interaction (as in attention = 0) if query or key is the zero padding...
if self.use_quad:
mask = torch.cat([x_atom[:,:,0] > 0, x_bond[:,:,0] > 0, x_triplet[:,:,0] > 0, x_quad[:,:,0] > 0], dim=1).type(x_atom_pos.dtype)
else:
mask = torch.cat([x_atom[:,:,0] > 0, x_bond[:,:,0] > 0, x_triplet[:,:,0] > 0], dim=1).type(x_atom_pos.dtype)
mask = torch.einsum('bi, bj->bij', mask, mask)
new_mask = -1e20 * torch.ones_like(mask).to(mask.device)
new_mask[mask > 0] = 0
if self.use_quad:
Z = torch.cat([
self.atom_embedding(x_atom[:,:,:3], x_atom_pos[:,:,3:]),
self.bond_embedding(x_bond[:,:,:3], x_bond_dist),
self.triplet_embedding(x_triplet[:,:,:2], x_triplet_angle),
self.quad_embedding(x_quad[:,:,:1], x_quad_angle)], dim=1)
else:
Z = torch.cat([
self.atom_embedding(x_atom[:,:,:3], x_atom_pos[:,:,3:]),
self.bond_embedding(x_bond[:,:,:3], x_bond_dist),
self.triplet_embedding(x_triplet[:,:,:2], x_triplet_angle)], dim=1)
# PART II: Pass through a bunch of self-attention and position-wise feed-forward blocks
seed = | np.random.uniform(0,1) | numpy.random.uniform |
from torch import nn
import torch.nn.functional as F
import numpy as np
from PIL import Image
import os
import shutil
from config import cfg
import pdb
import math
def weights_normal_init(*models):
for model in models:
dev=0.01
if isinstance(model, list):
for m in model:
                weights_normal_init(m)
else:
for m in model.modules():
if isinstance(m, nn.Conv2d):
m.weight.data.normal_(0.0, dev)
if m.bias is not None:
m.bias.data.fill_(0.0)
elif isinstance(m, nn.Linear):
m.weight.data.normal_(0.0, dev)
def initialize_weights(*models):
for model in models:
if isinstance(model, list):
for m in model:
initialize_weights(m)
else:
for module in model.modules():
if isinstance(module, nn.Conv2d):
n = module.kernel_size[0] * module.kernel_size[1] * module.out_channels
module.weight.data.normal_(0, math.sqrt(2. / n))
if module.bias is not None:
module.bias.data.zero_()
elif isinstance(module, nn.BatchNorm2d):
module.weight.data.fill_(1)
module.bias.data.zero_()
elif isinstance(module, nn.Linear):
n = module.weight.size(1)
module.weight.data.normal_(0, math.sqrt(2. / n))
module.bias.data.zero_()
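def _example_initialize_weights():
    # Usage sketch (assumed model, not from the original code): initialize_weights
    # walks model.modules() and re-initializes Conv2d, BatchNorm2d and Linear layers.
    model = nn.Sequential(nn.Conv2d(3, 16, 3), nn.BatchNorm2d(16), nn.ReLU(), nn.Conv2d(16, 1, 1))
    initialize_weights(model)
    return model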
def weights_init_kaiming(*models):
for model in models:
if isinstance(model, list):
for m in model:
weights_init_kaiming(m)
else:
for module in model.modules():
if isinstance(module, nn.Conv2d):
#kaiming is first name of author whose last name is 'He' lol
                    nn.init.kaiming_uniform_(module.weight)
                    if module.bias is not None:
                        module.bias.data.zero_()
def adjust_learning_rate(lr, decay, optimizer, cur_epoch, n_epochs):
"""Sets the learning rate to the initially
configured `lr` decayed by `decay` every `n_epochs`"""
new_lr = lr * (decay ** (cur_epoch // n_epochs))
for param_group in optimizer.param_groups:
param_group['lr'] = new_lr
def calculate_mean_iu(predictions, gts, num_classes):
sum_iu = 0
class_iu = []
for i in range(num_classes):
n_ii = t_i = sum_n_ji = 1e-9
for p, gt in zip(predictions, gts):
n_ii += np.sum(gt[p == i] == i)
t_i += np.sum(gt == i)
sum_n_ji += np.sum(p == i)
sum_iu += float(n_ii) / (t_i + sum_n_ji - n_ii)
class_iu.append(float(n_ii) / (t_i + sum_n_ji - n_ii))
mean_iu = sum_iu / num_classes
return mean_iu,class_iu
def calculate_lane_metrics(predictions, gts, num_classes):
sum_iu = 0
class_iu = []
acc = []
rec = []
f1_m = []
for i in range(num_classes):
tp = fp = fn = 0.
for p, gt in zip(predictions, gts):
tp += np.sum(gt[p == i] == i)
fp += np.sum( (gt[p == i] != i ))
fn += np.sum(gt[p != i] == i)
class_iu.append(tp / (tp + fp + fn + 1e-9))
acc.append(tp/(tp+fp+1e-9))
rec.append(tp/(tp+fn+1e-9))
f1_m.append(2*acc[i]*rec[i]/(acc[i]+rec[i]+1e-9))
sum_iu += tp / (tp + fp + fn + 1e-9)
mean_iu = sum_iu / num_classes
return {'miu':mean_iu,
'ciu':class_iu
},\
{'acc':acc,
'rec':rec,
'f1_m':f1_m
}
class CrossEntropyLoss2d(nn.Module):
def __init__(self, weight=None, size_average=True):
super(CrossEntropyLoss2d, self).__init__()
self.nll_loss = nn.NLLLoss2d(weight, size_average)
def forward(self, inputs, targets):
        return self.nll_loss(F.log_softmax(inputs, dim=1), targets)
def rmrf_mkdir(dir_name):
if os.path.exists(dir_name):
shutil.rmtree(dir_name)
os.mkdir(dir_name)
def rm_file(path_file):
if os.path.exists(path_file):
os.remove(path_file)
def colorize_mask(mask):
# mask: numpy array of the mask
new_mask = Image.fromarray(mask.astype(np.uint8)).convert('P')
new_mask.putpalette(cfg.VIS.PALETTE_LABEL_COLORS)
return new_mask
def scores(predictions, gts, num_classes):
hist = np.zeros((num_classes, num_classes))
for lp, lt in zip(predictions, gts):
hist += _fast_hist(lp.flatten(), lt.flatten(), num_classes)
# axis 0: gt, axis 1: prediction
# acc = np.diag(hist).sum() / hist.sum()
# acc_cls = np.diag(hist) / hist.sum(axis=1)
# acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = np.nanmean(iu)
# freq = hist.sum(axis=1) / hist.sum()
# fwavacc = (freq[freq > 0] * iu[freq > 0]).sum()
# return acc, acc_cls, mean_iu, fwavacc
# pdb.set_trace()
return {'miu':mean_iu,
'ciu':iu
}
def acc(predictions, gts, num_classes=2):
    # Threshold the raw predictions before casting; casting float probabilities to
    # int64 first would truncate them to zero and make the threshold a no-op.
    predictions = predictions.data.numpy()
    gts = gts.data.numpy().astype(np.int64)
    predictions = (predictions >= 0.5).astype(np.int64)
    t = predictions == gts
    acc = np.sum(t) / float(t.shape[0] * t.shape[1])
    return acc
def _fast_hist(label_pred, label_true, num_classes):
mask = (label_true >= 0) & (label_true < num_classes)
hist = np.bincount(
num_classes * label_true[mask].astype(int) +
label_pred[mask], minlength=num_classes ** 2).reshape(num_classes, num_classes)
return hist
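def _example_fast_hist():
    # Illustrative example (not in the original source): for 3 classes, _fast_hist
    # builds a 3x3 confusion matrix with ground truth on axis 0 and predictions on
    # axis 1; per-class IoU then follows the same formula used in scores() above.
    pred = np.array([0, 1, 2, 2, 1])
    gt = np.array([0, 1, 1, 2, 1])
    hist = _fast_hist(pred, gt, 3)
    iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
    return hist, iu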
def streaming_scores(hist,predictions, gts, num_classes):
for lp, lt in zip(predictions, gts):
hist += _fast_hist(lp.flatten(), lt.flatten(), num_classes)
# axis 0: gt, axis 1: prediction
# acc = np.diag(hist).sum() / hist.sum()
# acc_cls = np.diag(hist) / hist.sum(axis=1)
# acc_cls = np.nanmean(acc_cls)
iu = np.diag(hist) / (hist.sum(axis=1) + hist.sum(axis=0) - np.diag(hist))
mean_iu = | np.nanmean(iu) | numpy.nanmean |
# plotting
import matplotlib
matplotlib.use('agg')
import matplotlib.pyplot as plt
import seaborn as sns
# numpy
import numpy as np
# scipy
import scipy as sp
import scipy.interpolate
from scipy.special import erfinv, erf
from scipy.stats import poisson as pss
import scipy.fftpack
import scipy.sparse
# jit
from numba import jit
import ctypes
import astropy
import astropy as ap
from astropy.convolution import convolve_fft, AiryDisk2DKernel
import pickle
# multiprocessing
import multiprocessing as mp
from copy import deepcopy
# utilities
import os, time, sys, glob, fnmatch, inspect, traceback, functools
# HealPix
import healpy as hp
# ignore warnings if not in diagnostic mode
import warnings
#seterr(divide='raise', over='raise', invalid='raise')
#seterr(all='raise')
#seterr(under='ignore')
#warnings.simplefilter('ignore')
#np.set_printoptions(linewidth=180)
#sns.set(context='poster', style='ticks', color_codes=True)
import h5py
# utilities
# secondaries
## Symbolic Jacobian calculation
#import sympy
# tdpy
import tdpy
from tdpy.util import summgene
# photometry related
### find the spectra of sources
def retr_spec(gdat, flux, sind=None, curv=None, expc=None, sindcolr=None, elin=None, edisintp=None, sigm=None, gamm=None, spectype='powr', plot=False):
if gdat.numbener == 1:
spec = flux[None, :]
else:
if plot:
meanener = gdat.meanpara.enerplot
else:
meanener = gdat.meanpara.ener
        if spectype == 'gaus':
            spec = 1. / edis[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis[None, :])**2)
        if spectype == 'voig':
            args = (gdat.meanpara.ener[:, None] + 1j * gamm[None, :]) / np.sqrt(2.) / sigm[None, :]
            spec = 1. / sigm[None, :] / np.sqrt(2. * np.pi) * flux[None, :] * np.real(scipy.special.wofz(args))
        if spectype == 'edis':
            edis = edisintp(elin)[None, :]
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'pvoi':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'lore':
            spec = 1. / edis / np.sqrt(2. * np.pi) * flux[None, :] * np.exp(-0.5 * ((gdat.meanpara.ener[:, None] - elin[None, :]) / edis)**2)
        if spectype == 'powr':
            spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :])
        if spectype == 'colr':
            if plot:
                spec = np.zeros((gdat.numbenerplot, flux.size))
            else:
                spec = np.empty((gdat.numbener, flux.size))
            for i in gdat.indxener:
                if i < gdat.indxenerpivt:
                    spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i])
                elif i == gdat.indxenerpivt:
                    spec[i, :] = flux
                else:
                    spec[i, :] = flux * (gdat.meanpara.ener[i] / gdat.enerpivt)**(-sindcolr[i-1])
        if spectype == 'curv':
            spec = flux[None, :] * meanener[:, None]**(-sind[None, :] - gdat.factlogtenerpivt[:, None] * curv[None, :])
        if spectype == 'expc':
spec = flux[None, :] * (meanener / gdat.enerpivt)[:, None]**(-sind[None, :]) * np.exp(-(meanener - gdat.enerpivt)[:, None] / expc[None, :])
return spec
### find the surface brightness due to one point source
def retr_sbrtpnts(gdat, lgal, bgal, spec, psfnintp, indxpixlelem):
# calculate the distance to all pixels from each point source
dist = retr_angldistunit(gdat, lgal, bgal, indxpixlelem)
# interpolate the PSF onto the pixels
if gdat.kernevaltype == 'ulip':
psfntemp = psfnintp(dist)
if gdat.kernevaltype == 'bspx':
pass
# scale by the PS spectrum
sbrtpnts = spec[:, None, None] * psfntemp
return sbrtpnts
def retr_psfnwdth(gdat, psfn, frac):
'''
Return the PSF width
'''
wdth = np.zeros((gdat.numbener, gdat.numbevtt))
for i in gdat.indxener:
for m in gdat.indxevtt:
psfntemp = psfn[i, :, m]
indxanglgood = np.argsort(psfntemp)
intpwdth = max(frac * np.amax(psfntemp), np.amin(psfntemp))
if intpwdth >= np.amin(psfntemp[indxanglgood]) and intpwdth <= np.amax(psfntemp[indxanglgood]):
wdthtemp = sp.interpolate.interp1d(psfntemp[indxanglgood], gdat.binspara.angl[indxanglgood], fill_value='extrapolate')(intpwdth)
else:
wdthtemp = 0.
wdth[i, m] = wdthtemp
return wdth
# lensing-related
def samp_lgalbgalfromtmpl(gdat, probtmpl):
indxpixldraw = np.random.choice(gdat.indxpixl, p=probtmpl)
    lgal = gdat.lgalgrid[indxpixldraw] + np.random.randn() * gdat.sizepixl
    bgal = gdat.bgalgrid[indxpixldraw] + np.random.randn() * gdat.sizepixl
return lgal, bgal
## custom random variables, pdfs, cdfs and icdfs
### probability distribution functions
def retr_lprbpois(data, modl):
lprb = data * np.log(modl) - modl - sp.special.gammaln(data + 1)
return lprb
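def _check_lprbpois():
    # Illustrative cross-check (not part of the original code): retr_lprbpois is the
    # Poisson log-probability, so it should agree with scipy's poisson.logpmf for
    # integer counts and positive model rates. The inputs below are made up.
    data = np.arange(5)
    modl = np.full(5, 2.5)
    return np.allclose(retr_lprbpois(data, modl), pss.logpmf(data, modl))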
### probability density functions
def pdfn_self(xdat, minm, maxm):
pdfn = 1. / (maxm - minm)
return pdfn
def pdfn_expo(xdat, maxm, scal):
if (xdat > maxm).any():
pdfn = 0.
else:
pdfn = 1. / scal / (1. - np.exp(-maxm / scal)) * np.exp(-xdat / scal)
return pdfn
def pdfn_dexp(xdat, maxm, scal):
pdfn = 0.5 * pdfn_expo(np.fabs(xdat), maxm, scal)
return pdfn
def pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr):
if np.isscalar(xdat):
xdat = np.array([xdat])
faca = 1. / (brek**(sloplowr - slopuppr) * (brek**(1. - sloplowr) - minm**(1. - sloplowr)) / \
(1. - sloplowr) + (maxm**(1. - slopuppr) - brek**(1. - slopuppr)) / (1. - slopuppr))
facb = faca * brek**(sloplowr - slopuppr) / (1. - sloplowr)
pdfn = np.empty_like(xdat)
indxlowr = np.where(xdat <= brek)[0]
indxuppr = np.where(xdat > brek)[0]
if indxlowr.size > 0:
pdfn[indxlowr] = faca * brek**(sloplowr - slopuppr) * xdat[indxlowr]**(-sloplowr)
if indxuppr.size > 0:
pdfn[indxuppr] = faca * xdat[indxuppr]**(-slopuppr)
return pdfn
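def _check_pdfn_dpow():
    # Rough numerical check (illustrative, parameters are made up): the broken
    # power-law pdf should integrate to approximately 1 over [minm, maxm].
    minm, maxm, brek, sloplowr, slopuppr = 1., 100., 10., 1.5, 2.5
    xdat = np.linspace(minm, maxm, 200000)
    pdfn = pdfn_dpow(xdat, minm, maxm, brek, sloplowr, slopuppr)
    return np.sum(pdfn) * (xdat[1] - xdat[0])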
def pdfn_powr(xdat, minm, maxm, slop):
norm = (1. - slop) / (maxm**(1. - slop) - minm**(1. - slop))
pdfn = norm * xdat**(-slop)
return pdfn
def pdfn_logt(xdat, minm, maxm):
pdfn = 1. / (np.log(maxm) - np.log(minm)) / xdat
return pdfn
def pdfn_igam(xdat, slop, cutf):
pdfn = sp.stats.invgamma.pdf(xdat, slop - 1., scale=cutf)
return pdfn
def pdfn_lnor(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_gaus(xdat, mean, stdv):
    pdfn = 1. / np.sqrt(2. * np.pi) / stdv * np.exp(-0.5 * ((xdat - mean) / stdv)**2)
return pdfn
def pdfn_lgau(xdat, mean, stdv):
pdfn = pdfn_gaus(np.log(xdat), np.log(mean), stdv)
return pdfn
def pdfn_atan(para, minmpara, maxmpara):
pdfn = 1. / (para**2 + 1.) / (np.arctan(maxmpara) - np.arctan(minmpara))
return pdfn
def cdfn_paragenrscalbase(gdat, strgmodl, paragenrscalbase, thisindxparagenrbase):
gmod = getattr(gdat, strgmodl)
scalparagenrbase = gmod.scalpara.genrbase[thisindxparagenrbase]
if scalparagenrbase == 'self' or scalparagenrbase == 'logt' or scalparagenrbase == 'atan':
listminmparagenrscalbase = gmod.minmpara.genrbase[thisindxparagenrbase]
factparagenrscalbase = gmod.factparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'self':
paragenrscalbaseunit = cdfn_self(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'logt':
paragenrscalbaseunit = cdfn_logt(paragenrscalbase, listminmparagenrscalbase, factparagenrscalbase)
elif scalparagenrbase == 'atan':
gmod.listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_atan(paragenrscalbase, listminmparagenrscalbase, gmod.listmaxmparagenrscalbase)
elif scalparagenrbase == 'gaus' or scalparagenrbase == 'eerr':
gmod.listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[thisindxparagenrbase]
gmod.liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[thisindxparagenrbase]
if scalparagenrbase == 'eerr':
gmod.cdfnlistminmparagenrscalbaseunit = gmod.cdfnlistminmparagenrscalbaseunit[thisindxparagenrbase]
gmod.listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[thisindxparagenrbase]
paragenrscalbaseunit = cdfn_eerr(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase, \
gmod.cdfnlistminmparagenrscalbaseunit, gmod.listparagenrscalbaseunitdiff)
else:
paragenrscalbaseunit = cdfn_gaus(paragenrscalbase, gmod.listmeanparagenrscalbase, gmod.liststdvparagenrscalbase)
elif scalparagenrbase == 'pois':
paragenrscalbaseunit = paragenrscalbase
if gdat.booldiagmode:
if paragenrscalbaseunit == 0:
print('Warning. CDF is zero.')
return paragenrscalbaseunit
def icdf_paragenrscalfull(gdat, strgmodl, paragenrunitfull, indxparagenrfullelem):
gmod = getattr(gdat, strgmodl)
# tobechanged
# temp -- change zeros to empty
paragenrscalfull = np.zeros_like(paragenrunitfull)
for scaltype in gdat.listscaltype:
listindxparagenrbasescal = gmod.listindxparagenrbasescal[scaltype]
if len(listindxparagenrbasescal) == 0:
continue
paragenrscalfull[listindxparagenrbasescal] = icdf_paragenrscalbase(gdat, strgmodl, paragenrunitfull[listindxparagenrbasescal], scaltype, listindxparagenrbasescal)
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
if indxparagenrfullelem is not None:
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
indxparagenrfulltemp = indxparagenrfullelem[l][gmod.namepara.genrelem[l][g]]
if indxparagenrfulltemp.size == 0:
continue
paragenrscalfull[indxparagenrfulltemp] = icdf_trap(gdat, strgmodl, paragenrunitfull[indxparagenrfulltemp], paragenrscalfull, \
gmod.listscalparagenrelem[l][g], gmod.namepara.genrelem[l][g], l)
if gdat.booldiagmode:
if not np.isfinite(paragenrscalfull[indxparagenrfulltemp]).all():
raise Exception('')
if not np.isfinite(paragenrscalfull).all():
raise Exception('')
return paragenrscalfull
def icdf_paragenrscalbase(gdat, strgmodl, paragenrunitbase, scaltype, indxparagenrbasescal):
gmod = getattr(gdat, strgmodl)
if scaltype == 'self' or scaltype == 'logt' or scaltype == 'atan':
minmparagenrscalbase = gmod.minmpara.genrbase[indxparagenrbasescal]
factparagenrscalbase = gmod.factpara.genrbase[indxparagenrbasescal]
if scaltype == 'self':
paragenrscalbase = tdpy.icdf_self(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'logt':
paragenrscalbase = tdpy.icdf_logt(paragenrunitbase, minmparagenrscalbase, factparagenrscalbase)
elif scaltype == 'atan':
listmaxmparagenrscalbase = gmod.listmaxmparagenrscalbase[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_atan(paragenrunitbase, minmparagenrscalbase, listmaxmparagenrscalbase)
elif scaltype == 'gaus' or scaltype == 'eerr':
listmeanparagenrscalbase = gmod.listmeanparagenrscalbase[indxparagenrbasescal]
liststdvparagenrscalbase = gmod.liststdvparagenrscalbase[indxparagenrbasescal]
if scaltype == 'eerr':
cdfnminmparagenrscalbaseunit = gmod.cdfnminmparagenrscalbaseunit[indxparagenrbasescal]
listparagenrscalbaseunitdiff = gmod.listparagenrscalbaseunitdiff[indxparagenrbasescal]
paragenrscalbase = tdpy.icdf_eerr(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase, cdfnminmparagenrscalbaseunit, listparagenrscalbaseunitdiff)
else:
paragenrscalbase = tdpy.icdf_gaus(paragenrunitbase, listmeanparagenrscalbase, liststdvparagenrscalbase)
elif scaltype == 'pois':
paragenrscalbase = paragenrunitbase
if gdat.booldiagmode:
if not np.isfinite(paragenrscalbase).all():
print('scaltype')
print(scaltype)
print('paragenrscalbase')
print(paragenrscalbase)
print('type(paragenrscalbase)')
print(type(paragenrscalbase))
print('paragenrscalbase.dtype')
print(paragenrscalbase.dtype)
raise Exception('')
return paragenrscalbase
def icdf_trap(gdat, strgmodl, cdfn, paragenrscalfull, scalcomp, nameparagenrelem, l):
gmod = getattr(gdat, strgmodl)
if scalcomp == 'self' or scalcomp == 'powr' or scalcomp == 'dpowslopbrek' or scalcomp == 'logt':
minm = getattr(gmod.minmpara, nameparagenrelem)
if scalcomp != 'self':
maxm = getattr(gmod.maxmpara, nameparagenrelem)
if scalcomp == 'powr':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio%spop%d' % (nameparagenrelem, l))]
if gdat.booldiagmode:
if not np.isfinite(slop):
raise Exception('')
if maxm < minm:
raise Exception('')
icdf = tdpy.icdf_powr(cdfn, minm, maxm, slop)
if scalcomp == 'dpowslopbrek':
distbrek = paragenrscalfull[getattr(gmod.indxpara, 'brekprio' + nameparagenrelem)[l]]
sloplowr = paragenrscalfull[getattr(gmod.indxpara, 'sloplowrprio' + nameparagenrelem)[l]]
slopuppr = paragenrscalfull[getattr(gmod.indxpara, 'slopupprprio' + nameparagenrelem)[l]]
icdf = tdpy.icdf_dpow(cdfn, minm, maxm, distbrek, sloplowr, slopuppr)
if scalcomp == 'expo':
sexp = getattr(gmod, nameparagenrelem + 'distsexppop%d' % l)
icdf = tdpy.icdf_expo(cdfn, maxm, sexp)
if scalcomp == 'self':
fact = getattr(gmod.factpara, nameparagenrelem)
icdf = tdpy.icdf_self_fact(cdfn, minm, fact)
if scalcomp == 'logt':
icdf = tdpy.icdf_logt(cdfn, minm, fact)
if scalcomp == 'dexp':
scal = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distscal')[l]]
icdf = tdpy.icdf_dexp(cdfn, maxm, scal)
if scalcomp == 'lnormeanstdv':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_lnor(cdfn, distmean, diststdv)
if scalcomp == 'igam':
slop = paragenrscalfull[getattr(gmod.indxpara, 'slopprio' + nameparagenrelem)[l]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
icdf = tdpy.icdf_igam(cdfn, slop, cutf)
if scalcomp == 'gaus':
distmean = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'distmean')[l]]
diststdv = paragenrscalfull[getattr(gmod.indxpara, nameparagenrelem + 'diststdv')[l]]
icdf = tdpy.icdf_gaus(cdfn, distmean, diststdv)
if gdat.booldiagmode:
if not np.isfinite(icdf).all():
print('icdf')
print(icdf)
raise Exception('')
return icdf
def cdfn_trap(gdat, gdatmodi, strgmodl, icdf, indxpoplthis):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmod.listscalparagenrelem = gmod.listscalparagenrelem[indxpoplthis]
cdfn = np.empty_like(icdf)
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[indxpoplthis]):
if gmod.listscalparagenrelem[k] == 'self' or gmod.listscalparagenrelem[k] == 'dexp' or gmod.listscalparagenrelem[k] == 'expo' \
or gmod.listscalparagenrelem[k] == 'powr' or gmod.listscalparagenrelem[k] == 'dpowslopbrek':
minm = getattr(gdat.fitt.minm, nameparagenrelem)
if gmod.listscalparagenrelem[k] == 'powr':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
slop = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cdfn[k] = cdfn_powr(icdf[k], minm, maxm, slop)
elif gmod.listscalparagenrelem[k] == 'dpowslopbrek':
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
brek = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[indxpoplthis]]
sloplowr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[indxpoplthis]]
slopuppr = gdatobjt.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[indxpoplthis]]
cdfn[k] = cdfn_dpow(icdf[k], minm, maxm, brek, sloplowr, slopuppr)
else:
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
cdfn[k] = cdfn_self(icdf[k], minm, fact)
if gmod.listscalparagenrelem[k] == 'lnormeanstdv':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
            cdfn[k] = cdfn_lnor(icdf[k], distmean, diststdv)
if gmod.listscalparagenrelem[k] == 'igam':
slop = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[indxpoplthis]]
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
cdfn[k] = cdfn_igam(icdf[k], slop, cutf)
if gmod.listscalparagenrelem[k] == 'gaus':
distmean = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[indxpoplthis]]
diststdv = gdatmodi.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[indxpoplthis]]
cdfn[k] = cdfn_gaus(icdf[k], distmean, diststdv)
return cdfn
### update sampler state
def updt_stat(gdat, gdatmodi):
if gdat.typeverb > 1:
print('updt_stat()')
# update the sample and the unit sample vectors
gdatmodi.this.lpritotl = gdatmodi.next.lpritotl
gdatmodi.this.lliktotl = gdatmodi.next.lliktotl
gdatmodi.this.lpostotl = gdatmodi.next.lpostotl
gdatmodi.this.paragenrscalfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrscalfull[gdatmodi.indxsampmodi])
gdatmodi.this.paragenrunitfull[gdatmodi.indxsampmodi] = np.copy(gdatmodi.next.paragenrunitfull[gdatmodi.indxsampmodi])
if gdatmodi.this.indxproptype > 0:
gdatmodi.this.indxelemfull = deepcopy(gdatmodi.next.indxelemfull)
gdatmodi.this.indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdatmodi.this.indxelemfull, 'fitt')
def initcompfromstat(gdat, gdatmodi, namerefr):
for l in gmod.indxpopl:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
minm = getattr(gdat.fitt.minmpara, nameparagenrelem)
maxm = getattr(gdat.fitt.maxmpara, nameparagenrelem)
try:
comp = getattr(gdat, namerefr + nameparagenrelem)[l][0, :]
if gmod.listscalparagenrelem[l][g] == 'self' or gmod.listscalparagenrelem[l][g] == 'logt':
fact = getattr(gdat.fitt, 'fact' + nameparagenrelem)
if gmod.listscalparagenrelem[l][g] == 'self':
compunit = cdfn_self(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'logt':
compunit = cdfn_logt(comp, minm, fact)
if gmod.listscalparagenrelem[l][g] == 'expo':
scal = getattr(gdat.fitt, 'gangdistsexp')
maxm = getattr(gdat.fitt.maxm, nameparagenrelem)
compunit = cdfn_expo(icdf, maxm, scal)
if gmod.listscalparagenrelem[l][g] == 'powr' or gmod.listscalparagenrelem[l][g] == 'igam':
slop = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slop')[l]]
if gmod.listscalparagenrelem[l][g] == 'powr':
compunit = cdfn_powr(comp, minm, maxm, slop)
if gmod.listscalparagenrelem[l][g] == 'igam':
cutf = getattr(gdat, 'cutf' + nameparagenrelem)
compunit = cdfn_igam(comp, slop, cutf)
if gmod.listscalparagenrelem[l][g] == 'dpowslopbrek':
brek = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distbrek')[l]]
sloplowr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'sloplowr')[l]]
slopuppr = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'slopuppr')[l]]
compunit = cdfn_powr(comp, minm, maxm, brek, sloplowr, slopuppr)
if gmod.listscalparagenrelem[l][g] == 'gaus':
distmean = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'distmean')[l]]
diststdv = gdatmodi.this.paragenrscalfull[getattr(gdat.fitt, 'indxparagenrbase' + nameparagenrelem + 'diststdv')[l]]
compunit = cdfn_gaus(comp, distmean, diststdv)
except:
if gdat.typeverb > 0:
print('Initialization from the reference catalog failed for %s. Sampling randomly...' % nameparagenrelem)
compunit = np.random.rand(gdatmodi.this.paragenrscalfull[gmod.indxpara.numbelem[l]].astype(int))
gdatmodi.this.paragenrunitfull[gdatmodi.this.indxparagenrfullelem[l][nameparagenrelem]] = compunit
### find the set of pixels in proximity to a position on the map
def retr_indxpixlelemconc(gdat, strgmodl, dictelem, l):
gmod = getattr(gdat, strgmodl)
lgal = dictelem[l]['lgal']
bgal = dictelem[l]['bgal']
varbampl = dictelem[l][gmod.nameparagenrelemampl[l]]
if gmod.typeelemspateval[l] == 'locl':
listindxpixlelem = [[] for k in range(lgal.size)]
for k in range(lgal.size):
indxpixlpnts = retr_indxpixl(gdat, bgal[k], lgal[k])
indxfluxproxtemp = np.digitize(varbampl[k], gdat.binspara.prox)
if indxfluxproxtemp > 0:
indxfluxproxtemp -= 1
if indxfluxproxtemp == gdat.binspara.prox.size - 1:
print('Warning! Index of the proximity pixel list overflew. Taking the largest list...')
indxfluxproxtemp -= 1
indxpixlelem = gdat.indxpixlprox[indxfluxproxtemp][indxpixlpnts]
if isinstance(indxpixlelem, int):
indxpixlelem = gdat.indxpixl
listindxpixlelem[k] = indxpixlelem
listindxpixlelemconc = np.unique(np.concatenate(listindxpixlelem))
else:
listindxpixlelemconc = gdat.indxpixl
listindxpixlelem = gdat.indxpixl
return listindxpixlelem, listindxpixlelemconc
### find the distance between two points on the map
def retr_angldistunit(gdat, lgal, bgal, indxpixlelem, retranglcosi=False):
if gdat.typepixl == 'heal':
xdat, ydat, zaxi = retr_unit(lgal, bgal)
anglcosi = gdat.xdatgrid[indxpixlelem] * xdat + gdat.ydatgrid[indxpixlelem] * ydat + gdat.zaxigrid[indxpixlelem] * zaxi
if retranglcosi:
return anglcosi
else:
angldist = np.arccos(anglcosi)
return angldist
else:
angldist = np.sqrt((lgal - gdat.lgalgrid[indxpixlelem])**2 + (bgal - gdat.bgalgrid[indxpixlelem])**2)
return angldist
### find the pixel index of a point on the map
def retr_indxpixl(gdat, bgal, lgal):
if gdat.typepixl == 'heal':
indxpixl = gdat.pixlcnvt[hp.ang2pix(gdat.numbsideheal, np.pi / 2. - bgal, lgal)]
if gdat.booldiagmode:
if (indxpixl == -1).any():
raise Exception('pixlcnvt went negative!')
if gdat.typepixl == 'cart':
indxlgcr = np.floor(gdat.numbsidecart * (lgal - gdat.minmlgaldata) / 2. / gdat.maxmgangdata).astype(int)
indxbgcr = np.floor(gdat.numbsidecart * (bgal - gdat.minmbgaldata) / 2. / gdat.maxmgangdata).astype(int)
if np.isscalar(indxlgcr):
if indxlgcr < 0:
indxlgcr = 0
if indxlgcr >= gdat.numbsidecart:
indxlgcr = gdat.numbsidecart - 1
else:
indxlgcr[np.where(indxlgcr < 0)] = 0
indxlgcr[np.where(indxlgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
if np.isscalar(indxbgcr):
if indxbgcr < 0:
indxbgcr = 0
if indxbgcr >= gdat.numbsidecart:
indxbgcr = gdat.numbsidecart - 1
else:
indxbgcr[np.where(indxbgcr < 0)] = 0
indxbgcr[np.where(indxbgcr >= gdat.numbsidecart)] = gdat.numbsidecart - 1
indxpixl = indxlgcr * gdat.numbsidecart + indxbgcr
# convert to an index of non-zero exposure pixels
#indxpixl = gdat.indxpixlroficnvt[indxpixl]
return indxpixl
## obtain count maps
def retr_cntp(gdat, sbrt):
cntp = sbrt * gdat.expo * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
return cntp
## plotting
### construct path for plots
def retr_plotpath(gdat, gdatmodi, strgpdfn, strgstat, strgmodl, strgplot, nameinte=''):
if strgmodl == 'true' or strgstat == '':
path = gdat.pathinit + nameinte + strgplot + '.pdf'
elif strgstat == 'pdfn' or strgstat == 'mlik':
path = gdat.pathplotrtag + strgpdfn + '/finl/' + nameinte + strgstat + strgplot + '.pdf'
elif strgstat == 'this':
path = gdat.pathplotrtag + strgpdfn + '/fram/' + nameinte + strgstat + strgplot + '_swep%09d.pdf' % gdatmodi.cntrswep
return path
### determine the marker size
def retr_mrkrsize(gdat, strgmodl, compampl, nameparagenrelemampl):
gmod = getattr(gdat, strgmodl)
minm = getattr(gdat.minmpara, nameparagenrelemampl)
maxm = getattr(gdat.maxmpara, nameparagenrelemampl)
mrkrsize = (np.sqrt(compampl) - np.sqrt(minm)) / (np.sqrt(maxm) - np.sqrt(minm)) * (gdat.maxmmrkrsize - gdat.minmmrkrsize) + gdat.minmmrkrsize
return mrkrsize
## experiment specific
def retr_psfphubb(gmod):
# temp
gmod.psfpexpr = np.array([0.080, 0.087]) / gdat.anglfact
def retr_psfpchan(gmod):
# temp
#gmod.psfpexpr = np.array([0.25, 0.3, 0.4, 0.6, 0.7]) / gdat.anglfact
if gdat.numbenerfull == 5:
gmod.psfpexpr = np.array([0.424 / gdat.anglfact, 2.75, 0.424 / gdat.anglfact, 2.59, 0.440 / gdat.anglfact, 2.47, 0.457 / gdat.anglfact, 2.45, 0.529 / gdat.anglfact, 3.72])
if gdat.numbenerfull == 2:
gmod.psfpexpr = np.array([0.427 / gdat.anglfact, 2.57, 0.449 / gdat.anglfact, 2.49])
#gdat.psfpchan = gmod.psfpexpr[(2 * gdat.indxenerincl[:, None] + np.arange(2)[None, :]).flatten()]
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact,
# 0.30 / gdat.anglfacti\
# 0.40 / gdat.anglfacti\
# 0.60 / gdat.anglfacti\
# 0.70 / gdat.anglfacti
#gmod.psfpexpr = np.array([0.35 / gdat.anglfact, 2e-1, 1.9, 0.5 / gdat.anglfact, 1.e-1, 2.])
#gmod.psfpexpr = np.array([0.25 / gdat.anglfact, 2.0e-1, 1.9, \
# 0.30 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.40 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.60 / gdat.anglfact, 1.0e-1, 2.0, \
# 0.70 / gdat.anglfact, 1.0e-1, 2.0])
def retr_psfpsdyn(gmod):
gmod.psfpexpr = np.array([0.05])
def retr_psfpferm(gmod):
if gdat.anlytype.startswith('rec8'):
path = gdat.pathdata + 'expr/irfn/psf_P8R2_SOURCE_V6_PSF.fits'
else:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
irfn = astropy.io.fits.getdata(path, 1)
minmener = irfn['energ_lo'].squeeze() * 1e-3 # [GeV]
maxmener = irfn['energ_hi'].squeeze() * 1e-3 # [GeV]
enerirfn = np.sqrt(minmener * maxmener)
numbpsfpscal = 3
numbpsfpform = 5
fermscal = np.zeros((gdat.numbevtt, numbpsfpscal))
fermform = np.zeros((gdat.numbener, gdat.numbevtt, numbpsfpform))
strgpara = ['score', 'gcore', 'stail', 'gtail', 'ntail']
for m in gdat.indxevtt:
if gdat.anlytype.startswith('rec8'):
irfn = astropy.io.fits.getdata(path, 1 + 3 * gdat.indxevttincl[m])
fermscal[m, :] = astropy.io.fits.getdata(path, 2 + 3 * gdat.indxevttincl[m])['PSFSCALE']
else:
if m == 1:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_front.fits'
elif m == 0:
path = gdat.pathdata + 'expr/irfn/psf_P7REP_SOURCE_V15_back.fits'
else:
continue
irfn = astropy.io.fits.getdata(path, 1)
fermscal[m, :] = astropy.io.fits.getdata(path, 2)['PSFSCALE']
for k in range(numbpsfpform):
fermform[:, m, k] = sp.interpolate.interp1d(enerirfn, np.mean(irfn[strgpara[k]].squeeze(), axis=0), fill_value='extrapolate')(gdat.meanpara.ener)
# convert N_tail to f_core
for m in gdat.indxevtt:
for i in gdat.indxener:
fermform[i, m, 4] = 1. / (1. + fermform[i, m, 4] * fermform[i, m, 2]**2 / fermform[i, m, 0]**2)
# calculate the scale factor
gdat.fermscalfact = np.sqrt((fermscal[None, :, 0] * (10. * gdat.meanpara.ener[:, None])**fermscal[None, :, 2])**2 + fermscal[None, :, 1]**2)
# store the fermi PSF parameters
gmod.psfpexpr = np.zeros(gdat.numbener * gdat.numbevtt * numbpsfpform)
for m in gdat.indxevtt:
for k in range(numbpsfpform):
indxfermpsfptemp = m * numbpsfpform * gdat.numbener + gdat.indxener * numbpsfpform + k
gmod.psfpexpr[indxfermpsfptemp] = fermform[:, m, k]
def retr_refrchaninit(gdat):
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.dictrefr = []
for q in gdat.indxrefr:
gdat.dictrefr.append(dict())
gdat.refr.namepara.elemsign = ['flux', 'magt']
gdat.refr.lablelem = ['Xue+2011', 'Wolf+2008']
gdat.listnamerefr += ['xu11', 'wo08']
setattr(gdat, 'plotminmotyp', 0.)
setattr(gdat, 'plottmaxmotyp', 1.)
setattr(gmod.lablrootpara, 'otyp', 'O')
setattr(gdat, 'scalotypplot', 'self')
setattr(gmod.lablrootpara, 'otypxu11', 'O')
for name in gdat.listnamerefr:
setattr(gdat, 'plotminmotyp' + name, 0.)
setattr(gdat, 'plotmaxmotyp' + name, 1.)
if gdat.strgcnfg == 'pcat_chan_inpt_home4msc':
with open(gdat.pathinpt + 'ECDFS_Cross_ID_Hsu2014.txt', 'r') as thisfile:
for k, line in enumerate(thisfile):
if k < 18:
continue
rasccand =line[2]
declcand =line[2]
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'otyp', 'lumi']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'magt', 'reds', 'otyp']
def retr_refrchanfinl(gdat):
booltemp = False
if gdat.anlytype.startswith('extr'):
if gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] = 1490
gdat.numbpixlbgalshft[0] = 1430
else:
booltemp = True
elif gdat.anlytype.startswith('home'):
gdat.numbpixllgalshft[0] = 0
gdat.numbpixlbgalshft[0] = 0
if gdat.numbsidecart == 600:
pass
elif gdat.numbsidecart == 100:
indxtile = int(gdat.anlytype[-4:])
numbsidecntr = int(gdat.anlytype[8:12])
numbtileside = numbsidecntr / gdat.numbsidecart
indxtilexaxi = indxtile // numbtileside
indxtileyaxi = indxtile % numbtileside
gdat.numbpixllgalshft[0] += indxtilexaxi * gdat.numbsidecart
gdat.numbpixlbgalshft[0] += indxtileyaxi * gdat.numbsidecart
elif gdat.numbsidecart == 300:
gdat.numbpixllgalshft[0] += 150
gdat.numbpixlbgalshft[0] += 150
else:
booltemp = True
else:
booltemp = True
if booltemp:
raise Exception('Reference elements cannot be aligned with the spatial axes!')
## WCS object for rotating reference elements into the ROI
if gdat.numbener == 2:
gdat.listpathwcss[0] = gdat.pathinpt + 'CDFS-4Ms-0p5to2-asca-im-bin1.fits'
else:
gdat.listpathwcss[0] = gdat.pathinpt + '0.5-0.91028_flux_%sMs.img' % gdat.anlytype[4]
# Xue et al. (2011)
#with open(gdat.pathinpt + 'chancatl.txt', 'r') as thisfile:
pathfile = gdat.pathinpt + 'Xue2011.fits'
    hdun = astropy.io.fits.open(pathfile)
    hdun.info()
    lgalchan = hdun[1].data['_Glon'] / 180. * np.pi
    bgalchan = hdun[1].data['_Glat'] / 180. * np.pi
fluxchansoft = hdun[1].data['SFlux']
fluxchanhard = hdun[1].data['HFlux']
objttypechan = hdun[1].data['Otype']
gdat.refrlumi[0][0] = hdun[1].data['Lx']
# position
gdat.refr.dictelem[0]['lgal'] = lgalchan
gdat.refr.dictelem[0]['bgal'] = bgalchan
# spectra
gdat.refrspec = [[np.zeros((3, gdat.numbener, lgalchan.size))]]
if gdat.numbener == 2:
gdat.refrspec[0][0, 0, :] = fluxchansoft * 0.624e9
gdat.refrspec[0][0, 1, :] = fluxchanhard * 0.624e9 / 16.
else:
gdat.refrspec[0][0, :, :] = 2. * fluxchansoft[None, :] * 0.624e9
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :]
# fluxes
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
# spectral indices
if gdat.numbener > 1:
gdat.refrsind[0] = -np.log(gdat.refrspec[0][0, 1, :] / gdat.refrspec[0][0, 0, :]) / np.log(np.sqrt(7. / 2.) / np.sqrt(0.5 * 2.))
## object type
objttypechantemp = np.zeros(lgalchan.size) - 1.
indx = np.where(objttypechan == 'AGN')[0]
objttypechantemp[indx] = 0.165
indx = np.where(objttypechan == 'Galaxy')[0]
objttypechantemp[indx] = 0.495
indx = np.where(objttypechan == 'Star')[0]
objttypechantemp[indx] = 0.835
gdat.refrotyp[0][0] = objttypechantemp
# Wolf et al. (2011)
path = gdat.pathdata + 'inpt/Wolf2008.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['_Glon'])
    gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['_Glat'])
gdat.refrmagt[1][0] = data['Rmag']
gdat.refrreds[1][0] = data['MCz']
#listname = []
#for k in range(data['MCclass'].size):
# if not data['MCclass'][k] in listname:
# listname.append(data['MCclass'][k])
listname = ['Galaxy', 'Galaxy (Uncl!)', 'QSO (Gal?)', 'Galaxy (Star?)', 'Star', 'Strange Object', 'QSO', 'WDwarf']
gdat.refrotyp[1][0] = np.zeros_like(gdat.refrreds[1][0]) - 1.
for k, name in enumerate(listname):
indx = np.where(data['MCclass'] == name)[0]
gdat.refrotyp[1][0][indx] = k / 10.
# error budget
for name in ['lgal', 'bgal', 'sind', 'otyp', 'lumi', 'magt', 'reds']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_refrferminit(gdat):
gdat.listnamerefr += ['ac15', 'ma05']
gdat.indxrefr = np.arange(gdat.numbrefr)
gdat.refr.lablelem = ['Acero+2015', 'Manchester+2005']
gdat.refr.namepara.elemsign = ['flux', 'flux0400']
setattr(gmod.lablrootpara, 'curvac15', '%s_{3FGL}' % gdat.lablcurv)
setattr(gmod.lablrootpara, 'expcac15', 'E_{c,3FGL}')
for name in gdat.listnamerefr:
setattr(gdat.minmpara, 'curv' + name, -1.)
setattr(gdat.maxmpara, 'curv' + name, 1.)
setattr(gdat.minmpara, 'expc' + name, 0.1)
setattr(gdat.maxmpara, 'expc' + name, 10.)
gdat.refr.namepara.elem[0] += ['lgal', 'bgal', 'flux', 'sind', 'curv', 'expc', 'tvar', 'etag', 'styp', 'sindcolr0001', 'sindcolr0002']
gdat.refr.namepara.elem[1] += ['lgal', 'bgal', 'flux0400', 'per0', 'per1']
def retr_refrfermfinl(gdat):
gdat.minmstyp = -0.5
gdat.maxmstyp = 3.5
gdat.lablstyp = 'S'
gmod.scalstypplot = 'self'
gdat.minmtvar = 0.
gdat.maxmtvar = 400.
gdat.labltvar = 'T'
gmod.scaltvarplot = 'logt'
# Acero+2015
path = gdat.pathdata + 'expr/pnts/gll_psc_v16.fit'
fgl3 = astropy.io.fits.getdata(path)
gdat.refr.dictelem[0]['lgal'] = np.deg2rad(fgl3['glon'])
gdat.refr.dictelem[0]['lgal'] = np.pi - ((gdat.refr.dictelem[0]['lgal'] - np.pi) % (2. * np.pi))
gdat.refr.dictelem[0]['bgal'] = np.deg2rad(fgl3['glat'])
gdat.refr.numbelemfull = gdat.refr.dictelem[0]['lgal'].size
gdat.refrspec = [np.empty((3, gdat.numbener, gdat.refr.dictelem[0]['lgal'].size))]
gdat.refrspec[0][0, :, :] = np.stack((fgl3['Flux300_1000'], fgl3['Flux1000_3000'], fgl3['Flux3000_10000']))[gdat.indxenerincl, :] / gdat.deltener[:, None]
fgl3specstdvtemp = np.stack((fgl3['Unc_Flux100_300'], fgl3['Unc_Flux300_1000'], fgl3['Unc_Flux1000_3000'], fgl3['Unc_Flux3000_10000'], \
fgl3['Unc_Flux10000_100000']))[gdat.indxenerincl, :, :] / gdat.deltener[:, None, None]
gdat.refrspec[0][1, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 0]
gdat.refrspec[0][2, :, :] = gdat.refrspec[0][0, :, :] + fgl3specstdvtemp[:, :, 1]
gdat.refrspec[0][np.where(np.isfinite(gdat.refrspec[0]) == False)] = 0.
gdat.refrflux[0] = gdat.refrspec[0][:, gdat.indxenerpivt, :]
gdat.refrsindcolr0001[0] = -np.log(gdat.refrspec[0][:, 1, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[1] / gdat.enerpivt)
gdat.refrsindcolr0002[0] = -np.log(gdat.refrspec[0][:, 2, :] / gdat.refrflux[0]) / np.log(gdat.meanpara.ener[2] / gdat.enerpivt)
fgl3axisstdv = (fgl3['Conf_68_SemiMinor'] + fgl3['Conf_68_SemiMajor']) * 0.5
fgl3anglstdv = np.deg2rad(fgl3['Conf_68_PosAng']) # [rad]
fgl3lgalstdv = fgl3axisstdv * abs(np.cos(fgl3anglstdv))
fgl3bgalstdv = fgl3axisstdv * abs(np.sin(fgl3anglstdv))
gdat.refretag[0] = np.zeros(gdat.refr.dictelem[0]['lgal'].size, dtype=object)
for k in range(gdat.refr.dictelem[0]['lgal'].size):
gdat.refretag[0][k] = '%s, %s, %s' % (fgl3['Source_Name'][k], fgl3['CLASS1'][k], fgl3['ASSOC1'][k])
gdat.refrtvar[0] = fgl3['Variability_Index']
gdat.refrstyp[0] = np.zeros_like(gdat.refr.dictelem[0]['lgal']) - 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PowerLaw ')] = 0
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'LogParabola ')] = 1
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLExpCutoff ')] = 2
gdat.refrstyp[0][np.where(fgl3['SpectrumType'] == 'PLSuperExpCutoff')] = 3
indx = np.where(gdat.refrstyp[0] == -1)[0]
if indx.size > 0:
raise Exception('')
gdat.refrsind[0] = fgl3['Spectral_Index']
gdat.refrcurv[0] = fgl3['beta']
gdat.refrexpc[0] = fgl3['Cutoff'] * 1e-3
gdat.refrcurv[0][np.where(np.logical_not(np.isfinite(gdat.refrcurv[0])))] = -10.
gdat.refrexpc[0][np.where(np.logical_not(np.isfinite(gdat.refrexpc[0])))] = 0.
gdat.refrsind[0] = np.tile(gdat.refrsind[0], (3, 1))
gdat.refrcurv[0] = np.tile(gdat.refrcurv[0], (3, 1))
gdat.refrexpc[0] = np.tile(gdat.refrexpc[0], (3, 1))
# Manchester+2005
path = gdat.pathdata + 'inpt/Manchester2005.fits'
data = astropy.io.fits.getdata(path)
gdat.refrlgal[1] = np.deg2rad(data['glon'])
gdat.refrlgal[1] = ((gdat.refrlgal[1] - np.pi) % (2. * np.pi)) - np.pi
gdat.refrbgal[1] = np.deg2rad(data['glat'])
gdat.refrper0[1] = data['P0']
gdat.refrper1[1] = data['P1']
gdat.refrflux0400[1] = data['S400']
#gdat.refrdism[1] = data['DM']
#gdat.refrdlos[1] = data['Dist']
# error budget
for name in ['lgal', 'bgal', 'per0', 'per1', 'flux0400', 'tvar', 'styp']:
refrtile = [[] for q in gdat.indxrefr]
refrfeat = getattr(gdat.refr, name)
for q in gdat.indxrefr:
if len(refrfeat[q]) > 0:
refrtile[q] = np.tile(refrfeat[q], (3, 1))
setattr(gdat.refr, name, refrtile)
def retr_singgaus(scaldevi, sigc):
psfn = 1. / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_singking(scaldevi, sigc, gamc):
psfn = 1. / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc)
return psfn
def retr_doubgaus(scaldevi, frac, sigc, sigt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2)
return psfn
def retr_gausking(scaldevi, frac, sigc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * np.exp(-0.5 * scaldevi**2 / sigc**2) + (1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
def retr_doubking(scaldevi, frac, sigc, gamc, sigt, gamt):
psfn = frac / 2. / np.pi / sigc**2 * (1. - 1. / gamc) * (1. + scaldevi**2 / 2. / gamc / sigc**2)**(-gamc) + \
(1. - frac) / 2. / np.pi / sigt**2 * (1. - 1. / gamt) * (1. + scaldevi**2 / 2. / gamt / sigt**2)**(-gamt)
return psfn
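def _example_doubking_profile():
    # Illustrative evaluation (not from the original code): a double-King PSF
    # profile on a grid of scaled angular offsets, with made-up parameters.
    scaldevi = np.linspace(0., 10., 100)
    return retr_doubking(scaldevi, frac=0.8, sigc=1., gamc=3., sigt=2., gamt=5.)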
def retr_lgalbgal(gang, aang):
lgal = gang * np.cos(aang)
bgal = gang * np.sin(aang)
return lgal, bgal
def retr_gang(lgal, bgal):
gang = np.arccos(np.cos(lgal) * np.cos(bgal))
return gang
def retr_aang(lgal, bgal):
aang = np.arctan2(bgal, lgal)
return aang
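def _example_gang_aang_roundtrip():
    # Round-trip sketch (illustrative): converting (gang, aang) to (lgal, bgal) and
    # back approximately recovers the input angles for small angular offsets.
    lgal, bgal = retr_lgalbgal(0.01, 0.3)
    return retr_gang(lgal, bgal), retr_aang(lgal, bgal)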
def show_paragenrscalfull(gdat, gdatmodi, strgstat='this', strgmodl='fitt', indxsampshow=None):
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodstat = getattr(gdatobjt, strgstat)
print('strgmodl: ' + strgmodl)
print('strgstat: ' + strgstat)
print('%5s %20s %30s %30s %15s' % ('index', 'namepara', 'paragenrunitfull', 'paragenrscalfull', 'scalpara'))
for k in gmod.indxparagenrfull:
if indxsampshow is not None and not k in indxsampshow:
continue
if gmod.numbparaelem > 0:
booltemp = False
for l in gmod.indxpopl:
if k == gmod.indxparagenrelemsing[l][0]:
booltemp = True
if booltemp:
print('')
print('%5d %20s %30g %30g %15s' % (k, gmod.namepara.genrfull[k], gmodstat.paragenrunitfull[k], gmodstat.paragenrscalfull[k], gmod.scalpara.genrfull[k]))
def prop_stat(gdat, gdatmodi, strgmodl, thisindxelem=None, thisindxpopl=None, brth=False, deth=False):
if gdat.typeverb > 1:
print('prop_stat()')
#indxproptype
# within, birth, death, split, merge
# 0, 1, 2, 3, 4
gmod = getattr(gdat, strgmodl)
gdatobjt = retr_gdatobjt(gdat, gdatmodi, strgmodl)
gmodthis = getattr(gdatobjt, 'this')
gmodnext = getattr(gdatobjt, 'next')
if gmod.numbparaelem > 0:
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(gmodthis.indxelemfull[l]) > len(set(gmodthis.indxelemfull[l])):
raise Exception('Repeating entry in the element index list!')
thisindxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodthis.indxelemfull, strgmodl)
setattr(gmodthis, 'indxparagenrfullelem', thisindxparagenrfullelem)
else:
thisindxparagenrfullelem = None
gdatmodi.this.boolpropfilt = True
# index of the population in which a transdimensional proposal will be attempted
if gmod.numbparaelem > 0:
if thisindxpopl is None:
gdatmodi.indxpopltran = np.random.choice(gmod.indxpopl)
else:
gdatmodi.indxpopltran = thisindxpopl
numbelemtemp = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# forced death or birth does not check for the prior on the dimensionality on purpose!
if gmod.numbparaelem > 0 and (deth or brth or np.random.rand() < gdat.probtran) and \
not (numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if brth or deth or np.random.rand() < gdat.probbrde or \
numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] and numbelemtemp == 1 or numbelemtemp == 0:
## births and deaths
if numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran] or deth:
gdatmodi.this.indxproptype = 2
elif numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or brth:
gdatmodi.this.indxproptype = 1
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 1
else:
gdatmodi.this.indxproptype = 2
else:
## splits and merges
if numbelemtemp == gmod.minmpara.numbelem[gdatmodi.indxpopltran] or numbelemtemp < 2:
gdatmodi.this.indxproptype = 3
elif numbelemtemp == gmod.maxmpara.numbelem[gdatmodi.indxpopltran]:
gdatmodi.this.indxproptype = 4
else:
if np.random.rand() < 0.5:
gdatmodi.this.indxproptype = 3
else:
gdatmodi.this.indxproptype = 4
else:
if gdat.booldiagmode and (gdatmodi.stdp > 1e2).any():
raise Exception('')
thisindxparagenrfullelemconc = []
for l in gmod.indxpopl:
thisindxparagenrfullelemconc.append(thisindxparagenrfullelem[l]['full'])
# get the indices of the current parameter vector
if gmod.numbparaelem > 0:
thisindxsampfull = np.concatenate([gmod.indxparagenrbasestdv] + thisindxparagenrfullelemconc)
else:
thisindxsampfull = gmod.indxparagenrbasestdv
thisstdp = gdatmodi.stdp[gdat.indxstdppara[thisindxsampfull]]
if not np.isfinite(thisstdp).all():
raise Exception('')
gdatmodi.this.indxproptype = 0
if gdat.booldiagmode and gdat.probspmr == 0 and gdatmodi.this.indxproptype > 2:
raise Exception('')
if gdat.typeverb > 1:
print('gdatmodi.this.indxproptype')
print(gdatmodi.this.indxproptype)
if gdatmodi.this.indxproptype == 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = gmodthis.indxelemfull
if gdatmodi.this.indxproptype > 0:
gmodnext.paragenrunitfull = np.copy(gmodthis.paragenrunitfull)
gmodnext.paragenrscalfull = np.copy(gmodthis.paragenrscalfull)
if gmod.numbparaelem > 0:
gmodnext.indxelemfull = deepcopy(gmodthis.indxelemfull)
if gdatmodi.this.indxproptype == 0:
## proposal scale
        if False:
            # amplitude-dependent proposal scale (currently disabled; this block is incomplete and never executed)
for l in gmod.indxpopl:
thiscompampl = gmodthis.paragenrscalfull[thisindxparagenrfullelem[indxelemfull][gmod.nameparagenrelemampl[l]][l]]
compampl = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
minmcompampl = getattr(gmod.minmpara, gmod.nameparagenrelemampl[l])
thiscompunit = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
compunit = gmodnext.paragenrscalfull[thisindxparagenrfullelem[gmod.nameparagenrelemampl[l]][l][indxelemfull]]
if nameparagenrelem == gmod.nameparagenrelemampl[l]:
# temp -- this only works if compampl is powr distributed
gdatmodi.this.stdp = stdpcomp / (thiscompampl / minmcompampl)**2.
gdatmodi.this.stdv = stdpcomp / (compampl / minmcompampl)**2.
gdatmodi.this.ltrp += np.sum(0.5 * (nextcompunit - thiscompunit)**2 * (1. / gdatmodi.this.stdv**2 - 1. / gdatmodi.this.stdv**2))
else:
gdatmodi.this.stdp = stdpcomp / (np.minimum(thiscompampl, compampl) / minmcompampl)**0.5
## propose a step
diffparagenrunitfull = np.random.normal(size=thisindxsampfull.size) * thisstdp
gmodnext.paragenrunitfull[thisindxsampfull] = gmodthis.paragenrunitfull[thisindxsampfull] + diffparagenrunitfull
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
indxsamplowr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] < 0.)[0]
if indxsamplowr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr] = abs(gmodnext.paragenrunitfull[gmod.numbpopl+indxsamplowr]) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
indxsampuppr = np.where(gmodnext.paragenrunitfull[gmod.numbpopl:] > 1.)[0]
if indxsampuppr.size > 0:
gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] = (gmodnext.paragenrunitfull[gmod.numbpopl+indxsampuppr] - 1.) % 1.
if gdat.booldiagmode:
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 1).any():
raise Exception('')
if (gmodnext.paragenrunitfull[gmod.numbpopl:] == 0).any():
raise Exception('')
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
gmodnext.paragenrscalfull = icdf_paragenrscalfull(gdat, strgmodl, gmodnext.paragenrunitfull, thisindxparagenrfullelem)
if gdat.booldiagmode:
if not np.isfinite(gmodnext.paragenrunitfull).all():
raise Exception('')
if np.amin(gmodnext.paragenrunitfull[gmod.numbpopl:]) < 0.:
raise Exception('')
if np.amax(gmodnext.paragenrunitfull[gmod.numbpopl:]) > 1.:
raise Exception('')
if not np.isfinite(gmodnext.paragenrscalfull).all():
raise Exception('')
if gdatmodi.this.indxproptype > 0:
gdatmodi.indxsamptran = []
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.auxipara = np.random.rand(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
elif gdatmodi.this.indxproptype != 2:
gdatmodi.this.auxipara = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
# find an empty slot in the element list
for u in range(gmod.maxmpara.numbelem[gdatmodi.indxpopltran]):
if not u in gdatmodi.this.indxelemfull[gdatmodi.indxpopltran]:
break
gdatmodi.indxelemmodi = [u]
gdatmodi.indxelemfullmodi = [gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)]
# sample indices to add the new element
gdatmodi.indxparagenrfullelemaddd = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemaddd)
gmodnext.indxelemfull[gdatmodi.indxpopltran].append(gdatmodi.indxelemmodi[0])
if gdatmodi.this.indxproptype == 1:
# sample auxiliary variables
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.this.auxipara
# death
if gdatmodi.this.indxproptype == 2:
# occupied element index to be killed
if thisindxelem is None:
dethindxindxelem = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
else:
dethindxindxelem = thisindxelem
# element index to be killed
gdatmodi.indxelemmodi = []
gdatmodi.indxelemfullmodi = []
if gdat.typeverb > 1:
print('dethindxindxelem')
print(dethindxindxelem)
gdatmodi.indxelemmodi.append(gmodthis.indxelemfull[gdatmodi.indxpopltran][dethindxindxelem])
gdatmodi.indxelemfullmodi.append(dethindxindxelem)
# parameter indices to be killed
indxparagenrfullelemdeth = retr_indxparaelem(gmod, gdatmodi.indxpopltran, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.append(indxparagenrfullelemdeth)
gdatmodi.this.auxipara = gmodthis.paragenrscalfull[indxparagenrfullelemdeth]
if gdatmodi.this.indxproptype > 2:
gdatmodi.comppare = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compfrst = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
gdatmodi.compseco = np.empty(gmod.numbparagenrelemsing[gdatmodi.indxpopltran])
# split
if gdatmodi.this.indxproptype == 3:
# find the probability of splitting elements
gdatmodi.indxelemfullsplt = np.random.choice(np.arange(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]], dtype=int))
gdatmodi.indxelemsplt = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullsplt]
gdatmodi.indxelemfullmodi.insert(0, gdatmodi.indxelemfullsplt)
gdatmodi.indxelemmodi.insert(0, gdatmodi.indxelemsplt)
# sample indices for the first element
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.indxelemmodi[0])
gdatmodi.indxsamptran.insert(0, gdatmodi.indxparagenrfullelemfrst)
# sample indices for the second element
gdatmodi.indxsampseco = gdatmodi.indxparagenrfullelemaddd
# take the parent element parameters
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gdatmodi.comppare[k] = np.copy(gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]])
# draw the auxiliary parameters
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.auxipara[g] = np.random.randn() * gdat.radispmr
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = np.random.rand()
else:
gdatmodi.this.auxipara[g] = icdf_trap(gdat, strgmodl, np.random.rand(), gmodthis.paragenrscalfull, gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], l)
# determine the new parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[1]) * gdatmodi.this.auxipara[0]
else:
gdatmodi.compfrst[0] = gdatmodi.comppare[0] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[0]
gdatmodi.compfrst[1] = gdatmodi.comppare[1] + (1. - gdatmodi.this.auxipara[2]) * gdatmodi.this.auxipara[1]
gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[1] * gdatmodi.this.auxipara[0]
else:
gdatmodi.compseco[0] = gdatmodi.comppare[0] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[0]
gdatmodi.compseco[1] = gdatmodi.comppare[1] - gdatmodi.this.auxipara[2] * gdatmodi.this.auxipara[1]
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
for g in range(gmod.numbparagenrelemsing[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.compfrst[g] = gdatmodi.comppare[g]
gdatmodi.compseco[g] = gdatmodi.this.auxipara[g]
# place the new parameters into the sample vector
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compfrst, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.compfrst
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.compseco, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[1]] = gdatmodi.compseco
# check for prior boundaries
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
if np.fabs(gdatmodi.compfrst[0]) > gdat.maxmelin or np.fabs(gdatmodi.compseco[0]) > gdat.maxmelin:
gdatmodi.this.boolpropfilt = False
else:
if np.fabs(gdatmodi.compfrst[0]) > maxmlgal or np.fabs(gdatmodi.compseco[0]) > maxmlgal or \
np.fabs(gdatmodi.compfrst[1]) > maxmbgal or np.fabs(gdatmodi.compseco[1]) > maxmbgal:
gdatmodi.this.boolpropfilt = False
if gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]) or \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] < getattr(gmod.minmpara, gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
if not gdatmodi.this.boolpropfilt:
print('Rejecting the proposal due to a split that falls out of the prior...')
if gdatmodi.this.indxproptype == 4:
# determine the index of the primary element to be merged (in the full element list)
gdatmodi.indxelemfullmergfrst = np.random.choice(np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran])))
## first element index to be merged
gdatmodi.mergindxelemfrst = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergfrst]
# find the probability of merging this element with the others
probmerg = retr_probmerg(gdat, gdatmodi, gmodthis.paragenrscalfull, thisindxparagenrfullelem, gdatmodi.indxpopltran, 'seco', typeelem=gmod.typeelem)
indxelemfulltemp = np.arange(len(gmodthis.indxelemfull[gdatmodi.indxpopltran]))
if gdat.booldiagmode:
if indxelemfulltemp.size < 2:
raise Exception('')
gdatmodi.indxelemfullmergseco = np.random.choice(np.setdiff1d(indxelemfulltemp, np.array([gdatmodi.indxelemfullmergfrst])), p=probmerg)
gdatmodi.indxelemfullmodi = np.sort(np.array([gdatmodi.indxelemfullmergfrst, gdatmodi.indxelemfullmergseco]))
# parameters of the first element to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## first
gdatmodi.compfrst[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[0]]]
# determine indices of the modified elements in the sample vector
## first element
# temp -- this would not work for multiple populations !
gdatmodi.indxparagenrfullelemfrst = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemfrst)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemfrst)
## second element index to be merged
gdatmodi.mergindxelemseco = gmodthis.indxelemfull[gdatmodi.indxpopltran][gdatmodi.indxelemfullmergseco]
## second element
gdatmodi.indxparagenrfullelemseco = retr_indxparaelem(gmod, l, gdatmodi.mergindxelemseco)
gdatmodi.indxsamptran.append(gdatmodi.indxparagenrfullelemseco)
# parameters of the elements to be merged
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
## second
gdatmodi.compseco[k] = gmodthis.paragenrscalfull[thisindxparagenrfullelem[gdatmodi.indxpopltran][nameparagenrelem][gdatmodi.indxelemfullmodi[1]]]
# indices of the element to be merged
gdatmodi.indxelemmodi = [gdatmodi.mergindxelemfrst, gdatmodi.mergindxelemseco]
# auxiliary parameters
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
else:
gdatmodi.this.auxipara[0] = gdatmodi.compseco[0] - gdatmodi.compfrst[0]
gdatmodi.this.auxipara[1] = gdatmodi.compseco[1] - gdatmodi.compfrst[1]
gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] / \
(gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if not gmod.boolcompposi[gdatmodi.indxpopltran][g] and g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.this.auxipara[g] = gdatmodi.compseco[g]
# merged element
gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] = gdatmodi.compfrst[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] + \
gdatmodi.compseco[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]
if gdatmodi.comppare[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]] > getattr(gdat, 'maxm' + gmod.nameparagenrelemampl[gdatmodi.indxpopltran]):
gdatmodi.this.boolpropfilt = False
if gdat.typeverb > 1:
print('Proposal rejected due to falling outside the prior.')
return
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[1]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
else:
gdatmodi.comppare[0] = gdatmodi.compfrst[0] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[0] - gdatmodi.compfrst[0])
gdatmodi.comppare[1] = gdatmodi.compfrst[1] + (1. - gdatmodi.this.auxipara[2]) * (gdatmodi.compseco[1] - gdatmodi.compfrst[1])
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + (1. - gdatmodi.this.auxipara[gmod.indxparagenrelemampl[gdatmodi.indxpopltran]]) * \
(gdatmodi.compseco[g] - gdatmodi.compfrst[g])
elif g == gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
gdatmodi.comppare[g] = gdatmodi.compfrst[g] + gdatmodi.compseco[g]
else:
gdatmodi.comppare[g] = gdatmodi.compfrst[g]
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = cdfn_trap(gdat, gdatmodi, strgmodl, gdatmodi.comppare, gdatmodi.indxpopltran)
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0]] = gdatmodi.comppare
# calculate the proposed list of pairs
if gdat.typeverb > 1:
print('mergindxfrst: ', gdatmodi.mergindxelemfrst)
print('gdatmodi.indxelemfullmergfrst: ', gdatmodi.indxelemfullmergfrst)
print('mergindxseco: ', gdatmodi.mergindxelemseco)
print('gdatmodi.indxelemfullmergseco: ', gdatmodi.indxelemfullmergseco)
print('indxparagenrfullelemfrst: ', gdatmodi.indxparagenrfullelemfrst)
print('indxparagenrfullelemseco: ', gdatmodi.indxparagenrfullelemseco)
if gdat.typeverb > 1 and (gdatmodi.this.indxproptype == 3 or gdatmodi.this.boolpropfilt and gdatmodi.this.indxproptype == 4):
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
print('elinfrst: ', gdatmodi.compfrst[0])
print('amplfrst: ', gdatmodi.compfrst[1])
print('elinseco: ', gdatmodi.compseco[0])
print('amplseco: ', gdatmodi.compseco[1])
print('elinpare: ', gdatmodi.comppare[0])
print('fluxpare: ', gdatmodi.comppare[1])
print('auxipara[0][0]: ', gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdatmodi.this.auxipara[1])
else:
print('lgalfrst: ', gdat.anglfact * gdatmodi.compfrst[0])
print('bgalfrst: ', gdat.anglfact * gdatmodi.compfrst[1])
print('amplfrst: ', gdatmodi.compfrst[2])
print('lgalseco: ', gdat.anglfact * gdatmodi.compseco[0])
print('bgalseco: ', gdat.anglfact * gdatmodi.compseco[1])
print('amplseco: ', gdatmodi.compseco[2])
print('lgalpare: ', gdat.anglfact * gdatmodi.comppare[0])
print('bgalpare: ', gdat.anglfact * gdatmodi.comppare[1])
print('fluxpare: ', gdatmodi.comppare[2])
print('auxipara[0][0]: ', gdat.anglfact * gdatmodi.this.auxipara[0])
print('auxipara[0][1]: ', gdat.anglfact * gdatmodi.this.auxipara[1])
print('auxipara[0][2]: ', gdatmodi.this.auxipara[2])
if gmod.numbparaelem > 0 and gdatmodi.this.indxproptype > 0 and gdatmodi.this.boolpropfilt:
# change the number of elements
if gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 3:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] + 1
if gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4:
gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] - 1
gmodnext.paragenrunitfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]] = gmodnext.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
# remove the element from the occupied element list
if (gdatmodi.this.indxproptype == 2 or gdatmodi.this.indxproptype == 4):
for a, indxelem in enumerate(gdatmodi.indxelemmodi):
if a == 0 and gdatmodi.this.indxproptype == 2 or a == 1 and gdatmodi.this.indxproptype == 4:
gmodnext.indxelemfull[gdatmodi.indxpopltran].remove(indxelem)
if gdatmodi.this.indxproptype == 0:
gdatmodi.indxsampmodi = thisindxsampfull
else:
if gdatmodi.this.indxproptype == 1:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gdatmodi.this.indxproptype == 2:
gdatmodi.indxsampmodi = [gmod.indxpara.numbelem[gdatmodi.indxpopltran]]
if gdatmodi.this.indxproptype == 3:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), \
gdatmodi.indxsamptran[0], gdatmodi.indxsamptran[1]))
if gdatmodi.this.indxproptype == 4:
gdatmodi.indxsampmodi = np.concatenate((np.array([gmod.indxpara.numbelem[gdatmodi.indxpopltran]]), gdatmodi.indxsamptran[0]))
if gmod.numbparaelem > 0:
if gdatmodi.this.indxproptype == 0:
indxparagenrfullelem = thisindxparagenrfullelem
else:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gmodnext.indxelemfull, strgmodl)
if gdat.typeverb > 1:
print('gdatmodi.indxsampmodi')
print(gdatmodi.indxsampmodi)
if gmod.numbparaelem > 0:
print('gmodthis.indxelemfull')
print(gmodthis.indxelemfull)
print('gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int)')
print(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[gdatmodi.indxpopltran]].astype(int))
if gdatmodi.this.indxproptype > 0:
print('gdatmodi.indxelemmodi')
print(gdatmodi.indxelemmodi)
print('gdatmodi.indxelemfullmodi')
print(gdatmodi.indxelemfullmodi)
print('gdatmodi.this.boolpropfilt')
print(gdatmodi.this.boolpropfilt)
print('indxparagenrfullelem')
print(indxparagenrfullelem)
if gdatmodi.this.indxproptype == 1:
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
gmodnext.paragenrscalfull[gdatmodi.indxsamptran[0][g]] = icdf_trap(gdat, strgmodl, gdatmodi.this.auxipara[g], gmodthis.paragenrscalfull, \
gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gdatmodi.indxpopltran)
if gdat.booldiagmode:
if gmod.numbparaelem > 0:
for l in gmod.indxpopl:
if gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrunitfull[gmod.indxpara.numbelem[l]]):
print('l')
print(l)
print('gmod.indxpara.numbelem')
print(gmod.indxpara.numbelem)
print('gmodthis.paragenrunitfull')
print(gmodthis.paragenrunitfull)
raise Exception('')
if gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodthis.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrunitfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]] != round(gmodnext.paragenrscalfull[gmod.indxpara.numbelem[l]]):
raise Exception('')
if strgmodl == 'fitt':
diffparagenrscalfull = abs(gmodnext.paragenrscalfull - gmodthis.paragenrscalfull)
#size = np.where(((gmodthis.paragenrscalfull == 0.) & (diffparagenrscalfull > 0.)) | ((gmodthis.paragenrscalfull != 0.) & (diffparagenrscalfull / gmodthis.paragenrscalfull > 0)))[0].size
size = np.where(diffparagenrscalfull != 0.)[0].size
if gdatmodi.this.indxproptype == 1:
if size - 1 != gmod.numbparagenrelemsing[gdatmodi.indxpopltran]:
raise Exception('')
def calc_probprop(gdat, gdatmodi):
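    '''
    Calculate the factors entering the acceptance probability of the last proposal: the auxiliary-parameter
    term (lpau), the log-ratio of the reverse and forward proposal probabilities for splits and merges (ltrp),
    and the log-Jacobian of the split/merge transformation (ljcb).
    '''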
gmod = gdat.fitt
# calculate the factor to multiply the acceptance rate, i.e.,
## probability of the auxiliary parameters,
if gdatmodi.this.indxproptype == 0:
gdatmodi.this.lpau = 0.
elif gdatmodi.this.indxproptype == 1 or gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau = gdatmodi.next.lpritotl - gdatmodi.this.lpritotl
lpautemp = 0.5 * gdat.priofactdoff * gmod.numbparagenrelemsing[gdatmodi.indxpopltran]
if gdatmodi.this.indxproptype == 1:
gdatmodi.this.lpau += lpautemp
if gdatmodi.this.indxproptype == 2:
gdatmodi.this.lpau -= lpautemp
elif gdatmodi.this.indxproptype == 3 or gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau = 0.
dictelemtemp = [dict()]
for g, nameparagenrelem in enumerate(gmod.namepara.genrelem[gdatmodi.indxpopltran]):
            if gmod.boolcompposi[gdatmodi.indxpopltran][g]:
gdatmodi.this.lpau += -0.5 * np.log(2. * np.pi * gdat.radispmr**2) - 0.5 * (gdatmodi.this.auxipara[g] / gdat.radispmr)**2
elif g != gmod.indxparagenrelemampl[gdatmodi.indxpopltran]:
dictelemtemp[0][nameparagenrelem] = gdatmodi.this.auxipara[g]
gdatmodi.this.lpau += retr_lprielem(gdat, 'fitt', gdatmodi.indxpopltran, g, \
gmod.namepara.genrelem[gdatmodi.indxpopltran][g], gmod.listscalparagenrelem[gdatmodi.indxpopltran][g], \
gdatmodi.this.paragenrscalfull, dictelemtemp, [1])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.lpau *= -1.
if gdatmodi.this.indxproptype > 2 and gdatmodi.this.boolpropfilt:
## the ratio of the probability of the reverse and forward proposals, and
if gdatmodi.this.indxproptype == 3:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.next.paragenrscalfull, gdatmodi.next.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran] + 1) + np.log(gdatmodi.this.probmergtotl)
else:
gdatmodi.this.probmergtotl = retr_probmerg(gdat, gdatmodi, gdatmodi.this.paragenrscalfull, gdatmodi.this.indxparagenrfullelem, gdatmodi.indxpopltran, 'pair', \
typeelem=gmod.typeelem)
gdatmodi.this.ltrp = -np.log(gdatmodi.this.numbelem[gdatmodi.indxpopltran]) - np.log(gdatmodi.this.probmergtotl)
## Jacobian
if gmod.typeelem[gdatmodi.indxpopltran].startswith('lghtline'):
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[1])
else:
gdatmodi.this.ljcb = np.log(gdatmodi.comppare[2])
if gdatmodi.this.indxproptype == 4:
gdatmodi.this.ljcb *= -1.
else:
gdatmodi.this.ljcb = 0.
gdatmodi.this.ltrp = 0.
for l in gmod.indxpopl:
if gdatmodi.this.indxproptype > 0:
setattr(gdatmodi, 'auxiparapop%d' % l, gdatmodi.this.auxipara)
def retr_indxparagenrfullelem(gdat, indxelemfull, strgmodl):
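    '''
    For each population, construct a dictionary mapping each element parameter name (and 'full') to the
    indices of those parameters in the full generative parameter vector, given the occupied element indices.
    '''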
gmod = getattr(gdat, strgmodl)
## element parameters
if gmod.numbparaelem > 0:
indxparagenrfullelem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
indxparagenrfulltemp = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + np.array(indxelemfull[l], dtype=int) * gmod.numbparagenrelemsing[l]
cntr = tdpy.cntr()
indxparagenrfullelem[l] = dict()
for nameparagenrelem in gmod.namepara.genrelem[l]:
indxparagenrfullelem[l][nameparagenrelem] = indxparagenrfulltemp + cntr.incr()
indxparagenrfullelem[l]['full'] = np.repeat(indxparagenrfulltemp, gmod.numbparagenrelemsing[l]) + np.tile(gmod.indxparagenrelemsing[l], len(indxelemfull[l]))
if gdat.booldiagmode:
for l in gmod.indxpopl:
if len(indxparagenrfullelem[l]['full']) > 0:
if np.amax(indxparagenrfullelem[l]['full']) > gmod.numbparagenrelem[l] + gmod.numbparagenrbase:
print('strgmodl')
print(strgmodl)
print('strgstat')
print(strgstat)
print('gmod.numbparagenrbase')
print(gmod.numbparagenrbase)
print('gmod.numbparagenrelem[l]')
print(gmod.numbparagenrelem[l])
print('indxparagenrfullelem[l][full]')
summgene(indxparagenrfullelem[l]['full'])
print('gdat.fitt.minmpara.numbelempop0')
print(gdat.fitt.minmpara.numbelempop0)
print('gdat.fitt.maxmpara.numbelempop0')
print(gdat.fitt.maxmpara.numbelempop0)
raise Exception('Element parameter indices are bad.')
else:
indxparagenrfullelem = None
return indxparagenrfullelem
def retr_weigmergodim(gdat, elin, elinothr):
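    '''Gaussian merge weight in one dimension (line energy), with kernel scale gdat.radispmr.'''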
weigmerg = np.exp(-0.5 * ((elin - elinothr) / gdat.radispmr)**2)
return weigmerg
def retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr):
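    '''Gaussian merge weight in two dimensions (longitude and latitude), with kernel scale gdat.radispmr.'''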
weigmerg = np.exp(-0.5 * (((lgal - lgalothr) / gdat.radispmr)**2 + ((bgal - bgalothr) / gdat.radispmr)**2))
return weigmerg
def retr_probmerg(gdat, gdatmodi, paragenrscalfull, indxparagenrfullelem, indxpopltran, strgtype, typeelem=None):
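    '''
    Return the normalized probability of choosing a merge partner ('seco') or a specific pair ('pair') for the
    element indexed by gdatmodi.indxelemfullmodi[0], based on Gaussian proximity weights.
    '''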
# calculate the weights
if strgtype == 'seco':
numb = 1
if strgtype == 'pair':
numb = 2
listweigmerg = []
for a in range(numb):
        if typeelem[indxpopltran].startswith('lghtline'):
elintotl = paragenrscalfull[indxparagenrfullelem['elin'][indxpopltran]]
elin = elintotl[gdatmodi.indxelemfullmodi[0]]
elinothr = np.concatenate((elintotl[:gdatmodi.indxelemfullmodi[0]], elintotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergodim(gdat, elin, elinothr)
else:
lgaltotl = paragenrscalfull[indxparagenrfullelem['lgal'][indxpopltran]]
bgaltotl = paragenrscalfull[indxparagenrfullelem['bgal'][indxpopltran]]
lgal = lgaltotl[gdatmodi.indxelemfullmodi[0]]
bgal = bgaltotl[gdatmodi.indxelemfullmodi[0]]
lgalothr = np.concatenate((lgaltotl[:gdatmodi.indxelemfullmodi[0]], lgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
bgalothr = np.concatenate((bgaltotl[:gdatmodi.indxelemfullmodi[0]], bgaltotl[gdatmodi.indxelemfullmodi[0]+1:]))
weigmerg = retr_weigmergtdim(gdat, lgal, lgalothr, bgal, bgalothr)
listweigmerg.append(weigmerg)
# determine the probability of merging the second element given the first element
if strgtype == 'seco':
probmerg = listweigmerg[0] / np.sum(listweigmerg[0])
# determine the probability of merging the pair
if strgtype == 'pair':
        if typeelem[indxpopltran].startswith('lghtline'):
weigpair = retr_weigmergtdim(gdat, elin, elintotl[gdatmodi.indxelemfullmodi[1]])
else:
weigpair = retr_weigmergtdim(gdat, lgal, lgaltotl[gdatmodi.indxelemfullmodi[1]], bgal, bgaltotl[gdatmodi.indxelemfullmodi[1]])
probmerg = weigpair / np.sum(listweigmerg[0]) + weigpair / np.sum(listweigmerg[1])
if gdat.booldiagmode:
if not np.isfinite(probmerg).all():
raise Exception('Merge probability is infinite.')
return probmerg
def retr_indxparaelem(gmod, l, u):
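    '''Return the indices of the generative parameters of element u of population l in the full parameter vector.'''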
indxsamppnts = gmod.indxparagenrfulleleminit + gmod.numbparagenrelemcuml[l] + u * gmod.numbparagenrelemsing[l] + gmod.indxparagenrelemsing[l]
return indxsamppnts
def gang_detr():
    import sympy
    gang, aang, lgal, bgal = sympy.symbols('gang aang lgal bgal')
    # a1..a3 and b1..b3 are assumed placeholder symbols for this symbolic scratch computation
    a1, a2, a3, b1, b2, b3 = sympy.symbols('a1 a2 a3 b1 b2 b3')
    AB = sympy.matrices.Matrix([[a1*b1,a1*b2,a1*b3],[a2*b1,a2*b2,a2*b3],[a3*b1,a3*b2,a3*b3]])
def retr_psfn(gdat, psfp, indxenertemp, thisangl, typemodlpsfn, strgmodl):
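    '''
    Evaluate the PSF (single Gaussian, single King, or double King profile) at the given angles for each
    energy bin and event type; for Fermi-LAT data the angle is rescaled and the profile is normalized to a
    unit solid-angle integral.
    '''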
gmod = getattr(gdat, strgmodl)
indxpsfpinit = gmod.numbpsfptotl * (indxenertemp[:, None] + gdat.numbener * gdat.indxevtt[None, :])
if gdat.typeexpr == 'ferm':
scalangl = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(thisangl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
scalanglnorm = 2. * np.arcsin(np.sqrt(2. - 2. * np.cos(gdat.binspara.angl)) / 2.)[None, :, None] / gdat.fermscalfact[:, None, :]
else:
scalangl = thisangl[None, :, None]
if typemodlpsfn == 'singgaus':
sigc = psfp[indxpsfpinit]
sigc = sigc[:, None, :]
psfn = retr_singgaus(scalangl, sigc)
elif typemodlpsfn == 'singking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
psfn = retr_singking(scalangl, sigc, gamc)
elif typemodlpsfn == 'doubking':
sigc = psfp[indxpsfpinit]
gamc = psfp[indxpsfpinit+1]
sigt = psfp[indxpsfpinit+2]
gamt = psfp[indxpsfpinit+3]
frac = psfp[indxpsfpinit+4]
sigc = sigc[:, None, :]
gamc = gamc[:, None, :]
sigt = sigt[:, None, :]
gamt = gamt[:, None, :]
frac = frac[:, None, :]
psfn = retr_doubking(scalangl, frac, sigc, gamc, sigt, gamt)
if gdat.typeexpr == 'ferm':
psfnnorm = retr_doubking(scalanglnorm, frac, sigc, gamc, sigt, gamt)
# normalize the PSF
if gdat.typeexpr == 'ferm':
fact = 2. * np.pi * np.trapz(psfnnorm * np.sin(gdat.binspara.angl[None, :, None]), gdat.binspara.angl, axis=1)[:, None, :]
psfn /= fact
return psfn
def retr_unit(lgal, bgal):
xdat = np.cos(bgal) * np.cos(lgal)
ydat = -np.cos(bgal) * np.sin(lgal)
zaxi = np.sin(bgal)
return xdat, ydat, zaxi
def retr_psec(gdat, conv):
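    '''Return the flattened 2D power spectrum of the convergence map, computed with an FFT.'''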
# temp
conv = conv.reshape((gdat.numbsidecart, gdat.numbsidecart))
psec = (abs(scipy.fftpack.fft2(conv))**2)[:gdat.numbsidecarthalf, :gdat.numbsidecarthalf] * 1e-3
psec = psec.flatten()
return psec
def retr_psecodim(gdat, psec):
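    '''Azimuthally average the 2D power spectrum into multipole bins and scale by the squared multipole.'''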
psec = psec.reshape((gdat.numbsidecarthalf, gdat.numbsidecarthalf))
psecodim = np.zeros(gdat.numbsidecarthalf)
for k in gdat.indxmpolodim:
indxmpol = np.where((gdat.meanpara.mpol > gdat.binspara.mpolodim[k]) & (gdat.meanpara.mpol < gdat.binspara.mpolodim[k+1]))
psecodim[k] = np.mean(psec[indxmpol])
psecodim *= gdat.meanpara.mpolodim**2
return psecodim
def retr_eerrnorm(minmvarb, maxmvarb, meanvarb, stdvvarb):
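    '''
    Return the Gaussian CDF evaluated at the lower bound and the CDF difference between the two bounds,
    used to normalize a truncated normal distribution.
    '''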
cdfnminm = 0.5 * (sp.special.erf((minmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfnmaxm = 0.5 * (sp.special.erf((maxmvarb - meanvarb) / stdvvarb / np.sqrt(2.)) + 1.)
cdfndiff = cdfnmaxm - cdfnminm
return cdfnminm, cdfndiff
def retr_condcatl(gdat):
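    '''
    Construct the condensed catalog: stack elements across posterior samples, associate nearby stacked
    samples into clusters, and summarize each cluster with its prevalence and the median and 16th/84th
    percentiles of its element parameters.
    '''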
    gmod = gdat.fitt
    # setup
## number of stacked samples
numbstks = 0
indxtupl = []
indxstks = []
indxstksparagenrscalfull = []
for n in gdat.indxsamptotl:
indxstks.append([])
indxstkssamptemp = []
for l in gmod.indxpopl:
indxstks[n].append([])
for k in range(len(gdat.listpostindxelemfull[n][l])):
indxstks[n][l].append(numbstks)
indxstkssamptemp.append(numbstks)
indxtupl.append([n, l, k])
numbstks += 1
        indxstksparagenrscalfull.append(np.array(indxstkssamptemp))
if gdat.typeverb > 1:
print('indxstks')
print(indxstks)
print('indxtupl')
print(indxtupl)
        print('indxstksparagenrscalfull')
        print(indxstksparagenrscalfull)
print('numbstks')
print(numbstks)
cntr = 0
arrystks = np.zeros((numbstks, gmod.numbparagenrelemtotl))
for n in gdat.indxsamptotl:
indxparagenrfullelem = retr_indxparagenrfullelem(gdat, gdat.listpostindxelemfull[n], 'fitt')
for l in gmod.indxpopl:
for k in np.arange(len(gdat.listpostindxelemfull[n][l])):
for m, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
                    arrystks[indxstks[n][l][k], m] = gdat.listpostparagenrscalfull[n, indxparagenrfullelem[l][nameparagenrelem][k]]
if gdat.typeverb > 0:
print('Constructing the distance matrix for %d stacked samples...' % arrystks.shape[0])
timeinit = gdat.functime()
gdat.distthrs = np.empty(gmod.numbparagenrelemtotl)
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
# temp
l = 0
gdat.distthrs[k] = gdat.stdp[getattr(gdat, 'indxstdppop%d' % l + nameparagenrelem)]
# construct lists of samples for each proposal type
listdisttemp = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstksrows = [[] for k in range(gmod.numbparagenrelemtotl)]
indxstkscols = [[] for k in range(gmod.numbparagenrelemtotl)]
thisperc = 0
cntr = 0
for k in gmod.indxparagenrelemtotl:
for n in range(numbstks):
dist = np.fabs(arrystks[n, k] - arrystks[:, k])
indxstks = np.where(dist < gdat.distthrs[k])[0]
if indxstks.size > 0:
for j in indxstks:
cntr += 1
listdisttemp[k].append(dist[j])
indxstksrows[k].append(n)
indxstkscols[k].append(j)
nextperc = np.floor(100. * float(k * numbstks + n) / numbstks / gmod.numbparagenrelemtotl)
if nextperc > thisperc:
thisperc = nextperc
if cntr > 1e6:
break
listdisttemp[k] = np.array(listdisttemp[k])
indxstksrows[k] = np.array(indxstksrows[k])
indxstkscols[k] = np.array(indxstkscols[k])
if cntr > 1e6:
break
listdist = [[] for k in range(gmod.numbparagenrelemtotl)]
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
listdist[k] = scipy.sparse.csr_matrix((listdisttemp[k], (indxstksrows[k], indxstkscols[k])), shape=(numbstks, numbstks))
listindxstkspair = []
indxstksleft = []
if gdat.typeverb > 0:
timefinl = gdat.functime()
    indxstksleft = list(range(numbstks))
# list of sample lists of the labeled element
indxstksassc = []
cntr = 0
gdat.prvlthrs = 0.05
while len(indxstksleft) > 0:
# count number of associations
numbdist = np.zeros(numbstks, dtype=int) - 1
for p in range(len(indxstksleft)):
            indxindx = np.where((listdist[0][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmlgal < gdat.anglassc) & \
                                (listdist[1][indxstksleft[p], :].toarray().flatten() * 2. * gdat.maxmbgal < gdat.anglassc))[0]
numbdist[indxstksleft[p]] = indxindx.size
prvlmaxmesti = np.amax(numbdist) / float(gdat.numbsamptotl)
if prvlmaxmesti < gdat.prvlthrs:
break
# determine the element with the highest number of neighbors
indxstkscntr = np.argmax(numbdist)
indxsamptotlcntr = indxtupl[indxstkscntr][0]
indxpoplcntr = indxtupl[indxstkscntr][1]
indxelemcntr = indxtupl[indxstkscntr][2]
# add the central element sample
indxstksassc.append([])
indxstksassc[cntr].append(indxstkscntr)
indxstksleft.remove(indxstkscntr)
if gdat.typeverb > 1:
print('Match step %d' % cntr)
print('numbdist')
print(numbdist)
print('indxstkscntr')
print(indxstkscntr)
print('indxstksleft')
print(indxstksleft)
# add the associated element samples
if len(indxstksleft) > 0:
for n in gdat.indxsamptotl:
indxstkstemp = np.intersect1d(np.array(indxstksleft), indxstksparagenrscalfull[n])
if n == indxsamptotlcntr:
continue
if indxstkstemp.size > 0:
totl = np.zeros_like(indxstkstemp)
for k in gmod.indxparagenrelemtotl:
                        temp = listdist[k][indxstkscntr, indxstkstemp].toarray()[0]
totl = totl + temp**2
indxleft = np.argsort(totl)[0]
indxstksthis = indxstkstemp[indxleft]
thisbool = True
for k in gmod.indxparagenrelemtotl:
if listdist[k][indxstkscntr, indxstksthis] > gdat.distthrs[k]:
thisbool = False
if thisbool:
indxstksassc[cntr].append(indxstksthis)
indxstksleft.remove(indxstksthis)
# temp
#if gdat.makeplot:
# gdatmodi = tdpy.gdatstrt()
# gdatmodi.this.indxelemfull = deepcopy(listindxelemfull[n])
# for r in range(len(indxstksassc)):
# calc_poststkscond(gdat, indxstksassc)
# gdatmodi.this.indxelemfull = [[] for l in gmod.indxpopl]
# for indxstkstemp in indxstksleft:
# indxsamptotlcntr = indxtupl[indxstkstemp][0]
# indxpoplcntr = indxtupl[indxstkstemp][1]
# indxelemcntr = indxtupl[indxstkstemp][2]
# gdatmodi.this.paragenrscalfull = gdat.listparagenrscalfull[indxsamptotlcntr, :]
# gdatmodi.this.indxelemfull[].append()
# plot_genemaps(gdat, gdatmodi, 'this', 'cntpdata', strgpdfn, indxenerplot=0, indxevttplot=0, cond=True)
cntr += 1
gdat.dictglob['poststkscond'] = []
gdat.dictglob['liststkscond'] = []
# for each condensed element
for r in range(len(indxstksassc)):
gdat.dictglob['liststkscond'].append([])
gdat.dictglob['liststkscond'][r] = {}
gdat.dictglob['poststkscond'].append([])
gdat.dictglob['poststkscond'][r] = {}
for strgfeat in gmod.namepara.genrelem:
gdat.dictglob['liststkscond'][r][strgfeat] = []
# for each associated sample associated with the central stacked sample
for k in range(len(indxstksassc[r])):
indxsamptotlcntr = indxtupl[indxstksassc[r][k]][0]
indxpoplcntr = indxtupl[indxstksassc[r][k]][1]
indxelemcntr = indxtupl[indxstksassc[r][k]][2]
for strgfeat in gmod.namepara.genrelem:
temp = getattr(gdat, 'list' + strgfeat)
if temp[indxsamptotlcntr][indxpoplcntr].size > 0:
temp = temp[indxsamptotlcntr][indxpoplcntr][..., indxelemcntr]
gdat.dictglob['liststkscond'][r][strgfeat].append(temp)
for r in range(len(gdat.dictglob['liststkscond'])):
for strgfeat in gmod.namepara.genrelem:
arry = np.stack(gdat.dictglob['liststkscond'][r][strgfeat], axis=0)
gdat.dictglob['poststkscond'][r][strgfeat] = np.zeros(([3] + list(arry.shape[1:])))
            gdat.dictglob['poststkscond'][r][strgfeat][0, ...] = np.median(arry, axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat][1, ...] = np.percentile(arry, 16., axis=0)
            gdat.dictglob['poststkscond'][r][strgfeat][2, ...] = np.percentile(arry, 84., axis=0)
gdat.numbstkscond = len(gdat.dictglob['liststkscond'])
gdat.indxstkscond = np.arange(gdat.numbstkscond)
gdat.prvl = np.empty(gdat.numbstkscond)
for r in gdat.indxstkscond:
gdat.prvl[r] = len(gdat.dictglob['liststkscond'][r]['deltllik'])
gdat.prvl /= gdat.numbsamptotl
gdat.minmprvl = 0.
gdat.maxmprvl = 1.
retr_axis(gdat, 'prvl')
gdat.histprvl = np.histogram(gdat.prvl, bins=gdat.binspara.prvl)[0]
if gdat.makeplot:
pathcond = getattr(gdat, 'path' + strgpdfn + 'finlcond')
for k, nameparagenrelem in enumerate(gmod.namepara.elem):
path = pathcond + 'histdist' + nameparagenrelem
            listtemp = np.copy(listdist[k].toarray()).flatten()
listtemp = listtemp[np.where(listtemp != 1e20)[0]]
tdpy.mcmc.plot_hist(path, listtemp, r'$\Delta \tilde{' + getattr(gmod.lablrootpara, nameparagenrelem) + '}$')
path = pathcond + 'histprvl'
tdpy.mcmc.plot_hist(path, gdat.prvl, r'$p$')
gdat.prvlthrs = 0.1
gdat.indxprvlhigh = np.where(gdat.prvl > gdat.prvlthrs)[0]
gdat.numbprvlhigh = gdat.indxprvlhigh.size
def retr_conv(gdat, defl):
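    '''Return the convergence map, computed as half the absolute divergence of the deflection field.'''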
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
# temp
conv = abs(np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0) + np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) / 2.
conv = conv.flatten()
return conv
def retr_invm(gdat, defl):
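    '''Return the inverse magnification map, i.e., the determinant of the lensing Jacobian of the deflection field.'''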
# temp
defl = defl.reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
invm = (1. - np.gradient(defl[:, :, 0], gdat.sizepixl, axis=0)) * (1. - np.gradient(defl[:, :, 1], gdat.sizepixl, axis=1)) - \
np.gradient(defl[:, :, 0], gdat.sizepixl, axis=1) * np.gradient(defl[:, :, 1], gdat.sizepixl, axis=0)
invm = invm.flatten()
return invm
def setp_indxswepsave(gdat):
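    '''Mark which sweeps will be saved: every gdat.factthin-th sweep after gdat.numbburn burn-in sweeps.'''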
gdat.indxswep = np.arange(gdat.numbswep)
gdat.boolsave = np.zeros(gdat.numbswep, dtype=bool)
gdat.indxswepsave = np.arange(gdat.numbburn, gdat.numbburn + gdat.numbsamp * gdat.factthin, gdat.factthin)
gdat.boolsave[gdat.indxswepsave] = True
gdat.indxsampsave = np.zeros(gdat.numbswep, dtype=int) - 1
gdat.indxsampsave[gdat.indxswepsave] = np.arange(gdat.numbsamp)
def retr_cntspnts(gdat, listposi, spec):
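    '''Return the expected counts of each point source: its spectrum times the exposure at its pixel, summed over energy bins.'''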
cnts = np.zeros((gdat.numbener, spec.shape[1]))
if gdat.boolbinsspat:
lgal = listposi[0]
bgal = listposi[1]
indxpixlpnts = retr_indxpixl(gdat, bgal, lgal)
else:
elin = listposi[0]
indxpixlpnts = np.zeros_like(elin, dtype=int)
for k in range(spec.shape[1]):
cnts[:, k] += spec[:, k] * gdat.expototl[:, indxpixlpnts[k]]
if gdat.enerdiff:
cnts *= gdat.deltener[:, None]
cnts = np.sum(cnts, axis=0)
return cnts
def retr_mdencrit(gdat, adissour, adishost, adishostsour):
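    '''
    Return the critical surface mass density for lensing, Sigma_cr = c^2 / (4 pi G) * D_S / (D_L * D_LS),
    assuming gdat.factnewtlght stands for c^2 / G in the chosen units.
    '''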
mdencrit = gdat.factnewtlght / 4. / np.pi * adissour / adishostsour / adishost
return mdencrit
def retr_massfrombein(gdat, adissour, adishost, adishostsour):
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
massfrombein = np.pi * adishost**2 * mdencrit
return massfrombein
def retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut):
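    '''Return the factor that converts the deflection scale of a subhalo into its truncation mass, given its scale and cutoff radii.'''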
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
fracacutasca = acut / asca
factmcutfromdefs = np.pi * adishost**2 * mdencrit * asca * retr_mcutfrommscl(fracacutasca)
return factmcutfromdefs
def retr_mcut(gdat, defs, asca, acut, adishost, mdencrit):
mscl = defs * np.pi * adishost**2 * mdencrit * asca
fracacutasca = acut / asca
mcut = mscl * retr_mcutfrommscl(fracacutasca)
return mcut
def retr_mcutfrommscl(fracacutasca):
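    '''Dimensionless factor relating the scale mass to the truncation mass as a function of acut / asca, consistent with a truncated NFW profile.'''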
mcut = fracacutasca**2 / (fracacutasca**2 + 1.)**2 * ((fracacutasca**2 - 1.) * np.log(fracacutasca) + fracacutasca * np.pi - (fracacutasca**2 + 1.))
return mcut
def retr_negalogt(varb):
    negalogt = np.sign(varb) * np.log10(np.fabs(varb))
return negalogt
def retr_gradmaps(gdat, maps):
# temp -- this does not work with vanishing exposure
maps = maps.reshape((gdat.numbsidecart, gdat.numbsidecart))
grad = np.dstack((np.gradient(maps, gdat.sizepixl, axis=0), np.gradient(maps, gdat.sizepixl, axis=1))).reshape((gdat.numbsidecart, gdat.numbsidecart, 2))
grad = grad.reshape((gdat.numbpixlcart, 2))
return grad
def retr_spatmean(gdat, inpt, boolcntp=False):
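    '''
    Return the spatial mean (and a rough Poisson-based standard deviation) of the input over each spatial-mean
    region; the input is taken to be counts if boolcntp is True, otherwise it is converted to counts using the
    exposure and pixel size, with optional corrections for exposure and energy bin width.
    '''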
listspatmean = [[] for b in gdat.indxspatmean]
listspatstdv = [[] for b in gdat.indxspatmean]
for b, namespatmean in enumerate(gdat.listnamespatmean):
if boolcntp:
cntp = inpt[gdat.listindxcubespatmean[b]]
else:
cntp = inpt[gdat.listindxcubespatmean[b]] * gdat.expo[gdat.listindxcubespatmean[b]] * gdat.apix
if gdat.enerdiff:
cntp *= gdat.deltener[:, None, None]
spatmean = np.mean(np.sum(cntp, 2), axis=1) / gdat.apix
spatstdv = np.sqrt(np.sum(cntp, axis=(1, 2))) / gdat.numbdata / gdat.apix
if gdat.boolcorrexpo:
spatmean /= gdat.expototlmean
spatstdv /= gdat.expototlmean
if gdat.enerdiff:
spatmean /= gdat.deltener
spatstdv /= gdat.deltener
listspatmean[b] = spatmean
listspatstdv[b] = spatstdv
return listspatmean, listspatstdv
def retr_rele(gdat, maps, lgal, bgal, defs, asca, acut, indxpixlelem, absv=True, cntpmodl=None):
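    '''
    Return the relevance of an element: the mean (by default absolute) dot product between the gradient of the
    input map and the deflection field of the element, optionally weighted by the inverse model counts.
    '''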
grad = retr_gradmaps(gdat, maps)
defl = retr_defl(gdat, indxpixlelem, lgal, bgal, defs, asca=asca, acut=acut)
prod = grad * defl
if cntpmodl is not None:
prod /= cntpmodl[:, None]
dotstemp = np.sum(prod, 1)
if absv:
dotstemp = np.fabs(dotstemp)
else:
dotstemp = dotstemp
dots = np.mean(dotstemp)
return dots
def retr_fromgdat(gdat, gdatmodi, strgstat, strgmodl, strgvarb, strgpdfn, strgmome='pmea', indxvarb=None, indxlist=None):
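    '''
    Fetch a copy of the named variable from the appropriate container: from gdat for data variables, from the
    true model state, from gdatmodi for the current fit state, or from the precomputed posterior moments.
    '''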
if strgvarb.startswith('cntpdata'):
varb = getattr(gdat, strgvarb)
elif strgvarb.startswith('histcntpdata'):
varb = getattr(gdat, strgvarb)
else:
if strgmodl == 'true':
gmod = getattr(gdat, strgmodl)
gmodstat = getattr(gmod, strgstat)
varb = getattr(gmodstat, strgvarb)
if strgmodl == 'fitt':
if strgstat == 'this':
if strgmome == 'errr':
varb = getattr(gdatmodi, strgstat + 'errr' + strgvarb)
else:
varb = getattr(gdatmodi, strgstat + strgvarb)
if strgstat == 'pdfn':
varb = getattr(gdat, strgmome + strgpdfn + strgvarb)
if indxlist is not None:
varb = varb[indxlist]
if indxvarb is not None:
if strgmome == 'errr':
varb = varb[[slice(None)] + indxvarb]
else:
varb = varb[indxvarb]
return np.copy(varb)
def setp_indxpara(gdat, typesetp, strgmodl='fitt'):
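    '''
    Set up the parameter bookkeeping of the given model: population, background, and amplitude-parameter
    indices in the 'init' stage; element flags, priors, background templates, and PSF settings in the 'finl' stage.
    '''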
print('setp_indxpara(): Building parameter indices for model %s with type %s...' % (strgmodl, typesetp))
gmod = getattr(gdat, strgmodl)
if typesetp == 'init':
if strgmodl == 'fitt':
gmod.lablmodl = 'Model'
if strgmodl == 'true':
gmod.lablmodl = 'True'
# transdimensional element populations
gmod.numbpopl = len(gmod.typeelem)
gmod.indxpopl = np.arange(gmod.numbpopl)
if gdat.typeexpr != 'user':
# background component
gmod.numbback = 0
gmod.indxback = []
for c in range(len(gmod.typeback)):
if isinstance(gmod.typeback[c], str):
if gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
namebfun = gmod.typeback[c][:8]
ordrexpa = int(gmod.typeback[c][8:])
numbexpa = 4 * ordrexpa**2
indxexpa = np.arange(numbexpa)
del gmod.typeback[c]
for k in indxexpa:
gmod.typeback.insert(c+k, namebfun + '%04d' % k)
gmod.numbback = len(gmod.typeback)
gmod.indxback = np.arange(gmod.numbback)
gmod.numbbacktotl = np.sum(gmod.numbback)
gmod.indxbacktotl = np.arange(gmod.numbbacktotl)
# galaxy components
gmod.indxsersfgrd = np.arange(gmod.numbsersfgrd)
# name of the generative element parameter used for the amplitude
gmod.nameparagenrelemampl = [[] for l in gmod.indxpopl]
gmod.indxparagenrelemampl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.nameparagenrelemampl[l] = 'per0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.nameparagenrelemampl[l] = 'lum0'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtline'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 1
elif gmod.typeelem[l].startswith('lghtpnts'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
elif gmod.typeelem[l].startswith('lghtgausbgrd'):
gmod.nameparagenrelemampl[l] = 'flux'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l].startswith('clus'):
gmod.nameparagenrelemampl[l] = 'nobj'
gmod.indxparagenrelemampl[l] = 2
if gmod.typeelem[l] == 'lens':
gmod.nameparagenrelemampl[l] = 'defs'
if gmod.typeelem[l] == 'clus':
gmod.nameparagenrelemampl[l] = 'nobj'
if len(gmod.nameparagenrelemampl[l]) == 0:
raise Exception('Amplitude feature undefined.')
for featpara in gdat.listfeatpara:
for strggrop in gdat.liststrggroppara:
setattr(gmod, 'list' + featpara + 'para' + strggrop, [])
if typesetp == 'finl':
# number of elements in the current state of the true model
if strgmodl == 'true':
gmod.numbelem = np.zeros(gmod.numbpopl)
for l in gmod.indxpopl:
gmod.numbelem[l] += getattr(gmod.maxmpara, 'numbelempop%d' % l)
gmod.numbelemtotl = np.sum(gmod.numbelem)
# element setup
## flag to calculate the kernel approximation errors
boolcalcerrr = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelemspateval[l] == 'locl' and gdat.numbpixlfull < 1e5:
# temp
boolcalcerrr[l] = False
else:
boolcalcerrr[l] = False
setp_varb(gdat, 'boolcalcerrr', valu=boolcalcerrr, strgmodl=strgmodl)
# maximum number of elements for each population
gmod.maxmpara.numbelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.maxmpara.numbelem[l] = getattr(gmod.maxmpara, 'numbelempop%d' % l)
# maximum number of elements summed over all populations
gmod.maxmpara.numbelemtotl = np.sum(gmod.maxmpara.numbelem)
## sorting feature
nameparaelemsort = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
# feature to be used to sort elements
if gmod.typeelem[l].startswith('lght'):
nameparaelemsort[l] = 'flux'
if gmod.typeelem[l] == 'lens':
nameparaelemsort[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
nameparaelemsort[l] = 'nobj'
## label extensions
gmod.lablelemextn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{fps}'
if gmod.typeelem[l] == 'lghtgausbgrd':
gmod.lablelemextn[l] = r'\rm{bgs}'
else:
if gmod.typeelem[l].startswith('lghtpntspuls'):
gmod.lablelemextn[l] = r'\rm{pul}'
if gmod.typeelem[l].startswith('lghtpntsagnn'):
gmod.lablelemextn[l] = r'\rm{agn}'
elif gmod.typeelem[l] == 'lghtpnts':
gmod.lablelemextn[l] = r'\rm{pts}'
if gmod.typeelem[l] == 'lens':
gmod.lablelemextn[l] = r'\rm{sub}'
if gmod.typeelem[l].startswith('clus'):
gmod.lablelemextn[l] = r'\rm{cls}'
if gmod.typeelem[l].startswith('lghtline'):
gmod.lablelemextn[l] = r'\rm{lin}'
gmod.indxpoplgrid = [[] for y in gdat.indxgrid]
for y in gdat.indxgrid:
for indx, typeelemtemp in enumerate(gmod.typeelem):
                # foreground grid (image plane) -- the one where the data is measured
if y == 0:
if typeelemtemp.startswith('lght') and not typeelemtemp.endswith('bgrd') or typeelemtemp.startswith('clus'):
gmod.indxpoplgrid[y].append(indx)
# foreground mass grid
if y == 1:
if typeelemtemp.startswith('lens'):
gmod.indxpoplgrid[y].append(indx)
# background grid (source plane)
if y == 2:
if typeelemtemp.endswith('bgrd'):
gmod.indxpoplgrid[y].append(indx)
indxgridpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for y in gdat.indxgrid:
if l in gmod.indxpoplgrid[y]:
indxgridpopl[l] = y
calcelemsbrt = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts'):
calcelemsbrt = True
if 'lghtgausbgrd' in gmod.typeelem:
calcelemsbrtbgrd = True
else:
calcelemsbrtbgrd = False
if gmod.boollenssubh:
calcelemdefl = True
else:
calcelemdefl = False
## element Boolean flags
gmod.boolelemlght = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.boolelemlght[l] = True
else:
gmod.boolelemlght[l] = False
gmod.boolelemlghtanyy = True in gmod.boolelemlght
gmod.boolelemlens = False
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lens'):
gmod.boolelemlens = True
gmod.boolelemsbrtdfnc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0 and (gmod.typeelem[l].startswith('lght') and not gmod.typeelem[l].endswith('bgrd') or gmod.typeelem[l].startswith('clus')):
gmod.boolelemsbrtdfnc[l] = True
else:
gmod.boolelemsbrtdfnc[l] = False
gmod.boolelemsbrtdfncanyy = True in gmod.boolelemsbrtdfnc
gmod.boolelemdeflsubh = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lens':
gmod.boolelemdeflsubh[l] = True
else:
gmod.boolelemdeflsubh[l] = False
gmod.boolelemdeflsubhanyy = True in gmod.boolelemdeflsubh
gmod.boolelemsbrtextsbgrd = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l].endswith('bgrd'):
gmod.boolelemsbrtextsbgrd[l] = True
else:
gmod.boolelemsbrtextsbgrd[l] = False
gmod.boolelemsbrtextsbgrdanyy = True in gmod.boolelemsbrtextsbgrd
if gmod.boolelemsbrtextsbgrdanyy:
gmod.indxpopllens = 1
else:
gmod.indxpopllens = 0
gmod.boolelemsbrtpnts = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and gmod.typeelem[l] != 'lghtline' or gmod.typeelem[l] == 'clus':
gmod.boolelemsbrtpnts[l] = True
else:
gmod.boolelemsbrtpnts[l] = False
gmod.boolelemsbrtpntsanyy = True in gmod.boolelemsbrtpnts
# temp -- because there is currently no extended source
gmod.boolelemsbrt = gmod.boolelemsbrtdfnc
gmod.boolelempsfn = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtpnts') or gmod.typeelem[l] == 'clus':
gmod.boolelempsfn[l] = True
else:
gmod.boolelempsfn[l] = False
gmod.boolelempsfnanyy = True in gmod.boolelempsfn
spectype = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.boolelemlght[l]:
spectype[l] = 'powr'
else:
spectype[l] = 'none'
setp_varb(gdat, 'spectype', valu=spectype, strgmodl=strgmodl)
minmgwdt = 2. * gdat.sizepixl
maxmgwdt = gdat.maxmgangdata / 4.
setp_varb(gdat, 'gwdt', minm=minmgwdt, maxm=maxmgwdt, strgmodl=strgmodl)
setp_varb(gdat, 'aerr', minm=-100, maxm=100, strgmodl=strgmodl, popl='full')
if gmod.boolelemlghtanyy:
# flux
if gdat.typeexpr == 'ferm':
minmflux = 1e-9
maxmflux = 1e-6
if gdat.typeexpr == 'tess':
minmflux = 1.
maxmflux = 1e3
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
minmflux = 1e4
maxmflux = 1e7
else:
minmflux = 3e-9
maxmflux = 1e-6
if gdat.typeexpr == 'gene':
minmflux = 0.1
maxmflux = 100.
if gdat.typeexpr == 'hubb':
minmflux = 1e-20
maxmflux = 1e-17
if gdat.typeexpr == 'fire':
minmflux = 1e-20
maxmflux = 1e-17
setp_varb(gdat, 'flux', limt=[minmflux, maxmflux], strgmodl=strgmodl)
if gdat.typeexpr == 'ferm':
setp_varb(gdat, 'brekprioflux', limt=[3e-9, 1e-6], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'sloplowrprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'slopupprprioflux', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
if gdat.boolbinsener:
### spectral parameters
if gdat.typeexpr == 'ferm':
sind = [1., 3.]
minmsind = 1.
maxmsind = 3.
if gdat.typeexpr == 'chan':
minmsind = 0.4
maxmsind = 2.4
sind = [0.4, 2.4]
if gdat.typeexpr == 'hubb':
minmsind = 0.5
maxmsind = 2.5
sind = [0.4, 2.4]
if gdat.typeexpr != 'fire':
setp_varb(gdat, 'sind', limt=[minmsind, maxmsind], strgmodl=strgmodl)
setp_varb(gdat, 'curv', limt=[-1., 1.], strgmodl=strgmodl)
setp_varb(gdat, 'expc', limt=[0.1, 10.], strgmodl=strgmodl)
setp_varb(gdat, 'sinddistmean', limt=sind, popl='full', strgmodl=strgmodl)
#### standard deviations should not be too small
setp_varb(gdat, 'sinddiststdv', limt=[0.3, 2.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdistmean', limt=[-1., 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'curvdiststdv', limt=[0.1, 1.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdistmean', limt=[1., 8.], popl='full', strgmodl=strgmodl)
setp_varb(gdat, 'expcdiststdv', limt=[0.01 * gdat.maxmener, gdat.maxmener], popl='full', strgmodl=strgmodl)
for i in gdat.indxenerinde:
setp_varb(gdat, 'sindcolr0001', limt=[-2., 6.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr0002', limt=[0., 8.], strgmodl=strgmodl)
setp_varb(gdat, 'sindcolr%04d' % i, limt=[-5., 10.], strgmodl=strgmodl)
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpntspuls':
setp_varb(gdat, 'gang', limt=[1e-1 * gdat.sizepixl, gdat.maxmgangdata], strgmodl=strgmodl)
setp_varb(gdat, 'geff', limt=[0., 0.4], strgmodl=strgmodl)
setp_varb(gdat, 'dglc', limt=[10., 3e3], strgmodl=strgmodl)
setp_varb(gdat, 'phii', limt=[0., 2. * np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'thet', limt=[0., np.pi], strgmodl=strgmodl)
setp_varb(gdat, 'per0distmean', limt=[5e-4, 1e1], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdistmean', limt=[1e7, 1e16], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'per0diststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'magfdiststdv', limt=[1e-2, 1.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'gangslop', limt=[0.5, 4.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'dglcslop', limt=[0.5, 2.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'spatdistcons', limt=[1e-4, 1e-2], popl='full')
setp_varb(gdat, 'bgaldistscal', limt=[0.5 / gdat.anglfact, 5. / gdat.anglfact], popl='full', strgmodl=strgmodl)
if gmod.typeelem[l] == 'lghtpntsagnntrue':
setp_varb(gdat, 'dlos', limt=[1e7, 1e9], strgmodl=strgmodl)
setp_varb(gdat, 'dlosslop', limt=[-0.5, -3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0', limt=[1e43, 1e46], strgmodl=strgmodl)
setp_varb(gdat, 'lum0distbrek', limt=[1e42, 1e46], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0sloplowr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
setp_varb(gdat, 'lum0slopuppr', limt=[0.5, 3.], popl=l, strgmodl=strgmodl)
# construct background surface brightness templates from the user input
gmod.sbrtbacknorm = [[] for c in gmod.indxback]
gmod.boolunifback = np.ones(gmod.numbback, dtype=bool)
for c in gmod.indxback:
gmod.sbrtbacknorm[c] = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
if gmod.typeback[c] == 'data':
gmod.sbrtbacknorm[c] = np.copy(gdat.sbrtdata)
gmod.sbrtbacknorm[c][np.where(gmod.sbrtbacknorm[c] == 0.)] = 1e-100
elif isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c]
elif isinstance(gmod.typeback[c], list) and isinstance(gmod.typeback[c], float):
gmod.sbrtbacknorm[c] = retr_spec(gdat, np.array([gmod.typeback[c]]), sind=np.array([gmod.typeback[c]]))[:, 0, None, None]
elif isinstance(gmod.typeback[c], np.ndarray) and gmod.typeback[c].ndim == 1:
gmod.sbrtbacknorm[c] = np.zeros((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull)) + gmod.typeback[c][:, None, None]
elif gmod.typeback[c].startswith('bfunfour') or gmod.typeback[c].startswith('bfunwfou'):
indxexpatemp = int(gmod.typeback[c][8:])
indxterm = indxexpatemp // ordrexpa**2
indxexpaxdat = (indxexpatemp % ordrexpa**2) // ordrexpa + 1
indxexpaydat = (indxexpatemp % ordrexpa**2) % ordrexpa + 1
if namebfun == 'bfunfour':
ampl = 1.
func = gdat.meanpara.bgalcart
if namebfun == 'bfunwfou':
functemp = np.exp(-0.5 * (gdat.meanpara.bgalcart / (1. / gdat.anglfact))**2)
ampl = np.sqrt(functemp)
func = functemp
argslgal = 2. * np.pi * indxexpaxdat * gdat.meanpara.lgalcart / gdat.maxmgangdata
argsbgal = 2. * np.pi * indxexpaydat * func / gdat.maxmgangdata
if indxterm == 0:
termfrst = np.sin(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 1:
termfrst = np.sin(argslgal)
termseco = ampl * np.cos(argsbgal)
if indxterm == 2:
termfrst = np.cos(argslgal)
termseco = ampl * np.sin(argsbgal)
if indxterm == 3:
termfrst = np.cos(argslgal)
termseco = ampl * np.cos(argsbgal)
gmod.sbrtbacknorm[c] = (termfrst[None, :] * termseco[:, None]).flatten()[None, :, None] * \
np.ones((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
else:
path = gdat.pathinpt + gmod.typeback[c]
gmod.sbrtbacknorm[c] = astropy.io.fits.getdata(path)
if gdat.typepixl == 'cart':
if not gdat.boolforccart:
if gmod.sbrtbacknorm[c].shape[2] != gdat.numbsidecart:
raise Exception('Provided background template must have the chosen image dimensions.')
gmod.sbrtbacknorm[c] = gmod.sbrtbacknorm[c].reshape((gmod.sbrtbacknorm[c].shape[0], -1, gmod.sbrtbacknorm[c].shape[-1]))
if gdat.typepixl == 'cart' and gdat.boolforccart:
sbrtbacknormtemp = np.empty((gdat.numbenerfull, gdat.numbpixlfull, gdat.numbevttfull))
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
sbrtbacknormtemp[i, :, m] = tdpy.retr_cart(gmod.sbrtbacknorm[c][i, :, m], \
numbsidelgal=gdat.numbsidecart, numbsidebgal=gdat.numbsidecart, \
minmlgal=gdat.anglfact*gdat.minmlgaldata, maxmlgal=gdat.anglfact*gdat.maxmlgaldata, \
minmbgal=gdat.anglfact*gdat.minmbgaldata, maxmbgal=gdat.anglfact*gdat.maxmbgaldata).flatten()
gmod.sbrtbacknorm[c] = sbrtbacknormtemp
# determine spatially uniform background templates
for i in gdat.indxenerfull:
for m in gdat.indxevttfull:
if np.std(gmod.sbrtbacknorm[c][i, :, m]) > 1e-6:
gmod.boolunifback[c] = False
boolzero = True
gmod.boolbfun = False
for c in gmod.indxback:
if np.amin(gmod.sbrtbacknorm[c]) < 0. and isinstance(gmod.typeback[c], str) and not gmod.typeback[c].startswith('bfun'):
raise Exception('Background templates must be positive-definite everywhere.')
if not np.isfinite(gmod.sbrtbacknorm[c]).all():
raise Exception('Background template is not finite.')
if np.amin(gmod.sbrtbacknorm[c]) > 0. or gmod.typeback[c] == 'data':
boolzero = False
if isinstance(gmod.typeback[c], str) and gmod.typeback[c].startswith('bfun'):
gmod.boolbfun = True
if boolzero and not gmod.boolbfun:
raise Exception('At least one background template must be positive everywhere.')
# temp -- does not take into account dark hosts
gmod.boolhost = gmod.typeemishost != 'none'
# type of PSF evaluation
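# 'full': both the diffuse emission and the point sources are convolved with the PSF
# 'kern': only the point-source kernel is evaluated (the diffuse emission is uniform)
# 'conv': only the diffuse emission is convolved (there are no point sources)
# 'none': no PSF evaluation is needed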
if gmod.maxmpara.numbelemtotl > 0 and gmod.boolelempsfnanyy:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# both the diffuse emission and the point sources need to be convolved with the PSF
typeevalpsfn = 'full'
else:
# the diffuse emission is uniform, so only the point-source kernel needs to be evaluated
typeevalpsfn = 'kern'
else:
if gmod.typeemishost != 'none' or not gmod.boolunifback.all():
# the background is convolved by a kernel, no point source exists
typeevalpsfn = 'conv'
else:
# the background is not convolved by a kernel, no point source exists
typeevalpsfn = 'none'
setp_varb(gdat, 'typeevalpsfn', valu=typeevalpsfn, strgmodl=strgmodl)
if gdat.typeverb > 1:
print('gmod.typeevalpsfn')
print(gmod.typeevalpsfn)
gmod.boolapplpsfn = gmod.typeevalpsfn != 'none'
### PSF model
if gmod.typeevalpsfn != 'none':
if gmod.typemodlpsfn == 'singgaus':
numbpsfpform = 1
elif gmod.typemodlpsfn == 'singking':
numbpsfpform = 2
elif gmod.typemodlpsfn == 'doubgaus':
numbpsfpform = 3
elif gmod.typemodlpsfn == 'gausking':
numbpsfpform = 4
elif gmod.typemodlpsfn == 'doubking':
numbpsfpform = 5
gmod.numbpsfptotl = numbpsfpform
if gdat.boolpriopsfninfo:
for i in gdat.indxener:
for m in gdat.indxevtt:
meansigc = gmod.psfpexpr[i * gmod.numbpsfptotl + m * gmod.numbpsfptotl * gdat.numbener]
stdvsigc = meansigc * 0.1
setp_varb(gdat, 'sigcen%02devt%d' % (i, m), mean=meansigc, stdv=stdvsigc, lablroot='$\sigma$', scal='gaus', \
strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
meangamc = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 1]
stdvgamc = meangamc * 0.1
setp_varb(gdat, 'gamcen%02devt%d' % (i, m), mean=meangamc, stdv=stdvgamc, strgmodl=strgmodl)
if gmod.typemodlpsfn == 'doubking':
meansigt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 2]
stdvsigt = meansigt * 0.1
setp_varb(gdat, 'sigten%02devt%d' % (i, m), mean=meansigt, stdv=stdvsigt, strgmodl=strgmodl)
meangamt = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 3]
stdvgamt = meangamt * 0.1
setp_varb(gdat, 'gamten%02devt%d' % (i, m), mean=meangamt, stdv=stdvgamt, strgmodl=strgmodl)
meanpsff = gmod.psfpexpr[i * numbpsfpform + m * numbpsfpform * gdat.numbener + 4]
stdvpsff = meanpsff * 0.1
setp_varb(gdat, 'psffen%02devt%d' % (i, m), mean=meanpsff, stdv=stdvpsff, strgmodl=strgmodl)
else:
if gdat.typeexpr == 'gene':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'ferm':
minmsigm = 0.1
maxmsigm = 10.
if gdat.typeexpr == 'hubb':
minmsigm = 0.01 / gdat.anglfact
maxmsigm = 0.1 / gdat.anglfact
if gdat.typeexpr == 'chan':
minmsigm = 0.1 / gdat.anglfact
maxmsigm = 2. / gdat.anglfact
minmgamm = 1.5
maxmgamm = 20.
setp_varb(gdat, 'sigc', minm=minmsigm, maxm=maxmsigm, lablroot='$\sigma_c$', ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'sigt', minm=minmsigm, maxm=maxmsigm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamc', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'gamt', minm=minmgamm, maxm=maxmgamm, ener='full', evtt='full', strgmodl=strgmodl)
setp_varb(gdat, 'psff', minm=0., maxm=1., ener='full', evtt='full', strgmodl=strgmodl)
# background
## number of background parameters
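# backgrounds with a fixed spectral shape (boolspecback) get a single normalization parameter;
# all others get one normalization parameter per energy bin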
numbbacp = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
numbbacp += 1
else:
numbbacp += gdat.numbener
## background parameter indices
gmod.indxbackbacp = np.zeros(numbbacp, dtype=int)
indxenerbacp = np.zeros(numbbacp, dtype=int)
cntr = 0
for c in gmod.indxback:
if gmod.boolspecback[c]:
gmod.indxbackbacp[cntr] = c
cntr += 1
else:
for i in gdat.indxener:
indxenerbacp[cntr] = i
gmod.indxbackbacp[cntr] = c
cntr += 1
# indices of background parameters for each background component
gmod.indxbacpback = [[] for c in gmod.indxback]
for c in gmod.indxback:
gmod.indxbacpback[c] = np.where((gmod.indxbackbacp == c))[0]
# list of names of diffuse components
gmod.listnamediff = []
for c in gmod.indxback:
gmod.listnamediff += ['back%04d' % c]
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
gmod.listnamediff += ['hostisf%d' % e]
if gmod.boollens:
gmod.listnamediff += ['lens']
# list of names of emission components
listnameecom = deepcopy(gmod.listnamediff)
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
if strgmodl == 'true' and gmod.numbelem[l] > 0 or strgmodl == 'fitt' and gmod.maxmpara.numbelem[l] > 0:
if not 'dfnc' in listnameecom:
listnameecom += ['dfnc']
if not 'dfncsubt' in listnameecom:
listnameecom += ['dfncsubt']
gmod.listnameecomtotl = listnameecom + ['modl']
for c in gmod.indxback:
setp_varb(gdat, 'cntpback%04d' % c, lablroot='$C_{%d}$' % c, minm=1., maxm=100., scal='logt', strgmodl=strgmodl)
gmod.listnamegcom = deepcopy(gmod.listnameecomtotl)
if gmod.boollens:
gmod.listnamegcom += ['bgrd']
if gmod.numbparaelem > 0 and gmod.boolelemsbrtextsbgrdanyy:
gmod.listnamegcom += ['bgrdgalx', 'bgrdexts']
numbdiff = len(gmod.listnamediff)
convdiff = np.zeros(numbdiff, dtype=bool)
for k, namediff in enumerate(gmod.listnamediff):
if not (gdat.boolthindata or gmod.typeevalpsfn == 'none' or gmod.typeevalpsfn == 'kern'):
if namediff.startswith('back'):
indx = int(namediff[-4:])
convdiff[k] = not gmod.boolunifback[indx]
else:
convdiff[k] = True
# element parameters that correlate with the statistical significance of the element
gmod.namepara.elemsign = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.elemsign[l] = 'flux'
if gmod.typeelem[l] == 'lens':
gmod.namepara.elemsign[l] = 'defs'
if gmod.typeelem[l].startswith('clus'):
gmod.namepara.elemsign[l] = 'nobj'
if gdat.typeverb > 0:
if strgmodl == 'true':
strgtemp = 'true'
if strgmodl == 'fitt':
strgtemp = 'fitting'
print('Building elements for the %s model...' % strgtemp)
# define the names and scalings of element parameters
gmod.namepara.genrelem = [[] for l in gmod.indxpopl]
gmod.listscalparagenrelem = [[] for l in gmod.indxpopl]
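# gmod.namepara.genrelem[l] and gmod.listscalparagenrelem[l] are parallel lists: one entry per generative
# parameter of an element in population l (position, amplitude, shape, spectral parameters) and the
# corresponding prior/scaling type ('self', 'logt', 'powr', etc.)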
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] = ['elin']
gmod.listscalparagenrelem[l] = ['logt']
elif gmod.typespatdist[l] == 'diskscal':
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'dexp']
elif gmod.typespatdist[l] == 'gangexpo':
gmod.namepara.genrelem[l] = ['gang', 'aang']
gmod.listscalparagenrelem[l] = ['expo', 'self']
elif gmod.typespatdist[l] == 'glc3':
gmod.namepara.genrelem[l] = ['dglc', 'thet', 'phii']
gmod.listscalparagenrelem[l] = ['powr', 'self', 'self']
else:
gmod.namepara.genrelem[l] = ['lgal', 'bgal']
gmod.listscalparagenrelem[l] = ['self', 'self']
# amplitude
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['lum0']
gmod.listscalparagenrelem[l] += ['dpowslopbrek']
elif gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['per0']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
elif gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['flux']
gmod.listscalparagenrelem[l] += [gmod.typeprioflux[l]]
elif gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['defs']
gmod.listscalparagenrelem[l] += ['powr']
elif gmod.typeelem[l].startswith('clus'):
gmod.namepara.genrelem[l] += ['nobj']
gmod.listscalparagenrelem[l] += ['powr']
# shape
if gmod.typeelem[l] == 'lghtgausbgrd' or gmod.typeelem[l] == 'clusvari':
gmod.namepara.genrelem[l] += ['gwdt']
gmod.listscalparagenrelem[l] += ['powr']
if gmod.typeelem[l] == 'lghtlinevoig':
gmod.namepara.genrelem[l] += ['sigm']
gmod.listscalparagenrelem[l] += ['logt']
gmod.namepara.genrelem[l] += ['gamm']
gmod.listscalparagenrelem[l] += ['logt']
# others
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.genrelem[l] += ['magf']
gmod.listscalparagenrelem[l] += ['lnormeanstdv']
gmod.namepara.genrelem[l] += ['geff']
gmod.listscalparagenrelem[l] += ['self']
elif gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.genrelem[l] += ['dlos']
gmod.listscalparagenrelem[l] += ['powr']
if gdat.numbener > 1 and gmod.typeelem[l].startswith('lghtpnts'):
if gmod.spectype[l] == 'colr':
for i in gdat.indxener:
if i == 0:
continue
gmod.namepara.genrelem[l] += ['sindcolr%04d' % i]
gmod.listscalparagenrelem[l] += ['self']
else:
gmod.namepara.genrelem[l] += ['sind']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'curv':
gmod.namepara.genrelem[l] += ['curv']
gmod.listscalparagenrelem[l] += ['self']
if gmod.spectype[l] == 'expc':
gmod.namepara.genrelem[l] += ['expc']
gmod.listscalparagenrelem[l] += ['self']
if gmod.typeelem[l] == 'lens':
if gdat.variasca:
gmod.namepara.genrelem[l] += ['asca']
gmod.listscalparagenrelem[l] += ['self']
if gdat.variacut:
gmod.namepara.genrelem[l] += ['acut']
gmod.listscalparagenrelem[l] += ['self']
# names of element parameters for each scaling
gmod.namepara.genrelemscal = [{} for l in gmod.indxpopl]
for l in gmod.indxpopl:
for scaltype in gdat.listscaltype:
gmod.namepara.genrelemscal[l][scaltype] = []
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if scaltype == gmod.listscalparagenrelem[l][k]:
gmod.namepara.genrelemscal[l][scaltype].append(nameparagenrelem)
# variables whose marginal distributions and pair-correlations will be plotted
gmod.namepara.derielemodim = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.derielemodim[l] = deepcopy(gmod.namepara.genrelem[l])
gmod.namepara.derielemodim[l] += ['deltllik']
if gdat.boolbinsspat:
if not 'lgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['lgal']
if not 'bgal' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['bgal']
if not 'gang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['gang']
if not 'aang' in gmod.namepara.derielemodim[l]:
gmod.namepara.derielemodim[l] += ['aang']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.derielemodim[l] += ['cnts']
if gdat.typeexpr == 'ferm':
gmod.namepara.derielemodim[l] += ['sbrt0018']
if gmod.typeelem[l] == 'lghtpntsagnntrue':
gmod.namepara.derielemodim[l] += ['reds']
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
if gmod.typeelem[l] == 'lghtpntspuls':
gmod.namepara.derielemodim[l] += ['lumi']
gmod.namepara.derielemodim[l] += ['flux']
gmod.namepara.derielemodim[l] += ['mass']
gmod.namepara.derielemodim[l] += ['dlos']
if gmod.typeelem[l] == 'lens':
gmod.namepara.derielemodim[l] += ['mcut', 'diss', 'rele', 'reln', 'relk', 'relf', 'relm', 'reld', 'relc']
#for k in range(len(gmod.namepara.derielemodim[l])):
# gmod.namepara.derielemodim[l][k] += 'pop%d' % l
# check later
# temp
#if strgmodl == 'fitt':
# for q in gdat.indxrefr:
# if gmod.nameparagenrelemampl[l] in gdat.refr.namepara.elem[q]:
# gmod.namepara.derielemodim[l].append('aerr' + gdat.listnamerefr[q])
if gdat.typeverb > 1:
print('gmod.namepara.derielemodim')
print(gmod.namepara.derielemodim)
# derived element parameters
gmod.namepara.derielem = gmod.namepara.derielemodim[:]
if gdat.typeverb > 1:
print('gmod.namepara.derielem')
print(gmod.namepara.derielem)
# derived parameters
gmod.listnameparaderitotl = [temptemp for temp in gmod.namepara.derielem for temptemp in temp]
#gmod.listnameparaderitotl += gmod.namepara.scal
for namediff in gmod.listnamediff:
gmod.listnameparaderitotl += ['cntp' + namediff]
if gdat.typeverb > 1:
print('gmod.listnameparaderitotl')
print(gmod.listnameparaderitotl)
if strgmodl == 'fitt':
# add reference element parameters that are not available in the fitting model
gdat.refr.namepara.elemonly = [[[] for l in gmod.indxpopl] for q in gdat.indxrefr]
gmod.namepara.extrelem = [[] for l in gmod.indxpopl]
for q in gdat.indxrefr:
if gdat.refr.numbelem[q] == 0:
continue
for name in gdat.refr.namepara.elem[q]:
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght') and (name == 'defs' or name == 'acut' or name == 'asca' or name == 'mass'):
continue
if gmod.typeelem[l] == ('lens') and (name == 'cnts' or name == 'flux' or name == 'spec' or name == 'sind'):
continue
if not name in gmod.namepara.derielemodim[l]:
nametotl = name + gdat.listnamerefr[q]
if name == 'etag':
continue
gmod.namepara.derielemodim[l].append(nametotl)
if gdat.refr.numbelem[q] == 0:
continue
gdat.refr.namepara.elemonly[q][l].append(name)
if not nametotl in gmod.namepara.extrelem[l]:
gmod.namepara.extrelem[l].append(nametotl)
#if name == 'reds':
# for nametemp in ['lumi', 'dlos']:
# nametemptemp = nametemp + gdat.listnamerefr[q]
# if not nametemptemp in gmod.namepara.extrelem[l]:
# gmod.namepara.derielemodim[l].append(nametemp + gdat.listnamerefr[q])
# gmod.namepara.extrelem[l].append(nametemptemp)
if gdat.typeverb > 1:
print('gdat.refr.namepara.elemonly')
print(gdat.refr.namepara.elemonly)
if gdat.typeexpr == 'chan' and gdat.typedata == 'inpt':
for l in gmod.indxpopl:
if gmod.typeelem[l] == 'lghtpnts':
gmod.namepara.extrelem[l].append('lumiwo08')
gmod.namepara.derielemodim[l].append('lumiwo08')
if gdat.typeverb > 1:
print('gmod.namepara.extrelem')
print(gmod.namepara.extrelem)
# defaults
gmod.liststrgpdfnmodu = [[] for l in gmod.indxpopl]
gmod.namepara.genrelemmodu = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lght'):
if gdat.typeexpr == 'ferm' and gdat.lgalcntr == 0.:
if l == 1:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
if l == 2:
gmod.liststrgpdfnmodu[l] += ['tmplnfwp']
gmod.namepara.genrelemmodu[l] += ['lgalbgal']
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
for liststrg in [gmod.namepara.genrelem[l], gmod.namepara.derielemodim[l]]:
for strgthis in liststrg:
if not strgthis in gmod.namepara.elem[l]:
gmod.namepara.elem[l].append(strgthis)
# temp
for l in gmod.indxpopl:
if gmod.typeelem[l].startswith('lghtline'):
gmod.namepara.genrelem[l] += ['spec']
if gmod.typeelem[l].startswith('lght'):
gmod.namepara.genrelem[l] += ['spec', 'specplot']
if gmod.typeelem[l] == 'lens':
gmod.namepara.genrelem[l] += ['deflprof']
#gmod.namepara.genrelemeval = [[] for l in gmod.indxpopl]
#for l in gmod.indxpopl:
# if gmod.typeelem[l].startswith('clus'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'nobj']
# if gmod.typeelem[l] == 'clusvari':
# gmod.namepara.genrelemeval[l] += ['gwdt']
# if gmod.typeelem[l] == 'lens':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'defs', 'asca', 'acut']
# if gmod.typeelem[l].startswith('lghtline'):
# gmod.namepara.genrelemeval[l] = ['elin', 'spec']
# elif gmod.typeelem[l] == 'lghtgausbgrd':
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'gwdt', 'spec']
# elif gmod.typeelem[l].startswith('lght'):
# gmod.namepara.genrelemeval[l] = ['lgal', 'bgal', 'spec']
## element legends
lablpopl = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gdat.numbgrid > 1:
if gmod.typeelem[l] == 'lghtpnts':
lablpopl[l] = 'FPS'
if gmod.typeelem[l] == 'lghtgausbgrd':
lablpopl[l] = 'BGS'
else:
if gmod.typeelem[l] == 'lghtpntspuls':
lablpopl[l] = 'Pulsar'
elif gmod.typeelem[l].startswith('lghtpntsagnn'):
lablpopl[l] = 'AGN'
elif gmod.typeelem[l].startswith('lghtpnts'):
lablpopl[l] = 'PS'
if gmod.typeelem[l] == 'lens':
lablpopl[l] = 'Subhalo'
if gmod.typeelem[l].startswith('clus'):
lablpopl[l] = 'Cluster'
if gmod.typeelem[l].startswith('lghtline'):
lablpopl[l]= 'Line'
setp_varb(gdat, 'lablpopl', valu=lablpopl, strgmodl=strgmodl)
if strgmodl == 'true':
gmod.indxpoplassc = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
if gmod.numbpopl == 3 and gmod.typeelem[1] == 'lens':
gmod.indxpoplassc[l] = [l]
else:
gmod.indxpoplassc[l] = gmod.indxpopl
# variables for which two dimensional histograms will be calculated
gmod.namepara.genrelemcorr = [[] for l in gmod.indxpopl]
if gdat.boolplotelemcorr:
for l in gmod.indxpopl:
for strgfeat in gmod.namepara.derielemodim[l]:
gmod.namepara.genrelemcorr[l].append(strgfeat)
# number of element parameters
if gmod.numbpopl > 0:
gmod.numbparagenrelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelemsing = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcuml = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparagenrelemcumr = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaderielem = np.zeros(gmod.numbpopl, dtype=int)
gmod.numbparaelem = np.zeros(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
# number of generative element parameters for a single element of a specific population
gmod.numbparagenrelemsing[l] = len(gmod.namepara.genrelem[l])
# number of derived element parameters for a single element of a specific population
gmod.numbparaderielemsing[l] = len(gmod.namepara.derielem[l])
# number of element parameters for a single element of a specific population
gmod.numbparaelemsing[l] = len(gmod.namepara.elem[l])
# number of generative element parameters for all elements of a specific population
gmod.numbparagenrelem[l] = gmod.numbparagenrelemsing[l] * gmod.maxmpara.numbelem[l]
# number of generative element parameters up to the beginning of a population
gmod.numbparagenrelemcuml[l] = np.sum(gmod.numbparagenrelem[:l])
# number of generative element parameters up to the end of a population
gmod.numbparagenrelemcumr[l] = np.sum(gmod.numbparagenrelem[:l+1])
# number of derived element parameters for all elements of a specific population
gmod.numbparaderielem[l] = gmod.numbparaderielemsing[l] * gmod.numbelem[l]
# number of element parameters for all elements of a specific population
gmod.numbparaelem[l] = gmod.numbparaelemsing[l] * gmod.numbelem[l]
# number of generative element parameters summed over all populations
gmod.numbparagenrelemtotl = np.sum(gmod.numbparagenrelem)
# number of derived element parameters summed over all populations
gmod.numbparaderielemtotl = np.sum(gmod.numbparaderielem)
# number of element parameters summed over all populations
gmod.numbparaelemtotl = np.sum(gmod.numbparaelem)
gmod.indxparagenrelemsing = []
for l in gmod.indxpopl:
gmod.indxparagenrelemsing.append(np.arange(gmod.numbparagenrelemsing[l]))
gmod.indxparaderielemsing = []
for l in gmod.indxpopl:
gmod.indxparaderielemsing.append(np.arange(gmod.numbparaderielemsing[l]))
gmod.indxparaelemsing = []
for l in gmod.indxpopl:
gmod.indxparaelemsing.append(np.arange(gmod.numbparaelemsing[l]))
# size of the auxiliary variable probability density vector
if gmod.maxmpara.numbelemtotl > 0:
gmod.numblpri = 3 + gmod.numbparagenrelem * gmod.numbpopl
else:
gmod.numblpri = 0
if gdat.penalpridiff:
gmod.numblpri += 1
indxlpri = np.arange(gmod.numblpri)
# append the population tags to element parameter names
#for l in gmod.indxpopl:
# gmod.namepara.genrelem[l] = [gmod.namepara.genrelem[l][g] + 'pop%d' % l for g in gmod.indxparagenrelemsing[l]]
gmod.boolcompposi = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.boolcompposi[l] = np.zeros(gmod.numbparagenrelemsing[l], dtype=bool)
if gmod.typeelem[l].startswith('lghtline'):
gmod.boolcompposi[l][0] = True
else:
gmod.boolcompposi[l][0] = True
gmod.boolcompposi[l][1] = True
# list of strings across all populations
## all (generative and derived) element parameters
gmod.numbparaelem = len(gmod.namepara.elem)
gmod.indxparaelem = np.arange(gmod.numbparaelem)
# flattened list of generative element parameters
gmod.listnameparagenfelem = []
for l in gmod.indxpopl:
for nameparagenrelem in gmod.namepara.genrelem[l]:
gmod.listnameparagenfelem.append(nameparagenrelem + 'pop%d' % l)
# concatenated list of flattened generative and derived element parameters
gmod.listnameparatotlelem = gmod.listnameparagenfelem + gmod.namepara.derielem
gmod.numbparaelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
gmod.numbparaelem[l] = len(gmod.namepara.elem[l])
numbdeflsubhplot = 2
numbdeflsingplot = numbdeflsubhplot
if gmod.numbparaelem > 0:
numbdeflsingplot += 3
gmod.convdiffanyy = True in convdiff
cntr = tdpy.cntr()
if gmod.boollens:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
massfrombein = retr_massfrombein(gdat, adissour, adishost, adishostsour)
mdencrit = retr_mdencrit(gdat, adissour, adishost, adishostsour)
# object of parameter indices
gmod.indxpara = tdpy.gdatstrt()
# define parameter indices
if gmod.numbparaelem > 0:
# number of elements
#gmod.indxpara.numbelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
indx = cntr.incr()
setattr(gmod.indxpara, 'numbelempop%d' % l, indx)
#gmod.indxpara.numbelem[l] = indx
# hyperparameters
## mean number of elements
if gmod.typemodltran == 'pois':
#gmod.indxpara.meanelem = np.empty(gmod.numbpopl, dtype=int)
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
indx = cntr.incr()
setattr(gmod.indxpara, 'meanelempop%d' % l, indx)
#gmod.indxpara.meanelem[l] = indx
## parameters parametrizing priors on element parameters
liststrgvarb = []
for l in gmod.indxpopl:
if gmod.maxmpara.numbelem[l] > 0:
for strgpdfnelemgenr, strgfeat in zip(gmod.listscalparagenrelem[l], gmod.namepara.genrelem[l]):
if strgpdfnelemgenr == 'expo' or strgpdfnelemgenr == 'dexp':
liststrgvarb += [strgfeat + 'distscal']
if strgpdfnelemgenr == 'powr':
liststrgvarb += ['slopprio' + strgfeat + 'pop%d' % l]
if strgpdfnelemgenr == 'dpow':
liststrgvarb += [strgfeat + 'distbrek']
liststrgvarb += [strgfeat + 'sloplowr']
liststrgvarb += [strgfeat + 'slopuppr']
if strgpdfnelemgenr == 'gausmean' or strgpdfnelemgenr == 'lnormean':
liststrgvarb += [strgfeat + 'distmean']
if strgpdfnelemgenr == 'gausstdv' or strgpdfnelemgenr == 'lnorstdv':
liststrgvarb += [strgfeat + 'diststdv']
if strgpdfnelemgenr == 'gausmeanstdv' or strgpdfnelemgenr == 'lnormeanstdv':
liststrgvarb += [strgfeat + 'distmean', strgfeat + 'diststdv']
for strgvarb in liststrgvarb:
setattr(gmod.indxpara, strgvarb, np.zeros(gmod.numbpopl, dtype=int) - 1)
for l in gmod.indxpopl:
strgpopl = 'pop%d' % l
if gmod.maxmpara.numbelem[l] > 0:
for k, nameparagenrelem in enumerate(gmod.namepara.genrelem[l]):
if gmod.listscalparagenrelem[l][k] == 'self':
continue
indx = cntr.incr()
if gmod.listscalparagenrelem[l][k] == 'dpow':
for nametemp in ['brek', 'sloplowr', 'slopuppr']:
strg = '%s' % nametemp + nameparagenrelem
setattr(gmod.indxpara, strg, indx)
else:
if gmod.listscalparagenrelem[l][k] == 'expo' or gmod.listscalparagenrelem[l][k] == 'dexp':
strghypr = 'scal'
if gmod.listscalparagenrelem[l][k] == 'powr':
strghypr = 'slop'
if gmod.listscalparagenrelem[l][k] == 'gausmean' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnormean' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'mean'
if gmod.listscalparagenrelem[l][k] == 'gausstdv' or gmod.listscalparagenrelem[l][k] == 'gausmeanstdv' or \
gmod.listscalparagenrelem[l][k] == 'lnorstdv' or gmod.listscalparagenrelem[l][k] == 'lnormeanstdv':
strghypr = 'stdv'
strg = strghypr + 'prio' + nameparagenrelem + 'pop%d' % l
setattr(gmod.indxpara, strg, indx)
# group PSF parameters
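# the PSF parameter indices defined below are later collected by matching the attribute-name
# prefixes 'sigce', 'sigte', 'gamce', 'gamte' and 'psffe'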
if gmod.typeevalpsfn == 'kern' or gmod.typeevalpsfn == 'full':
for m in gdat.indxevtt:
for i in gdat.indxener:
setattr(gmod.indxpara, 'sigcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking' or gmod.typemodlpsfn == 'singking':
setattr(gmod.indxpara, 'gamcen%02devt%d' % (i, m), cntr.incr())
if gmod.typemodlpsfn == 'doubking':
setattr(gmod.indxpara, 'sigten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'gamten%02devt%d' % (i, m), cntr.incr())
setattr(gmod.indxpara, 'psffen%02devt%d' % (i, m), cntr.incr())
gmod.indxpara.psfp = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith('sigce') or strg.startswith('sigte') or strg.startswith('gamce') or strg.startswith('gamte') or strg.startswith('psffe'):
gmod.indxpara.psfp.append(valu)
gmod.indxpara.psfp = np.array(gmod.indxpara.psfp)
gmod.numbpsfptotlevtt = gdat.numbevtt * gmod.numbpsfptotl
gmod.numbpsfptotlener = gdat.numbener * gmod.numbpsfptotl
numbpsfp = gmod.numbpsfptotl * gdat.numbener * gdat.numbevtt
indxpsfpform = np.arange(numbpsfpform)
indxpsfptotl = np.arange(gmod.numbpsfptotl)
indxpsfp = np.arange(numbpsfp)
gmod.indxpara.psfp = np.sort(gmod.indxpara.psfp)
gmod.indxparapsfpinit = gmod.indxpara.psfp[0]
# group background parameters
gmod.indxpara.bacp = []
for c in gmod.indxback:
if gmod.boolspecback[c]:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04d' % c, indx)
gmod.indxpara.bacp.append(indx)
else:
for i in gdat.indxener:
indx = cntr.incr()
setattr(gmod.indxpara, 'bacpback%04den%02d' % (c, i), indx)
gmod.indxpara.bacp.append(indx)
gmod.indxpara.bacp = np.array(gmod.indxpara.bacp)
# temp
#gmod.indxpara.anglsour = []
#gmod.indxpara.anglhost = []
#gmod.indxpara.angllens = []
if gmod.typeemishost != 'none':
gmod.indxpara.specsour = []
gmod.indxpara.spechost = []
if gmod.boollens:
gmod.indxpara.lgalsour = cntr.incr()
gmod.indxpara.bgalsour = cntr.incr()
gmod.indxpara.fluxsour = cntr.incr()
if gdat.numbener > 1:
gmod.indxpara.sindsour = cntr.incr()
gmod.indxpara.sizesour = cntr.incr()
gmod.indxpara.ellpsour = cntr.incr()
gmod.indxpara.anglsour = cntr.incr()
if gmod.typeemishost != 'none' or gmod.boollens:
for e in gmod.indxsersfgrd:
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'lgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'bgalhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'fluxhostisf%d' % e, cntr.incr())
if gdat.numbener > 1:
setattr(gmod.indxpara, 'sindhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'sizehostisf%d' % e, cntr.incr())
if gmod.boollens:
setattr(gmod.indxpara, 'beinhostisf%d' % e, cntr.incr())
if gmod.typeemishost != 'none':
setattr(gmod.indxpara, 'ellphostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'anglhostisf%d' % e, cntr.incr())
setattr(gmod.indxpara, 'serihostisf%d' % e, cntr.incr())
if gmod.boollens:
gmod.indxpara.sherextr = cntr.incr()
gmod.indxpara.sangextr = cntr.incr()
gmod.indxpara.sour = []
if gmod.boollens and gmod.typeemishost == 'none':
raise Exception('Lensing cannot be modeled without host galaxy emission.')
# collect groups of parameters
if gdat.typeexpr == 'hubb':
gmod.listnamecomplens = ['hostlght', 'hostlens', 'sour', 'extr']
for namecomplens in gmod.listnamecomplens:
setattr(gmod, 'liststrg' + namecomplens, [])
setattr(gmod.indxpara, namecomplens, [])
if gmod.boollens or gmod.typeemishost != 'none':
gmod.liststrghostlght += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
gmod.liststrghostlens += ['lgalhost', 'bgalhost', 'ellphost', 'anglhost']
if gmod.typeemishost != 'none':
gmod.liststrghostlght += ['fluxhost', 'sizehost', 'serihost']
if gdat.numbener > 1:
gmod.liststrghostlght += ['sindhost']
if gmod.boollens:
gmod.liststrghostlens += ['beinhost']
gmod.liststrgextr += ['sherextr', 'sangextr']
gmod.liststrgsour += ['lgalsour', 'bgalsour', 'fluxsour', 'sizesour', 'ellpsour', 'anglsour']
if gdat.numbener > 1:
gmod.liststrgsour += ['sindsour']
# collect indices of lensing-related parameters by matching their names against the groups defined above
for strg, valu in gmod.indxpara.__dict__.items():
if isinstance(valu, list) or isinstance(valu, np.ndarray):
continue
if gdat.typeexpr == 'hubb':
for namecomplens in gmod.listnamecomplens:
for strgtemp in getattr(gmod, 'liststrg' + namecomplens):
if strg.startswith(strgtemp):
getattr(gmod.indxpara, namecomplens).append(valu)
if strg.startswith('fluxsour') or strg.startswith('sindsour'):
gmod.indxpara.specsour.append(valu)
if strg.startswith('fluxhost') or strg.startswith('sindhost'):
gmod.indxpara.spechost.append(valu)
if gmod.boollens or gmod.boolhost:
gmod.indxpara.host = gmod.indxpara.hostlght + gmod.indxpara.hostlens
gmod.indxpara.lens = gmod.indxpara.host + gmod.indxpara.sour + gmod.indxpara.extr
## number of model spectral parameters for each population
#numbspep = np.empty(gmod.numbpopl, dtype=int)
#liststrgspep = [[] for l in range(gmod.numbpopl)]
#for l in gmod.indxpopl:
# if gdat.numbener > 1:
# liststrgspep[l] += ['sind']
# if gmod.spectype[l] == 'expc':
# liststrgspep[l] += ['expc']
# if gmod.spectype[l] == 'curv':
# liststrgspep[l] = ['curv']
# numbspep[l] = len(liststrgspep[l])
def setp_paragenrscalbase(gdat, strgmodl='fitt'):
'''
Setup labels and scales for base parameters
'''
print('setp_paragenrscalbase(): Building the %s model base parameter names and scales...' % strgmodl)
gmod = getattr(gdat, strgmodl)
listlablback = []
for nameback in gmod.listnameback:
if nameback == 'isot':
listlablback.append('Isotropic')
listlablback.append(r'$\mathcal{I}$')
if nameback == 'fdfm':
listlablback.append('FDM')
listlablback.append(r'$\mathcal{D}$')
if nameback == 'dark':
listlablback.append('NFW')
listlablback.append(r'$\mathcal{D}_{dark}$')
if nameback == 'part':
listlablback.append('Particle Back.')
listlablback.append(r'$\mathcal{I}_p$')
# background templates
listlablsbrt = deepcopy(listlablback)
numblablsbrt = 0
for l in gmod.indxpopl:
if gmod.boolelemsbrt[l]:
listlablsbrt.append(gmod.lablpopl[l])
listlablsbrt.append(gmod.lablpopl[l] + ' subt')
numblablsbrt += 2
if gmod.boollens:
listlablsbrt.append('Source')
numblablsbrt += 1
if gmod.typeemishost != 'none':
for e in gmod.indxsersfgrd:
listlablsbrt.append('Host %d' % e)
numblablsbrt += 1
if gmod.numbpopl > 0:
if 'clus' in gmod.typeelem or 'clusvari' in gmod.typeelem:
listlablsbrt.append('Uniform')
numblablsbrt += 1
listlablsbrtspec = ['Data']
listlablsbrtspec += deepcopy(listlablsbrt)
if len(listlablsbrt) > 1:
listlablsbrtspec.append('Total Model')
numblablsbrtspec = len(listlablsbrtspec)
# number of generative parameters per element, depends on population
#numbparaelem = gmod.numbparagenrelem + numbparaelemderi
# maximum total number of parameters
#numbparagenrfull = gmod.numbparagenrbase + gmod.numbparaelem
#numbparaelemkind = gmod.numbparagenrbase
#for l in gmod.indxpopl:
# numbparaelemkind += gmod.numbparagenrelemsing[l]
#nameparagenrbase
#gmod.namepara.genrelem
#listnameparaderifixd
#listnameparaderielem
#gmod.namepara.genrelemextd = gmod.namepara.genrelem * maxm.numbelem
#listnameparaderielemextd = gmod.namepara.genrelem * maxm.numbelem
gmod.listindxparakindscal = {}
for scaltype in gdat.listscaltype:
gmod.listindxparakindscal[scaltype] = np.where(scaltype == gmod.listscalparakind)[0]
#
## stack
## gmod.listnameparastck
#gmod.listnameparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#gmod.listscalparastck = np.zeros(gmod.maxmnumbpara, dtype=object)
#
#gmod.listnameparastck[gmod.indxparagenrbase] = gmod.nameparagenrbase
#gmod.listscalparastck[gmod.indxparagenrbase] = gmod.listscalparagenrbase
#for k in range(gmod.numbparaelem):
# for l in gmod.indxpopl:
# if k >= gmod.numbparagenrelemcuml[l]:
# indxpopltemp = l
# indxelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) // gmod.numbparagenrelemsing[indxpopltemp]
# gmod.indxparagenrelemtemp = (k - gmod.numbparagenrelemcuml[indxpopltemp]) % gmod.numbparagenrelemsing[indxpopltemp]
# break
# gmod.listnameparastck[gmod.numbparagenrbase+k] = '%spop%d%04d' % (gmod.namepara.genrelem[indxpopltemp][gmod.indxparagenrelemtemp], indxpopltemp, indxelemtemp)
# gmod.listscalparastck[gmod.numbparagenrbase+k] = gmod.listscalparagenrelem[indxpopltemp][gmod.indxparagenrelemtemp]
#
#
#if np.where(gmod.listscalpara == 0)[0].size > 0:
# print('gmod.listscalpara[gmod.indxparagenrbase]')
# print(gmod.listscalpara[gmod.indxparagenrbase])
# raise Exception('')
#
## labels and scales for variables
if gmod.boollens:
setattr(gmod.lablrootpara, 'masssubhintg', r'$M_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhdelt', r'$\rho_{\rm{sub}}$')
setattr(gmod.lablrootpara, 'masssubhintgbein', r'$M_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhdeltbein', r'$\rho_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'masssubhintgunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'masssubhintgbeinunit', '$10^9 M_{\odot}$')
setattr(gmod.lablrootpara, 'masssubhdeltbeinunit', '$M_{\odot}$/kpc')
setattr(gmod.lablrootpara, 'fracsubhintg', r'f_{\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhdelt', r'f_{\rho,\rm{sub}}')
setattr(gmod.lablrootpara, 'fracsubhintgbein', r'$f_{\rm{sub,E}}$')
setattr(gmod.lablrootpara, 'fracsubhdeltbein', r'$f_{\rho,\rm{sub,E}}$')
for e in gmod.indxsersfgrd:
setattr(gmod.lablrootpara, 'masshostisf%dbein' % e, r'$M_{\rm{hst,%d,C}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintg' % e, r'$M_{\rm{hst,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddelt' % e, r'$M_{\rm{hst,%d}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%dintgbein' % e, r'$M_{\rm{hst,E,%d<}}$' % e)
setattr(gmod.lablrootpara, 'masshostisf%ddeltbein' % e, r'$M_{\rm{hst,E,%d}}$' % e)
for namevarb in ['fracsubh', 'masssubh']:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scal' + namevarb + strgcalcmasssubh + nameeval, 'logt')
for e in gmod.indxsersfgrd:
setattr(gdat, 'scalmasshostisf%d' % e + 'bein', 'logt')
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
for nameeval in ['', 'bein']:
setattr(gdat, 'scalmasshostisf%d' % e + strgcalcmasssubh + nameeval, 'logt')
# scalar variable setup
gdat.lablhistcntplowrdfncsubten00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncsubten00evt0 = 'N_{pix,h}'
gdat.lablhistcntplowrdfncen00evt0 = 'N_{pix,l}'
gdat.lablhistcntphigrdfncen00evt0 = 'N_{pix,h}'
gdat.lablbooldfncsubt = 'H'
gdat.lablpriofactdoff = r'$\alpha_{p}$'
gmod.scalpriofactdoff = 'self'
gdat.minmreds = 0.
gdat.maxmreds = 1.5
gdat.minmmagt = 19.
gdat.maxmmagt = 28.
gmod.scalpara.numbelem = 'logt'
gmod.scalpara.lliktotl = 'logt'
gdat.lablener = 'E'
#gdat.lablenertotl = '$%s$ [%s]' % (gdat.lablener, gdat.strgenerunit)
# width of the Gaussian clusters
gdat.lablgwdt = r'\sigma_G'
gdat.lablgang = r'\theta'
gdat.lablaang = r'\phi'
gdat.labllgalunit = gdat.lablgangunit
gdat.lablbgalunit = gdat.lablgangunit
gdat.lablanglfromhost = r'\theta_{\rm{0,hst}}'
gdat.lablanglfromhostunit = gdat.lablgangunit
gdat.labldefs = r'\alpha_s'
gdat.lablflux = 'f'
gdat.lablnobj = 'p'
gdat.lablelin = r'\mathcal{E}'
gdat.lablsbrt = r'\Sigma'
gdat.labldeflprof = r'\alpha_a'
gdat.labldeflprofunit = u'$^{\prime\prime}$'
gdat.strgenerkevv = 'keV'
gdat.strgenergevv = 'GeV'
gdat.strgenerergs = 'erg'
gdat.strgenerimum = '\mu m^{-1}'
gdat.labldefsunit = u'$^{\prime\prime}$'
gdat.lablprat = 'cm$^{-2}$ s$^{-1}$'
### labels for derived fixed dimensional parameters
if gdat.boolbinsener:
for i in gdat.indxener:
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubten%02d' % i, 'f_{D/ST,%d}' % i)
else:
gmod.lablrootpara.fracsdenmeandarkdfncsubt = 'f_{D/ST}'
setattr(gmod.lablrootpara, 'fracsdenmeandarkdfncsubt', 'f_{D/ST}')
### labels for background units
if gdat.typeexpr == 'ferm':
for nameenerscaltype in ['en00', 'en01', 'en02', 'en03']:
for labltemptemp in ['flux', 'sbrt']:
# define the label
if nameenerscaltype == 'en00':
strgenerscal = '%s' % labltemptemp
if nameenerscaltype == 'en01':
strgenerscal = 'E%s' % labltemptemp
if nameenerscaltype == 'en02':
strgenerscal = 'E^2%s' % labltemptemp
if nameenerscaltype == 'en03':
strgenerscal = '%s' % labltemptemp
labl = '%s' % strgenerscal
for nameenerunit in ['gevv', 'ergs', 'kevv', 'imum']:
strgenerunit = getattr(gdat, 'strgener' + nameenerunit)
if nameenerscaltype == 'en00':
strgenerscalunit = '%s$^{-1}$' % strgenerunit
if nameenerscaltype == 'en01':
strgenerscalunit = ''
if nameenerscaltype == 'en02':
strgenerscalunit = '%s' % strgenerunit
if nameenerscaltype == 'en03':
strgenerscalunit = '%s' % strgenerunit
# define the label unit
for namesoldunit in ['ster', 'degr']:
if labltemptemp == 'flux':
lablunit = '%s %s' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'flux' + nameenerscaltype + nameenerunit + 'unit', lablunit)
else:
if namesoldunit == 'ster':
lablunit = '%s %s sr$^{-1}$' % (strgenerscalunit, gdat.lablprat)
if namesoldunit == 'degr':
lablunit = '%s %s deg$^{-2}$' % (strgenerscalunit, gdat.lablprat)
setattr(gmod.lablunitpara, 'sbrt' + nameenerscaltype + nameenerunit + namesoldunit + 'unit', lablunit)
if gdat.boolbinsener:
gdat.lablfluxunit = getattr(gmod.lablunitpara, 'fluxen00' + gdat.nameenerunit + 'unit')
gdat.lablsbrtunit = getattr(gmod.lablunitpara, 'sbrten00' + gdat.nameenerunit + 'sterunit')
gdat.lablexpo = r'$\epsilon$'
gdat.lablexpounit = 'cm$^2$ s'
gdat.lablprvl = '$p$'
gdat.lablreds = 'z'
gdat.lablmagt = 'm_R'
gdat.lablper0 = 'P_0'
gmod.scalper0plot = 'logt'
gdat.labldglc = 'd_{gc}'
gmod.scaldglcplot = 'logt'
gdat.labldlos = 'd_{los}'
gmod.scaldlosplot = 'logt'
if gdat.typeexpr == 'ferm':
gdat.labldlosunit = 'kpc'
gdat.labllumi = r'L_{\gamma}'
if gdat.typeexpr == 'chan':
gdat.labldlosunit = 'Mpc'
gdat.labllumi = r'L_{X}'
gdat.labllum0 = r'L_{X, 0}'
gdat.lablgeff = r'\eta_{\gamma}'
gmod.scalgeffplot = 'logt'
gmod.scallumiplot = 'logt'
gdat.labllumiunit = 'erg s$^{-1}$'
gdat.labllum0unit = 'erg s$^{-1}$'
gdat.lablthet = r'\theta_{gc}'
gmod.scalthetplot = 'self'
gdat.lablphii = r'\phi_{gc}'
gmod.scalphiiplot = 'self'
setattr(gmod.lablrootpara, 'magf', 'B')
setattr(gdat, 'scalmagfplot', 'logt')
setattr(gmod.lablrootpara, 'per1', 'P_1')
if gdat.typedata == 'inpt':
gdat.minmpara.per0 = 1e-3
gdat.maxmpara.per0 = 1e1
gdat.minmpara.per1 = 1e-20
gdat.maxmpara.per1 = 1e-10
gdat.minmpara.flux0400 = 1e-1
gdat.maxmpara.flux0400 = 1e4
setattr(gdat, 'scalper1plot', 'logt')
setattr(gmod.lablrootpara, 'flux0400', 'S_{400}')
setattr(gdat, 'scalflux0400plot', 'logt')
for q in gdat.indxrefr:
setattr(gmod.lablrootpara, 'aerr' + gdat.listnamerefr[q], '\Delta_{%d}' % q)
gdat.lablsigm = '\sigma_l'
gdat.lablgamm = '\gamma_l'
gdat.lablbcom = '\eta'
gdat.lablinfopost = 'D_{KL}'
gdat.lablinfopostunit = 'nat'
gdat.lablinfoprio = 'D_{KL,pr}'
gdat.lablinfopriounit = 'nat'
gdat.labllevipost = '\ln P(D)'
gdat.labllevipostunit = 'nat'
gdat.lablleviprio = '\ln P_{pr}(D)'
gdat.labllevipriounit = 'nat'
gdat.lablsind = 's'
if gdat.boolbinsener:
for i in gdat.indxenerinde:
setattr(gmod.lablrootpara, 'sindcolr%04d' % i, 's_%d' % i)
gdat.lablexpcunit = gdat.strgenerunit
gdat.labllliktotl = r'\ln P(D|M)'
gdat.labllpripena = r'\ln P(N)'
gdat.lablasca = r'\theta_s'
gdat.lablascaunit = gdat.lablgangunit
gdat.lablacut = r'\theta_c'
gdat.lablacutunit = gdat.lablgangunit
gdat.lablmcut = r'M_{c,n}'
gdat.lablmcutunit = r'$M_{\odot}$'
gdat.lablmcutcorr = r'\bar{M}_{c,n}'
gdat.lablmcutcorrunit = r'$M_{\odot}$'
gdat.lablspec = gdat.lablflux
gdat.lablspecunit = gdat.lablfluxunit
gdat.lablspecplot = gdat.lablflux
gdat.lablspecplotunit = gdat.lablfluxunit
gdat.lablcnts = 'C'
gdat.labldeltllik = r'\Delta_n \ln P(D|M)'
gdat.labldiss = r'\theta_{sa}'
gdat.labldissunit = gdat.lablgangunit
gdat.lablrele = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_l| \rangle'
gdat.lablrelc = r'\langle\vec{\alpha}_n \cdot \vec{\nabla} k_l \rangle'
gdat.lablreld = r'\langle|\vec{\alpha}_n \cdot \vec{\nabla} k_d| \rangle'
gdat.lablreln = r'\langle \Delta \theta_{pix} |\hat{\alpha}_n \cdot \vec{\nabla} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelm = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelk = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle'
gdat.lablrelf = r'\langle |\vec{\nabla}_{\hat{\alpha}} k_l| / \alpha_{s,n} \rangle / k_m'
for q in gdat.indxrefr:
for l in gmod.indxpopl:
setp_varb(gdat, 'fdispop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$F_{%d%d}$' % (l, q))
setp_varb(gdat, 'cmplpop%dpop%d' % (l, q), minm=0., maxm=1., lablroot='$C_{%d%d}$' % (l, q))
if gdat.typeexpr == 'chan':
if gdat.anlytype == 'spec':
gdat.minmspec = 1e-2
gdat.maxmspec = 1e1
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
else:
gdat.minmspec = 1e-11
gdat.maxmspec = 1e-7
if gdat.typeexpr == 'ferm':
gdat.minmlumi = 1e32
gdat.maxmlumi = 1e36
elif gdat.typeexpr == 'chan':
if gdat.typedata == 'inpt':
gdat.minmlum0 = 1e42
gdat.maxmlum0 = 1e46
gdat.minmlumi = 1e41
gdat.maxmlumi = 1e45
try:
gdat.minmdlos
except AttributeError:
if gdat.typeexpr == 'chan':
gdat.minmdlos = 1e7
gdat.maxmdlos = 1e9
else:
gdat.minmdlos = 6e3
gdat.maxmdlos = 1.1e4
if gdat.typeexpr == 'ferm':
gdat.minmcnts = 1e1
gdat.maxmcnts = 1e5
if gdat.typeexpr == 'chan':
if gdat.numbpixlfull == 1:
gdat.minmcnts = 1e4
gdat.maxmcnts = 1e8
else:
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'hubb':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
if gdat.typeexpr == 'fire':
gdat.minmcnts = 1.
gdat.maxmcnts = 1e3
gdat.minmspecplot = gdat.minmspec
gdat.maxmspecplot = gdat.maxmspec
gdat.minmdeltllik = 1.
gdat.maxmdeltllik = 1e3
gdat.minmdiss = 0.
gdat.maxmdiss = gdat.maxmgangdata * np.sqrt(2.)
gdat.minmrele = 1e-3
gdat.maxmrele = 1e1
gdat.minmreln = 1e-3
gdat.maxmreln = 1.
gdat.minmrelk = 1e-3
gdat.maxmrelk = 1.
gdat.minmrelf = 1e-5
gdat.maxmrelf = 1e-1
gdat.minmrelm = 1e-3
gdat.maxmrelm = 1e1
gdat.minmreld = 1e-3
gdat.maxmreld = 1e1
gdat.minmrelc = 1e-3
gdat.maxmrelc = 1.
gdat.minmmcut = 3e7
gdat.maxmmcut = 2e9
gdat.minmmcutcorr = gdat.minmmcut
gdat.maxmmcutcorr = gdat.maxmmcut
if gdat.boolbinsspat:
gdat.minmbein = 0.
gdat.maxmbein = 1. / gdat.anglfact
# scalar variables
if gdat.boolbinsspat:
gdat.minmdeflprof = 1e-3 / gdat.anglfact
gdat.maxmdeflprof = 0.1 / gdat.anglfact
#gdat.minmfracsubh = 0.
#gdat.maxmfracsubh = 0.3
#gmod.scalfracsubh = 'self'
#gdat.minmmasshost = 1e10
#gdat.maxmmasshost = 1e13
#gmod.scalmasshost = 'self'
#
#gdat.minmmasssubh = 1e8
#gdat.maxmmasssubh = 1e10
#gmod.scalmasssubh = 'self'
# collect groups of parameter indices into lists
## labels and scales for base parameters
gmod.nameparagenrbase = []
for name, k in gmod.indxpara.__dict__.items():
if not np.isscalar(k):
print('name')
print(name)
print('temp: no nonscalar should be here!')
continue
gmod.nameparagenrbase.append(name)
gmod.numbparagenrbase = len(gmod.nameparagenrbase)
gmod.indxparagenrbase = np.arange(gmod.numbparagenrbase)
gmod.indxparagenrbasestdv = gmod.indxparagenrbase[gmod.numbpopl:]
## list of scalar variable names
gmod.namepara.scal = list(gmod.nameparagenrbase)
gmod.namepara.scal += ['lliktotl']
# derived parameters
print('Determining the list of derived, fixed-dimensional parameter names...')
gmod.namepara.genrelemextd = [[[] for g in gmod.indxparagenrelemsing[l]] for l in gmod.indxpopl]
gmod.namepara.derielemextd = [[[] for k in gmod.indxparaderielemsing[l]] for l in gmod.indxpopl]
gmod.namepara.genrelemflat = []
gmod.namepara.derielemflat = []
gmod.namepara.genrelemextdflat = []
gmod.namepara.derielemextdflat = []
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
gmod.namepara.genrelemflat.append(gmod.namepara.genrelem[l][g] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.genrelemextd[l][g].append(gmod.namepara.genrelem[l][g] + 'pop%d' % l + '%04d' % d)
gmod.namepara.genrelemextdflat.append(gmod.namepara.genrelemextd[l][g][d])
for k in gmod.indxparaderielemsing[l]:
gmod.namepara.derielemflat.append(gmod.namepara.derielem[l][k] + 'pop%d' % l)
for d in range(gmod.maxmpara.numbelem[l]):
gmod.namepara.derielemextd[l][k].append(gmod.namepara.derielem[l][k] + 'pop%d' % l + '%04d' % d)
gmod.namepara.derielemextdflat.append(gmod.namepara.derielemextd[l][k][d])
# list of element parameter names (derived and generative), counting label-degenerate element parameters only once
gmod.namepara.elem = [[] for l in gmod.indxpopl]
for l in gmod.indxpopl:
gmod.namepara.elem[l].extend(gmod.namepara.genrelem[l])
gmod.namepara.elem[l].extend(gmod.namepara.derielem[l])
gmod.namepara.elemflat = []
for l in gmod.indxpopl:
gmod.namepara.elemflat.extend(gmod.namepara.elem[l])
gmod.namepara.genrelemdefa = deepcopy(gmod.namepara.elemflat)
if gmod.boolelemlghtanyy:
for strgfeat in ['sind', 'curv', 'expc'] + ['sindcolr%04d' % i for i in gdat.indxenerinde]:
if not strgfeat in gmod.namepara.genrelemdefa:
gmod.namepara.genrelemdefa.append(strgfeat)
# list of flattened generative element parameter names, counting label-degenerate element parameters only once
gmod.namepara.genrelemkind = gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparagenrelemkind = len(gmod.namepara.genrelemkind)
#gmod.inxparagenrscalelemkind = np.arange(gmod.numbparagenrelemkind)
gmod.inxparagenrscalelemkind = tdpy.gdatstrt()
gmod.numbparagenrelemextdflat = len(gmod.namepara.genrelemextdflat)
gmod.indxparagenrelemextdflat = np.arange(gmod.numbparagenrelemextdflat)
# list of parameter names (derived and generative), counting label-degenerate element parameters only once, element lists flattened
gmod.namepara.kind = gmod.nameparagenrbase + gmod.listnameparaderitotl + gmod.namepara.genrelemflat + gmod.namepara.derielemflat
gmod.numbparakind = len(gmod.namepara.kind)
gmod.indxparakind = np.arange(gmod.numbparakind)
# list of generative parameter names, separately including all label-degenerate element parameters, element lists flattened
gmod.namepara.genrscalfull = gmod.nameparagenrbase + gmod.namepara.genrelemextdflat
gmod.namepara.genrscalfull = np.array(gmod.namepara.genrscalfull)
gmod.numbparagenrfull = len(gmod.namepara.genrscalfull)
gmod.indxparagenrfull = np.arange(gmod.numbparagenrfull)
# list of generative parameter names, counting label-degenerate element parameters only once, element lists flattened
gmod.listnameparagenrscal = gmod.nameparagenrbase + gmod.namepara.genrelemflat
gmod.numbparagenr = len(gmod.listnameparagenrscal)
gmod.indxparagenr = np.arange(gmod.numbparagenr)
# list of parameter names (derived and generative), element lists flattened
gmod.listnameparatotl = gmod.nameparagenrbase + gmod.listnameparaderitotl + \
gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.nameparagenrbase = np.array(gmod.nameparagenrbase)
for e in gmod.indxsersfgrd:
gmod.namepara.scal += ['masshostisf%dbein' % e]
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masshostisf%d' % e + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
if gmod.boollenssubh:
for strgcalcmasssubh in gdat.liststrgcalcmasssubh:
gmod.namepara.scal += ['masssubh' + strgcalcmasssubh + 'bein', 'fracsubh' + strgcalcmasssubh + 'bein']
if gmod.numbparaelem > 0:
gmod.namepara.scal += ['lpripena']
if False and gmod.boolelemsbrtdfncanyy:
for strgbins in ['lowr', 'higr']:
gmod.namepara.scal += ['histcntp%sdfncen00evt0' % strgbins]
gmod.namepara.scal += ['histcntp%sdfncsubten00evt0' % strgbins]
for i in gdat.indxener:
gmod.namepara.scal += ['fracsdenmeandarkdfncsubten%02d' % i]
gmod.namepara.scal += ['booldfncsubt']
if gmod.numbparaelem > 0:
for q in gdat.indxrefr:
if gdat.boolasscrefr[q]:
for l in gmod.indxpopl:
gmod.namepara.scal += ['cmplpop%dpop%d' % (l, q)]
gmod.namepara.scal += ['fdispop%dpop%d' % (q, l)]
gmod.numbvarbscal = len(gmod.namepara.scal)
gmod.indxvarbscal = np.arange(gmod.numbvarbscal)
# determine total label
gmod.listnameparaglob = gmod.namepara.kind + gmod.namepara.genrelemextdflat + gmod.namepara.derielemextdflat
gmod.listnameparaglob += ['cntpmodl']
for l in gmod.indxpopl:
for g in gmod.indxparagenrelemsing[l]:
if not gmod.namepara.genrelem[l][g] in gmod.listnameparaglob:
gmod.listnameparaglob.append(gmod.namepara.genrelem[l][g])
gmod.listnameparaglob.append(gmod.namepara.derielem[l][g])
for name in gmod.listnameparaglob:
lablroot = getattr(gmod.lablrootpara, name)
lablunit = getattr(gmod.lablunitpara, name)
labltotl = tdpy.retr_labltotlsing(lablroot, lablunit)
setattr(gmod.labltotlpara, name, labltotl)
# define fact
for l in gmod.indxpopl:
for k in gmod.indxparakind:
name = gmod.namepara.kind[k]
scal = getattr(gmod.scalpara, name)
if scal == 'self' or scal == 'logt':
minm = getattr(gmod.minmpara, name)
maxm = getattr(gmod.maxmpara, name)
if scal == 'self':
fact = maxm - minm
if scal == 'logt':
fact = np.log(maxm / minm)
if fact == 0:
print('name')
print(name)
raise Exception('')
setattr(gmod.factpara, name, fact)
if gmod.numbparaelem > 0:
gmod.indxparagenrfulleleminit = gmod.indxparagenrbase[-1] + 1
else:
gmod.indxparagenrfulleleminit = -1
## arrays of parameter features (e.g., minm, maxm, labl, scal, etc.)
for featpara in gdat.listfeatparalist:
gmodfeat = getattr(gmod, featpara + 'para')
### elements
#for strgtypepara in gdat.liststrgtypepara:
# listname = getattr(gmod.namepara, strgtypepara + 'elem')
# listfeat = [[] for l in gmod.indxpopl]
# listfeatflat = []
# for l in gmod.indxpopl:
#
# numb = getattr(gmod, 'numbpara' + strgtypepara + 'elemsing')[l]
# listfeat[l] = [[] for k in range(numb)]
# for k in range(numb):
# scal = getattr(gmod.scalpara, listname[l][k])
# if featpara == 'fact' and not (scal == 'self' or scal == 'logt'):
# continue
# if featpara == 'mean' and (scal != 'gaus' and scal != 'lnor'):
# continue
# if featpara == 'stdv' and (scal != 'gaus' and scal != 'lnor'):
# continue
#
# if strgtypepara == 'genr':
# strgextn = 'pop%d' % l
# else:
# strgextn = ''
# print('featpara')
# print(featpara)
# print('listname')
# print(listname)
# listfeat[l][k] = getattr(gmodfeat, listname[l][k] + strgextn)
# listfeatflat.append(listfeat[l][k])
# setattr(gmodfeat, strgtypepara + 'elem', listfeat)
# setattr(gmodfeat, strgtypepara + 'elemflat', listfeatflat)
### groups of parameters inside the parameter vector
### 'base': all fixed-dimensional generative parameters
### 'full': all generative parameters
for strggroppara in ['base', 'full']:
indx = getattr(gmod, 'indxparagenr' + strggroppara)
feat = [0. for k in indx]
for attr, valu in gmod.indxpara.__dict__.items():
if not np.isscalar(valu):
continue
scal = getattr(gmod.scalpara, attr)
if not (scal == 'self' or scal == 'logt') and featpara == 'fact':
continue
if scal != 'gaus' and (featpara == 'mean' or featpara == 'stdv'):
print('Mean or Std for non-Gaussian')
continue
if featpara == 'name':
feat[valu] = attr
else:
feat[valu] = getattr(gmodfeat, attr)
feat = np.array(feat)
setattr(gmodfeat, 'genr' + strggroppara, feat)
#print('gmod.minmpara')
#for attr, varb in gmod.minmpara.__dict__.items():
# print(attr, varb)
#print('gmod.maxmpara')
#for attr, varb in gmod.maxmpara.__dict__.items():
# print(attr, varb)
#print('gmod.scalpara')
#for attr, varb in gmod.scalpara.__dict__.items():
# print(attr, varb)
#raise Exception('')
## population groups
### number of elements
for strgvarb in ['numbelem', 'meanelem']:
listindxpara = []
if strgmodl == 'true':
listpara = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg.startswith(strgvarb + 'p'):
listindxpara.append(valu)
if strgmodl == 'true':
listpara.append(getattr(gmod.this, strg))
listindxpara = np.array(listindxpara)
setattr(gmod.indxpara, strgvarb, listindxpara)
if strgmodl == 'true':
listpara = np.array(listpara)
setattr(gmod, strgvarb, listpara)
### parameters of priors for element parameters
gmod.indxpara.prioelem = []
for strg, valu in gmod.indxpara.__dict__.items():
if strg == 'dist' and np.isscalar(valu):
gmod.indxpara.prioelem.append(valu)
gmod.indxpara.prioelem = np.array(gmod.indxpara.prioelem)
### hyperparameters
if gmod.typemodltran == 'pois':
gmod.indxpara.hypr = np.array(list(gmod.indxpara.prioelem) + list(gmod.indxpara.meanelem))
else:
gmod.indxpara.hypr = gmod.indxpara.prioelem
## generative base parameter indices for each scaling
gmod.listindxparagenrbasescal = dict()
for scaltype in gdat.listscaltype:
gmod.listindxparagenrbasescal[scaltype] = np.where(np.array(gmod.scalpara.genrbase) == scaltype)[0]
if gdat.booldiagmode:
if np.where(gmod.scalpara.genrfull == 0)[0].size > 0:
raise Exception('')
def plot_lens(gdat):
# gmod is otherwise undefined in this function; assume the fitting model here (assumption)
gmod = gdat.fitt
if gmod.boolelemdeflsubh:
xdat = gdat.binspara.angl[1:] * gdat.anglfact
lablxdat = gdat.labltotlpara.gang
listdeflscal = np.array([4e-2, 4e-2, 4e-2]) / gdat.anglfact
listanglscal = np.array([0.05, 0.1, 0.05]) / gdat.anglfact
listanglcutf = np.array([1., 1., 10.]) / gdat.anglfact
listasym = [False, False, False]
listydat = []
for deflscal, anglscal, anglcutf, asym in zip(listdeflscal, listanglscal, listanglcutf, listasym):
listydat.append(retr_deflcutf(gdat.binspara.angl[1:], deflscal, anglscal, anglcutf, asym=asym) * gdat.anglfact)
for scalxdat in ['self', 'logt']:
path = gdat.pathinitintr + 'deflcutf' + scalxdat + '.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat=scalxdat, scalydat='logt', lablxdat=lablxdat, \
lablydat=r'$\alpha_n$ [$^{\prime\prime}$]', limtydat=[1e-3, 1.5e-2], limtxdat=[None, 2.])
# pixel-convolution of the Sersic profile
# temp -- y axis labels are wrong, should be per solid angle
xdat = gdat.binspara.lgalsers * gdat.anglfact
for n in range(gdat.numbindxsers + 1):
for k in range(gdat.numbhalfsers + 1):
if k != 5:
continue
path = gdat.pathinitintr + 'sersprofconv%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, gdat.sersprof[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
#path = gdat.pathinitintr + 'sersprofcntr%04d%04d.pdf' % (n, k)
#tdpy.plot_gene(path, xdat, gdat.sersprofcntr[:, n, k], scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e6, 1e12])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
path = gdat.pathinitintr + 'sersprofdiff%04d%04d.pdf' % (n, k)
tdpy.plot_gene(path, xdat, abs(gdat.sersprof[:, n, k] - gdat.sersprofcntr[:, n, k]) / gdat.sersprofcntr[:, n, k], scalxdat='logt', \
scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, limtydat=[1e-6, 1.])
xdat = gdat.binspara.angl * gdat.anglfact
listspec = np.array([1e-19, 1e-18, 1e-18, 1e-18]) / gdat.anglfact
listsize = np.array([0.3, 1., 1., 1.]) / gdat.anglfact
listindx = np.array([4., 2., 4., 10.])
listydat = []
listlabl = []
for spec, size, indx in zip(listspec, listsize, listindx):
listydat.append(spec * retr_sbrtsersnorm(gdat.binspara.angl, size, indxsers=indx))
listlabl.append('$R_e = %.3g ^{\prime\prime}, n = %.2g$' % (size * gdat.anglfact, indx))
path = gdat.pathinitintr + 'sersprof.pdf'
tdpy.plot_gene(path, xdat, listydat, scalxdat='logt', scalydat='logt', lablxdat=lablxdat, lablydat=gdat.lablfluxtotl, \
listlegd=listlabl, listhlin=1e-7, limtydat=[1e-8, 1e0])
minmredshost = 0.01
maxmredshost = 0.4
minmredssour = 0.01
maxmredssour = 2.
numbreds = 200
retr_axis(gdat, 'redshost')
retr_axis(gdat, 'redssour')
gdat.meanpara.adishost = np.empty(numbreds)
for k in range(numbreds):
gdat.meanpara.adishost[k] = gdat.adisobjt(gdat.meanpara.redshost[k])
asca = 0.1 / gdat.anglfact
acut = 1. / gdat.anglfact
minmmass = np.zeros((numbreds + 1, numbreds + 1))
maxmmass = np.zeros((numbreds + 1, numbreds + 1))
for k, redshost in enumerate(gdat.binspara.redshost):
for n, redssour in enumerate(gdat.binspara.redssour):
if redssour > redshost:
adishost = gdat.adisobjt(redshost)
adissour = gdat.adisobjt(redssour)
adishostsour = adissour - (1. + redshost) / (1. + redssour) * adishost
factmcutfromdefs = retr_factmcutfromdefs(gdat, adissour, adishost, adishostsour, asca, acut)
minmmass[n, k] = np.log10(factmcutfromdefs * gdat.minmdefs)
maxmmass[n, k] = np.log10(factmcutfromdefs * gdat.maxmdefs)
#valulevl = np.linspace(7.5, 9., 5)
valulevl = [7.0, 7.3, 7.7, 8., 8.6]
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, minmmass, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=20, fmt='%.3g')
axis.set_xlabel(r'$z_{\rm{hst}}$')
axis.set_ylabel(r'$z_{\rm{src}}$')
axis.set_title(r'$M_{c,min}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsminm.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
valulevl = np.linspace(9., 11., 20)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
imag = axis.imshow(maxmmass, extent=[minmredshost, maxmredshost, minmredssour, maxmredssour], aspect='auto', vmin=9., vmax=11.)
cont = axis.contour(gdat.binspara.redshost, gdat.binspara.redssour, maxmmass, colors='g', levels=valulevl)
axis.clabel(cont, inline=1, fontsize=15, fmt='%.3g')
axis.set_xlabel('$z_{hst}$')
axis.set_ylabel('$z_{src}$')
axis.set_title(r'$M_{c,max}$ [$M_{\odot}$]')
path = gdat.pathinitintr + 'massredsmaxm.pdf'
plt.colorbar(imag)
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * gdat.sizepixl * 1e-3)
axis.plot(gdat.meanpara.redshost, gdat.meanpara.adishost * 2. * gdat.maxmgangdata * 1e-3)
axis.set_xlabel('$z_h$')
axis.set_yscale('log')
axis.set_ylabel(r'$\lambda$ [kpc]')
path = gdat.pathinitintr + 'wlenreds.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
figr, axis = plt.subplots(figsize=(gdat.plotsize, gdat.plotsize))
fracacutasca = np.logspace(-1., 2., 20)
mcut = retr_mcutfrommscl(fracacutasca)
    axis.loglog(fracacutasca, mcut)
axis.set_xlabel(r'$\tau_n$')
axis.set_ylabel(r'$M_{c,n} / M_{0,n}$')
axis.axhline(1., ls='--')
path = gdat.pathinitintr + 'mcut.pdf'
plt.tight_layout()
figr.savefig(path)
plt.close(figr)
def retr_listrtagprev(strgcnfg, pathpcat):
# list of PCAT run plot outputs
pathimag = pathpcat + '/imag/'
listrtag = fnmatch.filter(os.listdir(pathimag), '2*')
listrtagprev = []
for rtag in listrtag:
strgstat = pathpcat + '/data/outp/' + rtag
if chec_statfile(pathpcat, rtag, 'gdatmodipost', typeverb=0) and strgcnfg + '_' + rtag[16:].split('_')[-1] == rtag[16:]:
listrtagprev.append(rtag)
listrtagprev.sort()
return listrtagprev
def make_legd(axis, offs=None, loca=1, numbcols=1, ptch=None, line=None):
hand, labl = axis.get_legend_handles_labels()
legd = axis.legend(hand, labl, fancybox=True, frameon=True, bbox_to_anchor=offs, bbox_transform=axis.transAxes, ncol=numbcols, loc=loca, labelspacing=1, handlelength=2)
legd.get_frame().set_fill(True)
legd.get_frame().set_facecolor('white')
def setp_namevarbsing(gdat, gmod, strgmodl, strgvarb, popl, ener, evtt, back, isfr, iele):
if popl == 'full':
indxpopltemp = gmod.indxpopl
elif popl != 'none':
indxpopltemp = [popl]
if ener == 'full':
indxenertemp = gdat.indxener
elif ener != 'none':
indxenertemp = [ener]
if evtt == 'full':
indxevtttemp = gdat.indxevtt
elif evtt != 'none':
indxevtttemp = [evtt]
if back == 'full':
gmod.indxbacktemp = gmod.indxback
elif isinstance(back, int):
        gmod.indxbacktemp = np.array([back])
# -*- coding: utf-8 -*-
import operator
import dxchange
import numpy as np
import tomopy
from util import *
class Sinogram(object):
def __init__(self, sinogram, type, coords=None, normalize_bg=False, minus_log=False, center=None, fin_angle=180,
max_count=None):
assert type in ('local', 'tomosaic', 'full', 'raw')
self.padded = False
# self.normalized_bg = False
self.type = type
self.shape = sinogram.shape # unpadded shape
self.is_mlogged = False
if normalize_bg:
            self.scaler = np.mean(sinogram[:, 0])
#! /usr/bin/env python
# Mathematica nb from Alex & Laurent
# <EMAIL> major reorg as LG++ 2018 01
# python3 required (int( (len(coeffs) -1)/2 )) because of float int/int result change from python2
import numpy as np
import scipy.special
import numpy.linalg as linalg
import sys
from scipy.special import comb
import os, pickle
from uncertainties import unumpy # pip install if you need
m = 1.0
mm = 1.0e-3 * m
um = 1.0e-6 * m
def scaling(img, photons): # RENAME this function
# img gives a perfect psf to count its total flux
# photons is the desired number of photons (total flux in data)
total = np.sum(img)
print("total", total)
return photons / total
def matrix_operations(img, model, flux = None, verbose=False, linfit=False, dqm=None):
# meta-question: why & when do we use linfit?
# least squares matrix operations to solve A x = b, where A is the model,
# b is the data (image), and x is the coefficient vector we are solving for.
# In 2-D data x = inv(At.A).(At.b)
#
# img 2d array of image data
# dqm 2d bool array of bad pixel locations (same shape as 2d img), or None (for all-good data)
print("leastsqnrm.matrix_operations() - equally-weighted")
flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1])
    flatdqm = dqm.reshape(np.shape(img)[0] * np.shape(img)[1]) if dqm is not None else None
if verbose:
print(f'fringefitting.leastsqnrm.matrix_operations(): ', end='')
print(f'\n\timg {img.shape:} \n\tdqm {dqm.shape:}', end='')
print(f'\n\tL x W = {img.shape[0]:d} x {img.shape[1]:d} = {img.shape[0] * img.shape[1]:d}', end='')
print(f'\n\tflatimg {flatimg.shape:}', end='')
print(f'\n\tflatdqm {flatdqm.shape:}', end='')
# Originally Alex had nans coding bad pixels in the image.
# Anand: re-use the nan terminology code but driven by bad pixel frame
    # nanlist should get renamed eg donotuselist
if verbose: print('\n\ttype(dqm)', type(dqm), end='')
if dqm is not None: nanlist = np.where(flatdqm==True) # where DO_NOT_USE up.
else: nanlist = (np.array(()), ) # shouldn't occur w/MAST JWST data
if verbose:
print(f'\n\ttype(nanlist) {type(nanlist):}, len={len(nanlist):}', end='')
print(f'\n\tnumber of nanlist pixels: {len(nanlist[0]):d} items', end='')
print(f'\n\t{len(nanlist[0]):d} DO_NOT_USE pixels found in data slice',
end='')
else:
print(f'\t{len(nanlist[0]):d} DO_NOT_USE pixels found in data slice')
flatimg = np.delete(flatimg, nanlist)
if verbose: print(f'\n\tflatimg {flatimg.shape:} after deleting {len(nanlist[0]):d}',
end='')
if flux is not None:
flatimg = flux * flatimg / flatimg.sum()
# A
flatmodel_nan = model.reshape(np.shape(model)[0] * np.shape(model)[1],
np.shape(model)[2])
flatmodel = np.zeros((len(flatimg), np.shape(model)[2]))
if verbose:
print(f'\n\tflatmodel_nan {flatmodel_nan.shape:}', end='')
print(f'\n\tflatmodel {flatmodel.shape:}', end='')
print(f'\n\tdifference {flatmodel_nan.shape[0] - flatmodel.shape[0]:}', end='')
print()
print("flat model dimensions ", np.shape(flatmodel))
print("flat image dimensions ", np.shape(flatimg))
for fringe in range(np.shape(model)[2]):
flatmodel[:,fringe] = np.delete(flatmodel_nan[:,fringe], nanlist)
# At (A transpose)
flatmodeltransp = flatmodel.transpose()
# At.A (makes square matrix)
modelproduct = np.dot(flatmodeltransp, flatmodel)
# At.b
data_vector = np.dot(flatmodeltransp, flatimg)
# inv(At.A)
inverse = linalg.inv(modelproduct)
cond = np.linalg.cond(inverse)
x = np.dot(inverse, data_vector)
res = np.dot(flatmodel, x) - flatimg
# put bad pixels back
naninsert = nanlist[0] - np.arange(len(nanlist[0]))
# calculate residuals with fixed but unused bad pixels as nans
res = np.insert(res, naninsert, np.nan)
res = res.reshape(img.shape[0], img.shape[1])
if verbose:
print('model flux', flux)
print('data flux', flatimg.sum())
print("flat model dimensions ", np.shape(flatmodel))
print("model transpose dimensions ", np.shape(flatmodeltransp))
print("flat image dimensions ", np.shape(flatimg))
print("transpose * image data dimensions", np.shape(data_vector))
print("flat img * transpose dimensions", np.shape(inverse))
if linfit:
try:
from linearfit import linearfit
# dependent variables
M = np.mat(flatimg)
# photon noise
noise = np.sqrt(np.abs(flatimg))
# this sets the weights of pixels fulfilling condition to zero
weights = np.where(np.abs(flatimg)<=1.0, 0.0, 1.0/(noise**2))
# uniform weight
wy = weights
S = np.mat(np.diag(wy));
# matrix of independent variables
C = np.mat(flatmodeltransp)
# initialize object
result = linearfit.LinearFit(M,S,C)
# do the fit
result.fit()
# delete inverse_covariance_matrix to reduce size of pickled file
result.inverse_covariance_matrix = []
linfit_result = result
print("Returned linearfit result")
except ImportError:
linfit_result = None
# if verbose:
print("linearfit module not imported, no covariances saved.")
else:
linfit_result = None
print("linearfit module not imported, no covariances saved.")
return x, res, cond, linfit_result
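# ----------------------------------------------------------------------
# A standalone sketch (hypothetical helper, not used elsewhere in this
# module) of the normal-equations solve described above,
# x = inv(At.A).(At.b), run on synthetic data so the linear algebra can
# be checked in isolation. Shapes, seed and noise level are arbitrary.
def _demo_normal_equations(nd=50, npar=4, seed=0):
    rng = np.random.default_rng(seed)
    A = rng.normal(size=(nd, npar))                  # model matrix
    x_true = rng.normal(size=npar)                   # parameters to recover
    b = A.dot(x_true) + 1e-6 * rng.normal(size=nd)   # data with tiny noise
    AtA = np.dot(A.T, A)                             # At.A
    Atb = np.dot(A.T, b)                             # At.b
    x_hat = np.dot(linalg.inv(AtA), Atb)             # x = inv(At.A).(At.b)
    return x_hat, x_true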
#######################################################################
def weighted_operations(img, model, verbose=False, dqm=None):
# return x, res, condition_number (None=>no condition number yet), singvals
# x: solution vector
# res: residuals array, nan-flagged for bad dq values?
    # cond: condition number not calculated (no inversion done here, so not available)
# singvals: singular values returned by the SVD solution for the parameters
#
# meta-question: why & when do we use linfit? I removed it here - anand 2022 Jan
# least squares matrix operations to solve A x = b, where
# A is the model,
# b is the data (image),
# x is the coefficient vector we are solving for.
#
# Solution 1: equal weighting of data (matrix_operations()).
# x = inv(At.A).(At.b)
#
# Solution 2: weighting data by Poisson variance (weighted_operations())
# x = inv(At.W.A).(At.W.b)
# where W is a diagonal matrix of weights w_i,
# weighting each data point i by the inverse of its variance:
# w_i = 1 / sigma_i^2
# For photon noise, the data, i.e. the image values b_i have variance
    # proportional to b_i with an e.g. ADU to electrons conversion factor.
# If this factor is the same for all pixels, we do not need to include
# it here (is that really true? Yes I think so because we're not
# normalizing wts here, just ascribing rel wts.).
#
    # Possibly replace or compare with a MAD minimization using fast simplex
# https://theoryl1.wordpress.com/2016/08/03/solve-weighted-least-squares-with-numpy/
# Solve for x in Ax = b
#
# np.set_printoptions(formatter={'float': lambda x: '{:+.1e}'.format(x)}, linewidth=80)
#
# Ax = b
# b: data vector nd long; nd=5
# A: model matrix; np x nd matrix 4 x 5: np=4 parameters, nd=5 data points.
# x: parameter, vector np=4 long, unknown
#
# A=np.array([[3,1,4,2],[2,7,1,2],[1,6,1,8],[6,1,8,2],[1,4,1,4]])
# print("A:", A.shape)
# b = np.array([1.2,1.3,1.4,1.5,1.6])
# print("b:", b.shape)
# w = np.array([1,2,3,4,5])
# print("w:", w.shape)
# Aw = A * np.sqrt(w[:,np.newaxis])
# print("Aw:", Aw.shape)
    # bw = b * np.sqrt(w)
# x, r, rank, s = np.linalg.lstsq(Aw, bw, rcond=None)
# print("x.shape:", x.shape)
# print("x:", x)
# print("r:", r)
# print("rank:", rank)
# print("s:", s)
# Also a good summary at:
# https://math.stackexchange.com/questions/3094925/weighted-least-squares
# Remove not-to-be-fit data from the flattened "img" data vector
flatimg = img.reshape(np.shape(img)[0] * np.shape(img)[1])
    flatdqm = dqm.reshape(np.shape(img)[0] * np.shape(img)[1]) if dqm is not None else None
if dqm is not None: nanlist = np.where(flatdqm==True) # where DO_NOT_USE up.
else: nanlist = (np.array(()), ) # shouldn't occur w/MAST JWST data
# see original linearfit https://github.com/agreenbaum/ImPlaneIA:
# agreenbaum committed on May 21, 2017 1 parent 3e0fb8b
# commit bf02eb52c5813cb5d77036174a7caba703f9d366
#
flatimg = np.delete(flatimg, nanlist) # DATA values
# photon noise variance - proportional to ADU
# (for roughly uniform adu2electron factor)
variance = np.abs(flatimg)
# this resets the weights of pixels with negative or unity values to zero
# we ignore data with unity or lower values - weight it not-at-all..
    weights = np.where(flatimg <= 1.0, 0.0, 1.0 / np.sqrt(variance))
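# ----------------------------------------------------------------------
# A standalone sketch (hypothetical helper, not part of the routine
# above) of the weighted solve x = inv(At.W.A).(At.W.b) via the
# row-scaling trick sketched in the comments: scale the rows of A and b
# by sqrt(w_i) and hand the result to an ordinary least-squares solver.
# Shapes and weight values are arbitrary illustration choices.
def _demo_weighted_lstsq(nd=5, npar=4, seed=1):
    rng = np.random.default_rng(seed)
    A = rng.normal(size=(nd, npar))
    b = rng.normal(size=nd)
    w = np.arange(1, nd + 1, dtype=float)     # relative weights, e.g. 1/sigma_i^2
    Aw = A * np.sqrt(w)[:, None]              # scale each row of A by sqrt(w_i)
    bw = b * np.sqrt(w)                       # scale the data the same way
    x, res, rank, singvals = np.linalg.lstsq(Aw, bw, rcond=None)
    return x, singvals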
# Built-in
import os
import warnings
# Common
import numpy as np
import scipy.optimize as scpopt
import scipy.constants as scpct
import scipy.sparse as sparse
from scipy.interpolate import BSpline
import matplotlib.pyplot as plt
# --------------
# TO BE MOVED TO tofu.data WHEN FINISHED !!!!
# --------------
_NPEAKMAX = 12
###########################################################
###########################################################
#
# Preliminary
# utility tools for 1d spectral fitting
#
###########################################################
###########################################################
def remove_bck(x, y):
# opt = np.polyfit(x, y, deg=0)
opt = [np.nanmin(y)]
return y-opt[0], opt[0]
def get_peaks(x, y, nmax=None):
if nmax is None:
nmax = _NPEAKMAX
# Prepare
ybis = np.copy(y)
A = np.empty((nmax,), dtype=y.dtype)
x0 = np.empty((nmax,), dtype=x.dtype)
sigma = np.empty((nmax,), dtype=y.dtype)
def gauss(xx, A, x0, sigma): return A*np.exp(-(xx-x0)**2/sigma**2)
def gauss_jac(xx, A, x0, sigma):
jac = np.empty((xx.size, 3), dtype=float)
jac[:, 0] = np.exp(-(xx-x0)**2/sigma**2)
jac[:, 1] = A*2*(xx-x0)/sigma**2 * np.exp(-(xx-x0)**2/sigma**2)
jac[:, 2] = A*2*(xx-x0)**2/sigma**3 * np.exp(-(xx-x0)**2/sigma**2)
return jac
dx = np.nanmin(np.diff(x))
# Loop
nn = 0
while nn < nmax:
ind = np.nanargmax(ybis)
x00 = x[ind]
if np.any(np.diff(ybis[ind:], n=2) >= 0.):
wp = min(x.size-1,
ind + np.nonzero(np.diff(ybis[ind:],n=2)>=0.)[0][0] + 1)
else:
wp = ybis.size-1
if np.any(np.diff(ybis[:ind+1], n=2) >= 0.):
wn = max(0, np.nonzero(np.diff(ybis[:ind+1],n=2)>=0.)[0][-1] - 1)
else:
wn = 0
width = x[wp]-x[wn]
assert width>0.
indl = np.arange(wn, wp+1)
sig = np.ones((indl.size,))
if (np.abs(np.mean(np.diff(ybis[ind:wp+1])))
> np.abs(np.mean(np.diff(ybis[wn:ind+1])))):
sig[indl < ind] = 1.5
sig[indl > ind] = 0.5
else:
sig[indl < ind] = 0.5
sig[indl > ind] = 1.5
p0 = (ybis[ind], x00, width)#,0.)
bounds = (np.r_[0., x[wn], dx/2.],
np.r_[5.*ybis[ind], x[wp], 5.*width])
try:
(Ai, x0i, sigi) = scpopt.curve_fit(gauss, x[indl], ybis[indl],
p0=p0, bounds=bounds, jac=gauss_jac,
sigma=sig, x_scale='jac')[0]
except Exception as err:
print(str(err))
import ipdb
ipdb.set_trace()
pass
ybis = ybis - gauss(x, Ai, x0i, sigi)
A[nn] = Ai
x0[nn] = x0i
sigma[nn] = sigi
nn += 1
return A, x0, sigma
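# A minimal usage sketch (hypothetical, not called anywhere) of get_peaks():
# it greedily fits one Gaussian per iteration and subtracts it before
# searching for the next peak. The synthetic two-line spectrum below only
# illustrates the call signature and the returned arrays.
def _demo_get_peaks():
    x = np.linspace(0., 10., 500)
    y = (3.0 * np.exp(-(x - 3.) ** 2 / 0.2 ** 2)
         + 1.5 * np.exp(-(x - 7.) ** 2 / 0.3 ** 2))
    amp, x0, sigma = get_peaks(x, y, nmax=2)
    return amp, x0, sigma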
def get_p0bounds_all(x, y, nmax=None, lamb0=None):
yflat, bck = remove_bck(x, y)
amp, x0, sigma = get_peaks(x, yflat, nmax=nmax)
lamb0 = x0
nmax = lamb0.size
p0 = amp.tolist() + [0 for ii in range(nmax)] + sigma.tolist() + [bck]
lx = [np.nanmin(x), np.nanmax(x)]
Dx = np.diff(lx)
dx = np.nanmin(np.diff(x))
bamp = (np.zeros(nmax,), np.full((nmax,),3.*np.nanmax(y)))
bdlamb = (np.full((nmax,), -Dx/2.), np.full((nmax,), Dx/2.))
bsigma = (np.full((nmax,), dx/2.), np.full((nmax,), Dx/2.))
bbck0 = (0., np.nanmax(y))
bounds = (np.r_[bamp[0], bdlamb[0], bsigma[0], bbck0[0]],
np.r_[bamp[1], bdlamb[1], bsigma[1], bbck0[1]])
if not np.all(bounds[0]<bounds[1]):
msg = "Lower bounds must be < upper bounds !\n"
msg += " lower : %s\n"+str(bounds[0])
msg += " upper : %s\n"+str(bounds[1])
raise Exception(msg)
return p0, bounds, lamb0
def get_p0bounds_lambfix(x, y, nmax=None, lamb0=None):
nmax = lamb0.size
# get typical x units
    Dx = np.nanmax(x) - np.nanmin(x)
dx = np.nanmin(np.diff(x))
# Get background and background-subtracted y
yflat, bck = remove_bck(x, y)
# get initial guesses
amp = [yflat[np.nanargmin(np.abs(x-lamb))] for lamb in lamb0]
sigma = [Dx/nmax for ii in range(nmax)]
    p0 = amp + sigma + [bck]
# Get bounding boxes
bamp = (np.zeros(nmax,), np.full((nmax,),3.*np.nanmax(y)))
bsigma = (np.full((nmax,), dx/2.), np.full((nmax,), Dx/2.))
bbck0 = (0., np.nanmax(y))
bounds = (np.r_[bamp[0], bsigma[0], bbck0[0]],
np.r_[bamp[1], bsigma[1], bbck0[1]])
if not np.all(bounds[0]<bounds[1]):
msg = "Lower bounds must be < upper bounds !\n"
msg += " lower : %s\n"+str(bounds[0])
msg += " upper : %s\n"+str(bounds[1])
raise Exception(msg)
return p0, bounds, lamb0
def get_func1d_all(n=5, lamb0=None):
if lamb0 is None:
lamb0 = np.zeros((n,), dtype=float)
assert lamb0.size == n
    def func_vect(x, amp, dlamb, sigma, bck0, lamb0=lamb0, n=n):
y = np.full((n+1, x.size), np.nan)
y[:-1, :] = amp[:, None]*np.exp(-(x[None, :]-(lamb0+dlamb)[:, None])**2
/sigma[:, None]**2)
y[-1, :] = bck0
return y
def func_sca(x, *args, lamb0=lamb0, n=n):
amp = np.r_[args[0:n]][:, None]
dlamb = np.r_[args[n:2*n]][:, None]
sigma = np.r_[args[2*n:3*n]][:, None]
bck0 = np.r_[args[3*n]]
gaus = amp * np.exp(-(x[None, :]-(lamb0[:, None] + dlamb))**2/sigma**2)
back = bck0
return np.sum(gaus, axis=0) + back
def func_sca_jac(x, *args, lamb0=lamb0, n=n):
amp = np.r_[args[0:n]][None, :]
dlamb = np.r_[args[n:2*n]][None, :]
sigma = np.r_[args[2*n:3*n]][None, :]
bck0 = np.r_[args[3*n]]
lamb0 = lamb0[None, :]
x = x[:, None]
jac = np.full((x.size, 3*n+1,), np.nan)
jac[:, :n] = np.exp(-(x - (lamb0+dlamb))**2/sigma**2)
jac[:, n:2*n] = amp*2*((x - (lamb0+dlamb))/(sigma**2)
* np.exp(-(x - (lamb0+dlamb))**2/sigma**2))
jac[:, 2*n:3*n] = amp*2*((x - (lamb0+dlamb))**2/sigma**3
* np.exp(-(x - (lamb0+dlamb))**2/sigma**2))
jac[:, -1] = 1.
return jac
return func_vect, func_sca, func_sca_jac
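# A minimal consistency check (hypothetical, not called anywhere) for
# get_func1d_all(): summing the per-line components returned by func_vect
# should reproduce the scalar model returned by func_sca. Line centres,
# amplitudes, widths and background below are arbitrary.
def _demo_func1d_all():
    lamb0 = np.r_[4., 6.]
    func_vect, func_sca, func_sca_jac = get_func1d_all(n=2, lamb0=lamb0)
    x = np.linspace(0., 10., 100)
    args = [1., 2.,      # amplitudes
            0., 0.,      # wavelength shifts
            0.5, 0.5,    # widths
            0.1]         # background
    y_sca = func_sca(x, *args)
    y_vect = func_vect(x, np.r_[1., 2.], np.r_[0., 0.], np.r_[0.5, 0.5], 0.1)
    assert np.allclose(y_sca, y_vect.sum(axis=0))
    return y_sca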
def get_func1d_lamb0fix(n=5, lamb0=None):
if lamb0 is None:
lamb0 = np.zeros((n,), dtype=float)
assert lamb0.size == n
def func_vect(x, amp, sigma, bck0, lamb0=lamb0, n=n):
y = np.full((n+1, x.size), np.nan)
for ii in range(n):
y[ii, :] = amp[ii]*np.exp(-(x-lamb0[ii])**2/sigma[ii]**2)
y[-1, :] = bck0
return y
def func_sca(x, *args, lamb0=lamb0, n=n):
amp = np.r_[args[0:n]][:, None]
        sigma = np.r_[args[n:2*n]][:, None]
        bck0 = np.r_[args[2*n]]
gaus = amp * np.exp(-(x[None, :]-lamb0[:, None])**2/sigma**2)
back = bck0
return np.sum(gaus, axis=0) + back
def func_sca_jac(x, *args, lamb0=lamb0, n=n):
amp = np.r_[args[0:n]][None, :]
        sigma = np.r_[args[n:2*n]][None, :]
        bck0 = np.r_[args[2*n]]
lamb0 = lamb0[None, :]
x = x[:, None]
jac = np.full((x.size, 2*n+1,), np.nan)
jac[:, :n] = np.exp(-(x - lamb0)**2/sigma**2)
jac[:, n:2*n] = amp*2*((x - lamb0)**2/sigma**3
* np.exp(-(x-lamb0)**2/sigma**2))
jac[:, -1] = 1.
return jac
return func_vect, func_sca, func_sca_jac
def multiplegaussianfit1d(x, spectra, nmax=None,
lamb0=None, forcelamb=None,
p0=None, bounds=None,
max_nfev=None, xtol=None, verbose=0,
percent=None, plot_debug=False):
# Check inputs
if xtol is None:
xtol = 1.e-8
if percent is None:
percent = 20
# Prepare
if spectra.ndim == 1:
spectra = spectra.reshape((1,spectra.size))
nt = spectra.shape[0]
# Prepare info
if verbose is not None:
print("----- Fitting spectra with {0} gaussians -----".format(nmax))
nspect = spectra.shape[0]
nstr = max(nspect//max(int(100/percent), 1), 1)
# get initial guess function
if forcelamb is True:
get_p0bounds = get_p0bounds_lambfix
else:
get_p0bounds = get_p0bounds_all
# lamb0
if p0 is None or bounds is None or lamb0 is None:
p00, bounds0, lamb00 = get_p0bounds_all(x, spectra[0,:],
nmax=nmax, lamb0=lamb0)
if lamb0 is None:
lamb0 = lamb00
assert lamb0 is not None
if forcelamb is True:
p00 = p00[:nmax] + p00[2*nmax:]
bounds0 = bounds0[:nmax] + bounds0[2*nmax:]
if p0 is None:
p0 = p00
if bounds is None:
bounds = bounds0
if nmax is None:
nmax = lamb0.size
assert nmax == lamb0.size
# Get fit vector, scalar and jacobian functions
if forcelamb is True:
        func_vect, func_sca, func_sca_jac = get_func1d_lamb0fix(n=nmax,
                                                                 lamb0=lamb0)
else:
func_vect, func_sca, func_sca_jac = get_func1d_all(n=nmax,
lamb0=lamb0)
# Prepare index for splitting p0
if forcelamb is True:
indsplit = nmax*np.r_[1, 2]
else:
indsplit = nmax*np.r_[1, 2, 3]
# Prepare output
fit = np.full(spectra.shape, np.nan)
amp = np.full((nt, nmax), np.nan)
sigma = np.full((nt, nmax), np.nan)
bck = np.full((nt,), np.nan)
ampstd = np.full((nt, nmax), np.nan)
sigmastd = np.full((nt, nmax), np.nan)
bckstd = np.full((nt,), np.nan)
if not forcelamb is True:
dlamb = np.full((nt, nmax), np.nan)
dlambstd = np.full((nt, nmax), np.nan)
else:
dlamb, dlambstd = None, None
# Loop on spectra
lch = []
for ii in range(0, nspect):
if verbose is not None and ii%nstr==0:
print("=> spectrum {0} / {1}".format(ii+1, nspect))
try:
popt, pcov = scpopt.curve_fit(func_sca, x, spectra[ii,:],
jac=func_sca_jac,
p0=p0, bounds=bounds,
max_nfev=max_nfev, xtol=xtol,
x_scale='jac',
verbose=verbose)
except Exception as err:
msg = " Convergence issue for {0} / {1}\n".format(ii+1, nspect)
msg += " => %s\n"%str(err)
msg += " => Resetting initial guess and bounds..."
print(msg)
try:
p0, bounds, _ = get_p0bounds(x, spectra[ii,:],
nmax=nmax, lamb0=lamb0)
popt, pcov = scpopt.curve_fit(func_sca, x, spectra[ii,:],
jac=func_sca_jac,
p0=p0, bounds=bounds,
max_nfev=max_nfev, xtol=xtol,
x_scale='jac',
verbose=verbose)
p0 = popt
popt, pcov = scpopt.curve_fit(func_sca, x, spectra[ii,:],
jac=func_sca_jac,
p0=p0, bounds=bounds,
max_nfev=max_nfev, xtol=xtol,
x_scale='jac',
verbose=verbose)
lch.append(ii)
except Exception as err:
print(str(err))
import ipdb
ipdb.set_trace()
raise err
out = np.split(popt, indsplit)
outstd = np.split(np.sqrt(np.diag(pcov)), indsplit)
if forcelamb is True:
amp[ii, :], sigma[ii, :], bck[ii] = out
ampstd[ii, :], sigmastd[ii, :], bckstd[ii] = outstd
else:
amp[ii, :], dlamb[ii, :], sigma[ii, :], bck[ii] = out
ampstd[ii,:], dlambstd[ii,:], sigmastd[ii,:], bckstd[ii] = outstd
fit[ii, :] = func_sca(x, *popt)
p0[:] = popt[:]
        if plot_debug and ii in [0,1]:
            if forcelamb is True:
                fitcomp = func_vect(x, amp[ii,:], sigma[ii,:], bck[ii])
            else:
                fitcomp = func_vect(x, amp[ii,:], dlamb[ii,:], sigma[ii,:], bck[ii])
            plt.figure()
            ax0 = plt.subplot(2,1,1)
            ax1 = plt.subplot(2,1,2, sharex=ax0, sharey=ax0)
            ax0.plot(x,spectra[ii,:], '.k',
                     x, np.sum(fitcomp, axis=0), '-r')
            ax1.plot(x, fitcomp.T)
std = np.sqrt(np.sum((spectra-fit)**2, axis=1))
dout = {'fit': fit, 'lamb0': lamb0, 'std': std, 'lch': lch,
'amp': amp, 'ampstd': ampstd,
'sigma': sigma, 'sigmastd': sigmastd,
'bck': bck, 'bckstd': bckstd,
'dlamb': dlamb, 'dlambstd': dlambstd}
return dout
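# A minimal usage sketch (hypothetical, not called anywhere) of
# multiplegaussianfit1d(): it expects a (nt, nlamb) stack of spectra and
# returns a dict with fitted amplitudes, shifts, widths and background per
# time slice. The synthetic spectrum below is arbitrary.
def _demo_multiplegaussianfit1d():
    x = np.linspace(0., 10., 300)
    spec = (2.0 * np.exp(-(x - 4.) ** 2 / 0.3 ** 2)
            + 1.0 * np.exp(-(x - 6.) ** 2 / 0.4 ** 2) + 0.1)
    spectra = np.tile(spec, (3, 1))    # three identical "time slices"
    dout = multiplegaussianfit1d(x, spectra, nmax=2)
    return dout['amp'], dout['sigma'], dout['bck']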
###########################################################
###########################################################
#
# 1d spectral fitting with physics parameters
#
###########################################################
###########################################################
def get_lamb0_from_dlines(dlines):
lamb0, ions = zip(*[(vv['lamb0'],
np.full((len(vv['lamb0']),), kk))
for kk, vv in dlines.items()])
lamb0 = np.r_[lamb0]
ind = np.argsort(lamb0)
return lamb0[ind], np.concatenate(ions)[ind]
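# A minimal sketch (hypothetical, not called anywhere) of the dlines
# structure expected by get_lamb0_from_dlines(): a dict mapping an ion
# name to a dict holding a 'lamb0' sequence of line centres. The ion
# names and wavelengths below are placeholders, not reference values.
def _demo_get_lamb0_from_dlines():
    dlines = {'ArXVII': {'lamb0': [3.949e-10, 3.966e-10]},
              'FeXXV': {'lamb0': [1.850e-10]}}
    lamb0, ions = get_lamb0_from_dlines(dlines)
    # lamb0 is sorted; ions[i] names the ion each centre came from
    return lamb0, ions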
def get_dindx(bckdeg=None, dlines=None, nbs=None):
nbck = bckdeg + 1
if nbs is None:
# 1d spectral fit
nbs = 1
i0 = nbck
lk = ['sigma', 'dlamb', 'amp', 'ntot', 'nlamb']
dindx= {'bck': np.r_[:nbck],
'ions':dict.fromkeys(sorted(dlines.keys())),
'nbs': nbs}
for kk in dindx['ions'].keys():
dindx['ions'][kk] = dict.fromkeys(lk)
dindx['ions'][kk]['sigma'] = i0 + np.r_[:nbs]
dindx['ions'][kk]['dlamb'] = i0+nbs + np.r_[:nbs]
nlamb = len(dlines[kk]['lamb0'])
dindx['ions'][kk]['amp'] = i0+2*nbs + np.r_[:nlamb*nbs]
dindx['ions'][kk]['nlamb'] = nlamb
dindx['ions'][kk]['ntot'] = (2 + nlamb)*nbs
i0 += dindx['ions'][kk]['ntot']
dindx['nall'] = i0
return dindx
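# A minimal worked example (hypothetical, not called anywhere) of the
# index layout built by get_dindx(): with a degree-0 background (1 coef),
# one basis function and a single ion with two lines, the parameter
# vector holds 1 + (sigma + dlamb + 2 amplitudes) = 5 entries.
def _demo_get_dindx():
    dlines = {'ArXVII': {'lamb0': [3.949e-10, 3.966e-10]}}
    dindx = get_dindx(bckdeg=0, dlines=dlines, nbs=None)
    assert dindx['nall'] == 5
    return dindx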
def get_x0_bounds(x01d=None, dlines=None, dindx=None,
lamb=None, data=None):
x0 = np.zeros((dindx['nall'],), dtype=float)
    if x01d is None:
        # Get average spectral width and separation
        lamb0, _ = get_lamb0_from_dlines(dlines)
        lamb0_Delta = lamb0.max() - lamb0.min()
nlamb0 = lamb0.size
lamb0_delta = lamb0_Delta / nlamb0
nbs = dindx['nbs']
x0[dindx['bck']] = np.zeros((dindx['bck'].size,))
for kk in dindx['ions'].keys():
# sigma
x0[dindx[kk]['sigma']] = lamb0_delta
# dlamb
x0[dindx[kk]['dlamb']] = 0.
# amp
x0[dindx[kk]['amp']] = ampmean
else:
x0[dindx['bck']] = x01d[dindx['bck']]
i0 = dindx['bck'].size
for kk in dindx['ions'].keys():
# TBF
# x0[dindx[kk]['sigma']] = x01d[]
pass
# Get bounds
lamb_delta = np.mean(np.abs(np.diff(np.unique(lamb))))
datamax = np.nanmax(data)
bampup = min(datamax, np.nanmean(data) + np.nanstd(data))
bounds0 = np.zeros((dindx['nall'],), dtype=float)
bounds1 = np.zeros((dindx['nall'],), dtype=float)
if dindx['bck'].size == 1:
bounds0[dindx['bck']] = 0.
bounds1[dindx['bck']] = bampup
elif dindx['bck'].size == 2:
bounds0[dindx['bck'][0]] = 0.
bounds1[dindx['bck'][0]] = bampup
bounds0[dindx['bck'][0]] = 0. # TBC
bounds1[dindx['bck'][0]] = bampup # TBC
for kk in dindx['ions'].keys():
bounds0[dindx[kk]['sigma']] = 2.*lamb_delta
bounds1[dindx[kk]['sigma']] = lamb0_delta*5.
bounds0[dindx[kk]['dlamb']] = -3.*lamb0_delta
bounds1[dindx[kk]['dlamb']] = 3.*lamb0_delta
bounds0[dindx[kk]['amp']] = 0.
bounds1[dindx[kk]['amp']] = datamax
    bounds = (bounds0, bounds1)
    return x0, bounds
def get_funccostjac():
def func():
pass
def cost():
pass
def jac():
pass
return func, cost, jac
###########################################################
###########################################################
#
# 2d spectral fitting
#
###########################################################
###########################################################
def get_knots_nbs_for_bsplines(knots_unique, deg):
if deg > 0:
knots = np.r_[[knots_unique[0]]*deg, knots_unique,
[knots_unique[-1]]*deg]
else:
knots = knots_unique
nbknotsperbs = 2 + deg
nbs = knots_unique.size - 1 + deg
assert nbs == knots.size - 1 - deg
return knots, nbknotsperbs, nbs
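# A minimal worked example (hypothetical, not called anywhere) of the
# knot padding above: with deg = 3 and 5 unique knots there are
# nbs = 5 - 1 + 3 = 7 B-splines, and each end knot is repeated deg times
# so the padded knot vector has nbs + deg + 1 = 11 entries.
def _demo_knots_nbs():
    knots_unique = np.linspace(0., 1., 5)
    knots, nbknotsperbs, nbs = get_knots_nbs_for_bsplines(knots_unique, 3)
    assert nbs == 7 and knots.size == nbs + 3 + 1
    return knots, nbknotsperbs, nbs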
def get_2dspectralfit_func(lamb0, forcelamb=False,
deg=None, knots=None):
lamb0 = np.atleast_1d(lamb0).ravel()
nlamb = lamb0.size
knots = np.atleast_1d(knots).ravel()
nknots = knots.size
nbsplines = np.unique(knots).size - 1 + deg
# Define function
def func(lamb, phi,
             camp=None, csigma=None, cdlamb=None,
lamb0=lamb0, nlamb=nlamb,
knots=knots, deg=deg, forcelamb=forcelamb,
nbsplines=nbsplines, mesh=True):
assert phi.ndim in [1, 2]
if camp is not None:
assert camp.shape[0] == nbsplines
bsamp = BSpline(knots, camp, deg,
extrapolate=False, axis=0)
if csigma is not None:
assert csigma.shape[0] == nbsplines
bssigma = BSpline(knots, csigma, deg,
extrapolate=False, axis=0)
if mesh or phi.ndim == 2:
lamb0 = lamb0[None, None, :]
else:
lamb0 = lamb0[None, :]
if forcelamb:
if mesh:
                assert phi.ndim == lamb.ndim == 1
# shape (lamb, angle, lines)
return np.sum(bsamp(phi)[None,:,:]
* np.exp(-(lamb[:,None,None]
- lamb0)**2
/(bssigma(phi)[None,:,:]**2)), axis=-1)
else:
assert phi.shape == lamb.shape
lamb = lamb[..., None]
# shape (lamb/angle, lines)
return np.sum(bsamp(phi)
* np.exp(-(lamb
- lamb0)**2
/(bssigma(phi)**2)), axis=-1)
else:
if cdlamb is not None:
assert cdlamb.shape[0] == nbsplines
bsdlamb = BSpline(knots, cdlamb, deg,
extrapolate=False, axis=0)
return func
def get_multigaussianfit2d_costfunc(lamb=None, phi=None, data=None, std=None,
lamb0=None, forcelamb=None,
deg=None, knots=None,
nlamb0=None, nkperbs=None, nbs=None,
nc=None, debug=None):
assert lamb.shape == phi.shape == data.shape
assert lamb.ndim == 1
assert nc == nbs*nlamb0
if forcelamb is None:
forcelamb = False
if debug is None:
debug = False
# Define func assuming all inpus properly formatted
if forcelamb:
# x = [camp[1-nbs,...,nbs*(nlamb0-1)-nc}, csigma[1-nc]]
def func(x,
lamb=lamb, phi=phi, data=data, std=std,
lamb0=lamb0, knots=knots, deg=deg, nc=nc):
amp = BSpline(knots, x[:nc], deg,
extrapolate=False, axis=0)(phi)
sigma = BSpline(knots, x[nc:], deg,
extrapolate=False, axis=0)(phi)
val = np.sum(amp[:, None]
* np.exp(-(lamb[:, None] - lamb0[None, :])**2
/(sigma[:, None]**2)), axis=-1)
return (val-data)/(std*data.size)
def jac(x,
lamb=lamb, phi=phi, std=std,
lamb0=lamb0, knots=knots, deg=deg,
nlamb0=nlamb0, nkperbs=nkperbs, nbs=nbs, nc=nc):
amp = BSpline(knots, x[:nc], deg,
extrapolate=False, axis=0)(phi)
sigma = BSpline(knots, x[nc:], deg,
extrapolate=False, axis=0)(phi)
jacx = sparse.csr_matrix((phi.size, 2*nc), dtype=float)
#jacx = np.zeros((phi.size, 2*nc), dtype=float)
for ii in range(nlamb0):
expi = np.exp(-(lamb-lamb0[ii])**2/sigma**2)
for jj in range(nbs):
ind = ii*nbs + jj
indk = np.r_[jj*nkperbs:(jj+1)*nkperbs]
# all bsplines are the same, only coefs (x) are changing
bj = BSpline.basis_element(knots[indk],
extrapolate=False)(phi)
#bj[np.isnan(bj)] = 0.
indok = ~np.isnan(bj)
# Differentiate wrt camp
jacx[indok, ind] = (bj * expi)[indok]
# Differentiate wrt csigma
jacx[indok, nc+ind] = (
amp * (2*(lamb-lamb0[ii])**2*bj/sigma**3) * expi
)[indok]
return jacx/(std*phi.size)
else:
# x = [camp1-nbs*nlamb, csigma1-nbs*nlamb, cdlamb1-nbs*nlamb]
def func(x,
lamb=lamb, phi=phi, data=data, std=std,
lamb0=lamb0, knots=knots, deg=deg,
nbs=nbs, nlamb0=nlamb0, nc=nc, debug=debug):
amp = BSpline(knots, x[:nc].reshape((nbs, nlamb0), order='F'),
deg, extrapolate=False, axis=0)(phi)
sigma = BSpline(knots, x[nc:2*nc].reshape((nbs, nlamb0), order='F'),
deg, extrapolate=False, axis=0)(phi)
dlamb = BSpline(knots, x[2*nc:-1].reshape((nbs, nlamb0), order='F'),
deg, extrapolate=False, axis=0)(phi)
val = np.nansum(amp
* np.exp(-(lamb[:, None] - (lamb0[None, :]+dlamb))**2
/ sigma**2),
axis=-1) + x[-1]
if debug:
vmin, vmax = 0, np.nanmax(data)
fig = plt.figure(figsize=(14, 10));
ax0 = fig.add_axes([0.05,0.55,0.25,0.4])
ax1 = fig.add_axes([0.35,0.55,0.25,0.4], sharex=ax0, sharey=ax0)
ax2 = fig.add_axes([0.65,0.55,0.25,0.4], sharex=ax0, sharey=ax0)
ax3 = fig.add_axes([0.05,0.05,0.25,0.4], sharex=ax0, sharey=ax0)
ax4 = fig.add_axes([0.35,0.05,0.25,0.4], sharex=ax0, sharey=ax0)
ax5 = fig.add_axes([0.65,0.05,0.25,0.4], sharex=ax0, sharey=ax0)
ax0.scatter(lamb, phi, c=data, s=2, marker='s', edgecolors='None',
vmin=vmin, vmax=vmax) # DB
ax1.scatter(lamb, phi, c=val, s=2, marker='s', edgecolors='None', # DB
vmin=vmin, vmax=vmax) # DB
errmax = np.nanmax(np.abs((val-data) / (std*data.size)))
ax2.scatter(lamb, phi, c=(val-data) / (std*data.size),
s=2, marker='s', edgecolors='None', # DB
vmin=-errmax, vmax=errmax, cmap=plt.cm.seismic) # DB
dlamb0_amp = np.max(np.diff(lamb0))/np.nanmax(np.abs(amp))
dlamb0_sigma = np.max(np.diff(lamb0))/np.nanmax(np.abs(sigma))
dlamb0_dlamb = np.max(np.diff(lamb0))/np.nanmax(np.abs(dlamb))
for ii in range(nlamb0):
ax3.axvline(lamb0[ii], ls='--', c='k')
ax4.axvline(lamb0[ii], ls='--', c='k')
ax5.axvline(lamb0[ii], ls='--', c='k')
ax3.plot(lamb0[ii] + dlamb0_amp*amp[:, ii], phi, '.', ms=4)
ax4.plot(lamb0[ii] + dlamb0_sigma*sigma[:, ii], phi, '.', ms=4)
ax5.plot(lamb0[ii] + dlamb0_dlamb*dlamb[:, ii], phi, '.', ms=4)
import ipdb # DB
ipdb.set_trace() # DB
return (val-data) / (std*data.size)
def jac(x,
lamb=lamb, phi=phi, std=std,
lamb0=lamb0, knots=knots, deg=deg,
nlamb0=nlamb0, nkperbs=nkperbs, nbs=nbs, nc=nc):
amp = BSpline(knots, x[:nc], deg,
extrapolate=False, axis=0)(phi)
sigma = BSpline(knots, x[nc:2*nc], deg,
extrapolate=False, axis=0)(phi)
dlamb = BSpline(knots, x[2*nc:], deg,
extrapolate=False, axis=0)(phi)
#jacx = sparse.csr_matrix((phi.size, 2*nc), dtype=float)
jacx = np.zeros((phi.size, 3*nc+1), dtype=float)
for ii in range(nlamb0):
expi = np.exp(-(lamb-(lamb0[ii]+dlamb))**2/sigma**2)
indlamb = expi > 0.001
for jj in range(nbs):
kk = ii*nbs + jj
indk = jj + np.r_[:nkperbs]
# all bsplines are the same, only coefs (x) are changing
bj = BSpline.basis_element(knots[indk],
extrapolate=False)(phi)
# bj[np.isnan(bj)] = 0.
indok = (~np.isnan(bj)) & indlamb
# Differentiate wrt camp
jacx[indok, kk] = (bj[indok] * expi[indok])
# Differentiate wrt csigma
jacx[indok, nc+kk] = (
amp * 2*(lamb-(lamb0[ii]+dlamb))**2*bj/sigma**3 * expi
)[indok]
# Differentiate wrt dlamb
jacx[indok, 2*nc+kk] = (
amp * 2*(lamb-(lamb0[ii]+dlamb))*bj/sigma**2 * expi
)[indok]
jacx[:, -1] = 1.
return jacx/(std*phi.size)
return func, jac
def multigaussianfit2d(lamb, phi, data, std=None,
lamb0=None, forcelamb=None,
knots=None, deg=None, nbsplines=None,
x0=None, bounds=None,
method=None, max_nfev=None,
xtol=None, ftol=None, gtol=None,
loss=None, verbose=0, debug=None):
# Check inputs
if deg is None:
deg = 3
if nbsplines is None:
nbsplines = 5
if method is None:
method = 'trf'
# Only 2 method for pb. with bounds
assert method in ['trf', 'dogbox'], method
if xtol is None:
xtol = 1.e-6
if ftol is None:
ftol = 1.e-6
if gtol is None:
gtol = 1.e-8
if loss is None:
loss = 'linear'
if max_nfev is None:
max_nfev = None
if std is None:
std = 0.1*np.nanmean(data)
assert lamb0 is not None
# Get knots
if knots is None:
phimin, phimax = np.nanmin(phi), np.nanmax(phi)
knots = np.linspace(phimin, phimax, nbsplines+1-deg)
knots, nkperbs, nbs = get_knots_nbs_for_bsplines(np.unique(knots), deg)
# Scaling
lambmin = np.nanmin(lamb)
lamb0Delta = np.max(lamb0) - np.min(lamb0)
nlamb0 = np.size(lamb0)
nc = nbs*nlamb0
dlambscale = lamb0Delta / nlamb0
ampscale = np.nanmean(data) + np.nanstd(data)
datascale = data / ampscale
lambscale = lamb / dlambscale
lamb0scale = lamb0 / dlambscale
stdscale = std / ampscale
# Get cost function and jacobian
func, jac = get_multigaussianfit2d_costfunc(lamb=lambscale,
phi=phi,
data=datascale,
std=stdscale,
lamb0=lamb0scale,
forcelamb=forcelamb,
deg=deg, knots=knots,
nlamb0=nlamb0, nbs=nbs,
nkperbs=nkperbs, nc=nc,
debug=debug)
# Get initial guess
if x0 is None:
x0 = np.r_[np.ones((nc,)), np.ones((nc,))]
if not forcelamb:
x0 = np.r_[x0, np.zeros((nc,))]
x0 = np.r_[x0, 0.]
# Get bounds
if bounds is None:
bounds = (np.r_[np.zeros((nc,)),
np.full((nc,), nlamb0/100)],
np.r_[np.full((nc,), np.nanmax(data)/ampscale),
np.full((nc,), 3.)])
if not forcelamb:
bounds = (np.r_[bounds[0], -np.full((nc,), 2.)],
np.r_[bounds[1], np.full((nc,), 2.)])
bounds = (np.r_[bounds[0], 0.],
np.r_[bounds[1], 0.1*np.nanmax(data)/ampscale])
# Minimize
res = scpopt.least_squares(func, x0, jac=jac, bounds=bounds,
method=method, ftol=ftol, xtol=xtol,
gtol=gtol, x_scale=1.0, f_scale=1.0, loss=loss,
diff_step=None, tr_solver=None,
tr_options={}, jac_sparsity=None,
max_nfev=max_nfev, verbose=verbose,
args=(), kwargs={})
# Separate and reshape output
camp = res.x[:nc].reshape((nlamb0, nbs)) * ampscale
csigma = res.x[nc:2*nc].reshape((nlamb0, nbs)) * dlambscale
if forcelamb:
cdlamb = None
else:
cdlamb = res.x[2*nc:3*nc].reshape((nlamb0, nbs)) * dlambscale
# Create output dict
dout = {'camp': camp, 'csigma': csigma, 'cdlamb': cdlamb, 'bck':res.x[-1],
'fit':(func(res.x)*stdscale*data.size + datascale) * ampscale,
'lamb0':lamb0, 'knots': knots, 'deg':deg, 'nbsplines': nbsplines,
'cost': res.cost, 'fun': res.fun, 'active_mask': res.active_mask,
'nfev': res.nfev, 'njev': res.njev, 'status': res.status}
return dout
###########################################################
#
# From DataCam2D
#
###########################################################
###########################################################
# DEPRECATED
def fit_spectra2d_x0_per_row():
# Loop from centre to edges
for jj in range(ny):
out = multiplegaussianfit(x, datat[jj,:], nmax=nmax, p0=p0u, bounds=None,
max_nfev=None, xtol=1.e-8, verbose=0,
percent=20, plot_debug=False)
x0[jj,:], x0_std[jj,:] = out[:2]
for jj in range(nybis):
# Upper half
ju = indy1[jj]
out = multiplegaussianfit(x, spect, nmax=nmax, p0=p0u, bounds=None,
max_nfev=None, xtol=1.e-8, verbose=0,
percent=20, plot_debug=False)
x0[ju,:], x0_std[ju,:] = out[:2]
p0u[:nmax], p0u[nmax:2*nmax], = amp[ii,ju,:], x0[ii,ju,:]
p0u[2*nmax:3*nmax], p0u[3*nmax:] = sigma[ii,ju,:], bck0
# Lower half
jl = indy2[jj]
return x0
def get_func2d(y0, y1, x0_y, bspl_n, bspl_deg):
knots = np.linspace(y0,y1, 6)
bspliney = scpinterp.LSQUnivariateSpline()
def func(x, y, ampy_coefs, sigy_coefs, bcky_coefs):
amp_bs = BSpline(knots, ampy_coefs, k=bspl_deg,
extrapolate=False, axis=0)
amp = amp_bs(y)
x0y = x0_y(y)
return np.sum(amp*np.exp((x-xoy)**2/sig**2) + bck0, axis=-1)
return func
def fit_spectra_2d(data2d, indt=None, nbin_init=None,
nmax=None, bck=None, nbsplines=None):
""" Return fitted spectra
Can handle unique or multiple time
Takes already formatted 2d data:
- (nx, ny)
- (nt, nx, ny)
x being the horizontal / spectral direction (lambda)
"""
#####################
# Check / format input
#####################
    # Check data
    data = data2d
    assert isinstance(data, np.ndarray)
    assert data.ndim in [2, 3]
    if data.ndim == 2:
        data = data.reshape((1, data.shape[0], data.shape[1]))
if indt is not None:
data = data[indt,...]
# Set bck type
if bck is None:
bck = 0
assert type(bck) in [int, str]
if type(bck) is int:
nbck = bck + 1
elif bck == 'exp':
nbck = 3
# Extract shape
nt = data.shape[0]
nlamb, ny = data.shape[1:]
    x = np.arange(0, nlamb)
from ._accumulate_data import AccumulateData
from ..util import MaxSamplesWarning
from ..discrete_distribution import Lattice
from numpy import array, nan
import warnings
import numpy as np
from scipy.optimize import fminbound as fminbnd
from scipy.optimize import fmin, fmin_bfgs
from numpy import sqrt, exp, log
from scipy.stats import norm as gaussnorm
from scipy.stats import t as tnorm
class LDTransformBayesData(AccumulateData):
"""
Update and store transformation data based on low-discrepancy sequences.
See the stopping criterion that utilize this object for references.
"""
def __init__(self, stopping_crit, integrand, true_measure, discrete_distrib, m_min: int, m_max: int,
fbt, merge_fbt, kernel):
"""
Args:
stopping_crit (StoppingCriterion): a StoppingCriterion instance
integrand (Integrand): an Integrand instance
true_measure (TrueMeasure): A TrueMeasure instance
discrete_distrib (DiscreteDistribution): a DiscreteDistribution instance
m_min (int): initial n == 2^m_min
m_max (int): max n == 2^m_max
"""
self.parameters = ['solution', 'error_bound', 'n_total']
self.stopping_crit = stopping_crit
self.integrand = integrand
self.true_measure = true_measure
self.discrete_distrib = discrete_distrib
self.distribution_name = type(self.discrete_distrib).__name__
# Bayes cubature properties
self.errbd_type = self.stopping_crit.errbd_type
self.arb_mean = self.stopping_crit.arb_mean
self.order = self.stopping_crit.order
self.kernType = self.stopping_crit.kernType
self.avoid_cancel_error = self.stopping_crit.avoid_cancel_error
self.abs_tol = self.stopping_crit.abs_tol
self.rel_tol = self.stopping_crit.rel_tol
self.debug_enable = self.stopping_crit.debug_enable
# Credible interval : two-sided confidence, i.e., 1-alpha percent quantile
# quantile value for the error bound
if self.errbd_type == 'full_Bayes':
# degrees of freedom = 2^mmin - 1
self.uncert = -tnorm.ppf(self.stopping_crit.alpha / 2, (2 ** m_min) - 1)
else:
self.uncert = -gaussnorm.ppf(self.stopping_crit.alpha / 2)
# Set Attributes
self.m_min = m_min
self.m_max = m_max
self.debugEnable = True
self.n_total = 0 # total number of samples generated
self.solution = nan
self.iter = 0
self.m = self.m_min
self.mvec = np.arange(self.m_min, self.m_max + 1, dtype=int)
# Initialize various temporary storage between iterations
self.xpts_ = array([]) # shifted lattice points
self.xun_ = array([]) # un-shifted lattice points
self.ftilde_ = array([]) # fourier transformed integrand values
if isinstance(self.discrete_distrib, Lattice):
# integrand after the periodization transform
self.ff = lambda x, *args, **kwargs: self.integrand.f(x,
periodization_transform=stopping_crit.ptransform,
*args, **kwargs).squeeze()
else:
self.ff = self.integrand.f
self.fbt = fbt
self.merge_fbt = merge_fbt
self.kernel = kernel
super(LDTransformBayesData, self).__init__()
def update_data(self, y_val_new=None, xnew=None, xunnew=None):
""" See abstract method. """
# Generate sample values
if self.iter < len(self.mvec):
if y_val_new is None:
self.ftilde_, self.xun_, self.xpts_ = self.iter_fbt(self.iter, self.xun_, self.xpts_, self.ftilde_)
else:
self.ftilde_, self.xun_, self.xpts_ = self.iter_fbt(self.iter, self.xun_, self.xpts_, self.ftilde_,
y_val_new, xunnew, xnew)
self.m = self.mvec[self.iter]
self.iter += 1
# update total samples
self.n_total = 2 ** self.m # updated the total evaluations
else:
warnings.warn('''
Already used maximum allowed sample size %d.
Note that error tolerances may no longer be satisfied.''' % (2 ** self.m_max),
MaxSamplesWarning)
return self._stopping_criterion(self.xun_, self.ftilde_, self.m)
# decides if the user-defined error threshold is met
def _stopping_criterion(self, xpts, ftilde, m):
r = self.stopping_crit.order
ftilde = ftilde.squeeze()
n = 2 ** m
success = False
lna_range = [-5, 0] # reduced from [-5, 5], to avoid kernel values getting too big causing error
# search for optimal shape parameter
if self.stopping_crit.one_theta == True:
lna_MLE = fminbnd(lambda lna: self.objective_function(exp(lna), xpts, ftilde)[0],
x1=lna_range[0], x2=lna_range[1], xtol=1e-2, disp=0)
aMLE = exp(lna_MLE)
_, vec_lambda, vec_lambda_ring, RKHS_norm = self.objective_function(aMLE, xpts, ftilde)
else:
if self.stopping_crit.use_gradient == True:
warnings.warn('Not implemented !')
lna_MLE = 0
else:
# Nelder-Mead Simplex algorithm
                theta0 = np.ones((1, xpts.shape[1])) * 0.05
lna_MLE = fmin(lambda lna: self.objective_function(exp(lna), xpts, ftilde)[0],
theta0, xtol=1e-2, disp=False)
aMLE = exp(lna_MLE)
# print(n, aMLE)
_, vec_lambda, vec_lambda_ring, RKHS_norm = self.objective_function(aMLE, xpts, ftilde)
# Check error criterion
# compute DSC
if self.errbd_type == 'full_Bayes':
# full Bayes
if self.avoid_cancel_error:
DSC = abs(vec_lambda_ring[0] / n)
else:
DSC = abs((vec_lambda[0] / n) - 1)
# 1-alpha two sided confidence interval
err_bd = self.uncert * sqrt(DSC * RKHS_norm / (n - 1))
elif self.errbd_type == 'GCV':
# GCV based stopping criterion
if self.avoid_cancel_error:
DSC = abs(vec_lambda_ring[0] / (n + vec_lambda_ring[0]))
else:
DSC = abs(1 - (n / vec_lambda[0]))
temp = vec_lambda
temp[0] = n + vec_lambda_ring[0]
            mC_inv_trace = np.sum(1. / temp[temp != 0])
err_bd = self.uncert * sqrt(DSC * RKHS_norm / mC_inv_trace)
else:
# empirical Bayes
if self.avoid_cancel_error:
DSC = abs(vec_lambda_ring[0] / (n + vec_lambda_ring[0]))
else:
DSC = abs(1 - (n / vec_lambda[0]))
err_bd = self.uncert * sqrt(DSC * RKHS_norm / n)
if self.arb_mean: # zero mean case
muhat = ftilde[0] / n
else: # non zero mean case
muhat = ftilde[0] / vec_lambda[0]
self.error_bound = err_bd
        muhat = np.abs(muhat)
################################################################################
#
# Copyright (c) 2017 University of Oxford
# Authors:
# <NAME> (<EMAIL>)
#
# This work is licensed under the Creative Commons
# Attribution-NonCommercial-ShareAlike 4.0 International License.
# To view a copy of this license, visit
# http://creativecommons.org/licenses/by-nc-sa/4.0/ or send a letter to
# Creative Commons, PO Box 1866, Mountain View, CA 94042, USA.
#
################################################################################
import bisect
import csv
import numpy as np
import numpy.matlib as ml
from .transform import *
def interpolate_vo_poses(vo_path, pose_timestamps, origin_timestamp):
"""Interpolate poses from visual odometry.
Args:
vo_path (str): path to file containing relative poses from visual odometry.
pose_timestamps (list[int]): UNIX timestamps at which interpolated poses are required.
origin_timestamp (int): UNIX timestamp of origin frame. Poses will be reported relative to this frame.
Returns:
list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix representing interpolated pose for each requested timestamp.
"""
with open(vo_path) as vo_file:
vo_reader = csv.reader(vo_file)
headers = next(vo_file)
vo_timestamps = [0]
abs_poses = [ml.identity(4)]
lower_timestamp = min(min(pose_timestamps), origin_timestamp)
upper_timestamp = max(max(pose_timestamps), origin_timestamp)
for row in vo_reader:
timestamp = int(row[0])
if timestamp < lower_timestamp:
vo_timestamps[0] = timestamp
continue
vo_timestamps.append(timestamp)
xyzrpy = [float(v) for v in row[2:8]]
rel_pose = build_se3_transform(xyzrpy)
abs_pose = abs_poses[-1] * rel_pose
abs_poses.append(abs_pose)
if timestamp >= upper_timestamp:
break
return interpolate_poses(vo_timestamps, abs_poses, pose_timestamps, origin_timestamp)
def interpolate_ins_poses(ins_path, pose_timestamps, use_rtk=False):
"""Interpolate poses from INS.
Args:
ins_path (str): path to file containing poses from INS.
pose_timestamps (list[int]): UNIX timestamps at which interpolated poses are required.
Returns:
list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix representing interpolated pose for each requested timestamp.
QUT CHANGE: Removed origin timestamp, so interpolated poses are absolute
"""
with open(ins_path) as ins_file:
ins_reader = csv.reader(ins_file)
headers = next(ins_file)
ins_timestamps = [0]
abs_poses = [ml.identity(4)]
upper_timestamp = max(pose_timestamps)
for row in ins_reader:
timestamp = int(row[0])
ins_timestamps.append(timestamp)
utm = row[5:8] if not use_rtk else row[4:7]
rpy = row[-3:] if not use_rtk else row[11:14]
xyzrpy = [float(v) for v in utm] + [float(v) for v in rpy]
abs_pose = build_se3_transform(xyzrpy)
abs_poses.append(abs_pose)
if timestamp >= upper_timestamp:
break
ins_timestamps = ins_timestamps[1:]
abs_poses = abs_poses[1:]
return interpolate_poses(ins_timestamps, abs_poses, pose_timestamps)
def interpolate_poses(pose_timestamps, abs_poses, requested_timestamps):
"""Interpolate between absolute poses.
Args:
pose_timestamps (list[int]): Timestamps of supplied poses. Must be in ascending order.
abs_poses (list[numpy.matrixlib.defmatrix.matrix]): SE3 matrices representing poses at the timestamps specified.
requested_timestamps (list[int]): Timestamps for which interpolated timestamps are required.
Returns:
list[numpy.matrixlib.defmatrix.matrix]: SE3 matrix representing interpolated pose for each requested timestamp.
Raises:
ValueError: if pose_timestamps and abs_poses are not the same length
ValueError: if pose_timestamps is not in ascending order
QUT CHANGE: Removed origin timestamp, so interpolated poses are absolute
"""
requested_timestamps = np.array(requested_timestamps)
pose_timestamps = np.array(pose_timestamps)
if len(pose_timestamps) != len(abs_poses):
raise ValueError('Must supply same number of timestamps as poses')
abs_quaternions = np.zeros((4, len(abs_poses)))
abs_positions = np.zeros((3, len(abs_poses)))
for i, pose in enumerate(abs_poses):
if i > 0 and pose_timestamps[i-1] >= pose_timestamps[i]:
raise ValueError('Pose timestamps must be in ascending order')
abs_quaternions[:, i] = so3_to_quaternion(pose[0:3, 0:3])
abs_positions[:, i] = np.ravel(pose[0:3, 3])
upper_indices = [bisect.bisect(pose_timestamps, pt) for pt in requested_timestamps]
lower_indices = [u - 1 for u in upper_indices]
if max(upper_indices) >= len(pose_timestamps):
upper_indices = [min(i, len(pose_timestamps) - 1) for i in upper_indices]
    fractions = (requested_timestamps - pose_timestamps[lower_indices]) / \
                (pose_timestamps[upper_indices] - pose_timestamps[lower_indices])
quaternions_lower = abs_quaternions[:, lower_indices]
quaternions_upper = abs_quaternions[:, upper_indices]
d_array = (quaternions_lower * quaternions_upper).sum(0)
linear_interp_indices = np.nonzero(d_array >= 1)
sin_interp_indices = np.nonzero(d_array < 1)
scale0_array = np.zeros(d_array.shape)
scale1_array = np.zeros(d_array.shape)
scale0_array[linear_interp_indices] = 1 - fractions[linear_interp_indices]
scale1_array[linear_interp_indices] = fractions[linear_interp_indices]
theta_array = np.arccos(np.abs(d_array[sin_interp_indices]))
scale0_array[sin_interp_indices] = \
        np.sin((1 - fractions[sin_interp_indices]) * theta_array) / np.sin(theta_array)