Columns (string type, length range min–max):
  prompt      string, 19 – 879k characters
  completion  string, 3 – 53.8k characters
  api         string, 8 – 59 characters
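Each row below pairs a truncated code context (`prompt`) with the call that follows it (`completion`) and the fully qualified API name (`api`). A minimal sketch of iterating over such rows, assuming they are stored as JSON Lines with those three fields; the file name here is hypothetical:

```python
import json

# Hypothetical path; each line is a JSON object with "prompt", "completion", "api".
with open("api_completion_rows.jsonl", "r", encoding="utf-8") as f:
    for line in f:
        row = json.loads(line)
        context = row["prompt"]       # truncated source code
        target = row["completion"]    # the call that follows the context
        api_name = row["api"]         # e.g. "numpy.array"
        print(api_name, "->", target[:60])
```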
""" Reads either pickle or mat files and plots the results. -- <EMAIL> -- <EMAIL> Usage: python plotting.py --filelist <file containing list of pickle or mat file paths> python plotting.py --file <pickle or mat file path> """ from __future__ import division # pylint: disable=invalid-name # pylint: disable=redefined-builtin # pylint: disable=too-many-locals import os import pickle import argparse import warnings import matplotlib.pyplot as plt import matplotlib from scipy.io import loadmat import numpy as np matplotlib.rcParams['mathtext.fontset'] = 'custom' matplotlib.rcParams['mathtext.rm'] = 'Bitstream Vera Sans' matplotlib.rcParams['mathtext.it'] = 'Bitstream Vera Sans:italic' matplotlib.rcParams['mathtext.bf'] = 'Bitstream Vera Sans:bold' matplotlib.rcParams['mathtext.fontset'] = 'stix' matplotlib.rcParams['font.family'] = 'STIXGeneral' def rgba(red, green, blue, a): '''rgba: generates matplotlib compatible rgba values from html-style rgba values ''' return (red / 255.0, green / 255.0, blue / 255.0, a) def hex(hexstring): '''hex: generates matplotlib-compatible rgba values from html-style hex colors ''' if hexstring[0] == '#': hexstring = hexstring[1:] red = int(hexstring[:2], 16) green = int(hexstring[2:4], 16) blue = int(hexstring[4:], 16) return rgba(red, green, blue, 1.0) def transparent(red, green, blue, _, opacity=0.5): '''transparent: converts a rgba color to a transparent opacity ''' return (red, green, blue, opacity) def read_results(file_path): """reads experiment result data from a '.m' file :file_path: the path to the file :returns: a dataframe object with all the various pieces of data """ if file_path.endswith('.mat'): results = loadmat(file_path) elif file_path.endswith('.p'): with open(file_path, 'rb') as pickleF: res = pickle.load(pickleF) pickleF.close() results = {} for key in list(res.keys()): if not hasattr(res[key], '__len__'): results[key] = np.array(res[key]) elif isinstance(res[key], str): results[key] = np.array(res[key]) elif isinstance(res[key], list): results[key] = np.array(res[key]) elif isinstance(res[key], np.ndarray): val = np.zeros(res[key].shape, dtype=res[key].dtype) for idx, x in np.ndenumerate(res[key]): if isinstance(x, list): val[idx] = np.array(x) else: val[idx] = x results[key] = val else: results[key] = res[key] else: raise ValueError('Wrong file format. 
It has to be either mat or pickle file') return results def get_plot_info( meth_curr_opt_vals, cum_costs, meth_costs, grid_pts, outlier_frac, init_opt_vals ): """generates means and standard deviation for the method's output """ num_experiments = len(meth_curr_opt_vals) with warnings.catch_warnings(): warnings.simplefilter(action='ignore', category=FutureWarning) idx = np.where(meth_curr_opt_vals == '-') if idx[0].size != 0: num_experiments = idx[0][0] outlier_low_idx = max(np.round(outlier_frac * num_experiments), 1) outlier_high_idx = min( num_experiments, int(num_experiments - np.rint(outlier_frac * num_experiments)) ) inlier_idx = np.arange(outlier_low_idx, outlier_high_idx) num_grid_pts = len(grid_pts) grid_vals = np.zeros((num_experiments, num_grid_pts)) for exp_iter in range(num_experiments): if cum_costs is None: curr_cum_costs = np.cumsum(meth_costs[exp_iter]) else: curr_cum_costs = cum_costs[exp_iter] if init_opt_vals is not None: opt_vals = np.concatenate((np.array([init_opt_vals[exp_iter]]), np.squeeze(meth_curr_opt_vals[exp_iter])), axis=0) curr_cum_costs = np.concatenate((np.array([0]), np.squeeze(curr_cum_costs)), axis=0) else: opt_vals = meth_curr_opt_vals[exp_iter] interp = np.interp(grid_pts, curr_cum_costs.flatten(), opt_vals.flatten()) grid_vals[exp_iter, :] = np.maximum.accumulate(interp) sorted_grid_vals = np.sort(grid_vals, axis=0) inlier_grid_vals = sorted_grid_vals[inlier_idx, :] def mean_and_std(arr1d): """ Returns mean and standard deviation.""" finite_arr1d = arr1d[np.isfinite(arr1d)] if finite_arr1d.size / arr1d.size >= 0.4: return np.array([np.mean(finite_arr1d), np.std(finite_arr1d) / np.sqrt(arr1d.size)]) return
np.array([np.NaN] * 2)
numpy.array
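The row above completes the `mean_and_std` helper, which falls back to a NaN pair when too few entries are finite. A standalone sketch of just that helper, lifted out of the original plotting script:

```python
import numpy as np

def mean_and_std(arr1d):
    """Mean and standard error of the finite entries; NaNs if fewer than 40% are finite."""
    finite = arr1d[np.isfinite(arr1d)]
    if finite.size / arr1d.size >= 0.4:
        return np.array([np.mean(finite), np.std(finite) / np.sqrt(arr1d.size)])
    return np.array([np.nan] * 2)

print(mean_and_std(np.array([1.0, np.inf, np.nan, np.nan])))  # only 25% finite -> [nan nan]
```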
import cv2 import numpy as np import os import torch import torch.utils.data as data from pyquaternion import Quaternion from sklearn.externals import joblib from tqdm import tqdm def suncg_parse_path(dataset_dir, img_path): splits = img_path.split('/') house_id = splits[-2] img_id = splits[-1] img_path = os.path.join(dataset_dir, house_id, img_id) return img_path class SUNCGRPDataset(data.Dataset): def __init__(self, flags): self.flags = flags if flags.loss_fn == 'C': assert(os.path.exists(flags.kmeans_trans_path)) assert(os.path.exists(flags.kmeans_rots_path)) self.kmeans_trans = joblib.load(flags.kmeans_trans_path) self.kmeans_rots = joblib.load(flags.kmeans_rots_path) anno_file = os.path.join(flags.split_dir, '{}_set.txt'.format(flags.train_test_phase)) f = open(anno_file) lines = f.readlines()[3:] print("precomputing relative pose...") self.index = [] compute_mean = True # get image size for network self.img_input_shape = tuple([int(_) for _ in flags.img_resize.split('x')]) assert len(self.img_input_shape) == 2 mean_npy = os.path.join(flags.cached_dir, "img_mean_{}x{}.npy".format(self.img_input_shape[0], self.img_input_shape[1])) if os.path.exists(mean_npy) or flags.train_test_phase != 'train': compute_mean = False all_img_resized = np.zeros((self.img_input_shape[1], self.img_input_shape[0], 3)) print('Reading from '+flags.dataset_dir) for line in tqdm(lines): annot = line.split(' ') # get images and paths img1_path = suncg_parse_path(flags.dataset_dir, annot[0]) img2_path = suncg_parse_path(flags.dataset_dir, annot[8]) if compute_mean: img1, img2 = self.read_image(img1_path, img2_path) # calculate sum all_img_resized += img1 all_img_resized += img2 # calculate relative pose relative_pose = self.get_relative_pose(np.hstack((annot[1:8], annot[9:])).astype('f4')) relative_pose_dict = {'tran': relative_pose[:3], 'rot':relative_pose[3:]} self.index.append([img1_path, img2_path, relative_pose_dict]) if compute_mean: # calculate mean all_img_resized /= (len(lines)*2) # save mean
np.save(mean_npy, all_img_resized)
numpy.save
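This row caches a per-pixel mean image with `np.save`. A minimal sketch of the cache-or-load pattern it implements; the path, image count, and shape below are illustrative stand-ins:

```python
import os
import numpy as np

mean_npy = "img_mean_224x224.npy"          # illustrative cache path
images = np.random.rand(10, 224, 224, 3)   # stand-in for the resized training images

if os.path.exists(mean_npy):
    mean_img = np.load(mean_npy)           # reuse the cached mean on later runs
else:
    mean_img = images.sum(axis=0) / len(images)
    np.save(mean_npy, mean_img)            # cache the computed mean

print(mean_img.shape)  # (224, 224, 3)
```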
import matplotlib.pyplot as plt from matplotlib.ticker import MaxNLocator from pymatgen.io.vasp.outputs import Vasprun from pymatgen.io.vasp.inputs import Poscar from pymatgen.electronic_structure.core import Spin, Orbital from pymatgen.electronic_structure.dos import Dos from scipy.ndimage.filters import gaussian_filter1d from scipy.ndimage import gaussian_filter from functools import reduce import numpy as np import pandas as pd from ase.visualize.plot import plot_atoms from pymatgen.io.ase import AseAtomsAdaptor import copy import time class Dos: """ This class contains all the methods for contructing density of states plots from the outputs of VASP calculations. Parameters: folder (str): This is the folder that contains the VASP files. spin (str): Which spin direction to parse ('up' or 'down') """ def __init__(self, folder, spin='up'): self.folder = folder self.spin = spin self.forbitals = False self.vasprun = Vasprun( f'{folder}/vasprun.xml', parse_dos=True, parse_eigen=False, parse_potcar_file=False ) self.poscar = Poscar.from_file( f'{folder}/POSCAR', check_for_POTCAR=False, read_velocities=False ) self.color_dict = { 0: '#FF0000', 1: '#0000FF', 2: '#008000', 3: '#800080', 4: '#E09200', 5: '#FF5C77', 6: '#778392', 7: '#07C589', 8: '#40BAF2', 9: '#FF0000', 10: '#0000FF', 11: '#008000', 12: '#800080', 13: '#E09200', 14: '#FF5C77', 15: '#778392', } self.orbital_labels = { 0: '$s$', 1: '$p_{y}$', 2: '$p_{x}$', 3: '$p_{z}$', 4: '$d_{xy}$', 5: '$d_{yz}$', 6: '$d_{z^{2}}$', 7: '$d_{xz}$', 8: '$d_{x^{2}-y^{2}}$', 9: '$f_{y^{3}x^{2}}$', 10: '$f_{xyz}$', 11: '$f_{yz^{2}}$', 12: '$f_{z^{3}}$', 13: '$f_{xz^{2}}$', 14: '$f_{zx^{3}}$', 15: '$f_{x^{3}}$', } self.spin_dict = {'up': Spin.up, 'down': Spin.down} self.tdos_dict = self._load_tdos() self.pdos_dict = self._load_pdos() def _load_tdos(self): """ This function loads the total density of states into a dictionary Returns: tdos_dict (dict[str][np.ndarray]): Dictionary that consists or the energies and densities of the system. """ if self.spin == 'up': spin_factor = 1 elif self.spin == 'down': spin_factor = -1 tdos = self.vasprun.tdos if self.spin == 'up' or self.spin == 'down': tdos_dict = { 'energy': np.array(tdos.energies - tdos.efermi), 'density': spin_factor *
np.array(tdos.densities[self.spin_dict[self.spin]])
numpy.array
# -*- coding:utf-8 -*- ''' MIT License Copyright (c) 2019 李俊諭 <NAME> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE. ''' from Config import Config from Gen_DataSet import Gen_DataSet from tensorflow.python.client import device_lib from tensorflow import keras from tensorflow.keras import backend as k from tensorflow.keras import Sequential, Model from tensorflow.keras.layers import Dense, Dropout, Flatten from tensorflow.keras.layers import Conv2D, Activation, MaxPooling2D, BatchNormalization import os import tensorflow as tf import time import numpy as np import history_plot '''模型訓練參數配置-CNN''' batch_size = 100 epochs = 20 verbose = 1 # CNN_optimizer = 'Adadelta' CNN_optimizer = keras.optimizers.Adadelta(lr=0.5) # CNN_optimizer = tf.train.AdadeltaOptimizer(learning_rate=0.00015) # CNN_loss = 'categorical_crossentropy' CNN_loss = keras.losses.categorical_crossentropy CNN_inputlayer_conv2D_hidden_unit = 32 CNN_inputlayer_conv2D_kernel_size = (2, 2) CNN_inputlayer_Activation = 'relu' CNN_inputlayer_conv2D_padding = 'same' CNN_onelayer_conv2D_hidden_unit = 32 CNN_onelayer_conv2D_kernel_size = (2, 2) CNN_onelayer_conv2D_padding = 'same' CNN_onelayer_Activation = 'relu' CNN_onelayer_MaxPooling2D_pool_size = (2, 2) CNN_twolayer_conv2D_hidden_unit = 64 CNN_twolayer_conv2D_kernel_size = (2, 2) CNN_twolayer_conv2D_padding = 'same' CNN_twolayer_Activation = 'relu' CNN_twolayer_MaxPooling2D_pool_size = (2, 2) CNN_twolayer_Dropout = 0.25 CNN_full_connectionlayer_Dense = 128 CNN_full_connectionlayer_Activation = 'relu' CNN_full_connectionlayer_Dropout = 0.25 CNN_ouputlayer_Activation = 'softmax' ''' 設置模型訓練時之回調函數控制 ''' callbacks = [ tf.keras.callbacks.TensorBoard( log_dir=Config.Log_TensorBoard_Path, batch_size=batch_size, write_images=True, ), tf.keras.callbacks.ModelCheckpoint(filepath=os.path.join(Config.Model_ModelCheckpoint_Path, "ckpt_{epoch:02d}"), verbose=1, save_weights_only=False), ] input_arrays = list() output_arrays = list() def build_model(): ''' 建置產生CNN模型實體。 ''' # 輸入層維度from keras.models import load_model input_shape = ( np.array(Config.Train_DataSet).shape[1],
np.array(Config.Train_DataSet)
numpy.array
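The completion here derives the CNN input shape from the training array's dimensions. A sketch of that idea with a dummy array standing in for `Config.Train_DataSet`:

```python
import numpy as np

train_dataset = np.zeros((1000, 28, 28, 1))   # stand-in for Config.Train_DataSet

# Drop the sample axis to get the per-example input shape for the Keras model.
input_shape = np.array(train_dataset).shape[1:]
print(input_shape)  # (28, 28, 1)
```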
import os import time import datetime import random import json from collections import Counter from math import sqrt import gensim import pandas as pd import numpy as np from bs4 import BeautifulSoup import logging from gensim.models import word2vec import multiprocessing import yaml import jieba import matplotlib.pyplot as plt from sklearn.metrics import roc_auc_score from sklearn.metrics import accuracy_score from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.model_selection import train_test_split from sklearn.preprocessing import LabelEncoder import tensorflow as tf from tensorflow.keras.layers import GlobalAveragePooling1D from tensorflow.keras.layers import TimeDistributed from tensorflow.keras.layers import BatchNormalization from tensorflow.keras.layers import Layer from tensorflow.keras.layers import Input from tensorflow.keras.layers import Conv2D from tensorflow.keras.layers import MaxPool2D from tensorflow.keras.layers import concatenate from tensorflow.keras.layers import Flatten from tensorflow.keras.layers import Dense from tensorflow.keras.layers import Dropout from tensorflow.keras.layers import Embedding from tensorflow.keras.layers import Reshape from tensorflow.keras.layers import GRU from tensorflow.keras.layers import LSTM from tensorflow.keras.layers import Bidirectional from tensorflow.keras.layers import Flatten from tensorflow.keras import backend as K from tensorflow.keras import Sequential from tensorflow.keras import optimizers from tensorflow.keras import losses from tensorflow.keras import regularizers from tensorflow.keras import initializers from tensorflow.keras.models import Model from tensorflow.keras.models import Sequential from tensorflow.keras.preprocessing import sequence from tensorflow.keras.utils import to_categorical from tensorflow.keras.utils import plot_model from tensorflow.keras.callbacks import ReduceLROnPlateau from tensorflow.keras.callbacks import EarlyStopping from tensorflow.keras.callbacks import ModelCheckpoint class Dataset(object): def __init__(self, config): self.dataSource = config.dataSource self.stopWordSource = config.stopWordSource # 每条输入的序列处理为定长 self.sequenceLength = config.sequenceLength self.embeddingSize = config.embeddingSize self.batchSize = config.batchSize self.rate = config.rate self.miniFreq = config.miniFreq self.stopWordDict = {} self.trainReviews = [] self.trainLabels = [] self.evalReviews = [] self.evalLabels = [] self.wordEmbedding = None self.n_symbols = 0 self.wordToIndex = {} self.indexToWord = {} def readData(self, filePath): with open(filePath, mode='r', encoding='utf-8') as f: text = [] label = [] for line in f: temp = line.replace('\n', '').split(',,') text.append(temp[0]) label.append(temp[1]) print('data:the text number is {},the label number is {}'.format(len(text), len(label))) texts = [jieba.lcut(document.replace('\n', '')) for document in text] return texts, label def readStopWord(self, stopWordPath): """ 读取停用词 """ with open(stopWordPath, mode='r', encoding='utf-8') as f: stopWordList = f.read().splitlines() # 将停用词用列表的形式生成,之后查找停用词时会比较快 self.stopWordDict = dict(zip(stopWordList, list(range(len(stopWordList))))) def getWordEmbedding(self, words): """ 按照我们的数据集中的单词取出预训练好的word2vec中的词向量 """ # 中文 model = gensim.models.Word2Vec.load('../data/word2VecModel') vocab = [] wordEmbedding = [] # 添加 "pad" 和 "UNK", vocab.append("pad") wordEmbedding.append(np.zeros(self.embeddingSize)) vocab.append("UNK") wordEmbedding.append(np.random.randn(self.embeddingSize)) for 
word in words: try: # 中文 vector = model[word] vocab.append(word) wordEmbedding.append(vector) except: print(word + " : 不存在于词向量中") return vocab,
np.array(wordEmbedding)
numpy.array
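This row assembles an embedding matrix by appending vectors to a Python list and converting it once at the end with `np.array`. A self-contained sketch of that pattern without the gensim model; the small lookup dict stands in for the trained word2vec vectors:

```python
import numpy as np

embedding_size = 4
pretrained = {"cat": np.ones(embedding_size),
              "dog": np.full(embedding_size, 2.0)}   # stand-in for the word2vec model

vocab = ["pad", "UNK"]
word_embedding = [np.zeros(embedding_size), np.random.randn(embedding_size)]

for word in ["cat", "dog", "fish"]:
    try:
        vector = pretrained[word]        # KeyError if the word has no pretrained vector
        vocab.append(word)
        word_embedding.append(vector)
    except KeyError:
        print(word, ": not in the embedding vocabulary")

embedding_matrix = np.array(word_embedding)   # shape (len(vocab), embedding_size)
print(embedding_matrix.shape)
```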
import numpy as np import pytest from clustering_algorithms import KMedoidsAlgorithm, Point class TestKMedoidsAlgorithm: def test_list_of_labels_with_incorrect_length_passed_to_init(self): points = [ Point(1, np.array([0, 1]), ["x", "y"]), Point(2, np.array([2, 3]), ["x", "y"]), Point(3, np.array([4, 5]), ["x", "y"]), ] with pytest.raises(ValueError): KMedoidsAlgorithm(points, 2, labels=["1", "2", "3"]) def test_get_initial_medoids_indices(self): points = [ Point(idx=1, coordinates=np.array([0, 1]), coordinates_names=["x", "y"]), Point(idx=2, coordinates=np.array([2, 3]), coordinates_names=["x", "y"]), Point(idx=3, coordinates=np.array([4, 5]), coordinates_names=["x", "y"]), Point(idx=4, coordinates=np.array([6, 7]), coordinates_names=["x", "y"]), Point(idx=5, coordinates=np.array([8, 9]), coordinates_names=["x", "y"]), ] medoids = KMedoidsAlgorithm.get_initial_medoids_indices(points, 2) assert len(medoids) == 2 medoids = KMedoidsAlgorithm.get_initial_medoids_indices(points, 3) assert len(medoids) == 3 def test_prepare_medoids(self): points = [ Point(idx=1, coordinates=np.array([0, 1]), coordinates_names=["x", "y"]), Point(idx=2, coordinates=np.array([2, 3]), coordinates_names=["x", "y"]), Point(idx=3, coordinates=
np.array([4, 5])
numpy.array
import tensorflow as tf import keras.backend as K from Utils import * from generators.MotionBlurGenerator import * from generators.SVHNGenerator import * from generators.SVHN_New import * from generators.SVHNgan import SVHNganGenerator K.set_learning_phase(0) from glob import glob import os import numpy as np # paths Orig_Path = './results/SVHN/Original Images/*.png' Range_Path = './results/SVHN/Range Images/*.png' Blur_Path = './results/SVHN/Original Blurs/Test Blurs.npy' # paths REGULARIZORS = [0.01 , 0.01] RANDOM_RESTARTS = 10 NOISE_STD = 0.01 STEPS = 6000 LEARNING_RATE = 0.005 IMAGE_RANGE = [0,1] optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE) def step_size(t): return 0.01 * np.exp( - t / 1000 ) SAVE_PATH = './results/SVHN/deblur - '+str(int(NOISE_STD*100)) + 'perc noise - ' +str(RANDOM_RESTARTS) + 'RR/out_' # ----------------------------------------------------------------------- # loading test blur kernels W = np.load(Blur_Path) BLUR_RES = W.shape[1] # loading svhn test images X_Orig = np.array([ imread(path) for path in glob(Orig_Path)]) / 255 # print(X_Orig.shape) X_Orig = X_Orig[0:10] # print(X_Orig.shape) IMAGE_RES = X_Orig.shape[1] CHANNELS = X_Orig.shape[-1] # loading svhn generator SVHNGen = SVHNGenerator() SVHNGen.GenerateModel() SVHNGen.LoadWeights() svhn_vae, svhn_encoder, svhn_decoder = SVHNGen.GetModels() svhn_latent_dim = SVHNGen.latent_dim # loading new generator model SVHNGen_new = SVHN_New() SVHNGen_new.GenerateModel() SVHNGen_new.LoadWeights() svhn_vae_new, svhn_encoder_new, svhn_decoder_new = SVHNGen_new.GetModels() svhn_latent_dim_new = SVHNGen_new.latent_dim #wait just runing a f # loading motion blur generator BLURGen = MotionBlur() BLURGen.GenerateModel() BLURGen.LoadWeights() blur_vae, blur_encoder, blur_decoder = BLURGen.GetModels() blur_latent_dim = BLURGen.latent_dim # check if save dir exists, if not create a new one try: os.stat(SAVE_PATH[:-5]) except: os.mkdir(SAVE_PATH[:-5]) # generating blurry images from test Y_np = [] Blurry_Images = [] for i in tqdm(range(len(X_Orig)), ascii=True, desc ='Gen-Test-Blurry'): x_np = X_Orig[i] w_np = W[i] y_np, y_f = GenerateBlurry(x_np, w_np, noise_std = NOISE_STD ) Y_np.append(y_np) for _ in range(RANDOM_RESTARTS): Blurry_Images.append(y_f) Y_np = np.array(Y_np) Blurry_Images = np.array(Blurry_Images) """ # alternating gradient descent for test images using original weights image_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0], reg = REGULARIZORS, image_range = IMAGE_RANGE, decoder = svhn_decoder, blur_decoder = blur_decoder, image_res = IMAGE_RES, blur_res = BLUR_RES, channels = CHANNELS) m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS, image_grad = image_gradients , blur_grad = blur_gradients, getloss = get_loss, latent_image_dim = svhn_latent_dim , latent_blur_dim = blur_latent_dim) X_hat_algo1 = [] W_hat_algo1 = [] for i in range(len(X_Orig)): m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = svhn_decoder, blur_decoder = blur_decoder, latent_image_dim = svhn_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) X_hat_algo1.append(x_hat_test) W_hat_algo1.append(w_hat_test) X_hat_algo1 = np.array(X_hat_algo1) W_hat_algo1 = np.array(W_hat_algo1) # alternating gradient descent for 
test images using new weights image_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0], reg = REGULARIZORS, image_range = IMAGE_RANGE, decoder = svhn_decoder_new, blur_decoder = blur_decoder, image_res = IMAGE_RES, blur_res = BLUR_RES, channels = CHANNELS) m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS, image_grad = image_gradients , blur_grad = blur_gradients, getloss = get_loss, latent_image_dim = svhn_latent_dim , latent_blur_dim = blur_latent_dim) X_hat_new = [] W_hat_new = [] for i in range(len(X_Orig)): m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = svhn_decoder_new, blur_decoder = blur_decoder, latent_image_dim = svhn_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) X_hat_new.append(x_hat_test) W_hat_new.append(w_hat_test) X_hat_new = np.array(X_hat_new) W_hat_new = np.array(W_hat_new) # saving results Max = 10**len(str(len(X_Orig)-1)) for i in range(len(X_Orig)): Save_Results(path = SAVE_PATH + str(i+Max)[1:], x_np = X_Orig[i], w_np = W[i], y_np = Y_np[i], y_np_range = None , x_hat_test = X_hat_algo1[i], w_hat_test = W_hat_algo1[i], x_range = None, x_hat_range = X_hat_new[i], w_hat_range = W_hat_new[i], clip=True) # Running Algo 2 REGULARIZORS = [1.0, 0.5, 100.0, 0.001] # loading svhn generator SVHNGen = SVHNGenerator() SVHNGen.GenerateModel() SVHNGen.LoadWeights() svhn_vae, svhn_encoder, svhn_decoder = SVHNGen.GetModels() svhn_decoder.trainable = False svhn_latent_dim = SVHNGen.latent_dim # loading new generator model SVHNGen_new = SVHN_New() SVHNGen_new.GenerateModel() SVHNGen_new.LoadWeights() svhn_vae_new, svhn_encoder_new, svhn_decoder_new = SVHNGen_new.GetModels() svhn_decoder_new.trainable = False svhn_latent_dim_new = SVHNGen_new.latent_dim # solving deconvolution using Algorithm 2 rr = np.shape(Blurry_Images)[0] zi_tf = tf.Variable(tf.random_normal(shape=([rr, svhn_latent_dim])), dtype = 'float32') zk_tf = tf.Variable(tf.random_normal(shape=([rr, blur_latent_dim])), dtype = 'float32') x_tf = tf.Variable(tf.random_normal(mean = 0.5, stddev = 0.01,shape=([rr, IMAGE_RES,IMAGE_RES,CHANNELS]))) x_G = svhn_decoder(zi_tf) x_G = tf.reshape(x_G, shape=(rr,IMAGE_RES,IMAGE_RES,CHANNELS)) x_G = (x_G + 1)/2 y_fourier = tf.placeholder(shape=(rr, IMAGE_RES,IMAGE_RES,CHANNELS), dtype='complex64') blur = blur_decoder(zk_tf) blur = tf.reshape(blur, shape=(rr,BLUR_RES,BLUR_RES)) padding = np.int((IMAGE_RES -BLUR_RES)/2) blur = tf.pad(blur, [[0,0], [padding,padding],[padding,padding]], 'CONSTANT') blur_fourier = tf.fft2d( tf.cast(blur, dtype = 'complex64')) # splitting tensors into 3 channels y_fourier0 = y_fourier[:,:,:,0]; x_0 = x_tf[:,:,:,0]; x_G0 = x_G[:,:,:,0] y_fourier1 = y_fourier[:,:,:,1]; x_1 = x_tf[:,:,:,1]; x_G1 = x_G[:,:,:,1] y_fourier2 = y_fourier[:,:,:,2]; x_2 = x_tf[:,:,:,2]; x_G2 = x_G[:,:,:,2] # 1st Channel Loss x_0_fourier = tf.fft2d( tf.cast( x_0, dtype='complex64')) loss_x0 = tf.reduce_mean( tf.square( tf.abs(y_fourier0 - x_0_fourier*blur_fourier) ), axis=[1,2]) x_Gi0_fourier = tf.fft2d( tf.cast( x_G0, dtype='complex64')) loss_xG0 = tf.reduce_mean( tf.square( tf.abs(y_fourier0 - x_Gi0_fourier*blur_fourier) ), axis=[1,2]) # 2nd Channel Loss x_1_fourier = tf.fft2d( tf.cast( x_1, dtype='complex64')) loss_x1 = tf.reduce_mean( tf.square( 
tf.abs(y_fourier1 - x_1_fourier*blur_fourier) ), axis=[1,2]) x_Gi1_fourier = tf.fft2d( tf.cast( x_G1, dtype='complex64')) loss_xG1 = tf.reduce_mean( tf.square( tf.abs(y_fourier1 - x_Gi1_fourier*blur_fourier) ), axis=[1,2]) # 3rd Channel Loss x_2_fourier = tf.fft2d( tf.cast( x_2, dtype='complex64')) loss_x2 = tf.reduce_mean( tf.square( tf.abs(y_fourier2 - x_2_fourier*blur_fourier) ), axis=[1,2]) x_Gi2_fourier = tf.fft2d( tf.cast( x_G2, dtype='complex64')) loss_xG2 = tf.reduce_mean( tf.square( tf.abs(y_fourier2 - x_Gi2_fourier*blur_fourier) ), axis=[1,2]) Loss_xG_tf = tf.constant(REGULARIZORS[0])*(loss_xG0 + loss_xG1 + loss_xG2) Loss_x_tf = tf.constant(REGULARIZORS[1])*(loss_x0 + loss_x1 + loss_x2) x_minus_xG_tf = tf.constant(REGULARIZORS[2])*tf.reduce_mean( tf.square( tf.abs(x_tf - x_G)), axis=[1,2,3]) LossTV_tf = tf.constant(REGULARIZORS[3])*tf.image.total_variation(x_tf) TotalLoss_tf = Loss_xG_tf + Loss_x_tf + x_minus_xG_tf + LossTV_tf opt = optimizer.minimize(TotalLoss_tf, var_list = [zi_tf, zk_tf, x_tf]) sess = K.get_session() sess.run(tf.variables_initializer([zi_tf, zk_tf, x_tf])) Losses = [] # running optimizer steps for i in tqdm(range(STEPS), ascii=True, desc = 'Solving Deconv.'): losses = sess.run([opt, TotalLoss_tf, Loss_xG_tf, Loss_x_tf, x_minus_xG_tf], feed_dict = {y_fourier: Blurry_Images}) Losses.append([loss for loss in losses[1:] ]) Losses = np.array(Losses) zi_hat, zk_hat, x_hat = sess.run([zi_tf, zk_tf, x_tf]) tmp = [] for i in range(4): tmp.append( [loss[i] for loss in Losses]) Losses = tmp TotalLoss, Loss_xG, Loss_x, x_minus_xG = Losses # extracting best images from random restarts with minimum residual error X_Hat = [] XG_Hat = [] W_Hat = [] for i in range(len(X_Orig)): x_i = X_Orig[i] zi_hat_i = zi_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] zk_hat_i = zk_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] x_hat_i = x_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] w_hat_i = blur_decoder.predict(zk_hat_i)[:,:,:,0] x_hat_i = np.clip(x_hat_i, 0, 1) loss_i = [ComputeResidual(Y_np[i], x, w) for x,w in zip(x_hat_i,w_hat_i)] min_loss_loc = np.argmin(loss_i) zi_hat_recov = zi_hat_i[min_loss_loc].reshape([1,svhn_latent_dim]) zk_hat_recov = zk_hat_i[min_loss_loc].reshape([1,blur_latent_dim]) x_hat_recov = x_hat_i[min_loss_loc] w_hat = blur_decoder.predict(zk_hat_recov).reshape(BLUR_RES,BLUR_RES) xg_hat = svhn_decoder.predict(zi_hat_recov).reshape(IMAGE_RES,IMAGE_RES,CHANNELS) X_Hat.append(x_hat_recov); W_Hat.append(w_hat); XG_Hat.append(xg_hat) X_Hat = np.array(X_Hat) W_Hat = np.array(W_Hat) XG_Hat = np.array(XG_Hat) # normalizing images X_Hat = np.clip(X_Hat, 0,1) XG_Hat = (XG_Hat + 1)/2 # saving results Max = 10**len(str(len(X_Orig)-1)) for i in range(len(X_Orig)): Save_Results_Algo_2(path = SAVE_PATH + str(i+Max)[1:], x_np = None, w_np = None, y_np = None, y_np_range = None , x_hat_test = XG_Hat[i], w_hat_test = W_Hat[i], x_range = None, x_hat_range = None, w_hat_range = None, clip=True) # solving deconvolution using Algorithm 2 rr = np.shape(Blurry_Images)[0] zi_tf = tf.Variable(tf.random_normal(shape=([rr, svhn_latent_dim_new])), dtype = 'float32') zk_tf = tf.Variable(tf.random_normal(shape=([rr, blur_latent_dim])), dtype = 'float32') x_tf = tf.Variable(tf.random_normal(mean = 0.5, stddev = 0.01,shape=([rr, IMAGE_RES,IMAGE_RES,CHANNELS]))) x_G = svhn_decoder_new(zi_tf) x_G = tf.reshape(x_G, shape=(rr,IMAGE_RES,IMAGE_RES,CHANNELS)) x_G = (x_G + 1)/2 y_fourier = tf.placeholder(shape=(rr, IMAGE_RES,IMAGE_RES,CHANNELS), dtype='complex64') blur = blur_decoder(zk_tf) blur = 
tf.reshape(blur, shape=(rr,BLUR_RES,BLUR_RES)) padding = np.int((IMAGE_RES -BLUR_RES)/2) blur = tf.pad(blur, [[0,0], [padding,padding],[padding,padding]], 'CONSTANT') blur_fourier = tf.fft2d( tf.cast(blur, dtype = 'complex64')) # splitting tensors into 3 channels y_fourier0 = y_fourier[:,:,:,0]; x_0 = x_tf[:,:,:,0]; x_G0 = x_G[:,:,:,0] y_fourier1 = y_fourier[:,:,:,1]; x_1 = x_tf[:,:,:,1]; x_G1 = x_G[:,:,:,1] y_fourier2 = y_fourier[:,:,:,2]; x_2 = x_tf[:,:,:,2]; x_G2 = x_G[:,:,:,2] # 1st Channel Loss x_0_fourier = tf.fft2d( tf.cast( x_0, dtype='complex64')) loss_x0 = tf.reduce_mean( tf.square( tf.abs(y_fourier0 - x_0_fourier*blur_fourier) ), axis=[1,2]) x_Gi0_fourier = tf.fft2d( tf.cast( x_G0, dtype='complex64')) loss_xG0 = tf.reduce_mean( tf.square( tf.abs(y_fourier0 - x_Gi0_fourier*blur_fourier) ), axis=[1,2]) # 2nd Channel Loss x_1_fourier = tf.fft2d( tf.cast( x_1, dtype='complex64')) loss_x1 = tf.reduce_mean( tf.square( tf.abs(y_fourier1 - x_1_fourier*blur_fourier) ), axis=[1,2]) x_Gi1_fourier = tf.fft2d( tf.cast( x_G1, dtype='complex64')) loss_xG1 = tf.reduce_mean( tf.square( tf.abs(y_fourier1 - x_Gi1_fourier*blur_fourier) ), axis=[1,2]) # 3rd Channel Loss x_2_fourier = tf.fft2d( tf.cast( x_2, dtype='complex64')) loss_x2 = tf.reduce_mean( tf.square( tf.abs(y_fourier2 - x_2_fourier*blur_fourier) ), axis=[1,2]) x_Gi2_fourier = tf.fft2d( tf.cast( x_G2, dtype='complex64')) loss_xG2 = tf.reduce_mean( tf.square( tf.abs(y_fourier2 - x_Gi2_fourier*blur_fourier) ), axis=[1,2]) Loss_xG_tf = tf.constant(REGULARIZORS[0])*(loss_xG0 + loss_xG1 + loss_xG2) Loss_x_tf = tf.constant(REGULARIZORS[1])*(loss_x0 + loss_x1 + loss_x2) x_minus_xG_tf = tf.constant(REGULARIZORS[2])*tf.reduce_mean( tf.square( tf.abs(x_tf - x_G)), axis=[1,2,3]) LossTV_tf = tf.constant(REGULARIZORS[3])*tf.image.total_variation(x_tf) TotalLoss_tf = Loss_xG_tf + Loss_x_tf + x_minus_xG_tf + LossTV_tf opt = optimizer.minimize(TotalLoss_tf, var_list = [zi_tf, zk_tf, x_tf]) sess = K.get_session() sess.run(tf.variables_initializer([zi_tf, zk_tf, x_tf])) Losses = [] # running optimizer steps for i in tqdm(range(STEPS), ascii=True, desc = 'Solving Deconv.'): losses = sess.run([opt, TotalLoss_tf, Loss_xG_tf, Loss_x_tf, x_minus_xG_tf], feed_dict = {y_fourier: Blurry_Images}) Losses.append([loss for loss in losses[1:] ]) Losses = np.array(Losses) zi_hat, zk_hat, x_hat = sess.run([zi_tf, zk_tf, x_tf]) tmp = [] for i in range(4): tmp.append( [loss[i] for loss in Losses]) Losses = tmp TotalLoss, Loss_xG, Loss_x, x_minus_xG = Losses # extracting best images from random restarts with minimum residual error X_Hat = [] XG_Hat = [] W_Hat = [] for i in range(len(X_Orig)): x_i = X_Orig[i] zi_hat_i = zi_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] zk_hat_i = zk_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] x_hat_i = x_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] w_hat_i = blur_decoder.predict(zk_hat_i)[:,:,:,0] x_hat_i = np.clip(x_hat_i, 0, 1) loss_i = [ComputeResidual(Y_np[i], x, w) for x,w in zip(x_hat_i,w_hat_i)] min_loss_loc = np.argmin(loss_i) zi_hat_recov = zi_hat_i[min_loss_loc].reshape([1,svhn_latent_dim_new]) zk_hat_recov = zk_hat_i[min_loss_loc].reshape([1,blur_latent_dim]) x_hat_recov = x_hat_i[min_loss_loc] w_hat = blur_decoder.predict(zk_hat_recov).reshape(BLUR_RES,BLUR_RES) xg_hat = svhn_decoder_new.predict(zi_hat_recov).reshape(IMAGE_RES,IMAGE_RES,CHANNELS) X_Hat.append(x_hat_recov); W_Hat.append(w_hat); XG_Hat.append(xg_hat) X_Hat = np.array(X_Hat) W_Hat = np.array(W_Hat) XG_Hat = np.array(XG_Hat) # normalizing images X_Hat = 
np.clip(X_Hat, 0,1) XG_Hat = (XG_Hat + 1)/2 # saving results Max = 10**len(str(len(X_Orig)-1)) for i in range(len(X_Orig)): Save_Results_Algo_2(path = SAVE_PATH + str(i+Max)[1:], x_np = None, w_np = None, y_np = None, y_np_range = None , x_hat_test = None, w_hat_test = None, x_range = None, x_hat_range = XG_Hat[i], w_hat_range = W_Hat[i], clip=True) """ # Running GAN # Algo 1 # constants REGULARIZORS = [0.01 , 0.01] RANDOM_RESTARTS = 10 NOISE_STD = 0.01 STEPS = 10000 IMAGE_RANGE = [-1,1] # loading svhn generator SVHNGen = SVHNganGenerator() SVHNGen.GenerateModel() SVHNGen.LoadWeights() SVHNGAN = SVHNGen.GetModels() svhn_latent_dim = SVHNGen.latent_dim # alternating gradient descent for test images image_gradients, blur_gradients, get_loss = Generate_Gradient_Functions(rr = Blurry_Images.shape[0], reg = REGULARIZORS, image_range = IMAGE_RANGE, decoder = SVHNGAN , blur_decoder = blur_decoder, image_res = IMAGE_RES, blur_res = BLUR_RES, channels = CHANNELS) m_hat, h_hat, Loss = Optimize_Parallel(blurry_fourier = Blurry_Images, stepsize=step_size,steps = STEPS, image_grad = image_gradients , blur_grad = blur_gradients, getloss = get_loss, latent_image_dim = svhn_latent_dim , latent_blur_dim = blur_latent_dim) X_hat_test = [] W_hat_test = [] for i in range(len(X_Orig)): m_hat_i = m_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] h_hat_i = h_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] Loss_i = Loss[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] x_hat_test, w_hat_test, loss_last_iter_test = Get_Min_Loss(Loss_i, m_hat_i, h_hat_i, decoder = SVHNGAN , blur_decoder = blur_decoder, latent_image_dim = svhn_latent_dim, latent_blur_dim = blur_latent_dim, print_grad=False) X_hat_test.append(x_hat_test) W_hat_test.append(w_hat_test) X_hat_test = np.array(X_hat_test) W_hat_test = np.array(W_hat_test) X_hat_test = (X_hat_test + 1)/2 Max = 10**len(str(len(X_Orig)-1)) # saving results for i in range(len(X_Orig)): Save_Results_Algo_3(path = SAVE_PATH + str(i+Max)[1:], x_np = None, w_np = None, y_np = None, y_np_range = None, x_hat_test = X_hat_test[i], w_hat_test = W_hat_test[i], x_range = None, x_hat_range = None, w_hat_range = None, clip=True) #Algo 2 # algorithm constants REGULARIZORS = [1.0, 0.5, 100.0, 0.001] LEARNING_RATE = 0.005 RANDOM_RESTARTS = 10 NOISE_STD = 0.01 STEPS = 10000 IMAGE_RANGE = [-1,1] optimizer = tf.train.AdamOptimizer(learning_rate=LEARNING_RATE) # loading svhn generator SVHNGen = SVHNganGenerator() SVHNGen.GenerateModel() SVHNGen.LoadWeights() SVHNGAN = SVHNGen.GetModels() SVHNGAN.trainable = False svhn_latent_dim = SVHNGen.latent_dim # solving deconvolution using Algorithm 2 rr = Blurry_Images.shape[0] zi_tf = tf.Variable(tf.random_normal(shape=([rr, svhn_latent_dim])), dtype = 'float32') zk_tf = tf.Variable(tf.random_normal(shape=([rr, blur_latent_dim])), dtype = 'float32') x_tf = tf.Variable(tf.random_normal(mean = 0.5, stddev = 0.01,shape=([rr, IMAGE_RES,IMAGE_RES,CHANNELS]))) x_G = SVHNGAN(zi_tf) x_G = tf.reshape(x_G, shape=(rr,IMAGE_RES,IMAGE_RES,CHANNELS)) x_G = (x_G + 1)/2 y_fourier = tf.placeholder(shape=(rr, IMAGE_RES,IMAGE_RES,CHANNELS), dtype='complex64') blur = blur_decoder(zk_tf) blur = tf.reshape(blur, shape=(rr,BLUR_RES,BLUR_RES)) padding = np.int((IMAGE_RES -BLUR_RES)/2) blur = tf.pad(blur, [[0,0], [padding,padding],[padding,padding]], 'CONSTANT') blur_fourier = tf.fft2d( tf.cast(blur, dtype = 'complex64')) # splitting tensors into 3 channels y_fourier0 = y_fourier[:,:,:,0]; x_0 = x_tf[:,:,:,0]; x_G0 = x_G[:,:,:,0] y_fourier1 = y_fourier[:,:,:,1]; x_1 = x_tf[:,:,:,1]; x_G1 = 
x_G[:,:,:,1] y_fourier2 = y_fourier[:,:,:,2]; x_2 = x_tf[:,:,:,2]; x_G2 = x_G[:,:,:,2] # 1st Channel Loss x_0_fourier = tf.fft2d( tf.cast( x_0, dtype='complex64')) loss_x0 = tf.reduce_mean( tf.square( tf.abs(y_fourier0 - x_0_fourier*blur_fourier) ), axis=[1,2]) x_Gi0_fourier = tf.fft2d( tf.cast( x_G0, dtype='complex64')) loss_xG0 = tf.reduce_mean( tf.square( tf.abs(y_fourier0 - x_Gi0_fourier*blur_fourier) ), axis=[1,2]) # 2nd Channel Loss x_1_fourier = tf.fft2d( tf.cast( x_1, dtype='complex64')) loss_x1 = tf.reduce_mean( tf.square( tf.abs(y_fourier1 - x_1_fourier*blur_fourier) ), axis=[1,2]) x_Gi1_fourier = tf.fft2d( tf.cast( x_G1, dtype='complex64')) loss_xG1 = tf.reduce_mean( tf.square( tf.abs(y_fourier1 - x_Gi1_fourier*blur_fourier) ), axis=[1,2]) # 3rd Channel Loss x_2_fourier = tf.fft2d( tf.cast( x_2, dtype='complex64')) loss_x2 = tf.reduce_mean( tf.square( tf.abs(y_fourier2 - x_2_fourier*blur_fourier) ), axis=[1,2]) x_Gi2_fourier = tf.fft2d( tf.cast( x_G2, dtype='complex64')) loss_xG2 = tf.reduce_mean( tf.square( tf.abs(y_fourier2 - x_Gi2_fourier*blur_fourier) ), axis=[1,2]) Loss_xG_tf = tf.constant(REGULARIZORS[0])*(loss_xG0 + loss_xG1 + loss_xG2) Loss_x_tf = tf.constant(REGULARIZORS[1])*(loss_x0 + loss_x1 + loss_x2) x_minus_xG_tf = tf.constant(REGULARIZORS[2])*tf.reduce_mean( tf.square( tf.abs(x_tf - x_G)), axis=[1,2,3]) LossTV_tf = tf.constant(REGULARIZORS[3])*tf.image.total_variation(x_tf) TotalLoss_tf = Loss_xG_tf + Loss_x_tf + x_minus_xG_tf + LossTV_tf opt = optimizer.minimize(TotalLoss_tf, var_list = [zi_tf, zk_tf, x_tf]) sess = K.get_session() sess.run(tf.variables_initializer([zi_tf, zk_tf, x_tf])) Losses = [] # running optimizer steps for i in tqdm(range(STEPS), ascii=True, desc = 'Solving Deconv.'): losses = sess.run([opt, TotalLoss_tf, Loss_xG_tf, Loss_x_tf, x_minus_xG_tf], feed_dict = {y_fourier: Blurry_Images}) Losses.append([loss for loss in losses[1:] ]) Losses = np.array(Losses) zi_hat, zk_hat, x_hat = sess.run([zi_tf, zk_tf, x_tf]) tmp = [] for i in range(4): tmp.append( [loss[i] for loss in Losses]) Losses = tmp TotalLoss, Loss_xG, Loss_x, x_minus_xG = Losses # extracting best images from random restarts with minimum residual error X_Hat = [] XG_Hat = [] W_Hat = [] for i in range(len(X_Orig)): x_i = X_Orig[i] zi_hat_i = zi_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] zk_hat_i = zk_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] x_hat_i = x_hat[i*RANDOM_RESTARTS:(i+1)*RANDOM_RESTARTS] w_hat_i = blur_decoder.predict(zk_hat_i)[:,:,:,0] x_hat_i = np.clip(x_hat_i, 0, 1) loss_i = [ComputeResidual(Y_np[i], x, w) for x,w in zip(x_hat_i,w_hat_i)] min_loss_loc = np.argmin(loss_i) zi_hat_recov = zi_hat_i[min_loss_loc].reshape([1,svhn_latent_dim]) zk_hat_recov = zk_hat_i[min_loss_loc].reshape([1,blur_latent_dim]) x_hat_recov = x_hat_i[min_loss_loc] w_hat = blur_decoder.predict(zk_hat_recov).reshape(BLUR_RES,BLUR_RES) xg_hat = SVHNGAN.predict(zi_hat_recov).reshape(IMAGE_RES,IMAGE_RES,CHANNELS) X_Hat.append(x_hat_recov); W_Hat.append(w_hat); XG_Hat.append(xg_hat) X_Hat =
np.array(X_Hat)
numpy.array
''' @author: genis ''' import numpy as np from scipy.optimize import curve_fit import numpy as np import math from sklearn.metrics import roc_curve, auc from sklearn.linear_model import LogisticRegression from scipy.special import erf import statsmodels.api as sm from scipy.stats import norm def kernel_logistic(stim, d,Nboot=500): ''' Computes the PK and the standard error as a LogisticRegression. Standard error is computed using bootstrap. inputs: stim: 2D-Array of stimulus d: 1-D array of decisions (-1,1) outputs: pk and pk std error ''' Nstim,Nframes=np.shape(stim) pk_boots=np.zeros((Nboot,len(stim[1]))) INDEXS=np.random.randint(0,Nstim,(Nboot,Nstim)) C=10000 clf_l2_LR = LogisticRegression(C=C, penalty='l2') if not all(np.unique(d)==[0,1]): d=(d+1)/2 for i,index in enumerate(INDEXS): clf_l2_LR.fit(stim[index],d[index]) pk_boots[i]=clf_l2_LR.coef_[0] pk=np.mean(pk_boots,axis=0) pk_std=np.std(pk_boots,axis=0) return pk, pk_std def kernel_error(stim,d,error=False,Nboot=500): ''' Computes the kernel and the standard error with bootstrap. inputs: stim: 2D-Array of stimulus d: 1-D array of decisions (-1,1) outputs: kernel: Dictionary with kernel and error_kernel ''' Nframe=len(stim[0]) Nstim=len(stim) kernel={'kernel':np.zeros(Nframe),'error':np.zeros(Nframe)} if not error: aux_kernel=np.zeros(Nframe) for iframe in range(Nframe): fpr,tpr,_=roc_curve(d,stim[:,iframe]) aux_kernel[iframe] = auc(fpr, tpr) kernel['kernel']=aux_kernel return kernel else: aux_kernel=np.zeros((Nframe,Nboot)) indexs=np.random.randint(0,Nstim,(Nboot,Nstim)) for iboot in range(Nboot): if iboot%100==0: print(iboot) for iframe in range(Nframe): fpr,tpr,_=roc_curve(d[indexs[iboot]],stim[indexs[iboot],iframe]) aux_kernel[iframe][iboot] = auc(fpr, tpr) for iframe in range(Nframe): kernel['kernel'][iframe]=np.mean(aux_kernel[iframe]) kernel['error'][iframe]=np.std(aux_kernel[iframe]) return kernel def kernel_shuffle(stim,d,Nboot=500): ''' Computes the kernel and the standard error for shuffle choices. 
inputs: stim: 2D-Array of stimulus d: 1-D array of decisions (-1,1) outputs: kernel: Dictionary with kernel and error_kernel ''' Nframe=len(stim[0]) Nstim=len(stim) kernel={'kernel':np.zeros(Nframe),'error':np.zeros(Nframe)} aux_kernel=np.zeros((Nframe,Nboot)) for ishuffle in range(Nboot): np.random.shuffle(d) if ishuffle%100==0: print(ishuffle) for iframe in range(Nframe): fpr,tpr,_=roc_curve(d,stim[:,iframe]) aux_kernel[iframe][ishuffle] = auc(fpr, tpr) for iframe in range(Nframe): kernel['kernel'][iframe]=np.mean(aux_kernel[iframe]) kernel['error'][iframe]=np.std(aux_kernel[iframe]) return kernel def PK_slope(kernel): ''' Compute the slope of the PK: PRR=integral( kernel*f(t)) with f(t)=1-a*t with a such as f(T)=-1 T stimulus duration positive recency zero flat negative primacy ''' aux=np.linspace(1,-1,len(kernel)) kernel=kernel-0.5 aux_kernel=(kernel)/(np.sum(kernel)) return -np.sum(aux_kernel*aux) def total_area_kernel_PInormalize(kernel): ''' Compute the PK area normalized by the area of a PI ''' nframes=len(kernel) area_pi=nframes*( 0.5+2/np.pi*np.arctan(1/np.sqrt(2*nframes-1)) ) -0.5*nframes return np.sum(kernel-0.5)/area_pi def mean_and_error(vector,z=1.0): ''' mean and error according to binomial distribution ''' m=np.mean(vector) return m,z*np.sqrt(m*(1-m)/len(vector)) def make_control_stim(mu,sigma,T,N): ''' it returns stimulus with exact mean mu and std sigma ''' if N==1: stim=np.random.randn(T) m=np.mean(stim) s=np.std(stim) stim=mu+((stim-m)/s)*sigma else: stim=np.random.randn(N,T) for itrial in range(N): m=np.mean(stim[itrial]) s=np.std(stim[itrial]) stim[itrial]=mu+((stim[itrial]-m)/s)*sigma return stim def running_average(x,window,mode='valid'): y=np.convolve(x, np.ones((window))/window, mode=mode) return y def spatial_kernel_sklearn(stim,d,bins=None,Nbins=21,Nboot=500): """ Compute the spatial kernel. stim is a 2d array with the stimulus d is a 1d array with the choices associated with stimulus. 
""" if not all(np.unique(d)==[0,1]): d=(d+1)/2 if bins is None: max_stim=np.max(stim) min_stim=np.min(stim) b=np.max([max_stim,abs(min_stim)]) bins=np.linspace(-b,b,Nbins+1) print("bins:",bins) bin_center=[ (bins[i]+bins[i+1])/2.0 for i in range(len(bins)-1) ] kernel_boots=np.zeros((Nboot,len(bin_center))) C=10000 clf_l2_LR = LogisticRegression(C=C, penalty='l2') spatial_stim=np.array([np.histogram(stim[i],bins)[0] for i in range(len(stim))]) #print "hola" for iN in range(Nboot): index_stims=np.random.randint(0,len(stim),Nboot) clf_l2_LR.fit(spatial_stim[index_stims], d[index_stims]) kernel_boots[iN]=clf_l2_LR.coef_[0] kernel_mean=np.zeros(len(kernel_boots[0])) kernel_err=np.zeros(len(kernel_boots[0])) for it in range(len(kernel_mean)): kernel_mean[it]=np.mean(kernel_boots[:,it]) kernel_err[it]=np.std(kernel_boots[:,it]) return kernel_mean,kernel_err,bin_center,bins def basic_psychophysics_results(data): Nframes=len(data['stim'][0]) stim=np.array(data['stim']) stim=np.reshape(stim,(len(stim),len(stim[0]))) stim_sign=np.array(data['stim_sign']) isigmas=np.array(data['i_sigmas']) choice=np.array(data['choice']) #correct=np.array(data['correct']) correct=( (stim_sign*choice)+1)/2 mu=np.array(data['mu']) isigmas_values=np.unique(isigmas) Nsigmas=len(isigmas_values) kernel=np.zeros((Nsigmas,Nframes)) kernel_err=np.zeros((Nsigmas,Nframes)) kernel_shuff=np.zeros((Nsigmas,Nframes)) kernel_shuff_err=np.zeros((Nsigmas,Nframes)) Performance=np.zeros(Nsigmas) Performance_err=np.zeros(Nsigmas) PerformanceR=np.zeros(Nsigmas) PerformanceR_err=np.zeros(Nsigmas) PerformanceL=np.zeros(Nsigmas) PerformanceL_err=np.zeros(Nsigmas) PR=np.zeros(Nsigmas) PR_err=np.zeros(Nsigmas) PR_trial=np.zeros(Nsigmas) Nbins_spatial_kernel=11 spatial_kernel=np.zeros((Nsigmas,Nbins_spatial_kernel)) spatial_kernel_std=np.zeros((Nsigmas,Nbins_spatial_kernel)) bin_centers=np.zeros((Nsigmas,Nbins_spatial_kernel)) spatial_kernel_all,spatial_kernel_all_std,bin_centers_all,_=spatial_kernel_sklearn(stim,choice,Nbins=Nbins_spatial_kernel) # Nbins_spatial_kernel2=22 # percentile=np.linspace(0,100,Nbins_spatial_kernel) # bins=np.percentile(np.hstack(stim),percentile) # spatial_kernel_all2,spatial_kernel_all_std2,bin_centers_all2,_=spatial_kernel_sklearn(stim,choice,bins=bins,Nbins=Nbins_spatial_kernel2) PR_total,PR_total_err=mean_and_error( (choice+1)/2 ) PR_trial_total,PR_trial_total_err=mean_and_error( (stim_sign+1)/2 ) Nboot=1000 for isigma in isigmas_values: print("isigma: ",isigma) indices=np.where(isigmas==isigma)[0] Performance[isigma],Performance_err[isigma]=mean_and_error(correct[indices]) indicesL=np.where( (isigmas==isigma) & (stim_sign==-1) )[0] indicesR=np.where( (isigmas==isigma) & (stim_sign==1) )[0] PerformanceR[isigma],PerformanceR_err[isigma]=mean_and_error(correct[indicesR]) PerformanceL[isigma],PerformanceL_err[isigma]=mean_and_error(correct[indicesL]) d=choice[indices]*stim_sign[indices] PR[isigma],PR_err[isigma]=mean_and_error( (choice[indices]+1)/2 ) PR_trial[isigma]=np.mean( (stim_sign[indices]+1)/2 ) stim_sigma=np.array([stim[i]*stim_sign[i]-mu[i] for i in indices ]) ### correct vs incorrecte d=choice[indices]*stim_sign[indices] ### correct vs incorrecte aux_k=kernel_error(stim_sigma,d,error=True,Nboot=Nboot) kernel[isigma]=aux_k['kernel'] kernel_err[isigma]=aux_k['error'] aux_k=kernel_shuffle(stim_sigma,d,Nboot=Nboot) kernel_shuff[isigma]=aux_k['kernel'] kernel_shuff_err[isigma]=aux_k['error'] results={ "kernel": kernel,"kernel_std": kernel_err,"kernel_shuffle": kernel_shuff, "kernel_shuffle_std": 
kernel_shuff_err,"Performance":Performance,"Performance_std":Performance_err, "PerformanceR":PerformanceR,"PerformanceR_std":PerformanceR_err,"PerformanceL":PerformanceL, "PerformanceL_std":PerformanceL_err,"PR":PR,"PR_std":PR_err,"PR_trial":PR_trial, "spatial_kernel_all":spatial_kernel_all,"spatial_kernel_all_std":spatial_kernel_all_std, "bin_centers_all":bin_centers_all} return results def AIC(LL,Nparam,model_base): Delta_AIC={} for model in LL.keys(): Delta_AIC[model]=[] for isubject in range(len(LL[model_base])): AIC_model=2*Nparam[model]+2*LL[model][isubject] AIC_model_base=2*Nparam[model_base]+2*LL[model_base][isubject] Delta_AIC[model].append(AIC_model-AIC_model_base) return Delta_AIC def best_model(LL,Nparam): keys=list(LL.keys()) Nsubject=len(LL[keys[0]]) AIC=[ {} for isubject in range(Nsubject) ] best_model=[] for isubject in range(Nsubject): for model in LL.keys(): AIC[isubject][model]=2*Nparam[model]+2*LL[model][isubject] best=min(AIC[isubject], key=AIC[isubject].get) best_model.append(best) return best_model,AIC def create_stim(mu,sigmas,N,Nframes): m=mu*np.random.choice([-1,1],N) stim=np.random.randn(N,Nframes) s=int((N/len(sigmas)) )*list(sigmas) s=np.array(s) for i in range(N): stim[i]=m[i]+s[i]*(stim[i] -np.mean(stim[i]) )/np.std(stim[i]) return stim,m,s def basic_psychophysics_results2(data): Nframes=len(data['stim'][0]) stim=np.array(data['stim']) stim=np.reshape(stim,(len(stim),len(stim[0]))) Nstim,Nframes=np.shape(stim) #stim_sign=np.array(data['stim_sign']) i_sigmas=np.array(data['i_sigmas'],dtype=int) choices=np.array(data['choices']) stim_sign=np.array(data["stim_sign"]) correct=np.array( (choices*stim_sign+1)/2) print("correct",correct) ## temporal kernel ## logit,tpk_stderr=temporal_kernel_sm(stim,choices) tpk=logit.params pkslope,pkslope_std_err=temporal_pk_slope(tpk) ## spatial kernel ## logit,spk_stderr,bins_centers=spatial_kernel_sm(stim,choices,Nbins=11) spk=logit.params i_sigmas_values=np.unique(i_sigmas) Nsigmas=len(i_sigmas_values) Performance=np.zeros(Nsigmas) Performance_stderr=np.zeros(Nsigmas) PerformanceR=np.zeros(Nsigmas) PerformanceR_stderr=np.zeros(Nsigmas) PerformanceL=np.zeros(Nsigmas) PerformanceL_stderr=np.zeros(Nsigmas) for isigma in i_sigmas_values: print("isigma: ",isigma) indices=np.where(i_sigmas==isigma)[0] Performance[isigma],Performance_stderr[isigma]=mean_and_error(correct[indices]) indicesL=np.where( (i_sigmas==isigma) & (stim_sign==-1) )[0] indicesR=np.where( (i_sigmas==isigma) & (stim_sign==1) )[0] PerformanceR[isigma],PerformanceR_stderr[isigma]=mean_and_error(correct[indicesR]) PerformanceL[isigma],PerformanceL_stderr[isigma]=mean_and_error(correct[indicesL]) results={ "tpk":tpk,"tpk_stderr":tpk_stderr,"pkslope":pkslope, "pkslope_std_err":pkslope_std_err,"spk":spk, "spk_stderr":spk_stderr,"bins_centers":bins_centers, "Performance":Performance,"Performance_stderr":Performance_stderr, "PerformanceR":PerformanceR,"PerformanceR_stderr":PerformanceR_stderr, "PerformanceL":PerformanceL,"PerformanceL_stderr":PerformanceL_stderr } return results def temporal_kernel_sm_correct_vs_error(stim,choices,stim_sign,stim_full=False): #compute mean of each stimulus Nstim,Nframes=np.shape(stim) mu=np.array([np.mean(stim[i]) for i in range(Nstim) ]) mu=np.reshape(mu,(Nstim,1)) #stim_sign=np.sign(mu[:,0]) # if not all(np.unique(choices)==[0,1]): # choices=(choices+1)/2 correct=stim_sign*choices correct=(correct+1)/2 m=np.abs(np.mean(stim[0])) if not stim_full: stim_fluctuation=np.array([ stim_sign[i]*(stim[i]) -m for i in range(Nstim)]) 
#stim_fit=stim_fluctuation #stim_fit=np.append(stim_fluctuation,np.abs(mu), axis=1) #stim_fit=np.append(stim_fit,np.ones((Nstim,1)), axis=1) #add a bias stim_fit=np.append(stim_fluctuation,m*np.ones((Nstim,1)), axis=1) #add a bias else: stim_fit=sm.add_constant(stim) print(np.shape(stim_fit)) print(np.shape(correct)) log_reg=sm.Logit(correct,stim_fit) fit_res=log_reg.fit() #Laplace approximation for conf_interval hessian=log_reg.hessian(fit_res.params) hinv=np.linalg.inv(hessian) diag=np.diagonal(hinv) stderr=
np.sqrt(-diag)
numpy.sqrt
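The completion in this row takes the square root of the negated diagonal of the inverse Hessian to obtain Laplace-approximation standard errors for the logistic fit. A runnable sketch of that step with statsmodels on synthetic data (the data-generating line is an assumption made only to have something to fit):

```python
import numpy as np
import statsmodels.api as sm

rng = np.random.default_rng(0)
X = sm.add_constant(rng.normal(size=(500, 3)))
y = (X @ np.array([0.2, 1.0, -0.5, 0.3]) + rng.normal(size=500) > 0).astype(float)

log_reg = sm.Logit(y, X)
fit_res = log_reg.fit(disp=0)

# Laplace approximation: covariance ~ negative inverse Hessian of the log-likelihood at the MLE.
hessian = log_reg.hessian(fit_res.params)
hinv = np.linalg.inv(hessian)
stderr = np.sqrt(-np.diagonal(hinv))

print(stderr)        # matches the standard errors reported by the fit
print(fit_res.bse)
```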
#!/usr/bin/python3 '''Poisson's finite difference method''' import numpy as np import matplotlib.pyplot as plt import matplotlib as mpl from numba import jit try: from emtoolbox.utils.constants import EPS0 except ImportError: EPS0 = 8.854e-12 @jit() def check_arrays_1d(X, er=None): if er is not None: if er.shape[0] != X.shape[0] - 1: raise Exception('X shape must be one larger than er') @jit() def check_arrays_2d(X, Y, er=None): if X.shape != Y.shape: raise Exception('X and Y shape must identical') if X[1, 0] == X[0, 0] or Y[0, 1] == Y[0, 0]: raise Exception('X and Y must have ij indexing') if abs(abs(X[1, 0] - X[0, 0]) - abs(Y[0, 1] - Y[0, 0])) > 1e-6: raise Exception('X and Y must have the same spacing') if er is not None: if er.shape[0] != X.shape[0] - 1 or er.shape[1] != X.shape[1] - 1: raise Exception('X and Y shape must be one larger than er') @jit() def check_arrays_3d(X, Y, Z): if X[1, 0, 0] == X[0, 0, 0] or Y[0, 1, 0] == Y[0, 0, 0] or Z[0, 0, 1] == Z[0, 0, 0]: raise Exception('X, Y and Z must have ij indexing') # TODO check array spacing @jit(nopython=True) def poisson_1d(X: np.ndarray, /, v_left: float = 0, v_right: float = 0, dielectric: np.ndarray = None, charge: np.ndarray = None, bc: list = None, sor=1.8, conv: float = 1e-5, Nmax: int = 1e5): '''One-dimension Poisson equation with fixed potential boundaries. Normalized charge density ps/eps can be provided via the charge argument Dielectric is an array of relative permittivity, located at half-grid points x0 x1 x2 ... xn e0 e1 ... en-1 [one less point] Boundary condition is to be provided as a (bool array, value array) matching X Where the condition is X[bool] = value''' check_arrays_1d(X, dielectric) if charge is not None: raise Exception('Charge is currently not supported') # TODO enforce array types V = np.zeros_like(X, dtype='float64') V[0] = v_left V[-1] = v_right V[1:-1] = 0.5 * (v_left + v_right) # Initial seed nx = len(X) if bc is None: # Explicit to prompt numba type bc_bool = np.array([False]) bc_val = np.array([0.0]) else: bc_bool, bc_val = bc for i in range(nx): if bc_bool[i]: V[i] = bc_val[i] for n in range(int(Nmax)): Vsum = 0 Verr = 0 for i in range(1, nx-1): V_old = V[i] if not bc or not bc_bool[i]: if dielectric is None: R = 0.5 * (V[i+1] + V[i-1]) - V_old else: er1 = dielectric[i-1] er2 = dielectric[i] R = (er2 * V[i+1] + er1 * V[i-1]) / (er1 + er2) - V_old V[i] = R * sor + V_old Verr += abs(R) Vsum += abs(V[i]) if Vsum > 0 and Verr / Vsum < conv: break print('1D Error', Verr / Vsum, 'after', n+1, 'iterations') return V @jit(nopython=True) def poisson_2d(X: np.ndarray, Y: np.ndarray, /, v_left: float = 0, v_right: float = 0, v_top: float = 0, v_bottom: float = 0, dielectric: np.ndarray = None, charge: np.ndarray = None, bc: list = None, sor=1.8, xsym: bool = False, ysym: bool = False, conv: float = 1e-5, Nmax: int = 1e5): '''Two-dimension Poisson equation with fixed potential boundaries. 
Normalized charge density ps/eps can be provided via the charge argument''' check_arrays_2d(X, Y, dielectric) if charge is not None: raise Exception('Charge is currently not supported') # TODO enforce array types V = np.zeros_like(X, dtype='float64') V[0, :] = v_left V[-1, :] = v_right V[:, -1] = v_top V[:, 0] = v_bottom V[0, 0] = 0.5 * (v_bottom + v_left) V[-1, 0] = 0.5 * (v_bottom + v_right) V[0, -1] = 0.5 * (v_top + v_left) V[-1, -1] = 0.5 * (v_top + v_right) V[1:-1, 1:-1] = 0.25 * (v_bottom + v_right + v_top + v_left) nx = X.shape[0] ny = X.shape[1] if bc is None: # Explicit to prompt numba type bc_bool = np.array([[False]]) bc_val = np.array([[0.0]]) else: bc_bool, bc_val = bc for j in range(ny): for i in range(nx): if bc_bool[i, j]: V[i, j] = bc_val[i, j] for n in range(int(Nmax)): Vsum = 0 Verr = 0 if xsym: for j in range(1, ny-1): if not bc or not bc_bool[0, j]: V[0, j] = 0.25 * (V[0, j+1] + V[0, j-1] + 2*V[1, j]) if ysym: for i in range(1, nx-1): if not bc or not bc_bool[i, 0]: V[i, 0] = 0.25 * (V[i+1, 0] + V[i-1, 0] + 2*V[i, 1]) for j in range(1, ny-1): for i in range(1, nx-1): V_old = V[i, j] if not bc or not bc_bool[i, j]: if dielectric is None: R = 0.25 * (V[i+1, j] + V[i-1, j] + V[i, j+1] + V[i, j-1]) - V_old else: er_nw = dielectric[i-1, j] er_ne = dielectric[i, j] er_sw = dielectric[i-1, j-1] er_se = dielectric[i, j-1] R = (((er_sw + er_nw) * V[i-1, j] + (er_nw + er_ne) * V[i, j+1] + (er_ne + er_se) * V[i+1, j] + (er_se + er_sw) * V[i, j-1]) / (2 * (er_nw + er_ne + er_sw + er_se))) - V_old V[i, j] = R * sor + V_old Verr += abs(R) Vsum += abs(V[i, j]) if Vsum > 0 and Verr / Vsum < conv: break print('2D Error', Verr / Vsum, 'after', n+1, 'iterations') return V @jit(nopython=True) def poisson_3d(X: np.ndarray, Y: np.ndarray, Z: np.ndarray, /, v_left: float = 0, v_right: float = 0, v_top: float = 0, v_bottom: float = 0, v_front: float = 0, v_back: float = 0, dielectric: np.ndarray = None, charge: np.ndarray = None, bc: list = None, sor: float = 1.8, xsym: bool = False, ysym: bool = False, zsym: bool = False, conv: float = 1e-5, Nmax: int = 1e5): '''Three-dimension Poisson equation with fixed potential boundaries. 
Normalized charge density ps/eps can be provided via the charge argument''' check_arrays_3d(X, Y, Z) if charge is not None: raise Exception('Charge is currently not supported') # TODO enforce array types V = np.zeros_like(X, dtype='float64') V[0, :, :] = v_back V[-1, :, :] = v_front V[:, 0, :] = v_left V[:, -1, :] = v_right V[:, :, 0] = v_bottom V[:, :, -1] = v_top V[0, 0, 0] = 1/3 * (v_bottom + v_left + v_back) V[-1, 0, 0] = 1/3 * (v_bottom + v_left + v_front) V[0, -1, 0] = 1/3 * (v_bottom + v_right + v_back) V[-1, -1, 0] = 1/3 * (v_bottom + v_right + v_front) V[0, 0, -1] = 1/3 * (v_top + v_left + v_back) V[-1, 0, -1] = 1/3 * (v_top + v_left + v_front) V[0, -1, -1] = 1/3 * (v_top + v_right + v_back) V[-1, -1, -1] = 1/3 * (v_top + v_right + v_front) V[1:-1, 1:-1, 1:-1] = 1/6 * (v_bottom + v_right + v_top + v_left + v_front + v_back) nx = X.shape[0] ny = X.shape[1] nz = X.shape[2] if bc is None: # Explicit to prompt numba type bc_bool = np.array([[[False]]]) bc_val = np.array([[[0.0]]]) else: bc_bool, bc_val = bc for k in range(nz): for j in range(ny): for i in range(nx): if bc_bool[i, j, k]: V[i, j, k] = bc_val[i, j, k] for n in range(int(Nmax)): Vsum = 0 Verr = 0 if xsym: for k in range(1, nz-1): for j in range(1, ny-1): if not bc or not bc_bool[0, j, k]: V[0, j, k] = 1/6 * (V[0, j+1, k] + V[0, j-1, k] + V[0, j, k+1] + V[0, j, k-1] + 2*V[1, j, k]) if ysym: for k in range(1, nz-1): for i in range(1, nx-1): if not bc or not bc_bool[i, 0, k]: V[i, 0, k] = 1/6 * (V[i+1, 0, k] + V[i-1, 0, k] + V[i, 0, k+1] + V[i, 0, k-1] + 2*V[i, 1, k]) if zsym: for j in range(1, ny-1): for i in range(1, nx-1): if not bc or not bc_bool[i, j, 0]: V[i, j, 0] = 1/6 * (V[i+1, j, 0] + V[i-1, j, 0] + V[i, j+1, 0] + V[i, j-1, 0] + 2*V[i, j, 1]) for k in range(1, nz-1): for j in range(1, ny-1): for i in range(1, nx-1): V_old = V[i, j, k] if not bc or not bc_bool[i, j, k]: if dielectric is None: R = (V[i+1, j, k] + V[i-1, j, k] + V[i, j+1, k] + V[i, j-1, k] + V[i, j, k+1] + V[i, j, k-1]) / 6 - V_old else: er_brb = dielectric[i-1, j, k-1] er_frb = dielectric[i, j, k-1] er_blb = dielectric[i-1, j-1, k-1] er_flb = dielectric[i, j-1, k-1] er_brt = dielectric[i-1, j, k] er_frt = dielectric[i, j, k] er_blt = dielectric[i-1, j-1, k] er_flt = dielectric[i, j-1, k] R = (((er_brb + er_blb + er_brt + er_blt) * V[i-1, j, k] + (er_frb + er_flb + er_frt + er_flt) * V[i+1, j, k] + (er_blb + er_flb + er_blt + er_flt) * V[i, j-1, k] + (er_brb + er_frb + er_brt + er_frt) * V[i, j+1, k] + (er_brb + er_frb + er_blb + er_flb) * V[i, j, k-1] + (er_brt + er_frt + er_blt + er_flt) * V[i, j, k+1]) / (3 * (er_brb + er_frb + er_blb + er_flb + er_brt + er_frt + er_blt + er_flt))) - V_old V[i, j, k] = R * sor + V_old Verr += abs(R) Vsum += abs(V[i, j, k]) if Vsum > 0 and Verr / Vsum < conv: break print('3D Error', Verr / Vsum, 'after', n+1, 'iterations') return V def gauss_1d(X: np.ndarray, V: np.ndarray, er: np.ndarray, i: int): '''One-dimensional Gauss' law, returning enclosed charge Evaluated at array index i Note: charge polarity is positive for V increasing with X''' dx = X[i+1] - X[i-1] return EPS0 * (er[i] * V[i+1] - er[i-1] * V[i-1] + (er[i-1] - er[i]) * V[i]) / dx def gauss_2d(X: np.ndarray, Y: np.ndarray, V: np.ndarray, er: np.ndarray, xi1: int, xi2: int, yi1: int, yi2: int): '''Two-dimensional Gauss' law, returning enclosed charge Evaluated along closed rectangle defined by corners: Bottom-left (xi1, yi1) to top-right (xi2, yi2) Setting xi1 to 0 implies x-symmetry, and yi0 implies y-symmetry In this case, the left-edge and the 
bottom-edge are omitted, respectively, with the result multiplied by 2 or 4, as appropriate Note: charge polarity is positive for V increasing with X or Y''' check_arrays_2d(X, Y) qe = 0 # Top and bottom edges; dV/dy and -dV/dy, 0.5 is due to central-difference if yi1 == 0: h_edges = [(yi2, -0.5)] else: h_edges = zip((yi1, yi2), (0.5, -0.5)) for yi, k in h_edges: for xi in range(xi1, xi2+1): qe += k * (er[xi, yi] * V[xi, yi+1] - er[xi, yi-1] * V[xi, yi-1] + (er[xi, yi-1] - er[xi, yi]) * V[xi, yi]) # Left and right edges; dV/dx and -dV/dx, 0.5 is due to central-difference if xi1 == 0: v_edges = [(xi2, -0.5)] else: v_edges = zip((xi1, xi2), (0.5, -0.5)) for xi, k in v_edges: for yi in range(yi1, yi2+1): qe += k * (er[xi, yi] * V[xi+1, yi] - er[xi-1, yi] * V[xi-1, yi] + (er[xi-1, yi] - er[xi, yi]) * V[xi, yi]) if xi1 == 0: qe = 2 * qe # TODO Do not double count point on x-axis if yi1 == 0: qe = 2 * qe # TODO Do not double count point on y-axis return EPS0 * qe def trough_analytical(X: np.ndarray, Y: np.ndarray, v_left: float = 0, v_right: float = 0, v_top: float = 0, v_bottom: float = 0): a = X.max() - X.min() b = Y.max() - Y.min() V = np.zeros_like(X) for n in range(1, 101, 2): k1 = 4 / (n * np.pi) * np.sin(n * np.pi * Y / b) / np.sinh(n * np.pi * a / b) k2 = 4 / (n * np.pi) * np.sin(n * np.pi * X / a) / np.sinh(n * np.pi * b / a) vx = v_right * np.sinh(n * np.pi * X / b) + v_left * np.sinh(n * np.pi / b * (a - X)) vy = v_top * np.sinh(n * np.pi * Y / a) + v_bottom * np.sinh(n * np.pi / a * (b - Y)) V += k1 * vx + k2 * vy return V def example_poisson_2d(): w = 2.0 h = 1.0 x = np.linspace(0, w, 101) y = np.linspace(0, h, 51) X, Y = np.meshgrid(x, y, indexing='ij') bc = {'v_top': 10, 'v_left': 5} V = poisson_2d(X, Y, **bc) Va = trough_analytical(X, Y, **bc) error = np.abs(Va - V) + 1e-10 fig = plt.figure() fig.suptitle('Poisson Equation 2D') grid_spec = plt.GridSpec(2, 2, hspace=0.4) ax_fdm = fig.add_subplot(grid_spec[0]) ax_ana = fig.add_subplot(grid_spec[1]) ax_err = fig.add_subplot(grid_spec[2]) ax_surf = fig.add_subplot(grid_spec[3], projection='3d') ax_fdm.set_title('Finite Difference') ax_fdm.contour(X, Y, V) ax_ana.set_title('Analytical Solution') ax_ana.contour(X, Y, Va) ax_err.set_title('|Error|') c = ax_err.pcolor(X, Y, error, shading='auto', norm=mpl.colors.LogNorm(vmin=error.min(), vmax=error.max()), cmap='PuBu_r') fig.colorbar(c, ax=ax_err) ax_surf.set_title('Surface') ax_surf.plot_surface(X, Y, V) plt.show() def example_parallel_plates(): w = 4e-3 X, dx = np.linspace(0, w, 101, retstep=True) bc = {'v_left': 0, 'v_right': 200} er1 = 5.0 er2 = 1.0 era = np.where(X[:-1] < 1e-3, er1, er2) erb = np.select([X[:-1] < 2e-3, X[:-1] < 3e-3, X[:-1] < 4e-3], [er2, er1, er2]) V1 = poisson_1d(X, dielectric=era, **bc) V2 = poisson_1d(X, dielectric=erb, **bc) _, ax = plt.subplots() ax.plot(X, V1, label='2-layer') ax.plot(X, V2, label='3-layer') ax.set_ylabel('Potential (V)') ax.set_xlim([0, w]) er_ax = ax.twinx() er_ax.plot(X[:-1] + 0.5 * dx, era, ls=':') er_ax.plot(X[:-1] + 0.5 * dx, erb, ls=':') er_ax.set_ylabel(r'$\epsilon_r$') ax.legend() ax.grid() plt.show() def example_poisson_2d_coax(): ri = 2.0e-3 ro = 4.0e-3 w = 1.1 * ro dx = ri / 40 Va = 10.0 x = np.arange(0, w, dx) y = np.arange(-w, w, dx) X, Y =
np.meshgrid(x, y, indexing='ij')
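# --- Added illustrative sketch (not from the original module) ----------------
# Minimal, hedged example of pairing poisson_2d with gauss_2d: solve the
# charge-free trough problem and check, via Gauss' law, that a closed contour
# strictly inside the domain encloses (approximately) zero charge. The grid
# sizes, the 10 V top plate and the cell-centred unit dielectric are
# illustrative assumptions, not values taken from the code above.
def example_gauss_2d_sanity_check():
    x = np.linspace(0.0, 2.0, 101)
    y = np.linspace(0.0, 1.0, 51)
    X, Y = np.meshgrid(x, y, indexing='ij')
    V = poisson_2d(X, Y, v_top=10.0)
    # Uniform (vacuum) relative permittivity on cell centres, shape (nx-1, ny-1)
    er = np.ones((X.shape[0] - 1, X.shape[1] - 1))
    # Closed rectangle one grid point inside the boundary: no free charge is
    # enclosed, so the result should be near zero up to discretisation error.
    q_enc = gauss_2d(X, Y, V, er,
                     xi1=1, xi2=X.shape[0] - 2,
                     yi1=1, yi2=X.shape[1] - 2)
    print('Enclosed charge per unit length (C/m):', q_enc)
    return q_enc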
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Nov 21 14:08:43 2019 to produce X and y use combine_pos_neg_from_nc_file or prepare_X_y_for_holdout_test @author: ziskin """ from PW_paths import savefig_path from PW_paths import work_yuval from pathlib import Path cwd = Path().cwd() hydro_path = work_yuval / 'hydro' axis_path = work_yuval/'axis' gis_path = work_yuval / 'gis' ims_path = work_yuval / 'IMS_T' hydro_ml_path = hydro_path / 'hydro_ML' gnss_path = work_yuval / 'GNSS_stations' # 'tela': 17135 hydro_pw_dict = {'nizn': 25191, 'klhv': 21105, 'yrcm': 55165, 'ramo': 56140, 'drag': 48125, 'dsea': 48192, 'spir': 56150, 'nrif': 60105, 'elat': 60190 } hydro_st_name_dict = {25191: 'Lavan - new nizana road', 21105: 'Shikma - Tel milcha', 55165: 'Mamsheet', 56140: 'Ramon', 48125: 'Draga', 48192: 'Chiemar - down the cliff', 46150: 'Nekrot - Top', 60105: 'Yaelon - Kibutz Yahel', 60190: 'Solomon - Eilat'} best_hp_models_dict = {'SVC': {'kernel': 'rbf', 'C': 1.0, 'gamma': 0.02, 'coef0': 0.0, 'degree': 1}, 'RF': {'max_depth': 5, 'max_features': 'auto', 'min_samples_leaf': 1, 'min_samples_split': 2, 'n_estimators': 400}, 'MLP': {'alpha': 0.1, 'activation': 'relu', 'hidden_layer_sizes': (10,10,10), 'learning_rate': 'constant', 'solver': 'lbfgs'}} scorer_order = ['precision', 'recall', 'f1', 'accuracy', 'tss', 'hss'] tsafit_dict = {'lat': 30.985556, 'lon': 35.263056, 'alt': -35.75, 'dt_utc': '2018-04-26T10:15:00'} axis_southern_stations = ['Dimo', 'Ohad', 'Ddse', 'Yotv', 'Elat', 'Raha', 'Yaha'] soi_axis_dict = {'yrcm': 'Dimo', 'slom': 'Ohad', 'dsea': 'Ddse', 'nrif': 'Yotv', 'elat': 'Elat', 'klhv': 'Raha', 'spir': 'Yaha'} def plot_mean_abs_shap_values_features(SV, fix_xticklabels=True): import matplotlib.pyplot as plt import numpy as np import seaborn as sns from natsort import natsorted features = ['pwv', 'pressure', 'DOY'] # sns.set_palette('Dark2', 6) sns.set_theme(style='ticks', font_scale=1.5) # sns.set_style('whitegrid') # sns.set_style('ticks') sv = np.abs(SV).mean('sample').sel(clas=0).reset_coords(drop=True) gr_spec = [20, 20, 1] fig, axes = plt.subplots(1, 3, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec}) try: axes.flatten() except AttributeError: axes = [axes] for i, f in enumerate(features): fe = [x for x in sv['feature'].values if f in x] dsf = sv.sel(feature=fe).reset_coords(drop=True).to_dataframe() title = '{}'.format(f.upper()) dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20, width=.8, color='k', alpha=0.8) axes[i].set_title(title) dsf_sum = dsf.sum().tolist() handles, labels = axes[i].get_legend_handles_labels() labels = [ '{} ({:.1f} %)'.format( x, y) for x, y in zip( labels, dsf_sum)] # axes[i].legend(handles=handles, labels=labels, prop={'size': fontsize-3}, loc='upper center') axes[i].set_ylabel('mean(|SHAP value|)\n(average impact\non model output magnitude)') axes[i].grid(axis='y', zorder=1) if fix_xticklabels: # n = sum(['pwv' in x for x in sv.feature.values]) axes[2].xaxis.set_ticklabels('') axes[2].set_xlabel('') hrs = np.arange(-1, -25, -1) axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12) axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12) axes[2].tick_params() axes[0].set_xlabel('Hours prior to flood') axes[1].set_xlabel('Hours prior to flood') fig.tight_layout() filename = 'RF_shap_values_{}.png'.format('+'.join(features)) plt.savefig(savefig_path / filename, bbox_inches='tight') return fig def read_binary_classification_shap_values_to_pandas(shap_values, X): import xarray as xr 
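    # Added note: this helper expects shap_values to be a per-class list of two
    # arrays (as shap.TreeExplainer returns for a binary classifier); below each
    # array is wrapped in an X-shaped DataArray and the two are stacked along a
    # new 'clas' dimension (0 = negative class, 1 = positive class).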
SV0 = X.copy(data=shap_values[0]) SV1 = X.copy(data=shap_values[1]) SV = xr.concat([SV0, SV1], dim='clas') SV['clas'] = [0, 1] return SV def get_shap_values_RF_classifier(plot=True): import shap X, y = combine_pos_neg_from_nc_file() ml = ML_Classifier_Switcher() rf = ml.pick_model('RF') rf.set_params(**best_hp_models_dict['RF']) X = select_doy_from_feature_list(X, features=['pwv', 'pressure', 'doy']) rf.fit(X, y) explainer = shap.TreeExplainer(rf) shap_values = explainer.shap_values(X.values) if plot: shap.summary_plot(shap_values, X, feature_names=[ x for x in X.feature.values], max_display=49, sort=False) return shap_values def interpolate_pwv_to_tsafit_event(path=work_yuval, savepath=work_yuval): import pandas as pd import xarray as xr from PW_stations import produce_geo_gnss_solved_stations from interpolation_routines import interpolate_var_ds_at_multiple_dts from aux_gps import save_ncfile # get gnss soi-apn pwv data and geo-meta data: geo_df = produce_geo_gnss_solved_stations(plot=False) pw = xr.load_dataset(work_yuval/'GNSS_PW_thresh_50.nc') pw = pw[[x for x in pw if '_error' not in x]] pw = pw.sel(time=slice('2018-04-25', '2018-04-26')) pw = pw.drop_vars(['elat', 'elro', 'csar', 'slom']) # get tsafit data: predict_df = pd.DataFrame(tsafit_dict, index=['tsafit']) df_inter = interpolate_var_ds_at_multiple_dts(pw, geo_df, predict_df) da=df_inter['interpolated_lr_fixed'].to_xarray() da.name = 'pwv' da.attrs['operation'] = 'interploated from SOI-APN PWV data' da.attrs['WV scale height'] = 'variable from SOI-APN data' da.attrs.update(**tsafit_dict) if savepath is not None: filename = 'Tsafit_PWV_event.nc' save_ncfile(da, savepath, filename) return da def plot_tsafit_event(path=work_yuval): import xarray as xr import pandas as pd import matplotlib.pyplot as plt import seaborn as sns sns.set_theme(style='ticks', font_scale=1.5) da = xr.load_dataarray(path / 'Tsafit_PWV_event.nc') fig, ax = plt.subplots(figsize=(11, 8)) da_sliced = da.sel(time=slice('2018-04-26T00:00:00', '2018-04-26T12:00:00')) # da_sliced.name = 'PWV [mm]' da_sliced = da_sliced.rename({'time': 'Time [UTC]'}) da_sliced.to_dataframe().plot(ax=ax, ylabel='PWV [mm]', linewidth=2, marker='o', legend=False) dt = pd.to_datetime(da.attrs['dt_utc']) ax.axvline(dt, color='r', linestyle='--', linewidth=2, label='T') handles, labels = ax.get_legend_handles_labels() plt.legend(handles=handles, labels=['PWV', 'Tsafit Flood Event']) ax.grid(True) # ax.set_xlabel('Time [UTC]') fig.tight_layout() fig.suptitle('PWV from SOI-APN over Tsafit area on 2018-04-26') fig.subplots_adjust(top=0.941) return fig # TODO: treat all pwv from events as follows: # For each station: # 0) rolling mean to all pwv 1 hour # 1) take 288 points before events, if < 144 gone then drop # 2) interpolate them 12H using spline/other # 3) then, check if dts coinside 1 day before, if not concat all dts+pwv for each station # 4) prepare features, such as pressure, doy, try to get pressure near the stations and remove the longterm hour dayofyear # pressure in BD anoms is highly correlated with SEDOM (0.9) and ELAT (0.88) so no need for local pressure features # fixed filling with jerusalem centre since 2 drag events dropped due to lack of data 2018-11 2019-02 in pressure # 5) feature addition: should be like pwv steps 1-3, # 6) negative events should be sampled separtely, for # 7) now prepare pwv and pressure to single ds with 1 hourly sample rate # 8) produce positives and save them to file! 
# 9) produce a way to get negatives considering the positives # maybe implement permutaion importance to pwv ? see what is more important to # the model in 24 hours ? only on SVC and MLP ? # implemetn TSS and HSS scores and test them (make_scorer from confusion matrix) # redo results but with inner and outer splits of 4, 4 # plot and see best_score per refit-scorrer - this is the best score of GridSearchCV on the entire # train/validation subset per each outerfold - basically see if the test_metric increased after the gridsearchcv as it should # use holdout set # implement repeatedstratifiedkfold and run it... # check for stability of the gridsearch CV...also run with 4-folds ? # finalize the permutation_importances and permutation_test_scores def prepare_tide_events_GNSS_dataset(hydro_path=hydro_path): import xarray as xr import pandas as pd import numpy as np from aux_gps import xr_reindex_with_date_range feats = xr.load_dataset( hydro_path/'hydro_tides_hourly_features_with_positives.nc') ds = feats['Tides'].to_dataset('GNSS').rename({'tide_event': 'time'}) da_list = [] for da in ds: time = ds[da].dropna('time') daa = time.copy(data=np.ones(time.shape)) daa['time'] = pd.to_datetime(time.values) daa.name = time.name + '_tide' da_list.append(daa) ds = xr.merge(da_list) li = [xr_reindex_with_date_range(ds[x], freq='H') for x in ds] ds = xr.merge(li) return ds def select_features_from_X(X, features='pwv'): if isinstance(features, str): f = [x for x in X.feature.values if features in x] X = X.sel(feature=f) elif isinstance(features, list): fs = [] for f in features: fs += [x for x in X.feature.values if f in x] X = X.sel(feature=fs) return X def combine_pos_neg_from_nc_file(hydro_path=hydro_path, negative_sample_num=1, seed=1, std=True): from aux_gps import path_glob from sklearn.utils import resample import xarray as xr import numpy as np # import pandas as pd if std: file = path_glob( hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_std*.nc')[-1] else: file = path_glob( hydro_path, 'hydro_tides_hourly_features_with_positives_negatives_*.nc')[-1] ds = xr.open_dataset(file) # get the positive features and produce target: X_pos = ds['X_pos'].rename({'positive_sample': 'sample'}) y_pos = xr.DataArray(np.ones(X_pos['sample'].shape), dims=['sample']) y_pos['sample'] = X_pos['sample'] # choose at random y_pos size of negative class: X_neg = ds['X_neg'].rename({'negative_sample': 'sample'}) pos_size = y_pos['sample'].size np.random.seed(seed) # negatives = [] for n_samples in [x for x in range(negative_sample_num)]: # dts = np.random.choice(X_neg['sample'], size=y_pos['sample'].size, # replace=False) # print(np.unique(dts).shape) # negatives.append(X_neg.sel(sample=dts)) negative = resample(X_neg, replace=False, n_samples=pos_size * negative_sample_num, random_state=seed) negatives = np.split(negative, negative_sample_num, axis=0) Xs = [] ys = [] for X_negative in negatives: y_neg = xr.DataArray(np.zeros(X_negative['sample'].shape), dims=['sample']) y_neg['sample'] = X_negative['sample'] # now concat all X's and y's: X = xr.concat([X_pos, X_negative], 'sample') y = xr.concat([y_pos, y_neg], 'sample') X.name = 'X' Xs.append(X) ys.append(y) if len(negatives) == 1: return Xs[0], ys[0] else: return Xs, ys def drop_hours_in_pwv_pressure_features(X, last_hours=7, verbose=True): import numpy as np Xcopy = X.copy() pwvs_to_drop = ['pwv_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)] if set(pwvs_to_drop).issubset(set(X.feature.values)): if verbose: print('dropping {} from 
X.'.format(', '.join(pwvs_to_drop))) Xcopy = Xcopy.drop_sel(feature=pwvs_to_drop) pressures_to_drop = ['pressure_{}'.format(x) for x in np.arange(24-last_hours + 1, 25)] if set(pressures_to_drop).issubset(set(X.feature.values)): if verbose: print('dropping {} from X.'.format(', '.join(pressures_to_drop))) Xcopy = Xcopy.drop_sel(feature=pressures_to_drop) return Xcopy def check_if_negatives_are_within_positives(neg_da, hydro_path=hydro_path): import xarray as xr import pandas as pd pos_da = xr.open_dataset( hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['X'] dt_pos = pos_da.sample.to_dataframe() dt_neg = neg_da.sample.to_dataframe() dt_all = dt_pos.index.union(dt_neg.index) dff = pd.DataFrame(dt_all, index=dt_all) dff = dff.sort_index() samples_within = dff[(dff.diff()['sample'] <= pd.Timedelta(1, unit='D'))] num = samples_within.size print('samples that are within a day of each other: {}'.format(num)) print('samples are: {}'.format(samples_within)) return dff def produce_negatives_events_from_feature_file(hydro_path=hydro_path, seed=42, batches=1, verbose=1, std=True): # do the same thing for pressure (as for pwv), but not for import xarray as xr import numpy as np import pandas as pd from aux_gps import save_ncfile feats = xr.load_dataset(hydro_path / 'hydro_tides_hourly_features.nc') feats = feats.rename({'doy': 'DOY'}) if std: pos_filename = 'hydro_tides_hourly_features_with_positives_std.nc' else: pos_filename = 'hydro_tides_hourly_features_with_positives.nc' all_tides = xr.open_dataset( hydro_path / pos_filename)['X_pos'] # pos_tides = xr.open_dataset(hydro_path / 'hydro_tides_hourly_features_with_positives.nc')['tide_datetimes'] tides = xr.open_dataset( hydro_path / pos_filename)['Tides'] # get the positives (tide events) for each station: df_stns = tides.to_dataset('GNSS').to_dataframe() # get all positives (tide events) for all stations: df = all_tides.positive_sample.to_dataframe()['positive_sample'] df.columns = ['sample'] stns = [x for x in hydro_pw_dict.keys()] other_feats = ['DOY', 'doy_sin', 'doy_cos'] # main stns df features (pwv) pwv_df = feats[stns].to_dataframe() pressure = feats['bet-dagan'].to_dataframe()['bet-dagan'] # define the initial no_choice_dt_range from the positive dt_range: no_choice_dt_range = [pd.date_range( start=dt, periods=48, freq='H') for dt in df] no_choice_dt_range = pd.DatetimeIndex( np.unique(np.hstack(no_choice_dt_range))) dts_to_choose_from = pwv_df.index.difference(no_choice_dt_range) # dts_to_choose_from_pressure = pwv_df.index.difference(no_choice_dt_range) # loop over all stns and produce negative events: np.random.seed(seed) neg_batches = [] for i in np.arange(1, batches + 1): if verbose >= 0: print('preparing batch {}:'.format(i)) neg_stns = [] for stn in stns: dts_df = df_stns[stn].dropna() pwv = pwv_df[stn].dropna() # loop over all events in on stn: negatives = [] negatives_pressure = [] # neg_samples = [] if verbose >= 1: print('finding negatives for station {}, events={}'.format( stn, len(dts_df))) # print('finding negatives for station {}, dt={}'.format(stn, dt.strftime('%Y-%m-%d %H:%M'))) cnt = 0 while cnt < len(dts_df): # get random number from each stn pwv: # r = np.random.randint(low=0, high=len(pwv.index)) # random_dt = pwv.index[r] random_dt = np.random.choice(dts_to_choose_from) negative_dt_range = pd.date_range( start=random_dt, periods=24, freq='H') if not (no_choice_dt_range.intersection(negative_dt_range)).empty: # print('#') if verbose >= 2: print('Overlap!') continue # get the actual pwv and check it is full 
(24hours): negative = pwv.loc[pwv.index.intersection(negative_dt_range)] neg_pressure = pressure.loc[pwv.index.intersection( negative_dt_range)] if len(negative.dropna()) != 24 or len(neg_pressure.dropna()) != 24: # print('!') if verbose >= 2: print('NaNs!') continue if verbose >= 2: print('number of dts that are already chosen: {}'.format( len(no_choice_dt_range))) negatives.append(negative) negatives_pressure.append(neg_pressure) # now add to the no_choice_dt_range the negative dt_range we just aquired: negative_dt_range_with_padding = pd.date_range( start=random_dt-pd.Timedelta(24, unit='H'), end=random_dt+pd.Timedelta(23, unit='H'), freq='H') no_choice_dt_range = pd.DatetimeIndex( np.unique(np.hstack([no_choice_dt_range, negative_dt_range_with_padding]))) dts_to_choose_from = dts_to_choose_from.difference( no_choice_dt_range) if verbose >= 2: print('number of dts to choose from: {}'.format( len(dts_to_choose_from))) cnt += 1 neg_da = xr.DataArray(negatives, dims=['sample', 'feature']) neg_da['feature'] = ['{}_{}'.format( 'pwv', x) for x in np.arange(1, 25)] neg_samples = [x.index[0] for x in negatives] neg_da['sample'] = neg_samples neg_pre_da = xr.DataArray( negatives_pressure, dims=['sample', 'feature']) neg_pre_da['feature'] = ['{}_{}'.format( 'pressure', x) for x in np.arange(1, 25)] neg_pre_samples = [x.index[0] for x in negatives_pressure] neg_pre_da['sample'] = neg_pre_samples neg_da = xr.concat([neg_da, neg_pre_da], 'feature') neg_da = neg_da.sortby('sample') neg_stns.append(neg_da) da_stns = xr.concat(neg_stns, 'sample') da_stns = da_stns.sortby('sample') # now loop over the remaining features (which are stns agnostic) # and add them with the same negative datetimes of the pwv already aquired: dts = [pd.date_range(x.item(), periods=24, freq='H') for x in da_stns['sample']] dts_samples = [x[0] for x in dts] other_feat_list = [] for feat in feats[other_feats]: # other_feat_sample_list = [] da_other = xr.DataArray(feats[feat].sel(time=dts_samples).values, dims=['sample']) # for dt in dts_samples: # da_other = xr.DataArray(feats[feat].sel( # time=dt).values, dims=['feature']) da_other['sample'] = dts_samples other_feat_list.append(da_other) # other_feat_da = xr.concat(other_feat_sample_list, 'feature') da_other_feats = xr.concat(other_feat_list, 'feature') da_other_feats['feature'] = other_feats da_stns = xr.concat([da_stns, da_other_feats], 'feature') neg_batches.append(da_stns) neg_batch_da = xr.concat(neg_batches, 'sample') # neg_batch_da['batch'] = np.arange(1, batches + 1) neg_batch_da.name = 'X_neg' feats['X_neg'] = neg_batch_da feats['X_pos'] = all_tides feats['X_pwv_stns'] = tides # feats['tide_datetimes'] = pos_tides feats = feats.rename({'sample': 'negative_sample'}) if std: filename = 'hydro_tides_hourly_features_with_positives_negatives_std_{}.nc'.format( batches) else: filename = 'hydro_tides_hourly_features_with_positives_negatives_{}.nc'.format( batches) save_ncfile(feats, hydro_path, filename) return neg_batch_da def produce_positives_from_feature_file(hydro_path=hydro_path, std=True): import xarray as xr import pandas as pd import numpy as np from aux_gps import save_ncfile # load features: if std: file = hydro_path / 'hydro_tides_hourly_features_std.nc' else: file = hydro_path / 'hydro_tides_hourly_features.nc' feats = xr.load_dataset(file) feats = feats.rename({'doy': 'DOY'}) # load positive event for each station: dfs = [read_station_from_tide_database(hydro_pw_dict.get( x), rounding='1H') for x in hydro_pw_dict.keys()] dfs = 
check_if_tide_events_from_stations_are_within_time_window( dfs, days=1, rounding=None, return_hs_list=True) da_list = [] positives_per_station = [] for i, feat in enumerate(feats): try: _, _, pr = produce_pwv_days_before_tide_events(feats[feat], dfs[i], plot=False, rolling=None, days_prior=1, drop_thresh=0.75, max_gap='6H', verbose=0) print('getting positives from station {}'.format(feat)) positives = [pd.to_datetime( (x[-1].time + pd.Timedelta(1, unit='H')).item()) for x in pr] da = xr.DataArray(pr, dims=['sample', 'feature']) da['sample'] = positives positives_per_station.append(positives) da['feature'] = ['pwv_{}'.format(x) for x in np.arange(1, 25)] da_list.append(da) except IndexError: continue da_pwv = xr.concat(da_list, 'sample') da_pwv = da_pwv.sortby('sample') # now add more features: da_list = [] for feat in ['bet-dagan']: print('getting positives from feature {}'.format(feat)) positives = [] for dt_end in da_pwv.sample: dt_st = pd.to_datetime(dt_end.item()) - pd.Timedelta(24, unit='H') dt_end_end = pd.to_datetime( dt_end.item()) - pd.Timedelta(1, unit='H') positive = feats[feat].sel(time=slice(dt_st, dt_end_end)) positives.append(positive) da = xr.DataArray(positives, dims=['sample', 'feature']) da['sample'] = da_pwv.sample if feat == 'bet-dagan': feat_name = 'pressure' else: feat_name = feat da['feature'] = ['{}_{}'.format(feat_name, x) for x in np.arange(1, 25)] da_list.append(da) da_f = xr.concat(da_list, 'feature') da_list = [] for feat in ['DOY', 'doy_sin', 'doy_cos']: print('getting positives from feature {}'.format(feat)) positives = [] for dt in da_pwv.sample: positive = feats[feat].sel(time=dt) positives.append(positive) da = xr.DataArray(positives, dims=['sample']) da['sample'] = da_pwv.sample # da['feature'] = feat da_list.append(da) da_ff = xr.concat(da_list, 'feature') da_ff['feature'] = ['DOY', 'doy_sin', 'doy_cos'] da = xr.concat([da_pwv, da_f, da_ff], 'feature') if std: filename = 'hydro_tides_hourly_features_with_positives_std.nc' else: filename = 'hydro_tides_hourly_features_with_positives.nc' feats['X_pos'] = da # now add positives per stations: pdf = pd.DataFrame(positives_per_station).T pdf.index.name = 'tide_event' pos_da = pdf.to_xarray().to_array('GNSS') pos_da['GNSS'] = [x for x in hydro_pw_dict.keys()] pos_da.attrs['info'] = 'contains the datetimes of the tide events per GNSS station.' 
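    # Added note: at this point 'X_pos' (attached to feats just above) holds one
    # row per tide event with features pwv_1..pwv_24, pressure_1..pressure_24,
    # DOY, doy_sin and doy_cos, while 'pos_da' (attached as 'Tides' just below)
    # holds the per-station event datetimes; the 'sample' dim is then renamed to
    # 'positive_sample' before saving.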
feats['Tides'] = pos_da # rename sample to positive sample: feats = feats.rename({'sample': 'positive_sample'}) save_ncfile(feats, hydro_path, filename) return feats def prepare_features_and_save_hourly(work_path=work_yuval, ims_path=ims_path, savepath=hydro_path, std=True): import xarray as xr from aux_gps import save_ncfile import numpy as np # pwv = xr.load_dataset( if std: pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms_sd.nc' pre_filename = 'IMS_BD_hourly_anoms_std_ps_1964-2020.nc' else: pwv_filename = 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc' pre_filename = 'IMS_BD_hourly_anoms_ps_1964-2020.nc' # work_path / 'GNSS_PW_thresh_0_hour_dayofyear_anoms.nc') pwv = xr.load_dataset(work_path / pwv_filename) pwv_stations = [x for x in hydro_pw_dict.keys()] pwv = pwv[pwv_stations] # pwv = pwv.rolling(time=12, keep_attrs=True).mean(keep_attrs=True) pwv = pwv.resample(time='1H', keep_attrs=True).mean(keep_attrs=True) # bd = xr.load_dataset(ims_path / 'IMS_BD_anoms_5min_ps_1964-2020.nc') bd = xr.load_dataset(ims_path / pre_filename) # min_time = pwv.dropna('time')['time'].min() # bd = bd.sel(time=slice('1996', None)).resample(time='1H').mean() bd = bd.sel(time=slice('1996', None)) pressure = bd['bet-dagan'] doy = pwv['time'].copy(data=pwv['time'].dt.dayofyear) doy.name = 'doy' doy_sin = np.sin(doy * np.pi / 183) doy_sin.name = 'doy_sin' doy_cos = np.cos(doy * np.pi / 183) doy_cos.name = 'doy_cos' ds = xr.merge([pwv, pressure, doy, doy_sin, doy_cos]) if std: filename = 'hydro_tides_hourly_features_std.nc' else: filename = 'hydro_tides_hourly_features.nc' save_ncfile(ds, savepath, filename) return ds def plot_all_decompositions(X, y, n=2): import xarray as xr models = [ 'PCA', 'LDA', 'ISO_MAP', 'LLE', 'LLE-modified', 'LLE-hessian', 'LLE-ltsa', 'MDA', 'RTE', 'SE', 'TSNE', 'NCA'] names = [ 'Principal Components', 'Linear Discriminant', 'Isomap', 'Locally Linear Embedding', 'Modified LLE', 'Hessian LLE', 'Local Tangent Space Alignment', 'MDS embedding', 'Random forest', 'Spectral embedding', 't-SNE', 'NCA embedding'] name_dict = dict(zip(models, names)) da = xr.DataArray(models, dims=['model']) da['model'] = models fg = xr.plot.FacetGrid(da, col='model', col_wrap=4, sharex=False, sharey=False) for model_str, ax in zip(da['model'].values, fg.axes.flatten()): model = model_str.split('-')[0] method = model_str.split('-')[-1] if model == method: method = None try: ax = scikit_decompose(X, y, model=model, n=n, method=method, ax=ax) except ValueError: pass ax.set_title(name_dict[model_str]) ax.set_xlabel('') ax.set_ylabel('') fg.fig.suptitle('various decomposition projections (n={})'.format(n)) return def scikit_decompose(X, y, model='PCA', n=2, method=None, ax=None): from sklearn import (manifold, decomposition, ensemble, discriminant_analysis, neighbors) import matplotlib.pyplot as plt import pandas as pd # from mpl_toolkits.mplot3d import Axes3D n_neighbors = 30 if model == 'PCA': X_decomp = decomposition.TruncatedSVD(n_components=n).fit_transform(X) elif model == 'LDA': X2 = X.copy() X2.values.flat[::X.shape[1] + 1] += 0.01 X_decomp = discriminant_analysis.LinearDiscriminantAnalysis(n_components=n ).fit_transform(X2, y) elif model == 'ISO_MAP': X_decomp = manifold.Isomap( n_neighbors, n_components=n).fit_transform(X) elif model == 'LLE': # method = 'standard', 'modified', 'hessian' 'ltsa' if method is None: method = 'standard' clf = manifold.LocallyLinearEmbedding(n_neighbors, n_components=2, method=method) X_decomp = clf.fit_transform(X) elif model == 'MDA': clf = manifold.MDS(n_components=n, 
n_init=1, max_iter=100) X_decomp = clf.fit_transform(X) elif model == 'RTE': hasher = ensemble.RandomTreesEmbedding(n_estimators=200, random_state=0, max_depth=5) X_transformed = hasher.fit_transform(X) pca = decomposition.TruncatedSVD(n_components=n) X_decomp = pca.fit_transform(X_transformed) elif model == 'SE': embedder = manifold.SpectralEmbedding(n_components=n, random_state=0, eigen_solver="arpack") X_decomp = embedder.fit_transform(X) elif model == 'TSNE': tsne = manifold.TSNE(n_components=n, init='pca', random_state=0) X_decomp = tsne.fit_transform(X) elif model == 'NCA': nca = neighbors.NeighborhoodComponentsAnalysis(init='random', n_components=n, random_state=0) X_decomp = nca.fit_transform(X, y) df = pd.DataFrame(X_decomp) df.columns = [ '{}_{}'.format( model, x + 1) for x in range( X_decomp.shape[1])] df['flood'] = y df['flood'] = df['flood'].astype(int) df_1 = df[df['flood'] == 1] df_0 = df[df['flood'] == 0] if X_decomp.shape[1] == 1: if ax is not None: df_1.plot.scatter(ax=ax, x='{}_1'.format(model), y='{}_1'.format(model), color='b', marker='s', alpha=0.3, label='1', s=50) else: ax = df_1.plot.scatter( x='{}_1'.format(model), y='{}_1'.format(model), color='b', label='1', s=50) df_0.plot.scatter( ax=ax, x='{}_1'.format(model), y='{}_1'.format(model), color='r', marker='x', label='0', s=50) elif X_decomp.shape[1] == 2: if ax is not None: df_1.plot.scatter(ax=ax, x='{}_1'.format(model), y='{}_2'.format(model), color='b', marker='s', alpha=0.3, label='1', s=50) else: ax = df_1.plot.scatter( x='{}_1'.format(model), y='{}_2'.format(model), color='b', label='1', s=50) df_0.plot.scatter( ax=ax, x='{}_1'.format(model), y='{}_2'.format(model), color='r', label='0', s=50) elif X_decomp.shape[1] == 3: ax = plt.figure().gca(projection='3d') # df_1.plot.scatter(x='{}_1'.format(model), y='{}_2'.format(model), z='{}_3'.format(model), color='b', label='1', s=50, ax=threedee) ax.scatter(df_1['{}_1'.format(model)], df_1['{}_2'.format(model)], df_1['{}_3'.format(model)], color='b', label='1', s=50) ax.scatter(df_0['{}_1'.format(model)], df_0['{}_2'.format(model)], df_0['{}_3'.format(model)], color='r', label='0', s=50) ax.set_xlabel('{}_1'.format(model)) ax.set_ylabel('{}_2'.format(model)) ax.set_zlabel('{}_3'.format(model)) return ax def permutation_scikit(X, y, cv=False, plot=True): import matplotlib.pyplot as plt from sklearn.svm import SVC from sklearn.discriminant_analysis import LinearDiscriminantAnalysis from sklearn.model_selection import KFold from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import permutation_test_score from sklearn.model_selection import train_test_split from sklearn.metrics import classification_report, confusion_matrix import numpy as np if not cv: clf = SVC(C=0.01, break_ties=False, cache_size=200, class_weight=None, coef0=0.0, decision_function_shape='ovr', degree=3, gamma=0.032374575428176434, kernel='poly', max_iter=-1, probability=False, random_state=None, shrinking=True, tol=0.001, verbose=False) clf = SVC(kernel='linear') # clf = LinearDiscriminantAnalysis() cv = StratifiedKFold(4, shuffle=True) # cv = KFold(4, shuffle=True) n_classes = 2 score, permutation_scores, pvalue = permutation_test_score( clf, X, y, scoring="f1", cv=cv, n_permutations=1000, n_jobs=-1, verbose=2) print("Classification score %s (pvalue : %s)" % (score, pvalue)) plt.hist(permutation_scores, 20, label='Permutation scores', edgecolor='black') ylim = plt.ylim() plt.plot(2 * [score], ylim, '--g', linewidth=3, 
label='Classification Score' ' (pvalue %s)' % pvalue) plt.plot(2 * [1. / n_classes], ylim, '--k', linewidth=3, label='Luck') plt.ylim(ylim) plt.legend() plt.xlabel('Score') plt.show() else: X_train, X_test, y_train, y_test = train_test_split( X, y, test_size=0.2, shuffle=True, random_state=42) param_grid = { 'C': np.logspace(-2, 3, 50), 'gamma': np.logspace(-2, 3, 50), 'kernel': ['rbf', 'poly', 'sigmoid']} grid = GridSearchCV(SVC(), param_grid, refit=True, verbose=2) grid.fit(X_train, y_train) print(grid.best_estimator_) grid_predictions = grid.predict(X_test) print(confusion_matrix(y_test, grid_predictions)) print(classification_report(y_test, grid_predictions)) return def grab_y_true_and_predict_from_sklearn_model(model, X, y, cv, kfold_name='inner_kfold'): from sklearn.model_selection import GridSearchCV import xarray as xr import numpy as np if isinstance(model, GridSearchCV): model = model.best_estimator_ ds_list = [] for i, (train, val) in enumerate(cv.split(X, y)): model.fit(X[train], y[train]) y_true = y[val] y_pred = model.predict(X[val]) try: lr_probs = model.predict_proba(X[val]) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] except AttributeError: lr_probs = model.decision_function(X[val]) y_true_da = xr.DataArray(y_true, dims=['sample']) y_pred_da = xr.DataArray(y_pred, dims=['sample']) y_prob_da = xr.DataArray(lr_probs, dims=['sample']) ds = xr.Dataset() ds['y_true'] = y_true_da ds['y_pred'] = y_pred_da ds['y_prob'] = y_prob_da ds['sample'] = np.arange(0, len(X[val])) ds_list.append(ds) ds = xr.concat(ds_list, kfold_name) ds[kfold_name] = np.arange(1, cv.n_splits + 1) return ds def produce_ROC_curves_from_model(model, X, y, cv, kfold_name='inner_kfold'): import numpy as np import xarray as xr from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.model_selection import GridSearchCV from sklearn.metrics import precision_recall_curve from sklearn.metrics import average_precision_score # TODO: collect all predictions and y_tests from this, also predict_proba # and save, then calculte everything elsewhere. 
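    # Added note: for each CV fold the model is refit on the train split and the
    # ROC curve is computed from the hard predictions on the validation split
    # (predicted probabilities are only used for the precision-recall curve);
    # each fold's TPR is interpolated onto a common 100-point FPR grid so the
    # folds can be stacked into one (kfold, fpr) DataArray.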
if isinstance(model, GridSearchCV): model = model.best_estimator_ tprs = [] aucs = [] pr = [] pr_aucs = [] mean_fpr = np.linspace(0, 1, 100) for i, (train, val) in enumerate(cv.split(X, y)): model.fit(X[train], y[train]) y_pred = model.predict(X[val]) try: lr_probs = model.predict_proba(X[val]) # keep probabilities for the positive outcome only lr_probs = lr_probs[:, 1] except AttributeError: lr_probs = model.decision_function(X[val]) fpr, tpr, _ = roc_curve(y[val], y_pred) interp_tpr = np.interp(mean_fpr, fpr, tpr) interp_tpr[0] = 0.0 tprs.append(interp_tpr) aucs.append(roc_auc_score(y[val], y_pred)) precision, recall, _ = precision_recall_curve(y[val], lr_probs) pr.append(recall) average_precision = average_precision_score(y[val], y_pred) pr_aucs.append(average_precision) # mean_tpr = np.mean(tprs, axis=0) # mean_tpr[-1] = 1.0 # mean_auc = auc(mean_fpr, mean_tpr) # std_auc = np.std(aucs) # std_tpr = np.std(tprs, axis=0) tpr_da = xr.DataArray(tprs, dims=[kfold_name, 'fpr']) auc_da = xr.DataArray(aucs, dims=[kfold_name]) ds = xr.Dataset() ds['TPR'] = tpr_da ds['AUC'] = auc_da ds['fpr'] = mean_fpr ds[kfold_name] = np.arange(1, cv.n_splits + 1) # variability for each tpr is ds['TPR'].std('kfold') return ds def cross_validation_with_holdout(X, y, model_name='SVC', features='pwv', n_splits=3, test_ratio=0.25, scorers=['f1', 'recall', 'tss', 'hss', 'precision', 'accuracy'], seed=42, savepath=None, verbose=0, param_grid='normal', n_jobs=-1, n_repeats=None): # from sklearn.model_selection import cross_validate from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.model_selection import train_test_split from sklearn.metrics import make_scorer # from string import digits import numpy as np # import xarray as xr scores_dict = {s: s for s in scorers} if 'tss' in scorers: scores_dict['tss'] = make_scorer(tss_score) if 'hss' in scorers: scores_dict['hss'] = make_scorer(hss_score) X = select_doy_from_feature_list(X, model_name, features) if param_grid == 'light': print(np.unique(X.feature.values)) # first take out the hold-out set: X_train, X_test, y_train, y_test = train_test_split(X, y, test_size=test_ratio, random_state=seed, stratify=y) if n_repeats is None: # configure the cross-validation procedure cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed) print('CV StratifiedKfolds of {}.'.format(n_splits)) # define the model and search space: else: cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed) print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format(n_splits, n_repeats)) ml = ML_Classifier_Switcher() print('param grid group is set to {}.'.format(param_grid)) sk_model = ml.pick_model(model_name, pgrid=param_grid) search_space = ml.param_grid # define search gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space, cv=cv, n_jobs=n_jobs, scoring=scores_dict, verbose=verbose, refit=False, return_train_score=True) gr_search.fit(X, y) if isinstance(features, str): features = [features] if savepath is not None: filename = 'GRSRCHCV_holdout_{}_{}_{}_{}_{}_{}_{}.pkl'.format( model_name, '+'.join(features), '+'.join(scorers), n_splits, int(test_ratio*100), param_grid, seed) save_gridsearchcv_object(gr_search, savepath, filename) # gr, _ = process_gridsearch_results( # gr_search, model_name, split_dim='kfold', features=X.feature.values) # remove_digits = str.maketrans('', '', digits) # features = 
list(set([x.translate(remove_digits).split('_')[0] # for x in X.feature.values])) # # add more attrs, features etc: # gr.attrs['features'] = features return gr_search def select_doy_from_feature_list(X, model_name='RF', features='pwv'): # first if RF chosen, replace the cyclic coords of DOY (sin and cos) with # the DOY itself. if isinstance(features, list): feats = features.copy() else: feats = features if model_name == 'RF' and 'doy' in features: if isinstance(features, list): feats.remove('doy') feats.append('DOY') elif isinstance(features, str): feats = 'DOY' elif model_name != 'RF' and 'doy' in features: if isinstance(features, list): feats.remove('doy') feats.append('doy_sin') feats.append('doy_cos') elif isinstance(features, str): feats = ['doy_sin'] feats.append('doy_cos') X = select_features_from_X(X, feats) return X def single_cross_validation(X_val, y_val, model_name='SVC', features='pwv', n_splits=4, scorers=['f1', 'recall', 'tss', 'hss', 'precision', 'accuracy'], seed=42, savepath=None, verbose=0, param_grid='normal', n_jobs=-1, n_repeats=None, outer_split='1-1'): # from sklearn.model_selection import cross_validate from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import RepeatedStratifiedKFold from sklearn.model_selection import GridSearchCV # from sklearn.model_selection import train_test_split from sklearn.metrics import make_scorer # from string import digits import numpy as np # import xarray as xr scores_dict = {s: s for s in scorers} if 'tss' in scorers: scores_dict['tss'] = make_scorer(tss_score) if 'hss' in scorers: scores_dict['hss'] = make_scorer(hss_score) X = select_doy_from_feature_list(X_val, model_name, features) y = y_val if param_grid == 'light': print(np.unique(X.feature.values)) if n_repeats is None: # configure the cross-validation procedure cv = StratifiedKFold(n_splits=n_splits, shuffle=True, random_state=seed) print('CV StratifiedKfolds of {}.'.format(n_splits)) # define the model and search space: else: cv = RepeatedStratifiedKFold(n_splits=n_splits, n_repeats=n_repeats, random_state=seed) print('CV RepeatedStratifiedKFold of {} with {} repeats.'.format( n_splits, n_repeats)) ml = ML_Classifier_Switcher() print('param grid group is set to {}.'.format(param_grid)) if outer_split == '1-1': cv_type = 'holdout' print('holdout cv is selected.') else: cv_type = 'nested' print('nested cv {} out of {}.'.format( outer_split.split('-')[0], outer_split.split('-')[1])) sk_model = ml.pick_model(model_name, pgrid=param_grid) search_space = ml.param_grid # define search gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space, cv=cv, n_jobs=n_jobs, scoring=scores_dict, verbose=verbose, refit=False, return_train_score=True) gr_search.fit(X, y) if isinstance(features, str): features = [features] if savepath is not None: filename = 'GRSRCHCV_{}_{}_{}_{}_{}_{}_{}_{}.pkl'.format(cv_type, model_name, '+'.join(features), '+'.join( scorers), n_splits, outer_split, param_grid, seed) save_gridsearchcv_object(gr_search, savepath, filename) return gr_search def save_cv_params_to_file(cv_obj, path, name): import pandas as pd di = vars(cv_obj) splitter_type = cv_obj.__repr__().split('(')[0] di['splitter_type'] = splitter_type (pd.DataFrame.from_dict(data=di, orient='index') .to_csv(path / '{}.csv'.format(name), header=False)) print('{}.csv saved to {}.'.format(name, path)) return def read_cv_params_and_instantiate(filepath): import pandas as pd from sklearn.model_selection import StratifiedKFold df = pd.read_csv(filepath, header=None, 
index_col=0) d = {} for row in df.iterrows(): dd = pd.to_numeric(row[1], errors='ignore') if dd.item() == 'True' or dd.item() == 'False': dd = dd.astype(bool) d[dd.to_frame().columns.item()] = dd.item() s_type = d.pop('splitter_type') if s_type == 'StratifiedKFold': cv = StratifiedKFold(**d) return cv def nested_cross_validation_procedure(X, y, model_name='SVC', features='pwv', outer_splits=4, inner_splits=2, refit_scorer='roc_auc', scorers=['f1', 'recall', 'tss', 'hss', 'roc_auc', 'precision', 'accuracy'], seed=42, savepath=None, verbose=0, param_grid='normal', n_jobs=-1): from sklearn.model_selection import cross_validate from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import GridSearchCV from sklearn.metrics import make_scorer from sklearn.inspection import permutation_importance from string import digits import numpy as np import xarray as xr assert refit_scorer in scorers scores_dict = {s: s for s in scorers} if 'tss' in scorers: scores_dict['tss'] = make_scorer(tss_score) if 'hss' in scorers: scores_dict['hss'] = make_scorer(hss_score) X = select_doy_from_feature_list(X, model_name, features) # if model_name == 'RF': # doy = X['sample'].dt.dayofyear # sel_doy = [x for x in X.feature.values if 'doy_sin' in x] # doy_X = doy.broadcast_like(X.sel(feature=sel_doy)) # doy_X['feature'] = [ # 'doy_{}'.format(x) for x in range( # doy_X.feature.size)] # no_doy = [x for x in X.feature.values if 'doy' not in x] # X = X.sel(feature=no_doy) # X = xr.concat([X, doy_X], 'feature') # else: # # first slice X for features: # if isinstance(features, str): # f = [x for x in X.feature.values if features in x] # X = X.sel(feature=f) # elif isinstance(features, list): # fs = [] # for f in features: # fs += [x for x in X.feature.values if f in x] # X = X.sel(feature=fs) if param_grid == 'light': print(np.unique(X.feature.values)) # configure the cross-validation procedure cv_inner = StratifiedKFold(n_splits=inner_splits, shuffle=True, random_state=seed) print('Inner CV StratifiedKfolds of {}.'.format(inner_splits)) # define the model and search space: ml = ML_Classifier_Switcher() if param_grid == 'light': print('disgnostic mode light.') sk_model = ml.pick_model(model_name, pgrid=param_grid) search_space = ml.param_grid # define search gr_search = GridSearchCV(estimator=sk_model, param_grid=search_space, cv=cv_inner, n_jobs=n_jobs, scoring=scores_dict, verbose=verbose, refit=refit_scorer, return_train_score=True) # gr.fit(X, y) # configure the cross-validation procedure cv_outer = StratifiedKFold( n_splits=outer_splits, shuffle=True, random_state=seed) # execute the nested cross-validation scores_est_dict = cross_validate(gr_search, X, y, scoring=scores_dict, cv=cv_outer, n_jobs=n_jobs, return_estimator=True, verbose=verbose) # perm = [] # for i, (train, val) in enumerate(cv_outer.split(X, y)): # gr_model = scores_est_dict['estimator'][i] # gr_model.fit(X[train], y[train]) # r = permutation_importance(gr_model, X[val], y[val],scoring='f1', # n_repeats=30, n_jobs=-1, # random_state=0) # perm.append(r) # get the test scores: test_keys = [x for x in scores_est_dict.keys() if 'test' in x] ds = xr.Dataset() for key in test_keys: ds[key] = xr.DataArray(scores_est_dict[key], dims=['outer_kfold']) preds_ds = [] gr_ds = [] for est in scores_est_dict['estimator']: gr, _ = process_gridsearch_results( est, model_name, split_dim='inner_kfold', features=X.feature.values) # somehow save gr: gr_ds.append(gr) preds_ds.append( grab_y_true_and_predict_from_sklearn_model(est, X, y, cv_inner)) # 
tpr_ds.append(produce_ROC_curves_from_model(est, X, y, cv_inner)) dss = xr.concat(preds_ds, 'outer_kfold') gr_dss = xr.concat(gr_ds, 'outer_kfold') dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1) gr_dss['outer_kfold'] = np.arange(1, cv_outer.n_splits + 1) # aggragate results: dss = xr.merge([ds, dss]) dss = xr.merge([dss, gr_dss]) dss.attrs = gr_dss.attrs dss.attrs['outer_kfold_splits'] = outer_splits remove_digits = str.maketrans('', '', digits) features = list(set([x.translate(remove_digits).split('_')[0] for x in X.feature.values])) # add more attrs, features etc: dss.attrs['features'] = features # rename major data_vars with model name: # ys = [x for x in dss.data_vars if 'y_' in x] # new_ys = [y + '_{}'.format(model_name) for y in ys] # dss = dss.rename(dict(zip(ys, new_ys))) # new_test_keys = [y + '_{}'.format(model_name) for y in test_keys] # dss = dss.rename(dict(zip(test_keys, new_test_keys))) # if isinstance(X.attrs['pwv_id'], list): # dss.attrs['pwv_id'] = '-'.join(X.attrs['pwv_id']) # else: # dss.attrs['pwv_id'] = X.attrs['pwv_id'] # if isinstance(y.attrs['hydro_station_id'], list): # dss.attrs['hs_id'] = '-'.join([str(x) for x in y.attrs['hydro_station_id']]) # else: # dss.attrs['hs_id'] = y.attrs['hydro_station_id'] # dss.attrs['hydro_max_flow'] = y.attrs['max_flow'] # dss.attrs['neg_pos_ratio'] = y.attrs['neg_pos_ratio'] # save results to file: if savepath is not None: save_cv_results(dss, savepath=savepath) return dss # def ML_main_procedure(X, y, estimator=None, model_name='SVC', features='pwv', # val_size=0.18, n_splits=None, test_size=0.2, seed=42, best_score='f1', # savepath=None, plot=True): # """split the X,y for train and test, either do HP tuning using HP_tuning # with val_size or use already tuned (or not) estimator. # models to play with = MLP, RF and SVC. # n_splits = 2, 3, 4. # features = pwv, pressure. # best_score = f1, roc_auc, accuracy. # can do loop on them. 
RF takes the most time to tune.""" # X = select_features_from_X(X, features) # X_train, X_test, y_train, y_test = train_test_split(X, y, # test_size=test_size, # shuffle=True, # random_state=seed) # # do HP_tuning: # if estimator is None: # cvr, model = HP_tuning(X_train, y_train, model_name=model_name, val_size=val_size, test_size=test_size, # best_score=best_score, seed=seed, savepath=savepath, n_splits=n_splits) # else: # model = estimator # if plot: # ax = plot_many_ROC_curves(model, X_test, y_test, name=model_name, # ax=None) # return ax # else: # return model def plot_hyper_parameters_heatmaps_from_nested_CV_model(dss, path=hydro_path, model_name='MLP', features='pwv+pressure+doy', save=True): import matplotlib.pyplot as plt ds = dss.sel(features=features).reset_coords(drop=True) non_hp_vars = ['mean_score', 'std_score', 'test_score', 'roc_auc_score', 'TPR'] if model_name == 'RF': non_hp_vars.append('feature_importances') ds = ds[[x for x in ds if x not in non_hp_vars]] seq = 'Blues' cat = 'Dark2' cmap_hp_dict = { 'alpha': seq, 'activation': cat, 'hidden_layer_sizes': cat, 'learning_rate': cat, 'solver': cat, 'kernel': cat, 'C': seq, 'gamma': seq, 'degree': seq, 'coef0': seq, 'max_depth': seq, 'max_features': cat, 'min_samples_leaf': seq, 'min_samples_split': seq, 'n_estimators': seq } # fix stuff for SVC: if model_name == 'SVC': ds['degree'] = ds['degree'].where(ds['kernel']=='poly') ds['coef0'] = ds['coef0'].where(ds['kernel']=='poly') # da = ds.to_arrray('hyper_parameters') # fg = xr.plot.FacetGrid( # da, # col='hyper_parameters', # sharex=False, # sharey=False, figsize=(16, 10)) fig, axes = plt.subplots(5, 1, sharex=True, figsize=(4, 10)) for i, da in enumerate(ds): df = ds[da].reset_coords(drop=True).to_dataset('scorer').to_dataframe() df.index.name = 'Outer Split' try: df = df.astype(float).round(2) except ValueError: pass cmap = cmap_hp_dict.get(da, 'Set1') plot_heatmap_for_hyper_parameters_df(df, ax=axes[i], title=da, cmap=cmap) fig.tight_layout() if save: filename = 'Hyper-parameters_nested_{}.png'.format( model_name) plt.savefig(savefig_path / filename, bbox_inches='tight') return def plot_heatmaps_for_hyper_parameters_data_splits(df1, df2, axes=None, cmap='colorblind', title=None, fig=None, cbar_params=[.92, .12, .03, .75], fontsize=12, val_type='float'): import pandas as pd import seaborn as sns import numpy as np # from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import Normalize import matplotlib import matplotlib.pyplot as plt import matplotlib.cm as cm sns.set_style('ticks') sns.set_style('whitegrid') sns.set(font_scale=1.2) df1 = df1.astype(eval(val_type)) df2 = df2.astype(eval(val_type)) arr = pd.concat([df1, df2], axis=0).values.ravel() value_to_int = {j: i for i, j in enumerate( np.unique(arr))} # like you did # try: # sorted_v_to_i = dict(sorted(value_to_int.items())) # except TypeError: # sorted_v_to_i = value_to_int # print(value_to_int) n = len(value_to_int) # discrete colormap (n samples from a given cmap) cmap_list = sns.color_palette(cmap, n) if val_type == 'float': # print([value_to_int.keys()]) cbar_ticklabels = ['{:.2g}'.format(x) for x in value_to_int.keys()] elif val_type == 'int': cbar_ticklabels = [int(x) for x in value_to_int.keys()] elif val_type == 'str': cbar_ticklabels = [x for x in value_to_int.keys()] if 'nan' in value_to_int.keys(): cmap_list[-1] = (0.5, 0.5, 0.5) new_value_to_int = {} for key, val in value_to_int.items(): try: new_value_to_int[str(int(float(key)))] = val except ValueError: new_value_to_int['NR'] 
= val cbar_ticklabels = [x for x in new_value_to_int.keys()] # u1 = np.unique(df1.replace(value_to_int)).astype(int) # cmap1 = [cmap_list[x] for x in u1] # u2 = np.unique(df2.replace(value_to_int)).astype(int) # cmap2 = [cmap_list[x] for x in u2] # prepare normalizer ## Prepare bins for the normalizer norm_bins = np.sort([*value_to_int.values()]) + 0.5 norm_bins = np.insert(norm_bins, 0, np.min(norm_bins) - 1.0) # print(norm_bins) ## Make normalizer and formatter norm = matplotlib.colors.BoundaryNorm(norm_bins, n, clip=True) # normalizer = Normalize(np.array([x for x in value_to_int.values()])[0],np.array([x for x in value_to_int.values()])[-1]) # im=cm.ScalarMappable(norm=normalizer) if axes is None: fig, axes = plt.subplots(2, 1, sharex=True, sharey=False) # divider = make_axes_locatable([axes[0], axes[1]]) # cbar_ax = divider.append_axes('right', size='5%', pad=0.05) cbar_ax = fig.add_axes(cbar_params) sns.heatmap(df1.replace(value_to_int), cmap=cmap_list, cbar=False, ax=axes[0], linewidth=0.7, linecolor='k', square=True, cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm) sns.heatmap(df2.replace(value_to_int), cmap=cmap_list, cbar=False, ax=axes[1], linewidth=0.7, linecolor='k', square=True, cbar_kws={"shrink": .9}, cbar_ax=cbar_ax, norm=norm) # else: # ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap, # ax=ax, linewidth=1, linecolor='k', # square=False, cbar_kws={"shrink": .9}) if title is not None: axes[0].set_title(title, fontsize=fontsize) for ax in axes: ax.set_xticklabels(ax.get_xticklabels(), ha='right', va='top', rotation=45) ax.set_yticklabels(ax.get_yticklabels(), rotation=0) ax.tick_params(labelsize=fontsize, direction='out', bottom=True, left=True, length=2) ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) # colorbar = axes[0].collections[0].colorbar # diff = norm_bins[1:] - norm_bins[:-1] # tickz = norm_bins[:-1] + diff / 2 colorbar = fig.colorbar(cm.ScalarMappable(norm=norm, cmap=matplotlib.colors.ListedColormap(cmap_list)), ax=[axes[0], axes[1]], shrink=1, pad=0.05, cax=cbar_ax) # colorbar = plt.gca().images[-1].colorbar r = colorbar.vmax - colorbar.vmin colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)]) colorbar.ax.set_yticklabels(cbar_ticklabels, fontsize=fontsize-2) return axes def plot_hyper_parameters_heatmap_data_splits_per_model(dss4, dss5, fontsize=14, save=True, model_name='SVC', features='pwv+pressure+doy'): import matplotlib.pyplot as plt # import seaborn as sns fig, axes = plt.subplots(2, 5, sharex=True, sharey=False ,figsize=(16, 5)) ds4 = dss4.sel(features=features).reset_coords(drop=True) ds5 = dss5.sel(features=features).reset_coords(drop=True) ds4 = ds4.reindex(scorer=scorer_order) ds5 = ds5.reindex(scorer=scorer_order) non_hp_vars = ['mean_score', 'std_score', 'test_score', 'roc_auc_score', 'TPR'] if model_name == 'RF': non_hp_vars.append('feature_importances') if model_name == 'MLP': adj_dict=dict( top=0.946, bottom=0.145, left=0.046, right=0.937, hspace=0.121, wspace=0.652) cb_st = 0.167 cb_mul = 0.193 else: adj_dict=dict( wspace = 0.477, top=0.921, bottom=0.17, left=0.046, right=0.937, hspace=0.121) cb_st = 0.18 cb_mul = 0.19 ds4 = ds4[[x for x in ds4 if x not in non_hp_vars]] ds5 = ds5[[x for x in ds5 if x not in non_hp_vars]] seq = 'Blues' cat = 'Dark2' hp_dict = { 'alpha': ['Reds', 'float'], 'activation': ['Set1_r', 'str'], 'hidden_layer_sizes': ['Paired', 'str'], 'learning_rate': ['Spectral_r', 'str'], 'solver': ['Dark2', 'str'], 'kernel': ['Dark2', 'str'], 'C': 
['Blues', 'float'], 'gamma': ['Oranges', 'float'], 'degree': ['Greens', 'str'], 'coef0': ['Spectral', 'str'], 'max_depth': ['Blues', 'int'], 'max_features': ['Dark2', 'str'], 'min_samples_leaf': ['Greens', 'int'], 'min_samples_split': ['Reds', 'int'], 'n_estimators': ['Oranges', 'int'] } # fix stuff for SVC: if model_name == 'SVC': ds4['degree'] = ds4['degree'].where(ds4['kernel']=='poly') ds4['coef0'] = ds4['coef0'].where(ds4['kernel']=='poly') ds5['degree'] = ds5['degree'].where(ds5['kernel']=='poly') ds5['coef0'] = ds5['coef0'].where(ds5['kernel']=='poly') for i, (da4, da5) in enumerate(zip(ds4, ds5)): df4 = ds4[da4].reset_coords(drop=True).to_dataset('scorer').to_dataframe() df5 = ds5[da5].reset_coords(drop=True).to_dataset('scorer').to_dataframe() df4.index.name = 'Outer Split' df5.index.name = 'Outer Split' # try: # df4 = df4.astype(float).round(2) # df5 = df5.astype(float).round(2) # except ValueError: # pass cmap = hp_dict.get(da4, 'Set1')[0] val_type = hp_dict.get(da4, 'int')[1] cbar_params = [cb_st + cb_mul*float(i), .175, .01, .71] plot_heatmaps_for_hyper_parameters_data_splits(df4, df5, axes=[axes[0, i], axes[1, i]], fig=fig, title=da4, cmap=cmap, cbar_params=cbar_params, fontsize=fontsize, val_type=val_type) if i > 0 : axes[0, i].set_ylabel('') axes[0, i].yaxis.set_tick_params(labelleft=False) axes[1, i].set_ylabel('') axes[1, i].yaxis.set_tick_params(labelleft=False) fig.tight_layout() fig.subplots_adjust(**adj_dict) if save: filename = 'Hyper-parameters_nested_{}.png'.format( model_name) plt.savefig(savefig_path / filename, bbox_inches='tight') return fig def plot_heatmap_for_hyper_parameters_df(df, ax=None, cmap='colorblind', title=None, fontsize=12): import pandas as pd import seaborn as sns import numpy as np sns.set_style('ticks') sns.set_style('whitegrid') sns.set(font_scale=1.2) value_to_int = {j: i for i, j in enumerate( sorted(pd.unique(df.values.ravel())))} # like you did # for key in value_to_int.copy().keys(): # try: # if np.isnan(key): # value_to_int['NA'] = value_to_int.pop(key) # df = df.fillna('NA') # except TypeError: # pass try: sorted_v_to_i = dict(sorted(value_to_int.items())) except TypeError: sorted_v_to_i = value_to_int n = len(value_to_int) # discrete colormap (n samples from a given cmap) cmap = sns.color_palette(cmap, n) if ax is None: ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap, linewidth=1, linecolor='k', square=False, cbar_kws={"shrink": .9}) else: ax = sns.heatmap(df.replace(sorted_v_to_i), cmap=cmap, ax=ax, linewidth=1, linecolor='k', square=False, cbar_kws={"shrink": .9}) if title is not None: ax.set_title(title, fontsize=fontsize) ax.set_xticklabels(ax.get_xticklabels(), rotation=30) ax.set_yticklabels(ax.get_yticklabels(), rotation=0) ax.tick_params(labelsize=fontsize) ax.set_ylabel(ax.get_ylabel(), fontsize=fontsize) ax.set_xlabel(ax.get_xlabel(), fontsize=fontsize) colorbar = ax.collections[0].colorbar r = colorbar.vmax - colorbar.vmin colorbar.set_ticks([colorbar.vmin + r / n * (0.5 + i) for i in range(n)]) colorbar.set_ticklabels(list(value_to_int.keys())) return ax # def plot_ROC_curves_for_all_models_and_scorers(dss, save=False, # fontsize=24, fig_split=1, # feat=['pwv', 'pwv+pressure', 'pwv+pressure+doy']): # import xarray as xr # import seaborn as sns # import matplotlib.pyplot as plt # import pandas as pd # cmap = sns.color_palette('tab10', len(feat)) # sns.set_style('whitegrid') # sns.set_style('ticks') # if fig_split == 1: # dss = dss.sel(scorer=['precision', 'recall', 'f1']) # elif fig_split == 2: # dss = 
dss.sel(scorer=['accuracy', 'tss', 'hss']) # fg = xr.plot.FacetGrid( # dss, # col='model', # row='scorer', # sharex=True, # sharey=True, figsize=(20, 20)) # for i in range(fg.axes.shape[0]): # i is rows # for j in range(fg.axes.shape[1]): # j is cols # ax = fg.axes[i, j] # modelname = dss['model'].isel(model=j).item() # scorer = dss['scorer'].isel(scorer=i).item() # chance_plot = [False for x in feat] # chance_plot[-1] = True # for k, f in enumerate(feat): # # name = '{}-{}-{}'.format(modelname, scoring, feat) # # model = dss.isel({'model': j, 'scoring': i}).sel( # # {'features': feat}) # model = dss.isel({'model': j, 'scorer': i} # ).sel({'features': f}) # # return model # title = 'ROC of {} model ({})'.format(modelname.replace('SVC', 'SVM'), scorer) # try: # ax = plot_ROC_curve_from_dss_nested_CV(model, outer_dim='outer_split', # plot_chance=[k], # main_label=f, # ax=ax, # color=cmap[k], title=title, # fontsize=fontsize) # except ValueError: # ax.grid('on') # continue # handles, labels = ax.get_legend_handles_labels() # lh_ser = pd.Series(labels, index=handles).drop_duplicates() # lh_ser = lh_ser.sort_values(ascending=False) # hand = lh_ser.index.values # labe = lh_ser.values # ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right", # fontsize=fontsize-7) # ax.grid('on') # if j >= 1: # ax.set_ylabel('') # if fig_split == 1: # ax.set_xlabel('') # ax.tick_params(labelbottom=False) # else: # if i <= 1: # ax.set_xlabel('') # # title = '{} station: {} total events'.format( # # station.upper(), events) # # if max_flow > 0: # # title = '{} station: {} total events (max flow = {} m^3/sec)'.format( # # station.upper(), events, max_flow) # # fg.fig.suptitle(title, fontsize=fontsize) # fg.fig.tight_layout() # fg.fig.subplots_adjust(top=0.937, # bottom=0.054, # left=0.039, # right=0.993, # hspace=0.173, # wspace=0.051) # if save: # filename = 'ROC_curves_nested_{}_figsplit_{}.png'.format( # dss['outer_split'].size, fig_split) # plt.savefig(savefig_path / filename, bbox_inches='tight') # return fg def plot_hydro_ML_models_results_from_dss(dss, std_on='outer', save=False, fontsize=16, plot_type='ROC', split=1, feat=['pwv', 'pressure+pwv', 'doy+pressure+pwv']): import xarray as xr import seaborn as sns import matplotlib.pyplot as plt import pandas as pd cmap = sns.color_palette("colorblind", len(feat)) if split == 1: dss = dss.sel(scoring=['f1', 'precision', 'recall']) elif split == 2: dss = dss.sel(scoring=['tss', 'hss', 'roc-auc', 'accuracy']) fg = xr.plot.FacetGrid( dss, col='model', row='scoring', sharex=True, sharey=True, figsize=(20, 20)) for i in range(fg.axes.shape[0]): # i is rows for j in range(fg.axes.shape[1]): # j is cols ax = fg.axes[i, j] modelname = dss['model'].isel(model=j).item() scoring = dss['scoring'].isel(scoring=i).item() chance_plot = [False for x in feat] chance_plot[-1] = True for k, f in enumerate(feat): # name = '{}-{}-{}'.format(modelname, scoring, feat) # model = dss.isel({'model': j, 'scoring': i}).sel( # {'features': feat}) model = dss.isel({'model': j, 'scoring': i} ).sel({'features': f}) title = '{} of {} model ({})'.format( plot_type, modelname, scoring) try: plot_ROC_PR_curve_from_dss(model, outer_dim='outer_kfold', inner_dim='inner_kfold', plot_chance=[k], main_label=f, plot_type=plot_type, plot_std_legend=False, ax=ax, color=cmap[k], title=title, std_on=std_on, fontsize=fontsize) except ValueError: ax.grid('on') continue handles, labels = ax.get_legend_handles_labels() hand = pd.Series( labels, index=handles).drop_duplicates().index.values labe = 
pd.Series(labels, index=handles).drop_duplicates().values ax.legend(handles=hand.tolist(), labels=labe.tolist(), loc="lower right", fontsize=14) ax.grid('on') # title = '{} station: {} total events'.format( # station.upper(), events) # if max_flow > 0: # title = '{} station: {} total events (max flow = {} m^3/sec)'.format( # station.upper(), events, max_flow) # fg.fig.suptitle(title, fontsize=fontsize) fg.fig.tight_layout() fg.fig.subplots_adjust(top=0.937, bottom=0.054, left=0.039, right=0.993, hspace=0.173, wspace=0.051) if save: filename = 'hydro_models_on_{}_{}_std_on_{}_{}.png'.format( dss['inner_kfold'].size, dss['outer_kfold'].size, std_on, plot_type) plt.savefig(savefig_path / filename, bbox_inches='tight') return fg # def plot_hydro_ML_models_result(model_da, nsplits=2, station='drag', # test_size=20, n_splits_plot=None, save=False): # import xarray as xr # import seaborn as sns # import matplotlib.pyplot as plt # from sklearn.model_selection import train_test_split # # TODO: add plot_roc_curve(model, X_other_station, y_other_station) # # TODO: add pw_station, hs_id # cmap = sns.color_palette("colorblind", 3) # X, y = produce_X_y(station, hydro_pw_dict[station], neg_pos_ratio=1) # events = int(y[y == 1].sum().item()) # model_da = model_da.sel( # splits=nsplits, # test_size=test_size).reset_coords( # drop=True) ## just_pw = [x for x in X.feature.values if 'pressure' not in x] ## X_pw = X.sel(feature=just_pw) # fg = xr.plot.FacetGrid( # model_da, # col='model', # row='scoring', # sharex=True, # sharey=True, figsize=(20, 20)) # for i in range(fg.axes.shape[0]): # i is rows # for j in range(fg.axes.shape[1]): # j is cols # ax = fg.axes[i, j] # modelname = model_da['model'].isel(model=j).item() # scoring = model_da['scoring'].isel(scoring=i).item() # chance_plot = [False, False, True] # for k, feat in enumerate(model_da['feature'].values): # name = '{}-{}-{}'.format(modelname, scoring, feat) # model = model_da.isel({'model': j, 'scoring': i}).sel({'feature': feat}).item() # title = 'ROC of {} model ({})'.format(modelname, scoring) # if not '+' in feat: # f = [x for x in X.feature.values if feat in x] # X_f = X.sel(feature=f) # else: # X_f = X # X_train, X_test, y_train, y_test = train_test_split( # X_f, y, test_size=test_size/100, shuffle=True, random_state=42) # # plot_many_ROC_curves(model, X_f, y, name=name, # color=cmap[k], ax=ax, # plot_chance=chance_plot[k], # title=title, n_splits=n_splits_plot) # fg.fig.suptitle('{} station: {} total_events, test_events = {}, n_splits = {}'.format(station.upper(), events, int(events* test_size/100), nsplits)) # fg.fig.tight_layout() # fg.fig.subplots_adjust(top=0.937, # bottom=0.054, # left=0.039, # right=0.993, # hspace=0.173, # wspace=0.051) # if save: # plt.savefig(savefig_path / 'try.png', bbox_inches='tight') # return fg def order_features_list(flist): """ order the feature list in load_ML_run_results so i don't get duplicates""" import pandas as pd import numpy as np # first get all features: li = [x.split('+') for x in flist] flat_list = [item for sublist in li for item in sublist] f = list(set(flat_list)) nums = np.arange(1, len(f)+1) # now assagin a number for each entry: inds = [] for x in flist: for fe, num in zip(f, nums): x = x.replace(fe, str(10**num)) inds.append(eval(x)) ser = pd.Series(inds) ser.index = flist ser1 = ser.drop_duplicates() di = dict(zip(ser1.values, ser1.index)) new_flist = [] for ind, feat in zip(inds, flist): new_flist.append(di.get(ind)) return new_flist def smart_add_dataarray_to_ds_list(dsl, 
da_name='feature_importances'): """add data array to ds_list even if it does not exist, use shape of data array that exists in other part of ds list""" import numpy as np import xarray as xr # print(da_name) fi = [x for x in dsl if da_name in x][0] print(da_name, fi[da_name].shape) fi = fi[da_name].copy(data=np.zeros(shape=fi[da_name].shape)) new_dsl = [] for ds in dsl: if da_name not in ds: ds = xr.merge([ds, fi], combine_attrs='no_conflicts') new_dsl.append(ds) return new_dsl def load_ML_run_results(path=hydro_ml_path, prefix='CVR', change_DOY_to_doy=True): from aux_gps import path_glob import xarray as xr # from aux_gps import save_ncfile import pandas as pd import numpy as np print('loading hydro ML results for all models and features') # print('loading hydro ML results for station {}'.format(pw_station)) model_files = path_glob(path, '{}_*.nc'.format(prefix)) model_files = sorted(model_files) # model_files = [x for x in model_files if pw_station in x.as_posix()] ds_list = [xr.load_dataset(x) for x in model_files] if change_DOY_to_doy: for ds in ds_list: if 'DOY' in ds.features: new_feats = [x.replace('DOY', 'doy') for x in ds['feature'].values] ds['feature'] = new_feats ds.attrs['features'] = [x.replace('DOY', 'doy') for x in ds.attrs['features']] model_as_str = [x.as_posix().split('/')[-1].split('.')[0] for x in model_files] model_names = [x.split('_')[1] for x in model_as_str] model_scores = [x.split('_')[3] for x in model_as_str] model_features = [x.split('_')[2] for x in model_as_str] if change_DOY_to_doy: model_features = [x.replace('DOY', 'doy') for x in model_features] new_model_features = order_features_list(model_features) ind = pd.MultiIndex.from_arrays( [model_names, new_model_features, model_scores], names=( 'model', 'features', 'scoring')) # ind1 = pd.MultiIndex.from_product([model_names, model_scores, model_features], names=[ # 'model', 'scoring', 'feature']) # ds_list = [x[data_vars] for x in ds_list] # complete non-existant fields like best and fi for all ds: data_vars = [x for x in ds_list[0] if x.startswith('test')] # data_vars += ['AUC', 'TPR'] data_vars += [x for x in ds_list[0] if x.startswith('y_')] bests = [[x for x in y if x.startswith('best')] for y in ds_list] data_vars += list(set([y for x in bests for y in x])) if 'RF' in model_names: data_vars += ['feature_importances'] new_ds_list = [] for dvar in data_vars: ds_list = smart_add_dataarray_to_ds_list(ds_list, dvar) # # check if all data vars are in each ds and merge them: new_ds_list = [xr.merge([y[x] for x in data_vars if x in y], combine_attrs='no_conflicts') for y in ds_list] # concat all dss = xr.concat(new_ds_list, dim='dim_0') dss['dim_0'] = ind dss = dss.unstack('dim_0') # dss.attrs['pwv_id'] = pw_station # fix roc_auc to roc-auc in dss datavars dss = dss.rename_vars({'test_roc_auc': 'test_roc-auc'}) # dss['test_roc_auc'].name = 'test_roc-auc' print('calculating ROC, PR metrics.') dss = calculate_metrics_from_ML_dss(dss) print('Done!') return dss def plot_nested_CV_test_scores(dss, feats=None, fontsize=16, save=True, wv_label='pwv'): import seaborn as sns import matplotlib.pyplot as plt from aux_gps import convert_da_to_long_form_df import numpy as np import xarray as xr def change_width(ax, new_value) : for patch in ax.patches : current_width = patch.get_width() diff = current_width - new_value # we change the bar width patch.set_width(new_value) # we recenter the bar patch.set_x(patch.get_x() + diff * .5) def show_values_on_bars(axs, fs=12, fw='bold', exclude_bar_num=None): import numpy as np def 
_show_on_single_plot(ax, exclude_bar_num=3): for i, p in enumerate(ax.patches): if i != exclude_bar_num and exclude_bar_num is not None: _x = p.get_x() + p.get_width() / 2 _y = p.get_y() + p.get_height() value = '{:.2f}'.format(p.get_height()) ax.text(_x, _y, value, ha="right", fontsize=fs, fontweight=fw, zorder=20) if isinstance(axs, np.ndarray): for idx, ax in np.ndenumerate(axs): _show_on_single_plot(ax, exclude_bar_num) else: _show_on_single_plot(axs, exclude_bar_num) splits = dss['outer_split'].size try: assert 'best' in dss.attrs['comment'] best = True except AssertionError: best = False except KeyError: best = False if 'neg_sample' in dss.dims: neg = dss['neg_sample'].size else: neg = 1 if 'model' not in dss.dims: dss = dss.expand_dims('model') dss['model'] = [dss.attrs['model']] dss = dss.sortby('model', ascending=False) dss = dss.reindex(scorer=scorer_order) if feats is None: feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy'] dst = dss.sel(features=feats) # .reset_coords(drop=True) # df = dst['test_score'].to_dataframe() # df['scorer'] = df.index.get_level_values(3) # df['model'] = df.index.get_level_values(0) # df['features'] = df.index.get_level_values(1) # df['outer_splits'] = df.index.get_level_values(2) # df['model'] = df['model'].str.replace('SVC', 'SVM') # df = df.melt(value_vars='test_score', id_vars=[ # 'features', 'model', 'scorer', 'outer_splits'], var_name='test_score', # value_name='score') da = dst['test_score'] if len(feats) == 5: da_empty = da.isel(features=0).copy( data=np.zeros(da.isel(features=0).shape)) da_empty['features'] = 'empty' da = xr.concat([da, da_empty], 'features') da = da.reindex(features=['doy', 'pressure', 'pwv', 'empty', 'pwv+pressure', 'pwv+pressure+doy']) da.name = 'feature groups' df = convert_da_to_long_form_df(da, value_name='score', var_name='feature groups') sns.set(font_scale=1.5) sns.set_style('whitegrid') sns.set_style('ticks') cmap = sns.color_palette('tab10', n_colors=len(feats)) if len(feats) == 5: cmap = ['tab:purple', 'tab:brown', 'tab:blue', 'tab:blue', 'tab:orange', 'tab:green'] fg = sns.FacetGrid(data=df, row='model', col='scorer', height=4, aspect=0.9) # fg.map_dataframe(sns.stripplot, x="test_score", y="score", hue="features", # data=df, dodge=True, alpha=1, zorder=1, palette=cmap) # fg.map_dataframe(sns.pointplot, x="test_score", y="score", hue="features", # data=df, dodge=True, join=False, palette=cmap, # markers="o", scale=.75, ci=None) fg.map_dataframe(sns.barplot, x='feature groups', y="score", hue='features', ci='sd', capsize=None, errwidth=2, errcolor='k', palette=cmap, dodge=True) # g = sns.catplot(x='test_score', y="score", hue='features', # col="scorer", row='model', ci='sd', # data=df, kind="bar", capsize=0.25, # height=4, aspect=1.5, errwidth=1.5) #fg.set_xticklabels(rotation=45) # fg.set_yticklabels([0, 0.2, 0.4, 0.6, 0.8, 1.0], fontsize=fontsize) fg.set_ylabels('score') [x.grid(True) for x in fg.axes.flatten()] handles, labels = fg.axes[0, 0].get_legend_handles_labels() if len(feats) == 5: del handles[3] del labels[3] show_values_on_bars(fg.axes, fs=fontsize-4, exclude_bar_num=3) for i in range(fg.axes.shape[0]): # i is rows model = dss['model'].isel(model=i).item() if model == 'SVC': model = 'SVM' for j in range(fg.axes.shape[1]): # j is cols ax = fg.axes[i, j] scorer = dss['scorer'].isel(scorer=j).item() title = '{} | scorer={}'.format(model, scorer) ax.set_title(title, fontsize=fontsize) ax.set_xlabel('') ax.set_ylim(0, 1) change_width(ax, 0.110) fg.set_xlabels(' ') if wv_label is not None: labels = 
[x.replace('pwv', wv_label) for x in labels] fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k', framealpha=0.5, fancybox=True, facecolor='white', ncol=len(feats), fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005), bbox_transform=plt.gcf().transFigure) # true_scores = dst.sel(scorer=scorer, model=model)['true_score'] # dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color) # ymax = ax.get_ylim()[-1] - 0.2 # ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap) fg.fig.tight_layout() fg.fig.subplots_adjust(top=0.92) if save: if best: filename = 'ML_scores_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg) else: filename = 'ML_scores_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg) plt.savefig(savefig_path / filename, bbox_inches='tight') return fg def plot_holdout_test_scores(dss, feats='pwv+pressure+doy'): import seaborn as sns import matplotlib.pyplot as plt def show_values_on_bars(axs, fs=12, fw='bold'): import numpy as np def _show_on_single_plot(ax): for p in ax.patches: _x = p.get_x() + p.get_width() / 2 _y = p.get_y() + p.get_height() value = '{:.2f}'.format(p.get_height()) ax.text(_x, _y, value, ha="center", fontsize=fs, fontweight=fw) if isinstance(axs, np.ndarray): for idx, ax in np.ndenumerate(axs): _show_on_single_plot(ax) else: _show_on_single_plot(axs) if feats is None: feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy'] dst = dss.sel(features=feats) # .reset_coords(drop=True) df = dst['holdout_test_scores'].to_dataframe() df['scorer'] = df.index.droplevel(1).droplevel(0) df['model'] = df.index.droplevel(2).droplevel(1) df['features'] = df.index.droplevel(2).droplevel(0) df['model'] = df['model'].str.replace('SVC', 'SVM') df = df.melt(value_vars='holdout_test_scores', id_vars=[ 'features', 'model', 'scorer'], var_name='test_score') sns.set(font_scale=1.5) sns.set_style('whitegrid') sns.set_style('ticks') g = sns.catplot(x="model", y="value", hue='features', col="scorer", ci='sd', row=None, col_wrap=3, data=df, kind="bar", capsize=0.15, height=4, aspect=1.5, errwidth=0.8) g.set_xticklabels(rotation=45) [x.grid(True) for x in g.axes.flatten()] show_values_on_bars(g.axes) filename = 'ML_scores_models_holdout_{}.png'.format('_'.join(feats)) plt.savefig(savefig_path / filename, bbox_inches='tight') return df def prepare_test_df_to_barplot_from_dss(dss, feats='doy+pwv+pressure', plot=True, splitfigs=True): import seaborn as sns import matplotlib.pyplot as plt dvars = [x for x in dss if 'test_' in x] scores = [x.split('_')[-1] for x in dvars] dst = dss[dvars] # dst['scoring'] = [x+'_inner' for x in dst['scoring'].values] # for i, ds in enumerate(dst): # dst[ds] = dst[ds].sel(scoring=scores[i]).reset_coords(drop=True) if feats is None: feats = ['pwv', 'pressure+pwv', 'doy+pressure+pwv'] dst = dst.sel(features=feats) # .reset_coords(drop=True) dst = dst.rename_vars(dict(zip(dvars, scores))) # dst = dst.drop('scoring') df = dst.to_dataframe() # dfu = df df['inner score'] = df.index.droplevel(2).droplevel(1).droplevel(0) df['features'] = df.index.droplevel(2).droplevel(2).droplevel(1) df['model'] = df.index.droplevel(2).droplevel(0).droplevel(1) df = df.melt(value_vars=scores, id_vars=[ 'features', 'model', 'inner score'], var_name='outer score') # return dfu # dfu.columns = dfu.columns.droplevel(1) # dfu = dfu.T # dfu['score'] = dfu.index # dfu = dfu.reset_index() # df = dfu.melt(value_vars=['MLP', 'RF', 'SVC'], id_vars=['score']) df1 = df[(df['inner 
score']=='f1') | (df['inner score']=='precision') | (df['inner score']=='recall')] df2 = df[(df['inner score']=='hss') | (df['inner score']=='tss') | (df['inner score']=='roc-auc') | (df['inner score']=='accuracy')] if plot: sns.set(font_scale = 1.5) sns.set_style('whitegrid') sns.set_style('ticks') if splitfigs: g = sns.catplot(x="outer score", y="value", hue='features', col="inner score", ci='sd',row='model', data=df1, kind="bar", capsize=0.15, height=4, aspect=1.5,errwidth=0.8) g.set_xticklabels(rotation=45) filename = 'ML_scores_models_{}_1.png'.format('_'.join(feats)) plt.savefig(savefig_path / filename, bbox_inches='tight') g = sns.catplot(x="outer score", y="value", hue='features', col="inner score", ci='sd',row='model', data=df2, kind="bar", capsize=0.15, height=4, aspect=1.5,errwidth=0.8) g.set_xticklabels(rotation=45) filename = 'ML_scores_models_{}_2.png'.format('_'.join(feats)) plt.savefig(savefig_path / filename, bbox_inches='tight') else: g = sns.catplot(x="outer score", y="value", hue='features', col="inner score", ci='sd',row='model', data=df, kind="bar", capsize=0.15, height=4, aspect=1.5,errwidth=0.8) g.set_xticklabels(rotation=45) filename = 'ML_scores_models_{}.png'.format('_'.join(feats)) plt.savefig(savefig_path / filename, bbox_inches='tight') return df def calculate_metrics_from_ML_dss(dss): from sklearn.metrics import roc_curve from sklearn.metrics import roc_auc_score from sklearn.metrics import auc from sklearn.metrics import precision_recall_curve import xarray as xr import numpy as np import pandas as pd mean_fpr = np.linspace(0, 1, 100) # fpr = dss['y_true'].copy(deep=False).values # tpr = dss['y_true'].copy(deep=False).values # y_true = dss['y_true'].values # y_prob = dss['y_prob'].values ok = [x for x in dss['outer_kfold'].values] ik = [x for x in dss['inner_kfold'].values] m = [x for x in dss['model'].values] sc = [x for x in dss['scoring'].values] f = [x for x in dss['features'].values] # r = [x for x in dss['neg_pos_ratio'].values] ind = pd.MultiIndex.from_product( [ok, ik, m, sc, f], names=[ 'outer_kfold', 'inner_kfold', 'model', 'scoring', 'features']) # , 'station']) okn = [x for x in range(dss['outer_kfold'].size)] ikn = [x for x in range(dss['inner_kfold'].size)] mn = [x for x in range(dss['model'].size)] scn = [x for x in range(dss['scoring'].size)] fn = [x for x in range(dss['features'].size)] ds_list = [] for i in okn: for j in ikn: for k in mn: for n in scn: for m in fn: ds = xr.Dataset() y_true = dss['y_true'].isel( outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze() y_prob = dss['y_prob'].isel( outer_kfold=i, inner_kfold=j, model=k, scoring=n, features=m).reset_coords(drop=True).squeeze() y_true = y_true.dropna('sample') y_prob = y_prob.dropna('sample') if y_prob.size == 0: # in case of NaNs in the results: fpr_da = xr.DataArray( np.nan*np.ones((1)), dims=['sample']) fpr_da['sample'] = [ x for x in range(fpr_da.size)] tpr_da = xr.DataArray( np.nan*np.ones((1)), dims=['sample']) tpr_da['sample'] = [ x for x in range(tpr_da.size)] prn_da = xr.DataArray( np.nan*np.ones((1)), dims=['sample']) prn_da['sample'] = [ x for x in range(prn_da.size)] rcll_da = xr.DataArray( np.nan*np.ones((1)), dims=['sample']) rcll_da['sample'] = [ x for x in range(rcll_da.size)] tpr_fpr = xr.DataArray( np.nan*np.ones((100)), dims=['FPR']) tpr_fpr['FPR'] = mean_fpr prn_rcll = xr.DataArray( np.nan*np.ones((100)), dims=['RCLL']) prn_rcll['RCLL'] = mean_fpr pr_auc_da = xr.DataArray(np.nan) roc_auc_da = xr.DataArray(np.nan) 
no_skill_da = xr.DataArray(np.nan) else: no_skill = len( y_true[y_true == 1]) / len(y_true) no_skill_da = xr.DataArray(no_skill) fpr, tpr, _ = roc_curve(y_true, y_prob) interp_tpr = np.interp(mean_fpr, fpr, tpr) interp_tpr[0] = 0.0 roc_auc = roc_auc_score(y_true, y_prob) prn, rcll, _ = precision_recall_curve( y_true, y_prob) interp_prn = np.interp( mean_fpr, rcll[::-1], prn[::-1]) interp_prn[0] = 1.0 pr_auc_score = auc(rcll, prn) roc_auc_da = xr.DataArray(roc_auc) pr_auc_da = xr.DataArray(pr_auc_score) prn_da = xr.DataArray(prn, dims=['sample']) prn_da['sample'] = [x for x in range(len(prn))] rcll_da = xr.DataArray(rcll, dims=['sample']) rcll_da['sample'] = [ x for x in range(len(rcll))] fpr_da = xr.DataArray(fpr, dims=['sample']) fpr_da['sample'] = [x for x in range(len(fpr))] tpr_da = xr.DataArray(tpr, dims=['sample']) tpr_da['sample'] = [x for x in range(len(tpr))] tpr_fpr = xr.DataArray( interp_tpr, dims=['FPR']) tpr_fpr['FPR'] = mean_fpr prn_rcll = xr.DataArray( interp_prn, dims=['RCLL']) prn_rcll['RCLL'] = mean_fpr ds['fpr'] = fpr_da ds['tpr'] = tpr_da ds['roc-auc'] = roc_auc_da ds['pr-auc'] = pr_auc_da ds['prn'] = prn_da ds['rcll'] = rcll_da ds['TPR'] = tpr_fpr ds['PRN'] = prn_rcll ds['no_skill'] = no_skill_da ds_list.append(ds) ds = xr.concat(ds_list, 'dim_0') ds['dim_0'] = ind ds = ds.unstack() ds.attrs = dss.attrs ds['fpr'].attrs['long_name'] = 'False positive rate' ds['tpr'].attrs['long_name'] = 'True positive rate' ds['prn'].attrs['long_name'] = 'Precision' ds['rcll'].attrs['long_name'] = 'Recall' ds['roc-auc'].attrs['long_name'] = 'ROC or FPR-TPR Area under curve' ds['pr-auc'].attrs['long_name'] = 'Precition-Recall Area under curve' ds['PRN'].attrs['long_name'] = 'Precision-Recall' ds['TPR'].attrs['long_name'] = 'TPR-FPR (ROC)' dss = xr.merge([dss, ds], combine_attrs='no_conflicts') return dss # # def load_ML_models(path=hydro_ml_path, station='drag', prefix='CVM', suffix='.pkl'): # from aux_gps import path_glob # import joblib # import matplotlib.pyplot as plt # import seaborn as sns # import xarray as xr # import pandas as pd # model_files = path_glob(path, '{}_*{}'.format(prefix, suffix)) # model_files = sorted(model_files) # model_files = [x for x in model_files if station in x.as_posix()] # m_list = [joblib.load(x) for x in model_files] # model_files = [x.as_posix().split('/')[-1].split('.')[0] for x in model_files] # # fix roc-auc: # model_files = [x.replace('roc_auc', 'roc-auc') for x in model_files] # print('loading {} station only.'.format(station)) # model_names = [x.split('_')[3] for x in model_files] ## model_pw_stations = [x.split('_')[1] for x in model_files] ## model_hydro_stations = [x.split('_')[2] for x in model_files] # model_nsplits = [x.split('_')[6] for x in model_files] # model_scores = [x.split('_')[5] for x in model_files] # model_features = [x.split('_')[4] for x in model_files] # model_test_sizes = [] # for file in model_files: # try: # model_test_sizes.append(int(file.split('_')[7])) # except IndexError: # model_test_sizes.append(20) ## model_pwv_hs_id = list(zip(model_pw_stations, model_hydro_stations)) ## model_pwv_hs_id = ['_'.join(x) for filename = 'CVR_{}_{}_{}_{}_{}.nc'.format( # name, features, refitted_scorer, ikfolds, okfolds) # x in model_pwv_hs_id] # # transform model_dict to dataarray: # tups = [tuple(x) for x in zip(model_names, model_scores, model_nsplits, model_features, model_test_sizes)] #, model_pwv_hs_id)] # ind = pd.MultiIndex.from_tuples((tups), names=['model', 'scoring', 'splits', 'feature', 'test_size']) #, 'station']) # da = 
xr.DataArray(m_list, dims='dim_0') # da['dim_0'] = ind # da = da.unstack('dim_0') # da['splits'] = da['splits'].astype(int) # da['test_size'].attrs['units'] = '%' # return da def plot_heatmaps_for_all_models_and_scorings(dss, var='roc-auc'): # , save=True): import xarray as xr import seaborn as sns import matplotlib.pyplot as plt # assert station == dss.attrs['pwv_id'] cmaps = {'roc-auc': sns.color_palette("Blues", as_cmap=True), 'pr-auc': sns.color_palette("Greens", as_cmap=True)} fg = xr.plot.FacetGrid( dss, col='model', row='scoring', sharex=True, sharey=True, figsize=(10, 20)) dss = dss.mean('inner_kfold', keep_attrs=True) vmin, vmax = dss[var].min(), 1 norm = plt.Normalize(vmin=vmin, vmax=vmax) for i in range(fg.axes.shape[0]): # i is rows for j in range(fg.axes.shape[1]): # j is cols ax = fg.axes[i, j] modelname = dss['model'].isel(model=j).item() scoring = dss['scoring'].isel(scoring=i).item() model = dss[var].isel( {'model': j, 'scoring': i}).reset_coords(drop=True) df = model.to_dataframe() title = '{} model ({})'.format(modelname, scoring) df = df.unstack() mean = df.mean() mean.name = 'mean' df = df.append(mean).T.droplevel(0) ax = sns.heatmap(df, annot=True, cmap=cmaps[var], cbar=False, ax=ax, norm=norm) ax.set_title(title) ax.vlines([4], 0, 10, color='r', linewidth=2) if j > 0: ax.set_ylabel('') if i < 2: ax.set_xlabel('') cax = fg.fig.add_axes([0.1, 0.025, .8, .015]) fg.fig.colorbar(ax.get_children()[0], cax=cax, orientation="horizontal") fg.fig.suptitle('{}'.format( dss.attrs[var].upper()), fontweight='bold') fg.fig.tight_layout() fg.fig.subplots_adjust(top=0.937, bottom=0.099, left=0.169, right=0.993, hspace=0.173, wspace=0.051) # if save: # filename = 'hydro_models_heatmaps_on_{}_{}_{}.png'.format( # station, dss['outer_kfold'].size, var) # plt.savefig(savefig_path / filename, bbox_inches='tight') return fg def plot_ROC_from_dss(dss, feats=None, fontsize=16, save=True, wv_label='pwv', best=False): import seaborn as sns import matplotlib.pyplot as plt import pandas as pd from aux_gps import convert_da_to_long_form_df sns.set_style('whitegrid') sns.set_style('ticks') sns.set(font_scale=1.0) cmap = sns.color_palette('tab10', n_colors=3) splits = dss['outer_split'].size if 'neg_sample' in dss.dims: neg = dss['neg_sample'].size else: neg = 1 dss = dss.reindex(scorer=scorer_order) if feats is None: feats = ['pwv', 'pwv+pressure', 'pwv+pressure+doy'] if 'model' not in dss.dims: dss = dss.expand_dims('model') dss['model'] = [dss.attrs['model']] dss = dss.sortby('model', ascending=False) dst = dss.sel(features=feats) # .reset_coords(drop=True) # df = dst['TPR'].to_dataframe() # if 'neg_sample' in dss.dims: # fpr_lnum = 5 # model_lnum = 0 # scorer_lnum = 4 # features_lnum = 1 # else: # fpr_lnum = 4 # model_lnum = 0 # scorer_lnum = 3 # features_lnum = 1 # df['FPR'] = df.index.get_level_values(fpr_lnum) # df['model'] = df.index.get_level_values(model_lnum) # df['scorer'] = df.index.get_level_values(scorer_lnum) # df['features'] = df.index.get_level_values(features_lnum) df = convert_da_to_long_form_df(dst['TPR'], var_name='score') # df = df.melt(value_vars='TPR', id_vars=[ # 'features', 'model', 'scorer', 'FPR'], var_name='score') if best is not None: if best == 'compare_negs': df1 = df.copy()[df['neg_sample'] == 1] df2 = df.copy() df2.drop('neg_sample', axis=1, inplace=True) df1.drop('neg_sample', axis=1, inplace=True) df1['neg_group'] = 1 df2['neg_group'] = 25 df = pd.concat([df1, df2]) col = 'neg_group' titles = ['Neg=1', 'Neg=25'] else: col=None else: col = 'scorer' df['model'] = 
df['model'].str.replace('SVC', 'SVM') fg = sns.FacetGrid(df, col=col, row='model', aspect=1) fg.map_dataframe(sns.lineplot, x='FPR', y='value', hue='features', ci='sd', palette=cmap, n_boot=None, estimator='mean') for i in range(fg.axes.shape[0]): # i is rows model = dss['model'].isel(model=i).item() auc_model = dst.sel(model=model) if model == 'SVC': model = 'SVM' for j in range(fg.axes.shape[1]): # j is cols scorer = dss['scorer'].isel(scorer=j).item() auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).reset_coords(drop=True).to_dataframe() auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats] auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats] auc_mean = [x.item() for x in auc_scorer_mean] auc_std = [x.item() for x in auc_scorer_std] if j == 0 and best is not None: scorer = dss['scorer'].isel(scorer=j).item() auc_scorer_df = auc_model['roc_auc_score'].sel(scorer=scorer).isel(neg_sample=0).reset_coords(drop=True).to_dataframe() auc_scorer_mean = [auc_scorer_df.loc[x].mean() for x in feats] auc_scorer_std = [auc_scorer_df.loc[x].std() for x in feats] auc_mean = [x.item() for x in auc_scorer_mean] auc_std = [x.item() for x in auc_scorer_std] ax = fg.axes[i, j] ax.plot([0, 1], [0, 1], color='tab:red', linestyle='--', lw=2, label='chance') if best is not None: if best == 'compare_negs': title = '{} | {}'.format(model, titles[j]) else: title = '{}'.format(model) else: title = '{} | scorer={}'.format(model, scorer) ax.set_title(title, fontsize=fontsize) handles, labels = ax.get_legend_handles_labels() hands = handles[0:3] # labes = labels[0:3] new_labes = [] for auc, auc_sd in zip(auc_mean, auc_std): l = r'{:.2}$\pm${:.1}'.format(auc, auc_sd) new_labes.append(l) ax.legend(handles=hands, labels=new_labes, loc='lower right', title='AUCs', prop={'size': fontsize-4}) ax.set_xticks([0, 0.2, 0.4, 0.6, 0.8, 1]) ax.grid(True) # return handles, labels fg.set_ylabels('True Positive Rate', fontsize=fontsize) fg.set_xlabels('False Positive Rate', fontsize=fontsize) if wv_label is not None: labels = [x.replace('pwv', wv_label) for x in labels] if best is not None: if best == 'compare_negs': fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k', framealpha=0.5, fancybox=True, facecolor='white', ncol=2, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005), bbox_transform=plt.gcf().transFigure) fg.fig.tight_layout() fg.fig.subplots_adjust(top=0.865, bottom=0.079, left=0.144, right=0.933, hspace=0.176, wspace=0.2) else: fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k', framealpha=0.5, fancybox=True, facecolor='white', ncol=1, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005), bbox_transform=plt.gcf().transFigure) fg.fig.tight_layout() fg.fig.subplots_adjust(top=0.825, bottom=0.079, left=0.184, right=0.933, hspace=0.176, wspace=0.2) else: fg.fig.legend(handles=handles, labels=labels, prop={'size': fontsize}, edgecolor='k', framealpha=0.5, fancybox=True, facecolor='white', ncol=5, fontsize=fontsize, loc='upper center', bbox_to_anchor=(0.5, 1.005), bbox_transform=plt.gcf().transFigure) # true_scores = dst.sel(scorer=scorer, model=model)['true_score'] # dss['permutation_score'].plot.hist(ax=ax, bins=25, color=color) # ymax = ax.get_ylim()[-1] - 0.2 # ax.vlines(x=true_scores.values, ymin=0, ymax=ymax, linestyle='--', color=cmap) fg.fig.tight_layout() fg.fig.subplots_adjust(top=0.915) if save: if best is not None: filename = 
'ROC_plots_models_nested_CV_best_hp_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg) else: filename = 'ROC_plots_models_nested_CV_{}_{}_neg_{}.png'.format('_'.join(feats), splits, neg) plt.savefig(savefig_path / filename, bbox_inches='tight') return fg def plot_permutation_importances_from_dss(dss, feat_dim='features', outer_dim='outer_split', features='pwv+pressure+doy', fix_xticklabels=True,split=1, axes=None, save=True): import matplotlib.pyplot as plt import numpy as np import seaborn as sns from natsort import natsorted sns.set_palette('Dark2', 6) sns.set_style('whitegrid') sns.set_style('ticks') model = dss.attrs['model'] # use dss.sel(model='RF') first as input dss['feature'] = dss['feature'].str.replace('DOY', 'doy') dss = dss.sel({feat_dim: features}) # tests_ds = dss['test_score'] # tests_ds = tests_ds.sel(scorer=scorer) # max_score_split = int(tests_ds.idxmax(outer_dim).item()) # use mean outer split: # dss = dss.mean(outer_dim) dss = dss.sel({outer_dim: split}) feats = features.split('+') fn = len(feats) if fn == 1: gr_spec = None fix_xticklabels = False elif fn == 2: gr_spec = [1, 1] elif fn == 3: gr_spec = [2, 5, 5] if axes is None: fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec}) try: axes.flatten() except AttributeError: axes = [axes] for i, f in enumerate(sorted(feats)): fe = [x for x in dss['feature'].values if f in x] dsf = dss['PI_mean'].sel( feature=fe).reset_coords( drop=True) sorted_feat = natsorted([x for x in dsf.feature.values]) dsf = dsf.reindex(feature=sorted_feat) print([x for x in dsf.feature.values]) # dsf = dss['PI_mean'].sel( # feature=fe).reset_coords( # drop=True) dsf = dsf.to_dataset('scorer').to_dataframe( ).reset_index(drop=True) title = '{}'.format(f.upper()) dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20, width=.8) dsf_sum = dsf.sum().tolist() handles, labels = axes[i].get_legend_handles_labels() labels = [ '{} ({:.1f})'.format( x, y) for x, y in zip( labels, dsf_sum)] axes[i].legend(handles=handles, labels=labels, prop={'size': 10}, loc='upper left') axes[i].set_ylabel('Scores') axes[i].grid(axis='y', zorder=1) if fix_xticklabels: n = sum(['pwv' in x for x in dss.feature.values]) axes[0].xaxis.set_ticklabels('') hrs = np.arange(-24, -24+n) axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12) axes[2].set_xticklabels(hrs, rotation=30, ha="center", fontsize=12) axes[1].set_xlabel('Hours prior to flood') axes[2].set_xlabel('Hours prior to flood') fig.tight_layout() fig.suptitle('permutation importance scores for {} model split #{}'.format(model, split)) fig.subplots_adjust(top=0.904) if save: filename = 'permutation_importances_{}_split_{}_all_scorers_{}.png'.format(model, split, features) plt.savefig(savefig_path / filename, bbox_inches='tight') return def plot_feature_importances_from_dss( dss, feat_dim='features', outer_dim='outer_split', features='pwv+pressure+doy', fix_xticklabels=True, axes=None, save=True, ylim=[0, 12], fontsize=16): import matplotlib.pyplot as plt import numpy as np import seaborn as sns from natsort import natsorted sns.set_palette('Dark2', 6) # sns.set_style('whitegrid') # sns.set_style('ticks') sns.set_theme(style='ticks', font_scale=1.5) # use dss.sel(model='RF') first as input dss['feature'] = dss['feature'].str.replace('DOY', 'doy') dss = dss.sel({feat_dim: features}) # tests_ds = dss['test_score'] # tests_ds = tests_ds.sel(scorer=scorer) # max_score_split = int(tests_ds.idxmax(outer_dim).item()) # use mean outer split: dss = 
dss.mean(outer_dim) feats = features.split('+') fn = len(feats) if fn == 1: gr_spec = None fix_xticklabels = False elif fn == 2: gr_spec = [1, 1] elif fn == 3: gr_spec = [5, 5, 2] if axes is None: fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': gr_spec}) try: axes.flatten() except AttributeError: axes = [axes] for i, f in enumerate(feats): fe = [x for x in dss['feature'].values if f in x] dsf = dss['feature_importances'].sel( feature=fe).reset_coords( drop=True) # dsf = dss['PI_mean'].sel( # feature=fe).reset_coords( # drop=True) sorted_feat = natsorted([x for x in dsf.feature.values]) # sorted_feat = [x for x in dsf.feature.values] print(sorted_feat) dsf = dsf.reindex(feature=sorted_feat) dsf = dsf.to_dataset('scorer').to_dataframe( ).reset_index(drop=True) * 100 title = '{}'.format(f.upper()) dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20, width=.8) axes[i].set_title(title, fontsize=fontsize) dsf_sum = dsf.sum().tolist() handles, labels = axes[i].get_legend_handles_labels() labels = [ '{} ({:.1f} %)'.format( x, y) for x, y in zip( labels, dsf_sum)] axes[i].legend(handles=handles, labels=labels, prop={'size': 12}, loc='upper center') axes[i].set_ylabel('Feature importances [%]') axes[i].grid(axis='y', zorder=1) if ylim is not None: [ax.set_ylim(*ylim) for ax in axes] if fix_xticklabels: n = sum(['pwv' in x for x in dss.feature.values]) axes[2].xaxis.set_ticklabels('') hrs = np.arange(-1, -25, -1) axes[0].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14) axes[1].set_xticklabels(hrs, rotation=30, ha="center", fontsize=14) axes[2].tick_params(labelsize=fontsize) axes[0].set_xlabel('Hours prior to flood') axes[1].set_xlabel('Hours prior to flood') fig.tight_layout() if save: filename = 'RF_feature_importances_all_scorers_{}.png'.format(features) plt.savefig(savefig_path / filename, bbox_inches='tight') return def plot_feature_importances( dss, feat_dim='features', features='pwv+pressure+doy', scoring='f1', fix_xticklabels=True, axes=None, save=True): # use dss.sel(model='RF') first as input import matplotlib.pyplot as plt import numpy as np dss = dss.sel({feat_dim: features}) tests_ds = dss[[x for x in dss if 'test' in x]] tests_ds = tests_ds.sel(scoring=scoring) score_ds = tests_ds['test_{}'.format(scoring)] max_score = score_ds.idxmax('outer_kfold').values feats = features.split('+') fn = len(feats) if axes is None: fig, axes = plt.subplots(1, fn, sharey=True, figsize=(17, 5), gridspec_kw={'width_ratios': [1, 4, 4]}) try: axes.flatten() except AttributeError: axes = [axes] for i, f in enumerate(feats): fe = [x for x in dss['feature'].values if f in x] dsf = dss['feature_importances'].sel( feature=fe, outer_kfold=max_score).reset_coords( drop=True) dsf = dsf.to_dataset('scoring').to_dataframe( ).reset_index(drop=True) * 100 title = '{} ({})'.format(f.upper(), scoring) dsf.plot.bar(ax=axes[i], title=title, rot=0, legend=False, zorder=20, width=.8) dsf_sum = dsf.sum().tolist() handles, labels = axes[i].get_legend_handles_labels() labels = [ '{} ({:.1f} %)'.format( x, y) for x, y in zip( labels, dsf_sum)] axes[i].legend(handles=handles, labels=labels, prop={'size': 8}) axes[i].set_ylabel('Feature importance [%]') axes[i].grid(axis='y', zorder=1) if fix_xticklabels: axes[0].xaxis.set_ticklabels('') hrs = np.arange(-24,0) axes[1].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12) axes[2].set_xticklabels(hrs, rotation = 30, ha="center", fontsize=12) axes[1].set_xlabel('Hours prior to flood') 
axes[2].set_xlabel('Hours prior to flood') if save: fig.tight_layout() filename = 'RF_feature_importances_{}.png'.format(scoring) plt.savefig(savefig_path / filename, bbox_inches='tight') return def plot_feature_importances_for_all_scorings(dss, features='doy+pwv+pressure', model='RF', splitfigs=True): import matplotlib.pyplot as plt # station = dss.attrs['pwv_id'].upper() dss = dss.sel(model=model).reset_coords(drop=True) fns = len(features.split('+')) scores = dss['scoring'].values scores1 = ['f1', 'precision', 'recall'] scores2 = ['hss', 'tss', 'accuracy','roc-auc'] if splitfigs: fig, axes = plt.subplots(len(scores1), fns, sharey=True, figsize=(15, 20)) for i, score in enumerate(scores1): plot_feature_importances( dss, features=features, scoring=score, axes=axes[i, :]) fig.suptitle( 'feature importances of {} model'.format(model)) fig.tight_layout() fig.subplots_adjust(top=0.935, bottom=0.034, left=0.039, right=0.989, hspace=0.19, wspace=0.027) filename = 'RF_feature_importances_1.png' plt.savefig(savefig_path / filename, bbox_inches='tight') fig, axes = plt.subplots(len(scores2), fns, sharey=True, figsize=(15, 20)) for i, score in enumerate(scores2): plot_feature_importances( dss, features=features, scoring=score, axes=axes[i, :]) fig.suptitle( 'feature importances of {} model'.format(model)) fig.tight_layout() fig.subplots_adjust(top=0.935, bottom=0.034, left=0.039, right=0.989, hspace=0.19, wspace=0.027) filename = 'RF_feature_importances_2.png' plt.savefig(savefig_path / filename, bbox_inches='tight') else: fig, axes = plt.subplots(len(scores), fns, sharey=True, figsize=(15, 20)) for i, score in enumerate(scores): plot_feature_importances( dss, features=features, scoring=score, axes=axes[i, :]) fig.suptitle( 'feature importances of {} model'.format(model)) fig.tight_layout() fig.subplots_adjust(top=0.935, bottom=0.034, left=0.039, right=0.989, hspace=0.19, wspace=0.027) filename = 'RF_feature_importances.png' plt.savefig(savefig_path / filename, bbox_inches='tight') return dss def plot_ROC_curve_from_dss_nested_CV(dss, outer_dim='outer_split', plot_chance=True, color='tab:blue', fontsize=14, plot_legend=True, title=None, ax=None, main_label=None): import matplotlib.pyplot as plt import numpy as np if ax is None: fig, ax = plt.subplots() if title is None: title = "Receiver operating characteristic" mean_fpr = dss['FPR'].values mean_tpr = dss['TPR'].mean(outer_dim).values mean_auc = dss['roc_auc_score'].mean().item() if np.isnan(mean_auc): return ValueError std_auc = dss['roc_auc_score'].std().item() field = 'TPR' xlabel = 'False Positive Rate' ylabel = 'True Positive Rate' if main_label is None: main_label = r'Mean ROC (AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc) textstr = '\n'.join(['{}'.format( main_label), r'(AUC={:.2f}$\pm${:.2f})'.format(mean_auc, std_auc)]) main_label = textstr ax.plot(mean_fpr, mean_tpr, color=color, lw=3, alpha=.8, label=main_label) std_tpr = dss[field].std(outer_dim).values n = dss[outer_dim].size tprs_upper = np.minimum(mean_tpr + std_tpr, 1) tprs_lower =
np.maximum(mean_tpr - std_tpr, 0)
numpy.maximum
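For reference, a minimal runnable sketch (with made-up numbers) of the ROC-band clipping this numpy.maximum completion performs; the variable names mirror the surrounding code, only the data is illustrative.

import numpy as np

# mean and std of the true-positive rate across outer CV splits, on a fixed FPR grid
mean_tpr = np.array([0.0, 0.45, 0.72, 0.90, 1.0])
std_tpr = np.array([0.0, 0.10, 0.15, 0.08, 0.0])

# clip the +/-1 sigma band to the valid [0, 1] range before shading it on the ROC plot
tprs_upper = np.minimum(mean_tpr + std_tpr, 1)
tprs_lower = np.maximum(mean_tpr - std_tpr, 0)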
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.colors import LogNorm, PowerNorm, Normalize
from scipy import fftpack


def pix_intensity_hist(vals, generator, noise_vector_length, inv_transf, channel_axis,
                       fname=None, Xterm=True, window=None, multichannel=False):
    """Plots a histogram of pixel intensities for validation set and generated samples"""
    num = len(vals)
    samples = generator.predict(np.random.normal(size=(num,1,noise_vector_length)))
    if multichannel:
        samples = np.take(samples,0,axis=channel_axis) # take the scaled channel
    samples = inv_transf(samples) # transform back to original data scale
    valhist, bin_edges = np.histogram(vals.flatten(), bins=25)
    samphist, _ = np.histogram(samples.flatten(), bins=bin_edges)
    centers = (bin_edges[:-1] + bin_edges[1:]) / 2
    plt.figure()
    plt.errorbar(centers, valhist, yerr=np.sqrt(valhist), fmt='o-', label='validation')
    plt.errorbar(centers, samphist, yerr=np.sqrt(samphist), fmt='o-', label='generated')
    plt.yscale('log')
    plt.legend(loc='upper right')
    plt.xlabel('Pixel value')
    plt.ylabel('Counts')
    plt.title('Pixel Intensity Histogram')
    if window:
        plt.axis(window)
    if Xterm:
        plt.draw()
    else:
        plt.savefig(fname, format='png')
        plt.close()
    valhist = valhist[:-5]
    samphist = samphist[:-5]
    return np.sum(np.divide(
np.power(valhist - samphist, 2.0)
numpy.power
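For context, a small self-contained sketch of the histogram distance that the truncated `return np.sum(np.divide(` line appears to compute; dividing by `valhist` is an assumption, since the expression is cut off after `np.divide(`.

import numpy as np

valhist = np.array([120.0, 80.0, 40.0, 10.0])   # validation-set pixel-intensity counts
samphist = np.array([110.0, 90.0, 35.0, 12.0])  # generated-sample counts in the same bins

# chi-square-style distance: squared bin differences scaled by the validation counts (assumed denominator)
dist = np.sum(np.divide(np.power(valhist - samphist, 2.0), valhist))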
"""Performs face alignment and calculates L2 distance between the embeddings of images.""" # MIT License # # Copyright (c) 2016 <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. from __future__ import absolute_import from __future__ import division from __future__ import print_function import PIL from PIL import ImageTk from shutil import copy, rmtree, move from scipy import misc from sklearn.cluster import KMeans from sklearn.metrics import silhouette_samples, silhouette_score, pairwise_distances_argmin_min import matplotlib.pyplot as plt import matplotlib.cm as cm import tensorflow as tf import numpy as np import cv2 import sys import os import argparse import facenet import align.detect_face from tkinter import filedialog, messagebox from tkinter import * from functools import partial os.environ['TF_CPP_MIN_LOG_LEVEL'] = '2' def main(args): class Data: def __init__(self, image_path,cropped_img, prewhitened_img, cluster_label, emb, outlier): self.image_path = image_path self.prewhitened_img = prewhitened_img self.cropped_img = cropped_img self.cluster_label = cluster_label self.emb = emb self.outlier = outlier def print_data(self): print(self.image_path) #print(self.prewhitened_img) #print(self.cluster_label) #print(self.emb) def gui(video_path, output_dir, model, outl_const, frame_interval): def BVideoFunction(video_path): video_path = filedialog.askopenfilename(initialdir = '', title = "Choose Video source",filetypes = (("avi files","*.avi"),("mp4 files","*.mp4"),("all files","*.*"))) retArgs[0] = video_path def BOutpDirFunction(output_dir): output_dir = filedialog.askdirectory(initialdir = output_dir) retArgs[1] = output_dir def BModelFunction(model): model = filedialog.askopenfilename(initialdir = os.path.dirname(os.path.realpath(__file__)) ,title = "Give the path of the model",filetypes = (("pb files","*.pb"),("all files","*.*"))) retArgs[2] = model def BRunFunction(): retArgs[3] = int(EFrames.get()) retArgs[4] = float(EConstant.get()) root.destroy() def on_closing(): if messagebox.askokcancel("Quit", "Do you want to quit?"): root.destroy() exit() retArgs = [video_path, output_dir, model, frame_interval, outl_const] root = Tk(className = ' Finding Distinct Faces') root.configure(background='#A3CEDC') BVideoFuncArg = partial(BVideoFunction, video_path) BVideo = Button(root, text =" Choose Video source ", command = BVideoFuncArg) BVideo.grid(ipadx=3, ipady=3, padx=4, pady=4) BOutpDirFuncArg = partial(BOutpDirFunction, output_dir) BOutpDir = Button(root, text ="Choose Output directory 
\n(Needs to be empty)", command = BOutpDirFuncArg) BOutpDir.grid(ipadx=2, ipady=2, padx=4, pady=4) LFrames = Label( root, text='Frames after which\n to exrtact image:' ) LFrames.grid(column=2, row=0, ipadx=2, ipady=2, padx=4, pady=4) frames = StringVar() frames.set(retArgs[3]) EFrames = Entry(root, bd =5, textvariable = frames) EFrames.grid(column=3, row=0, ipadx=2, ipady=2, padx=4, pady=4) LConstant = Label( root, text='Silhouette constant for locating outliers\n(Recommendation: Do not modify):' ) LConstant.grid(column=2, row=1, ipadx=1, ipady=1, padx=4, pady=4) constant = StringVar() constant.set(outl_const) EConstant = Entry(root, bd =5, textvariable = constant) EConstant.grid(column=3, row=1, ipadx=2, ipady=2, padx=4, pady=4) BModelFuncArg = partial(BModelFunction, model) BModel = Button(root, text ="Give the path of the model protobuf (.pb) file", command = BModelFuncArg) BModel.grid(row=3, column=0, ipadx=2, ipady=2, padx=4, pady=4) BRun = Button(root, text ="RUN", command = BRunFunction) BRun.grid(column=2, row=4, ipadx=3, ipady=3, padx=4, pady=4) root.protocol("WM_DELETE_WINDOW", on_closing) root.mainloop() return tuple(retArgs) def frame_getter(frame_interval, frame = None, cl = None): cap = cv2.VideoCapture(video_path) def write_img(cl): ret, frame_read = cap.read() path = output_dir_vid+ "/frame-" + str(int(cap.get(1)-1)) + ".jpg" if not (os.path.isfile(path)): cv2.imwrite(path, frame_read) temp_data_list.append(Data(image_path = path, prewhitened_img = None, cropped_img = None, cluster_label = cl, emb = None, outlier = False)) if frame is None: frame_count = 0 while(cap.isOpened()): ret, frame = cap.read() if ret==True: if (frame_count % frame_interval) == 0: path = output_dir_vid+ "/frame-" + str(frame_count) + ".jpg" cv2.imwrite(path, frame) data_list.append(Data(image_path = path, prewhitened_img = None, cropped_img = None, cluster_label = None, emb = None, outlier = False)) frame_count+=1 else: break else: total_frames = cap.get(7) #to thelw g an einai sto telos if frame > 2: cap.set(1, frame-2) else: cap.set(1, frame+1) for i in range(2): write_img(cl) for i in range(2): write_img(cl) if total_frames > frame + 2: cap.read() #apla proxwraei g na mn ksanagrapsei tin idia eikona for i in range(2): write_img(cl) # kai meta ksanatrexw ooolo apo tin arxi g ti 1i periptwsi # When everything is done, release the capture cap.release() def load_and_align_data(dl): minsize = 150 # minimum size of face threshold = [ 0.8, 0.9, 0.9 ] # three steps's threshold factor = 0.709 # scale factor print('Creating networks and loading parameters') with tf.Graph().as_default(): gpu_options = tf.GPUOptions(per_process_gpu_memory_fraction=args.gpu_memory_fraction) sess = tf.Session(config=tf.ConfigProto(gpu_options=gpu_options, log_device_placement=False)) with sess.as_default(): pnet, rnet, onet = align.detect_face.create_mtcnn(sess, None) nrof_samples = len(dl) #nrof_samples_dif = nrof_samples # to thelw g ta multiple faces (wste n mn ksanampainei se epeksergasmena dl to main loop tou while) i=0 while i < nrof_samples: img = misc.imread(os.path.expanduser(dl[i].image_path), mode='RGB') img_size = np.asarray(img.shape)[0:2] bounding_boxes, _ = align.detect_face.detect_face(img, minsize, pnet, rnet, onet, threshold, factor) nrof_faces = bounding_boxes.shape[0] if bounding_boxes.size==0: print('image:'+dl[i].image_path+'\n has not a detectable face') try: os.remove(dl[i].image_path) #diagrafw ta frames xwris proswpa del dl[i] except FileNotFoundError: print("warning: file not found (already 
deleted)") nrof_samples-=1 i-=1 elif bounding_boxes.shape[0]!=1: for j in range(nrof_faces): det = np.squeeze(bounding_boxes[j,0:4]) bb = np.zeros(4, dtype=np.int32) bb[0] = np.maximum(det[0]-args.margin/2, 0) bb[1] = np.maximum(det[1]-args.margin/2, 0) bb[2] = np.minimum(det[2]+args.margin/2, img_size[1]) bb[3] = np.minimum(det[3]+args.margin/2, img_size[0]) cropped = img[bb[1]:bb[3],bb[0]:bb[2],:] aligned = misc.imresize(cropped, (args.image_size, args.image_size), interp='bilinear') prewhitened = facenet.prewhiten(aligned) dl.append(Data(image_path = dl[i].image_path, prewhitened_img = prewhitened, cropped_img = aligned, cluster_label = dl[i].cluster_label, emb = dl[i].emb, outlier = dl[i].outlier)) print('image:'+dl[i].image_path+'\n has more than one face') del dl[i] #diagrafw to arxiko datalist, evala alla pio panw nrof_samples-=1 i-=1 else: det = np.squeeze(bounding_boxes[0,0:4]) bb = np.zeros(4, dtype=np.int32) bb[0] = np.maximum(det[0]-args.margin/2, 0) bb[1] =
np.maximum(det[1]-args.margin/2, 0)
numpy.maximum
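For reference, a minimal sketch of the margin-padded, image-clamped bounding box that this numpy.maximum call continues; the numbers are illustrative and `margin` stands in for `args.margin`.

import numpy as np

det = np.array([33.5, 48.2, 170.9, 205.3])  # x1, y1, x2, y2 from the MTCNN detector
img_size = np.array([480, 640])             # image (height, width)
margin = 44                                 # stands in for args.margin

bb = np.zeros(4, dtype=np.int32)
bb[0] = np.maximum(det[0] - margin / 2, 0)            # clamp left edge at 0
bb[1] = np.maximum(det[1] - margin / 2, 0)            # clamp top edge at 0
bb[2] = np.minimum(det[2] + margin / 2, img_size[1])  # clamp right edge at image width
bb[3] = np.minimum(det[3] + margin / 2, img_size[0])  # clamp bottom edge at image height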
import os import numpy as np import pickle import astropy.units as u import astropy.constants as const import nexoclom.math as mathMB from nexoclom.atomicdata import atomicmass def sputdist(velocity, U, alpha, beta, species): mspecies = atomicmass(species) v_b = np.sqrt(2*U/mspecies) v_b = v_b.to(u.km/u.s) f_v = velocity**(2*beta+1) / (velocity**2 + v_b**2)**alpha f_v /= np.max(f_v) return f_v.value def MaxwellianDist(velocity, temperature, species): vth2 = 2*temperature*const.k_B/atomicmass(species) vth2 = vth2.to(u.km**2/u.s**2) f_v = velocity**3 * np.exp(-velocity**2/vth2) f_v /= np.max(f_v) return f_v.value def xyz_from_lonlat(lon, lat, isplan, exobase): if isplan: # Starting at a planet # 0 deg longitude = subsolar pt. = (0, -1, 0) # 90 deg longitude = dusk pt. = (1, 0, 0) # 270 deg longitude = dawn pt. = (-1, 0, 0) x0 = exobase * np.sin(lon) * np.cos(lat) y0 = -exobase * np.cos(lon) * np.cos(lat) z0 = exobase * np.sin(lat) else: # Starting at a satellite # 0 deg longitude = sub-planet pt. = (0, -1, 0) # 90 deg longitude = leading pt. = (-1, 0, 0) # 270 deg longitude = trailing pt. = (1, 0, 0) x0 = -exobase * np.sin(lon) * np.cos(lat) y0 = -exobase * np.cos(lon) * np.cos(lat) z0 = exobase * np.sin(lat) X0 = np.array([x0, y0, z0]) # Error checking assert np.all(np.isfinite(X0)), 'Non-Finite values of X0' return X0 def surface_distribution(outputs): """ Distribute packets on a sphere with radius r = SpatialDist.exobase Returns (x0, y0, z0, lon0, lat0) for satellites, assumes satellite is at phi=0 """ spatialdist = outputs.inputs.spatialdist npack = outputs.npackets if spatialdist.type == 'uniform': # Choose the latitude: f(lat) = cos(lat) lat0 = spatialdist.latitude if lat0[0] == lat0[1]: lat = np.zeros(npack)+lat0[0] else: ll = (np.sin(lat0[0]), np.sin(lat0[1])) sinlat = ll[0] + (ll[1]-ll[0]) * outputs.randgen.random(npack) lat = np.arcsin(sinlat) # Choose the longitude: f(lon) = 1/(lonmax-lonmin) lon0 = spatialdist.longitude if lon0[0] > lon0[1]: lon0 = [lon0[0], lon0[1]+2*np.pi*u.rad] lon = ((lon0[0] + (lon0[1]-lon0[0]) * outputs.randgen.random(npack)) % (2*np.pi*u.rad)) elif spatialdist.type == 'surface map': # Choose lon, lat based on predetermined map if spatialdist.mapfile == 'default': mapfile = os.path.join(os.path.dirname(__file__), 'data', f'{outputs.inputs.options.species}_surface_composition.pkl') with open(mapfile, 'rb') as mfile: sourcemap = pickle.load(mfile) elif spatialdist.mapfile.endswith('.pkl'): with open(spatialdist.mapfile, 'rb') as mfile: sourcemap = pickle.load(mfile) elif spatialdist.mapfile.endswith('.sav'): from scipy.io import readsav sourcemap_ = readsav(spatialdist.mapfile)['sourcemap'] sourcemap = {'longitude':sourcemap_['longitude'][0]*u.rad, 'latitude':sourcemap_['latitude'][0]*u.rad, 'abundance':sourcemap_['map'][0].transpose(), 'coordinate_system':str(sourcemap_['coordinate_system'][0])} else: assert 0, 'Mapfile is the wrong format.' 
lon, lat = mathMB.random_deviates_2d(sourcemap['abundance'], sourcemap['longitude'], np.sin(sourcemap['latitude']), npack) lat = np.arcsin(lat) if (('planet' in sourcemap['coordinate_system']) and (outputs.inputs.spatialdist.subsolarlon is not None)): # Need to rotate to model coordinate system lon = ((outputs.inputs.spatialdist.subsolarlon.value - lon + 2*np.pi) % (2*np.pi)) elif ('planet' in sourcemap['coordinate_system']): raise ValueError('inputs.spatialdist.subsolarlon is None') else: pass elif spatialdist.type == 'surface spot': lon0 = spatialdist.longitude lat0 = spatialdist.latitude sigma0 = spatialdist.sigma spot0 = ((np.sin(lon0)*np.cos(lat0)).value, (-np.cos(lon0)*np.cos(lat0)).value, (np.sin(lat0)).value) longitude = np.linspace(0, 2*np.pi, 361)*u.rad latitude = np.linspace(-np.pi/2, np.pi/2, 181)*u.rad ptsx = np.outer(np.sin(longitude.value), np.cos(latitude.value)) ptsy = -np.outer(np.cos(longitude.value), np.cos(latitude.value)) ptsz = -np.outer(np.ones_like(longitude.value), np.sin(latitude.value)) cosphi = ptsx*spot0[0]+ptsy*spot0[1]+ptsz*spot0[2] cosphi[cosphi > 1] = 1 cosphi[cosphi < -1] = -1 phi = np.arccos(cosphi) sourcemap = np.exp(-phi/sigma0.value) lon, lat = mathMB.random_deviates_2d(sourcemap, longitude, np.sin(latitude), npack) lat = np.arcsin(lat) else: assert 0, "Can't get here" X_ = xyz_from_lonlat(lon, lat, outputs.inputs.geometry.planet.type == 'Planet', spatialdist.exobase) outputs.X0['x'] = X_[0,:] outputs.X0['y'] = X_[1,:] outputs.X0['z'] = X_[2,:] outputs.X0['longitude'] = lon.value outputs.X0['latitude'] = lat.value local_time = (lon.value * 12/np.pi + 12) % 24 outputs.X0['local_time'] = local_time def speed_distribution(outputs): speeddist = outputs.inputs.speeddist npackets = outputs.npackets if speeddist.type.lower() == 'gaussian': if speeddist.sigma == 0.: v0 = np.zeros(npackets)*u.km/u.s + speeddist.vprob else: v0 = (outputs.randgen.standard_normal(npackets) * speeddist.sigma.value + speeddist.vprob.value) v0 *= speeddist.vprob.unit elif speeddist.type == 'sputtering': velocity = np.linspace(.1, 50, 5000)*u.km/u.s f_v = sputdist(velocity, speeddist.U, speeddist.alpha, speeddist.beta, outputs.inputs.options.species) v0 = (mathMB.random_deviates_1d(velocity.value, f_v, npackets) * velocity.unit) elif speeddist.type == 'maxwellian': if speeddist.temperature != 0*u.K: # Use a constant temperature amass = atomicmass(outputs.inputs.options.species) v_th = np.sqrt(2*speeddist.temperature*const.k_B/amass) v_th = v_th.to(u.km/u.s) velocity = np.linspace(0.1*u.km/u.s, v_th*5, 5000) f_v = MaxwellianDist(velocity, speeddist.temperature, outputs.inputs.options.species) v0 = (mathMB.random_deviates_1d(velocity.value, f_v, npackets) * velocity.unit) else: # Use a surface temperature map # Need to write this assert 0, 'Not implemented yet' elif speeddist.type == 'flat': v0 = (outputs.randgen.random(npackets)*2*speeddist.delv + speeddist.vprob - speeddist.delv) elif speeddist.type == 'user defined': source = pickle.load(open(speeddist.vdistfile, 'rb')) v0 = mathMB.random_deviates_1d(source['velocity'].value, source['vdist'], npackets) * source['velocity'].unit else: # Need to add more distributions assert 0, 'Distribtuion does not exist' v0 = v0.to(outputs.unit/u.s) outputs.X0['v'] = v0.value assert np.all(np.isfinite(v0)), 'Infinite values for v0' return v0 def angular_distribution(outputs): npackets = outputs.npackets angulardist = outputs.inputs.angulardist if angulardist.type == 'none': return elif angulardist.type == 'radial': # All packets going radially 
outward alt = (np.zeros(npackets) + np.pi/2.) * u.rad az = np.zeros(npackets) * u.rad elif angulardist.type == 'isotropic': # Choose the altitude -- f(alt) = cos(alt) alt0 = angulardist.altitude aa = (np.sin(alt0[0]), np.sin(alt0[1])) sinalt = outputs.randgen.random(npackets) * (aa[1] - aa[0]) + aa[0] alt = np.arcsin(sinalt) # Choose the azimuth -- f(az) = 1/(azmax-azmin) az0, az1 = angulardist.azimuth m = (az0, az1) if az0 < az1 else (az1, az0+2*np.pi) az = m[0] + (m[1]-m[0])*outputs.randgen.random(npackets) else: assert 0, 'Angular Distribution not defined.' # Find the velocity components in coordinate system centered on packet v_rad = np.sin(alt.value) # Radial component of velocity v_tan0 = np.cos(alt.value) * np.cos(az.value) # Component along latitude (points E) v_tan1 = np.cos(alt.value) * np.sin(az.value) # Component along longitude (points N) # Now rotate to proper surface point # v_ren = M # v_xyz => v_xyz = invert(M) # v_ren X0 = outputs.X0.values x0, y0, z0 = X0[:,2], X0[:,3], X0[:,4] rad = np.array([x0, y0, z0]).transpose() east = np.array([y0, -x0,
np.zeros_like(z0)
numpy.zeros_like
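For context, a small sketch of the tangent-basis construction this numpy.zeros_like completion belongs to; the `.transpose()` on the `east` line is an assumption that mirrors the `rad` line above, and the positions are made up.

import numpy as np

# unit-sphere starting positions of a few packets (the x0, y0, z0 columns of X0)
x0 = np.array([0.0, 0.6, -0.3])
y0 = np.array([-1.0, -0.8, 0.9])
z0 = np.array([0.0, 0.0, 0.3])

rad = np.array([x0, y0, z0]).transpose()                   # radial (outward) directions
east = np.array([y0, -x0, np.zeros_like(z0)]).transpose()  # eastward tangent, zero vertical component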
import os
import sys
from util import *
import numpy as np
from multiprocessing import Pool

# Load processed rgb video frames and compute the dark channel
# Save the dark channel as the 4th channel with rgb together
# For dark channel, see paper "Single Image Haze Removal Using Dark Channel Prior"
# https://ieeexplore.ieee.org/abstract/document/5567108
# The input and output file format are numpy.array

def main(argv):
    rgb_dir = "../data/rgb/"
    rgbd_dir = "../data/rgbd/" # rgb + dark channel
    check_and_create_dir(rgb_dir)
    check_and_create_dir(rgbd_dir)
    file_names = get_all_file_names_in_folder(rgb_dir)
    p = Pool() # use all available CPUs
    p.map(compute_dark_channel, file_names)
    print("Done compute_dark_channel.py")

def compute_dark_channel(file_path):
    print("Process", file_path)
    rgb_dir = "../data/rgb/"
    rgbd_dir = "../data/rgbd/" # rgb + dark channel
    rgb =
np.load(rgb_dir + file_path)
numpy.load
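For reference, a minimal sketch of loading one of the saved frame arrays with numpy.load; the file name here is hypothetical, since the real names come from `get_all_file_names_in_folder(rgb_dir)`.

import numpy as np

rgb_dir = "../data/rgb/"
file_path = "video_0001.npy"  # hypothetical file name

rgb = np.load(rgb_dir + file_path)  # processed RGB frames stored as a numpy array
print(rgb.shape)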
import re
import string
from typing import Dict, List

from jina.executors.decorators import batching, as_ndarray
from jina.executors.encoders import BaseTextEncoder
from jina.executors.rankers import Chunk2DocRanker
from jina.executors.segmenters import BaseSegmenter
import numpy as np


class DummySentencizer(BaseSegmenter):
    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        punct_chars = [',']
        self._slit_pat = re.compile(
            '\s*([^{0}]+)(?<!\s)[{0}]*'.format(''.join(set(punct_chars)))
        )

    def segment(self, text: str, *args, **kwargs) -> List[Dict]:
        """
        Split the text into sentences.

        :param text: the raw text
        :return: a list of chunks
        """
        results = []
        ret = [
            (m.group(0), m.start(), m.end())
            for m in re.finditer(self._slit_pat, text)
        ]
        if not ret:
            ret = [(text, 0, len(text))]
        for ci, (r, s, e) in enumerate(ret):
            f = ''.join(filter(lambda x: x in string.printable, r))
            f = re.sub('\n+', ' ', f).strip()
            f = f[:100]
            results.append(dict(text=f))
        return results


class DummyMinRanker(Chunk2DocRanker):
    """
    :class:`MinRanker` calculates the score of the matched doc from the matched chunks.
    For each matched doc, the score is `1 / (1 + s)`, where `s` is the minimal score
    from all the matched chunks belonging to this doc.

    .. warning:: Here we suppose that the smaller chunk score means the more similar.
    """

    def __init__(self, *args, **kwargs):
        super().__init__(*args, **kwargs)
        import warnings

        warnings.warn(
            "MinRanker is deprecated. Please use SimpleAggregateRanker instead",
            DeprecationWarning,
            stacklevel=2,
        )

    def _get_score(
        self, match_idx, query_chunk_meta, match_chunk_meta, *args, **kwargs
    ):
        return self.get_doc_id(match_idx), 1.0 / (1.0 + match_idx[self.COL_SCORE].min())


class DummyOneHotTextEncoder(BaseTextEncoder):
    """
    One-hot Encoder encodes the characters into one-hot vectors.
    ONLY FOR TESTING USAGES.

    :param on_value: the default value for the locations represented by characters
    :param off_value: the default value for the locations not represented by characters
    """

    def __init__(self, on_value: float = 1, off_value: float = 0, *args, **kwargs):
        super().__init__(*args, **kwargs)
        self.offset = 32
        self.dim = (
            127 - self.offset + 2
        )  # only the Unicode code point between 32 and 127 are embedded, and the rest are considered as ``UNK```
        self.unk = self.dim
        self.on_value = on_value
        self.off_value = off_value
        self.embeddings = None

    def post_init(self):
        self.embeddings = (
np.eye(self.dim)
numpy.eye
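# A small illustration of the character one-hot scheme the encoder above
# describes: code points 32..127 each get a row of an identity matrix and
# everything else falls into a trailing UNK row. This is only a sketch of the
# idea (with unk placed at the last valid index); the executor's actual
# post_init/encode logic may differ.
import numpy as np

offset = 32
dim = 127 - offset + 2            # printable range plus one UNK bucket
unk = dim - 1
embeddings = np.eye(dim)

def encode_chars(text: str) -> np.ndarray:
    idx = [ord(c) - offset if offset <= ord(c) <= 127 else unk for c in text]
    return embeddings[idx]        # shape: (len(text), dim)

vecs = encode_chars("hi" + chr(200))
assert vecs.shape == (3, dim) and vecs[2, unk] == 1.0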
import numpy as np import math import rules import copy import gym import os import tensorflow as tf from geometric_primitives import brick from geometric_primitives import bricks from geometric_primitives import utils_meshes import matplotlib.pyplot as plt #from mpl_toolkits.mplot3d import Axes3D from rules.rules_mnist import LIST_RULES_2_4 import constants str_path = constants.str_path_mnist import warnings warnings.filterwarnings("ignore", category=np.VisibleDeprecationWarning) voxel_width = 14 voxel_dim = (14, 8, 14) class LegoEnv_Mnist_No_Mask(gym.Env): def __init__(self, max_bricks = 500, class_to_build = 0, num_bricks_to_build = None, testing = False, target_class_conditioned = False, test_overfitting = True): self.num_max_bricks = max_bricks self.num_bricks_to_build = num_bricks_to_build self.class_to_build = class_to_build self.change_coordinate = None self.stacked_representation = None self.testing = testing self.target_class_conditioned = target_class_conditioned self.test_overfitting = test_overfitting self.target_voxel = None self.prev_reward = None self.max_node_num = 45 self.loop_target_index = 0 self.action_space = gym.spaces.Discrete(len(LIST_RULES_2_4) * 2) self.observation_space = gym.spaces.Dict({'adjacency' : gym.spaces.Box(low = 0, high = 1, shape = (self.max_node_num, self.max_node_num), dtype = np.float32), 'node_attributes' : gym.spaces.Box(low = (-1) * voxel_width, high = voxel_width, shape = (self.max_node_num, 4), dtype = np.float32), 'node_mask' : gym.spaces.Box(low = 0, high = 1., shape = (self.max_node_num, 1), dtype = np.float32), 'target_information' : gym.spaces.Box(low = 0., high = 1., shape = (14, 14), dtype = np.float32), 'edge_attributes' : gym.spaces.Box(low = -20., high = 20., shape = (self.max_node_num, self.max_node_num, 4), dtype = np.float32), 'target_class' : gym.spaces.Discrete(1), 'current_pic' : gym.spaces.Box(low = 0., high = 1., shape = (14, 14), dtype = np.float32)}) def reset(self): self.target_voxel, self.target, self.target_num_brick, \ self.target_class = self.load_target_from_class(class_info=None) self.target_embedding = self.target btv_index = np.min(np.where(np.sum(self.target_voxel, axis=(0,1)) > 0)) btv = self.target_voxel[:,:,btv_index] bottom_trans_pad = np.array(list(map(lambda i : (list(map(lambda j : np.sum(btv[i-1, j-1:j+2] + btv[i, j-1:j+2] + btv[i+1, j-1:j+2]), range(1, btv.shape[1]-1)))), range(1, btv.shape[0]-1)))) bottom_trans = np.zeros((btv.shape[0], btv.shape[1])).astype(bottom_trans_pad.dtype) bottom_trans[1:-1, 1:-1] = bottom_trans_pad x_s, y_s = np.where(bottom_trans == np.max(bottom_trans)) self.translation = np.array([x_s[0] + 1, y_s[0] + 1, btv_index, 0]) brick_ = brick.Brick() ''' 0 : Vertical / 1 : Horizontal ''' random_direction = np.random.randint(2) random_x = np.random.randint(-3, 4) random_y = np.random.randint(-3, 4) self.random_translation = np.array([random_x, random_y, 0, 0]) # brick_.set_position(self.random_translation[:-1]) # brick_.set_direction(random_direction) brick_.set_position([0, 0, 0]) brick_.set_direction(0) self.bricks_ = bricks.Bricks(self.num_max_bricks, '0') self.bricks_.add(brick_) init_node_coordinates = np.concatenate((brick_.get_position(), [brick_.get_direction()])) node_matrix = np.zeros((self.max_node_num, 4), dtype=init_node_coordinates.dtype) node_matrix[0] = init_node_coordinates # node_matrix = np.ones((self.max_node_num, 4), dtype=init_node_coordinates.dtype) node_mask = np.expand_dims(np.eye(self.max_node_num, dtype=np.float32)[0], axis=-1) self.brick_voxel = 
np.zeros(shape = voxel_dim, dtype=np.int32) self._occupy(init_node_coordinates, self.brick_voxel) self.prev_reward = np.sum(np.logical_and(self.brick_voxel, self.target_voxel)) / np.sum(np.logical_or(self.brick_voxel, self.target_voxel)) self.prev_abs_reward = np.sum(np.logical_and(self.brick_voxel, self.target_voxel)) self.last_accum_reward = 0 X, A, _, _ = self.bricks_.get_graph() A += 1 * np.eye(A.shape[0], dtype=A.dtype) A = A / np.sum(A, axis = 1)[:, np.newaxis] adjacency_matrix = np.zeros((self.max_node_num, self.max_node_num), dtype=A.dtype) adjacency_matrix[:A.shape[0],:A.shape[0]] = A edge_attributes = np.zeros((self.max_node_num, self.max_node_num, 4)).astype(np.float32) displacement = X[np.newaxis, :] - X[:, np.newaxis] edge_attributes[:displacement.shape[0], :displacement.shape[1], :] = displacement current_pic = self._get_current_pic() self.obs = dict(zip(['adjacency', 'node_attributes', 'node_mask', 'target_information', 'edge_attributes', 'target_class', 'current_pic'], (adjacency_matrix.astype(np.float32), node_matrix.astype(np.float32), node_mask.astype(np.float32), self.target_embedding, edge_attributes, self.target_class, current_pic))) return self.get_state() def step(self, actions): pivot_fragment_index, relative_action, pretrain_step = actions if pretrain_step == 0: done = False available_actions = self.get_masks() self._update_graph_randomly() X, A, _, _ = self.bricks_.get_graph() # if np.sum(self.obs['node_mask']) >= self.target_num_brick[0] or A.shape[0] >= self.target_num_brick[0]: if np.sum(self.obs['node_mask']) > 30: done = True return self.get_state(), 0, done, available_actions else: done = False new_brick_coordinate, valid_flag = self._add_node_and_edge(pivot_fragment_index, relative_action) if valid_flag == False: # episode_reward = self.get_episode_reward() episode_reward = self.get_episode_reward_with_intermediate_as_well() return self.get_state(), 0., True, episode_reward else: self._update_graph(new_brick_coordinate) reward = self.calculate_reward(valid_flag) episode_reward = self.get_episode_reward() X, A, _, _ = self.bricks_.get_graph() if np.sum(self.obs['node_mask']) >= self.target_num_brick[0] or A.shape[0] >= self.target_num_brick[0]: done = True if np.sum(self.obs['node_mask']) == self.max_node_num: done = True print(self.loop_target_index) if valid_flag == False: done = True return self.get_state(), reward, done, episode_reward def render(self): visualization.visualize(self.bricks_) def get_state(self): return copy.deepcopy(self.obs) def _add_node_and_edge(self, pivot_fragment_index, relative_action): X, A, _, _ = self.bricks_.get_graph() if pivot_fragment_index > X.shape[0] - 1: return
np.zeros(4)
numpy.zeros
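# The prev_reward computed in reset() above is the intersection-over-union
# (Jaccard index) between the built brick voxels and the target voxels. A
# standalone version of that ratio, with random voxel grids as stand-ins for
# the environment's arrays:
import numpy as np

def voxel_iou(brick_voxel, target_voxel):
    intersection = np.sum(np.logical_and(brick_voxel, target_voxel))
    union = np.sum(np.logical_or(brick_voxel, target_voxel))
    return intersection / union if union > 0 else 0.0

rng = np.random.default_rng(0)
a = rng.integers(0, 2, size=(14, 8, 14))
b = rng.integers(0, 2, size=(14, 8, 14))
print(voxel_iou(a, b))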
import nengo import numpy as np from numpy import random import matplotlib.pyplot as plt import matplotlib.cm as cm import tensorflow as tf import os from nengo.dists import Choice from datetime import datetime from nengo_extras.data import load_mnist import pickle from nengo.utils.matplotlib import rasterplot import time from InputData import PresentInputWithPause from nengo_extras.graphviz import net_diagram from nengo.neurons import LIFRate from nengo.params import Parameter, NumberParam, FrozenObject from nengo.dists import Choice, Distribution, get_samples, Uniform from nengo.utils.numpy import clip, is_array_like from utilis import * from args_mnist import args as my_args import itertools import random import logging # import nengo_spinnaker import nengo_ocl def evaluate_mnist_single(args): ############################# # load the data ############################# input_nbr = args.input_nbr (image_train, label_train), (image_test, label_test) = (tf.keras.datasets.mnist.load_data()) probe_sample_rate = (input_nbr/10)/1000 #Probe sample rate. Proportional to input_nbr to scale down sampling rate of simulations # probe_sample_rate = 1000 image_train_filtered = [] label_train_filtered = [] x = args.digit for i in range(0,input_nbr): if label_train[i] == x: image_train_filtered.append(image_train[i]) label_train_filtered.append(label_train[i]) image_train_filtered = np.array(image_train_filtered) label_train_filtered = np.array(label_train_filtered) #Simulation Parameters #Presentation time presentation_time = args.presentation_time #0.20 #Pause time pause_time = args.pause_time #Iterations iterations=args.iterations #Input layer parameters n_in = args.n_in # g_max = 1/784 #Maximum output contribution g_max = args.g_max n_neurons = args.n_neurons # Layer 1 neurons inhib_factor = -0*100 #Multiplication factor for lateral inhibition n_neurons = 1 input_neurons_args = { "n_neurons":n_in, "dimensions":1, "label":"Input layer", "encoders":nengo.dists.Uniform(1,1), "gain":nengo.dists.Uniform(2,2), "bias":nengo.dists.Uniform(0,0), "neuron_type":MyLIF_in(tau_rc=args.tau_in,min_voltage=-1, amplitude=args.g_max) # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_in,min_voltage=0)#SpikingRelu neuron. } #Layer 1 parameters layer_1_neurons_args = { "n_neurons":n_neurons, "dimensions":1, "label":"Layer 1", "encoders":nengo.dists.Uniform(1,1), # "gain":nengo.dists.Uniform(2,2), # "bias":nengo.dists.Uniform(0,0), "intercepts":nengo.dists.Choice([0]), "max_rates":nengo.dists.Choice([20,20]), "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 1), seed=1), # "neuron_type":nengo.neurons.LIF(tau_rc=args.tau_out, min_voltage=0) # "neuron_type":MyLIF_out(tau_rc=args.tau_out, min_voltage=-1) "neuron_type":STDPLIF(tau_rc=args.tau_out, min_voltage=-1), } # "noise":nengo.processes.WhiteNoise(dist=nengo.dists.Gaussian(0, 20), seed=1), #Lateral Inhibition parameters lateral_inhib_args = { "transform": inhib_factor* (np.full((n_neurons, n_neurons), 1) -
np.eye(n_neurons)
numpy.eye
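# The lateral-inhibition transform in the snippet above is an all-to-all
# matrix with the self-connections removed: np.full((n, n), 1) - np.eye(n).
# A tiny illustration with n = 3 and an assumed inhibition factor of -2:
import numpy as np

n_neurons = 3
inhib_factor = -2.0
transform = inhib_factor * (np.full((n_neurons, n_neurons), 1) - np.eye(n_neurons))
assert np.all(np.diag(transform) == 0)                           # no self-inhibition
assert np.all(transform[~np.eye(n_neurons, dtype=bool)] == inhib_factor)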
#----------------------------------------------------------------------------- # This file is part of the 'EPIX HR Firmware'. It is subject to # the license terms in the LICENSE.txt file found in the top-level directory # of this distribution and at: # https://confluence.slac.stanford.edu/display/ppareg/LICENSE.html. # No part of the 'EPIX HR Firmware', including this file, may be # copied, modified, propagated, or distributed except according to the terms # contained in the LICENSE.txt file. #----------------------------------------------------------------------------- import pyrogue as pr import epix_hr_core as epixHrCore import numpy as np import time class AsicDeserHr16bRegisters6St(pr.Device): def __init__(self, **kwargs): super().__init__(description='20 bit Deserializer Registers', **kwargs) # Creation. memBase is either the register bus server (srp, rce mapped memory, etc) or the device which # contains this object. In most cases the parent and memBase are the same but they can be # different in more complex bus structures. They will also be different for the top most node. # The setMemBase call can be used to update the memBase for this Device. All sub-devices and local # blocks will be updated. ############################################# # Create block / variable combinations ############################################# #Setup registers & variables self.add(pr.RemoteVariable(name='StreamsEn_n', description='Enable/Disable', offset=0x00000000, bitSize=6, bitOffset=0, base=pr.UInt, mode='RW')) self.add(pr.RemoteVariable(name=('IdelayRst'), description='iDelay reset', offset=0x00000008, bitSize=6, bitOffset=0, base=pr.UInt, disp = '{:#x}', mode='RW')) self.add(pr.RemoteVariable(name=('IserdeseRst'), description='iSerdese3 reset', offset=0x0000000C, bitSize=6, bitOffset=0, base=pr.UInt, disp = '{:#x}', mode='RW')) self.add(pr.RemoteVariable(name='Resync', description='Resync', offset=0x00000004, bitSize=1, bitOffset=0, base=pr.Bool, verify = False, mode='RW')) self.add(pr.RemoteVariable(name='Delay0_', description='Data ADC Idelay3 value', offset=0x00000010, bitSize=10, bitOffset=0, base=pr.UInt, disp = '{}', verify=False, mode='RW', hidden=True)) self.add(pr.LinkVariable( name='Delay0', description='Data ADC Idelay3 value', linkedGet=self.getDelay, linkedSet=self.setDelay, dependencies=[self.Delay0_])) self.add(pr.RemoteVariable(name='Delay1_', description='Data ADC Idelay3 value', offset=0x00000014, bitSize=10, bitOffset=0, base=pr.UInt, disp = '{}', verify=False, mode='RW', hidden=True)) self.add(pr.LinkVariable( name='Delay1', description='Data ADC Idelay3 value', linkedGet=self.getDelay, linkedSet=self.setDelay, dependencies=[self.Delay1_])) self.add(pr.RemoteVariable(name='Delay2_', description='Data ADC Idelay3 value', offset=0x00000018, bitSize=10, bitOffset=0, base=pr.UInt, disp = '{}', verify=False, mode='RW', hidden=True)) self.add(pr.LinkVariable( name='Delay2', description='Data ADC Idelay3 value', linkedGet=self.getDelay, linkedSet=self.setDelay, dependencies=[self.Delay2_])) self.add(pr.RemoteVariable(name='Delay3_', description='Data ADC Idelay3 value', offset=0x0000001C, bitSize=10, bitOffset=0, base=pr.UInt, disp = '{}', verify=False, mode='RW', hidden=True)) self.add(pr.LinkVariable( name='Delay3', description='Data ADC Idelay3 value', linkedGet=self.getDelay, linkedSet=self.setDelay, dependencies=[self.Delay3_])) self.add(pr.RemoteVariable(name='Delay4_', description='Data ADC Idelay3 value', offset=0x00000020, bitSize=10, bitOffset=0, base=pr.UInt, disp = 
'{}', verify=False, mode='RW', hidden=True)) self.add(pr.LinkVariable( name='Delay4', description='Data ADC Idelay3 value', linkedGet=self.getDelay, linkedSet=self.setDelay, dependencies=[self.Delay4_])) self.add(pr.RemoteVariable(name='Delay5_', description='Data ADC Idelay3 value', offset=0x00000024, bitSize=10, bitOffset=0, base=pr.UInt, disp = '{}', verify=False, mode='RW', hidden=True)) self.add(pr.LinkVariable( name='Delay5', description='Data ADC Idelay3 value', linkedGet=self.getDelay, linkedSet=self.setDelay, dependencies=[self.Delay5_])) for i in range(0, 6): self.add(pr.RemoteVariable(name=('LockErrors%d'%i), description='LockErrors', offset=0x00000100+i*4, bitSize=16, bitOffset=0, base=pr.UInt, disp = '{}', mode='RO')) self.add(pr.RemoteVariable(name=('Locked%d'%i), description='Locked', offset=0x00000100+i*4, bitSize=1, bitOffset=16, base=pr.Bool, mode='RO')) for j in range(0, 6): for i in range(0, 2): self.add(pr.RemoteVariable(name=('IserdeseOut%d_%d' % (j, i)), description='IserdeseOut'+str(i), offset=0x00000300+i*4+j*8, bitSize=20, bitOffset=0, base=pr.UInt, disp = '{:#x}', mode='RO')) self.add(pr.RemoteVariable(name='FreezeDebug', description='Restart BERT', offset=0x00000400, bitSize=1, bitOffset=0, base=pr.Bool, mode='RW')) self.add(pr.RemoteVariable(name='BERTRst', description='Restart BERT', offset=0x00000400, bitSize=1, bitOffset=1, base=pr.Bool, mode='RW')) for i in range(0, 6): self.add(pr.RemoteVariable(name='BERTCounter'+str(i), description='Counter value.'+str(i), offset=0x00000404+i*8, bitSize=44, bitOffset=0, base=pr.UInt, disp = '{}', mode='RO')) for i in range(0,6): self.add(epixHrCore.AsicDeser10bDataRegisters(name='tenbData_ser%d'%i, offset=(0x00000500+(i*0x00000100)), expand=False)) ##################################### # Create commands ##################################### # A command has an associated function. The function can be a series of # python commands in a string. Function calls are executed in the command scope # the passed arg is available as 'arg'. Use 'dev' to get to device scope. # A command can also be a call to a local function with local scope. # The command object and the arg are passed self.add(pr.LocalCommand(name='InitAdcDelay',description='Find and set best delay for the adc channels', function=self.fnSetFindAndSetDelays)) self.add(pr.LocalCommand(name='InitAdcDelayConf',description='[skewPct, pattern1, pattern2, noReSync]', value=[50,0,0,0], function=self.fnSetFindAndSetDelaysConf)) self.add(pr.LocalCommand(name='Refines delay settings',description='Find and set best delay for the adc channels', function=self.fnRefineDelays)) def fnSetFindAndSetDelaysConf(self,dev,cmd,arg): """Find and set Monitoring ADC delays""" arguments = np.asarray(arg) # parent = self.parent numDelayTaps = 512 if arguments[1] == 0 and arguments[2] == 0: self.IDLE_PATTERN1 = 0xAAA83 self.IDLE_PATTERN2 = 0xAA97C else: self.IDLE_PATTERN1 = arguments[1] self.IDLE_PATTERN2 = arguments[2] eyeFactor = arguments[0]/100 noReSync = arguments[3] print("Executing delay test for ePixHr. 
Eye delay skew %f, pattern1 %X, pattern2 %X, do re-sync %d"%(eyeFactor, self.IDLE_PATTERN1, self.IDLE_PATTERN2, not noReSync)) #check adcs self.testResult = np.zeros((24,numDelayTaps)) self.testDelay = np.zeros((24,numDelayTaps)) for delay in range (0, numDelayTaps): self.Delay0.set(delay) self.Delay1.set(delay) self.Delay2.set(delay) self.Delay3.set(delay) self.Delay4.set(delay) self.Delay5.set(delay) self.testDelay[0,delay] = self.Delay0.get() self.testDelay[1,delay] = self.Delay1.get() self.testDelay[2,delay] = self.Delay2.get() self.testDelay[3,delay] = self.Delay3.get() self.testDelay[4,delay] = self.Delay4.get() self.testDelay[5,delay] = self.Delay5.get() if noReSync == 0: self.Resync.set(True) self.Resync.set(False) time.sleep(1.0 / float(100)) IserdeseOut_value = self.IserdeseOut0_0.get() self.testResult[0,delay] = ((IserdeseOut_value==self.IDLE_PATTERN1)or(IserdeseOut_value==self.IDLE_PATTERN2)) IserdeseOut_value = self.IserdeseOut1_0.get() self.testResult[1,delay] = ((IserdeseOut_value==self.IDLE_PATTERN1)or(IserdeseOut_value==self.IDLE_PATTERN2)) IserdeseOut_value = self.IserdeseOut2_0.get() self.testResult[2,delay] = ((IserdeseOut_value==self.IDLE_PATTERN1)or(IserdeseOut_value==self.IDLE_PATTERN2)) IserdeseOut_value = self.IserdeseOut3_0.get() self.testResult[3,delay] = ((IserdeseOut_value==self.IDLE_PATTERN1)or(IserdeseOut_value==self.IDLE_PATTERN2)) IserdeseOut_value = self.IserdeseOut4_0.get() self.testResult[4,delay] = ((IserdeseOut_value==self.IDLE_PATTERN1)or(IserdeseOut_value==self.IDLE_PATTERN2)) IserdeseOut_value = self.IserdeseOut5_0.get() self.testResult[5,delay] = ((IserdeseOut_value==self.IDLE_PATTERN1)or(IserdeseOut_value==self.IDLE_PATTERN2)) for i in range(0, 24): print("Test result adc %d:"%i) print(self.testResult[i,:]*self.testDelay) np.savetxt(str(self.name)+'_delayTestResultAll.csv', (self.testResult*self.testDelay), delimiter=',') self.resultArray = np.zeros((24,numDelayTaps)) for j in range(0, 24): for i in range(1, numDelayTaps): if (self.testResult[j,i] != 0): self.resultArray[j,i] = self.resultArray[j,i-1] + self.testResult[j,i] self.longestDelay0 = np.where(self.resultArray[0]==np.max(self.resultArray[0])) if len(self.longestDelay0[0])==1: self.sugDelay0 = int(self.longestDelay0[0]) - int(self.resultArray[0][self.longestDelay0]*eyeFactor) else: self.sugDelay0 = int(self.longestDelay0[0][0]) - int(self.resultArray[0][self.longestDelay0[0][0]]*eyeFactor) self.longestDelay1 = np.where(self.resultArray[1]==np.max(self.resultArray[1])) if len(self.longestDelay1[0])==1: self.sugDelay1 = int(self.longestDelay1[0]) - int(self.resultArray[1][self.longestDelay1]*eyeFactor) else: self.sugDelay1 = int(self.longestDelay1[0][0]) - int(self.resultArray[1][self.longestDelay1[0][0]]*eyeFactor) self.longestDelay2 = np.where(self.resultArray[2]==np.max(self.resultArray[2])) if len(self.longestDelay2[0])==1: self.sugDelay2 = int(self.longestDelay2[0]) - int(self.resultArray[2][self.longestDelay2]*eyeFactor) else: self.sugDelay2 = int(self.longestDelay2[0][0]) - int(self.resultArray[2][self.longestDelay2[0][0]]*eyeFactor) self.longestDelay3 = np.where(self.resultArray[3]==np.max(self.resultArray[3])) if len(self.longestDelay3[0])==1: self.sugDelay3 = int(self.longestDelay3[0]) - int(self.resultArray[3][self.longestDelay3]*eyeFactor) else: self.sugDelay3 = int(self.longestDelay3[0][0]) - int(self.resultArray[3][self.longestDelay3[0][0]]*eyeFactor) self.longestDelay4 = np.where(self.resultArray[4]==np.max(self.resultArray[4])) if len(self.longestDelay4[0])==1: 
self.sugDelay4 = int(self.longestDelay4[0]) - int(self.resultArray[4][self.longestDelay4]*eyeFactor) else: self.sugDelay4 = int(self.longestDelay4[0][0]) - int(self.resultArray[4][self.longestDelay4[0][0]]*eyeFactor) self.longestDelay5 = np.where(self.resultArray[5]==
np.max(self.resultArray[5])
numpy.max
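# The calibration routine above scans every IDELAY tap, marks the taps whose
# deserializer output matches one of the idle patterns, and then picks a tap
# inside the longest run of passing taps, backing off from the end of that run
# by a fraction of its length (the "eye factor", 50% by default). A compact,
# standalone sketch of that selection step; the array of pass flags is made up
# for illustration.
import numpy as np

def pick_delay(pass_flags, eye_factor=0.5):
    """pass_flags: 1D 0/1 array, one entry per delay tap."""
    run = np.zeros(len(pass_flags), dtype=int)
    for i in range(1, len(pass_flags)):
        if pass_flags[i]:
            run[i] = run[i - 1] + 1        # length of the current passing run
    best_end = int(np.argmax(run))         # end of the longest run
    return best_end - int(run[best_end] * eye_factor)

taps = np.zeros(512, dtype=int)
taps[100:160] = 1                          # pretend taps 100..159 lock cleanly
assert pick_delay(taps) == 129             # a tap well inside the eye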
#!/usr/bin/env python3
# Copyright (c) Facebook, Inc. and its affiliates.
#
# This source code is licensed under the MIT license found in the
# LICENSE file in the root directory of this source tree.

import numpy as np
import pandas as pd

from ax.utils.common.testutils import TestCase
from ax.utils.stats.statstools import inverse_variance_weight, marginal_effects


class InverseVarianceWeightingTest(TestCase):
    def test_bad_arg_ivw(self):
        with self.assertRaises(ValueError):
            inverse_variance_weight(
                np.array([0]), np.array([1]), conflicting_noiseless="foo"
            )
        with self.assertRaises(ValueError):
            inverse_variance_weight(np.array([1, 2]),
np.array([1])
numpy.array
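# For context on what test_bad_arg_ivw exercises: inverse-variance weighting
# combines several noisy estimates of the same quantity, weighting each one by
# the reciprocal of its variance (presumably why the mismatched-length call is
# expected to raise). The textbook formula as a plain numpy sketch; the real
# ax.utils.stats.statstools implementation has a richer signature.
import numpy as np

def ivw(means, variances):
    weights = 1.0 / np.asarray(variances)
    combined_mean = np.sum(weights * means) / np.sum(weights)
    combined_var = 1.0 / np.sum(weights)
    return combined_mean, combined_var

m, v = ivw(np.array([1.0, 2.0]), np.array([1.0, 1.0]))
assert np.isclose(m, 1.5) and np.isclose(v, 0.5)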
"""test_ulogconv.""" from context import ulogconv from context import TopicMsgs import pyulog import pandas as pd import numpy as np from numpy.testing import assert_almost_equal def test_createPandaDict(): """test create dictionary of panda-topics.""" file = "testlogs/position.ulg" topics = ["vehicle_local_position", "vehicle_attitude"] ulog = pyulog.ULog(file, topics) dp = ulogconv.create_pandadict(ulog) expected_names = { "T_vehicle_local_position_0": "T_vehicle_local_position_0", "T_vehicle_attitude_0": "T_vehicle_attitude_0", } for key in dp: assert key == expected_names[key] for name in dp[key].columns: if name != "timestamp": assert name[:2] == "F_" def test_apply_zoh(): """test zoh.""" # zoh to msg_2 msg1 = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] msg2 = [0, np.nan, 2, np.nan, 4, np.nan, np.nan, 7, 8, np.nan] df = pd.DataFrame( {"T_topic_1_0__F_msg_1": msg1, "T_topic_1_0__F_msg_2": msg2} ) topicMsgsList = [TopicMsgs("topic_1", ["msg_2"])] ulogconv.apply_zoh(df, topicMsgsList) msg_2_expected = [0, 0, 2, 2, 4, 4, 4, 7, 8, 8] assert_almost_equal(msg_2_expected, df.T_topic_1_0__F_msg_2) # zoh to msg_1 df = pd.DataFrame( {"T_topic_1_0__F_msg_1": msg1, "T_topic_1_0__F_msg_2": msg2} ) topicMsgsList = [TopicMsgs("topic_1", ["msg_1"])] ulogconv.apply_zoh(df, topicMsgsList) msg_1_expected = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9] assert_almost_equal(msg_1_expected, df.T_topic_1_0__F_msg_1) # zoh to all msgs msg3 = [0, np.nan, np.nan, np.nan, 6, np.nan, np.nan, 7, np.nan, np.nan] msg_3_expected = [0, 0, 0, 0, 6, 6, 6, 7, 7, 7] df = pd.DataFrame( { "T_topic_1_0__F_msg_1": msg1, "T_topic_1_0__F_msg_2": msg2, "T_topic_1_0__F_msg_3": msg3, } ) topicMsgsList = [TopicMsgs("topic_1", [])] ulogconv.apply_zoh(df, topicMsgsList) assert_almost_equal(msg_1_expected, df.T_topic_1_0__F_msg_1) assert_almost_equal(msg_2_expected, df.T_topic_1_0__F_msg_2) assert_almost_equal(msg_3_expected, df.T_topic_1_0__F_msg_3) # zoh if first is nan msg4 = [np.nan, np.nan, 2, 3, 4, 5, np.nan, np.nan, 8, 9] msg_4_expected = [np.nan, np.nan, 2, 3, 4, 5, 5, 5, 8, 9] df = pd.DataFrame( { "T_topic_1_0__F_msg_1": msg1, "T_topic_1_0__F_msg_2": msg2, "T_topic_1_0__F_msg_3": msg3, "T_topic_1_0__F_msg_4": msg4, } ) topicMsgsList = [TopicMsgs("topic_1", ["msg_4"])] ulogconv.apply_zoh(df, topicMsgsList) assert_almost_equal(msg_4_expected, df.T_topic_1_0__F_msg_4) # zoh if inf is present msg5 = [np.inf, np.nan, 2, 3, 4, 5, np.inf, np.nan, 8, 9] msg_5_expected = [np.inf, np.inf, 2, 3, 4, 5, np.inf, np.inf, 8, 9] df = pd.DataFrame( { "T_topic_1_0__F_msg_1": msg1, "T_topic_1_0__F_msg_2": msg2, "T_topic_1_0__F_msg_3": msg3, "T_topic_1_0__F_msg_4": msg4, "T_topic_1_0__F_msg_5": msg5, } ) topicMsgsList = [TopicMsgs("topic_1", ["msg_5"])] ulogconv.apply_zoh(df, topicMsgsList) assert_almost_equal(msg_5_expected, df.T_topic_1_0__F_msg_5) def test_replace_nan_with_inf(): """test replace nan with inf.""" file = "testlogs/position.ulg" topics = ["vehicle_local_position", "vehicle_attitude"] nan_msg = [np.nan, 2, 4, np.nan, np.nan, 5] inf_msg = [np.inf, 2, 4, np.inf, np.inf, 5] ulog = pyulog.ULog(file, topics) ulog.data_list[0].data["fake_msg_0"] = np.array(nan_msg) ulog.data_list[1].data["fake_msg_0"] = np.array(nan_msg) topic_msgs_list = [ TopicMsgs("vehicle_local_position", ["fake_msg_0"]), TopicMsgs("vehicle_attitude", ["fake_msg_0"]), ] ulogconv.replace_nan_with_inf(ulog, topic_msgs_list) assert_almost_equal(ulog.data_list[0].data["fake_msg_0"], inf_msg)
assert_almost_equal(ulog.data_list[1].data["fake_msg_0"], inf_msg)
numpy.testing.assert_almost_equal
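# The apply_zoh behaviour exercised above is a zero-order hold: NaN samples
# are replaced by the last seen value, leading NaNs stay NaN, and inf is left
# alone (it is used as an explicit marker via replace_nan_with_inf). A minimal
# pandas-based sketch of the hold itself, with made-up sample data:
import numpy as np
import pandas as pd

msg = [np.nan, np.nan, 2, np.nan, 4, np.nan, np.nan, 7, 8, np.nan]
held = pd.Series(msg).ffill().to_numpy()
expected = [np.nan, np.nan, 2, 2, 4, 4, 4, 7, 8, 8]
np.testing.assert_almost_equal(held, expected)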
# coding: utf-8 # Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department # Distributed under the terms of "New BSD License", see the LICENSE file. from __future__ import print_function, unicode_literals import os import posixpath import sys import h5py import numpy as np import pandas as pd import warnings from io import StringIO from pyiron.lammps.potential import LammpsPotentialFile, PotentialAvailable from pyiron.atomistics.job.atomistic import AtomisticGenericJob from pyiron.base.settings.generic import Settings from pyiron.base.pyio.parser import Logstatus, extract_data_from_file from pyiron.lammps.control import LammpsControl from pyiron.lammps.potential import LammpsPotential from pyiron.lammps.structure import LammpsStructure, UnfoldingPrism from pyiron.atomistics.md_analysis.trajectory_analysis import unwrap_coordinates __author__ = "<NAME>, <NAME>, <NAME>" __copyright__ = "Copyright 2019, Max-Planck-Institut für Eisenforschung GmbH " \ "- Computational Materials Design (CM) Department" __version__ = "1.0" __maintainer__ = "<NAME>" __email__ = "<EMAIL>" __status__ = "production" __date__ = "Sep 1, 2017" s = Settings() class LammpsBase(AtomisticGenericJob): """ Class to setup and run and analyze LAMMPS simulations which is a derivative of atomistics.job.generic.GenericJob. The functions in these modules are written in such the function names and attributes are very generic (get_structure(), molecular_dynamics(), version) but the functions are written to handle LAMMPS specific input/output. Args: project (pyiron.project.Project instance): Specifies the project path among other attributes job_name (str): Name of the job Attributes: input (lammps.Input instance): Instance which handles the input """ def __init__(self, project, job_name): super(LammpsBase, self).__init__(project, job_name) self.input = Input() self._cutoff_radius = None self._is_continuation = None self._compress_by_default = True s.publication_add(self.publication) @property def cutoff_radius(self): """ Returns: """ return self._cutoff_radius @cutoff_radius.setter def cutoff_radius(self, cutoff): """ Args: cutoff: Returns: """ self._cutoff_radius = cutoff @property def potential(self): """ Execute view_potential() or list_potential() in order to see the pre-defined potential files Returns: """ return self.input.potential.df @potential.setter def potential(self, potential_filename): """ Execute view_potential() or list_potential() in order to see the pre-defined potential files Args: potential_filename: Returns: """ if sys.version_info.major == 2: stringtypes = (str, unicode) else: stringtypes = str if isinstance(potential_filename, stringtypes): if '.lmp' in potential_filename: potential_filename = potential_filename.split('.lmp')[0] potential_db = LammpsPotentialFile() potential = potential_db.find_by_name(potential_filename) elif isinstance(potential_filename, pd.DataFrame): potential = potential_filename else: raise TypeError('Potentials have to be strings or pandas dataframes.') self.input.potential.df = potential for val in ["units", "atom_style", "dimension"]: v = self.input.potential[val] if v is not None: self.input.control[val] = v self.input.potential.remove_structure_block() @property def potential_available(self): return PotentialAvailable(list_of_potentials=self.potential_list) @property def potential_list(self): """ List of interatomic potentials suitable for the current atomic structure. use self.potentials_view() to get more details. 
Returns: list: potential names """ return self.list_potentials() @property def potential_view(self): """ List all interatomic potentials for the current atomistic sturcture including all potential parameters. To quickly get only the names of the potentials you can use: self.potentials_list() Returns: pandas.Dataframe: Dataframe including all potential parameters. """ return self.view_potentials() def set_input_to_read_only(self): """ This function enforces read-only mode for the input classes, but it has to be implement in the individual classes. """ super(LammpsBase, self).set_input_to_read_only() self.input.control.read_only = True self.input.potential.read_only = True def validate_ready_to_run(self): """ Returns: """ super(LammpsBase, self).validate_ready_to_run() if self.potential is None: raise ValueError('This job does not contain a valid potential: {}'.format(self.job_name)) def get_potentials_for_structure(self): """ Returns: """ return self.list_potentials() def get_final_structure(self): """ Returns: """ warnings.warn("get_final_structure() is deprecated - please use get_structure() instead.", DeprecationWarning) return self.get_structure(iteration_step=-1) def view_potentials(self): """ List all interatomic potentials for the current atomistic sturcture including all potential parameters. To quickly get only the names of the potentials you can use: self.potentials_list() Returns: pandas.Dataframe: Dataframe including all potential parameters. """ from pyiron.lammps.potential import LammpsPotentialFile if not self.structure: raise ValueError('No structure set.') list_of_elements = set(self.structure.get_chemical_symbols()) list_of_potentials = LammpsPotentialFile().find(list_of_elements) if list_of_potentials is not None: return list_of_potentials else: raise TypeError('No potentials found for this kind of structure: ', str(list_of_elements)) def list_potentials(self): """ List of interatomic potentials suitable for the current atomic structure. use self.potentials_view() to get more details. Returns: list: potential names """ return list(self.view_potentials()['Name'].values) def enable_h5md(self): """ Returns: """ del self.input.control['dump_modify'] del self.input.control['dump'] self.input.control['dump'] = '1 all h5md ${dumptime} dump.h5 position force create_group yes' def write_input(self): """ Call routines that generate the code specific input files Returns: """ if self.structure is None: raise ValueError("Input structure not set. 
Use method set_structure()") lmp_structure = self._get_lammps_structure(structure=self.structure, cutoff_radius=self.cutoff_radius) lmp_structure.write_file(file_name="structure.inp", cwd=self.working_directory) version_int_lst = self._get_executable_version_number() if version_int_lst is not None and 'dump_modify' in self.input.control._dataset['Parameter'] and \ (version_int_lst[0] < 2016 or (version_int_lst[0] == 2016 and version_int_lst[1] < 11)): self.input.control['dump_modify'] = self.input.control['dump_modify'].replace(' line ', ' ') if not all(self.structure.pbc): self.input.control['boundary'] = ' '.join(['p' if coord else 'f' for coord in self.structure.pbc]) self._set_selective_dynamics() self.input.control.write_file(file_name="control.inp", cwd=self.working_directory) self.input.potential.write_file(file_name="potential.inp", cwd=self.working_directory) self.input.potential.copy_pot_files(self.working_directory) def _get_executable_version_number(self): """ Get the version of the executable Returns: list: List of integers defining the version number """ if self.executable.version: return [l for l in [[int(i) for i in sv.split('.') if i.isdigit()] for sv in self.executable.version.split('/')[-1].split('_')] if len(l) > 0][0] else: return None @property def publication(self): return {'lammps': {'lammps': {'title': 'Fast Parallel Algorithms for Short-Range Molecular Dynamics', 'journal': 'Journal of Computational Physics', 'volume': '117', 'number': '1', 'pages': '1-19', 'year': '1995', 'issn': '0021-9991', 'doi': '10.1006/jcph.1995.1039', 'url': 'http://www.sciencedirect.com/science/article/pii/S002199918571039X', 'author': ['<NAME>']}}} def collect_output(self): """ Returns: """ self.input.from_hdf(self._hdf5) if os.path.isfile(self.job_file_name(file_name="dump.h5", cwd=self.working_directory)): self.collect_h5md_file(file_name="dump.h5", cwd=self.working_directory) else: self.collect_dump_file(file_name="dump.out", cwd=self.working_directory) self.collect_output_log(file_name="log.lammps", cwd=self.working_directory) final_structure = self.get_structure(iteration_step=-1) with self.project_hdf5.open("output") as hdf_output: final_structure.to_hdf(hdf_output) def convergence_check(self): if self._generic_input['calc_mode'] == 'minimize': if self._generic_input['max_iter'] + 1 <= len(self['output/generic/energy_tot']) or \ len([l for l in self['log.lammps'] if 'linesearch alpha is zero' in l]) != 0: return False else: return True else: return True def collect_logfiles(self): """ Returns: """ return # TODO: make rotation of all vectors back to the original as in self.collect_dump_file def collect_h5md_file(self, file_name="dump.h5", cwd=None): """ Args: file_name: cwd: Returns: """ file_name = self.job_file_name(file_name=file_name, cwd=cwd) with h5py.File(file_name, 'r', libver='latest', swmr=True) as h5md: positions = [pos_i.tolist() for pos_i in h5md['/particles/all/position/value']] time = [time_i.tolist() for time_i in h5md['/particles/all/position/step']] forces = [for_i.tolist() for for_i in h5md['/particles/all/force/value']] # following the explanation at: http://nongnu.org/h5md/h5md.html cell = [np.eye(3) * np.array(cell_i.tolist()) for cell_i in h5md['/particles/all/box/edges/value']] with self.project_hdf5.open("output/generic") as h5_file: h5_file['forces'] = np.array(forces) h5_file['positions'] = np.array(positions) h5_file['time'] = np.array(time) h5_file['cells'] = cell def collect_errors(self, file_name, cwd=None): """ Args: file_name: cwd: Returns: """ 
file_name = self.job_file_name(file_name=file_name, cwd=cwd) error = extract_data_from_file(file_name, tag="ERROR", num_args=1000) if len(error) > 0: error = " ".join(error[0]) raise RuntimeError("Run time error occurred: " + str(error)) else: return True def collect_output_log(self, file_name="log.lammps", cwd=None): """ general purpose routine to extract static from a lammps log file Args: file_name: cwd: Returns: """ self.collect_errors(file_name=file_name, cwd=cwd) file_name = self.job_file_name(file_name=file_name, cwd=cwd) with open(file_name, 'r') as f: f = f.readlines() l_start = np.where([line.startswith('Step') for line in f])[0] l_end = np.where([line.startswith('Loop') for line in f])[0] if len(l_start)>len(l_end): l_end = np.append(l_end, [None]) if sys.version_info >= (3,): df = [pd.read_csv(StringIO('\n'.join(f[llst:llen])), delim_whitespace=True) for llst, llen in zip(l_start, l_end)] else: df = [pd.read_csv(StringIO(unicode('\n'.join(f[llst:llen]))), delim_whitespace=True) for llst, llen in zip(l_start, l_end)] df = df[-1] h5_dict = {"Step": "steps", "Temp": "temperature", "PotEng": "energy_pot", "TotEng": "energy_tot", "Volume": "volume"} df = df.rename(index=str, columns=h5_dict) pressures = np.stack((df.Pxx, df.Pxy, df.Pxz, df.Pxy, df.Pyy, df.Pyz, df.Pxz, df.Pyz, df.Pzz), axis=-1).reshape(-1, 3, 3) pressures *= 0.0001 # bar -> GPa df = df.drop(columns=df.columns[((df.columns.str.len() == 3) & df.columns.str.startswith('P'))]) df['pressures'] = pressures.tolist() with self.project_hdf5.open("output/generic") as hdf_output: # This is a hack for backward comparability for k,v in df.items(): hdf_output[k] = np.array(v) def calc_minimize(self, e_tol=0.0, f_tol=1e-2, max_iter=100000, pressure=None, n_print=100): """ Args: e_tol: f_tol: max_iter: pressure: n_print: Returns: """ super(LammpsBase, self).calc_minimize(e_tol=e_tol, f_tol=f_tol, max_iter=max_iter, pressure=pressure, n_print=n_print) self.input.control.calc_minimize(e_tol=e_tol, f_tol=f_tol, max_iter=max_iter, pressure=pressure, n_print=n_print) def calc_static(self): """ Returns: """ super(LammpsBase, self).calc_static() self.input.control.calc_static() def calc_md(self, temperature=None, pressure=None, n_ionic_steps=1000, time_step=1.0, n_print=100, temperature_damping_timescale=100.0, pressure_damping_timescale=1000.0, seed=None, tloop=None, initial_temperature=None, langevin=False, delta_temp=None, delta_press=None): """ Set an MD calculation within LAMMPS. Nosé Hoover is used by default. Args: temperature (None/float): Target temperature. If set to None, an NVE calculation is performed. It is required when the pressure is set or langevin is set pressure (None/float): Target pressure. If set to None, an NVE or an NVT calculation is performed. (This tag will allow for a list in the future as it is done for calc_minimize()) n_ionic_steps (int): Number of ionic steps time_step (float): Step size between two steps. In fs if units==metal n_print (int): Print frequency temperature_damping_timescale (float): The time associated with the thermostat adjusting the temperature. (In fs. After rescaling to appropriate time units, is equivalent to Lammps' `Tdamp`.) pressure_damping_timescale (float): The time associated with the barostat adjusting the temperature. (In fs. After rescaling to appropriate time units, is equivalent to Lammps' `Pdamp`.) 
seed (int): Seed for the random number generation (required for the velocity creation) tloop: initial_temperature (None/float): Initial temperature according to which the initial velocity field is created. If None, the initial temperature will be twice the target temperature (which would go immediately down to the target temperature as described in equipartition theorem). If 0, the velocity field is not initialized (in which case the initial velocity given in structure will be used). If any other number is given, this value is going to be used for the initial temperature. langevin (bool): (True or False) Activate Langevin dynamics delta_temp (float): Thermostat timescale, but in your Lammps time units, whatever those are. (DEPRECATED.) delta_press (float): Barostat timescale, but in your Lammps time units, whatever those are. (DEPRECATED.) """ if self.server.run_mode.interactive_non_modal: warnings.warn('calc_md() is not implemented for the non modal interactive mode use calc_static()!') super(LammpsBase, self).calc_md(temperature=temperature, pressure=pressure, n_ionic_steps=n_ionic_steps, time_step=time_step, n_print=n_print, temperature_damping_timescale=temperature_damping_timescale, pressure_damping_timescale=pressure_damping_timescale, seed=seed, tloop=tloop, initial_temperature=initial_temperature, langevin=langevin) self.input.control.calc_md(temperature=temperature, pressure=pressure, n_ionic_steps=n_ionic_steps, time_step=time_step, n_print=n_print, temperature_damping_timescale=temperature_damping_timescale, pressure_damping_timescale=pressure_damping_timescale, seed=seed, tloop=tloop, initial_temperature=initial_temperature, langevin=langevin, delta_temp=delta_temp, delta_press=delta_press, job_name=self.job_name) # define hdf5 input and output def to_hdf(self, hdf=None, group_name=None): """ Args: hdf: group_name: Returns: """ super(LammpsBase, self).to_hdf(hdf=hdf, group_name=group_name) self._structure_to_hdf() self.input.to_hdf(self._hdf5) def from_hdf(self, hdf=None, group_name=None): # TODO: group_name should be removed """ Args: hdf: group_name: Returns: """ super(LammpsBase, self).from_hdf(hdf=hdf, group_name=group_name) self._structure_from_hdf() self.input.from_hdf(self._hdf5) def write_restart_file(self, filename="restart.out"): """ Args: filename: Returns: """ self.input.control.modify(write_restart=filename, append_if_not_present=True) def compress(self, files_to_compress=None): """ Compress the output files of a job object. 
Args: files_to_compress (list): """ if files_to_compress is None: files_to_compress = [f for f in list(self.list_files()) if f not in ["restart.out"]] super(LammpsBase, self).compress(files_to_compress=files_to_compress) def read_restart_file(self, filename="restart.out"): """ Args: filename: Returns: """ self._is_continuation = True self.input.control.set(read_restart=filename) self.input.control['reset_timestep'] = 0 self.input.control.remove_keys(['dimension', 'read_data', 'boundary', 'atom_style', 'velocity']) def collect_dump_file(self, file_name="dump.out", cwd=None): """ general purpose routine to extract static from a lammps dump file Args: file_name: cwd: Returns: """ file_name = self.job_file_name(file_name=file_name, cwd=cwd) output = {} with open(file_name, 'r') as ff: dump = ff.readlines() prism = UnfoldingPrism(self.structure.cell, digits=15) rotation_lammps2orig = np.linalg.inv(prism.R) time = np.genfromtxt([dump[nn] for nn in np.where([ll.startswith('ITEM: TIMESTEP') for ll in dump])[0]+1], dtype=int) time = np.array([time]).flatten() output['time'] = time natoms = np.genfromtxt([dump[nn] for nn in np.where([ll.startswith('ITEM: NUMBER OF ATOMS') for ll in dump])[0]+1], dtype=int) natoms = np.array([natoms]).flatten() cells = np.genfromtxt(' '.join(([' '.join(dump[nn:nn+3]) for nn in np.where([ll.startswith('ITEM: BOX BOUNDS') for ll in dump])[0]+1])).split()).reshape(len(natoms), -1) cells = np.array([to_amat(cc) for cc in cells]) output['cells'] = cells l_start = np.where([ll.startswith('ITEM: ATOMS') for ll in dump])[0] l_end = l_start+natoms+1 content = [pd.read_csv(StringIO('\n'.join(dump[llst:llen]).replace('ITEM: ATOMS ', '')), delim_whitespace=True) for llst, llen in zip(l_start, l_end)] forces = np.array([np.stack((cc['fx'], cc['fy'], cc['fz']), axis=-1) for cc in content]) output['forces'] = np.einsum('ijk,kl->ijl', forces, rotation_lammps2orig) unwrapped_positions = np.array([np.stack((cc['xsu'], cc['ysu'], cc['zsu']), axis=-1) for cc in content]) positions = unwrapped_positions-np.floor(unwrapped_positions) unwrapped_positions = np.einsum('ikj,ilk->ilj', cells, unwrapped_positions) output['unwrapped_positions'] = np.einsum('ijk,kl->ijl', unwrapped_positions, rotation_lammps2orig) positions = np.einsum('ikj,ilk->ilj', cells, positions) output['positions'] = np.einsum('ijk,kl->ijl', positions, rotation_lammps2orig) with self.project_hdf5.open("output/generic") as hdf_output: for k,v in output.items(): hdf_output[k] = v # Outdated functions: def set_potential(self, file_name): """ Args: file_name: Returns: """ print('This function is outdated use the potential setter instead!') self.potential = file_name def next(self, snapshot=-1, job_name=None, job_type=None): """ Restart a new job created from an existing Lammps calculation. Args: project (pyiron.project.Project instance): Project instance at which the new job should be created snapshot (int): Snapshot of the calculations which would be the initial structure of the new job job_name (str): Job name job_type (str): Job type. If not specified a Lammps job type is assumed Returns: new_ham (lammps.lammps.Lammps instance): New job """ return super(LammpsBase, self).restart(snapshot=snapshot, job_name=job_name, job_type=job_type) def restart(self, snapshot=-1, job_name=None, job_type=None): """ Restart a new job created from an existing Lammps calculation. 
Args: project (pyiron.project.Project instance): Project instance at which the new job should be created snapshot (int): Snapshot of the calculations which would be the initial structure of the new job job_name (str): Job name job_type (str): Job type. If not specified a Lammps job type is assumed Returns: new_ham (lammps.lammps.Lammps instance): New job """ new_ham = super(LammpsBase, self).restart(snapshot=snapshot, job_name=job_name, job_type=job_type) if new_ham.__name__ == self.__name__: new_ham.potential = self.potential if os.path.isfile(os.path.join(self.working_directory, "restart.out")): new_ham.read_restart_file(filename="restart.out") new_ham.restart_file_list.append(posixpath.join(self.working_directory, "restart.out")) return new_ham def _get_lammps_structure(self, structure=None, cutoff_radius=None): lmp_structure = LammpsStructure() lmp_structure.potential = self.input.potential lmp_structure.atom_type = self.input.control["atom_style"] if cutoff_radius is not None: lmp_structure.cutoff_radius = cutoff_radius else: lmp_structure.cutoff_radius = self.cutoff_radius lmp_structure.el_eam_lst = self.input.potential.get_element_lst() if structure is not None: lmp_structure.structure = structure else: lmp_structure.structure = self.structure if not set(lmp_structure.structure.get_species_symbols()).issubset(set(lmp_structure.el_eam_lst)): raise ValueError('The selected potentials do not support the given combination of elements.') return lmp_structure def _set_selective_dynamics(self): if 'selective_dynamics' in self.structure._tag_list.keys(): if self.structure.selective_dynamics._default is None: self.structure.selective_dynamics._default = [True, True, True] sel_dyn = np.logical_not(self.structure.selective_dynamics.list()) # Enter loop only if constraints present if len(np.argwhere(np.any(sel_dyn, axis=1)).flatten()) != 0: all_indices = np.arange(len(self.structure), dtype=int) constraint_xyz = np.argwhere(np.all(sel_dyn, axis=1)).flatten() not_constrained_xyz = np.setdiff1d(all_indices, constraint_xyz) # LAMMPS starts counting from 1 constraint_xyz += 1 ind_x = np.argwhere(sel_dyn[not_constrained_xyz, 0]).flatten() ind_y = np.argwhere(sel_dyn[not_constrained_xyz, 1]).flatten() ind_z = np.argwhere(sel_dyn[not_constrained_xyz, 2]).flatten() constraint_xy = not_constrained_xyz[np.intersect1d(ind_x, ind_y)] + 1 constraint_yz = not_constrained_xyz[np.intersect1d(ind_y, ind_z)] + 1 constraint_zx = not_constrained_xyz[np.intersect1d(ind_z, ind_x)] + 1 constraint_x = not_constrained_xyz[np.setdiff1d(np.setdiff1d(ind_x, ind_y), ind_z)] + 1 constraint_y = not_constrained_xyz[np.setdiff1d(np.setdiff1d(ind_y, ind_z), ind_x)] + 1 constraint_z = not_constrained_xyz[np.setdiff1d(
np.setdiff1d(ind_z, ind_x)
numpy.setdiff1d
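# The selective-dynamics block above sorts atom indices into constraint groups
# with numpy set operations. A toy illustration of the two calls it leans on,
# np.intersect1d and np.setdiff1d (the index values are made up):
import numpy as np

ind_x = np.array([0, 1, 4, 6])      # atoms constrained along x
ind_y = np.array([1, 2, 6])         # atoms constrained along y
ind_z = np.array([2, 6, 7])         # atoms constrained along z

constraint_xy = np.intersect1d(ind_x, ind_y)               # both x and y
only_x = np.setdiff1d(np.setdiff1d(ind_x, ind_y), ind_z)   # x only
assert np.array_equal(constraint_xy, np.array([1, 6]))
assert np.array_equal(only_x, np.array([0, 4]))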
import copy

import numpy as np

from liegroups.numpy import SE3


def test_identity():
    T = SE3.identity()
    assert isinstance(T, SE3)


def test_dot():
    T = np.array([[0, 0, -1, 0.1],
                  [0, 1, 0, 0.5],
                  [1, 0, 0, -0.5],
                  [0, 0, 0, 1]])
    T2 = T.dot(T)
    assert np.allclose(
        (SE3.from_matrix(T).dot(SE3.from_matrix(T))).as_matrix(), T2)


def test_wedge_vee():
    xi = [1, 2, 3, 4, 5, 6]
    Xi = SE3.wedge(xi)
    xis = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]])
    Xis = SE3.wedge(xis)
    assert np.array_equal(xi, SE3.vee(Xi))
    assert np.array_equal(xis, SE3.vee(Xis))


def test_curlywedge_curlyvee():
    xi = [1, 2, 3, 4, 5, 6]
    Psi = SE3.curlywedge(xi)
    xis = np.array([[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]])
    Psis = SE3.curlywedge(xis)
    assert np.array_equal(xi, SE3.curlyvee(Psi))
    assert np.array_equal(xis, SE3.curlyvee(Psis))


def test_odot():
    p1 = [1, 2, 3]
    p2 = [1, 2, 3, 1]
    p3 = [1, 2, 3, 0]
    odot12 = np.vstack([SE3.odot(p1), np.zeros([1, 6])])
    odot13 = np.vstack([SE3.odot(p1, directional=True), np.zeros([1, 6])])
    odot2 = SE3.odot(p2)
    odot3 = SE3.odot(p3)
    assert np.array_equal(odot12, odot2)
    assert np.array_equal(odot13, odot3)


def test_odot_vectorized():
    p1 = [1, 2, 3]
    p2 = [2, 3, 4]
    ps = np.array([p1, p2])
    odot1 = SE3.odot(p1)
    odot2 = SE3.odot(p2)
    odots = SE3.odot(ps)
    assert np.array_equal(odot1, odots[0, :, :])
    assert
np.array_equal(odot2, odots[1, :, :])
numpy.array_equal
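# The wedge/vee tests above go back and forth between parameter vectors and
# Lie-algebra matrices. For the rotation part alone this is the familiar 3x3
# skew-symmetric ("hat") matrix; the SE(3) version also carries a translation
# block, with the exact rho/phi ordering fixed by the library's own convention.
# A numpy sketch of the unambiguous SO(3) case only:
import numpy as np

def so3_wedge(phi):
    x, y, z = phi
    return np.array([[0.0, -z, y],
                     [z, 0.0, -x],
                     [-y, x, 0.0]])

def so3_vee(Phi):
    return np.array([Phi[2, 1], Phi[0, 2], Phi[1, 0]])

phi = np.array([1.0, 2.0, 3.0])
v = np.array([4.0, 5.0, 6.0])
assert np.array_equal(so3_vee(so3_wedge(phi)), phi)
assert np.allclose(so3_wedge(phi) @ v, np.cross(phi, v))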
import os import logging import datetime import time import math import json import librosa import numpy as np from utils import normalize import tensorflow as tf from tensorflow.contrib import rnn from sklearn.preprocessing import normalize as sk_normalize from sklearn.cluster import KMeans from scipy.ndimage.filters import gaussian_filter from collections import defaultdict from configuration import get_config from VAD_segments import VAD_chunk config = get_config() config.log_path = 'voxceleb1-dev-embeddings.logs' log_file = os.path.abspath(config.log_path) logging.basicConfig( filename=log_file, level=logging.DEBUG, format="%(asctime)s:%(levelname)s:%(message)s" ) print(f'Log path: {log_file}') data_path = '/app/datasets/voxceleb-1/dev/wav' save_dir_path = '/app/voxsrc21-dia/embeddings/sequences' config.model_path = '/app/voxsrc21-dia/models/model.ckpt-46' os.makedirs(save_dir_path, exist_ok=True) def concat_segs(times, segs): #Concatenate continuous voiced segments concat_seg = [] seg_concat = segs[0] for i in range(0, len(times)-1): if times[i][1] == times[i+1][0]: seg_concat =
np.concatenate((seg_concat, segs[i+1]))
numpy.concatenate
#!/usr/bin/env python3 # -*- coding: utf-8 -*- import numpy as np from grasp_test import evaluate_grasp import pickle from util.plotter import plot_compare_methods import sys sys.path.insert(0, '..') from ood_detect import ood_confidence, ood_p_value if __name__ == "__main__": np.random.seed(1) bound = 0.1 print("PAC-Bound:", bound) config_file = "configs/config.json" grasper = evaluate_grasp(config_file) load = True """ Distribution shift in mug location """ if load: with open("results/emp_cost_mug_pos.txt", "rb") as fp: #Pickling emp_cost_test = pickle.load(fp) with open("results/cost_all_mug_pos.txt", "rb") as fp: #Pickling cost_all = pickle.load(fp) else: x_lim_list = [[0.45, 0.55], [0.4, 0.6], [0.35, 0.65], [0.3, 0.7], [0.25, 0.75], [0.2, 0.8]] y_lim_list = [[-0.05, 0.05],[-0.1, 0.1],[-0.15, 0.15],[-0.2, 0.2],[-0.25, 0.25], [-0.3, 0.3]] num_seeds = 20 emp_cost_test = [] cost_all = [] for (x_lim,y_lim) in zip(x_lim_list, y_lim_list): # print((x_lim,y_lim)) p_ood_detect = [] conf_ood_detect = [] emp_cost = 0 cost_list = [] for seed in range(num_seeds): _, cost, _ = grasper.test_policy_derandomized( numObjs=10, obj_folder="geometry/mugs/SNC_v4_mug_xs/", x_lim=x_lim, y_lim=y_lim, gui=False, obj_seed=seed ) cost_list.append(cost) emp_cost +=
np.mean(cost)
numpy.mean
""" Testing DKI microstructure """ from __future__ import division, print_function, absolute_import import numpy as np import random import dipy.reconst.dki_micro as dki_micro from numpy.testing import (assert_array_almost_equal, assert_almost_equal, assert_, assert_raises) from dipy.sims.voxel import (multi_tensor_dki, _check_directions, multi_tensor) from dipy.io.gradients import read_bvals_bvecs from dipy.core.gradients import gradient_table from dipy.data import get_data from dipy.reconst.dti import (eig_from_lo_tri) from dipy.data import get_sphere fimg, fbvals, fbvecs = get_data('small_64D') bvals, bvecs = read_bvals_bvecs(fbvals, fbvecs) gtab = gradient_table(bvals, bvecs) # 2 shells for techniques that requires multishell data bvals_2s = np.concatenate((bvals, bvals * 2), axis=0) bvecs_2s = np.concatenate((bvecs, bvecs), axis=0) gtab_2s = gradient_table(bvals_2s, bvecs_2s) # single fiber simulate (which is the assumption of our model) FIE = np.array([[[0.30, 0.32], [0.74, 0.51]], [[0.47, 0.21], [0.80, 0.63]]]) RDI = np.zeros((2, 2, 2)) ADI = np.array([[[1e-3, 1.3e-3], [0.8e-3, 1e-3]], [[0.9e-3, 0.99e-3], [0.89e-3, 1.1e-3]]]) ADE = np.array([[[2.2e-3, 2.3e-3], [2.8e-3, 2.1e-3]], [[1.9e-3, 2.5e-3], [1.89e-3, 2.1e-3]]]) Tor = np.array([[[2.6, 2.4], [2.8, 2.1]], [[2.9, 2.5], [2.7, 2.3]]]) RDE = ADE / Tor # prepare simulation: DWIsim = np.zeros((2, 2, 2, gtab_2s.bvals.size)) # Diffusion microstructural model assumes that signal does not have Taylor # approximation components larger than the fourth order. Thus parameter # estimates are only equal to the ground truth values of the simulation # if signals taylor components larger than the fourth order are removed. # Signal whithout this taylor components can be generated using the # multi_tensor_dki simulations. Therefore we used this function to test the # expected estimates of the model. DWIsim_all_taylor = np.zeros((2, 2, 2, gtab_2s.bvals.size)) # Signal with all taylor components can be simulated using the function # multi_tensor. Generating this signals will be usefull to test the prediction # procedures of DKI-based microstructural model. 
for i in range(2): for j in range(2): for k in range(2): ADi = ADI[i, j, k] RDi = RDI[i, j, k] ADe = ADE[i, j, k] RDe = RDE[i, j, k] fie = FIE[i, j, k] mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]]) frac = [fie*100, (1 - fie)*100] theta = random.uniform(0, 180) phi = random.uniform(0, 320) angles = [(theta, phi), (theta, phi)] signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles, fractions=frac, snr=None) DWIsim[i, j, k, :] = signal signal, sticks = multi_tensor(gtab_2s, mevals, angles=angles, fractions=frac, snr=None) DWIsim_all_taylor[i, j, k, :] = signal def test_single_fiber_model(): # single fiber simulate (which is the assumption of our model) fie = 0.49 ADi = 0.00099 ADe = 0.00226 RDi = 0 RDe = 0.00087 # prepare simulation: theta = random.uniform(0, 180) phi = random.uniform(0, 320) angles = [(theta, phi), (theta, phi)] mevals = np.array([[ADi, RDi, RDi], [ADe, RDe, RDe]]) frac = [fie*100, (1 - fie)*100] signal, dt, kt = multi_tensor_dki(gtab_2s, mevals, angles=angles, fractions=frac, snr=None) # DKI fit dkiM = dki_micro.DiffusionKurtosisModel(gtab_2s, fit_method="WLS") dkiF = dkiM.fit(signal) # Axonal Water Fraction sphere = get_sphere('symmetric724') AWF = dki_micro.axonal_water_fraction(dkiF.model_params, sphere, mask=None, gtol=1e-5) assert_almost_equal(AWF, fie) # Extra-cellular and intra-cellular components edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere) EDT = eig_from_lo_tri(edt) IDT = eig_from_lo_tri(idt) # check eigenvalues assert_array_almost_equal(EDT[0:3], np.array([ADe, RDe, RDe])) assert_array_almost_equal(IDT[0:3], np.array([ADi, RDi, RDi])) # first eigenvalue should be the direction of the fibers fiber_direction = _check_directions([(theta, phi)]) f_norm = abs(np.dot(fiber_direction, np.array((EDT[3], EDT[6], EDT[9])))) assert_almost_equal(f_norm, 1.) f_norm = abs(np.dot(fiber_direction, np.array((IDT[3], IDT[6], IDT[9])))) assert_almost_equal(f_norm, 1.) # Test model and fit objects wmtiM = dki_micro.KurtosisMicrostructureModel(gtab_2s, fit_method="WLS") wmtiF = wmtiM.fit(signal) assert_almost_equal(wmtiF.awf, AWF) assert_array_almost_equal(wmtiF.hindered_evals, np.array([ADe, RDe, RDe])) assert_array_almost_equal(wmtiF.restricted_evals, np.array([ADi, RDi, RDi])) assert_almost_equal(wmtiF.hindered_ad, ADe) assert_almost_equal(wmtiF.hindered_rd, RDe) assert_almost_equal(wmtiF.axonal_diffusivity, ADi) assert_almost_equal(wmtiF.tortuosity, ADe/RDe, decimal=4) # Test diffusion_components when a kurtosis tensors is associated with # negative kurtosis values. 
E.g of this cases is given below: dkiparams = np.array([1.67135726e-03, 5.03651205e-04, 9.35365328e-05, -7.11167583e-01, 6.23186820e-01, -3.25390313e-01, -1.75247376e-02, -4.78415563e-01, -8.77958674e-01, 7.02804064e-01, 6.18673368e-01, -3.51154825e-01, 2.18384153, -2.76378153e-02, 2.22893297, -2.68306546e-01, -1.28411610, -1.56557645e-01, -1.80850619e-01, -8.33152110e-01, -3.62410766e-01, 1.57775442e-01, 8.73775381e-01, 2.77188975e-01, -3.67415502e-02, -1.56330984e-01, -1.62295407e-02]) edt, idt = dki_micro.diffusion_components(dkiparams) assert_(np.all(np.isfinite(edt))) def test_wmti_model_multi_voxel(): # DKI fit dkiM = dki_micro.DiffusionKurtosisModel(gtab_2s, fit_method="WLS") dkiF = dkiM.fit(DWIsim) # Axonal Water Fraction sphere = get_sphere() AWF = dki_micro.axonal_water_fraction(dkiF.model_params, sphere, mask=None, gtol=1e-5) assert_almost_equal(AWF, FIE) # Extra-cellular and intra-cellular components edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere) EDT = eig_from_lo_tri(edt) IDT = eig_from_lo_tri(idt) # check eigenvalues assert_array_almost_equal(EDT[..., 0], ADE, decimal=3) assert_array_almost_equal(EDT[..., 1], RDE, decimal=3) assert_array_almost_equal(EDT[..., 2], RDE, decimal=3) assert_array_almost_equal(IDT[..., 0], ADI, decimal=3) assert_array_almost_equal(IDT[..., 1], RDI, decimal=3) assert_array_almost_equal(IDT[..., 2], RDI, decimal=3) # Test methods performance when a signal with all zeros is present FIEc = FIE.copy() RDIc = RDI.copy() ADIc = ADI.copy() ADEc = ADE.copy() Torc = Tor.copy() RDEc = RDE.copy() DWIsimc = DWIsim.copy() FIEc[0, 0, 0] = 0 RDIc[0, 0, 0] = 0 ADIc[0, 0, 0] = 0 ADEc[0, 0, 0] = 0 Torc[0, 0, 0] = 0 RDEc[0, 0, 0] = 0 DWIsimc[0, 0, 0, :] = 0 mask = np.ones((2, 2, 2)) mask[0, 0, 0] = 0 dkiF = dkiM.fit(DWIsimc) awf = dki_micro.axonal_water_fraction(dkiF.model_params, sphere, gtol=1e-5) assert_almost_equal(awf, FIEc) # Extra-cellular and intra-cellular components edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere, awf=awf) EDT = eig_from_lo_tri(edt) IDT = eig_from_lo_tri(idt) assert_array_almost_equal(EDT[..., 0], ADEc, decimal=3) assert_array_almost_equal(EDT[..., 1], RDEc, decimal=3) assert_array_almost_equal(EDT[..., 2], RDEc, decimal=3) assert_array_almost_equal(IDT[..., 0], ADIc, decimal=3) assert_array_almost_equal(IDT[..., 1], RDIc, decimal=3) assert_array_almost_equal(IDT[..., 2], RDIc, decimal=3) # Check when mask is given dkiF = dkiM.fit(DWIsim) awf = dki_micro.axonal_water_fraction(dkiF.model_params, sphere, gtol=1e-5, mask=mask) assert_almost_equal(awf, FIEc, decimal=3) # Extra-cellular and intra-cellular components edt, idt = dki_micro.diffusion_components(dkiF.model_params, sphere, awf=awf, mask=mask) EDT = eig_from_lo_tri(edt) IDT = eig_from_lo_tri(idt) assert_array_almost_equal(EDT[..., 0], ADEc, decimal=3) assert_array_almost_equal(EDT[..., 1], RDEc, decimal=3) assert_array_almost_equal(EDT[..., 2], RDEc, decimal=3) assert_array_almost_equal(IDT[..., 0], ADIc, decimal=3) assert_array_almost_equal(IDT[..., 1], RDIc, decimal=3) assert_array_almost_equal(IDT[..., 2], RDIc, decimal=3) # Check class object wmtiM = dki_micro.KurtosisMicrostructureModel(gtab_2s, fit_method="WLS") wmtiF = wmtiM.fit(DWIsim, mask=mask)
assert_almost_equal(wmtiF.awf, FIEc, decimal=3)
numpy.testing.assert_almost_equal
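For orientation, the tortuosity assertion in the single-fibre test above is plain arithmetic on the hindered-compartment eigenvalues used in the simulation; a minimal sketch of that calculation (values copied from the test, nothing here is part of the dipy API):

# Hindered (extra-cellular) diffusivities from the single-fibre simulation above.
ADe = 0.00226   # axial diffusivity
RDe = 0.00087   # radial diffusivity

# The test asserts wmtiF.tortuosity == ADe / RDe (to 4 decimals).
tortuosity = ADe / RDe
print(round(tortuosity, 4))   # ~2.5977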
# -*- coding: utf-8 -*- import unittest import warnings import time import inspect import numpy as np from sklearn.datasets import load_breast_cancer, load_boston from sklearn.preprocessing import StandardScaler from sklearn.model_selection import train_test_split, GridSearchCV from sklearn.metrics import accuracy_score from sklearn.linear_model import Lasso, LogisticRegression from sklearn.exceptions import NotFittedError import pandas as pd from .context import grouplasso from grouplasso.model import GroupLassoRegressor, GroupLassoClassifier RANDOM_STATE = 42 def _scaling_and_add_noise_feature(x_train, x_test, n_noised_features): # scaling np.random.seed(0) scaler = StandardScaler() x_train = scaler.fit_transform(x_train) x_test = scaler.transform(x_test) # add noise feature noise = np.random.randn(len(x_train), n_noised_features) x_train = np.c_[x_train, noise] noise = np.random.randn(len(x_test), n_noised_features) x_test = np.c_[x_test, noise] return x_train, x_test class BasicTestSuite(unittest.TestCase): """Basic test cases.""" def test_basic(self): for ModelClass in (GroupLassoRegressor, GroupLassoClassifier): # get_params test model = ModelClass( np.array([0, 1]), random_state=RANDOM_STATE) model.get_params() # document test (check that the doc has 10 or more lines) doc = inspect.getdoc(ModelClass) assert doc.count("\n") >= 10 def test_regressor(self): data = load_boston() x = data.data y = data.target x_train, x_test, y_train, y_test = train_test_split( x, y, random_state=RANDOM_STATE) n_noised_features = 5 x_train, x_test = _scaling_and_add_noise_feature( x_train, x_test, n_noised_features) # set group id group_ids = np.r_[np.zeros(x.shape[1]), np.ones( n_noised_features)].astype(int) model = GroupLassoRegressor(group_ids=group_ids, random_state=RANDOM_STATE, verbose=True, verbose_interval=10, alpha=1.0, tol=1e-3, eta=1e-1, max_iter=1000) start = time.time() model.fit(x_train, y_train) print('elapsed time:', time.time() - start) print('itr:', model.n_iter_) # check that the loss value is getting smaller assert len(model._losses) >= model.n_iter_ // model.verbose_interval for i in range(1, len(model._losses)): assert model._losses[i] < model._losses[i - 1] # check that coef of noised feature to be zero assert (model.coef_[-n_noised_features:] == 0).all() # chek that coef of NOT noised feature not to be zero assert (model.coef_[:x.shape[1]] != 0).all() # test score is not bad score = model.score(x_test, y_test) assert score >= 0.65 # initialize weights model_weights = np.r_[model.coef_, model.intercept_] # add tiny noise to correct weights model_weights += np.random.randn(len(model_weights)) * 0.3 model2 = GroupLassoRegressor(group_ids=group_ids, random_state=RANDOM_STATE) model2.set_params(**model.get_params()) model2.set_params(initial_weights=model_weights) model2.fit(x_train, y_train) assert model2.n_iter_ < model.n_iter_ assert np.linalg.norm(model2.coef_ - model.coef_, 2) < 5e-2 def test_regressor_vs_sklearn_Lasso(self): """ compare with lasso of sklearn. group lasso become normal lasso if every feature is differenet group with each other. 
""" data = load_boston() x = StandardScaler().fit_transform(data.data) y = data.target group_ids = np.arange(x.shape[1]).astype(int) alpha = 1.0 group_lasso = GroupLassoRegressor(group_ids=group_ids, random_state=RANDOM_STATE, verbose=False, alpha=alpha, tol=1e-3, eta=1e-1, max_iter=1000) ret = group_lasso.fit(x, y) # check that fit method return self assert isinstance(ret, GroupLassoRegressor) print('itr:', group_lasso.n_iter_) sklearn_lasso = Lasso(random_state=RANDOM_STATE, alpha=alpha) sklearn_lasso.fit(x, y) diff_of_coef = np.abs(group_lasso.coef_ - sklearn_lasso.coef_) diff_of_intercept = abs( group_lasso.intercept_ - sklearn_lasso.intercept_) assert (diff_of_coef < 1e-2).all() assert diff_of_intercept < 1e-2 def test_classifier(self): data = load_breast_cancer() x = data.data y = data.target x_train, x_test, y_train, y_test = train_test_split( x, y, random_state=RANDOM_STATE) n_noised_features = 5 x_train, x_test = _scaling_and_add_noise_feature( x_train, x_test, n_noised_features) # set group id group_ids = np.r_[np.zeros(x.shape[1]), np.ones( n_noised_features)].astype(int) model = GroupLassoClassifier(group_ids=group_ids, random_state=RANDOM_STATE, verbose=True, verbose_interval=10, alpha=1e-1, tol=1e-3, eta=1e-0, max_iter=1000) start = time.time() model.fit(x_train, y_train) print('elapsed time:', time.time() - start) print('itr:', model.n_iter_) # check that the loss value is getting smaller assert len(model._losses) >= model.n_iter_ // model.verbose_interval for i in range(1, len(model._losses)): assert model._losses[i] < model._losses[i - 1] # check that coef of noised feature to be zero assert (model.coef_[-n_noised_features:] == 0).all() # chek that coef of NOT noised feature not to be zero assert (model.coef_[:x.shape[1]] != 0).all() # test predicted result proba = model.predict_proba(x_test) pred = model.predict(x_test) assert proba.shape == (len(x_test), 2) assert (np.sum(proba, axis=1) == 1).all() assert ((proba >= 0) & (proba <= 1)).all() acc = accuracy_score(y_test, pred) assert acc >= 0.9 # initialize weights model_weights = np.r_[model.coef_, model.intercept_] # add tiny noise to correct weights model_weights += np.random.randn(len(model_weights)) * 0.01 model2 = GroupLassoClassifier(group_ids=group_ids, random_state=RANDOM_STATE) model2.set_params(**model.get_params()) model2.set_params(initial_weights=model_weights) model2.fit(x_train, y_train) assert model2.n_iter_ < model.n_iter_ assert np.linalg.norm(model2.coef_ - model.coef_, 2) < 5e-2 def test_classifier_vs_sklearn_LogisticRegression(self): """ compare with lasso(L1 logistic regression) of sklearn. group lasso become normal lasso if every feature is differenet group with each other. """ data = load_breast_cancer() x = StandardScaler().fit_transform(data.data) y = data.target group_ids =
np.arange(x.shape[1])
numpy.arange
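The comparison against scikit-learn's Lasso in the tests above relies on the penalty identity stated in the docstring: with one feature per group, the group-lasso penalty degenerates to the ordinary L1 penalty. A minimal standalone sketch of that identity (the helper names and the unweighted per-group convention are assumptions for illustration; GroupLassoRegressor itself may weight groups differently):

import numpy as np

def group_lasso_penalty(coef, group_ids, alpha):
    # Unweighted group-lasso penalty: alpha times the sum of per-group L2 norms.
    return alpha * sum(np.linalg.norm(coef[group_ids == g])
                       for g in np.unique(group_ids))

def l1_penalty(coef, alpha):
    # Ordinary lasso (L1) penalty, as used by sklearn's Lasso.
    return alpha * np.sum(np.abs(coef))

coef = np.array([0.5, -1.2, 0.0, 3.0])
singleton_groups = np.arange(len(coef))   # every feature is its own group
assert np.isclose(group_lasso_penalty(coef, singleton_groups, 1.0),
                  l1_penalty(coef, 1.0))  # ||w_g||_2 == |w_g| for singleton groups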
import sys import warnings import math import pdb import itertools import numpy as np from utils import reset_wrapper, step_wrapper from scipy.ndimage.filters import convolve1d as convolve import os import copy import pygame from numba import njit, jit from collections import deque @njit def angle_between(v1, v2): v1_conv = v1.astype(np.dtype("float")) v2_conv = v2.astype(np.dtype("float")) return np.abs( np.arctan2( np.linalg.det(np.stack((v1_conv, v2_conv))), np.dot(v1_conv, v2_conv), ) ) @njit def total_angle_between(v1, v2): """ Calculate total angle between v1 and v2. Resulting angle is in range [-pi, pi]. :param v1: first vector. :type v1: np.array :param v2: second vector. :type v2: np.array :return: angle between v1 and v2, in range [-pi, pi]. :rtype: float. """ v1_conv = v1.astype(np.dtype("float")) v2_conv = v2.astype(np.dtype("float")) return np.arctan2( np.linalg.det(np.stack((v1_conv, v2_conv))), np.dot(v1_conv, v2_conv), ) @njit def dist_2d(v1, v2): return math.sqrt((v1[0] - v2[0]) ** 2 + (v1[1] - v2[1]) ** 2) @njit def norm_2d(vector): return math.sqrt(vector[0] ** 2 + vector[1] ** 2) def deg_to_rad(deg): return deg * np.pi / 180 def rad_to_deg(rad): return rad * 180 / np.pi def get_rot_matrix(theta): """ returns the rotation matrix given a theta value rotates in the counter clockwise direction """ return np.asarray( [[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]] ) def arange_orientation_info(dim_vector_8): # converts the 8 dim vector of orientation to # a 9 dim vector, for visulization purposes orient_disp_vector = np.zeros(9) j = 0 for i in range(dim_vector_8.shape[0]): if i == 4: j += 1 orient_disp_vector[j] = dim_vector_8[i] return orient_disp_vector """ def get_abs_orientation(agent_state, orientation_approximator): #returns the current absolute binned orientation of the agent #one of the 8 directions. Dim:8 (this is the default case) #for the default case, it additionally returns a 9 dimensional vector #if no orientation information is provided it returns 4. #works for the orientation approximator 0 1 2 3 4 5 6 7 ############ #for other cases, it just returns the orientation. #if no orientation information is provided, it returns -1. no_of_directions = len(orientation_approximator) angle_diff= np.zeros(no_of_directions) abs_approx_orientation = None if no_of_directions==8: #the default #will return the vector only if the orientation_approximator is the default 8-dir one. abs_approx_orientation = np.zeros(9) else: abs_approx_orientation = np.zeros(no_of_directions) orientation = agent_state['orientation'] if orientation is None: #straight up orientation = 1 elif np.linalg.norm(orientation)==0: if no_of_directions==8: orientation = 1 else: orientation = 1 else: for i in range(len(orientation_approximator)): #print('The orientation val') #print(orientation) angle_diff[i] = angle_between(orientation_approximator[i], orientation) orientation = np.argmin(angle_diff) if no_of_directions == 8: if orientation >=4: orientation += 1 abs_approx_orientation[orientation] = 1 return abs_approx_orientation, orientation return abs_approx_orientation, orientation """ def get_abs_orientation(agent_state, orientation_approximator): """ #returns the current absolute binned orientation of the agent #one of the 8 directions. Dim:8 (this is the default case) #for the default case, it additionally returns a 9 dimensional vector #if no orientation information is provided it returns 4. 
#works for the orientation approximator 0 1 2 7 3 6 5 4 ############ #for other cases, it just returns the orientation. #if no orientation information is provided, it returns -1. """ no_of_directions = len(orientation_approximator) angle_diff = np.zeros(no_of_directions) min_thresh = 0.001 abs_approx_orientation = None if no_of_directions == 8: # the default # will return the vector only if the orientation_approximator is the default 8-dir one. abs_approx_orientation = np.zeros(9) else: abs_approx_orientation = np.zeros(no_of_directions) orientation = agent_state["orientation"] if orientation is None: # straight up orientation = 1 else: for i in range(len(orientation_approximator)): # print('The orientation val') # print(orientation) angle_diff[i] = angle_between( orientation_approximator[i], orientation ) orientation = np.argmin(angle_diff) abs_approx_orientation[orientation] = 1 return abs_approx_orientation, orientation def get_rel_orientation(prev_frame_info, agent_state, goal_state): """ Calculates and bins the angle between (agent_pos - goal_pos) and agent velocity. in effect, this is the "error" in the agent's heading. """ # returns the relative orientation of the agent with the direction # of the goal. # Primarily for use in IRL relative_orientation_vector = np.zeros(4) vector_to_goal = goal_state - agent_state["position"] if prev_frame_info is None: agent_orientation = np.array([-1, 0]) else: agent_orientation = ( agent_state["position"] - prev_frame_info["position"] ) diff_in_angle = angle_between(vector_to_goal, agent_orientation) # pdb.set_trace() if diff_in_angle < np.pi / 8: rel_orientation = 0 elif diff_in_angle < np.pi / 4 and diff_in_angle >= np.pi / 8: rel_orientation = 1 elif diff_in_angle < np.pi * 3 / 4 and diff_in_angle >= np.pi / 4: rel_orientation = 2 else: rel_orientation = 3 relative_orientation_vector[rel_orientation] = 1 return relative_orientation_vector def get_rel_goal_orientation( orientation_approximator, rel_orient_conv, agent_state, agent_abs_orientation, goal_state, ): """ Calculates a vector from the agent to the goal. This vector is in the agent's coordinate system, e.g. zero degrees is forward. This vector is binned into a one hot vector based on orientation_approximator. 
""" # returns the relative orientation of the goal wrt to the agent # Dim:8 no_of_directions = len(orientation_approximator) angle_diff = np.zeros(no_of_directions) relative_orientation_vector = np.zeros(no_of_directions) rot_matrix = get_rot_matrix(rel_orient_conv[agent_abs_orientation]) # translate the point so that the agent sits at the center of the coordinates # before rtotation vec_to_goal = goal_state - agent_state["position"] # rotate the coordinates to get the relative coordinates wrt the agent rel_coord_goal = np.matmul(rot_matrix, vec_to_goal) relative_goal = {} relative_goal["orientation"] = rel_coord_goal relative_orientation_vector, _ = get_abs_orientation( relative_goal, orientation_approximator ) return relative_orientation_vector def discretize_information(information, information_slabs): # given a piece of information(scalar), this function returns the correct # slab in which the information belongs, based on the slab information # information_slab(list)provided for i in range(len(information_slabs) - 1): if ( information >= information_slabs[i] and information < information_slabs[i + 1] ): return i # if does not classify in any information slabs return None def calculate_social_forces( agent_state, obstacle_state, agent_width, obstacle_width, a, b, lambda_val ): # agent_state and obstacle_state are dictionaries with the following information: # position, orientation and speed r_i_j = agent_width / 2 + obstacle_width / 2 d_i_j = np.linalg.norm( agent_state["position"] - obstacle_state["position"] ) @njit def radial_density_features(agent_position, pedestrian_positions, radius): """ implements the 'density features' from: IRL Algorithms and Features for Robot navigation in Crowds: Vasquez et. al :param agent_position: position of agent. :type agent_position: numpy array or tuple. :param pedestrian_positions: list or array of pedestrian positions. :type pedestrian_positions: list or np array of tuples or np arrays. """ pedestrian_count = 0 # Using indexing necessary for Numba to work for ped_idx in range(len(pedestrian_positions)): if dist_2d(pedestrian_positions[ped_idx], agent_position) <= radius: pedestrian_count += 1 if pedestrian_count >= 5: return np.array([0.0, 0.0, 1.0]) if pedestrian_count < 2: return np.array([1.0, 0.0, 0.0]) elif 2 <= pedestrian_count < 5: return np.array([0.0, 1.0, 0.0]) else: raise ValueError @njit def speed_features( agent_velocity, pedestrian_velocities, lower_threshold=0.015, upper_threshold=0.025, ): """ Computes speed features as described in Vasquez et. al's paper: "Learning to navigate through crowded environments". :param agent_velocity: velocity of agent (robot) :type agent_velocity: 2D np.array or tuple :param pedestrian_velocities: velocities of pedestrians :type pedestrian_velocities: list or np.array of 2d arrays or tuples. :param lower_threshold: Lower magnitude of speed threshold threshold used for binning. This is 0.015 in the paper. :type lower_threshold: float :param upper_threshold: Higher magnitude of speed threshold used for binning. This is 0.025 in the paper. 
:type upper_threshold: float :return: magnitude feature np.array of shape (3,) :rtype: float np.array """ assert lower_threshold < upper_threshold feature = np.zeros(3) for idx in range(len(pedestrian_velocities)): pedestrian_vel = pedestrian_velocities[idx] speed = dist_2d(pedestrian_vel, agent_velocity) # put value into proper bin if 0 <= speed < lower_threshold: feature[0] += 1 elif lower_threshold <= speed < upper_threshold: feature[1] += 1 elif speed >= upper_threshold: feature[2] += 1 else: raise ValueError( "Error in binning speed. speed does not fit into any bin." ) return feature @njit def orientation_features( agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities ): """ Computes the orientation features described in Vasquez et. al's paper: "Learning to navigate through crowded environments". :param agent_position: position of the agent (robot) :type agent_position: 2d np.array or tuple :param agent_velocity: velocity of the agent (robot) :type agent_velocity: 2d np.array or tuple :param pedestrian_positions: positions of pedestrians. :type pedestrian_positions: np.array or list, containing 2d arrays or tuples. :param pedestrian_velocities: velocities of pedestrians. :type pedestrian_velocities: np.array or list, containing 2d arrays or tuples. :return: orientation feature vector. :rtype: float np.array of shape (3,) """ feature = np.zeros(3) # Check that same number of pedestrian positions and velocities are passed in. assert len(pedestrian_positions) == len(pedestrian_velocities) for ped_id in range(len(pedestrian_positions)): relative_pos = agent_position - pedestrian_positions[ped_id] relative_vel = agent_velocity - pedestrian_velocities[ped_id] # angle_between produces only positive angles angle = angle_between(relative_pos, relative_vel) # put into bins # Bins adjusted to work with angle_between() (i.e. abs value of angles.) if 0.75 * np.pi < angle <= np.pi: feature[0] += 1 elif 0.25 * np.pi <= angle < 0.75 * np.pi: feature[1] += 1 elif 0.0 <= angle < 0.25 * np.pi: feature[2] += 1 else: raise ValueError( "Error in binning orientation. Orientation does not fit into any bin." ) return feature @njit def velocity_features( agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities, lower_speed_threshold=0.015, upper_speed_threshold=0.025, ): """ Computes the velocity features described in Vasquez et. al's paper: "Learning to navigate through crowded environments". :param agent_position: position of the agent (robot) :type agent_position: 2d np.array or tuple :param agent_velocity: velocity of the agent (robot) :type agent_velocity: 2d np.array or tuple :param pedestrian_positions: positions of pedestrians. :type pedestrian_positions: 2d float np.array. :param lower_speed_threshold: Lower magnitude of speed threshold threshold used for binning. This is 0.015 in the paper. :type lower_threshold: float :param upper_speed_threshold: Higher magnitude of speed threshold threshold used for binning. This is 0.025 in the paper. :type upper_threshold: float :param pedestrian_velocities: velocities of pedestrians. :type pedestrian_velocities: 2d float np.array. :param lower_threshold: Lower magnitude of speed threshold threshold used for binning. This is 0.015 in the paper. :type lower_threshold: float :param upper_threshold: Higher magnitude of speed threshold threshold used for binning. This is 0.025 in the paper. :type upper_threshold: float :return: orientation feature vector. 
:rtype: float np.array of shape (3,) """ assert lower_speed_threshold < upper_speed_threshold feature = np.zeros((3, 3)) assert len(pedestrian_positions) == len(pedestrian_velocities) # used to group pedestrians with the same orientation bin together using # their ID. ped_sorted_by_orientation = [np.empty(0, dtype=np.int64)] * 3 for ped_id in range(len(pedestrian_positions)): relative_pos = agent_position - pedestrian_positions[ped_id] relative_vel = agent_velocity - pedestrian_velocities[ped_id] # angle_between produces only positive angles if (relative_pos == np.zeros(2)).all() or ( relative_vel == np.zeros(2) ).all(): # cannot calculate angle between zero vectors angle = 0.0 else: angle = angle_between(relative_pos, relative_vel) # put into bins # Bins adjusted to work with angle_between() (i.e. abs value of angles.) if 0.75 * np.pi < angle <= np.pi: ped_sorted_by_orientation[0] = np.append( ped_sorted_by_orientation[0], ped_id ) elif 0.25 * np.pi <= angle < 0.75 * np.pi: ped_sorted_by_orientation[1] = np.append( ped_sorted_by_orientation[1], ped_id ) elif 0.0 <= angle < 0.25 * np.pi: ped_sorted_by_orientation[2] = np.append( ped_sorted_by_orientation[2], ped_id ) else: raise ValueError("Orientation does not fit into any bin.") for idx, ped_ids in enumerate(ped_sorted_by_orientation): velocities = pedestrian_velocities[ped_ids] if not velocities.size: break else: mean_speeds = np.mean(np.abs(velocities)) # bin speeds if 0 <= mean_speeds < lower_speed_threshold: feature[idx, 0] = 1 elif lower_speed_threshold <= mean_speeds < upper_speed_threshold: feature[idx, 1] = 1 elif mean_speeds >= upper_speed_threshold: feature[idx, 2] = 1 else: raise ValueError("Average speed does not fit in any bins.") return feature.flatten() def social_force_features( agent_radius, agent_position, agent_velocity, pedestrian_positions ): """ Computes the social forces features described in Vasquez et. al's paper: "Learning to navigate through crowded environments". :param agent_radius: radius of agent(s) in the environment. Note: this is the radius of the agent's graphical circle, not a radius around the agent. :type agent_radius: float. :param agent_position: position of the agent (robot) :type agent_position: 2d np.array or tuple :param agent_velocity: velocity of the agent (robot) :type agent_velocity: 2d np.array or tuple :param pedestrian_positions: positions of pedestrians. :type pedestrian_positions: 2d float np.array. :param pedestrian_velocities: velocities of pedestrians. :type pedestrian_velocities: 2d float np.array. :return: orientation feature vector. :rtype: float np.array of shape (3,) """ # in the paper formula, 'i' is our agent, while 'j's are the pedestrians. rel_positions = pedestrian_positions - agent_position rel_distances = np.linalg.norm(rel_positions, axis=1) normalized_rel_positions = rel_positions / np.max(rel_distances) assert rel_positions.shape == normalized_rel_positions.shape rel_angles = np.zeros(rel_distances.shape) # used to group pedestrians with the same orientation bin together using # their ID. feature = np.zeros(3) ped_orientation_bins = [np.empty(0, dtype=np.int64)] * 3 for ped_id in range(len(pedestrian_positions)): relative_pos = rel_positions[ped_id] # angle_between produces only positive angles angle = angle_between(relative_pos, agent_velocity) rel_angles[ped_id] = angle # put into bins # Bins adjusted to work with angle_between() (i.e. abs value of angles.) 
if 0.75 * np.pi <= angle <= np.pi: ped_orientation_bins[0] = np.append( ped_orientation_bins[0], ped_id ) elif 0.25 * np.pi <= angle < 0.75 * np.pi: ped_orientation_bins[1] = np.append( ped_orientation_bins[1], ped_id ) elif 0.0 <= angle < 0.25 * np.pi: ped_orientation_bins[2] = np.append( ped_orientation_bins[2], ped_id ) else: raise ValueError("Orientation does not fit into any bin.") exp_multiplier = np.exp(2 * agent_radius - rel_distances).reshape(-1, 1) anisotropic_term = (2.0 - 0.5 * (1.0 + np.cos(rel_angles))).reshape(-1, 1) social_forces = ( exp_multiplier * normalized_rel_positions * anisotropic_term ) forces_above_threshold = np.linalg.norm(social_forces, axis=1) > 0.5 feature[0] = np.sum(forces_above_threshold[ped_orientation_bins[0]]) feature[1] = np.sum(forces_above_threshold[ped_orientation_bins[1]]) feature[2] = np.sum(forces_above_threshold[ped_orientation_bins[2]]) return feature @njit def angle_to_goal_features(goal_position, agent_position, agent_orientation): """ computes features based on the error in the agent's heading towards the goal. Error is the angle between agent heading vector and vector (goal_pos - agent_pos). The features are binary features based on where the angle fits in the bins [0-pi/8, pi/8-pi/4, pi/4-3/4pi, 3/4pi-pi]. This is meant to mimic the goal_rel_orientation function. :param goal_position: position of the goal. :type goal_position: 2d numpy vector. :param agent_position: position of agent. :type agent_position: 2d numpy vector. :param agent_orientation: orientation vector of agent. :type agent_orientation: 2d numpy vector. :raises ValueError: If angle does not fit in the [0,pi] interval, something unexpected has happened. :return: feature vector representing binned angles. :rtype: float np.array """ features = np.zeros(4) vector_to_goal = goal_position - agent_position angle = angle_between(agent_orientation, vector_to_goal) # bin in angle bins if 0.0 <= angle < 0.125 * np.pi: features[0] = 1.0 elif 0.125 * np.pi <= angle < 0.25 * np.pi: features[1] = 1.0 elif 0.25 * np.pi <= angle < 0.75 * np.pi: features[2] = 1.0 elif 0.75 * np.pi <= angle <= np.pi: features[3] = 1.0 else: raise ValueError("Cannot bin angle in [0,pi] interval.") return features @njit def vector_to_goal_features(goal_position, agent_position, agent_orientation): features = np.zeros(8) vector_to_goal = goal_position - agent_position angle = total_angle_between(agent_orientation, vector_to_goal) # mimic finding closest relative vector by binning angle if -0.125 * np.pi <= angle < 0.125 * np.pi: features[0] = 1.0 elif 0.125 * np.pi <= angle < 0.375 * np.pi: features[1] = 1.0 elif 0.375 * np.pi <= angle < 0.625 * np.pi: features[2] = 1.0 elif 0.625 * np.pi <= angle < 0.875 * np.pi: features[3] = 1.0 elif 0.875 * np.pi <= angle <= np.pi: features[4] = 1.0 elif -np.pi <= angle < -0.875 * np.pi: features[4] = 1.0 elif -0.875 * np.pi <= angle < -0.625 * np.pi: features[5] = 1.0 elif -0.625 * np.pi <= angle < -0.375 * np.pi: features[6] = 1.0 elif -0.375 * np.pi <= angle < -0.125 * np.pi: features[7] = 1.0 else: raise ValueError("Faled to bin angles in [-pi, pi] range.") return features @njit def orientation_change_features(new_orientation, old_orientation): thresholds = np.array( [0, np.pi / 9, 2 * np.pi / 9, np.pi * 3 / 9, 4 * np.pi / 9] ) if old_orientation is None: print("Warning: old orientation is none, assuming old=new.") orientation_change = 0.0 else: orientation_change = angle_between(new_orientation, old_orientation) # bin based on thresholds features = np.zeros(5) index = 
np.argmin(np.abs(orientation_change - thresholds)) features[index] = 1.0 return features @njit def SAM_features( agent_position, agent_velocity, pedestrian_positions, pedestrian_velocities, inner_radius, outer_radius, lower_speed_threshold, upper_speed_threshold, ): """ Calculates entire sam features based on Fahad et. al's 2018 paper: "Learning How Pedestrians Navigate: A Deep Inverse Reinforcement Learning Approach" :param agent_position: Position of the agent. :type agent_position: 2d numpy float array. :param agent_velocity: Agent velocity. :type agent_velocity: 2d numpy float array. :param pedestrian_positions: Px2 vector of the position of all pedestrians. :type pedestrian_positions: Px2 numpy float array where P is the number of pedestrians. :param pedestrian_velocities: Px2 vector of the velocity of all pedestrians. :type pedestrian_velocities: Px2 numpy float array where P is the number of pedestrians. :param inner_radius: Radius of inner circle of feature extractor. :type inner_radius: float. :param outer_radius: Radius of outer circle of feature extractor. :type outer_radius: float. :param lower_speed_threshold: lower binning threshold for speed. :type lower_speed_threshold: float. :param upper_speed_threshold: upper binning threshold for speed. :type upper_speed_threshold: float. :return: tuple (SAM_features, density) where SAM_features are the features and density is total number of pedestrians inside all bins. :rtype: tuples(numpy 1d array, float) """ num_pedestrians = pedestrian_positions.shape[0] # classify pedestrians in either inner or outer ring ring_designation = np.zeros(num_pedestrians) for idx in range(num_pedestrians): ped_distance = dist_2d(agent_position, pedestrian_positions[idx]) if ped_distance <= outer_radius: if ped_distance > inner_radius: ring_designation[idx] = 2 else: ring_designation[idx] = 1 inner_ped_positions = pedestrian_positions[ring_designation == 1] inner_ped_velocities = pedestrian_velocities[ring_designation == 1] outer_ped_positions = pedestrian_positions[ring_designation == 2] outer_ped_velocities = pedestrian_velocities[ring_designation == 2] assert inner_ped_positions.shape[0] == inner_ped_velocities.shape[0] assert outer_ped_positions.shape[0] == outer_ped_velocities.shape[0] num_inner_pedestrians = inner_ped_positions.shape[0] num_outer_pedestrians = outer_ped_positions.shape[0] # classify pedestrians in each bin, and add up their velocities per bin peds_in_bin_counts = np.zeros(10) average_velocities = np.zeros((10, 2)) for idx in range(num_inner_pedestrians): ped_relative_position = inner_ped_positions[idx] - agent_position ped_velocity = inner_ped_velocities[idx] angle = total_angle_between(agent_velocity, ped_relative_position) if -0.25 * np.pi < angle < 0.25 * np.pi: peds_in_bin_counts[0] += 1 average_velocities[0] += ped_velocity elif 0.25 * np.pi <= angle < 0.75 * np.pi: peds_in_bin_counts[1] += 1 average_velocities[1] += ped_velocity elif 0.75 * np.pi <= angle < np.pi or -np.pi < angle < -0.75 * np.pi: peds_in_bin_counts[2] += 1 average_velocities[2] += ped_velocity elif -0.75 * np.pi <= angle <= -0.25 * np.pi: peds_in_bin_counts[3] += 1 average_velocities[3] += ped_velocity else: raise ValueError("angle couldn't be binned.") for idx in range(num_outer_pedestrians): ped_relative_position = outer_ped_positions[idx] - agent_position ped_velocity = outer_ped_velocities[idx] angle = total_angle_between(agent_velocity, ped_relative_position) if -0.25 * np.pi < angle < 0.25 * np.pi: peds_in_bin_counts[4] += 1 average_velocities[4] += 
ped_velocity elif 0.25 * np.pi <= angle < 0.5 * np.pi: peds_in_bin_counts[9] += 1 average_velocities[9] += ped_velocity elif 0.5 * np.pi <= angle < 0.75 * np.pi: peds_in_bin_counts[8] += 1 average_velocities[8] += ped_velocity elif 0.75 * np.pi <= angle < np.pi or -np.pi < angle < -0.75 * np.pi: peds_in_bin_counts[7] += 1 average_velocities[7] += ped_velocity elif -0.5 * np.pi <= angle < -0.25 * np.pi: peds_in_bin_counts[5] += 1 average_velocities[5] += ped_velocity elif -0.75 * np.pi <= angle < -0.5 * np.pi: peds_in_bin_counts[6] += 1 average_velocities[6] += ped_velocity else: raise ValueError("angle couldn't be binned.") nonzero_mask = peds_in_bin_counts != 0 average_velocities[nonzero_mask] /= peds_in_bin_counts[ nonzero_mask ].reshape(-1, 1) heading_feat_vect = np.zeros((10, 3)) velocity_feat_vect = np.zeros((10, 3)) # 0 degree degree vector used as reference for judging absolute angles. angle_origin = np.array([1.0, 0.0]) for idx in range(len(average_velocities)): if peds_in_bin_counts[idx] == 0.0: continue relative_velocity = agent_velocity - average_velocities[idx] heading = angle_between(relative_velocity, agent_velocity) heading_thresholds = np.array([0.25 * np.pi, 0.75 * np.pi]) heading_idx = np.digitize(np.array(heading), heading_thresholds) heading_feat_vect[idx][heading_idx] = 1 vel_idx = np.digitize( np.array(norm_2d(relative_velocity)), np.array([lower_speed_threshold, upper_speed_threshold]), ) velocity_feat_vect[idx][vel_idx] = 1 velocity_feat_vect = np.concatenate( (heading_feat_vect, velocity_feat_vect), axis=1 ).reshape(-1, 1) SAM_vector = np.concatenate( (peds_in_bin_counts.reshape(-1, 1), velocity_feat_vect) ).flatten() density = np.sum(peds_in_bin_counts) return SAM_vector, density @njit def distance_from_goal_features(agent_position, goal_position): """ Calculates manhattan distance between agent position and goal position. This distance is calculated in a discrete manner, taken from floor of distance vector. Which results in an integer. :param agent_position: position of agent. :type agent_position: 2d np float array. :param goal_position: position of goal. :type goal_position: 2d np float array. :return: manhattan distance from goal. :rtype: int. """ distance = goal_position - agent_position manhattan_distance = np.sum(np.abs(
np.floor(distance)
numpy.floor
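As a usage illustration of the density feature defined above: radial_density_features counts the pedestrians within the given radius of the agent and one-hot encodes the count into the bins fewer-than-2, 2-to-4, and 5-or-more. A small sketch with made-up positions (assumes the function defined above is in scope and numba is installed):

import numpy as np

agent_position = np.array([0.0, 0.0])
pedestrian_positions = np.array([
    [0.5, 0.0],   # within a radius of 1.0
    [0.0, 0.8],   # within
    [3.0, 3.0],   # outside
])

# Two pedestrians are inside the radius, so the middle bin (2 <= count < 5) fires.
features = radial_density_features(agent_position, pedestrian_positions, 1.0)
assert np.array_equal(features, np.array([0.0, 1.0, 0.0]))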
# -*- coding: utf-8 -*- """Tests of array utility functions.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os.path as op import numpy as np from pytest import raises from ..array import (_unique, _normalize, _index_of, _in_polygon, _spikes_in_clusters, _spikes_per_cluster, _flatten_per_cluster, _get_data_lim, select_spikes, Selector, chunk_bounds, regular_subset, excerpts, data_chunk, grouped_mean, get_excerpts, _concatenate_virtual_arrays, _range_from_slice, _pad, _get_padded, read_array, write_array, ) from phy.utils._types import _as_array from phy.utils.testing import _assert_equal as ae from ..mock import artificial_spike_clusters #------------------------------------------------------------------------------ # Test utility functions #------------------------------------------------------------------------------ def test_range_from_slice(): """Test '_range_from_slice'.""" class _SliceTest(object): """Utility class to make it more convenient to test slice objects.""" def __init__(self, **kwargs): self._kwargs = kwargs def __getitem__(self, item): if isinstance(item, slice): return _range_from_slice(item, **self._kwargs) with raises(ValueError): _SliceTest()[:] with raises(ValueError): _SliceTest()[1:] ae(_SliceTest()[:5], [0, 1, 2, 3, 4]) ae(_SliceTest()[1:5], [1, 2, 3, 4]) with raises(ValueError): _SliceTest()[::2] with raises(ValueError): _SliceTest()[1::2] ae(_SliceTest()[1:5:2], [1, 3]) with raises(ValueError): _SliceTest(start=0)[:] with raises(ValueError): _SliceTest(start=1)[:] with raises(ValueError): _SliceTest(step=2)[:] ae(_SliceTest(stop=5)[:], [0, 1, 2, 3, 4]) ae(_SliceTest(start=1, stop=5)[:], [1, 2, 3, 4]) ae(_SliceTest(stop=5)[1:], [1, 2, 3, 4]) ae(_SliceTest(start=1)[:5], [1, 2, 3, 4]) ae(_SliceTest(start=1, step=2)[:5], [1, 3]) ae(_SliceTest(start=1)[:5:2], [1, 3]) ae(_SliceTest(length=5)[:], [0, 1, 2, 3, 4]) with raises(ValueError): _SliceTest(length=5)[:3] ae(_SliceTest(length=5)[:10], [0, 1, 2, 3, 4]) ae(_SliceTest(length=5)[:5], [0, 1, 2, 3, 4]) ae(_SliceTest(start=1, length=5)[:], [1, 2, 3, 4, 5]) ae(_SliceTest(start=1, length=5)[:6], [1, 2, 3, 4, 5]) with raises(ValueError): _SliceTest(start=1, length=5)[:4] ae(_SliceTest(start=1, step=2, stop=5)[:], [1, 3]) ae(_SliceTest(start=1, stop=5)[::2], [1, 3]) ae(_SliceTest(stop=5)[1::2], [1, 3]) def test_pad(): arr = np.random.rand(10, 3) ae(_pad(arr, 0, 'right'), arr[:0, :]) ae(_pad(arr, 3, 'right'), arr[:3, :]) ae(_pad(arr, 9), arr[:9, :]) ae(_pad(arr, 10), arr) ae(_pad(arr, 12, 'right')[:10, :], arr) ae(_pad(arr, 12)[10:, :], np.zeros((2, 3))) ae(_pad(arr, 0, 'left'), arr[:0, :]) ae(_pad(arr, 3, 'left'), arr[7:, :]) ae(_pad(arr, 9, 'left'), arr[1:, :]) ae(_pad(arr, 10, 'left'), arr) ae(_pad(arr, 12, 'left')[2:, :], arr) ae(_pad(arr, 12, 'left')[:2, :], np.zeros((2, 3))) with raises(ValueError): _pad(arr, -1) def test_get_padded(): arr = np.array([1, 2, 3])[:, np.newaxis] with raises(RuntimeError): ae(_get_padded(arr, -2, 5).ravel(), [1, 2, 3, 0, 0]) ae(_get_padded(arr, 1, 2).ravel(), [2]) ae(_get_padded(arr, 0, 5).ravel(), [1, 2, 3, 0, 0]) ae(_get_padded(arr, -2, 3).ravel(), [0, 0, 1, 2, 3]) def test_get_data_lim(): arr = np.random.rand(10, 5) assert 0 < _get_data_lim(arr) < 1 assert 0 < _get_data_lim(arr, 2) < 1 def test_unique(): """Test _unique() function""" _unique([]) n_spikes = 300 n_clusters = 3 spike_clusters = artificial_spike_clusters(n_spikes, n_clusters) 
ae(_unique(spike_clusters), np.arange(n_clusters)) def test_normalize(): """Test _normalize() function.""" n_channels = 10 positions = 1 + 2 * np.random.randn(n_channels, 2) # Keep ration is False. positions_n = _normalize(positions) x_min, y_min = positions_n.min(axis=0) x_max, y_max = positions_n.max(axis=0) np.allclose(x_min, 0.) np.allclose(x_max, 1.) np.allclose(y_min, 0.) np.allclose(y_max, 1.) # Keep ratio is True. positions_n = _normalize(positions, keep_ratio=True) x_min, y_min = positions_n.min(axis=0) x_max, y_max = positions_n.max(axis=0) np.allclose(min(x_min, y_min), 0.) np.allclose(max(x_max, y_max), 1.) np.allclose(x_min + x_max, 1) np.allclose(y_min + y_max, 1) def test_index_of(): """Test _index_of.""" arr = [36, 42, 42, 36, 36, 2, 42] lookup = _unique(arr) ae(_index_of(arr, lookup), [1, 2, 2, 1, 1, 0, 2]) def test_as_array(): ae(_as_array(3), [3]) ae(_as_array([3]), [3]) ae(_as_array(3.), [3.]) ae(_as_array([3.]), [3.]) with raises(ValueError): _as_array(map) def test_in_polygon(): polygon = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]] points = np.random.uniform(size=(100, 2), low=-1, high=1) idx_expected = np.nonzero((points[:, 0] > 0) & (points[:, 1] > 0) & (points[:, 0] < 1) & (points[:, 1] < 1))[0] idx = np.nonzero(_in_polygon(points, polygon))[0] ae(idx, idx_expected) #------------------------------------------------------------------------------ # Test read/save #------------------------------------------------------------------------------ def test_read_write(tempdir): arr = np.arange(10).astype(np.float32) path = op.join(tempdir, 'test.npy') write_array(path, arr) ae(read_array(path), arr) ae(read_array(path, mmap_mode='r'), arr) #------------------------------------------------------------------------------ # Test virtual concatenation #------------------------------------------------------------------------------ def test_concatenate_virtual_arrays_1(): arrs = [np.arange(5), np.arange(10, 12), np.array([0])] c = _concatenate_virtual_arrays(arrs, scaling=1) assert c.shape == (8,) assert c._get_recording(3) == 0 assert c._get_recording(5) == 1 ae(c[:], [0, 1, 2, 3, 4, 10, 11, 0]) ae(c[0], [0]) ae(c[4], [4]) ae(c[5], [10]) ae(c[6], [11]) ae(c[4:6], [4, 10]) ae(c[:6], [0, 1, 2, 3, 4, 10]) ae(c[4:], [4, 10, 11, 0]) ae(c[4:-1], [4, 10, 11]) def test_concatenate_virtual_arrays_2(): arrs = [np.zeros((2, 2)), np.ones((3, 2))] c = _concatenate_virtual_arrays(arrs) assert c.shape == (5, 2) ae(c[:, :], np.vstack((
np.zeros((2, 2))
numpy.zeros
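The _pad assertions above pin down a simple contract: the result has exactly n rows, truncation keeps the first rows for 'right' and the last rows for 'left', missing rows are zero-filled on the named side, and a negative n raises ValueError. A rough reference sketch of that behaviour, inferred only from the tests (the name pad_reference is hypothetical; phy's actual _pad may be implemented differently):

import numpy as np

def pad_reference(arr, n, side='right'):
    # Return an array with exactly n rows, truncating or zero-padding on `side`.
    if n < 0:
        raise ValueError("n must be non-negative")
    if n <= arr.shape[0]:
        return arr[:n] if side == 'right' else arr[arr.shape[0] - n:]
    out = np.zeros((n,) + arr.shape[1:], dtype=arr.dtype)
    if side == 'right':
        out[:arr.shape[0]] = arr
    else:
        out[n - arr.shape[0]:] = arr
    return out

arr = np.random.rand(10, 3)
assert np.array_equal(pad_reference(arr, 3, 'left'), arr[7:, :])
assert np.array_equal(pad_reference(arr, 12)[10:, :], np.zeros((2, 3)))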
""" Tests for the Pytorch dataset classes used to load the training data. """ from pathlib import Path import numpy as np import torch import xarray as xr from quantnn.qrnn import QRNN from quantnn.normalizer import Normalizer from quantnn.models.pytorch.xception import XceptionFpn from gprof_nn import sensors from gprof_nn.data import get_test_data_path from gprof_nn.data.training_data import ( load_variable, decompress_scene, decompress_and_load, remap_scene, GPROF_NN_1D_Dataset, GPROF_NN_3D_Dataset, SimulatorDataset, ) DATA_PATH = get_test_data_path() def test_to_xarray_dataset_1d_gmi(): """ Ensure that converting training data to 'xarray.Dataset' yield same Tbs as the ones found in the first batch of the training data when data is not shuffled. """ input_file = DATA_PATH / "gmi" / "gprof_nn_gmi_era5.nc.gz" dataset = GPROF_NN_1D_Dataset( input_file, batch_size=64, normalize=False, shuffle=False, targets=["surface_precip", "rain_water_content"] ) # # Conversion using datasets 'x' attribute. # data = dataset.to_xarray_dataset() x, y = dataset[0] x = x.numpy() tbs = data.brightness_temperatures.data[:x.shape[0]] tbs_ref = x[:, :15] valid = np.isfinite(tbs_ref) assert np.all(np.isclose(tbs[valid], tbs_ref[valid])) t2m = data.two_meter_temperature.data[:x.shape[0]] t2m_ref = x[:, 15] assert np.all(
np.isclose(t2m, t2m_ref)
numpy.isclose
import numpy as np import nanonet.tb as tb from test.test_hamiltonian_module import expected_bulk_silicon_band_structure def test_simple_atomic_chain(): """ """ site_energy = -1.0 coupling = -1.0 l_const = 1.0 a = tb.Orbitals('A') a.add_orbital(title='s', energy=-1, ) xyz_file = """1 H cell A 0.0000000000 0.0000000000 0.0000000000 """ tb.set_tb_params(PARAMS_A_A={'ss_sigma': -1.0}) h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=1.1) h.initialize() PRIMITIVE_CELL = [[0, 0, l_const]] h.set_periodic_bc(PRIMITIVE_CELL) num_points = 10 kk = np.linspace(0, 3.14 / l_const, num_points, endpoint=True) band_structure = [] for jj in range(num_points): vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]]) band_structure.append(vals) band_structure = np.array(band_structure) desired_value = site_energy + 2 * coupling * np.cos(l_const * kk) np.testing.assert_allclose(band_structure, desired_value[:, np.newaxis], atol=1e-9) def test_atomic_chain_two_kinds_of_atoms(): """ """ site_energy1 = -1.0 site_energy2 = -2.0 coupling = -1.0 l_const = 2.0 a = tb.Orbitals('A') a.add_orbital(title='s', energy=site_energy1, ) b = tb.Orbitals('B') b.add_orbital(title='s', energy=site_energy2, ) xyz_file = """2 H cell A 0.0000000000 0.0000000000 0.0000000000 B 0.0000000000 0.0000000000 1.0000000000 """ tb.set_tb_params(PARAMS_A_B={'ss_sigma': coupling}) h = tb.HamiltonianSp(xyz=xyz_file, nn_distance=1.1) h.initialize() PRIMITIVE_CELL = [[0, 0, l_const]] h.set_periodic_bc(PRIMITIVE_CELL) num_points = 10 kk = np.linspace(0, 3.14 / 2, num_points, endpoint=True) band_structure = [] for jj in range(num_points): vals, _ = h.diagonalize_periodic_bc([0.0, 0.0, kk[jj]]) band_structure.append(vals) band_structure = np.array(band_structure) desired_value = np.zeros(band_structure.shape) b = site_energy1 + site_energy2 c = site_energy1 * site_energy2 - (2.0 * coupling * np.cos(0.5 * kk * l_const)) ** 2 desired_value[:, 0] = 0.5 * (b -
np.sqrt(b ** 2 - 4.0 * c)
numpy.sqrt
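The closed-form bands asserted in test_atomic_chain_two_kinds_of_atoms come from diagonalising the 2x2 Bloch Hamiltonian of a diatomic chain: the eigenvalues are the roots of x^2 - (e1 + e2)x + e1*e2 - (2*t*cos(k*a/2))^2 = 0, which is exactly the quadratic (b, c) unpacked in the test. A small numerical cross-check of that identity, independent of nanonet (parameter values mirror the test):

import numpy as np

e1, e2, t, a = -1.0, -2.0, -1.0, 2.0

for k in np.linspace(0, np.pi / a, 10):
    # Bloch Hamiltonian of the two-site unit cell; the hopping phase convention
    # does not affect the eigenvalues, only |off_diag| matters.
    off_diag = t * (1 + np.exp(-1j * k * a))
    h_k = np.array([[e1, off_diag], [np.conj(off_diag), e2]])
    bands = np.linalg.eigvalsh(h_k)          # ascending order

    # Closed form used in the test: roots of x^2 - b*x + c = 0.
    b = e1 + e2
    c = e1 * e2 - (2.0 * t * np.cos(0.5 * k * a)) ** 2
    lower = 0.5 * (b - np.sqrt(b ** 2 - 4.0 * c))
    upper = 0.5 * (b + np.sqrt(b ** 2 - 4.0 * c))
    assert np.allclose(bands, [lower, upper])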
import numpy as np from ..utils import * class Add(): def __init__(self): pass def run(self, a, b): return np.add(a, b) # takes the the output variable tensor and input variable tensors # set the grads of the input variable tensors def pass_gradients(self, out_tensor, in_tensor_1, in_tensor_2): assert not out_tensor.get_gradients() is None out_tensor_grad = out_tensor.get_gradients() # in case of bias adding if len(in_tensor_1.shape) < len(out_tensor_grad.shape): in_tensor_1.set_gradients(np.sum(out_tensor_grad, axis = 0)) else: in_tensor_1.set_gradients(out_tensor_grad) if len(in_tensor_2.shape) < len(out_tensor_grad.shape): in_tensor_2.set_gradients(np.sum(out_tensor_grad, axis = 0)) else: in_tensor_2.set_gradients(out_tensor_grad) class Sub(): def __init__(self): pass def run(self, a, b): return np.add(a, -b) def pass_gradients(self, out_tensor, in_tensor_1, in_tensor_2): assert not out_tensor.get_gradients() is None out_tensor_grad = out_tensor.get_gradients() in_tensor_1.set_gradients(out_tensor_grad) in_tensor_2.set_gradients(-out_tensor_grad) class Mul(): def __init__(self): pass def run(self, a, b): return a * b def pass_gradients(self, out_tensor, in_tensor_1, in_tensor_2): assert is_scalar(in_tensor_1.data) or is_scalar(in_tensor_2.data) assert not out_tensor.get_gradients() is None out_tensor_grad = out_tensor.get_gradients() in_tensor_1.set_gradients(np.dot(out_tensor_grad, in_tensor_2.data.T)) in_tensor_2.set_gradients(np.dot(in_tensor_1.data.T, out_tensor_grad)) class Div(): def __init__(self): pass def run(self, a, b): return a / b def pass_gradients(self, out_tensor, in_tensor_1, in_tensor_2): assert is_scalar(in_tensor_2.data) assert not out_tensor.get_gradients() is None out_tensor_grad = out_tensor.get_gradients() in_tensor_1.set_gradients(
np.dot(out_tensor_grad, (1.0 / in_tensor_2.data).T)
numpy.dot
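The branch in Add.pass_gradients that sums over axis 0 encodes the usual backprop rule for a broadcast bias: because the bias is added to every row of a batch, its gradient is the upstream gradient summed over the batch axis. A small standalone finite-difference check of that rule (plain NumPy, independent of the Tensor machinery above):

import numpy as np

rng = np.random.default_rng(0)
x = rng.standard_normal((4, 3))          # batch of 4 rows
bias = rng.standard_normal(3)            # broadcast over the batch axis
upstream = rng.standard_normal((4, 3))   # dL/d(x + bias)

# Analytic bias gradient: sum the upstream gradient over the batch axis.
grad_bias = upstream.sum(axis=0)

# Finite differences on L = sum(upstream * (x + bias)).
eps = 1e-6
numeric = np.array([
    (np.sum(upstream * (x + bias + eps * np.eye(3)[i]))
     - np.sum(upstream * (x + bias - eps * np.eye(3)[i]))) / (2 * eps)
    for i in range(3)
])
assert np.allclose(grad_bias, numeric, atol=1e-4)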
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Jan 28 16:21:46 2021 @author: jiayingweng """ import numpy as np import scipy.linalg as la __all__ = ['generateX', 'generateY'] def generateX(n, p, covstr): """ Generate X for simulation Args: n (int): sample size p (int): number of dimension of X covstr (0-3): covariance structure Returns: X: n times p array """ ## generate X if covstr == 0: covx = np.eye(p) elif covstr == 1: v = 0.5 ** np.arange(p) covx = la.toeplitz(v) elif covstr == 2: offdiag = 0.2 covx = np.ones((p,p)) * offdiag covx = covx + np.eye(p) * (1-offdiag) elif covstr == 3: v = 0.8 ** np.arange(p) covx = la.toeplitz(v) L = np.linalg.cholesky(covx) Z = np.random.randn(p,n) X = (L @ Z).T return(X) def generateY(X, M): """ Generate Y based on X Args: X: input covariate M: model 1-7 uni; 10-15 multi Returns: Y: outcome d: structural dimension p: the dimension of Y b: the true beta """ [n,p] = X.shape ## generate Y if M == 1: # Qian M1 d = 1 q = 1 b = np.zeros((p,d)) y = np.zeros((n,q)) index = np.arange(5) b[index,:] = 1 y[:,0] = np.exp(X @ b[:,0]) + np.random.randn(n) elif M == 2: # Qian M2 d = 2 q = 1 b = np.zeros((p,d)) y = np.zeros((n,q)) index1 = np.arange(4) #np.random.randint(p, size = 5) index2 = np.arange(p-4,p) b[index1,0] = 1 b[index2, 1] = 1 y[:,0] = np.sign(X @ b[:,0]) * np.log( np.abs( X @ b[:,1] + 5 ) ) + 0.2 * np.random.randn(n) elif M == 3: # Tan AOS Model 1 d = 1 q = 1 b = np.zeros((p,d)) y = np.zeros((n,q)) index = np.arange(5) b[index,:] = 1 y[:,0] = np.sin(X @ b[:,0]) ** 2 + X @ b[:,0] + np.random.randn(n) elif M == 4: # Tan AOS Model 2 d = 1 q = 1 b = np.zeros((p,d)) y = np.zeros((n,q)) index = np.arange(5) b[index,:] = 1 y[:,0] = 2 * np.tanh(X @ b[:,0]) + np.random.randn(n) elif M == 5: # <NAME> d = 1 q = 1 b = np.zeros((p,d)) index = np.arange(1) b[index,:] = 1 X = 1/4 * np.sqrt(0.1) * ( np.random.randn(p,n) + 1) + 1/2 * np.sqrt(0.1) * ( np.random.randn(p,n) + 2 ) + 1/4 * np.sqrt(10) * (np.random.randn(p,n) + 1) X = X.T y = np.abs( np.sin( X @ b[:,0] ) ) + 0.2 * np.random.randn(n) elif M == 6: d = 2 q = 1 b = np.zeros((p,d)) b[0,0] = 1 b[1,1] = 1 X[:,1] = X[:,0] + X[:,1] X[:,3] = ( 1+X[:,1] ) * X[:,3] y = X @ b[:,0] + 0.5 * (X @ b[:,1])** 2 elif M == 7: d = 2 q = 1 b = np.zeros((p,d)) y = np.zeros((n,q)) index1 = np.arange(1) index2 = np.arange(1,3) b[index1,0] = 1 b[index2, 1] = 1 y = (X @ b[:,0]) * (X @ b[:,1] + 1) + np.random.randn(n) elif M == 10: ## simple d = 2 q = 3 b = np.zeros((p,d)) y = np.zeros((n,q)) #index = np.random.randint(p, size = 5) index = np.arange(5) b[index[0:2], 0] = 1 b[index[2:], 1] = 1 y[:,0] = np.exp( X @ b[:,0]) + 0.5 * np.random.randn(n) y[:,1] = X @ b[:,1] + 0.1 * np.random.randn(n) y[:,2] = 0.1 * np.random.randn(n) elif M == 11: ## <NAME>en 2010 Example 3 ## complex d = 2 q = 5 covy = np.diag([1,1/2,1/2,1/3,1/4]) covy[0,1] = covy[1,0] = -1/2 L = np.linalg.cholesky(covy) Z =
np.random.randn(q,n)
numpy.random.randn
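generateX draws correlated covariates with the standard Cholesky construction: if the columns of Z are i.i.d. standard normal and covx = L @ L.T, then L @ Z has covariance covx, so the rows of (L @ Z).T are draws from N(0, covx). A quick empirical sanity check of that step in isolation (covstr == 1 case; the large n is only there to make the sample covariance tight):

import numpy as np
import scipy.linalg as la

p, n = 5, 200_000
covx = la.toeplitz(0.5 ** np.arange(p))   # the covstr == 1 covariance

L = np.linalg.cholesky(covx)
Z = np.random.randn(p, n)
X = (L @ Z).T                             # each row ~ N(0, covx)

sample_cov = np.cov(X, rowvar=False)
assert np.allclose(sample_cov, covx, atol=0.02)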
#!/usr/bin/env python2 # -*- coding: utf-8 -*- #------------------------------------------------------------------------------ # Copyright (c) 2007-2020, Acoular Development Team. #------------------------------------------------------------------------------ """ This file contains all the functionalities which are very expansive, regarding computational costs. All functionalities are optimized via NUMBA. """ import numpy as np import numba as nb cachedOption = True # if True: saves the numba func as compiled func in sub directory parallelOption = 'parallel' # if numba.guvectorize is used: 'CPU' for single threading; 'parallel' for multithreading; 'cuda' for calculating on GPU # Formerly known as 'faverage' @nb.njit([nb.complex128[:,:,:](nb.complex128[:,:,:], nb.complex128[:,:]), nb.complex64[:,:,:](nb.complex64[:,:,:], nb.complex64[:,:])], cache=cachedOption) def calcCSM(csm, SpecAllMics): """ Adds a given spectrum to the Cross-Spectral-Matrix (CSM). Here only the upper triangular matrix of the CSM is calculated. After averaging over the various ensembles, the whole CSM is created via complex conjugation transposing. This happens outside (in :class:`PowerSpectra<acoular.spectra.PowerSpectra>`). This method was called 'faverage' in acoular versions <= 16.5. Parameters ---------- csm : complex128[nFreqs, nMics, nMics] The cross spectral matrix which gets updated with the spectrum of the ensemble. SpecAllMics : complex128[nFreqs, nMics] Spectrum of the added ensemble at all Mics. Returns ------- None : as the input csm gets overwritten. """ #============================================================================== # It showed, that parallelizing brings no benefit when calling calcCSM once per # ensemble (as its done at the moment). BUT it could be whorth, taking a closer # look to parallelization, when averaging over all ensembles inside this numba # optimized function. See "vglOptimierungFAverage.py" for some information on # the various implementations and their limitations. #============================================================================== nFreqs = csm.shape[0] nMics = csm.shape[1] for cntFreq in range(nFreqs): for cntColumn in range(nMics): temp = SpecAllMics[cntFreq, cntColumn].conjugate() for cntRow in range(cntColumn + 1): # calculate upper triangular matrix (of every frequency-slice) only csm[cntFreq, cntRow, cntColumn] += temp * SpecAllMics[cntFreq, cntRow] return csm def beamformerFreq(steerVecType, boolRemovedDiagOfCSM, normFactor, inputTupleSteer, inputTupleCsm): """ Conventional beamformer in frequency domain. Use either a predefined steering vector formulation (see Sarradj 2012) or pass your own steering vector. Parameters ---------- steerVecType : (one of the following strings: 'classic' (I), 'inverse' (II), 'true level' (III), 'true location' (IV), 'custom') Either build the steering vector via the predefined formulations I - IV (see :ref:`Sarradj, 2012<Sarradj2012>`) or pass it directly. boolRemovedDiagOfCSM : bool Should the diagonal of the csm be removed? normFactor : float In here both the signalenergy loss factor (due to removal of the csm diagonal) as well as beamforming algorithm (music, capon, ...) dependent normalization factors are handled. inputTupleSteer : contains the information needed to create the steering vector. Is dependent of steerVecType. 
There are 2 cases: steerVecType != 'custom' : inputTupleSteer = (distGridToArrayCenter, distGridToAllMics, waveNumber) , with distGridToArrayCenter : float64[nGridpoints] Distance of all gridpoints to the center of sensor array distGridToAllMics : float64[nGridpoints, nMics] Distance of all gridpoints to all sensors of array waveNumber : float64 The wave number steerVecType == 'custom' : inputTupleSteer = steeringVector , with steeringVector : complex128[nGridPoints, nMics] The steering vector of each gridpoint for the same frequency as the CSM inputTupleCsm : contains the data of measurement as a tuple. There are 2 cases: perform standard CSM-beamformer: inputTupleCsm = csm csm : complex128[ nMics, nMics] The cross spectral matrix for one frequency perform beamformer on eigenvalue decomposition of csm: inputTupleCsm = (eigValues, eigVectors) , with eigValues : float64[nEV] nEV is the number of eigenvalues which should be taken into account. All passed eigenvalues will be evaluated. eigVectors : complex128[nMics, nEV] Eigen vectors corresponding to eigValues. All passed eigenvector slices will be evaluated. Returns ------- *Autopower spectrum beamforming map [nGridPoints] *steer normalization factor [nGridPoints]... contains the values the autopower needs to be multiplied with, in order to fullfill 'steer^H * steer = 1' as needed for functional beamforming. Some Notes on the optimization of all subroutines ------------------------------------------------- Reducing beamforming equation: Let the csm be C and the steering vector be h, than, using Linear Albegra, the conventional beamformer can be written as .. math:: B = h^H \\cdot C \\cdot h, with ^H meaning the complex conjugated transpose. When using that C is a hermitian matrix one can reduce the equation to .. math:: B = h^H \\cdot C_D \\cdot h + 2 \\cdot Real(h^H \\cdot C_U \\cdot h), where C_D and C_U are the diagonal part and upper part of C respectively. Steering vector: Theoretically the steering vector always includes the term "exp(distMicsGrid - distArrayCenterGrid)", but as the steering vector gets multplied with its complex conjugation in all beamformer routines, the constant "distArrayCenterGrid" cancels out --> In order to save operations, it is not implemented. Spectral decomposition of the CSM: In Linear Algebra the spectral decomposition of the CSM matrix would be: .. math:: CSM = \\sum_{i=1}^{nEigenvalues} \\lambda_i (v_i \\cdot v_i^H) , where lambda_i is the i-th eigenvalue and v_i is the eigenvector[nEigVal,1] belonging to lambda_i and ^H denotes the complex conjug transpose. Using this, one must not build the whole CSM (which would be time consuming), but can drag the steering vector into the sum of the spectral decomp. This saves a lot of operations. Squares: Seemingly "a * a" is slightly faster than "a**2" in numba Square of abs(): Even though "a.real**2 + a.imag**2" would have fewer operations, modern processors seem to be optimized for "a * a.conj" and are slightly faster the latter way. Both Versions are much faster than "abs(a)**2". Using Cascading Sums: When using the Spectral-Decomposition-Beamformer one could use numpys cascading sums for the scalar product "eigenVec.conj * steeringVector". BUT (at the moment) this only brings benefits in comp-time for a very small range of nMics (approx 250) --> Therefor it is not implemented here. 
""" boolIsEigValProb = isinstance(inputTupleCsm, tuple)# len(inputTupleCsm) > 1 # get the beamformer type (key-tuple = (isEigValProblem, formulationOfSteeringVector, RemovalOfCSMDiag)) beamformerDict = {(False, 'classic', False) : _freqBeamformer_Formulation1AkaClassic_FullCSM, (False, 'classic', True) : _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag, (False, 'inverse', False) : _freqBeamformer_Formulation2AkaInverse_FullCSM, (False, 'inverse', True) : _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag, (False, 'true level', False) : _freqBeamformer_Formulation3AkaTrueLevel_FullCSM, (False, 'true level', True) : _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag, (False, 'true location', False) : _freqBeamformer_Formulation4AkaTrueLocation_FullCSM, (False, 'true location', True) : _freqBeamformer_Formulation4AkaTrueLocation_CsmRemovedDiag, (False, 'custom', False) : _freqBeamformer_SpecificSteerVec_FullCSM, (False, 'custom', True) : _freqBeamformer_SpecificSteerVec_CsmRemovedDiag, (True, 'classic', False) : _freqBeamformer_EigValProb_Formulation1AkaClassic_FullCSM, (True, 'classic', True) : _freqBeamformer_EigValProb_Formulation1AkaClassic_CsmRemovedDiag, (True, 'inverse', False) : _freqBeamformer_EigValProb_Formulation2AkaInverse_FullCSM, (True, 'inverse', True) : _freqBeamformer_EigValProb_Formulation2AkaInverse_CsmRemovedDiag, (True, 'true level', False) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_FullCSM, (True, 'true level', True) : _freqBeamformer_EigValProb_Formulation3AkaTrueLevel_CsmRemovedDiag, (True, 'true location', False) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_FullCSM, (True, 'true location', True) : _freqBeamformer_EigValProb_Formulation4AkaTrueLocation_CsmRemovedDiag, (True, 'custom', False) : _freqBeamformer_EigValProb_SpecificSteerVec_FullCSM, (True, 'custom', True) : _freqBeamformer_EigValProb_SpecificSteerVec_CsmRemovedDiag} coreFunc = beamformerDict[(boolIsEigValProb, steerVecType, boolRemovedDiagOfCSM)] # prepare Input if steerVecType == 'custom': # beamformer with custom steering vector steerVec = inputTupleSteer #nFreqs, nGridPoints = steerVec.shape[0], steerVec.shape[1] nGridPoints = steerVec.shape[0] else: # predefined beamformers (Formulation I - IV) distGridToArrayCenter, distGridToAllMics, waveNumber = inputTupleSteer#[0], inputTupleSteer[1], inputTupleSteer[2] if not isinstance(waveNumber, np.ndarray): waveNumber = np.array([waveNumber]) #nFreqs, nGridPoints = waveNumber.shape[0], distGridToAllMics.shape[0] nGridPoints = distGridToAllMics.shape[0] if boolIsEigValProb: eigVal, eigVec = inputTupleCsm#[0], inputTupleCsm[1] else: csm = inputTupleCsm # beamformer routine: parallelized over Gridpoints beamformOutput = np.zeros(nGridPoints, np.float64) steerNormalizeOutput = np.zeros_like(beamformOutput) result = np.zeros(nGridPoints, np.float64) normalHelp = np.zeros_like(result) if steerVecType == 'custom': # beamformer with custom steering vector if boolIsEigValProb: coreFunc(eigVal, eigVec, steerVec, normFactor, result, normalHelp) else: coreFunc(csm, steerVec, normFactor, result, normalHelp) else: # predefined beamformers (Formulation I - IV) if boolIsEigValProb: coreFunc(eigVal, eigVec, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp) else: coreFunc(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, normFactor, result, normalHelp) beamformOutput = result steerNormalizeOutput = normalHelp return beamformOutput, steerNormalizeOutput #%% beamformers - steer * CSM * steer 
@nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption) def _freqBeamformer_Formulation1AkaClassic_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer): # see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements. nMics = csm.shape[0] steerVec = np.zeros((nMics), np.complex128) # building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl. for cntMics in range(nMics): expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics]) steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) # performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq) scalarProd = 0.0 for cntMics in range(nMics): leftVecMatrixProd = 0.0 + 0.0j for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal) leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate() scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2') scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm normalizeFactor = nMics # specific normalization of steering vector formulation normalizeSteer[0] = 1.0 / nMics result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0] @nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption) def _freqBeamformer_Formulation1AkaClassic_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer): # see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements. nMics = csm.shape[0] steerVec = np.zeros((nMics), np.complex128) # building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl. 
for cntMics in range(nMics): expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics]) steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) # performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq') scalarProd = 0.0 for cntMics in range(nMics): leftVecMatrixProd = 0.0 + 0.0j for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal) leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate() scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2') normalizeFactor = nMics # specific normalization of steering vector formulation normalizeSteer[0] = 1.0 / nMics result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0] @nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption) def _freqBeamformer_Formulation2AkaInverse_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer): # see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements. nMics = csm.shape[0] steerVec = np.zeros((nMics), np.complex128) # building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl. helpNormalize = 0.0 for cntMics in range(nMics): helpNormalize += distGridToAllMics[cntMics] * distGridToAllMics[cntMics] expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics]) steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here # performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq') scalarProd = 0.0 for cntMics in range(nMics): leftVecMatrixProd = 0.0 + 0.0j for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal) leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate() scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2') scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm normalizeFactor = nMics * distGridToArrayCenter[0] # specific normalization of steering vector formulation normalizeFactorSquared = normalizeFactor * normalizeFactor normalizeSteer[0] = helpNormalize / normalizeFactorSquared result[0] = scalarProd / normalizeFactorSquared * signalLossNormalization[0] @nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption) def _freqBeamformer_Formulation2AkaInverse_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer): # see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements. nMics = csm.shape[0] steerVec = np.zeros((nMics), np.complex128) # building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl. 
helpNormalize = 0.0 for cntMics in range(nMics): helpNormalize += distGridToAllMics[cntMics] * distGridToAllMics[cntMics] expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics]) steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) * distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here # performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq') scalarProd = 0.0 for cntMics in range(nMics): leftVecMatrixProd = 0.0 + 0.0j for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal) leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate() scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2') normalizeFactor = nMics * distGridToArrayCenter[0] # specific normalization of steering vector formulation normalizeFactorSquared = normalizeFactor * normalizeFactor normalizeSteer[0] = helpNormalize / normalizeFactorSquared result[0] = scalarProd / normalizeFactorSquared * signalLossNormalization[0] @nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption) def _freqBeamformer_Formulation3AkaTrueLevel_FullCSM(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer): # see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements. nMics = csm.shape[0] steerVec = np.zeros((nMics), np.complex128) # building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl. 
helpNormalize = 0.0 for cntMics in range(nMics): helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics]) expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics]) steerVec[cntMics] = (np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here # performing matrix-vector-multiplication (see bottom of information header of 'beamformerFreq') scalarProd = 0.0 for cntMics in range(nMics): leftVecMatrixProd = 0.0 + 0.0j for cntMics2 in range(cntMics): # calculate 'steer^H * CSM' of upper-triangular-part of csm (without diagonal) leftVecMatrixProd += csm[cntMics2, cntMics] * steerVec[cntMics2].conjugate() scalarProd += 2 * (leftVecMatrixProd * steerVec[cntMics]).real # use that csm is Hermitian (lower triangular of csm can be reduced to factor '2') scalarProd += (csm[cntMics, cntMics] * steerVec[cntMics].conjugate() * steerVec[cntMics]).real # include diagonal of csm normalizeFactor = distGridToArrayCenter[0] * helpNormalize # specific normalization of steering vector formulation normalizeSteer[0] = 1.0 / (distGridToArrayCenter[0] * distGridToArrayCenter[0]) / helpNormalize result[0] = scalarProd / (normalizeFactor * normalizeFactor) * signalLossNormalization[0] @nb.guvectorize([(nb.complex128[:,:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:], nb.float64[:])], '(m,m),(),(m),(),()->(),()', nopython=True, target=parallelOption, cache=cachedOption) def _freqBeamformer_Formulation3AkaTrueLevel_CsmRemovedDiag(csm, distGridToArrayCenter, distGridToAllMics, waveNumber, signalLossNormalization, result, normalizeSteer): # see bottom of information header of 'beamformerFreq' for information on which steps are taken, in order to gain speed improvements. nMics = csm.shape[0] steerVec = np.zeros((nMics), np.complex128) # building steering vector: in order to save some operation -> some normalization steps are applied after mat-vec-multipl. helpNormalize = 0.0 for cntMics in range(nMics): helpNormalize += 1.0 / (distGridToAllMics[cntMics] * distGridToAllMics[cntMics]) expArg = np.float32(waveNumber[0] * distGridToAllMics[cntMics]) steerVec[cntMics] = (
np.cos(expArg) - 1j * np.sin(expArg)) / distGridToAllMics[cntMics] # r_{t,i}-normalization is handled here
numpy.cos
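# The kernels above avoid a full matrix product by summing only the upper
# triangle of the Hermitian CSM (doubled) plus its diagonal. A minimal numpy
# cross-check of that identity -- illustrative only, with made-up data, not part
# of the original guvectorize kernels:
import numpy as np

nMics = 8
rng = np.random.default_rng(0)
A = rng.normal(size=(nMics, nMics)) + 1j * rng.normal(size=(nMics, nMics))
csm = A @ A.conj().T  # Hermitian cross-spectral matrix
steer = np.exp(-1j * rng.uniform(0.0, 2.0 * np.pi, nMics))

full = np.real(steer.conj() @ csm @ steer)  # plain steer^H * CSM * steer

tri = 0.0
for i in range(nMics):
    left = np.sum(csm[:i, i] * steer[:i].conj())  # upper triangle, no diagonal
    tri += 2.0 * np.real(left * steer[i])         # Hermitian symmetry gives the factor 2
    tri += np.real(csm[i, i] * steer[i].conj() * steer[i])  # diagonal term
assert np.isclose(full, tri)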
# Copyright (c) Open-MMLab. All rights reserved. import cv2 import numpy as np def _scale_size(size, scale): """Rescale a size by a ratio. Args: size (tuple[int]): (w, h). scale (float): Scaling factor. Returns: tuple[int]: scaled size. """ w, h = size return int(w * float(scale) + 0.5), int(h * float(scale) + 0.5) interp_codes = { 'nearest': cv2.INTER_NEAREST, 'bilinear': cv2.INTER_LINEAR, 'bicubic': cv2.INTER_CUBIC, 'area': cv2.INTER_AREA, 'lanczos': cv2.INTER_LANCZOS4 } def imresize(img, size, return_scale=False, interpolation='bilinear', out=None): """Resize image to a given size. Args: img (ndarray): The input image. size (tuple[int]): Target size (w, h). return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Interpolation method, accepted values are "nearest", "bilinear", "bicubic", "area", "lanczos". out (ndarray): The output destination. Returns: tuple | ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = img.shape[:2] resized_img = cv2.resize( img, size, dst=out, interpolation=interp_codes[interpolation]) if not return_scale: return resized_img else: w_scale = size[0] / w h_scale = size[1] / h return resized_img, w_scale, h_scale def imresize_like(img, dst_img, return_scale=False, interpolation='bilinear'): """Resize image to the same size of a given image. Args: img (ndarray): The input image. dst_img (ndarray): The target image. return_scale (bool): Whether to return `w_scale` and `h_scale`. interpolation (str): Same as :func:`resize`. Returns: tuple or ndarray: (`resized_img`, `w_scale`, `h_scale`) or `resized_img`. """ h, w = dst_img.shape[:2] return imresize(img, (w, h), return_scale, interpolation) def rescale_size(old_size, scale, return_scale=False): """Calculate the new size to be rescaled to. Args: old_size (tuple[int]): The old size (w, h) of image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image size. Returns: tuple[int]: The new rescaled image size. """ w, h = old_size if isinstance(scale, (float, int)): if scale <= 0: raise ValueError(f'Invalid scale {scale}, must be positive.') scale_factor = scale elif isinstance(scale, tuple): max_long_edge = max(scale) max_short_edge = min(scale) scale_factor = min(max_long_edge / max(h, w), max_short_edge / min(h, w)) else: raise TypeError( f'Scale must be a number or tuple of int, but got {type(scale)}') new_size = _scale_size((w, h), scale_factor) if return_scale: return new_size, scale_factor else: return new_size def imrescale(img, scale, return_scale=False, interpolation='bilinear'): """Resize image while keeping the aspect ratio. Args: img (ndarray): The input image. scale (float | tuple[int]): The scaling factor or maximum size. If it is a float number, then the image will be rescaled by this factor, else if it is a tuple of 2 integers, then the image will be rescaled as large as possible within the scale. return_scale (bool): Whether to return the scaling factor besides the rescaled image. interpolation (str): Same as :func:`resize`. Returns: ndarray: The rescaled image. 
""" h, w = img.shape[:2] new_size, scale_factor = rescale_size((w, h), scale, return_scale=True) rescaled_img = imresize(img, new_size, interpolation=interpolation) if return_scale: return rescaled_img, scale_factor else: return rescaled_img def imflip(img, direction='horizontal'): """Flip an image horizontally or vertically. Args: img (ndarray): Image to be flipped. direction (str): The flip direction, either "horizontal" or "vertical". Returns: ndarray: The flipped image. """ assert direction in ['horizontal', 'vertical'] if direction == 'horizontal': return
np.flip(img, axis=1) else: return np.flip(img, axis=0)
numpy.flip
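# Quick sanity check of the axis convention behind imflip (toy array, not part
# of the original module): a horizontal flip reverses columns, a vertical flip
# reverses rows.
import numpy as np

img = np.arange(6).reshape(2, 3)
assert np.array_equal(np.flip(img, axis=1), img[:, ::-1])  # 'horizontal'
assert np.array_equal(np.flip(img, axis=0), img[::-1, :])  # 'vertical'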
import numpy as np import matplotlib.pyplot as plt ## def zenith_intersections(img_size, A, B, pp = None): horizont_direction = A[0:2] - B[0:2] img_h = img_size[0] img_w = img_size[1] if pp is None: mid_point = [img_w/2, img_h/2] else: mid_point = pp horizont_direction = horizont_direction/np.linalg.norm(horizont_direction) zenith_line = [horizont_direction[0],horizont_direction[1], -mid_point[0]*horizont_direction[0] - mid_point[1]*horizont_direction[1]] C = np.cross(zenith_line, np.array([0, 1, 0])) D = np.cross(zenith_line, np.array([0, 1, -img_h])) C = C / C[2] D = D / D[2] return C, D ## def draw_line_segments_array(coords, color='red'): x0, y0, x1, y1 = np.split(coords, 4, axis=1) X = np.concatenate([x0,x1], axis=1) Y = np.concatenate([y0,y1], axis=1) for x,y in zip(X,Y): plt.plot(x,y,"-",c=color, zorder=1, lw=2) ## def draw_colored_line_segments_array(coords, group): colors = ['red','green','blue','yellow','orange','pink','violet'] x0, y0, x1, y1 = np.split(coords, 4, axis=1) #X = np.concatenate([x0, x1], axis=1) #Y = np.concatenate([y0, y1], axis=1) for i in range(group.max() + 1): X = np.concatenate([x0[group==i], x1[group==i]], axis=1) Y = np.concatenate([y0[group==i], y1[group==i]], axis=1) for x,y in zip(X,Y): plt.plot(x, y, "-", c=colors[i], zorder=1, lw=1) ## def draw_line_to_zenith(pp, zenith_point, img_height): zenith_line = np.cross(np.hstack([pp,1]), zenith_point) C = np.cross(zenith_line, np.array([0, 1, 0])) D = np.cross(zenith_line, np.array([0, 1, -img_height])) C = C / C[2] D = D / D[2] plt.plot(np.array([C[0], D[0]]), np.array([C[1],D[1]]), ':', color='blue') return ## def draw_line_as_horizon(horizon_line, img_width): A = np.cross(horizon_line, np.array([1, 0, 0])) B = np.cross(horizon_line, np.array([1, 0, -img_width])) A = A / A[2] B = B / B[2] plt.plot(np.array([A[0], B[0]]), np.array([A[1], B[1]]), ':', color='blue') return ## def draw_cross(img_data, A = None, B = None, horizon_line = None, zenith_point = None, lc = 'red'): img_h, img_w = img_data['shape'] pp = img_data['pp'] if horizon_line is not None: A = np.cross(horizon_line, np.array([1, 0, 0])) B = np.cross(horizon_line, np.array([1, 0, -img_w])) A = A / A[2] B = B / B[2] if zenith_point is None: horizont_direction = A[0:2] - B[0:2] horizont_direction = horizont_direction / np.linalg.norm(horizont_direction) zenith_line = [horizont_direction[0], horizont_direction[1], -pp[0] * horizont_direction[0] - pp[1] * horizont_direction[1]] else: zenith_line = np.cross(np.hstack([pp, 1]), zenith_point) C = np.cross(zenith_line, np.array([0, 1, 0])) D = np.cross(zenith_line, np.array([0, 1, -img_h])) C = C / C[2] D = D / D[2] plt.plot(np.array([A[0], B[0]]), np.array([A[1], B[1]]), '--', color=lc) plt.plot(
np.array([C[0], D[0]])
numpy.array
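# Hedged sketch of the homogeneous-coordinate trick the functions above rely on:
# a 2D line l = (a, b, c) with ax + by + c = 0 is intersected with the borders
# y = 0 and y = img_h by taking cross products, then dividing by the third
# coordinate. The line below is an arbitrary example, not data from the module.
import numpy as np

img_h = 480
line = np.array([1.0, -2.0, 100.0])
top = np.cross(line, np.array([0.0, 1.0, 0.0]))        # intersection with y = 0
bottom = np.cross(line, np.array([0.0, 1.0, -img_h]))  # intersection with y = img_h
top, bottom = top / top[2], bottom / bottom[2]
assert np.isclose(line @ top, 0.0) and np.isclose(line @ bottom, 0.0)
assert np.isclose(top[1], 0.0) and np.isclose(bottom[1], img_h)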
""" Routines related to flexure, air2vac, etc. """ import inspect import numpy as np import copy from matplotlib import pyplot as plt from matplotlib import gridspec from scipy import interpolate from astropy import units from astropy.coordinates import solar_system, ICRS from astropy.coordinates import UnitSphericalRepresentation, CartesianRepresentation from astropy.time import Time from linetools.spectra import xspectrum1d from pypeit import msgs from pypeit.core import arc from pypeit.core import qa from pypeit import utils from pypeit import debugger def load_sky_spectrum(sky_file): """ Load a sky spectrum into an XSpectrum1D object Args: sky_file: str Returns: sky_spec: XSpectrum1D spectrum """ sky_spec = xspectrum1d.XSpectrum1D.from_file(sky_file) return sky_spec def flex_shift(obj_skyspec, arx_skyspec, mxshft=20): """ Calculate shift between object sky spectrum and archive sky spectrum Parameters ---------- obj_skyspec arx_skyspec Returns ------- flex_dict: dict Contains flexure info """ flex_dict = {} # Determine the brightest emission lines msgs.warn("If we use Paranal, cut down on wavelength early on") arx_amp, arx_amp_cont, arx_cent, arx_wid, _, arx_w, arx_yprep, nsig = arc.detect_lines(arx_skyspec.flux.value) obj_amp, obj_amp_cont, obj_cent, obj_wid, _, obj_w, obj_yprep, nsig_obj= arc.detect_lines(obj_skyspec.flux.value) # Keep only 5 brightest amplitude lines (xxx_keep is array of # indices within arx_w of the 5 brightest) arx_keep = np.argsort(arx_amp[arx_w])[-5:] obj_keep = np.argsort(obj_amp[obj_w])[-5:] # Calculate wavelength (Angstrom per pixel) arx_disp = np.append(arx_skyspec.wavelength.value[1]-arx_skyspec.wavelength.value[0], arx_skyspec.wavelength.value[1:]-arx_skyspec.wavelength.value[:-1]) #arx_disp = (np.amax(arx_sky.wavelength.value)-np.amin(arx_sky.wavelength.value))/arx_sky.wavelength.size obj_disp = np.append(obj_skyspec.wavelength.value[1]-obj_skyspec.wavelength.value[0], obj_skyspec.wavelength.value[1:]-obj_skyspec.wavelength.value[:-1]) #obj_disp = (np.amax(obj_sky.wavelength.value)-np.amin(obj_sky.wavelength.value))/obj_sky.wavelength.size # Calculate resolution (lambda/delta lambda_FWHM)..maybe don't need # this? 
can just use sigmas arx_idx = (arx_cent+0.5).astype(np.int)[arx_w][arx_keep] # The +0.5 is for rounding arx_res = arx_skyspec.wavelength.value[arx_idx]/\ (arx_disp[arx_idx]*(2*np.sqrt(2*np.log(2)))*arx_wid[arx_w][arx_keep]) obj_idx = (obj_cent+0.5).astype(np.int)[obj_w][obj_keep] # The +0.5 is for rounding obj_res = obj_skyspec.wavelength.value[obj_idx]/ \ (obj_disp[obj_idx]*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep]) #obj_res = (obj_sky.wavelength.value[0]+(obj_disp*obj_cent[obj_w][obj_keep]))/( # obj_disp*(2*np.sqrt(2*np.log(2)))*obj_wid[obj_w][obj_keep]) if not np.all(np.isfinite(obj_res)): msgs.warn('Failed to measure the resolution of the object spectrum, likely due to error ' 'in the wavelength image.') return None msgs.info("Resolution of Archive={0} and Observation={1}".format(np.median(arx_res), np.median(obj_res))) # Determine sigma of gaussian for smoothing arx_sig2 = np.power(arx_disp[arx_idx]*arx_wid[arx_w][arx_keep], 2) obj_sig2 = np.power(obj_disp[obj_idx]*obj_wid[obj_w][obj_keep], 2) arx_med_sig2 = np.median(arx_sig2) obj_med_sig2 = np.median(obj_sig2) if obj_med_sig2 >= arx_med_sig2: smooth_sig = np.sqrt(obj_med_sig2-arx_med_sig2) # Ang smooth_sig_pix = smooth_sig / np.median(arx_disp[arx_idx]) arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2))) else: msgs.warn("Prefer archival sky spectrum to have higher resolution") smooth_sig_pix = 0. msgs.warn("New Sky has higher resolution than Archive. Not smoothing") #smooth_sig = np.sqrt(arx_med_sig**2-obj_med_sig**2) #Determine region of wavelength overlap min_wave = max(np.amin(arx_skyspec.wavelength.value), np.amin(obj_skyspec.wavelength.value)) max_wave = min(np.amax(arx_skyspec.wavelength.value), np.amax(obj_skyspec.wavelength.value)) #Smooth higher resolution spectrum by smooth_sig (flux is conserved!) # if np.median(obj_res) >= np.median(arx_res): # msgs.warn("New Sky has higher resolution than Archive. Not smoothing") #obj_sky_newflux = ndimage.gaussian_filter(obj_sky.flux, smooth_sig) # else: #tmp = ndimage.gaussian_filter(arx_sky.flux, smooth_sig) # arx_skyspec = arx_skyspec.gauss_smooth(smooth_sig_pix*2*np.sqrt(2*np.log(2))) #arx_sky.flux = ndimage.gaussian_filter(arx_sky.flux, smooth_sig) # Define wavelengths of overlapping spectra keep_idx = np.where((obj_skyspec.wavelength.value>=min_wave) & (obj_skyspec.wavelength.value<=max_wave))[0] #keep_wave = [i for i in obj_sky.wavelength.value if i>=min_wave if i<=max_wave] #Rebin both spectra onto overlapped wavelength range if len(keep_idx) <= 50: msgs.warn("Not enough overlap between sky spectra") return None else: #rebin onto object ALWAYS keep_wave = obj_skyspec.wavelength[keep_idx] arx_skyspec = arx_skyspec.rebin(keep_wave) obj_skyspec = obj_skyspec.rebin(keep_wave) # Trim edges (rebinning is junk there) arx_skyspec.data['flux'][0,:2] = 0. arx_skyspec.data['flux'][0,-2:] = 0. obj_skyspec.data['flux'][0,:2] = 0. obj_skyspec.data['flux'][0,-2:] = 0. # Normalize spectra to unit average sky count norm = np.sum(obj_skyspec.flux.value)/obj_skyspec.npix obj_skyspec.flux = obj_skyspec.flux / norm norm2 = np.sum(arx_skyspec.flux.value)/arx_skyspec.npix arx_skyspec.flux = arx_skyspec.flux / norm2 if (norm < 0.): msgs.warn("Bad normalization of object in flexure algorithm") msgs.warn("Will try the median") norm = np.median(obj_skyspec.flux.value) if (norm < 0.): msgs.warn("Improper sky spectrum for flexure. Is it too faint??") return None if (norm2 < 0.): msgs.warn('Bad normalization of archive in flexure. 
You are probably using wavelengths ' 'well beyond the archive.') return None # Deal with bad pixels msgs.work("Need to mask bad pixels") # Deal with underlying continuum msgs.work("Consider taking median first [5 pixel]") everyn = obj_skyspec.npix // 20 bspline_par = dict(everyn=everyn) mask, ct = utils.robust_polyfit(obj_skyspec.wavelength.value, obj_skyspec.flux.value, 3, function='bspline', sigma=3., bspline_par=bspline_par) obj_sky_cont = utils.func_val(ct, obj_skyspec.wavelength.value, 'bspline') obj_sky_flux = obj_skyspec.flux.value - obj_sky_cont mask, ct_arx = utils.robust_polyfit(arx_skyspec.wavelength.value, arx_skyspec.flux.value, 3, function='bspline', sigma=3., bspline_par=bspline_par) arx_sky_cont = utils.func_val(ct_arx, arx_skyspec.wavelength.value, 'bspline') arx_sky_flux = arx_skyspec.flux.value - arx_sky_cont # Consider sharpness filtering (e.g. LowRedux) msgs.work("Consider taking median first [5 pixel]") #Cross correlation of spectra #corr = np.correlate(arx_skyspec.flux, obj_skyspec.flux, "same") corr = np.correlate(arx_sky_flux, obj_sky_flux, "same") #Create array around the max of the correlation function for fitting for subpixel max # Restrict to pixels within maxshift of zero lag lag0 = corr.size//2 #mxshft = settings.argflag['reduce']['flexure']['maxshift'] max_corr = np.argmax(corr[lag0-mxshft:lag0+mxshft]) + lag0-mxshft subpix_grid = np.linspace(max_corr-3., max_corr+3., 7.) #Fit a 2-degree polynomial to peak of correlation function fit = utils.func_fit(subpix_grid, corr[subpix_grid.astype(np.int)], 'polynomial', 2) max_fit = -0.5*fit[1]/fit[2] #Calculate and apply shift in wavelength shift = float(max_fit)-lag0 msgs.info("Flexure correction of {:g} pixels".format(shift)) #model = (fit[2]*(subpix_grid**2.))+(fit[1]*subpix_grid)+fit[0] flex_dict = dict(polyfit=fit, shift=shift, subpix=subpix_grid, corr=corr[subpix_grid.astype(np.int)], sky_spec=obj_skyspec, arx_spec=arx_skyspec, corr_cen=corr.size/2, smooth=smooth_sig_pix) # Return return flex_dict ''' def flexure_slit(): """Correct wavelength down slit center for flexure Parameters: ---------- slf : det : int """ debugger.set_trace() # THIS METHOD IS NOT BEING USED THESE DAYS # Load Archive skyspec_fil, arx_sky = flexure_archive() # Extract censpec_wv = arextract.boxcar_cen(slf, det, slf._mswave[det-1]) censpec_fx = arextract.boxcar_cen(slf, det, slf._bgframe[det-1]) cen_sky = xspectrum1d.XSpectrum1D.from_tuple((censpec_wv, censpec_fx)) # Find shift fdict = flex_shift(slf, det, cen_sky, arx_sky) msgs.work("Flexure shift = {:g} down slit center".format(fdict['shift'])) # Refit # What if xfit shifts outside of 0-1? xshift = fdict['shift']/(slf._msarc[det-1].shape[0]-1) mask, fit = utils.robust_polyfit(np.array(slf._wvcalib[det-1]['xfit'])+xshift, np.array(slf._wvcalib[det-1]['yfit']), len(slf._wvcalib[det-1]['fitc']), function=slf._wvcalib[det-1]['function'], sigma=slf._wvcalib[det-1]['nrej'], minv=slf._wvcalib[det-1]['fmin'], maxv=slf._wvcalib[det-1]['fmax']) # Update wvcalib slf._wvcalib[det-1]['shift'] = fdict['shift'] # pixels slf._wvcalib[det-1]['fitc'] = fit msgs.work("Add another QA for wavelengths?") # Update mswave wv_calib = slf._wvcalib[det-1] slf._mswave[det-1] = utils.func_val(wv_calib['fitc'], slf._tilts[det-1], wv_calib['function'], minv=wv_calib['fmin'], maxv=wv_calib['fmax']) # Write to Masters? Not for now # For QA (kludgy..) 
censpec_wv = arextract.boxcar_cen(slf, det, slf._mswave[det-1]) fdict['sky_spec'] = xspectrum1d.XSpectrum1D.from_tuple((censpec_wv, censpec_fx)) flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[], corr_cen=[], spec_file=skyspec_fil, smooth=[], arx_spec=[], sky_spec=[]) #debugger.set_trace() #debugger.xplot(censpec_wv, censpec_fx, xtwo=fdict['arx_spec'].wavelength, ytwo=fdict['arx_spec'].flux*50) for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'sky_spec', 'arx_spec']: flex_dict[key].append(fdict[key]) return flex_dict ''' # TODO I don't see why maskslits is needed in these routine, since if the slits are masked in arms, they won't be extracted def flexure_obj(specobjs, maskslits, method, sky_file, mxshft=None): """Correct wavelengths for flexure, object by object Parameters: ---------- method : str 'boxcar' -- Recommneded 'slitpix' -- sky_file: str Returns: ---------- flex_list: list list of dicts containing flexure results Aligned with specobjs Filled with a basically empty dict if the slit is skipped or there is no object """ sv_fdict = None msgs.work("Consider doing 2 passes in flexure as in LowRedux") # Load Archive sky_spectrum = load_sky_spectrum(sky_file) nslits = len(maskslits) gdslits = np.where(~maskslits)[0] # Loop on objects flex_list = [] # Slit/objects to come back to return_later_sobjs = [] # Loop over slits, and then over objects here for slit in range(nslits): msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(slit)) indx = specobjs.slitid == slit this_specobjs = specobjs[indx] # Reset flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[], corr_cen=[], spec_file=sky_file, smooth=[], arx_spec=[], sky_spec=[]) # If no objects on this slit append an empty dictionary if slit not in gdslits: flex_list.append(flex_dict.copy()) continue for ss, specobj in enumerate(this_specobjs): if specobj is None: continue msgs.info("Working on flexure for object # {:d}".format(specobj.objid) + "in slit # {:d}".format(specobj.slitid)) # Using boxcar if method in ['boxcar', 'slitcen']: sky_wave = specobj.boxcar['WAVE'] #.to('AA').value sky_flux = specobj.boxcar['COUNTS_SKY'] else: msgs.error("Not ready for this flexure method: {}".format(method)) # Generate 1D spectrum for object obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux)) # Calculate the shift fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft) punt = False if fdict is None: msgs.warn("Flexure shift calculation failed for this spectrum.") if sv_fdict is not None: msgs.warn("Will used saved estimate from a previous slit/object") fdict = copy.deepcopy(sv_fdict) else: # One does not exist yet # Save it for later return_later_sobjs.append([slit, ss]) punt = True else: sv_fdict = copy.deepcopy(fdict) # Punt? if punt: break # Interpolate new_sky = specobj.flexure_interp(sky_wave, fdict) # Update dict for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']: flex_dict[key].append(fdict[key]) flex_dict['sky_spec'].append(new_sky) flex_list.append(flex_dict.copy()) # Do we need to go back? 
for items in return_later_sobjs: if sv_fdict is None: msgs.info("No flexure corrections could be made") break # Setup slit, ss = items flex_dict = flex_list[slit] specobj = specobjs[ss] sky_wave = specobj.boxcar['WAVE'] #.to('AA').value # Copy me fdict = copy.deepcopy(sv_fdict) # Interpolate new_sky = specobj.flexure_interp(sky_wave, fdict) # Update dict for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']: flex_dict[key].append(fdict[key]) flex_dict['sky_spec'].append(new_sky) return flex_list # TODO I don't see why maskslits is needed in these routine, since if the slits are masked in arms, they won't be extracted def flexure_obj_oldbuggyversion(specobjs, maskslits, method, sky_spectrum, sky_file=None, mxshft=None): """Correct wavelengths for flexure, object by object Parameters: ---------- method : str 'boxcar' -- Recommneded 'slitpix' -- Returns: ---------- flex_list: list list of dicts containing flexure results Aligned with specobjs Filled with a basically empty dict if the slit is skipped or there is no object """ msgs.work("Consider doing 2 passes in flexure as in LowRedux") # Load Archive # skyspec_fil, arx_sky = flexure_archive(spectrograph=spectrograph, skyspec_fil=skyspec_fil) # Loop on objects flex_list = [] gdslits = np.where(~maskslits)[0] for sl in range(len(specobjs)): # Reset flex_dict = dict(polyfit=[], shift=[], subpix=[], corr=[], corr_cen=[], spec_file=sky_file, smooth=[], arx_spec=[], sky_spec=[]) if sl not in gdslits: flex_list.append(flex_dict.copy()) continue msgs.info("Working on flexure in slit (if an object was detected): {:d}".format(sl)) for specobj in specobjs[sl]: # for convenience if specobj is None: continue # Using boxcar if method in ['boxcar', 'slitcen']: sky_wave = specobj.boxcar['WAVE'] #.to('AA').value sky_flux = specobj.boxcar['COUNTS_SKY'] else: msgs.error("Not ready for this flexure method: {}".format(method)) # Generate 1D spectrum for object obj_sky = xspectrum1d.XSpectrum1D.from_tuple((sky_wave, sky_flux)) # Calculate the shift fdict = flex_shift(obj_sky, sky_spectrum, mxshft=mxshft) # Simple interpolation to apply npix = len(sky_wave) x = np.linspace(0., 1., npix) # Apply for attr in ['boxcar', 'optimal']: if not hasattr(specobj, attr): continue if 'WAVE' in getattr(specobj, attr).keys(): msgs.info("Applying flexure correction to {0:s} extraction for object:".format(attr) + msgs.newline() + "{0:s}".format(str(specobj))) f = interpolate.interp1d(x, sky_wave, bounds_error=False, fill_value="extrapolate") getattr(specobj, attr)['WAVE'] = f(x+fdict['shift']/(npix-1))*units.AA # Shift sky spec too cut_sky = fdict['sky_spec'] x = np.linspace(0., 1., cut_sky.npix) f = interpolate.interp1d(x, cut_sky.wavelength.value, bounds_error=False, fill_value="extrapolate") twave = f(x + fdict['shift']/(cut_sky.npix-1))*units.AA new_sky = xspectrum1d.XSpectrum1D.from_tuple((twave, cut_sky.flux)) # Update dict for key in ['polyfit', 'shift', 'subpix', 'corr', 'corr_cen', 'smooth', 'arx_spec']: flex_dict[key].append(fdict[key]) flex_dict['sky_spec'].append(new_sky) flex_list.append(flex_dict.copy()) return flex_list def geomotion_calculate(radec, time, longitude, latitude, elevation, refframe): """ Correct the wavelength calibration solution to the desired reference frame """ # Time loc = (longitude * units.deg, latitude * units.deg, elevation * units.m,) obstime = Time(time.value, format=time.format, scale='utc', location=loc) return geomotion_velocity(obstime, radec, frame=refframe) def geomotion_correct(specObjs, radec, time, 
maskslits, longitude, latitude, elevation, refframe): """ Correct the wavelength of every pixel to a barycentric/heliocentric frame. Args: specObjs (SpecObjs object): radec (astropy.coordiantes.SkyCoord): time (:obj:`astropy.time.Time`): maskslits fitstbl : Table/PypeItMetaData Containing the properties of every fits file longitude (float): deg latitude (float): deg elevation (float): m refframe (str): Returns: Two objects are returned:: - float: - The velocity correction that should be applied to the wavelength array. - float: The relativistic velocity correction that should be multiplied by the wavelength array to convert each wavelength into the user-specified reference frame. """ # Calculate vel = geomotion_calculate(radec, time, longitude, latitude, elevation, refframe) vel_corr = np.sqrt((1. + vel/299792.458) / (1. - vel/299792.458)) gdslits = np.where(~maskslits)[0] # Loop on slits to apply for slit in gdslits: indx = (specObjs.slitid-1) == slit this_specobjs = specObjs[indx] # Loop on objects for specobj in this_specobjs: if specobj is None: continue # Loop on extraction methods for attr in ['boxcar', 'optimal']: if not hasattr(specobj, attr): continue if 'WAVE' in getattr(specobj, attr).keys(): msgs.info('Applying {0} correction to '.format(refframe) + '{0} extraction for object:'.format(attr) + msgs.newline() + "{0}".format(str(specobj))) getattr(specobj, attr)['WAVE'] = getattr(specobj, attr)['WAVE'] * vel_corr # Return return vel, vel_corr # Mainly for debugging def geomotion_velocity(time, skycoord, frame="heliocentric"): """ Perform a barycentric/heliocentric velocity correction. For the correciton, this routine uses the ephemeris: astropy.coordinates.solar_system_ephemeris.set For more information see `~astropy.coordinates.solar_system_ephemeris`. Parameters ---------- time : astropy.time.Time The time of observation, including the location. skycoord: astropy.coordinates.SkyCoord The RA and DEC of the pointing, as a SkyCoord quantity. frame : str The reference frame that should be used for the calculation. Returns ------- vcorr : float The velocity correction that should be added to the original velocity. """ # Check that the RA/DEC of the object is ICRS compatible if not skycoord.is_transformable_to(ICRS()): msgs.error("Cannot transform RA/DEC of object to the ICRS") # Calculate ICRS position and velocity of Earth's geocenter ep, ev = solar_system.get_body_barycentric_posvel('earth', time) # Calculate GCRS position and velocity of observatory op, ov = time.location.get_gcrs_posvel(time) # ICRS and GCRS are axes-aligned. Can add the velocities velocity = ev + ov if frame == "heliocentric": # ICRS position and velocity of the Sun sp, sv = solar_system.get_body_barycentric_posvel('sun', time) velocity += sv # Get unit ICRS vector in direction of SkyCoord sc_cartesian = skycoord.icrs.represent_as(UnitSphericalRepresentation).represent_as(CartesianRepresentation) return sc_cartesian.dot(velocity).to(units.km / units.s).value def airtovac(wave): """ Convert air-based wavelengths to vacuum Parameters: ---------- wave: Quantity array Wavelengths Returns: ---------- wave: Quantity array Wavelength array corrected to vacuum wavelengths """ # Convert to AA wave = wave.to(units.AA) wavelength = wave.value # Standard conversion format sigma_sq = (1.e4/wavelength)**2. #wavenumber squared factor = 1 + (5.792105e-2/(238.0185-sigma_sq)) + (1.67918e-3/(57.362-sigma_sq)) factor = factor*(wavelength>=2000.) + 1.*(wavelength<2000.) 
#only modify above 2000A # Convert wavelength = wavelength*factor # Units new_wave = wavelength*units.AA new_wave.to(wave.unit) return new_wave def vactoair(wave): """Convert to air-based wavelengths from vacuum Parameters: ---------- wave: Quantity array Wavelengths Returns: ---------- wave: Quantity array Wavelength array corrected to air """ # Convert to AA wave = wave.to(units.AA) wavelength = wave.value # Standard conversion format sigma_sq = (1.e4/wavelength)**2. #wavenumber squared factor = 1 + (5.792105e-2/(238.0185-sigma_sq)) + (1.67918e-3/(57.362-sigma_sq)) factor = factor*(wavelength>=2000.) + 1.*(wavelength<2000.) #only modify above 2000A # Convert wavelength = wavelength/factor new_wave = wavelength*units.AA new_wave.to(wave.unit) return new_wave # TODO I don't see why maskslits is needed in these routine, since if the slits are masked in arms, they won't be extracted # AND THIS IS WHY THE CODE IS CRASHING def flexure_qa(specobjs, maskslits, basename, det, flex_list, slit_cen=False, out_dir=None): """ Args: specobjs: maskslits (np.ndarray): basename (str): det (int): flex_list (list): slit_cen: out_dir: """ plt.rcdefaults() plt.rcParams['font.family']= 'times new roman' # Grab the named of the method method = inspect.stack()[0][3] # gdslits = np.where(np.invert(maskslits))[0] # Loop over slits, and then over objects here for slit in gdslits: indx = specobjs.slitid == slit this_specobjs = specobjs[indx] this_flex_dict = flex_list[slit] # Setup if slit_cen: nobj = 1 ncol = 1 else: nobj = np.sum(indx) ncol = min(3, nobj) # if nobj == 0: continue nrow = nobj // ncol + ((nobj % ncol) > 0) # Outfile, one QA file per slit outfile = qa.set_qa_filename(basename, method + '_corr', det=det,slit=(slit + 1), out_dir=out_dir) plt.figure(figsize=(8, 5.0)) plt.clf() gs = gridspec.GridSpec(nrow, ncol) for iobj, specobj in enumerate(this_specobjs): if specobj is None: continue # Correlation QA ax = plt.subplot(gs[iobj//ncol, iobj % ncol]) # Fit fit = this_flex_dict['polyfit'][iobj] xval = np.linspace(-10., 10, 100) + this_flex_dict['corr_cen'][iobj] #+ flex_dict['shift'][o] #model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0] model = utils.func_val(fit, xval, 'polynomial') mxmod = np.max(model) ylim = [np.min(model/mxmod), 1.3] ax.plot(xval-this_flex_dict['corr_cen'][iobj], model/mxmod, 'k-') # Measurements ax.scatter(this_flex_dict['subpix'][iobj]-this_flex_dict['corr_cen'][iobj], this_flex_dict['corr'][iobj]/mxmod, marker='o') # Final shift ax.plot([this_flex_dict['shift'][iobj]]*2, ylim, 'g:') # Label if slit_cen: ax.text(0.5, 0.25, 'Slit Center', transform=ax.transAxes, size='large', ha='center') else: ax.text(0.5, 0.25, '{:s}'.format(specobj.idx), transform=ax.transAxes, size='large', ha='center') ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(this_flex_dict['shift'][iobj]), transform=ax.transAxes, size='large', ha='center')#, bbox={'facecolor':'white'}) # Axes ax.set_ylim(ylim) ax.set_xlabel('Lag') # Finish plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0) plt.savefig(outfile, dpi=400) plt.close() # Sky line QA (just one object) if slit_cen: iobj = 0 else: iobj = 0 specobj = this_specobjs[iobj] sky_spec = this_flex_dict['sky_spec'][iobj] arx_spec = this_flex_dict['arx_spec'][iobj] # Sky lines sky_lines = np.array([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304, 7340.885, 7993.332, 8430.174, 8919.610, 9439.660, 10013.99, 10372.88])*units.AA dwv = 20.*units.AA gdsky = np.where((sky_lines > sky_spec.wvmin) & (sky_lines < sky_spec.wvmax))[0] if len(gdsky) == 0: msgs.warn("No sky lines for 
Flexure QA") return if len(gdsky) > 6: idx = np.array([0, 1, len(gdsky)//2, len(gdsky)//2+1, -2, -1]) gdsky = gdsky[idx] # Outfile outfile = qa.set_qa_filename(basename, method+'_sky', det=det,slit=(slit + 1), out_dir=out_dir) # Figure plt.figure(figsize=(8, 5.0)) plt.clf() nrow, ncol = 2, 3 gs = gridspec.GridSpec(nrow, ncol) if slit_cen: plt.suptitle('Sky Comparison for Slit Center', y=1.05) else: plt.suptitle('Sky Comparison for {:s}'.format(specobj.idx), y=1.05) for ii, igdsky in enumerate(gdsky): skyline = sky_lines[igdsky] ax = plt.subplot(gs[ii//ncol, ii % ncol]) # Norm pix = np.where(np.abs(sky_spec.wavelength-skyline) < dwv)[0] f1 = np.sum(sky_spec.flux[pix]) f2 = np.sum(arx_spec.flux[pix]) norm = f1/f2 # Plot ax.plot(sky_spec.wavelength[pix], sky_spec.flux[pix], 'k-', label='Obj', drawstyle='steps-mid') pix2 = np.where(np.abs(arx_spec.wavelength-skyline) < dwv)[0] ax.plot(arx_spec.wavelength[pix2], arx_spec.flux[pix2]*norm, 'r-', label='Arx', drawstyle='steps-mid') # Axes ax.xaxis.set_major_locator(plt.MultipleLocator(dwv.value)) ax.set_xlabel('Wavelength') ax.set_ylabel('Counts') # Legend plt.legend(loc='upper left', scatterpoints=1, borderpad=0.3, handletextpad=0.3, fontsize='small', numpoints=1) # Finish plt.savefig(outfile, dpi=400) plt.close() #plt.close() plt.rcdefaults() return def flexure_qa_oldbuggyversion(specobjs, maskslits, basename, det, flex_list, slit_cen=False): """ QA on flexure measurement Parameters ---------- det flex_list : list list of dict containing flexure results slit_cen : bool, optional QA on slit center instead of objects Returns ------- """ plt.rcdefaults() plt.rcParams['font.family']= 'times new roman' # Grab the named of the method method = inspect.stack()[0][3] # gdslits = np.where(~maskslits)[0] for sl in range(len(specobjs)): if sl not in gdslits: continue if specobjs[sl][0] is None: continue # Setup if slit_cen: nobj = 1 ncol = 1 else: nobj = len(specobjs[sl]) ncol = min(3, nobj) # if nobj==0: continue nrow = nobj // ncol + ((nobj % ncol) > 0) # Get the flexure dictionary flex_dict = flex_list[sl] # Outfile outfile = qa.set_qa_filename(basename, method+'_corr', det=det, slit=specobjs[sl][0].slitid) plt.figure(figsize=(8, 5.0)) plt.clf() gs = gridspec.GridSpec(nrow, ncol) # Correlation QA for o in range(nobj): ax = plt.subplot(gs[o//ncol, o % ncol]) # Fit fit = flex_dict['polyfit'][o] xval = np.linspace(-10., 10, 100) + flex_dict['corr_cen'][o] #+ flex_dict['shift'][o] #model = (fit[2]*(xval**2.))+(fit[1]*xval)+fit[0] model = utils.func_val(fit, xval, 'polynomial') mxmod = np.max(model) ylim = [np.min(model/mxmod), 1.3] ax.plot(xval-flex_dict['corr_cen'][o], model/mxmod, 'k-') # Measurements ax.scatter(flex_dict['subpix'][o]-flex_dict['corr_cen'][o], flex_dict['corr'][o]/mxmod, marker='o') # Final shift ax.plot([flex_dict['shift'][o]]*2, ylim, 'g:') # Label if slit_cen: ax.text(0.5, 0.25, 'Slit Center', transform=ax.transAxes, size='large', ha='center') else: ax.text(0.5, 0.25, '{:s}'.format(specobjs[sl][o].idx), transform=ax.transAxes, size='large', ha='center') ax.text(0.5, 0.15, 'flex_shift = {:g}'.format(flex_dict['shift'][o]), transform=ax.transAxes, size='large', ha='center')#, bbox={'facecolor':'white'}) # Axes ax.set_ylim(ylim) ax.set_xlabel('Lag') # Finish plt.tight_layout(pad=0.2, h_pad=0.0, w_pad=0.0) plt.savefig(outfile, dpi=400) plt.close() # Sky line QA (just one object) if slit_cen: o = 0 else: o = 0 specobj = specobjs[sl][o] sky_spec = flex_dict['sky_spec'][o] arx_spec = flex_dict['arx_spec'][o] # Sky lines sky_lines = 
np.array([3370.0, 3914.0, 4046.56, 4358.34, 5577.338, 6300.304, 7340.885, 7993.332, 8430.174, 8919.610, 9439.660, 10013.99, 10372.88])*units.AA dwv = 20.*units.AA gdsky =
np.where((sky_lines > sky_spec.wvmin) & (sky_lines < sky_spec.wvmax))[0]
numpy.where
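# Standalone toy version (synthetic Gaussian 'spectra', not PypeIt data) of the
# sub-pixel shift estimate in flex_shift: cross-correlate, locate the peak
# within +/- mxshft of zero lag, and fit a parabola to read off the vertex.
# Note np.polyfit returns the highest-order coefficient first, hence
# -0.5 * fit[1] / fit[0] here.
import numpy as np

x = np.linspace(0.0, 100.0, 1000)
ref = np.exp(-0.5 * ((x - 50.0) / 1.5) ** 2)
shift_true = 7
obs = np.roll(ref, shift_true)

corr = np.correlate(obs, ref, "same")
lag0 = corr.size // 2
mxshft = 20
imax = np.argmax(corr[lag0 - mxshft:lag0 + mxshft]) + lag0 - mxshft
grid = np.arange(imax - 3, imax + 4)
fit = np.polyfit(grid, corr[grid], 2)
shift_est = -0.5 * fit[1] / fit[0] - lag0
print(shift_true, round(shift_est, 2))  # recovers ~7 pixels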
import numpy as np import pinocchio from .state import StatePinocchio, StateVector from .utils import EPS, a2m, randomOrthonormalMatrix class DifferentialActionModelAbstract: """ Abstract class for the differential action model. In crocoddyl, an action model combines dynamics and cost data. Each node, in our optimal control problem, is described through an action model. Every time that we want describe a problem, we need to provide ways of computing the dynamics, cost functions and their derivatives. These computations are mainly carry on inside calc() and calcDiff(), respectively. """ def __init__(self, nq, nv, nu): self.nq = nq self.nv = nv self.nu = nu self.nx = nq + nv self.ndx = 2 * nv self.nout = nv self.unone = np.zeros(self.nu) def createData(self): """ Create the differential action data. Each differential action model has its own data that needs to be allocated. This function returns the allocated data for a predefined DAM. Note that you need to defined the DifferentialActionDataType inside your DAM. :return DAM data. """ return self.DifferentialActionDataType(self) def calc(self, data, x, u=None): """ Compute the state evolution and cost value. First, it describes the time-continuous evolution of our dynamical system in which along predefined integrated action self we might obtain the next discrete state. Indeed it computes the time derivatives of the state from a predefined dynamical system. Additionally it computes the cost value associated to this state and control pair. :param self: differential action model :param data: differential action data :param x: state vector :param u: control input """ raise NotImplementedError("Not implemented yet.") def calcDiff(self, data, x, u=None, recalc=True): """ Compute the derivatives of the dynamics and cost functions. It computes the partial derivatives of the dynamical system and the cost function. If recalc == True, it first updates the state evolution and cost value. This function builds a quadratic approximation of the time-continuous action model (i.e. dynamical system and cost function). :param model: differential action model :param data: differential action data :param x: state vector :param u: control input :param recalc: If true, it updates the state evolution and the cost value. """ raise NotImplementedError("Not implemented yet.") class DifferentialActionDataAbstract: def __init__(self, model, costData=None): """ Create common data shared between DAMs. In crocoddyl, a DAD might use an externally defined cost data. If so, you need to pass your own cost data using costData. Otherwise it will be allocated here. 
:param model: differential action model :param costData: external cost data (optional) """ ndx, nu, nout = model.ndx, model.nu, model.nout # State evolution and cost data self.cost = np.nan self.xout = np.zeros(nout) # Dynamics data self.Fx = np.zeros([nout, ndx]) self.Fu = np.zeros([nout, nu]) # Cost data if costData is None: self.g = np.zeros([ndx + nu]) self.L = np.zeros([ndx + nu, ndx + nu]) self.Lx = self.g[:ndx] self.Lu = self.g[ndx:] self.Lxx = self.L[:ndx, :ndx] self.Lxu = self.L[:ndx, ndx:] self.Luu = self.L[ndx:, ndx:] if hasattr(model, 'ncost') and model.ncost > 1: ncost = model.ncost self.costResiduals = np.zeros(ncost) self.R = np.zeros([ncost, ndx + nu]) self.Rx = self.R[:, ndx:] self.Ru = self.R[:, ndx:] else: self.costs = costData self.Lx = self.costs.Lx self.Lu = self.costs.Lu self.Lxx = self.costs.Lxx self.Lxu = self.costs.Lxu self.Luu = self.costs.Luu if model.ncost > 1: self.costResiduals = self.costs.residuals self.Rx = self.costs.Rx self.Ru = self.costs.Ru class DifferentialActionModelFullyActuated(DifferentialActionModelAbstract): def __init__(self, pinocchioModel, costModel): DifferentialActionModelAbstract.__init__(self, pinocchioModel.nq, pinocchioModel.nv, pinocchioModel.nv) self.DifferentialActionDataType = DifferentialActionDataFullyActuated self.pinocchio = pinocchioModel self.State = StatePinocchio(self.pinocchio) self.costs = costModel # Use this to force the computation with ABA # Side effect is that armature is not used. self.forceAba = False @property def ncost(self): return self.costs.ncost def calc(self, data, x, u=None): if u is None: u = self.unone nq, nv = self.nq, self.nv q = a2m(x[:nq]) v = a2m(x[-nv:]) tauq = a2m(u) # --- Dynamics if self.forceAba: data.xout[:] = pinocchio.aba(self.pinocchio, data.pinocchio, q, v, tauq).flat else: pinocchio.computeAllTerms(self.pinocchio, data.pinocchio, q, v) data.M = data.pinocchio.M if hasattr(self.pinocchio, 'armature'): data.M[range(nv), range(nv)] += self.pinocchio.armature.flat data.Minv = np.linalg.inv(data.M) data.xout[:] = data.Minv * (tauq - data.pinocchio.nle).flat # --- Cost pinocchio.forwardKinematics(self.pinocchio, data.pinocchio, q, v) pinocchio.updateFramePlacements(self.pinocchio, data.pinocchio) data.cost = self.costs.calc(data.costs, x, u) return data.xout, data.cost def calcDiff(self, data, x, u=None, recalc=True): if u is None: u = self.unone if recalc: xout, cost = self.calc(data, x, u) nq, nv = self.nq, self.nv q = a2m(x[:nq]) v = a2m(x[-nv:]) tauq = a2m(u) a = a2m(data.xout) # --- Dynamics if self.forceAba: pinocchio.computeABADerivatives(self.pinocchio, data.pinocchio, q, v, tauq) data.Fx[:, :nv] = data.pinocchio.ddq_dq data.Fx[:, nv:] = data.pinocchio.ddq_dv data.Fu[:, :] = data.pinocchio.Minv else: pinocchio.computeRNEADerivatives(self.pinocchio, data.pinocchio, q, v, a) data.Fx[:, :nv] = -np.dot(data.Minv, data.pinocchio.dtau_dq) data.Fx[:, nv:] = -np.dot(data.Minv, data.pinocchio.dtau_dv) data.Fu[:, :] = data.Minv # --- Cost pinocchio.computeJointJacobians(self.pinocchio, data.pinocchio, q) pinocchio.updateFramePlacements(self.pinocchio, data.pinocchio) self.costs.calcDiff(data.costs, x, u, recalc=False) return data.xout, data.cost class DifferentialActionDataFullyActuated(DifferentialActionDataAbstract): def __init__(self, model): self.pinocchio = model.pinocchio.createData() costData = model.costs.createData(self.pinocchio) DifferentialActionDataAbstract.__init__(self, model, costData) class DifferentialActionModelLQR(DifferentialActionModelAbstract): """ Differential action model 
for linear dynamics and quadratic cost. This class implements a linear dynamics, and quadratic costs (i.e. LQR action). Since the DAM is a second order system, and the integrated action models are implemented as being second order integrators. This class implements a second order linear system given by x = [q, v] dv = Fq q + Fv v + Fu u + f0 where Fq, Fv, Fu and f0 are randomly chosen constant terms. On the other hand the cost function is given by l(x,u) = 1/2 [x,u].T [Lxx Lxu; Lxu.T Luu] [x,u] + [lx,lu].T [x,u] """ def __init__(self, nq, nu, driftFree=True): DifferentialActionModelAbstract.__init__(self, nq, nq, nu) self.DifferentialActionDataType = DifferentialActionDataLQR self.State = StateVector(self.nx) # linear dynamics and quadratic cost terms self.Fq = randomOrthonormalMatrix(self.nq) self.Fv = randomOrthonormalMatrix(self.nv) self.Fu = randomOrthonormalMatrix(self.nq)[:, :self.nu] self.f0 = np.zeros(self.nv) if driftFree else np.random.rand(self.nv) A = np.random.rand(self.ndx + self.nu, self.ndx + self.nu) L = np.dot(A.T, A) self.Lxx = L[:self.nx, :self.nx] self.Lxu = L[:self.nx, self.nx:] self.Luu = L[self.nx:, self.nx:] self.lx = np.random.rand(self.nx) self.lu = np.random.rand(self.nu) def calc(model, data, x, u=None): if u is None: u = model.unone q = x[:model.nq] v = x[model.nq:] data.xout[:] = \ np.dot(model.Fq, q) +
np.dot(model.Fv, v) + np.dot(model.Fu, u) + model.f0
numpy.dot
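# Numpy-only sketch (assumed shapes, no crocoddyl/pinocchio dependency) of the
# second-order linear dynamics that DifferentialActionModelLQR describes:
# x = [q, v], dv = Fq q + Fv v + Fu u + f0, advanced here with a plain Euler step
# the way a simple integrated action model would.
import numpy as np

nq, nu, dt = 3, 2, 1e-2
rng = np.random.default_rng(1)
Fq, Fv = rng.normal(size=(nq, nq)), rng.normal(size=(nq, nq))
Fu, f0 = rng.normal(size=(nq, nu)), np.zeros(nq)

q, v, u = rng.normal(size=nq), rng.normal(size=nq), rng.normal(size=nu)
dv = Fq @ q + Fv @ v + Fu @ u + f0  # the model's 'xout'
q_next, v_next = q + dt * v, v + dt * dv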
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Sun Sep 19 12:28:14 2021 @author: alankar python dirty-check.py fields 1371 300 ./output-128/ """ import sys import h5py import numpy as np import matplotlib.pyplot as plt from scipy.interpolate import interp1d mp = 1.67e-24 pc = 3.086e18 kB = 1.38e-16 Myr= 1e6*365*24*60**2 cool = np.loadtxt('cooltable.dat') cool = interp1d(cool[:,0],cool[:,1]) X = 0.7154 Y = 0.2703 Z = 0.0143 mu = 1./(2*X+0.75*Y+0.5625*Z) mue = 2./(1+X) mui = 1./(1/mu-1/mue) gamma = 5/3. nhot = 3.16e-5 Thot = 3.16e6 rho = nhot*mu*mp p = nhot*kB*Thot cs = np.sqrt(gamma*p/rho) tcool = p/(rho**2*cool(Thot)/(mue*mui*mp**2))/(gamma-1) freq = 1.0 max_val, min_val = None, None start = 0 base_dir = sys.argv[4] #'./output-128' #res = int((sys.argv[4])[9:-1]) hfile = h5py.File('%s/data.%04d.dbl.h5'%(base_dir, start),'r') res = np.array(hfile['cell_coords/X']).shape[0] X = np.array(hfile['cell_coords/X'])*pc hfile.close() total = int(sys.argv[2]) - start rho_all = np.zeros((total,res)) vr_all = np.zeros((total,res)) T_all = np.zeros((total,res)) p_all = np.zeros((total,res)) mac_all = np.zeros((total,res)) ent_all = np.zeros((total,res)) tcool_all = np.zeros((total,res)) min_ent, max_ent = None, None min_den, max_den = None, None min_vel, max_vel = None, None min_prs, max_prs = None, None min_mac, max_mac = None, None min_tmp, max_tmp = None, None time = None for file_no in range(start,int(sys.argv[2])): hfile = h5py.File('%s/data.%04d.dbl.h5'%(base_dir, file_no),'r') time = file_no*pc/1e5*freq X = np.array(hfile['cell_coords/X'])*pc rho = np.array(hfile['Timestep_%d/vars/rho'%file_no])*mp vr = np.array(hfile['Timestep_%d/vars/vx1'%file_no])*1e5 T = np.array(hfile['Timestep_%d/vars/T'%file_no]) p = np.array(hfile['Timestep_%d/vars/prs'%file_no])*mp*1e10 cs = np.sqrt(gamma*p/rho) mach = vr/cs #Mdot = 4*np.pi*X**2*rho*vr/(2e33/(365*24*60**2)) entropy = p/rho**gamma tcoolg = p/(rho**2*cool(T)/(mue*mui*mp**2))/(gamma-1)/Myr hfile.close() rho_all[file_no-start,:] = rho vr_all[file_no-start,:] = vr T_all[file_no-start,:] = T p_all[file_no-start,:] = p mac_all[file_no-start,:] = mach ent_all[file_no-start,:] = entropy tcool_all[file_no-start,:] = tcoolg if file_no==start: max_ent = np.max(entropy) min_ent = np.min(entropy) max_den = np.max(rho/(mu*mp)) min_den = np.min(rho/(mu*mp)) max_vel = np.max(vr/1e5) min_vel =
np.min(vr/1e5)
numpy.min
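# Order-of-magnitude check of the hot-phase sound speed and cooling time used
# above. The tabulated cooling function (cooltable.dat) is replaced by an
# assumed constant Lambda, so tcool here is only a placeholder estimate.
import numpy as np

mp, kB, Myr = 1.67e-24, 1.38e-16, 1e6 * 365 * 24 * 60**2
X, Y, Z = 0.7154, 0.2703, 0.0143
mu = 1.0 / (2 * X + 0.75 * Y + 0.5625 * Z)
mue = 2.0 / (1 + X)
mui = 1.0 / (1 / mu - 1 / mue)
gamma = 5.0 / 3.0

nhot, Thot = 3.16e-5, 3.16e6
rho, p = nhot * mu * mp, nhot * kB * Thot
cs = np.sqrt(gamma * p / rho)  # roughly 2.7e7 cm/s for these values
Lambda = 1e-22                 # erg cm^3 s^-1, assumed placeholder
tcool = p / (rho**2 * Lambda / (mue * mui * mp**2)) / (gamma - 1)
print(cs / 1e5, tcool / Myr)   # km/s and Myr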
# Code for plan-net # <NAME>, Feb 2019 # Keras import keras from keras import backend as K from keras.models import Sequential, Model from keras.metrics import binary_accuracy from keras.layers import Convolution1D, Dense, MaxPooling1D, Flatten, Input, Lambda, Wrapper, merge, concatenate from keras.engine import InputSpec from keras.layers.core import Dense, Dropout, Activation, Layer, Lambda, Flatten from keras.regularizers import l2 from keras.optimizers import RMSprop, Adadelta, adam from keras.layers.advanced_activations import LeakyReLU from keras import initializers import tensorflow as tf from itertools import product import numpy as np import matplotlib.pyplot as plt from matplotlib.colors import LinearSegmentedColormap from sklearn import metrics, neighbors from sklearn.preprocessing import MinMaxScaler # From: https://github.com/yaringal/ConcreteDropout/blob/master/concrete-dropout-keras.ipynb class ConcreteDropout(Wrapper): """This wrapper allows to learn the dropout probability for any given input Dense layer. ```python # as the first layer in a model model = Sequential() model.add(ConcreteDropout(Dense(8), input_shape=(16))) # now model.output_shape == (None, 8) # subsequent layers: no need for input_shape model.add(ConcreteDropout(Dense(32))) # now model.output_shape == (None, 32) ``` `ConcreteDropout` can be used with arbitrary layers which have 2D kernels, not just `Dense`. However, Conv2D layers require different weighing of the regulariser (use SpatialConcreteDropout instead). # Arguments layer: a layer instance. weight_regularizer: A positive number which satisfies $weight_regularizer = l**2 / (\tau * N)$ with prior lengthscale l, model precision $\tau$ (inverse observation noise), and N the number of instances in the dataset. Note that kernel_regularizer is not needed. dropout_regularizer: A positive number which satisfies $dropout_regularizer = 2 / (\tau * N)$ with model precision $\tau$ (inverse observation noise) and N the number of instances in the dataset. Note the relation between dropout_regularizer and weight_regularizer: $weight_regularizer / dropout_regularizer = l**2 / 2$ with prior lengthscale l. Note also that the factor of two should be ignored for cross-entropy loss, and used only for the eculedian loss. """ def __init__(self, layer, weight_regularizer=1e-6, dropout_regularizer=1e-5, init_min=0.1, init_max=0.1, is_mc_dropout=True, **kwargs): assert 'kernel_regularizer' not in kwargs super(ConcreteDropout, self).__init__(layer, **kwargs) self.weight_regularizer = weight_regularizer self.dropout_regularizer = dropout_regularizer self.is_mc_dropout = is_mc_dropout self.supports_masking = True self.p_logit = None self.p = None self.init_min = np.log(init_min) - np.log(1. - init_min) self.init_max = np.log(init_max) - np.log(1. - init_max) def build(self, input_shape=None): self.input_spec = InputSpec(shape=input_shape) if not self.layer.built: self.layer.build(input_shape) self.layer.built = True super(ConcreteDropout, self).build() # this is very weird.. 
we must call super before we add new losses # initialise p self.p_logit = self.layer.add_weight(name='p_logit', shape=(1,), initializer=initializers.RandomUniform(self.init_min, self.init_max), trainable=True) self.p = K.sigmoid(self.p_logit[0]) # initialise regulariser / prior KL term assert len(input_shape) == 2, 'this wrapper only supports Dense layers' input_dim = np.prod(input_shape[-1]) # we drop only last dim weight = self.layer.kernel kernel_regularizer = self.weight_regularizer * K.sum(K.square(weight)) / (1. - self.p) dropout_regularizer = self.p * K.log(self.p) dropout_regularizer += (1. - self.p) * K.log(1. - self.p) dropout_regularizer *= self.dropout_regularizer * input_dim regularizer = K.sum(kernel_regularizer + dropout_regularizer) self.layer.add_loss(regularizer) def compute_output_shape(self, input_shape): return self.layer.compute_output_shape(input_shape) def concrete_dropout(self, x): ''' Concrete dropout - used at training time (gradients can be propagated) :param x: input :return: approx. dropped out input ''' eps = K.cast_to_floatx(K.epsilon()) temp = 0.1 unif_noise = K.random_uniform(shape=K.shape(x)) drop_prob = ( K.log(self.p + eps) - K.log(1. - self.p + eps) + K.log(unif_noise + eps) - K.log(1. - unif_noise + eps) ) drop_prob = K.sigmoid(drop_prob / temp) random_tensor = 1. - drop_prob retain_prob = 1. - self.p x *= random_tensor x /= retain_prob return x def call(self, inputs, training=None): if self.is_mc_dropout: return self.layer.call(self.concrete_dropout(inputs)) else: def relaxed_dropped_inputs(): return self.layer.call(self.concrete_dropout(inputs)) return K.in_train_phase(relaxed_dropped_inputs, self.layer.call(inputs), training=training) # This is the BNN class which learns cholesky. class BNNModel_het_chol: """ Builds basic BNN model around training data """ def __init__(self, X: np.array, Y: np.array, architecture: list, dropout = 0.1, T = 10, tau = 1.0, lengthscale = 1., base_lr = 5e-2, gamma = 0.0001*0.25, ens_num = 0, train_flag = True): """ :X: training data X -> so far only implemented for 1D data, needs to be of shape (n,1) or (1,n) :Y: training data y, needs to be passed as array of shape (n,1); :param architecture: list of perceptrons per layer, as long as network deep :param dropout: probability of perceptron being dropped out :param T: number of samples from posterior of weights during test time :param tau: precision of prior :param lengthscale: lengthscale :param base_lr: initial learning rate for SGD optimizer :param gamma: parameter for decay of initial learning rate according to default SGD learning schedule """ if np.shape(X)[0] == len(Y): assert np.shape(X)[1] >= 1 else: assert
np.shape(X)
numpy.shape
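# Pure-numpy sketch of the Concrete-dropout relaxation that concrete_dropout()
# applies inside the wrapper above: a soft Bernoulli mask built from uniform
# noise with a sigmoid and temperature, then rescaled by the retain probability.
# Toy values only; the Keras wrapper learns p via p_logit.
import numpy as np

def sigmoid(z):
    return 1.0 / (1.0 + np.exp(-z))

rng = np.random.default_rng(0)
p, temp, eps = 0.1, 0.1, 1e-7
x = rng.normal(size=(4, 8))

u = rng.uniform(size=x.shape)
drop_prob = sigmoid((np.log(p + eps) - np.log(1.0 - p + eps)
                     + np.log(u + eps) - np.log(1.0 - u + eps)) / temp)
x_dropped = x * (1.0 - drop_prob) / (1.0 - p)  # retain mask, then rescale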
from typing import Union import gym import numpy as np from PIL import Image from gym import Env, ObservationWrapper from gym.spaces import Box, Discrete def get_wrapper(env, wrapper_type): w = next(filter(lambda w: isinstance(w, wrapper_type), list_wrappers(env)), None) return w def list_wrappers(env: Union[Env, gym.Wrapper]): while isinstance(env, gym.Wrapper): yield env env = env.env class Wrapper(gym.Wrapper): def __init__(self, env): super().__init__(env) self.meta = {} def set_meta(self, meta): self.meta = meta w = get_wrapper(self.env, Wrapper) if w: w.set_meta(meta) def _reset(self): observation = self.env.reset() return self._observation(observation) def observation(self, observation): return self._observation(observation) def _observation(self, observation): return observation def _step(self, action): action = self.action(action) observation, reward, done, info = self.env.step(action) return self.observation(observation), self.reward(reward), done, info def reward(self, reward): return self._reward(reward) def _reward(self, reward): return reward def action(self, action): return self._action(action) def _action(self, action): return action def reverse_action(self, action): return self._reverse_action(action) def _reverse_action(self, action): return action class SkipWrapper(Wrapper): def __init__(self, env, repeat_count): super().__init__(env) self.repeat_count = repeat_count self.stepcount = 0 def _step(self, action): done = False total_reward = 0 total_unwrapped_reward = 0 current_step = 0 while current_step < (self.repeat_count + 1) and not done: self.stepcount += 1 obs, reward, done, info = self.env.step(action) total_reward += reward total_unwrapped_reward += info.get('unwrapped_reward', reward) current_step += 1 if 'skip.stepcount' in info: raise gym.error.Error('Key "skip.stepcount" already in info. Make sure you are not stacking ' 'the SkipWrapper wrappers.') info['skip.stepcount'] = self.stepcount if 'unwrapped_reward' in info: info['unwrapped_reward'] = total_unwrapped_reward return obs, total_reward, done, info def _reset(self): self.stepcount = 0 return self.env.reset() class DiscretizeActions(gym.Wrapper): def __init__(self, env, actions): super().__init__(env) acsp = self.env.action_space assert isinstance(acsp, Box), "action space not continuous" self.actions = np.array(actions) assert self.actions.shape[1:] == acsp.shape, "shape of actions does not match action space" self.action_space = Discrete(self.actions.shape[0]) def _step(self, action): a = self.actions[action] return super()._step(a) class AtariWrapper(ObservationWrapper): """ Pre-processing according to the following paper: http://www.nature.com/nature/journal/v518/n7540/full/nature14236.html """ def __init__(self, env): super().__init__(env) lo = self.env.observation_space.low hi = self.env.observation_space.high w, h, c = self.env.observation_space.shape self.w = w self.h = h self.observation_space = Box(0, 255, [84, 84]) def _reset(self): self.previous_frame = np.zeros([self.w, self.h, 3], dtype=np.uint8) o = super()._reset() return o def _step(self, action): s, r, t, i = super()._step(action) i.setdefault('unwrapped_reward', r) r = np.clip(r, -1, 1) return s, r, t, i def _observation(self, observation): """ Paper: First, to encode a single frame we take the maximum value for each pixel colour value over the frame being encoded and the previous frame. 
This was necessary to remove flickering that is present in games where some objects appear only in even frames while other objects appear only in odd frames, an artefact caused by the limited number of sprites Atari 2600 can display at once. """ obs =
np.maximum(observation, self.previous_frame)
numpy.maximum
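The pre-processing quoted in the snippet above takes the per-pixel maximum of the current and previous frame to suppress sprite flicker. A small stand-alone NumPy sketch of that step follows; the frame shape and random contents are placeholders.

import numpy as np

rng = np.random.default_rng(0)
prev_frame = rng.integers(0, 256, size=(210, 160, 3), dtype=np.uint8)
curr_frame = rng.integers(0, 256, size=(210, 160, 3), dtype=np.uint8)

# Element-wise max over two consecutive frames keeps objects that only
# appear on even or odd frames visible in the encoded observation.
deflickered = np.maximum(curr_frame, prev_frame)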
import numpy as np from keras.models import Sequential from keras.layers.core import Dense, Activation from keras.layers import Conv1D, MaxPooling1D from keras.optimizers import adam from keras.callbacks import History from keras.callbacks import EarlyStopping from keras import metrics import os history = History() import time import json import matplotlib from matplotlib import pyplot as plt print("Using:",matplotlib.get_backend()) cwd = os.getcwd() ## saving and setting up datasets #test_data = np.load('mini_synth_set.npy') #test_data = np.load('synth_set.npy') #np.random.shuffle(test_data) # shuffles the dataset row wise #num_train = int(np.shape(test_data)[0]*0.8) #num_val = int(np.shape(test_data)[0]*0.1) #num_test = int(np.shape(test_data)[0]*0.1) #train = test_data[0:num_train,:] #test = test_data[num_train:num_train+num_test,:] #val = test_data[num_train+num_test:num_train+num_test+num_val,:] #np.save('synth_set_train.npy',train) #np.save('synth_set_val.npy',val) #np.save('synth_set_test.npy',test) fakes =
np.load(cwd + "/datasets/mini_synth_set.npy")
numpy.load
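The commented-out block in the snippet above shuffles the dataset row-wise and splits it 80/10/10 into train/test/validation before saving each part. A hedged sketch of that split on a placeholder array; the array contents are made up and only the proportions and slicing order follow the comments.

import numpy as np

data = np.random.rand(1000, 16)            # stand-in for the loaded .npy dataset
np.random.shuffle(data)                    # shuffles the dataset row-wise
n_train = int(data.shape[0] * 0.8)
n_test = int(data.shape[0] * 0.1)
n_val = int(data.shape[0] * 0.1)
train = data[:n_train]
test = data[n_train:n_train + n_test]
val = data[n_train + n_test:n_train + n_test + n_val]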
"""Linear model base class.""" import abc import numpy as np import six @six.add_metaclass(abc.ABCMeta) class LinearModel(object): """Abstract class for linear models.""" def __init__(self, ndims, w_init='zeros'): """Initialize a linear model. This function prepares an uninitialized linear model. It will initialize the weight vector, self.w, based on the method specified in w_init. We assume that the last index of w is the bias term, self.w = [w,b] self.w(numpy.ndarray): array of dimension (n_dims+1,) w_init needs to support: 'zeros': initialize self.w with all zeros. 'ones': initialze self.w with all ones. 'uniform': initialize self.w with uniform random number between [0,1) Args: ndims(int): feature dimension w_init(str): types of initialization. """ self.ndims = ndims self.w_init = w_init w_dim = ndims + 1 self.w = None if w_init == 'ones': self.w = np.ones((w_dim,)) elif w_init == 'uniform': self.w = np.random.random_sample((w_dim,)) else: self.w =
np.zeros((w_dim,))
numpy.zeros
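The constructor in the snippet above supports three weight initialisations over an (ndims + 1)-vector whose last entry is the bias. A short sketch of the three options with an assumed feature dimension:

import numpy as np

ndims = 4
w_dim = ndims + 1                               # last index is the bias term
w_zeros = np.zeros((w_dim,))                    # 'zeros'
w_ones = np.ones((w_dim,))                      # 'ones'
w_uniform = np.random.random_sample((w_dim,))   # 'uniform' in [0, 1)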
## # \brief Gumbel copula. from __future__ import print_function, absolute_import, division import numpy as np from starvine.bvcopula.copula.copula_base import CopulaBase class GumbelCopula(CopulaBase): """! @brief Gumbel copula single paramter model \f$\theta \in [1, \infty) \f$ """ def __init__(self, rotation=0, init_params=None): super(GumbelCopula, self).__init__(rotation, params=init_params) self.thetaBounds = ((1 + 1e-9, np.inf),) self.theta0 = (2.0, ) self.rotation = rotation self.name = 'gumbel' @CopulaBase._rotPDF def _pdf(self, u, v, rotation=0, *theta): """! @brief Probability density function for gumbel bivariate copula """ h1 = theta[0] - 1.0 # h2 = (1.0 - 2.0 ** theta[0]) / theta[0] h2 = (1.0 - 2.0 * theta[0]) / theta[0] h3 = 1.0 / theta[0] UU = np.asarray(u) VV =
np.asarray(v)
numpy.asarray
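The snippet above implements the Gumbel copula density; for orientation, the corresponding copula CDF has the standard closed form C(u, v) = exp(-[(-ln u)^theta + (-ln v)^theta]^(1/theta)) for theta >= 1. A small NumPy sketch of that CDF, independent of the starvine base class:

import numpy as np

def gumbel_copula_cdf(u, v, theta=2.0):
    """C(u, v) = exp(-((-log u)**theta + (-log v)**theta)**(1.0/theta)), theta >= 1."""
    u, v = np.asarray(u, dtype=float), np.asarray(v, dtype=float)
    s = (-np.log(u)) ** theta + (-np.log(v)) ** theta
    return np.exp(-s ** (1.0 / theta))

print(gumbel_copula_cdf(0.3, 0.7, theta=2.0))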
import numpy as np import cv2 import operator import numpy as np from matplotlib import pyplot as plt def plot_many_images(images, titles, rows=1, columns=2): """Plots each image in a given list as a grid structure. using Matplotlib.""" for i, image in enumerate(images): plt.subplot(rows, columns, i+1) plt.imshow(image, 'gray') plt.title(titles[i]) plt.xticks([]), plt.yticks([]) # Hide tick marks plt.show() def show_image(img): """Shows an image until any key is pressed""" # print(type(img)) # print(img.shape) # cv2.imshow('image', img) # Display the image # cv2.imwrite('images/gau_sudoku3.jpg', img) # cv2.waitKey(0) # Wait for any key to be pressed (with the image window active) # cv2.destroyAllWindows() # Close all windows return img def show_digits(digits, colour=255): """Shows list of 81 extracted digits in a grid format""" rows = [] with_border = [cv2.copyMakeBorder(img.copy(), 1, 1, 1, 1, cv2.BORDER_CONSTANT, None, colour) for img in digits] for i in range(9): row = np.concatenate(with_border[i * 9:((i + 1) * 9)], axis=1) rows.append(row) img = show_image(np.concatenate(rows)) return img def convert_when_colour(colour, img): """Dynamically converts an image to colour if the input colour is a tuple and the image is grayscale.""" if len(colour) == 3: if len(img.shape) == 2: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) elif img.shape[2] == 1: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) return img def display_points(in_img, points, radius=5, colour=(0, 0, 255)): """Draws circular points on an image.""" img = in_img.copy() # Dynamically change to a colour image if necessary if len(colour) == 3: if len(img.shape) == 2: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) elif img.shape[2] == 1: img = cv2.cvtColor(img, cv2.COLOR_GRAY2BGR) for point in points: img = cv2.circle(img, tuple(int(x) for x in point), radius, colour, -1) show_image(img) return img def display_rects(in_img, rects, colour=(0, 0, 255)): """Displays rectangles on the image.""" img = convert_when_colour(colour, in_img.copy()) for rect in rects: img = cv2.rectangle(img, tuple(int(x) for x in rect[0]), tuple(int(x) for x in rect[1]), colour) show_image(img) return img def display_contours(in_img, contours, colour=(0, 0, 255), thickness=2): """Displays contours on the image.""" img = convert_when_colour(colour, in_img.copy()) img = cv2.drawContours(img, contours, -1, colour, thickness) show_image(img) def pre_process_image(img, skip_dilate=False): """Uses a blurring function, adaptive thresholding and dilation to expose the main features of an image.""" # Gaussian blur with a kernal size (height, width) of 9. # Note that kernal sizes must be positive and odd and the kernel must be square. proc = cv2.GaussianBlur(img.copy(), (9, 9), 0) # Adaptive threshold using 11 nearest neighbour pixels proc = cv2.adaptiveThreshold(proc, 255, cv2.ADAPTIVE_THRESH_GAUSSIAN_C, cv2.THRESH_BINARY, 11, 2) # Invert colours, so gridlines have non-zero pixel values. # Necessary to dilate the image, otherwise will look like erosion instead. proc = cv2.bitwise_not(proc, proc) if not skip_dilate: # Dilate the image to increase the size of the grid lines. 
kernel = np.array([[0., 1., 0.], [1., 1., 1.], [0., 1., 0.]],np.uint8) proc = cv2.dilate(proc, kernel) return proc def find_corners_of_largest_polygon(img): """Finds the 4 extreme corners of the largest contour in the image.""" opencv_version = cv2.__version__.split('.')[0] if opencv_version == '3': _, contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours else: contours, h = cv2.findContours(img.copy(), cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) # Find contours contours = sorted(contours, key=cv2.contourArea, reverse=True) # Sort by area, descending polygon = contours[0] # Largest image # Use of `operator.itemgetter` with `max` and `min` allows us to get the index of the point # Each point is an array of 1 coordinate, hence the [0] getter, then [0] or [1] used to get x and y respectively. # Bottom-right point has the largest (x + y) value # Top-left has point smallest (x + y) value # Bottom-left point has smallest (x - y) value # Top-right point has largest (x - y) value bottom_right, _ = max(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1)) top_left, _ = min(enumerate([pt[0][0] + pt[0][1] for pt in polygon]), key=operator.itemgetter(1)) bottom_left, _ = min(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1)) top_right, _ = max(enumerate([pt[0][0] - pt[0][1] for pt in polygon]), key=operator.itemgetter(1)) # Return an array of all 4 points using the indices # Each point is in its own array of one coordinate return [polygon[top_left][0], polygon[top_right][0], polygon[bottom_right][0], polygon[bottom_left][0]] def distance_between(p1, p2): """Returns the scalar distance between two points""" a = p2[0] - p1[0] b = p2[1] - p1[1] return np.sqrt((a ** 2) + (b ** 2)) def crop_and_warp(img, crop_rect): """Crops and warps a rectangular section from an image into a square of similar size.""" # Rectangle described by top left, top right, bottom right and bottom left points top_left, top_right, bottom_right, bottom_left = crop_rect[0], crop_rect[1], crop_rect[2], crop_rect[3] # Explicitly set the data type to float32 or `getPerspectiveTransform` will throw an error src = np.array([top_left, top_right, bottom_right, bottom_left], dtype='float32') # Get the longest side in the rectangle side = max([ distance_between(bottom_right, top_right), distance_between(top_left, bottom_left), distance_between(bottom_right, bottom_left), distance_between(top_left, top_right) ]) # Describe a square with side of the calculated length, this is the new perspective we want to warp to dst = np.array([[0, 0], [side - 1, 0], [side - 1, side - 1], [0, side - 1]], dtype='float32') # Gets the transformation matrix for skewing the image to fit a square by comparing the 4 before and after points m = cv2.getPerspectiveTransform(src, dst) # Performs the transformation on the original image return cv2.warpPerspective(img, m, (int(side), int(side))) def infer_grid(img): """Infers 81 cell grid from a square image.""" squares = [] side = img.shape[:1] side = side[0] / 9 # Note that we swap j and i here so the rectangles are stored in the list reading left-right instead of top-down. 
for j in range(9): for i in range(9): p1 = (i * side, j * side) # Top left corner of a bounding box p2 = ((i + 1) * side, (j + 1) * side) # Bottom right corner of bounding box squares.append((p1, p2)) return squares def cut_from_rect(img, rect): """Cuts a rectangle from an image using the top left and bottom right points.""" return img[int(rect[0][1]):int(rect[1][1]), int(rect[0][0]):int(rect[1][0])] def scale_and_centre(img, size, margin=0, background=0): """Scales and centres an image onto a new background square.""" h, w = img.shape[:2] def centre_pad(length): """Handles centering for a given length that may be odd or even.""" if length % 2 == 0: side1 = int((size - length) / 2) side2 = side1 else: side1 = int((size - length) / 2) side2 = side1 + 1 return side1, side2 def scale(r, x): return int(r * x) if h > w: t_pad = int(margin / 2) b_pad = t_pad ratio = (size - margin) / h w, h = scale(ratio, w), scale(ratio, h) l_pad, r_pad = centre_pad(w) else: l_pad = int(margin / 2) r_pad = l_pad ratio = (size - margin) / w w, h = scale(ratio, w), scale(ratio, h) t_pad, b_pad = centre_pad(h) img = cv2.resize(img, (w, h)) img = cv2.copyMakeBorder(img, t_pad, b_pad, l_pad, r_pad, cv2.BORDER_CONSTANT, None, background) return cv2.resize(img, (size, size)) def find_largest_feature(inp_img, scan_tl=None, scan_br=None): """ Uses the fact the `floodFill` function returns a bounding box of the area it filled to find the biggest connected pixel structure in the image. Fills this structure in white, reducing the rest to black. """ img = inp_img.copy() # Copy the image, leaving the original untouched height, width = img.shape[:2] max_area = 0 seed_point = (None, None) if scan_tl is None: scan_tl = [0, 0] if scan_br is None: scan_br = [width, height] # Loop through the image for x in range(scan_tl[0], scan_br[0]): for y in range(scan_tl[1], scan_br[1]): # Only operate on light or white squares if img.item(y, x) == 255 and x < width and y < height: # Note that .item() appears to take input as y, x area = cv2.floodFill(img, None, (x, y), 64) if area[0] > max_area: # Gets the maximum bound area which should be the grid max_area = area[0] seed_point = (x, y) # Colour everything grey (compensates for features outside of our middle scanning range for x in range(width): for y in range(height): if img.item(y, x) == 255 and x < width and y < height: cv2.floodFill(img, None, (x, y), 64) mask =
np.zeros((height + 2, width + 2), np.uint8)
numpy.zeros
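The completed call above allocates the mask that OpenCV's floodFill expects: two pixels larger than the image in each dimension. A brief sketch of that mask and of the cross-shaped dilation kernel used earlier in the same snippet (array values only; no OpenCV call is made here, and the image size is a placeholder):

import numpy as np

height, width = 450, 450
# cv2.floodFill requires a mask that is 2 pixels taller and wider than the image.
mask = np.zeros((height + 2, width + 2), np.uint8)

# Cross-shaped kernel used to dilate the thresholded grid lines.
kernel = np.array([[0., 1., 0.],
                   [1., 1., 1.],
                   [0., 1., 0.]], np.uint8)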
############################################################################ # This Python file is part of PyFEM, the code that accompanies the book: # # # # 'Non-Linear Finite Element Analysis of Solids and Structures' # # <NAME>, <NAME>, <NAME> and <NAME> # # <NAME> and Sons, 2012, ISBN 978-0470666449 # # # # The code is written by <NAME>, <NAME> and <NAME>. # # # # The latest stable version can be downloaded from the web-site: # # http://www.wiley.com/go/deborst # # # # A github repository, with the most up to date version of the code, # # can be found here: # # https://github.com/jjcremmers/PyFEM # # # # The code is open source and intended for educational and scientific # # purposes only. If you use PyFEM in your research, the developers would # # be grateful if you could cite the book. # # # # Disclaimer: # # The authors reserve all rights but do not guarantee that the code is # # free from errors. Furthermore, the authors shall not be liable in any # # event caused by the use of the program. # ############################################################################ from .Element import Element from pyfem.util.shapeFunctions import getElemShapeData from pyfem.util.kinematics import Kinematics from numpy import zeros, dot, outer, ones , eye, ix_, linalg, tensordot import sys class ThermoSmallStrainContinuum( Element ): def __init__ ( self, elnodes , props ): Element.__init__( self, elnodes , props ) self.rank = props.rank self.k = 1.0e-6 if self.rank == 2: self.dofTypes = [ 'u' , 'v' , 'temp' ] self.nstr = 3 elif self.rank == 3: self.dofTypes = [ 'u' , 'v' , 'w' , 'temp' ] self.nstr = 6 self.kin = Kinematics(self.rank,self.nstr) self.D = self.material.heatConductivity*eye(2) self.capac = self.material.heatCapacity self.alpha = self.material.alpha*ones(self.nstr) self.labels = [ "q1" , "q2" ] self.transient = True self.theta = 1.0 def __type__ ( self ): return name #------------------------------------------------------------------------------- # #------------------------------------------------------------------------------- def getTangentStiffness ( self, elemdat ): sData = getElemShapeData( elemdat.coords ) dDofs,tDofs = self.splitDofIDs( len(elemdat.coords) ) temp0 = elemdat.state [tDofs] - elemdat.Dstate[tDofs] if self.transient: ctt = zeros(shape=(4,4)) invdtime = 1.0/self.solverStat.dtime for iInt,iData in enumerate(sData): B = self.getBmatrix( iData.dhdx ) self.kin.strain = dot ( B , elemdat.state [dDofs] ) self.kin.dstrain = dot ( B , elemdat.Dstate[dDofs] ) temp = sum( iData.h * elemdat.state [tDofs] ) dtemp = sum( iData.h * elemdat.Dstate[tDofs] ) gradTemp = dot( iData.dhdx.transpose() , elemdat.state [tDofs] ) self.kin.strain[:self.nstr] += -self.alpha * temp self.kin.dstrain[:self.nstr] += -self.alpha * dtemp sigma,tang = self.mat.getStress( self.kin ) elemdat.stiff[ix_(dDofs,dDofs)] += \ dot ( B.transpose() , dot ( tang , B ) ) * iData.weight elemdat.stiff[ix_(dDofs,tDofs)] += \ dot ( B.transpose() , outer ( -self.alpha , iData.weight ) ) * iData.weight elemdat.stiff[ix_(tDofs,tDofs)] += \ dot ( iData.dhdx , dot( self.D , iData.dhdx.transpose() ) ) * iData.weight elemdat.fint[dDofs] += dot ( B.transpose() , sigma ) * iData.weight if self.transient: ctt += self.capac * outer( iData.h , iData.h ) * iData.weight self.appendNodalOutput( self.mat.outLabels() , self.mat.outData() ) self.appendNodalOutput( self.labels , dot(self.D,gradTemp) ) if self.transient: ktt0 = invdtime * ctt - elemdat.stiff[ix_(tDofs,tDofs)] * \ ( 1.0-self.theta ) elemdat.stiff *= self.theta 
elemdat.stiff[ix_(tDofs,tDofs)] += invdtime * ctt elemdat.fint[tDofs] += \ dot ( elemdat.stiff[ix_(tDofs,tDofs)] , elemdat.state[tDofs] ) if self.transient: elemdat.fint[tDofs] += -dot ( ktt0 , temp0 ) #------------------------------------------------------------------------------- # #------------------------------------------------------------------------------- def getInternalForce ( self, elemdat ): sData = getElemShapeData( elemdat.coords ) dDofs,tDofs = self.splitDofIDs( len(elemdat.coords) ) temp0 = elemdat.state [tDofs] - elemdat.Dstate[tDofs] stiff = zeros(shape=(4,4)) if self.transient: ctt = zeros(shape=(4,4)) invdtime = 1.0/self.solverStat.dtime for iInt,iData in enumerate(sData): B = self.getBmatrix( iData.dhdx ) self.kin.strain = dot ( B , elemdat.state [dDofs] ) self.kin.dstrain = dot ( B , elemdat.Dstate[dDofs] ) temp = sum( iData.h * elemdat.state [tDofs] ) dtemp = sum( iData.h * elemdat.Dstate[tDofs] ) gradTemp = dot( iData.dhdx.transpose() , elemdat.state [tDofs] ) self.kin.strain[:self.nstr] += -self.alpha * temp self.kin.dstrain[:self.nstr] += -self.alpha * dtemp sigma,tang = self.mat.getStress( self.kin ) stiff[
ix_(tDofs,tDofs)
numpy.ix_
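The element routine above scatters small dense blocks into the element stiffness matrix with ix_, which builds an open mesh from the displacement and temperature dof index lists. A minimal sketch of that indexing pattern with made-up dof numbering and block values:

import numpy as np

stiff = np.zeros((6, 6))
dDofs = [0, 1, 3, 4]                       # displacement dofs (example numbering)
tDofs = [2, 5]                             # temperature dofs
ktt = np.array([[2.0, -1.0],
                [-1.0, 2.0]])

# np.ix_(tDofs, tDofs) selects the 2x2 block at rows/columns (2, 5).
stiff[np.ix_(tDofs, tDofs)] += ktt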
import numpy as np import randomvars._utils as utils from randomvars.options import config # %% Conversion # There were different other approaches to Cont-Disc conversion, which were # decided to be less appropriate: # - In Cont-Disc construct discrete distribution with the same x-grid to be the # closest to input continuous CDF in terms of some metric ("L1" or "L2"). # These were discarded because they were not invertible and hence not really # possible to create appropriate Disc-Cont conversion. The problem was that # during inverse conversion there were negative values in y-grid, which is an # additional problem. For example, `x = [0, 1]`, `p = [0.9, 0.1]`. # - Another idea of Cont-Disc conversion was along the following lines: # - Assume there are many elements sampled from input distribution. # - For every sample element find the closest one among input x-grid. # - Take sample probability of x-grid elements as ratio of number of times # it was the closest and number of all points. # - Probability of element in x-grid is a limit of sample probabilities. Those # can be computed directly by computing probability of Voronoi intervals # (with ends at midpoints of adjacent intervals). # This turned out to be a previous approach with "L1" metric, which is not # invertible. def _y_from_xp(x, p): """Compute y-grid from xp-grid Compute y-grid which together with input x-grid is dual to input xp-grid. Duality is defined in terms of maximum likelihood estimation. Output xy-grid maximizes weighted log-likelihood `sum(p * log(y))` subject to integration constraint on xy-grid (`0.5 * sum((x[1:] - x[:-1]) * (y[1:] + y[:-1])) = 1`). Notes: - Points with zero p-elements affect the output y-grid: they indicate that in that region probability should be low (corresponding elements of y-grid will be zero). This is somewhat counterintuitive, as presence of zero probabilities doesn't change input discrete variable, but affects output continuous one. """ return p / _convert_coeffs(x) def _p_from_xy(x, y): """Compute p-grid from xy-grid Compute p-grid which together with input x-grid is dual to input xy-grid. Duality is defined in terms of maximum likelihood estimation of xy-grid. Output xp-grid is the one, for which input xy-grid maximizes weighted log-likelihood `sum(p * log(y))` subject to integration constraint on xy-grid (`0.5 * sum((x[1:] - x[:-1]) * (y[1:] + y[:-1])) = 1`). This approach is taken to be inverse of y-from-p conversion. Notes: - Points with zero y-elements result into zero p-elements. """ return y * _convert_coeffs(x) def _convert_coeffs(x): """These are coefficients of y-grid when computing integral using trapezoidal rule""" x_ext = np.concatenate(([x[0]], x, [x[-1]])) return 0.5 * (x_ext[2:] - x_ext[:-2]) # %% Stacking def _stack_xp(xp_seq): """Stack xp-grids Here "stack xp-grids" means "compute xp-grid which represents sum of all input xp-grids". Output x-grid consists of all unique values from all input x-grids. Output p-grid is computed as sum of all p-values at corresponding x-value of output x-grid (if x-value is not in xp-grid, 0 p-value is taken). TODO: It seems to be reasonable to use not strictly unique x-values but rather "unique with tolerance". Parameters ---------- xp_seq : sequence Sequence of xp-grids. """ x_raw, p_raw = [
np.concatenate(t)
numpy.concatenate
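The conversion helpers above are each other's inverse through the trapezoidal-rule coefficients: y = p / c(x) and p = y * c(x), where c(x) extends the x-grid by repeating its end points. A compact sketch with an assumed three-point grid:

import numpy as np

def convert_coeffs(x):
    """Trapezoidal-rule weight of each y-grid element in the xy-grid integral."""
    x_ext = np.concatenate(([x[0]], x, [x[-1]]))
    return 0.5 * (x_ext[2:] - x_ext[:-2])

x = np.array([0.0, 1.0, 3.0])
p = np.array([0.25, 0.5, 0.25])
y = p / convert_coeffs(x)        # y-grid dual to the xp-grid
p_back = y * convert_coeffs(x)   # recovers p exactly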
import cv2 import warnings import numpy as np from tqdm import tqdm from PIL import Image from os import listdir import skimage.io as io import tensorflow as tf import matplotlib.pyplot as plt import matplotlib.image as mpimg from os.path import join as pjoin from skimage.filters import gaussian from skimage.util import random_noise from skimage.transform import rotate, AffineTransform, warp warnings.filterwarnings('ignore') def normalize(arr): ''' Function to scale an input array to [-1, 1] arr : Array that is to be normalized return: Normalized array''' arr_min = arr.min() arr_max = arr.max() arr_range = arr_max - arr_min scaled = np.array((arr - arr_min) / float(arr_range), dtype='f') arr_new = -1 + (scaled * 2) return arr_new def section_and_masks_to_npy(line, low, high, image_type="plt"): '''get section and mask from image folder and save it in npy format line : "inlines"/"crosslines" low & high : section number image_type : "plt"/"PIL" Returns : npy file''' sections = [] masks = [] for i, filename in enumerate(listdir(line)): if filename.split('.')[1] == 'tiff': line_num = int((filename.split('.')[0]).split('_')[1]) if (line_num > low) and (line_num <= high): if image_type == "plt": seismic_section = plt.imread(pjoin(line, filename))[:, :, 0] elif image_type == "PIL": seismic_section = Image.open(pjoin(line, filename)) else: 'Unknown image type! possible input: ["plt", "PIL"]' seismic_section = np.array(seismic_section) seismic_section = normalize(seismic_section) sections.append(seismic_section) mask_filename = filename.split('.')[0] + '_mask.png' seismic_facies = Image.open(pjoin('masks', mask_filename)) seismic_facies = np.array(seismic_facies) masks.append(seismic_facies) npy_sections = np.asarray(sections) npy_masks = np.asarray(masks) print("Section Shape\t:{0}\nMask Shape\t:{1}".format(npy_sections.shape, npy_masks.shape)) return npy_sections, npy_masks def plot_section_mask(section, mask, vmin = None, vmax = None, figsize = (25, 8)): '''Plot section and corresponding mask, works for both section based and patch based section : Seismic Sections in 3D array mask : Corresponding Mask vmin, vmax : Normalize Section array between vmin, vmax value for visualization purpose''' idx = np.random.randint(0, mask.shape[0], (20)) _, ax = plt.subplots(2, 20, figsize = figsize) for i in range(len(ax[0])): ax[0][i].imshow(mask[idx[i]], vmin = vmin, vmax = vmax) ax[0][i].set_yticks([]) ax[0][i].set_xticks([]) ax[1][i].imshow(section[idx[i]]) ax[1][i].set_yticks([]) ax[1][i].set_xticks([]) plt.tight_layout() def extract_patch(section, mask, stride = 50, patch = 99, padding = "VALID"): '''Extract patch from section and mask array using TesorFlow patch : size of patch to be extracted Stride : stride of patch window padding : Don't use "SAME" as it will pad with 0''' images = section[:,:,:] labels = mask[:,:,:] images = np.expand_dims(images, axis=3) labels = np.expand_dims(labels, axis=3) patch_images = tf.image.extract_patches(images, (1,patch,patch,1), (1,stride,stride,1), (1,1,1,1), padding = padding, name=None) patch_labels = tf.image.extract_patches(labels, (1,patch,patch,1), (1,stride,stride,1), (1,1,1,1), padding = padding, name=None) patch_images = tf.reshape(patch_images, (-1,patch,patch)).numpy() patch_labels = tf.reshape(patch_labels, (-1,patch,patch)).numpy() print("Patch Images Shape\t:{0}\nPatch Masks Shape\t:{1}".format(patch_images.shape, patch_labels.shape)) return patch_images, patch_labels def labels_conversion_canada_new(labels): '''Converts unwanted labels (0, 5, 6, 7) to 
255 and renames (1, 2, 3, 4) to (0, 1, 2, 3) for Penobscot dataset''' labels = np.where(labels == 1, 255, labels) labels = np.where(labels == 0, 255, labels) labels = np.where(labels == 5, 255, labels) labels = np.where(labels == 6, 255, labels) labels = np.where(labels == 7, 255, labels) labels = np.where(labels == 2, 0, labels) labels = np.where(labels == 3, 1, labels) labels = np.where(labels == 4, 2, labels) return labels def labels_conversion_canada_new_class_1_2_from_sections(labels): '''Converts unwanted labels (0, 5, 6, 7) to 255 and renames (1, 2, 3, 4) to (0, 1, 2, 3) for Penobscot dataset''' labels = np.where(labels == 1, 255, labels) labels = np.where(labels == 2, 255, labels) labels = np.where(labels == 0, 255, labels) labels = np.where(labels == 5, 255, labels) labels = np.where(labels == 6, 255, labels) labels = np.where(labels == 7, 255, labels) labels = np.where(labels == 3, 1, labels) labels = np.where(labels == 4, 2, labels) return labels def labels_conversion_canada(labels): '''Converts unwanted labels (0, 5, 6, 7) to 255 and renames (1, 2, 3, 4) to (0, 1, 2, 3) for Penobscot dataset''' labels = np.where(labels == 0, 255, labels) labels = np.where(labels == 5, 255, labels) labels = np.where(labels == 6, 255, labels) labels = np.where(labels == 7, 255, labels) labels = np.where(labels == 1, 0, labels) labels = np.where(labels == 2, 1, labels) labels = np.where(labels == 3, 2, labels) labels = np.where(labels == 4, 3, labels) return labels def labels_conversion_netherlands_new(labels): '''Converts unwanted labels (0, 1) to 255 and renames (0, 3, 4, 5) to (0, 1, 2, 3) for Netherlands F3 Block dataset''' labels = np.where(labels == 0, 255, labels) labels = np.where(labels == 1, 255, labels) labels = np.where(labels == 2, 255, labels) labels = np.where(labels == 3, 0, labels) labels = np.where(labels == 4, 1, labels) labels = np.where(labels == 5, 2, labels) return labels def labels_conversion_netherlands(labels): '''Converts unwanted labels (0, 1) to 255 and renames (0, 3, 4, 5) to (0, 1, 2, 3) for Netherlands F3 Block dataset''' labels = np.where(labels == 1, 255, labels) labels = np.where(labels == 2, 255, labels) labels = np.where(labels == 0, 0, labels) labels = np.where(labels == 3, 1, labels) labels = np.where(labels == 4, 2, labels) labels = np.where(labels == 5, 3, labels) return labels def filter_patches(images, labels, threshold = 0.70): '''Drops any patch with 255 if total pixel number for a particular patch exceeds threshold value returns : filtered patch (255 removed), based on threshold''' filtered_images = [] filtered_labels = [] count0 = 0 total_pixel = sum(np.unique(labels[0], return_counts=True)[1]) for i in range(images.shape[0]): unique = np.unique(labels[i], return_counts=True) if ((np.max(unique[1])/total_pixel) >= threshold): #checks if in a particular patch labels, any label is greater than given %age idx = np.argmax(unique[1]) #if above statement satisfies, then find out which label is that which statisfies above condition new_lbl = unique[0][idx] #if above statement satisfies, then find out which label is that which statisfies above condition if new_lbl == 255: continue #if that label is 255, don't save that patch else: #if that label is anything but 255, save that patch filtered_images.append(images[i]) filtered_labels.append(labels[i]) else: #if first condition doesn't satisfy, then save all the patch filtered_images.append(images[i]) filtered_labels.append(labels[i]) filtered_images = np.asarray(filtered_images) filtered_labels = 
np.asarray(filtered_labels) print("Filtered Patch Images Shape\t:{0}\nFiltered Patch Masks Shape\t:{1}".format(filtered_images.shape, filtered_labels.shape)) return filtered_images, filtered_labels def balance_class_dist(images, labels, class_to_be_balanced = 0, skipping_factor = 10): '''Skip class_to_be_balanced patch by a skipping factor to balance the dataset''' filtered_images = [] filtered_labels = [] count = 0 for i in range(images.shape[0]): unique = np.unique(labels[i], return_counts=True) idx = np.argmax(unique[1]) #find out which label is most of the time present in a particular patch new_lbl = unique[0][idx] #find out which label is most of the time present in a particular patch if new_lbl == class_to_be_balanced: #if it's class 0, reduce the number of patches with a skipping factor if count % skipping_factor == 0: filtered_images.append(images[i]) filtered_labels.append(labels[i]) count+=1 else: #if that label is anything but 0, save that patch filtered_images.append(images[i]) filtered_labels.append(labels[i]) filtered_images = np.asarray(filtered_images) filtered_labels =
np.asarray(filtered_labels)
numpy.asarray
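The filtering logic above drops a patch only when a single label covers at least the threshold fraction of its pixels and that label is the 255 "ignore" value. A hedged sketch of that per-patch decision on a random placeholder patch:

import numpy as np

patch = np.random.choice([0, 1, 255], size=(99, 99), p=[0.1, 0.15, 0.75])
labels, counts = np.unique(patch, return_counts=True)
dominant = labels[np.argmax(counts)]
dominant_frac = counts.max() / patch.size

# Keep the patch unless it is dominated (>= 70%) by the ignore label 255.
keep = not (dominant_frac >= 0.70 and dominant == 255)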
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Wed May 30 17:18:58 2018 @author: chrelli """ # Demo getting the KRLS-t to work! #%% import time, os, sys, shutil # for math and plotting import pandas as pd import numpy as np import scipy as sp import matplotlib.pyplot as plt #import math # small utilities #import csv #from colour import Color from itertools import compress # for list selection with logical from tqdm import tqdm # for image manipulation #import cv2 # for recording and connecting to the intel realsense librar #import pyrealsense as pyrs #import multiprocessing from multiprocessing import Process # for cloud handling #from pyntcloud import PyntCloud # import handy Functions #from utils.common_utils import * #from utils.recording_utils import * #from utils.cloud_utils import * from utils.fitting_utils import * #from merge_and_filter_clouds import filter_and_downsample_cloud # h5py for acessing data #import h5py # ALLSO JIT STUFF from numba import jit, njit tracking_holder = np.load("utils/raw_tracking_no_bounds_full.npy") # call the fitted values for X (is N body dimension x M time steps) #%% Try to generate an estimate! Just xy for now! xx = tracking_holder[-3,:] yy = tracking_holder[-2,:] zz = tracking_holder[-1,:] #response variable is the next value! plt.figure() plt.plot(xx,yy) plt.show() plt.figure() plt.plot(xx) #%% Now, try generating the time embedded data! #%% Generate training data by time embedding! N_train = 2000 embedding = 5 def time_embedding(X,embedding): # X is a column vector! N = X.shape[0] X_embedded = np.zeros((N,embedding)) for i in range(embedding): X_embedded[i:,i] = X[:(N-i)] return X_embedded X = time_embedding(xx[:N_train],embedding) Y = xx[1:(N_train+1)] # add extra time dimension to the start for Xt Xt = np.column_stack((np.arange(X.shape[0]),X)) #%% from matlab we have #sigma_est,reg_est,lambda_est = 0.1631, 1.1680e-08,1.0000 #sigma_est,reg_est,lambda_est = 0.3775, 2.4780e-08,.9999 #sigma_est,reg_est,lambda_est = 14, 2.4780e-04,.999 #sigma_est = 0.2215 #reg_est = 4.449468e-09 #lambda_est = 1.0000 sigma_est = 0.1902 reg_est = 0.7567e-07 lambda_est = 0.9999 # Now make the kernel function! from utils.gaussian import Gaussian from utils.krlst import krlst # make the kernel function with the appropriate sigma! kern = Gaussian(sigma = sigma_est) # make the regressor! reg = krlst(kern) reg.Lambda = lambda_est #reg.Lambda = 0.99 reg.sn2 = reg_est # % % Loop over the data and predict! y_max = [] loops = np.linspace(100,len(Y)-100,num = 20) for loop_from in loops: y_pred = [0] # loop_from = 200 # at 400, we stop adding 'real' data, and just recursively add predicted data! for i,y in tqdm(enumerate(Y)): if i < loop_from: # train with real data! reg.train(X[i,:],y) X_train = X[i,:] if i>0: y_guess = float(reg.evaluate(X[i,:])[0]) y_pred.append(y_guess) # get this ready for the prediction! # initialize X_train for the next! X_train = X[i+1,:] else: # estimate the guess y_guess = float(reg.evaluate(X_train)[0]) # add to list y_pred.append(y_guess) # and update X_train # now, just do it recursively! #train here? 
# reg.train(X_train,y_guess) if i == loop_from + 20: continue X_train = np.hstack((y_guess,X_train[:-1])) y_max.append(y_pred) #% % plt.close('all') plt.figure() plt.plot(Y) for y_pred in y_max: plt.plot(y_pred) for loop_from in loops: plt.axvline(x=loop_from-1) #plt.xlim([loop_from-100,loop_from+100]) plt.show() #%% Super naiive linear regression from sklearn import linear_model regr = linear_model.LinearRegression() y_pred = [0] y_pred2 = [0,0] y_pred3 = [0,0,0] loop_from = 2000 # at 400, we stop adding 'real' data, and just recursively add predicted data! for i,y in enumerate(Y): regr = linear_model.LinearRegression() regr.fit(np.arange(embedding).reshape(-1,1),X[i,:],0.9**np.arange(embedding)) y_pred.append(regr.predict(np.array([-1]).reshape(-1,1))) y_pred2.append(regr.predict(np.array([-2]).reshape(-1,1))) y_pred3.append(regr.predict(np.array([-3]).reshape(-1,1))) #% % plt.close('all') plt.figure() plt.plot(Y) plt.plot(y_pred) plt.plot(y_pred2) plt.plot(y_pred3) plt.axvline(x=loop_from) plt.show() #%% Try just with KRLS from utils.krlst import KRLS #%% def compute_RBF(mat1, mat2, sigma = 0.016): trnorms1 = np.mat([(v * v.T)[0, 0] for v in mat1]).T trnorms2 = np.mat([(v * v.T)[0, 0] for v in mat2]).T k1 = trnorms1 * np.mat(np.ones((mat2.shape[0], 1), dtype=np.float64)).T k2 = np.mat(np.ones((mat1.shape[0], 1), dtype=np.float64)) * trnorms2.T k = k1 + k2 k -= 2 * np.mat(mat1 * mat2.T) k *= - 1./(2 * np.power(sigma, 2)) return
np.exp(k)
numpy.exp
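The compute_RBF helper in the snippet above builds a Gaussian kernel matrix via the expansion ||a - b||^2 = ||a||^2 + ||b||^2 - 2 a.b. A vectorised NumPy sketch of the same kernel without np.mat; the sigma default mirrors the value assumed in that snippet.

import numpy as np

def rbf_kernel(A, B, sigma=0.016):
    """K[i, j] = exp(-||A[i] - B[j]||**2 / (2 * sigma**2))."""
    sq_dists = (np.sum(A ** 2, axis=1)[:, None]
                + np.sum(B ** 2, axis=1)[None, :]
                - 2.0 * A @ B.T)
    return np.exp(-sq_dists / (2.0 * sigma ** 2))

K = rbf_kernel(np.random.rand(5, 3), np.random.rand(4, 3))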
import numpy as np import numpy.random as npr import math import pandas as pd def WongChanSimCov(n): Z = npr.normal(size=(n, 10)) X = np.zeros((n, 10)) X[:,0] = np.exp(Z[:,0]/2.) X[:,1] = Z[:,1]/(1+np.exp(Z[:,0])) X[:,2] = (Z[:,0]*Z[:,2]/25.+0.6)**3 X[:,3] = (Z[:,1]+Z[:,3]+20)**2 X[:,4:] = Z[:,4:] return n, Z, X def WongChanSimPS(n, Z, X): p = np.exp(-Z[:,1]-0.1*Z[:,4]) / (1.+np.exp(-Z[:,1]-0.1*Z[:,4])) T = npr.binomial(1, p) return p, T def WongChanSimOutA(n, Z, X, T): Y = 210 + \ (1.5*T-0.5) * (27.4*Z[:,1]+13.7*Z[:,2]+13.7*Z[:,3]+13.7*Z[:,4]) + \ npr.normal(size=n) Y1 = 210 + \ (1.5*1-0.5) * (27.4*Z[:,1]+13.7*Z[:,2]+13.7*Z[:,3]+13.7*Z[:,4]) + \ npr.normal(size=n) Y0 = 210 + \ (1.5*0-0.5) * (27.4*Z[:,1]+13.7*Z[:,2]+13.7*Z[:,3]+13.7*Z[:,4]) + \ npr.normal(size=n) return Y, Y1, Y0 def WongChanSimOutB(n, Z, X, T): Y = Z[:,1]*(Z[:,2]**3)*(Z[:,3]**2)*Z[:,4] + Z[:,4]*(np.abs(Z[:,1]))**0.5 + \ npr.normal(size=n) Y1 = Z[:,1]*(Z[:,2]**3)*(Z[:,3]**2)*Z[:,4] + Z[:,4]*(np.abs(Z[:,1]))**0.5 + \ npr.normal(size=n) Y0 = Z[:,1]*(Z[:,2]**3)*(Z[:,3]**2)*Z[:,4] + Z[:,4]*(np.abs(Z[:,1]))**0.5 + \ npr.normal(size=n) return Y, Y1, Y0 def WongChanSimA(n=200): n, Z, X = WongChanSimCov(n) p, T = WongChanSimPS(n, Z, X) Y, Y1, Y0 = WongChanSimOutA(n, Z, X, T) return n, Z, X, p, T, Y, Y1, Y0 def WongChanSimB(n=200): n, Z, X = WongChanSimCov(n) p, T = WongChanSimPS(n, Z, X) Y, Y1, Y0 = WongChanSimOutB(n, Z, X, T) return n, Z, X, p, T, Y, Y1, Y0 if __name__ == '__main__': N = 100 datdir = 'sim_datasets/' for i in range(N): n, Z, X, p, T, Y, Y1, Y0 = WongChanSimA(n=5000) simA = np.column_stack([Z, p, X, T, Y, Y1, Y0]) np.savetxt(datdir+str(i)+'WongChanSimA.csv', simA, delimiter=',') n, Z, X, p, T, Y, Y1, Y0 = WongChanSimB(n=5000) simB =
np.column_stack([Z, p, X, T, Y, Y1, Y0])
numpy.column_stack
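The simulation driver above stacks covariates, propensity, treatment and outcomes column-wise before writing each replicate to CSV. A small sketch of that assembly step with placeholder draws; the column order follows the snippet and the file name is hypothetical.

import numpy as np

n = 5
Z = np.random.normal(size=(n, 10))
X = np.random.normal(size=(n, 10))
p = np.random.uniform(size=n)
T = np.random.binomial(1, p)
Y = np.random.normal(size=n)
Y1, Y0 = Y + 1.0, Y - 1.0

sim = np.column_stack([Z, p, X, T, Y, Y1, Y0])   # one row per simulated unit
np.savetxt("WongChanSimA_example.csv", sim, delimiter=",")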
import sentiment_analysis.algorithms as ai import logging import numpy as np import os import sys import traceback import time def equals(x, y): if type(y) == np.ndarray: return (x == y).all return x == y def check_real(ex_name, f, exp_result, *args): try: res = f(*args) except NotImplementedError: logging.error('%s: not implemented', ex_name) return True if not np.isreal(res): logging.error('%s: does not return a real number, type: %s', ex_name, type(res)) return True if res != exp_result: logging.error('%s: incorrect answer. Expected %s, got %s', ex_name, exp_result, res) return True def check_tuple(ex_name, f, exp_res, *args, **kwargs): try: res = f(*args, **kwargs) except NotImplementedError: logging.error('%s: not implemented', ex_name) return True if not type(res) == tuple: logging.error('%s: does not return a tuple, type: %s', ex_name, type(res)) return True if not len(res) == len(exp_res): logging.error('%s: expected a tuple of size %d, but got tuple of size %d', ex_name, len(exp_res), len(res)) return True if not all(equals(x, y) for x, y in zip(res, exp_res)): logging.error('%s: incorrect answer, expected %s, but got %s', ex_name, exp_res, res) return True def check_array(ex_name, f, exp_res, *args): try: res = f(*args) except NotImplementedError: logging.error('%s: not implemented', ex_name) return True if not type(res) == np.ndarray: logging.error('%s: does not return a numpy array, type: ', ex_name, type(res)) return True if not len(res) == len(exp_res): logging.error('%s: expected an array of shape %s, but got array of shape %s', ex_name, exp_res.shape, res.shape) return True if not all(equals(x, y) for x, y in zip(res, exp_res)): logging.error('%s: incorrect answer. Expected %s, got %s', ex_name, exp_res, res) return True def check_list(ex_name, f, exp_res, *args): try: res = f(*args) except FileNotFoundError: logging.error("%s: Not implemented", ex_name) return True if not type(res) == list: logging.error('%s: does not return a list, type %s', ex_name, type(res)) return True if not len(res) == len(exp_res): logging.error('%s: expected a list of size %d but got the list of size %d', ex_name, len(exp_res), len(res)) return True if not all(equals(x, y) for x, y in zip(res, exp_res)): logging.error('%s: incorrect answer. 
Expected, %s, got: %s', ex_name, exp_res, res) return True def check_get_order(): ex_name = "get_order" if check_list(ex_name, ai.get_order, [0], 1): logging.info("You should revert `get_order` to its original implementation for this test to pass") return if check_list(ex_name, ai.get_order, [1, 0], 2): logging.info("You should revert 'get_order' to its original implementation for this test to pass") return logging.info("%s: PASS", ex_name) def check_hinge_loss_single(): do_check_hinge_loss_single(1 - 0.8, np.array([1, 2]), 1, np.array([-1, 1]), -0.2) do_check_hinge_loss_single(0.0, np.array([0.92454549, 0.80196337, 0.38027544, 0.69273305, 0.01614677, 0.35642963, 0.83956723, 0.83481115, 0.66612153, 0.96900118]), 1.0, np.array([0.05408063, 0.06234699, 0.13148364, 0.07217788, 3.09659492, 0.14028014, 0.05955449, 0.05989379, 0.07506138, 0.05159952]), 0.5) def check_hinge_loss_full(): ex_name = 'hinge_loss_full' feature_matrix = np.array([[1, 2], [1, 2]]) label, theta, theta_0 = np.array([1, 1]), np.array([-1, 1]), -0.2 exp_result = 1 - 0.8 if check_real(ex_name, ai.hinge_loss_full, exp_result, feature_matrix, label, theta, theta_0): return logging.info("%s: PASS", ex_name) def do_check_hinge_loss_single(ex_result, feature_vector, label, theta, theta_0): ex_name = 'hinge_loss_single' if check_real(ex_name, ai.hinge_loss_single, ex_result, feature_vector, label, theta, theta_0): return logging.info("%s: PASS", ex_name) def check_perceptron_single_step_update(): ex_name = 'perceptron_single_step_update' feature_vector = np.array([1, 2]) label, theta, theta_0 = 1, np.array([-1, 1]), -1.5 exp_result = (np.array([0, 3]), -0.5) if check_tuple(ex_name, ai.perceptron_single_step_update, exp_result, feature_vector, label, theta, theta_0): return logging.info("%s: PASS", ex_name) def check_perceptron(): ex_name = "perceptron" feature_matrix = np.array([[1, 2]]) labels = np.array([1]) t = 1 exp_res = (np.array([1, 2]), 1) if check_tuple(ex_name, ai.perceptron, exp_res, feature_matrix, labels, t): return feature_matrix = np.array([[1, 2], [-1, 0]]) labels = np.array([1, 1]) t = 1 exp_res = (np.array([0, 2]), 2) if check_tuple(ex_name, ai.perceptron, exp_res, feature_matrix, labels, t): return feature_matrix = np.array([[1, 2]]) labels = np.array([1]) t = 2 exp_res = (np.array([1, 2]), 1) if check_tuple(ex_name, ai.perceptron, exp_res, feature_matrix, labels, t): return feature_matrix = np.array([[1, 2], [-1, 0]]) labels = np.array([1, 1]) t = 2 exp_res = (np.array([0, 2]), 2) if check_tuple(ex_name, ai.perceptron, exp_res, feature_matrix, labels, t): return logging.info('%s: PASS', ex_name) def check_average_perceptron(): ex_name = "average_perceptron" feature_matrix =
np.array([[1, 2]])
numpy.array
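Note that the equals helper at the top of the snippet above returns the bound method (x == y).all instead of calling it, so the comparison is always truthy for arrays; a corrected sketch of that helper, with behaviour otherwise unchanged:

import numpy as np

def equals(x, y):
    """Scalar or element-wise equality; .all() must be called, not just referenced."""
    if isinstance(y, np.ndarray):
        return bool((x == y).all())
    return x == y

print(equals(np.array([1, 2]), np.array([1, 3])))   # False, not a bound method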
""" .. moduleauthor:: <NAME> <<EMAIL>> """ from numpy import exp, log, mean, std, sqrt, tanh, cos, cov from numpy import array, linspace, sort, searchsorted, pi, argmax, argsort, logaddexp from numpy.random import random from scipy.integrate import quad, simps from scipy.optimize import minimize, minimize_scalar, differential_evolution from warnings import warn from itertools import product from functools import reduce import matplotlib.pyplot as plt class DensityEstimator(object): """ Parent class for the 1D density estimation classes GaussianKDE and UnimodalPdf. """ def __init__(self): self.lwr_limit = None self.upr_limit = None self.mode = None def __call__(self, x): return None def interval(self, frac=0.95): p_max = self(self.mode) p_conf = self.binary_search( self.interval_prob, frac, [0.0, p_max], uphill=False ) return self.get_interval(p_conf) def get_interval(self, z): lwr = self.binary_search(self, z, [self.lwr_limit, self.mode], uphill=True) upr = self.binary_search(self, z, [self.mode, self.upr_limit], uphill=False) return lwr, upr def interval_prob(self, z): lwr, upr = self.get_interval(z) return quad(self, lwr, upr, limit=100)[0] def moments(self): pass def plot_summary(self, filename=None, show=True, label=None): """ Plot the estimated PDF along with summary statistics. :keyword str filename: Filename to which the plot will be saved. If unspecified, the plot will not be saved. :keyword bool show: Boolean value indicating whether the plot should be displayed in a window. (Default is True) :keyword str label: The label to be used for the x-axis on the plot as a string. """ def ensure_is_nested_list(var): if not isinstance(var[0], (list, tuple)): var = [var] return var sigma_1 = ensure_is_nested_list(self.interval(frac=0.68268)) sigma_2 = ensure_is_nested_list(self.interval(frac=0.95449)) sigma_3 = ensure_is_nested_list(self.interval(frac=0.9973)) mu, var, skw, kur = self.moments() if type(self) is GaussianKDE: lwr = sigma_3[0][0] - 5 * self.h upr = sigma_3[0][1] + 5 * self.h else: s_min = sigma_3[0][0] s_max = sigma_3[-1][1] lwr = s_min - 0.1 * (s_max - s_min) upr = s_max + 0.1 * (s_max - s_min) axis = linspace(lwr, upr, 500) fig, ax = plt.subplots( nrows=1, ncols=2, figsize=(10, 6), gridspec_kw={"width_ratios": [2, 1]}, ) ax[0].plot(axis, self(axis), lw=1, c="C0") ax[0].fill_between(axis, self(axis), color="C0", alpha=0.1) ax[0].plot([self.mode, self.mode], [0.0, self(self.mode)], c="red", ls="dashed") ax[0].set_xlabel(label or "argument", fontsize=13) ax[0].set_ylabel("probability density", fontsize=13) ax[0].set_ylim([0.0, None]) ax[0].grid() gap = 0.05 h = 0.95 x1 = 0.35 x2 = 0.40 def section_title(height, name): ax[1].text(0.0, height, name, horizontalalignment="left", fontweight="bold") return height - gap def write_quantity(height, name, value): ax[1].text(x1, height, f"{name}:", horizontalalignment="right") ax[1].text(x2, height, f"{value:.5G}", horizontalalignment="left") return height - gap h = section_title(h, "Basics") h = write_quantity(h, "Mode", self.mode) h = write_quantity(h, "Mean", mu) h = write_quantity(h, "Standard dev", sqrt(var)) h -= gap h = section_title(h, "Highest-density intervals") def write_sigma(height, name, sigma): ax[1].text(x1, height, name, horizontalalignment="right") for itvl in sigma: ax[1].text( x2, height, rf"{itvl[0]:.5G} $\rightarrow$ {itvl[1]:.5G}", horizontalalignment="left", ) height -= gap return height h = write_sigma(h, "1-sigma:", sigma_1) h = write_sigma(h, "2-sigma:", sigma_2) h = write_sigma(h, "3-sigma:", sigma_3) h -= gap h 
= section_title(h, "Higher moments") h = write_quantity(h, "Variance", var) h = write_quantity(h, "Skewness", skw) h = write_quantity(h, "Kurtosis", kur) ax[1].axis("off") plt.tight_layout() if filename is not None: plt.savefig(filename) if show: plt.show() return fig, ax @staticmethod def binary_search(func, value, bounds, uphill=True): x_min, x_max = bounds x = (x_min + x_max) * 0.5 converged = False while not converged: f = func(x) if f > value: if uphill: x_max = x else: x_min = x else: if uphill: x_min = x else: x_max = x x = (x_min + x_max) * 0.5 if abs((x_max - x_min) / x) < 1e-3: converged = True # now linearly interpolate as a polish step f_max = func(x_max) f_min = func(x_min) df = f_max - f_min return x_min * ((f_max - value) / df) + x_max * ((value - f_min) / df) class UnimodalPdf(DensityEstimator): """ Construct a UnimodalPdf object, which can be called as a function to return the estimated PDF of the given sample. The UnimodalPdf class is designed to robustly estimate univariate, unimodal probability distributions given a sample drawn from that distribution. This is a parametric method based on an heavily modified student-t distribution, which is extremely flexible. :param sample: 1D array of samples from which to estimate the probability distribution """ def __init__(self, sample): self.sample = array(sample) self.n_samps = len(sample) # chebyshev quadtrature weights and axes self.sd = 0.2 self.n_nodes = 128 k = linspace(1, self.n_nodes, self.n_nodes) t = cos(0.5 * pi * ((2 * k - 1) / self.n_nodes)) self.u = t / (1.0 - t**2) self.w = (pi / self.n_nodes) * (1 + t**2) / (self.sd * (1 - t**2) ** 1.5) # first minimise based on a slice of the sample, if it's large enough self.cutoff = 2000 self.skip = max(self.n_samps // self.cutoff, 1) self.x = self.sample[:: self.skip] self.n = len(self.x) # makes guesses based on sample moments guesses = self.generate_guesses() # sort the guesses by the lowest score guesses = sorted(guesses, key=self.minfunc) # minimise based on the best guess self.min_result = minimize(self.minfunc, guesses[0], method="Nelder-Mead") self.MAP = self.min_result.x self.mode = self.MAP[0] # if we were using a reduced sample, use full sample if self.skip > 1: self.x = self.sample self.n = self.n_samps self.min_result = minimize(self.minfunc, self.MAP, method="Nelder-Mead") self.MAP = self.min_result.x self.mode = self.MAP[0] # normalising constant for the MAP estimate curve self.map_lognorm = log(self.norm(self.MAP)) # set some bounds for the confidence limits calculation x0, s0, v, f, k, q = self.MAP self.upr_limit = x0 + s0 * (4 * exp(f) + 1) self.lwr_limit = x0 - s0 * (4 * exp(-f) + 1) def generate_guesses(self): mu, sigma, skew = self.sample_moments() x0 = [mu, mu - sigma * skew * 0.15, mu - sigma * skew * 0.3] v = [0, 5.0] s0 = [sigma, sigma * 2] f = [0.5 * skew, skew] k = [1.0, 4.0, 8.0] q = [2.0] return [array(i) for i in product(x0, s0, v, f, k, q)] def sample_moments(self): mu = mean(self.x) x2 = self.x**2 x3 = x2 * self.x sig = sqrt(mean(x2) - mu**2) skew = (mean(x3) - 3 * mu * sig**2 - mu**3) / sig**3 return mu, sig, skew def __call__(self, x): """ Evaluate the PDF estimate at a set of given axis positions. :param x: axis location(s) at which to evaluate the estimate. :return: values of the PDF estimate at the specified locations. 
""" return exp(self.log_pdf_model(x, self.MAP) - self.map_lognorm) def posterior(self, paras): x0, s0, v, f, k, q = paras # prior checks if (s0 > 0) & (0 < k < 20) & (1 < q < 6): normalisation = self.n * log(self.norm(paras)) return self.log_pdf_model(self.x, paras).sum() - normalisation else: return -1e50 def minfunc(self, paras): return -self.posterior(paras) def norm(self, pvec): v = self.pdf_model(self.u, [0.0, self.sd, *pvec[2:]]) integral = (self.w * v).sum() * pvec[1] return integral def pdf_model(self, x, pvec): return exp(self.log_pdf_model(x, pvec)) def log_pdf_model(self, x, pvec): x0, s0, v, f, k, q = pvec v = exp(v) + 1 z0 = (x - x0) / s0 ds = exp(f * tanh(z0 / k)) z = z0 / ds log_prob = -(0.5 * (1 + v)) * log(1 + (abs(z) ** q) / v) return log_prob def moments(self): """ Calculate the mean, variance skewness and excess kurtosis of the estimated PDF. :return: mean, variance, skewness, ex-kurtosis """ s = self.MAP[1] f = self.MAP[3] lwr = self.mode - 5 * max(exp(-f), 1.0) * s upr = self.mode + 5 * max(exp(f), 1.0) * s x = linspace(lwr, upr, 1000) p = self(x) mu = simps(p * x, x=x) var = simps(p * (x - mu) ** 2, x=x) skw = simps(p * (x - mu) ** 3, x=x) / var * 1.5 kur = (simps(p * (x - mu) ** 4, x=x) / var**2) - 3.0 return mu, var, skw, kur class GaussianKDE(DensityEstimator): """ Construct a GaussianKDE object, which can be called as a function to return the estimated PDF of the given sample. GaussianKDE uses Gaussian kernel-density estimation to estimate the PDF associated with a given sample. :param sample: \ 1D array of samples from which to estimate the probability distribution :param float bandwidth: \ Width of the Gaussian kernels used for the estimate. If not specified, an appropriate width is estimated based on sample data. :param bool cross_validation: \ Indicate whether or not cross-validation should be used to estimate the bandwidth in place of the simple 'rule of thumb' estimate which is normally used. :param int max_cv_samples: \ The maximum number of samples to be used when estimating the bandwidth via cross-validation. The computational cost scales roughly quadratically with the number of samples used, and can become prohibitive for samples of size in the tens of thousands and up. Instead, if the sample size is greater than *max_cv_samples*, the cross-validation is performed on a sub-sample of this size. """ def __init__( self, sample, bandwidth=None, cross_validation=False, max_cv_samples=5000 ): self.s = sort(array(sample).flatten()) # sorted array of the samples self.max_cvs = ( max_cv_samples # maximum number of samples to be used for cross-validation ) if self.s.size < 3: raise ValueError( """ [ GaussianKDE error ] Not enough samples were given to estimate the PDF. At least 3 samples are required. 
""" ) if bandwidth is None: self.h = self.simple_bandwidth_estimator() # very simple bandwidth estimate if cross_validation: self.h = self.cross_validation_bandwidth_estimator(self.h) else: self.h = bandwidth # define some useful constants self.norm = 1.0 / (len(self.s) * sqrt(2 * pi) * self.h) self.cutoff = self.h * 4 self.q = 1.0 / (sqrt(2) * self.h) self.lwr_limit = self.s[0] - self.cutoff * 0.5 self.upr_limit = self.s[-1] + self.cutoff * 0.5 # decide how many regions the axis should be divided into n = int(log((self.s[-1] - self.s[0]) / self.h) / log(2)) + 1 # now generate midpoints of these regions mids = linspace(self.s[0], self.s[-1], 2**n + 1) mids = 0.5 * (mids[1:] + mids[:-1]) # get the cutoff indices lwr_inds = searchsorted(self.s, mids - self.cutoff) upr_inds = searchsorted(self.s, mids + self.cutoff) slices = [slice(l, u) for l, u in zip(lwr_inds, upr_inds)] # now build a dict that maps midpoints to the slices self.slice_map = dict(zip(mids, slices)) # build a binary tree which allows fast look-up of which # region contains a given value self.tree = BinaryTree(n, (self.s[0], self.s[-1])) #: The mode of the pdf, calculated automatically when an instance of GaussianKDE is created. self.mode = self.locate_mode() def __call__(self, x_vals): """ Evaluate the PDF estimate at a set of given axis positions. :param x_vals: axis location(s) at which to evaluate the estimate. :return: values of the PDF estimate at the specified locations. """ if hasattr(x_vals, "__iter__"): return [self.density(x) for x in x_vals] else: return self.density(x_vals) def density(self, x): # look-up the region region = self.tree.lookup(x) # look-up the cutting points slc = self.slice_map[region[2]] # evaluate the density estimate from the slice return self.norm * exp(-(((x - self.s[slc]) * self.q) ** 2)).sum() def simple_bandwidth_estimator(self): # A simple estimate which assumes the distribution close to a Gaussian return 1.06 * std(self.s) / (len(self.s) ** 0.2) def cross_validation_bandwidth_estimator(self, initial_h): """ Selects the bandwidth by maximising a log-probability derived using a 'leave-one-out cross-validation' approach. """ # first check if we need to sub-sample for computational cost reduction if len(self.s) > self.max_cvs: scrambler = argsort(random(size=len(self.s))) samples = (self.s[scrambler])[: self.max_cvs] else: samples = self.s # create a grid in log-bandwidth space and evaluate the log-prob across it dh = 0.5 log_h = [initial_h + m * dh for m in (-2, -1, 0, 1, 2)] log_p = [self.cross_validation_logprob(samples, exp(h)) for h in log_h] # if the maximum log-probability is at the edge of the grid, extend it for i in range(5): # stop when the maximum is not at the edge max_ind = argmax(log_p) if 0 < max_ind < len(log_h) - 1: break if max_ind == 0: # extend grid to lower bandwidths new_h = log_h[0] - dh new_lp = self.cross_validation_logprob(samples, exp(new_h)) log_h.insert(0, new_h) log_p.insert(0, new_lp) else: # extend grid to higher bandwidths new_h = log_h[-1] + dh new_lp = self.cross_validation_logprob(samples, exp(new_h)) log_h.append(new_h) log_p.append(new_lp) # cost of evaluating the cross-validation is expensive, so we want to # minimise total evaluations. Here we assume the CV score has only one # maxima, and use recursive grid refinement to rapidly find it. 
for refine in range(6): max_ind = int(argmax(log_p)) lwr_h = 0.5 * (log_h[max_ind - 1] + log_h[max_ind]) upr_h = 0.5 * (log_h[max_ind] + log_h[max_ind + 1]) lwr_lp = self.cross_validation_logprob(samples, exp(lwr_h)) upr_lp = self.cross_validation_logprob(samples,
exp(upr_h)
numpy.exp
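The KDE above falls back on a simple rule-of-thumb bandwidth, 1.06 * std(sample) / n**0.2, before any cross-validation refinement, and normalises each Gaussian kernel by 1 / (n * sqrt(2 pi) * h). A short NumPy sketch of those two quantities on a placeholder sample:

import numpy as np

sample = np.random.normal(size=500)
h = 1.06 * np.std(sample) / (len(sample) ** 0.2)      # rule-of-thumb bandwidth
norm = 1.0 / (len(sample) * np.sqrt(2 * np.pi) * h)   # per-kernel normalisation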
# -*- Mode: python; tab-width: 4; indent-tabs-mode:nil; coding:utf-8 -*- # vim: tabstop=4 expandtab shiftwidth=4 softtabstop=4 # # MDAnalysis --- https://www.mdanalysis.org # Copyright (c) 2006-2017 The MDAnalysis Development Team and contributors # (see the file AUTHORS for the full list of names) # # Released under the GNU Public Licence, v2 or any higher version # # Please cite your use of MDAnalysis in published work: # # <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, <NAME>, # <NAME>, <NAME>, <NAME>, <NAME>, and <NAME>. # MDAnalysis: A Python package for the rapid analysis of molecular dynamics # simulations. In S. Benthall and S. Rostrup editors, Proceedings of the 15th # Python in Science Conference, pages 102-109, Austin, TX, 2016. SciPy. # doi: 10.25080/majora-629e541a-00e # # <NAME>, <NAME>, <NAME>, and <NAME>. # MDAnalysis: A Toolkit for the Analysis of Molecular Dynamics Simulations. # J. Comput. Chem. 32 (2011), 2319--2327, doi:10.1002/jcc.21787 # """ Neighbor Search wrapper for MDAnalysis --- :mod:`MDAnalysis.lib.NeighborSearch` =============================================================================== This module contains classes that allow neighbor searches directly with `AtomGroup` objects from `MDAnalysis`. """ from __future__ import absolute_import import numpy as np from MDAnalysis.lib.distances import capped_distance from MDAnalysis.lib.util import unique_int_1d from MDAnalysis.core.groups import AtomGroup, Atom class AtomNeighborSearch(object): """This class can be used to find all atoms/residues/segments within the radius of a given query position. For the neighbor search, this class uses the BioPython KDTree and its wrapper PeriodicKDTree for non-periodic and periodic systems, respectively. """ def __init__(self, atom_group, box=None, bucket_size=10): """ Parameters ---------- atom_list : AtomGroup list of atoms box : array-like or ``None``, optional, default ``None`` Simulation cell dimensions in the form of :attr:`MDAnalysis.trajectory.base.Timestep.dimensions` when periodic boundary conditions should be taken into account for the calculation of contacts. bucket_size : int Number of entries in leafs of the KDTree. If you suffer poor performance you can play around with this number. Increasing the `bucket_size` will speed up the construction of the KDTree but slow down the search. """ self.atom_group = atom_group self._u = atom_group.universe self._box = box #self.kdtree = PeriodicKDTree(box=box, leafsize=bucket_size) def search(self, atoms, radius, level='A'): """ Return all atoms/residues/segments that are within *radius* of the atoms in *atoms*. Parameters ---------- atoms : AtomGroup, MDAnalysis.core.groups.Atom list of atoms radius : float Radius for search in Angstrom. level : str char (A, R, S). Return atoms(A), residues(R) or segments(S) within *radius* of *atoms*. """ unique_idx = [] if isinstance(atoms, Atom): positions = atoms.position.reshape(1, 3) else: positions = atoms.positions pairs = capped_distance(positions, self.atom_group.positions, radius, box=self._box, return_distances=False) if pairs.size > 0: unique_idx = unique_int_1d(
np.asarray(pairs[:, 1], dtype=np.int64)
numpy.asarray
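The completed call above takes the second column of the (query, neighbour) index pairs returned by capped_distance and reduces it to unique atom indices. A plain-NumPy sketch of that reduction, using np.unique in place of MDAnalysis' unique_int_1d and made-up index pairs:

import numpy as np

pairs = np.array([[0, 7], [0, 9], [1, 7], [2, 3]])   # placeholder (query, neighbour) pairs
unique_idx = np.unique(np.asarray(pairs[:, 1], dtype=np.int64))
print(unique_idx)                                     # [3 7 9]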
# -*- coding: utf-8 -*- """Tests of array utility functions.""" #------------------------------------------------------------------------------ # Imports #------------------------------------------------------------------------------ import os.path as op import numpy as np from pytest import raises from ..array import (_unique, _normalize, _index_of, _in_polygon, _spikes_in_clusters, _spikes_per_cluster, _flatten_per_cluster, _get_data_lim, select_spikes, Selector, chunk_bounds, regular_subset, excerpts, data_chunk, grouped_mean, get_excerpts, _concatenate_virtual_arrays, _range_from_slice, _pad, _get_padded, read_array, write_array, ) from phy.utils._types import _as_array from phy.utils.testing import _assert_equal as ae from ..mock import artificial_spike_clusters #------------------------------------------------------------------------------ # Test utility functions #------------------------------------------------------------------------------ def test_range_from_slice(): """Test '_range_from_slice'.""" class _SliceTest(object): """Utility class to make it more convenient to test slice objects.""" def __init__(self, **kwargs): self._kwargs = kwargs def __getitem__(self, item): if isinstance(item, slice): return _range_from_slice(item, **self._kwargs) with raises(ValueError): _SliceTest()[:] with raises(ValueError): _SliceTest()[1:] ae(_SliceTest()[:5], [0, 1, 2, 3, 4]) ae(_SliceTest()[1:5], [1, 2, 3, 4]) with raises(ValueError): _SliceTest()[::2] with raises(ValueError): _SliceTest()[1::2] ae(_SliceTest()[1:5:2], [1, 3]) with raises(ValueError): _SliceTest(start=0)[:] with raises(ValueError): _SliceTest(start=1)[:] with raises(ValueError): _SliceTest(step=2)[:] ae(_SliceTest(stop=5)[:], [0, 1, 2, 3, 4]) ae(_SliceTest(start=1, stop=5)[:], [1, 2, 3, 4]) ae(_SliceTest(stop=5)[1:], [1, 2, 3, 4]) ae(_SliceTest(start=1)[:5], [1, 2, 3, 4]) ae(_SliceTest(start=1, step=2)[:5], [1, 3]) ae(_SliceTest(start=1)[:5:2], [1, 3]) ae(_SliceTest(length=5)[:], [0, 1, 2, 3, 4]) with raises(ValueError): _SliceTest(length=5)[:3] ae(_SliceTest(length=5)[:10], [0, 1, 2, 3, 4]) ae(_SliceTest(length=5)[:5], [0, 1, 2, 3, 4]) ae(_SliceTest(start=1, length=5)[:], [1, 2, 3, 4, 5]) ae(_SliceTest(start=1, length=5)[:6], [1, 2, 3, 4, 5]) with raises(ValueError): _SliceTest(start=1, length=5)[:4] ae(_SliceTest(start=1, step=2, stop=5)[:], [1, 3]) ae(_SliceTest(start=1, stop=5)[::2], [1, 3]) ae(_SliceTest(stop=5)[1::2], [1, 3]) def test_pad(): arr = np.random.rand(10, 3) ae(_pad(arr, 0, 'right'), arr[:0, :]) ae(_pad(arr, 3, 'right'), arr[:3, :]) ae(_pad(arr, 9), arr[:9, :]) ae(_pad(arr, 10), arr) ae(_pad(arr, 12, 'right')[:10, :], arr) ae(_pad(arr, 12)[10:, :], np.zeros((2, 3))) ae(_pad(arr, 0, 'left'), arr[:0, :]) ae(_pad(arr, 3, 'left'), arr[7:, :]) ae(_pad(arr, 9, 'left'), arr[1:, :]) ae(_pad(arr, 10, 'left'), arr) ae(_pad(arr, 12, 'left')[2:, :], arr) ae(_pad(arr, 12, 'left')[:2, :], np.zeros((2, 3))) with raises(ValueError): _pad(arr, -1) def test_get_padded(): arr = np.array([1, 2, 3])[:, np.newaxis] with raises(RuntimeError): ae(_get_padded(arr, -2, 5).ravel(), [1, 2, 3, 0, 0]) ae(_get_padded(arr, 1, 2).ravel(), [2]) ae(_get_padded(arr, 0, 5).ravel(), [1, 2, 3, 0, 0]) ae(_get_padded(arr, -2, 3).ravel(), [0, 0, 1, 2, 3]) def test_get_data_lim(): arr = np.random.rand(10, 5) assert 0 < _get_data_lim(arr) < 1 assert 0 < _get_data_lim(arr, 2) < 1 def test_unique(): """Test _unique() function""" _unique([]) n_spikes = 300 n_clusters = 3 spike_clusters = artificial_spike_clusters(n_spikes, n_clusters) 
ae(_unique(spike_clusters), np.arange(n_clusters)) def test_normalize(): """Test _normalize() function.""" n_channels = 10 positions = 1 + 2 * np.random.randn(n_channels, 2) # Keep ration is False. positions_n = _normalize(positions) x_min, y_min = positions_n.min(axis=0) x_max, y_max = positions_n.max(axis=0) np.allclose(x_min, 0.) np.allclose(x_max, 1.) np.allclose(y_min, 0.) np.allclose(y_max, 1.) # Keep ratio is True. positions_n = _normalize(positions, keep_ratio=True) x_min, y_min = positions_n.min(axis=0) x_max, y_max = positions_n.max(axis=0) np.allclose(min(x_min, y_min), 0.) np.allclose(max(x_max, y_max), 1.) np.allclose(x_min + x_max, 1) np.allclose(y_min + y_max, 1) def test_index_of(): """Test _index_of.""" arr = [36, 42, 42, 36, 36, 2, 42] lookup = _unique(arr) ae(_index_of(arr, lookup), [1, 2, 2, 1, 1, 0, 2]) def test_as_array(): ae(_as_array(3), [3]) ae(_as_array([3]), [3]) ae(_as_array(3.), [3.]) ae(_as_array([3.]), [3.]) with raises(ValueError): _as_array(map) def test_in_polygon(): polygon = [[0, 0], [1, 0], [1, 1], [0, 1], [0, 0]] points = np.random.uniform(size=(100, 2), low=-1, high=1) idx_expected = np.nonzero((points[:, 0] > 0) & (points[:, 1] > 0) & (points[:, 0] < 1) & (points[:, 1] < 1))[0] idx = np.nonzero(_in_polygon(points, polygon))[0] ae(idx, idx_expected) #------------------------------------------------------------------------------ # Test read/save #------------------------------------------------------------------------------ def test_read_write(tempdir): arr = np.arange(10).astype(np.float32) path = op.join(tempdir, 'test.npy') write_array(path, arr) ae(read_array(path), arr) ae(read_array(path, mmap_mode='r'), arr) #------------------------------------------------------------------------------ # Test virtual concatenation #------------------------------------------------------------------------------ def test_concatenate_virtual_arrays_1(): arrs = [np.arange(5), np.arange(10, 12), np.array([0])] c = _concatenate_virtual_arrays(arrs, scaling=1) assert c.shape == (8,) assert c._get_recording(3) == 0 assert c._get_recording(5) == 1 ae(c[:], [0, 1, 2, 3, 4, 10, 11, 0]) ae(c[0], [0]) ae(c[4], [4]) ae(c[5], [10]) ae(c[6], [11]) ae(c[4:6], [4, 10]) ae(c[:6], [0, 1, 2, 3, 4, 10]) ae(c[4:], [4, 10, 11, 0]) ae(c[4:-1], [4, 10, 11]) def test_concatenate_virtual_arrays_2(): arrs = [np.zeros((2, 2)),
np.ones((3, 2))
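# Illustrative usage sketch (not part of the test suite above; it only restates what the
# assertions in test_concatenate_virtual_arrays_1 exercise): _concatenate_virtual_arrays
# presents several arrays as a single virtually concatenated array, e.g.
#   arrs = [np.arange(5), np.arange(10, 12), np.array([0])]
#   c = _concatenate_virtual_arrays(arrs, scaling=1)
#   c.shape       # (8,)
#   c[5]          # [10]      -> element 5 lives in the second array (recording 1)
#   c[4:6]        # [4, 10]   -> slices may span array boundaries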
##################################################################################################################### # more_nodes: This module implements several new nodes and helper functions. It is part of the Cuicuilco framework. # # # # These nodes include: BasicAdaptiveCutoffNode, SFA_GaussianClassifier, RandomizedMaskNode, GeneralExpansionNode, # # PointwiseFunctionNode, RandomPermutationNode # # # # By <NAME>. <EMAIL> # # Ruhr-University-Bochum, Institute for Neural Computation, Group of Prof. Dr. Wiskott # ##################################################################################################################### from __future__ import absolute_import from __future__ import print_function from __future__ import division import numpy import scipy import scipy.optimize import scipy.stats from scipy.stats import ortho_group import copy import sys import inspect import mdp from mdp.utils import (mult, pinv, symeig, CovarianceMatrix, SymeigException) from . import sfa_libs from .sfa_libs import select_rows_from_matrix, distance_squared_Euclidean # from . import inversion from .histogram_equalization import * def add_corrections(initial_corrections, added_corrections): if initial_corrections is None: return added_corrections elif added_corrections is None: return initial_corrections else: return initial_corrections * added_corrections def combine_correction_factors(flow_or_node, average_over_layers = True, average_inside_layers=False): """This function takes into account all corrections performed by the BasicAdaptiveCutoffNodes of a flow (possibly a hierarchical network) and combines them into a single vector. The function also works on standard nodes. average_over_layers: if True, the combined corrections are the average of the corrections of each node in the flow, otherwise they are multiplied (omitting nodes without corrections) average_inside_layers: if True, the combined corrections of Layers are computed as the average of the corrections of each node in the layer, otherwise they are multiplied The combined correction factor of each sample estimates the probability that it is not an anomaly. That is, correction=1.0 implies "not anomaly", and smaller values increase the rareness of the sample. 
""" final_corrections = None final_gauss_corrections = None if isinstance(flow_or_node, mdp.Flow): flow = flow_or_node if average_over_layers: corrections = [] gauss_corrections = [] for node in flow: another_node_corrections, another_node_gauss_corrections = combine_correction_factors(node, average_over_layers) if another_node_corrections is not None: corrections.append(another_node_corrections) if another_node_gauss_corrections is not None: gauss_corrections.append(another_node_gauss_corrections) if len(corrections) > 0: corrections = numpy.stack(corrections, axis=1) final_corrections = corrections.mean(axis=1) gauss_corrections = numpy.stack(gauss_corrections, axis=1) final_gauss_corrections = gauss_corrections.mean(axis=1) else: final_corrections = None final_gauss_corrections = None else: for node in flow: another_node_corrections, another_node_gauss_corrections = combine_correction_factors(node) final_corrections = add_corrections(final_corrections, another_node_corrections) final_gauss_corrections = add_corrections(final_gauss_corrections, another_node_gauss_corrections) elif isinstance(flow_or_node, mdp.Node): node = flow_or_node if isinstance(node, mdp.hinet.CloneLayer): err = "CloneLayers not yet supported when computing/storing correction factors" print(err) final_corrections = None final_gauss_corrections = None # raise Exception(err) elif isinstance(node, mdp.hinet.Layer): if average_inside_layers: corrections = [] gauss_corrections = [] for another_node in node.nodes: another_node_corrections, another_node_gauss_corrections = combine_correction_factors(another_node) corrections.append(another_node_corrections) gauss_corrections.append(another_node_gauss_corrections) if len(corrections) > 0: corrections = numpy.stack(corrections, axis=1) final_corrections = corrections.mean(axis=1) gauss_corrections = numpy.stack(gauss_corrections, axis=1) final_gauss_corrections = gauss_corrections.mean(axis=1) else: final_corrections = None final_gauss_corrections = None else: for another_node in node.nodes: another_node_corrections, another_node_gauss_corrections = combine_correction_factors(another_node) final_corrections = add_corrections(final_corrections, another_node_corrections) final_gauss_corrections = add_corrections(final_gauss_corrections, another_node_gauss_corrections) elif isinstance(node, BasicAdaptiveCutoffNode): final_corrections = add_corrections(final_corrections, node.corrections) final_gauss_corrections = add_corrections(final_gauss_corrections, node.gauss_corrections) return final_corrections, final_gauss_corrections class BasicAdaptiveCutoffNode(mdp.PreserveDimNode): """Node that allows to "cut off" values at bounds derived from the training data. This node is similar to CutoffNode, but the bounds are computed based on the training data. And it is also similar to AdaptiveCutoffNode, but no histograms are stored and the limits are hard. This node does not have any have no effect on training data but it corrects atypical variances in test data and may improve generalization. """ def __init__(self, input_dim=None, output_dim=None, num_rotations=1, measure_corrections=False, only_measure=False, verbose=True, dtype=None): """Initialize node. 
""" super(BasicAdaptiveCutoffNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype) self.lower_bounds = None self.upper_bounds = None self.rotation_matrices = None self.num_rotations = num_rotations self.measure_corrections = measure_corrections self.corrections = None self.gauss_corrections = None self.only_measure = only_measure self.verbose = verbose self._avg_x = None self._avg_x_squared = None self._num_samples = 0 self._std_x = None if self.verbose: print("num_rotations:", num_rotations, "measure_corrections:", measure_corrections, "only_measure:", only_measure, "verbose:", verbose) @staticmethod def is_trainable(): return True @staticmethod def is_invertible(): return True @staticmethod def _get_supported_dtypes(): return (mdp.utils.get_dtypes('Float')) def _train(self, x): # initialize rotations and arrays that store the bounds dim = x.shape[1] if self.rotation_matrices is None: self.rotation_matrices = [None] * self.num_rotations self.lower_bounds = [None] * self.num_rotations self.upper_bounds = [None] * self.num_rotations if self.num_rotations >= 1: self.rotation_matrices[0] = numpy.eye(dim) for i in range(1, self.num_rotations): self.rotation_matrices[i] = ortho_group.rvs(dim=dim) # The training method updates the lower and upper bounds for i in range(self.num_rotations): rotated_data = numpy.dot(x, self.rotation_matrices[i]) if self.lower_bounds[i] is None: self.lower_bounds[i] = rotated_data.min(axis=0) else: self.lower_bounds[i] = numpy.minimum(self.lower_bounds[i], rotated_data.min(axis=0)) if self.upper_bounds[i] is None: self.upper_bounds[i] = rotated_data.max(axis=0) else: self.upper_bounds[i] = numpy.maximum(self.upper_bounds[i], rotated_data.max(axis=0)) if self._avg_x is None: self._avg_x = x.sum(axis=0) self._avg_x_squared = (x**2).sum(axis=0) else: self._avg_x += x.sum(axis=0) self._avg_x_squared += (x ** 2).sum(axis=0) self._num_samples += x.shape[0] def _stop_training(self): self._avg_x /= self._num_samples self._avg_x_squared /= self._num_samples self._std_x = (self._avg_x_squared - self._avg_x **2) ** 0.5 if self.verbose: print("self._avg_x", self._avg_x) print("self._avg_x_squared", self._avg_x_squared) print("self._std_x", self._std_x) def _execute(self, x): """Return the clipped data.""" num_samples = x.shape[0] self.corrections = numpy.ones(num_samples) self.gauss_corrections = numpy.ones(num_samples) if self.only_measure: x_copy = x.copy() for i in range(self.num_rotations): data_rotated = numpy.dot(x, self.rotation_matrices[i]) data_rotated_clipped = numpy.clip(data_rotated, self.lower_bounds[i], self.upper_bounds[i]) if self.measure_corrections: interval = numpy.abs(self.upper_bounds[i] - self.lower_bounds[i]) delta = numpy.abs(data_rotated_clipped - data_rotated) # factors = interval ** 2 / (delta + interval) ** 2 norm_delta = delta / interval factors = 1.0 - (norm_delta / (norm_delta + 0.15)) ** 2 self.corrections *= factors.prod(axis=1) # consider using here and below the mean instead of the product if self.verbose: print("Factors of BasicAdaptiveCutoffNode:", factors) # Computation of Gaussian probabilities factors = scipy.stats.norm.pdf(x, loc=self._avg_x, scale=4*self._std_x) if self.verbose: print("Factors of BasicAdaptiveCutoffNode (gauss):", factors) print("x.mean(axis=0):", x.mean(axis=0)) print("x.std(axis=0):", x.std(axis=0)) self.gauss_corrections *= factors.prod(axis=1) x = numpy.dot(data_rotated_clipped, self.rotation_matrices[i].T) # Project back to original coordinates if self.verbose: print("Corrections of 
BasicAdaptiveCutoffNode:", self.corrections) print("20 worst final corrections at indices:", numpy.argsort(self.corrections)[0:20]) print("20 worst final corrections:", self.corrections[numpy.argsort(self.corrections)[0:20]]) print("Gaussian corrections of BasicAdaptiveCutoffNode:", self.gauss_corrections) print("20 worst final Gaussian corrections at indices:", numpy.argsort(self.gauss_corrections)[0:20]) print("20 worst final Gaussian corrections:", self.corrections[numpy.argsort(self.gauss_corrections)[0:20]]) if self.only_measure: return x_copy else: return x def _inverse(self, x): """An approximate inverse applies the same clipping. """ return self.execute(x) class SFA_GaussianClassifier(mdp.ClassifierNode): """ This node is a simple extension of the GaussianClassifier node, where SFA is applied before the classifier. The labels are important, since they are used to order the data samples before SFA. """ def __init__(self, reduced_dim=None, verbose=False, **argv): super(SFA_GaussianClassifier, self).__init__(**argv) self.gc_node = mdp.nodes.GaussianClassifier() self.reduced_dim = reduced_dim if self.reduced_dim > 0: self.sfa_node = mdp.nodes.SFANode(output_dim=self.reduced_dim) else: self.sfa_node = mdp.nodes.IdentityNode() self.verbose = verbose def _train(self, x, labels=None): if self.reduced_dim > 0: ordering = numpy.argsort(labels) x_ordered = x[ordering, :] self.sfa_node.train(x_ordered) self.sfa_node.stop_training() if self.verbose: print("SFA_GaussianClassifier: sfa_node.d = ", self.sfa_node.d) else: # sfa_node is the identity node pass y = self.sfa_node.execute(x) self.gc_node.train(y, labels=labels) self.gc_node.stop_training() def _label(self, x): y = self.sfa_node.execute(x) return self.gc_node.label(y) def regression(self, x, avg_labels, estimate_std=False): y = self.sfa_node.execute(x) return self.gc_node.regression(y, avg_labels, estimate_std) def regressionMAE(self, x, avg_labels): y = self.sfa_node.execute(x) return self.gc_node.regressionMAE(y, avg_labels) def softCR(self, x, true_classes): y = self.sfa_node.execute(x) return self.gc_node.softCR(y, true_classes) def class_probabilities(self, x): y = self.sfa_node.execute(x) return self.gc_node.class_probabilities(y) @staticmethod def is_trainable(): return True # using the provided average and standard deviation def gauss_noise(x, avg, std): return numpy.random.normal(avg, std, x.shape) # Zero centered def additive_gauss_noise(x, std): return x + numpy.random.normal(0, std, x.shape) class RandomizedMaskNode(mdp.Node): """Selectively mask some components of a random variable by hiding them with arbitrary noise or by removing them from the feature vector. 
This code has been inspired by NoiseNode """ def __init__(self, remove_mask=None, noise_amount_mask=None, noise_func=gauss_noise, noise_args=(0, 1), noise_mix_func=None, input_dim=None, dtype=None): self.remove_mask = remove_mask self.noise_amount_mask = noise_amount_mask self.noise_func = noise_func self.noise_args = noise_args self.noise_mix_func = noise_mix_func self.seen_samples = 0 self.x_avg = None self.x_std = None self.type = dtype if remove_mask is not None and input_dim is None: input_dim = remove_mask.size elif remove_mask is None and input_dim is not None: remove_mask = numpy.zeros(input_dim) > 0.5 elif remove_mask and input_dim is not None: if remove_mask.size != input_dim: err = "size of remove_mask and input_dim not compatible" raise Exception(err) else: err = "At least one of input_dim or remove_mask should be specified" raise Exception(err) if noise_amount_mask is None: print ("Signal will be only the computed noise") self.noise_amount_mask = numpy.ones(input_dim) else: self.noise_amount_mask = noise_amount_mask output_dim = remove_mask.size - remove_mask.sum() print ("Output_dim should be:", output_dim) super(RandomizedMaskNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype) @staticmethod def is_trainable(): return True def _train(self, x): if self.x_avg is None: self.x_avg = numpy.zeros(self.input_dim, dtype=self.type) self.x_std = numpy.zeros(self.input_dim, dtype=self.type) new_samples = x.shape[0] self.x_avg = (self.x_avg * self.seen_samples + x.sum(axis=0)) / (self.seen_samples + new_samples) self.x_std = (self.x_std * self.seen_samples + x.std(axis=0) * new_samples) / (self.seen_samples + new_samples) self.seen_samples = self.seen_samples + new_samples @staticmethod def is_invertible(): return False def _execute(self, x): print ("computed X_avg=", self.x_avg) print ("computed X_std=", self.x_std) noise_mat = self.noise_func(x, self.x_avg, self.x_std) # noise_mat = self._refcast(self.noise_func(*self.noise_args, # **{'size': x.shape})) print ("Noise_amount_mask:", self.noise_amount_mask) print ("Noise_mat:", noise_mat) noisy_signal = (1.0 - self.noise_amount_mask) * x + self.noise_amount_mask * noise_mat preserve_mask = (self.remove_mask == False) return noisy_signal[:, preserve_mask] class GeneralExpansionNode(mdp.Node): def __init__(self, funcs, input_dim=None, dtype=None, \ use_pseudoinverse=True, use_hint=False, output_dim=None, starting_point=None, use_special_features=False, max_steady_factor=1.5, delta_factor=0.6, min_delta=0.00001, verbose=False): self.funcs = funcs self.exp_output_dim = output_dim self.expanded_dims = None self.starting_point = starting_point self.use_special_features = use_special_features if self.funcs == "RandomSigmoids" and self.exp_output_dim <= 0: er = "output_dim in GeneralExpansion node with RandomSigmoids should be at least 1, but is" + \ str(self.exp_output_dim) raise Exception(er) self.use_pseudoinverse = use_pseudoinverse self.use_hint = use_hint self.max_steady_factor = max_steady_factor self.delta_factor = delta_factor self.min_delta = min_delta self.verbose = verbose if self.verbose: print("GeneralExpansionNode with expansion functions:", funcs) self.rs_coefficients = None self.rs_offsets = None self.rs_data_training_std = None self.rs_data_training_mean = None self.normalization_constant = None super(GeneralExpansionNode, self).__init__(input_dim, dtype) def expanded_dim(self, n): exp_dim = 0 x = numpy.zeros((1, n)) for func in self.funcs: outx = func(x) # print "outx= ", outx exp_dim += outx.shape[1] 
return exp_dim def output_sizes(self, n): if self.funcs == "RandomSigmoids": sizes = [self.exp_output_dim] else: sizes = numpy.zeros(len(self.funcs), dtype=int) x = numpy.zeros((1, n)) for i, func in enumerate(self.funcs): outx = func(x) sizes[i] = outx.shape[1] print ("S", end="") return sizes def is_trainable(self): if self.funcs == "RandomSigmoids": return True else: return False def _train(self, x, verbose=None): if verbose is None: verbose = self.verbose if self.input_dim is None: self.set_input_dim(x.shape[1]) input_dim = self.input_dim # Generate functions used for regression self.rs_data_training_mean = x.mean(axis=0) self.rs_data_training_std = x.std(axis=0) if verbose: print ("GeneralExpansionNode: output_dim=", self.output_dim, end="") starting_point = self.starting_point c1, l1 = generate_random_sigmoid_weights(self.input_dim, self.output_dim) if starting_point == "Identity": if verbose: print ("starting_point: adding (encoded) identity coefficients to expansion") c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim) l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity elif starting_point == "Sigmoids": if verbose: print ("starting_point: adding sigmoid of coefficients to expansion") c1[0:input_dim, 0:input_dim] = 4.0 * numpy.identity(input_dim) l1[0:input_dim] = numpy.ones(input_dim) * 0.0 elif starting_point == "08Exp": if verbose: print ("starting_point: adding (encoded) 08Exp coefficients to expansion") c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim) c1[0:input_dim, input_dim:2 * input_dim] = numpy.identity(input_dim) l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity l1[input_dim:2 * input_dim] = numpy.ones(input_dim) * 0.8 # Code abs(x)**0.8 elif starting_point == "Pseudo-Identity": if verbose: print ("starting_point: adding pseudo-identity coefficients to expansion") c1[0:input_dim, 0:input_dim] = 0.1 * numpy.identity(input_dim) l1[0:input_dim] = numpy.zeros(input_dim) # nothig is encoded elif starting_point is None: if verbose: print ("starting_point: no starting point") else: er = "Unknown starting_point", starting_point raise Exception(er) self.rs_coefficients = c1 self.rs_offsets = l1 # 4.0 was working fine, 2.0 was apparently better. This also depends on how many features are computed!!! 
self.normalization_constant = (2.0 / self.input_dim) ** 0.5 def is_invertible(self): return self.use_pseudoinverse def inverse(self, x, use_hint=None, max_steady_factor=None, delta_factor=None, min_delta=None): if self.use_pseudoinverse is False: ex = "Inversion not activated" raise Exception(ex) if use_hint is None: use_hint = self.use_hint if max_steady_factor is None: max_steady_factor = self.max_steady_factor if delta_factor is None: delta_factor = self.delta_factor if min_delta is None: min_delta = self.min_delta # print "Noisy pre = ", x, "****************************************************" app_x_2, app_ex_x_2 = invert_exp_funcs2(x, self.input_dim, self.funcs, use_hint=use_hint, max_steady_factor=max_steady_factor, delta_factor=delta_factor, min_delta=min_delta) # print "Noisy post = ", x, "****************************************************" return app_x_2 def _set_input_dim(self, n): self._input_dim = n if self.funcs == "RandomSigmoids": self._output_dim = self.exp_output_dim else: self._output_dim = self.expanded_dim(n) self.expanded_dims = self.output_sizes(n) def _execute(self, x): if self.input_dim is None: self.set_input_dim(x.shape[1]) if "expanded_dims" not in self.__dict__: self.expanded_dims = self.output_sizes(self.input_dim) if self.funcs != "RandomSigmoids": num_samples = x.shape[0] # output_dim = expanded_dim(self.input_dim) # self.expanded_dims = self.output_sizes(self.input_dim) out = numpy.zeros((num_samples, self.output_dim)) current_pos = 0 for i, func in enumerate(self.funcs): out[:, current_pos:current_pos + self.expanded_dims[i]] = func(x) current_pos += self.expanded_dims[i] else: data_norm = self.normalization_constant * (x - self.rs_data_training_mean) / self.rs_data_training_std # A variation of He random weight initialization out = extract_sigmoid_features(data_norm, self.rs_coefficients, self.rs_offsets, scale=1.0, offset=0.0, use_special_features=self.use_special_features) return out class PointwiseFunctionNode(mdp.Node): """"This node applies a function to the whole input. It also supports a given 'inverse' function. """ def __init__(self, func, inv_func, input_dim=None, dtype=None): self.func = func self.inv_func = inv_func super(PointwiseFunctionNode, self).__init__(input_dim, dtype) @staticmethod def is_trainable(): return False def is_invertible(self): if self.inv_func is None: return True else: return False def inverse(self, x): if self.inv_func: return self.inv_func(x) else: return x def _set_input_dim(self, n): self._input_dim = n self._output_dim = n def _execute(self, x): if self.input_dim is None: self.set_input_dim(x.shape[1]) if self.func: return self.func(x) else: return x class PairwiseAbsoluteExpansionNode(mdp.Node): def expanded_dim(self, n): return n + n * (n + 1) // 2 def is_trainable(self): return False def is_invertible(self): return False def _set_input_dim(self, n): self._input_dim = n self._output_dim = self.expanded_dim(n) def _execute(self, x): out = numpy.concatenate((x, pairwise_expansion(x, abs_sum)), axis=1) return out # TODO:ADD inverse type sum, suitable for when output_scaling is True class PInvSwitchboard(mdp.hinet.Switchboard): """This node is a variation of the RectangularSwitchboard that facilitates (approximate) inverse operations. 
""" def __init__(self, input_dim, connections, slow_inv=False, type_inverse="average", output_scaling=True, additive_noise_std=0.00004, verbose=False): super(PInvSwitchboard, self).__init__(input_dim=input_dim, connections=connections) self.pinv = None self.mat2 = None self.slow_inv = slow_inv self.type_inverse = type_inverse self.output_dim = len(connections) self.output_scales = None self.additive_noise_std = additive_noise_std self.verbose = verbose if verbose: print ("self.inverse_connections=", self.inverse_connections, "self.slow_inv=", self.slow_inv) # WARNING! IF/ELIF doesn't make any sense! what are the semantics of inverse_connections if self.inverse_connections is None: if verbose: print ("type(connections)", type(connections)) all_outputs = numpy.arange(self.output_dim) self.inverse_indices = [[]] * self.input_dim for i in range(self.input_dim): self.inverse_indices[i] = all_outputs[connections == i] # print "inverse_indices[%d]="%i, self.inverse_indices[i] # print "inverse_indices =", self.inverse_indices elif self.inverse_connections is None and not self.slow_inv: index_array = numpy.argsort(connections) value_array = connections[index_array] value_range = numpy.zeros((input_dim, 2)) self.inverse_indices = range(input_dim) for i in range(input_dim): value_range[i] = numpy.searchsorted(value_array, [i - 0.5, i + 0.5]) if value_range[i][1] == value_range[i][0]: self.inverse_indices[i] = [] else: self.inverse_indices[i] = index_array[value_range[i][0]: value_range[i][1]] if verbose: print ("inverse_indices computed in PINVSB") elif self.inverse_connections is None and self.slow_inv: if verbose: print ("warning using slow inversion in PInvSwitchboard!!!") # find input variables not used by connections: used_inputs = numpy.unique(connections) used_inputs_set = set(used_inputs) all_inputs_set = set(range(input_dim)) unused_inputs_set = all_inputs_set - all_inputs_set.intersection(used_inputs_set) unused_inputs = list(unused_inputs_set) self.num_unused_inputs = len(unused_inputs) # extend connections array # ext_connections = numpy.concatenate((connections, unused_inputs)) # create connections matrix mat_height = len(connections) + len(unused_inputs) mat_width = input_dim mat = numpy.zeros((mat_height, mat_width)) # fill connections matrix for i in range(len(connections)): mat[i, connections[i]] = 1 # for i in range(len(unused_inputs)): mat[i + len(connections), unused_inputs[i]] = 1 # if verbose: print ("extended matrix is:", mat) # compute pseudoinverse mat2 = numpy.matrix(mat) self.mat2 = mat2 self.pinv = (mat2.T * mat2).I * mat2.T else: if verbose: print ("Inverse connections already given, in PInvSwitchboard") if output_scaling: if self.inverse_connections is None and not self.slow_inv: if verbose: print ("**A", end="") if self.type_inverse != "average": err = "self.type_inverse not supported " + self.type_inverse raise Exception(err) self.output_scales = numpy.zeros(self.output_dim) tt = 0 for i in range(self.input_dim): output_indices = self.inverse_indices[i] multiplicity = len(output_indices) for j in output_indices: self.output_scales[j] = (1.0 / multiplicity) ** 0.5 tt += 1 if verbose: print ("connections in switchboard considered: ", tt, "output dimension=", self.output_dim) elif self.inverse_connections is None and self.slow_inv: if verbose: print ("**B", end="") err = "use of self.slow_inv = True is obsolete" raise Exception(err) else: # inverse connections are unique, mapping bijective if verbose: print ("**C", end="") self.output_scales = numpy.ones(self.output_dim) 
else: if verbose: print ("**D", end="") self.output_scales = numpy.ones(self.output_dim) if verbose: print ("PINVSB output_scales =", self.output_scales) print ("SUM output_scales/len(output_scales)=", self.output_scales.sum() / len(self.output_scales)) print ("output_scales.min()", self.output_scales.min()) # PInvSwitchboard is always invertible def is_invertible(self): return True def _execute(self, x): force_float32_type = False # Experimental variation, ignore if force_float32_type: x = x.astype("float32") use_fortran_ordering = False # Experimental variation, ignore if use_fortran_ordering: x = numpy.array(x, order="FORTRAN") y = super(PInvSwitchboard, self)._execute(x) # print "y computed" # print "y.shape", y.shape # print "output_scales ", self.output_scales y *= self.output_scales if self.additive_noise_std > 0.0: n, dim = y.shape steps = int(n / 9000 + 1) if self.verbose: print ("PInvSwitchboard is adding noise to the output features with std", self.additive_noise_std, end="") print (" computation in %d steps" % steps) step_size = int(n / steps) for s in range(steps): y[step_size * s:step_size * (s + 1)] += numpy.random.uniform(low=-(3 ** 0.5) * self.additive_noise_std, high=(3 ** 0.5) * self.additive_noise_std, size=(step_size, dim)) if self.verbose: print ("noise block %d added" % s) if step_size * steps < n: rest = n - step_size * steps y[step_size * steps:step_size * steps + rest] += numpy.random.uniform( low=-(3 ** 0.5) * self.additive_noise_std, high=(3 ** 0.5) * self.additive_noise_std, size=(rest, dim)) if self.verbose: print ("remaining noise block added") return y # If true inverse is present, just use it, otherwise compute it by means of the pseudoinverse def _inverse(self, x): x = x * (1.0 / self.output_scales) if self.inverse_connections is None and not self.slow_inv: height_x = x.shape[0] mat2 = numpy.zeros((height_x, self.input_dim)) for row in range(height_x): x_row = x[row] for i in range(self.input_dim): elements = x_row[self.inverse_indices[i]] if self.type_inverse == "average": if elements.size > 0: mat2[row][i] = elements.mean() else: err = "self.type_inverse not supported: " + self.type_inverse raise Exception(err) output = mat2 elif self.inverse_connections is None and self.slow_inv: height_x = x.shape[0] full_x = numpy.concatenate((x, 255 * numpy.ones((height_x, self.num_unused_inputs))), axis=1) data2 = numpy.matrix(full_x) if self.verbose: print ("x=", x) print ("data2=", data2) print ("PINV=", self.pinv) output = (self.pinv * data2.T).T else: if self.verbose: print ("using inverse_connections in PInvSwitchboard") # return apply_permutation_to_signal(x, self.inverse_connections, self.input_dim) output = select_rows_from_matrix(x, self.inverse_connections) return output class RandomPermutationNode(mdp.Node): """This node randomly permutes the components of the input signal in a consistent way. The concrete permuntation is fixed during the training procedure. """ def __init__(self, input_dim=None, output_dim=None, dtype=None, verbose=False): super(RandomPermutationNode, self).__init__(input_dim, output_dim, dtype) self.permutation = None self.inv_permutation = None self.dummy = 5 # without it the hash fails!!!!! 
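    # Worked micro-example of the permutation bookkeeping set up in _train() below
    # (illustrative values, not computed by the node): if the drawn permutation is
    #   permutation     = [2, 0, 1]   # output column j is taken from input column permutation[j]
    # then inv_permutation is filled so that inv_permutation[permutation] == [0, 1, 2], i.e.
    #   inv_permutation = [1, 2, 0]   # inverse() uses it to restore the original column order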
def is_trainable(self): return True def is_invertible(self): return True def inverse(self, x): return select_rows_from_matrix(x, self.inv_permutation) # def localized_inverse(self, xf, yf, y): # return y[:, self.inv_permutation] def _set_input_dim(self, n, verbose=False): if verbose: print ("RandomPermutationNode: Setting input_dim to ", n) self._input_dim = n self._output_dim = n def _train(self, x, verbose=True): n = x.shape[1] if self.input_dim is None: self.set_input_dim(n) if self.input_dim is None: print ("*******Really Setting input_dim to ", n) self.input_dim = n if self.output_dim is None: print ("*******Really Setting output_dim to ", n) self.output_dim = n if self.permutation is None: if verbose: print ("Creating new random permutation") print ("Permutation=", self.permutation) print ("x=", x, "with shape", x.shape) print ("Input dim is: ", self.input_dim()) self.permutation = numpy.random.permutation(range(self.input_dim)) self.inv_permutation = numpy.zeros(self.input_dim, dtype="int") self.inv_permutation[self.permutation] = numpy.arange(self.input_dim) if verbose: print ("Permutation=", self.permutation) print ("Output dim is: ", self.output_dim) def _execute(self, x, verbose=False): # print "RandomPermutationNode: About to excecute, with input x= ", x y = select_rows_from_matrix(x, self.permutation) if verbose: print ("Output shape is = ", y.shape, end="") return y def sfa_pretty_coefficients(sfa_node, transf_training, start_negative=True): count = 0 for i in range(sfa_node.output_dim): sum_firsts = transf_training[0, i] + transf_training[1, i] + transf_training[2, i] + transf_training[3, i] + \ transf_training[4, i] + transf_training[5, i] + transf_training[6, i] + transf_training[7, i] + \ transf_training[8, i] + transf_training[9, i] + transf_training[10, i] + transf_training[11, i] if (sum_firsts > 0 and start_negative) or (sum_firsts < 0 and not start_negative): sfa_node.sf[:, i] = (sfa_node.sf[:, i] * -1) transf_training[:, i] = (transf_training[:, i] * -1) count += 1 print ("Polarization of %d SFA Signals Corrected!!!\n" % count, end="") sfa_node._bias = mdp.utils.mult(sfa_node.avg, sfa_node.sf) print ("Bias updated") return transf_training def describe_flow(flow): length = len(flow) total_size = 0 print ("Flow has %d nodes:" % length) for i in range(length): node = flow[i] node_size = compute_node_size(node) total_size += node_size print ("Node[%d] is %s, has input_dim=%d, output_dim=%d and size=%d" % (i, str(node), node.input_dim, node.output_dim, node_size)) if isinstance(node, mdp.hinet.CloneLayer): print (" contains %d cloned nodes of type %s, each with input_dim=%d, output_dim=%d" % (len(node.nodes), str(node.nodes[0]), node.nodes[0].input_dim, node.nodes[0].output_dim)) elif isinstance(node, mdp.hinet.Layer): print (" contains %d nodes of type %s, each with input_dim=%d, output_dim=%d" % (len(node.nodes), str(node.nodes[0]), node.nodes[0].input_dim, node.nodes[0].output_dim)) print ("Total flow size: %d" % total_size) print ("Largest node size: %d" % compute_largest_node_size(flow)) def display_node_eigenvalues(node, i, mode="All"): if isinstance(node, mdp.hinet.CloneLayer): if isinstance(node.nodes[0], mdp.nodes.SFANode): print ("Node %d is a CloneLayer that contains an SFANode with d=" % i, node.nodes[0].d) # elif isinstance(node.nodes[0], mdp.nodes.IEVMNode): # if node.nodes[0].use_sfa: # print ("Node %d is a CloneLayer that contains an IEVMNode containing an SFA node with" % i, end="") # print ("num_sfa_features_preserved=%d" % 
node.nodes[0].num_sfa_features_preserved, end="") # print ("and d=", node.nodes[0].sfa_node.d) elif isinstance(node.nodes[0], mdp.nodes.iGSFANode): print ("Node %d is a CloneLayer that contains an iGSFANode containing an SFA node with " % i, end="") print ("num_sfa_features_preserved=%d " % node.nodes[0].num_sfa_features_preserved, end="") print ("and d=", node.nodes[0].sfa_node.d, end=" ") print ("and evar=", node.nodes[0].evar) elif isinstance(node.nodes[0], mdp.nodes.PCANode): print ("Node %d is a CloneLayer that contains a PCANode with d=" % i, node.nodes[0].d, end=" ") print ("and evar=", node.nodes[0].explained_variance) elif isinstance(node, mdp.hinet.Layer): if isinstance(node.nodes[0], mdp.nodes.SFANode): if mode == "Average": out = 0.0 for n in node.nodes: out += n.d print ("Node %d is a Layer that contains %d SFANodes with avg(d)= " % (i, len(node.nodes)), out / len(node.nodes)) elif mode == "All": for n in node.nodes: print ("Node %d is a Layer that contains an SFANode with d= " % i, n.d) elif mode == "FirstNodeInLayer": print ("Node %d is a Layer, and its first SFANode has d= " % i, node.nodes[0].d) else: er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"' raise Exception(er) elif isinstance(node.nodes[0], mdp.nodes.iGSFANode): if mode == "Average": evar_avg = 0.0 d_avg = 0.0 avg_num_sfa_features = 0.0 min_num_sfa_features_preserved = min([n.num_sfa_features_preserved for n in node.nodes]) for n in node.nodes: d_avg += n.sfa_node.d[:min_num_sfa_features_preserved] evar_avg += n.evar avg_num_sfa_features += n.num_sfa_features_preserved d_avg /= len(node.nodes) evar_avg /= len(node.nodes) avg_num_sfa_features /= len(node.nodes) print ("Node %d" % i, "is a Layer that contains", len(node.nodes), "iGSFANodes containing SFANodes with " + "avg(num_sfa_features_preserved)=%f " % avg_num_sfa_features, "and avg(d)=%s" % str(d_avg) + "and avg(evar)=%f" % evar_avg) elif mode == "All": print ("Node %d is a Layer that contains iGSFANodeRecNodes:" % i) for n in node.nodes: print (" iGSFANode containing an SFANode with num_sfa_features_preserved=%f, d=%s and evar=%f" % (n.num_sfa_features_preserved, str(n.sfa_node.d), n.evar)) elif mode == "FirstNodeInLayer": print ("Node %d is a Layer, and its first iGSFANode " % i, end="") print ("contains an SFANode with num_sfa_features_preserved)=%f, d=%s and evar=%f" % (node.nodes[0].num_sfa_features_preserved, str(node.nodes[0].sfa_node.d), node.nodes[0].evar)) else: er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"' raise Exception(er) elif isinstance(node.nodes[0], mdp.nodes.SFAAdaptiveNLNode): if mode == "Average": out = 0.0 for n in node.nodes: out += n.sfa_node.d print ("Node %d is a Layer that contains SFAAdaptiveNLNodes containing SFANodes with", end="") print ("avg(d)=" % i, out / len(node.nodes)) elif mode == "All": for n in node.nodes: print ("Node %d is a Layer that contains an SFAAdaptiveNLNode" % i, end="") print ("containing an SFANode with d=", n.sfa_node.d) elif mode == "FirstNodeInLayer": print ("Node %d is a Layer, and its first SFAAdaptiveNLNode" % i) print ("contains an SFANode with d=", node.nodes[0].sfa_node.d) else: er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"' raise Exception(er) elif isinstance(node.nodes[0], mdp.nodes.PCANode): if mode == "Average": d_avg = 0.0 evar_avg = 0.0 min_num_pca_features_preserved = min([n.output_dim for n in node.nodes]) for n in node.nodes: d_avg += n.d[:min_num_pca_features_preserved] 
evar_avg += n.explained_variance d_avg /= len(node.nodes) evar_avg /= len(node.nodes) print ("Node %d is a Layer that contains PCA nodes with avg(d)=%s and avg(evar)=%f" % ( i, str(d_avg), evar_avg)) elif mode == "All": print ("Node %d is a Layer that contains PCA nodes:" % i) for n in node.nodes: print (" PCANode with d=%s and evar=%f" % (str(n.d), n.explained_variance)) elif mode == "FirstNodeInLayer": print ("Node %d is a Layer, and its first PCANode" % i, "has d=%s and evar=%f" % ( str(node.nodes[0].sfa_node.d), node.nodes[0].explained_variance)) else: er = 'Unknown mode in display_eigenvalues, try "FirstNodeInLayer", "Average" or "All"' raise Exception(er) elif isinstance(node, mdp.nodes.iGSFANode): print ("Node %d is an iGSFANode containing an SFA node with num_sfa_features_preserved=%d" % (i, node.num_sfa_features_preserved), end="") print ("and d=", node.sfa_node.d) elif isinstance(node, mdp.nodes.SFANode): print ("Node %d is an SFANode with d=" % i, node.d) elif isinstance(node, mdp.nodes.PCANode): print ("Node %d is a PCANode with d=%s and evar=%f" % (i, str(node.d), node.explained_variance)) else: print ("Cannot display eigenvalues of Node %d" % i, node) def display_eigenvalues(flow, mode="All"): """This function displays the learned eigenvalues of different nodes in a trained Flow object. Three mode parameter can take three values and it specifies what to do when a layer is found: "FirstNodeInLayer": the eigenvalues of the first node in the layer are displayed "Average": the average eigenvalues of all nodes in a layer are displayed (bounded to the smallest length). "All": the eigenvalues of all nodes in the layer are displayed. """ length = len(flow) print ("Displaying eigenvalues of SFA Nodes in flow of length", length) for i in range(length): node = flow[i] display_node_eigenvalues(node, i, mode) def compute_node_size(node, verbose=False): """ Computes the number of parameters (weights) that have been learned by node. Note: Means and offsets are not counted, only (multiplicative) weights. The node must have been already trained. The following nodes are supported currently: SFANode, PCANode, WhitheningNode, CloneLayer, Layer, GSFANode, iGSFANode, LinearRegressionNode """ if isinstance(node, mdp.nodes.iGSFANode): return compute_node_size(node.sfa_node) + compute_node_size(node.pca_node) + compute_node_size(node.lr_node) elif isinstance(node, (mdp.nodes.SFANode, mdp.nodes.PCANode, mdp.nodes.GSFANode, mdp.nodes.LinearRegressionNode, mdp.nodes.WhiteningNode)) and node.input_dim is not None and node.output_dim is not None: return node.input_dim * node.output_dim elif isinstance(node, mdp.hinet.CloneLayer): return compute_node_size(node.nodes[0]) elif isinstance(node, mdp.hinet.Layer): size = 0 for node_child in node.nodes: size += compute_node_size(node_child) return size else: if verbose: print ("compute_node_size not implemented for nodes of type:", type(node), "or training has not finished") return 0 def compute_flow_size(flow): """ Computes the number of weights learned by the whole flow after training. See compute_node_size for more details on the counting procedure """ flow_size = 0 for node in flow: flow_size += compute_node_size(node) return flow_size def compute_largest_node_size(flow): """ Computes the larger number of weights learned by a node after training. 
See compute_node_size for more details on the counting procedure """ largest_size = 0 for node in flow: if (isinstance(node, mdp.nodes.SFANode) or isinstance(node, mdp.nodes.PCANode) or isinstance(node, mdp.nodes.WhiteningNode)): current_size = compute_node_size(node) elif isinstance(node, mdp.hinet.CloneLayer): current_size = compute_node_size(node.nodes[0]) elif isinstance(node, mdp.hinet.Layer): current_size = 0 for nodechild in node.nodes: tmp_size = compute_node_size(nodechild) if tmp_size > current_size: current_size = tmp_size else: current_size = 0 if current_size > largest_size: largest_size = current_size return largest_size # Used to compare the effectiveness of several PCA Networks def estimate_explained_variance(images, flow, sl_images, num_considered_images=100, verbose=True): # Here explained variance is defined as 1 - normalized reconstruction error num_images = images.shape[0] im_numbers = numpy.random.randint(num_images, size=num_considered_images) avg_image = images[im_numbers].mean(axis=0) selected_images = images[im_numbers] ori_differences = selected_images - avg_image ori_energies = ori_differences ** 2 ori_energy = ori_energies.sum() sl_selected_images = sl_images[im_numbers] print ("sl_selected_images.shape=", sl_selected_images.shape) inverses = flow.inverse(sl_selected_images) rec_differences = inverses - avg_image rec_energies = rec_differences ** 2 rec_energy = rec_energies.sum() rec_errors = selected_images - inverses rec_error_energies = rec_errors ** 2 rec_error_energy = rec_error_energies.sum() if verbose: explained_individual = rec_energies.sum(axis=1) / ori_energies.sum(axis=1) print ("Individual explained variances: ", explained_individual) print ("Which, itself has standar deviation: ", explained_individual.std()) print ("Therefore, estimated explained variance has std of about: ", explained_individual.std() / numpy.sqrt( num_considered_images)) print ("Dumb reconstruction_energy/original_energy=", rec_energy / ori_energy) print ("rec_error_energy/ori_energy=", rec_error_energy / ori_energy) print ("Thus explained variance about:", 1 - rec_error_energy / ori_energy) return 1 - rec_error_energy / ori_energy # rec_energy/ori_energy class HeadNode(mdp.Node): """Preserve only the first k dimensions from the data """ def __init__(self, input_dim=None, output_dim=None, dtype=None): self.type = dtype super(HeadNode, self).__init__(input_dim=input_dim, output_dim=output_dim, dtype=dtype) def is_trainable(self): return True def _train(self, x): pass def _is_invertible(self): return True def _execute(self, x): if self.output_dim is None: er = "Warning 12345..." raise Exception(er) return x[:, 0:self.output_dim] def _stop_training(self): pass def _inverse(self, y): num_samples, out_dim = y.shape[0], y.shape[1] zz = numpy.zeros((num_samples, self.input_dim - out_dim)) return numpy.concatenate((y, zz), axis=1) # # This code is obsolete. # class SFAPCANode(mdp.Node): # """Node that extracts slow features unless their delta value is too high. In such a case PCA features are extracted. 
# """ # # def __init__(self, input_dim=None, output_dim=None, max_delta=1.95, sfa_args={}, pca_args={}, **argv): # super(SFAPCANode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv) # self.sfa_node = mdp.nodes.SFANode(**sfa_args) # # max delta value allowed for a slow feature, otherwise a principal component is extracted # self.max_delta = max_delta # self.avg = None # input average # self.W = None # weights for complete transformation # self.pinv = None # weights for pseudoinverse of complete transformation # # def is_trainable(self): # return True # # def _train(self, x, **argv): # self.sfa_node.train(x, **argv) # # @staticmethod # def _is_invertible(): # return True # # def _execute(self, x): # W = self.W # avg = self.avg # return numpy.dot(x - avg, W) # # def _stop_training(self, **argv): # # New GraphSFA node # if "_covdcovmtx" in dir(self.sfa_node): # # Warning, fix is computed twice. TODO: avoid double computation # C, self.avg, CD = self.sfa_node._covdcovmtx.fix() # else: # # Old fix destroys data... so we copy the matrices first. # cov_mtx = copy.deepcopy(self.sfa_node._cov_mtx) # dcov_mtx = copy.deepcopy(self.sfa_node._dcov_mtx) # # C, self.avg, tlen = cov_mtx.fix() # DC, davg, dtlen = dcov_mtx.fix() # # dim = C.shape[0] # type_ = C.dtype # self.sfa_node.stop_training() # d = self.sfa_node.d # sfa_output_dim = len(d[d <= self.max_delta]) # sfa_output_dim = min(sfa_output_dim, self.output_dim) # print ("sfa_output_dim=", sfa_output_dim) # # Wsfa = self.sfa_node.sf[:, 0:sfa_output_dim] # print ("Wsfa.shape=", Wsfa.shape) # if Wsfa.shape[1] == 0: # No slow components will be used # print ("No Psfa created") # PS = numpy.zeros((dim, dim), dtype=type_) # else: # Psfa = pinv(Wsfa) # print ("Psfa.shape=", Psfa.shape) # PS = numpy.dot(Wsfa, Psfa) # # print ("PS.shape=", PS.shape) # Cproy = numpy.dot(PS, numpy.dot(C, PS.T)) # Cpca = C - Cproy # # if self.output_dim is None: # self.output_dim = dim # # pca_output_dim = self.output_dim - sfa_output_dim # print ("PCA output_dim=", pca_output_dim) # if pca_output_dim > 0: # pca_node = mdp.nodes.PCANode(output_dim=pca_output_dim) # WARNING: WhiteningNode should be used here # pca_node._cov_mtx._dtype = type_ # pca_node._cov_mtx._input_dim = dim # pca_node._cov_mtx._avg = numpy.zeros(dim, type_) # pca_node._cov_mtx.bias = True # pca_node._cov_mtx._tlen = 1 # WARNING!!! 1 # pca_node._cov_mtx._cov_mtx = Cpca # pca_node._input_dim = dim # pca_node._train_phase_started = True # pca_node.stop_training() # print ("pca_node.d=", pca_node.d) # print ("1000000 * pca_node.d[0]=", 1000000 * pca_node.d[0]) # # Wpca = pca_node.v # Ppca = pca_node.v.T # else: # Wpca = numpy.array([]).reshape((dim, 0)) # Ppca = numpy.array([]).reshape((0, dim)) # # print ("Wpca.shape=", Wpca.shape) # print ("Ppca.shape=", Ppca.shape) # # self.W = numpy.concatenate((Wsfa, Wpca), axis=1) # self.pinv = None # WARNING, why this does not work correctly: numpy.concatenate((Psfa, Ppca),axis=0) ????? 
# # print "Pinv 1=", self.pinv # # print "Pinv 2-Pinv1=", pinv(self.W)-self.pinv # print ("W.shape=", self.W.shape) # # print "pinv.shape=", self.pinv.shape # print ("avg.shape=", self.avg.shape) # # def _inverse(self, y): # if self.pinv is None: # print ("Computing PINV", end="") # self.pinv = pinv(self.W) # return numpy.dot(y, self.pinv) + self.avg # Computes the variance of some MDP data array def data_variance(x): return ((x - x.mean(axis=0)) ** 2).sum(axis=1).mean() def estimate_explained_var_linearly(x, y, x_test, y_test): x_test_app = approximate_linearly(x, y, y_test) explained_variance = compute_explained_var(x_test, x_test_app) x_variance = data_variance(x_test) print ("x_variance=", x_variance, ", explained_variance=", explained_variance) return explained_variance / x_variance def approximate_linearly(x, y, y_test): lr_node = mdp.nodes.LinearRegressionNode(use_pseudoinverse=True) lr_node.train(y, x) lr_node.stop_training() x_test_app = lr_node.execute(y_test) return x_test_app # Approximates x from y, and computes how sensitive the estimation is to changes in y def sensivity_of_linearly_approximation(x, y): lr_node = mdp.nodes.LinearRegressionNode(use_pseudoinverse=True) lr_node.train(y, x) lr_node.stop_training() beta = lr_node.beta[1:, :] # bias is used by default, we do not need to consider it print ("beta.shape=", beta.shape) sens = (beta ** 2).sum(axis=1) return sens def estimate_explained_var_with_kNN(x, y, max_num_samples_for_ev=None, max_test_samples_for_ev=None, k=1, ignore_closest_match=False, operation="average"): num_samples = x.shape[0] indices_all_x = numpy.arange(x.shape[0]) if max_num_samples_for_ev is not None: # use all samples for reconstruction max_num_samples_for_ev = min(max_num_samples_for_ev, num_samples) indices_all_x_selection = indices_all_x + 0 numpy.random.shuffle(indices_all_x_selection) indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev] x_sel = x[indices_all_x_selection] y_sel = y[indices_all_x_selection] else: x_sel = x y_sel = y if max_test_samples_for_ev is not None: # use all samples for reconstruction max_test_samples_for_ev = min(max_test_samples_for_ev, num_samples) indices_all_x_selection = indices_all_x + 0 numpy.random.shuffle(indices_all_x_selection) indices_all_x_selection = indices_all_x_selection[0:max_test_samples_for_ev] x_test = x[indices_all_x_selection] y_test = y[indices_all_x_selection] else: x_test = x y_test = y x_app_test = approximate_kNN_op(x_sel, y_sel, y_test, k, ignore_closest_match, operation=operation) print ("x_test=", x_test) print ("x_app_test=", x_app_test) explained_variance = compute_explained_var(x_test, x_app_test) test_variance = data_variance(x_test) print ("explained_variance=", explained_variance) print ("test_variance=", test_variance) return explained_variance / test_variance def random_subindices(num_indices, size_selection): if size_selection > num_indices: ex = "Error, size_selection is larger than num_indices! (", size_selection, ">", num_indices, ")" raise Exception(ex) all_indices = numpy.arange(num_indices) numpy.random.shuffle(all_indices) return all_indices[0:size_selection] + 0 def estimate_explained_var_linear_global(subimages_train, sl_seq_training, subimages_newid, sl_seq_newid, reg_num_signals, number_samples_EV_linear_global): """Function that computes how much variance is explained linearly from a global mapping. It works as follows: 1) Linear regression is trained with sl_seq_training and subimages_train. 
2) Estimation is done on subset of size number_samples_EV_linear_global from training and test data 3) For training data evaluation is done on the same data used to train LR, and on new random subset of data. 4) For test data all samples are used. """ indices_all_train1 = random_subindices(subimages_train.shape[0], number_samples_EV_linear_global) indices_all_train2 = random_subindices(subimages_train.shape[0], number_samples_EV_linear_global) indices_all_newid = numpy.arange(subimages_newid.shape[0]) lr_node = mdp.nodes.LinearRegressionNode() sl_seq_training_sel1 = sl_seq_training[indices_all_train1, 0:reg_num_signals] subimages_train_sel1 = subimages_train[indices_all_train1] lr_node.train(sl_seq_training_sel1, subimages_train_sel1) # Notice that the input "x"=n_sfa_x and the output to learn is "y" = x_pca lr_node.stop_training() subimages_train_app1 = lr_node.execute(sl_seq_training_sel1) EVLinGlobal_train1 = compute_explained_var(subimages_train_sel1, subimages_train_app1) data_variance_train1 = data_variance(subimages_train_sel1) sl_seq_training_sel2 = sl_seq_training[indices_all_train2, 0:reg_num_signals] subimages_train_sel2 = subimages_train[indices_all_train2] subimages_train_app2 = lr_node.execute(sl_seq_training_sel2) EVLinGlobal_train2 = compute_explained_var(subimages_train_sel2, subimages_train_app2) data_variance_train2 = data_variance(subimages_train_sel2) sl_seq_newid_sel = sl_seq_newid[indices_all_newid, 0:reg_num_signals] subimages_newid_sel = subimages_newid[indices_all_newid] subimages_newid_app = lr_node.execute(sl_seq_newid_sel) EVLinGlobal_newid = compute_explained_var(subimages_newid_sel, subimages_newid_app) data_variance_newid = data_variance(subimages_newid_sel) print ("Data variances=", data_variance_train1, data_variance_train2, data_variance_newid) print ("EVLinGlobal=", EVLinGlobal_train1, EVLinGlobal_train2, EVLinGlobal_newid) return EVLinGlobal_train1 / data_variance_train1, EVLinGlobal_train2 / data_variance_train2, \ EVLinGlobal_newid / data_variance_newid def compute_explained_var(true_samples, approximated_samples): """Computes the explained variance provided by the approximation to some data, with respect to the true data. Additionally, the original data variance is provided: app = true_samples + error exp_var ~ energy(true_samples) - energy(error) """ error = (approximated_samples - true_samples) error_energy = (error ** 2.0).sum(axis=1).mean() # average squared error per sample true_energy = data_variance(true_samples) # (true_samples-true_samples.mean(axis=0)).var() explained_var = true_energy - error_energy # print "Debug information:", error_energy, true_energy return explained_var def approximate_kNN_op(x, x_exp, y_exp, k=1, ignore_closest_match=False, operation=None): """ Approximates a signal y given its expansion y_exp. The method is kNN with training data given by x, x_exp If label_avg=True, the inputs of the k closest expansions are averaged, otherwise the most frequent among k-closest is returned. When label_avg=True, one can also specify to ignore the best match (useful if y_exp = x_exp) """ n = mdp.nodes.KNNClassifier(k=k, execute_method="label") n.train(x_exp, range(len(x_exp))) if operation == "average": n.stop_training() ii = n.klabels(y_exp) if ignore_closest_match and k == 1: ex = "Error, k==1 but ignoring closest match!" 
raise Exception(ex) elif ignore_closest_match: ii = ii[:, 1:] y = x[ii].mean(axis=1) # y_exp_app = x_exp[ii].mean(axis=1) # print "Error for y_exp is:", ((y_exp_app - y_exp)**2).sum(axis=1).mean() # print "y=",y return y # x[ii].mean(axis=1) elif operation == "lin_app": n.stop_training() ii = n.klabels(y_exp) if ignore_closest_match and k == 1: ex = "Error, k==1 but ignoring closest match!" raise Exception(ex) elif ignore_closest_match: ii = ii[:, 1:] x_dim = x.shape[1] x_exp_dim = x_exp.shape[1] x_mean = x.mean(axis=0) x = x - x_mean nk = ii.shape[1] y = numpy.zeros((len(y_exp), x_dim)) y_exp_app = numpy.zeros((len(y_exp), x_exp_dim)) x_ind = x[ii] x_exp_ind = x_exp[ii] y_expit = numpy.zeros((x_exp_dim + 1, 1)) k = 1.0e10 # make larger to force sum closer to one?! y_expit[x_exp_dim, 0] = 0.0 * 1.0 * k x_expit = numpy.zeros((x_exp_dim + 1, nk)) x_expit[x_exp_dim, :] = 1.0 * k # zero_threshold = -40.0500 # -0.004 max_zero_weights = nk // 5 w_0 = numpy.ones((nk, 1)) * 1.0 / nk # print "w_0", w_0 for i in range(len(y_exp)): negative_weights = 0 iterate = True # print "Iteration: ", i, x_expit[0:x_exp_dim, :] = x_exp_ind[i].T y_0 = numpy.dot(x_exp_ind[i].T, w_0) fixing_zero_threshold = zero_threshold * 500 while iterate: # print x_exp_ind[i].T.shape # print x_expit[0:x_exp_dim,:].shape x_pinv = numpy.linalg.pinv(x_expit) # print y_0.shape, y_exp[i].shape y_expit[0:x_exp_dim, 0] = y_exp[i] - y_0.flatten() w_i = numpy.dot(x_pinv, y_expit) + w_0 iterate = False if (w_i < zero_threshold).any(): # print "w_i[:,0] =", w_i[:,0] # print "x_expit = ", x_expit negative_weights += (w_i < fixing_zero_threshold).sum() negative_elements = numpy.arange(nk)[w_i[:, 0] < fixing_zero_threshold] numpy.random.shuffle(negative_elements) for nn in negative_elements: # print "nn=", nn x_expit[0:x_exp_dim + 1, nn] = 0.0 # print "negative_elements", negative_elements iterate = True fixing_zero_threshold /= 2 if negative_weights >= max_zero_weights: iterate = False # FORCE SUM WEIGHTS=1: # print "w_i[:,0] =", w_i[:,0] # print "weight sum=",w_i.sum(),"min_weight=",w_i.min(),"max_weight=",w_i.max(), # "negative weights=", negative_weights w_i /= w_i.sum() # print "y[i].shape", y[i].shape # print "as.shape", numpy.dot(x_ind[i].T, w_i).T.shape y[i] = numpy.dot(x_ind[i].T, w_i).T + x_mean # numpy.dot(w_i, x_ind[i]).T y_exp_app[i] = numpy.dot(x_exp_ind[i].T, w_i).T if w_i.min() < zero_threshold: # 0.1: #negative_weights >= max_zero_weights: # quit()max_zero_weights print ("Warning smallest weight is", w_i.min(), "thus replacing with simple average") # print "Warning, at least %d all weights turned out to be negative! (%d)"%(max_zero_weights, # negative_weights) # print x_ind[i] # print x_ind[i].shape y[i] = x_ind[i].mean(axis=0) print (".", end="") # print "Error for y_exp is:", ((y_exp_app - y_exp)**2).sum(axis=1).mean() # print "y=",y return y # x[ii].mean(axis=1) elif operation == "plainKNN": ii = n.execute(y_exp) ret = x[ii] return ret else: er = "operation unknown:", operation raise Exception(er) def approximate_kNN(x, x_exp, y_exp, k=1, ignore_closest_match=False, label_avg=True): n = mdp.nodes.KNNClassifier(k=k, execute_method="label") n.train(x_exp, range(len(x_exp))) if label_avg: n.stop_training() ii = n.klabels(y_exp) if ignore_closest_match and k == 1: ex = "Error, k==1 but ignoring closest match!" 
raise Exception(ex) elif ignore_closest_match: ii = ii[:, 1:] y = x[ii].mean(axis=1) return y # x[ii].mean(axis=1) else: ii = n.execute(y_exp) ret = x[ii] return ret def rank_expanded_signals_max_linearly(x, x_exp, y, y_exp, max_comp=10, max_num_samples_for_ev=None, max_test_samples_for_ev=None, verbose=False): """ Third ranking method. More robust and closer to max EV(x; y_i + Y)-EV(x;Y) for all Y, EV computed linearly. Ordering and scoring of signals respects principle of best incremental feature selection Computes a scores vector that measures the importance of each expanded component at reconstructing a signal x, x_exp are training data, y and y_exp are test data At most max_comp are evaluated exhaustively, the rest is set equal to the remaining """ dim_out = x_exp.shape[1] all_indices = numpy.arange(dim_out) indices_all_x = numpy.arange(x.shape[0]) indices_all_y = numpy.arange(y.shape[0]) max_scores = numpy.zeros(dim_out) available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored taken = [] # list with the same elements. # Compute maximum explainable variance (taking all components) total_variance = data_variance(y) last_explained_var = 0.0 last_score = 0.0 for iteration in range(min(max_comp, dim_out)): # find individual contribution to expl var, from not taken indices_available = all_indices[available_mask] # mapping from index_short to index_long temp_explained_vars = numpy.zeros( dim_out - iteration) # s_like(indices_available, dtype=") #explained variances for each available index # On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]: indices_all_x_selection = indices_all_x + 0 numpy.random.shuffle(indices_all_x_selection) indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev] x_sel = x[indices_all_x_selection] x_exp_sel = x_exp[indices_all_x_selection] else: x_sel = x x_exp_sel = x_exp if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]: indices_all_y_selection = indices_all_y + 0 numpy.random.shuffle(indices_all_y_selection) indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev] y_sel = y[indices_all_y_selection] y_exp_sel = y_exp[indices_all_y_selection] else: y_sel = y y_exp_sel = y_exp if verbose: print ("indices available=", indices_available) for index_short, index_long in enumerate(indices_available): taken_tmp = list(taken) # Copy the taken list taken_tmp.append(index_long) # Add index_long to it x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables y_exp_tmp_sel = y_exp_sel[:, taken_tmp] y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel) # print "QQQ=", compute_explained_var(y_sel, y_app_sel) temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var if verbose: print ("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % (index_short, index_long, temp_explained_vars[index_short])) # Update scores max_scores[indices_available] = numpy.maximum(max_scores[indices_available], temp_explained_vars - last_explained_var) # select maximum # print "temp_explained_vars=", temp_explained_vars max_explained_var_index_short = temp_explained_vars.argmax() # print "max_explained_var_index_short=", max_explained_var_index_short # print "indices_available=",indices_available max_explained_var_index_long = indices_available[max_explained_var_index_short] if 
verbose: print ("Selecting index short:", max_explained_var_index_short, end="") print (" and index_ long:", max_explained_var_index_long) # mark as taken and update temporal variables taken.append(max_explained_var_index_long) available_mask[max_explained_var_index_long] = False # last_score = scores[max_explained_var_index_long] last_explained_var = temp_explained_vars[max_explained_var_index_short] print ("brute max_scores = ", max_scores) print ("brute taken = ", taken) # Find ordering of variables not yet taken if max_comp < dim_out: max_explained_var_indices_short = temp_explained_vars.argsort()[::-1][1:] # In increasing order, then remove first element, which was already added to taken for max_explained_var_index_short in max_explained_var_indices_short: taken.append(indices_available[max_explained_var_index_short]) print ("final taken = ", taken) # Make scoring decreasing in ordering stored in taken last_explained_var = max(last_explained_var, 0.01) # For numerical reasons last_max_score = -numpy.inf sum_max_scores = 0.0 for i, long_index in enumerate(taken): current_max_score = max_scores[long_index] sum_max_scores += current_max_score if current_max_score > last_max_score and i > 0: max_scores[long_index] = last_max_score tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum() max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1) last_max_score = max_scores[long_index] # print "iteration max_scores = ", max_scores print ("preeliminar max_scores = ", max_scores) # max_scores *= (last_explained_var / max_scores.sum())**0.5 # NOTE: last_explained_var is not the data variance. # Here it is the variance up to max_comp components # 3 options: all features, first max_comp features, output_dim features max_scores *= (last_explained_var / max_scores.sum()) ** 0.5 print ("final max_scores = ", max_scores) if (max_scores == 0.0).any(): print ("WARNING, removing 0.0 max_scores!") max_score_min = (max_scores[max_scores > 0.0]).min() # TODO:Find reasonable way to fix this, is this causing the distorted reconstructions??? max_scores += max_score_min * 0.001 # max_scores += (max_scores[max_scores>0.0]) return max_scores def rank_expanded_signals_max(x, x_exp, y, y_exp, max_comp=10, k=1, operation="average", max_num_samples_for_ev=None, max_test_samples_for_ev=None, offsetting_mode="max_comp features", verbose=False): """ This Second ranking method more robust and closer to max I(x; y_i + Y)-I(x;Y) for all Y. Ordering and scoring of signals respects principle of best incremental feature selection Computes a scores vector that measures the importance of each expanded component at reconstructing a signal x, x_exp are training data, y and y_exp are test data At most max_comp are evaluated exhaustively, the rest is set equal to the remaining """ dim_out = x_exp.shape[1] all_indices = numpy.arange(dim_out) indices_all_x = numpy.arange(x.shape[0]) indices_all_y = numpy.arange(y.shape[0]) max_scores = numpy.zeros(dim_out) available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored taken = [] # list with the same elements. 
# Compute maximum explainable variance (taking all components) total_variance = data_variance(y) last_explained_var = 0.0 last_score = 0.0 for iteration in range(min(max_comp, dim_out)): # find individual contribution to expl var, from not taken indices_available = all_indices[available_mask] # mapping from index_short to index_long temp_explained_vars = numpy.zeros( dim_out - iteration) # s_like(indices_available, dtype=") #explained variances for each available index # On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]: indices_all_x_selection = indices_all_x + 0 numpy.random.shuffle(indices_all_x_selection) indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev] x_sel = x[indices_all_x_selection] x_exp_sel = x_exp[indices_all_x_selection] else: x_sel = x x_exp_sel = x_exp if max_test_samples_for_ev is notNone and max_test_samples_for_ev < x.shape[0]: indices_all_y_selection = indices_all_y + 0 numpy.random.shuffle(indices_all_y_selection) indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev] y_sel = y[indices_all_y_selection] y_exp_sel = y_exp[indices_all_y_selection] else: y_sel = y y_exp_sel = y_exp if verbose: print ("indices available=", indices_available) for index_short, index_long in enumerate(indices_available): taken_tmp = list(taken) # Copy the taken list taken_tmp.append(index_long) # Add index_long to it x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables y_exp_tmp_sel = y_exp_sel[:, taken_tmp] if operation == "linear_rec": y_app_sel = approximate_linearly(x_sel, x_exp_tmp_sel, y_exp_tmp_sel) else: y_app_sel = approximate_kNN_op(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True, operation=operation) # invert from taken variables # print "QQQ=", compute_explained_var(y_sel, y_app_sel) temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var if verbose: print ("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % ( index_short, index_long, temp_explained_vars[index_short])) # Update scores max_scores[indices_available] = numpy.maximum(max_scores[indices_available], temp_explained_vars - last_explained_var) # select maximum # print "temp_explained_vars=", temp_explained_vars max_explained_var_index_short = temp_explained_vars.argmax() # print "max_explained_var_index_short=", max_explained_var_index_short # print "indices_available=",indices_available max_explained_var_index_long = indices_available[max_explained_var_index_short] if verbose: print("Selecting index short:", max_explained_var_index_short, " and index_ long:", max_explained_var_index_long) # mark as taken and update temporal variables taken.append(max_explained_var_index_long) available_mask[max_explained_var_index_long] = False # last_score = scores[max_explained_var_index_long] last_explained_var = temp_explained_vars[max_explained_var_index_short] print("brute max_scores = ", max_scores) print("brute taken = ", taken) # Find ordering of variables not yet taken if max_comp < dim_out: max_explained_var_indices_short = \ temp_explained_vars.argsort()[::-1][1:] # In increasing order, then remove first element, which was already added to taken for max_explained_var_index_short in max_explained_var_indices_short: taken.append(indices_available[max_explained_var_index_short]) print("final taken = ", taken) # Make scoring decreasing in ordering stored in taken 
last_explained_var = max(last_explained_var, 0.01) # For numerical reasons last_max_score = -numpy.inf sum_max_scores = 0.0 for i, long_index in enumerate(taken): current_max_score = max_scores[long_index] sum_max_scores += current_max_score if current_max_score > last_max_score and i > 0: max_scores[long_index] = last_max_score tmp_sum_max_scores = max_scores[taken[0:i + 1]].sum() max_scores[taken[0:i + 1]] += (sum_max_scores - tmp_sum_max_scores) / (i + 1) last_max_score = max_scores[long_index] # print "iteration max_scores = ", max_scores print("preeliminar max_scores = ", max_scores) # Compute explained variance with all features indices_all_x_selection = random_subindices(x.shape[0], max_num_samples_for_ev) x_sel = x[indices_all_x_selection] x_exp_sel = x_exp[indices_all_x_selection] indices_all_y_selection = random_subindices(y.shape[0], max_test_samples_for_ev) y_sel = y[indices_all_y_selection] y_exp_sel = y_exp[indices_all_y_selection] if operation == "linear_rec": y_app_sel = approximate_linearly(x_sel, x_exp_sel, y_exp_sel) else: y_app_sel = approximate_kNN_op(x_sel, x_exp_sel, y_exp_sel, k=k, ignore_closest_match=True, operation=operation) # invert from taken variables explained_var_all_feats = compute_explained_var(y_sel, y_app_sel) print("last_explained_var =", last_explained_var) print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance) # max_scores *= (last_explained_var / max_scores.sum())**0.5 # NOTE: last_explained_var is not the data variance. It is the variance up to max_comp components # 3 options: all scores, max_comp scores, output_dim scores (usually all scores) if offsetting_mode == "max_comp features": max_scores *= (last_explained_var / max_scores.sum()) elif offsetting_mode == "all features": print("explained_var_all_feats=", explained_var_all_feats, "total input variance:", total_variance) max_scores *= (explained_var_all_feats / max_scores.sum()) elif offsetting_mode == "all features smart": max_scores *= (last_explained_var / max_scores.sum()) print("scaled max_scores=", max_scores) max_scores += (explained_var_all_feats - last_explained_var) / max_scores.shape[0] print("offsetted max_scores=", max_scores) elif offsetting_mode == "democratic": max_scores = numpy.ones_like(max_scores) * explained_var_all_feats / max_scores.shape[0] print("democractic max_scores=", max_scores) elif offsetting_mode == "linear": # Code fixed!!! max_scores = numpy.arange(dim_out, 0, -1) * explained_var_all_feats / (dim_out * (dim_out + 1) / 2) print("linear max_scores=", max_scores) elif offsetting_mode == "sensitivity_based": sens = sensivity_of_linearly_approximation(x_sel, x_exp_sel) max_scores = sens * explained_var_all_feats / sens.sum() print("sensitivity_based max_scores=", max_scores) else: ex = "offsetting_mode unknown", offsetting_mode raise Exception(ex) print("final max_scores = ", max_scores) if (max_scores == 0.0).any(): print("WARNING, removing 0.0 max_scores!") max_score_min = (max_scores[max_scores > 0.0]).min() max_scores += max_score_min * 0.001 # TODO:Find reasonable way to fix this, is this causing the distorted reconstructions??? # max_scores += (max_scores[max_scores>0.0]) return max_scores # TODO: Improve: if max_comp < output_dim choose remaining features from the last evaluation of explained variances. 
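# --- Illustrative aside (not part of the original module) --------------------
# The two ranking functions above share the same greedy "incremental explained
# variance" pattern: repeatedly add the expanded feature whose inclusion gives
# the largest jump in how well x can be reconstructed, and record that jump as
# the feature's score. The toy sketch below reproduces only that selection
# loop, using a plain least-squares reconstruction in place of
# approximate_linearly / approximate_kNN_op; the function name and the demo
# data are hypothetical and do not belong to the library.
import numpy  # already imported at module level in the original file


def toy_rank_by_incremental_ev(x, x_exp, max_comp=3):
    """Greedy scores: gain in explained variance of x when adding each column of x_exp."""
    def explained_var(x, x_app):
        total = ((x - x.mean(axis=0)) ** 2).sum()
        residual = ((x - x_app) ** 2).sum()
        return total - residual

    dim_out = x_exp.shape[1]
    scores = numpy.zeros(dim_out)
    taken = []
    last_ev = 0.0
    for _ in range(min(max_comp, dim_out)):
        available = [j for j in range(dim_out) if j not in taken]
        gains = []
        for j in available:
            cols = x_exp[:, taken + [j]]
            coef, _, _, _ = numpy.linalg.lstsq(cols, x, rcond=None)  # linear reconstruction of x
            gains.append(explained_var(x, cols.dot(coef)))
        best = int(numpy.argmax(gains))
        scores[available[best]] = gains[best] - last_ev  # incremental gain becomes the score
        last_ev = gains[best]
        taken.append(available[best])
    return scores, taken
# usage sketch (toy data):
#   x = numpy.random.normal(size=(200, 2))
#   x_exp = numpy.column_stack([x[:, 0], x[:, 1], numpy.random.normal(size=200)])
#   scores, taken = toy_rank_by_incremental_ev(x, x_exp)   # the noise column scores ~0
# ------------------------------------------------------------------------------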
def rank_expanded_signals(x, x_exp, y, y_exp, max_comp=10, k=1, linear=False, max_num_samples_for_ev=None, max_test_samples_for_ev=None, verbose=False): """ Computes a scores vector that measures the importance of each expanded component at reconstructing a signal x, x_exp are training data, y and y_exp are test data At most max_comp are evaluated exhaustively, the rest is set equal to the remaining """ dim_out = x_exp.shape[1] all_indices = numpy.arange(dim_out) indices_all_x = numpy.arange(x.shape[0]) indices_all_y = numpy.arange(y.shape[0]) scores = numpy.zeros(dim_out) available_mask = numpy.zeros(dim_out) >= 0 # boolean mask that indicates which elements are not yet scored taken = [] # list with the same elements. # Compute maximum explainable variance (taking all components) total_variance = data_variance(y) last_explained_var = 0.0 last_score = 0.0 for iteration in range(min(max_comp, dim_out)): # find individual contribution to expl var, from not taken indices_available = all_indices[available_mask] # mapping from index_short to index_long temp_explained_vars = numpy.zeros( dim_out - iteration) # s_like(indices_available, dtype=") #explained variances for each available index # On each iteration, the subset of samples used for testing and samples for reconstruction are kept fixed if max_num_samples_for_ev is not None and max_num_samples_for_ev < x.shape[0]: indices_all_x_selection = indices_all_x + 0 numpy.random.shuffle(indices_all_x_selection) indices_all_x_selection = indices_all_x_selection[0:max_num_samples_for_ev] x_sel = x[indices_all_x_selection] x_exp_sel = x_exp[indices_all_x_selection] else: x_sel = x x_exp_sel = x_exp if max_test_samples_for_ev is not None and max_test_samples_for_ev < x.shape[0]: indices_all_y_selection = indices_all_y + 0 numpy.random.shuffle(indices_all_y_selection) indices_all_y_selection = indices_all_y_selection[0:max_test_samples_for_ev] y_sel = y[indices_all_y_selection] y_exp_sel = y_exp[indices_all_y_selection] else: y_sel = y y_exp_sel = y_exp if verbose: print("indices available=", indices_available) for index_short, index_long in enumerate(indices_available): taken_tmp = list(taken) # Copy the taken list taken_tmp.append(index_long) # Add index_long to it x_exp_tmp_sel = x_exp_sel[:, taken_tmp] # Select the variables y_exp_tmp_sel = y_exp_sel[:, taken_tmp] y_app_sel = approximate_kNN(x_sel, x_exp_tmp_sel, y_exp_tmp_sel, k=k, ignore_closest_match=True, label_avg=True) # invert from taken variables # print "QQQ=", compute_explained_var(y_sel, y_app_sel) temp_explained_vars[index_short] = compute_explained_var(y_sel, y_app_sel) # compute explained var if verbose: print("taken_tmp=", taken_tmp, "temp_explained_vars[%d (long = %d) ]=%f" % ( index_short, index_long, temp_explained_vars[index_short])) # select maximum # print "temp_explained_vars=", temp_explained_vars max_explained_var_index_short = temp_explained_vars.argmax() # print "max_explained_var_index_short=", max_explained_var_index_short # print "indices_available=",indices_available max_explained_var_index_long = indices_available[max_explained_var_index_short] if verbose: print("Selecting index short:", max_explained_var_index_short) print(" and index_ long:", max_explained_var_index_long) # update total explained var & scores # Add logic to robustly handle strange contributions: 3, 2, 1, 4 => 5, 2.5, 1.25, 1.25 ? # TODO:FIX NORMALIZATION WHEN FIRST SCORES ARE ZERO OR NEGATIVE! 
# TODO:NORMALIZATION SHOULD BE OPTIONAL, SINCE IT WEAKENS THE INTERPRETATION OF THE SCORES explained_var = max(temp_explained_vars[max_explained_var_index_short], 0.0) new_score = explained_var - last_explained_var if verbose: print("new_score raw = ", new_score) new_score = max(new_score, 0.0) if new_score > last_score and iteration > 0: new_score = last_score # Here some options are available to favour components taken first scores[max_explained_var_index_long] = new_score if verbose: print("tmp scores = ", scores) # normalize scores, so that they sume up to explained_var sum_scores = scores.sum() residual = max(explained_var, 0.0) - sum_scores if residual > 0.0: correction = residual / (iteration + 1) scores[taken] += correction scores[max_explained_var_index_long] += correction # scores = scores * explained_var / (sum_scores+1e-6) #TODO:CORRECT THIS; INSTEAD OF FACTOR USE ADDITIVE TERM if verbose: print("normalized scores = ", scores, "sum to:", scores.sum(), "explained_var =", explained_var) # mark as taken and update temporal variables taken.append(max_explained_var_index_long) available_mask[max_explained_var_index_long] = False last_score = scores[max_explained_var_index_long] last_explained_var = explained_var # handle variables not used, assign equal scores to all of them preserve_last_evaluation = True if preserve_last_evaluation and max_comp < dim_out: # The score of the last feature found will be modified, as well as of not yet found features # TODO: Take care of negative values if last_score <= 0.0: last_score = 0.01 # Just some value is needed here remaining_output_features = len(temp_explained_vars) # including feature already processed remaining_ordered_explained_variances_short_index = numpy.argsort(temp_explained_vars)[::-1] remaining_ordered_explained_variances_long_index = indices_available[ remaining_ordered_explained_variances_short_index] remaining_ordered_explained_variances = temp_explained_vars[ remaining_ordered_explained_variances_short_index] + 0.0 remaining_total_contribution = last_score print("last_score=", last_score) beta = 0.95 remaining_ordered_explained_variances[ remaining_ordered_explained_variances <= 0.0] = 0.0001 # To avoid division over zero, numerical hack # numpy.clip(remaining_ordered_explained_variances, 0.0, None) fails here!!!! 
print("remaining_ordered_explained_variances=", remaining_ordered_explained_variances) minimum = remaining_ordered_explained_variances.min() # first element ev_sum = remaining_ordered_explained_variances.sum() normalized_scores = (remaining_total_contribution / (ev_sum - remaining_output_features * minimum) * beta) * \ (remaining_ordered_explained_variances - minimum) + \ ((1.0 - beta) / remaining_output_features) * remaining_total_contribution print("normalized_scores=", normalized_scores) print("remaining_ordered_explained_variances_long_index=", remaining_ordered_explained_variances_long_index) print(scores.dtype) print(normalized_scores.dtype) scores[remaining_ordered_explained_variances_long_index] = normalized_scores else: # rest_explained_variance = total_variance-last_explained_var sum_scores = scores.sum() rest_explained_variance = total_variance - sum_scores if verbose: print("rest_explained_variance=", rest_explained_variance) correction = rest_explained_variance / dim_out scores += correction if (scores == 0.0).any(): print("WARNING, removing 0.0 scores!") scores += 0.0001 # num_unused = dim_out - max_comp # scores[available_mask] = min(rest_explained_variance / num_unused, last_score) # sum_scores = scores.sum() # scores = scores * explained_var / (sum_scores+1e-6) if verbose: print("final scores: ", scores) if verbose and linear and False: for i in indices_available: taken.append(i) scores[taken] = numpy.arange(dim_out - 1, -1, -1) # **2 #WARNING!!! QUADRATIC SCORES!!! scores = scores * total_variance / scores.sum() print("Overriding with linear scores:", scores) return scores # TODO: Remove this node, it is now obsolete class IEVMNode(mdp.Node): """ Node implementing simple Incremental Explained Variance Maximization. Extracted features are moderately useful for reconstruction, although this node does itself provide reconstruction. The expansion function is optional, as well as performing PCA on the scores. The added variance of the first k-outputs is equal to the explained variance of such k-outputs. """ def __init__(self, input_dim=None, output_dim=None, expansion_funcs=None, k=5, max_comp=None, max_num_samples_for_ev=None, max_test_samples_for_ev=None, use_pca=False, use_sfa=False, max_preserved_sfa=2.0, second_weighting=False, operation="average", out_sfa_filter=False, **argv): super(IEVMNode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv) if expansion_funcs is not None: self.exp_node = GeneralExpansionNode(funcs=expansion_funcs) else: self.exp_node = None self.sfa_node = None self.second_weighting = second_weighting self.use_pca = use_pca self.use_sfa = use_sfa if use_sfa and not use_pca: er = "Combination of use_sfa and use_pca not considered. 
Please activate use_pca or deactivate use_sfa" raise Exception(er) self.k = k self.max_comp = max_comp self.max_num_samples_for_ev = max_num_samples_for_ev self.max_test_samples_for_ev = max_test_samples_for_ev self.feature_scaling_factor = 0.5 # Factor that prevents amplitudes of features from growing across the network self.exponent_variance = 0.5 self.operation = operation self.max_preserved_sfa = max_preserved_sfa self.out_sfa_filter = out_sfa_filter @staticmethod def is_trainable(): return True def _train(self, x, block_size=None, train_mode=None, node_weights=None, edge_weights=None, scheduler=None, n_parallel=None, **argv): num_samples, self.input_dim = x.shape if self.output_dim is None: self.output_dim = self.input_dim if self.max_comp is None: self.max_comp = min(self.input_dim, self.output_dim) else: self.max_comp = min(self.max_comp, self.input_dim, self.output_dim) print("Training IEVMNode...") self.x_mean = x.mean(axis=0) # Remove mean before expansion x = x - self.x_mean if self.exp_node is not None: # Expand data print("expanding x...") exp_x = self.exp_node.execute(x) else: exp_x = x self.expanded_dim = exp_x.shape[1] self.exp_x_mean = exp_x.mean(axis=0) self.exp_x_std = exp_x.std(axis=0) print("self.exp_x_mean=", self.exp_x_mean) print("self.exp_x_std=", self.exp_x_std) if (self.exp_x_std == 0).any(): er = "zero-component detected" raise Exception(er) n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std # Remove media and variance from expansion print("ranking n_exp_x ...") rankings = rank_expanded_signals_max(x, n_exp_x, x, n_exp_x, max_comp=self.max_comp, k=self.k, operation=self.operation, max_num_samples_for_ev=self.max_num_samples_for_ev, max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=True) rankings *= self.feature_scaling_factor print("rankings=", rankings) if (rankings == 0).any(): er = "zero-component detected" raise Exception(er) self.perm1 = numpy.argsort(rankings)[::-1] # Sort in decreasing ranking self.magn1 = rankings print("self.perm1=", self.perm1) s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first if self.second_weighting: print("ranking s_x_1 ...") rankings_B = rank_expanded_signals_max(x, s_x_1, x, s_x_1, max_comp=self.max_comp, k=self.k, operation=self.operation, max_num_samples_for_ev=self.max_num_samples_for_ev, max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=False) print("rankings_B=", rankings_B) if (rankings_B == 0).any(): er = "zero-component detected" raise Exception(er) self.perm1_B = numpy.argsort(rankings_B)[::-1] # Sort in decreasing ranking self.magn1_B = rankings_B print("self.perm1_B=", self.perm1_B) # WARNING, this only works for normalized s_x_1 s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first else: s_x_1B = s_x_1 if self.use_sfa: self.sfa_node = mdp.nodes.SFANode() # TODO: Preserve amplitude self.sfa_node.train(s_x_1B, block_size=block_size, train_mode=train_mode) # , node_weights=None, edge_weights=None, scheduler = None, n_parallel=None) self.sfa_node.stop_training() print("self.sfa_node.d", self.sfa_node.d) # Adaptive mechanism based on delta values if isinstance(self.max_preserved_sfa, float): self.num_sfa_features_preserved = (self.sfa_node.d <= self.max_preserved_sfa).sum() elif isinstance(self.max_preserved_sfa, int): self.num_sfa_features_preserved = self.max_preserved_sfa else: ex = 
"Cannot handle type of self.max_preserved_sfa" print(ex) raise Exception(ex) # self.num_sfa_features_preserved = 10 sfa_x = self.sfa_node.execute(s_x_1B) # TODO: Change internal variables of SFANode, so that we do not need to zero some components # TODO: Is this equivalent to truncation of the matrices??? PERHAPS IT IS NOT !!! sfa_x[:, self.num_sfa_features_preserved:] = 0.0 proj_sfa_x = self.sfa_node.inverse(sfa_x) sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved] # Notice that sfa_x has WEIGHTED zero-mean, thus we correct this here? self.sfa_x_mean = sfa_x.mean(axis=0) self.sfa_x_std = sfa_x.std(axis=0) print("self.sfa_x_mean=", self.sfa_x_mean) print("self.sfa_x_std=", self.sfa_x_std) sfa_x -= self.sfa_x_mean sfa_removed_x = s_x_1B - proj_sfa_x # Remove sfa projection of data else: self.num_sfa_features_preserved = 0 sfa_x = numpy.ones((num_samples, 0)) sfa_removed_x = s_x_1B pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved if self.use_pca and pca_out_dim > 0: self.pca_node = mdp.nodes.PCANode(output_dim=pca_out_dim) self.pca_node.train(sfa_removed_x) # TODO:check that pca_out_dim > 0 pca_x = self.pca_node.execute(sfa_removed_x) self.pca_x_mean = pca_x.mean(axis=0) self.pca_x_std = pca_x.std(axis=0) print("self.pca_x_std=", self.pca_x_std) if (self.pca_x_std == 0).any(): er = "zero-component detected" raise Exception(er) # TODO: Is this step needed? if heuristic works well this weakens algorithm n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std else: n_pca_x = sfa_removed_x[:, 0:pca_out_dim] # Concatenate SFA and PCA signals and rank them preserving SFA components in ordering if self.use_pca or self.use_sfa: # TODO: Either both signals conserve magnitudes or they are both normalized sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1) sfa_pca_rankings = rank_expanded_signals_max(x, sfa_pca_x, x, sfa_pca_x, max_comp=self.max_comp, k=self.k, operation=self.operation, max_num_samples_for_ev=self.max_num_samples_for_ev, max_test_samples_for_ev=self.max_test_samples_for_ev, verbose=False) sfa_pca_rankings *= self.feature_scaling_factor # Only one magnitude normalization by node, but where should it be done? 
I guess after last transformation print("sfa_pca_rankings=", sfa_pca_rankings) if (sfa_pca_rankings == 0).any(): er = "zero-component detected" raise Exception(er) self.magn2 = sfa_pca_rankings perm2a = numpy.arange(self.num_sfa_features_preserved, dtype="int") perm2b = numpy.argsort(sfa_pca_rankings[self.num_sfa_features_preserved:])[::-1] self.perm2 = numpy.concatenate((perm2a, perm2b + self.num_sfa_features_preserved)) print("second permutation=", self.perm2) # WARNING, this only works for normalized sfa_pca_x s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking s_x_2 = s_x_2[:, self.perm2] # Permute with slow features first, and then most important signal first else: s_x_2 = n_pca_x # Tuncating output_dim components s_x_2_truncated = s_x_2[:, 0:self.output_dim] # Filtering output through SFA if self.out_sfa_filter: self.out_sfa_node = mdp.nodes.SFANode() self.out_sfa_node.train(s_x_2_truncated, block_size=block_size, train_mode=train_mode) self.out_sfa_node.stop_training() sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated) else: sfa_filtered = s_x_2_truncated self.stop_training() # def __init__(self, funcs, input_dim = None, dtype = None, \ # use_pseudoinverse=True, use_hint=False, max_steady_factor=1.5, \ # delta_factor=0.6, min_delta=0.00001): # # # # self.sfa_node.train(x, **argv) def _is_invertible(self): return True def _execute(self, x): x_orig = x + 0.0 num_samples = x.shape[0] zm_x = x - self.x_mean if self.exp_node: exp_x = self.exp_node.execute(zm_x) else: exp_x = zm_x n_exp_x = (exp_x - self.exp_x_mean) / self.exp_x_std if numpy.isnan(n_exp_x).any() or numpy.isinf(n_exp_x).any(): print("n_exp_x=", n_exp_x) quit() n_exp_x[numpy.isnan(n_exp_x)] = 0.0 if numpy.isnan(self.magn1).any(): print("self.magn1=", self.magn1) quit() s_x_1 = n_exp_x * self.magn1 ** self.exponent_variance # Scale according to ranking s_x_1 = s_x_1[:, self.perm1] # Permute with most important signal first if self.second_weighting: s_x_1B = s_x_1 * self.magn1_B ** self.exponent_variance # Scale according to ranking_B s_x_1B = s_x_1B[:, self.perm1_B] # Permute with most important signal first else: s_x_1B = s_x_1 if numpy.isnan(s_x_1B).any(): print("s_x_1B=", s_x_1B) quit() if self.use_sfa: sfa_x = self.sfa_node.execute(s_x_1B) # TODO: Change internal variables of SFANode, so that we do not need to zero some components sfa_x[:, self.num_sfa_features_preserved:] = 0.0 proj_sfa_x = self.sfa_node.inverse(sfa_x) sfa_x = sfa_x[:, 0:self.num_sfa_features_preserved] sfa_x -= self.sfa_x_mean sfa_removed_x = s_x_1B - proj_sfa_x else: sfa_x = numpy.ones((num_samples, 0)) sfa_removed_x = s_x_1B pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved if self.use_pca and pca_out_dim > 0: pca_x = self.pca_node.execute(sfa_removed_x) n_pca_x = (pca_x - self.pca_x_mean) / self.pca_x_std else: n_pca_x = sfa_removed_x[:, 0:pca_out_dim] if self.use_pca or self.use_sfa: sfa_pca_x = numpy.concatenate((sfa_x, n_pca_x), axis=1) s_x_2 = sfa_pca_x * self.magn2 ** self.exponent_variance # Scale according to ranking s_x_2 = s_x_2[:, self.perm2] # Permute with most important signal first else: s_x_2 = n_pca_x if numpy.isnan(s_x_2).any(): print("s_x_2=", s_x_2) quit() # Tuncating output_dim components s_x_2_truncated = s_x_2[:, 0:self.output_dim] # Filtering output through SFA if self.out_sfa_filter: sfa_filtered = self.out_sfa_node.execute(s_x_2_truncated) else: sfa_filtered = s_x_2_truncated verbose = False if verbose: print("x[0]=", x_orig[0]) print("x_zm[0]=", x[0]) 
print("exp_x[0]=", exp_x[0]) print("s_x_1[0]=", s_x_1[0]) print("sfa_removed_x[0]=", sfa_removed_x[0]) print("proj_sfa_x[0]=", proj_sfa_x[0]) print("pca_x[0]=", pca_x[0]) print("n_pca_x[0]=", n_pca_x[0]) print("sfa_x[0]=", sfa_x[0] + self.sfa_x_mean) print("s_x_2_truncated[0]=", s_x_2_truncated[0]) print("sfa_filtered[0]=", sfa_filtered[0]) return sfa_filtered # TODO:Code inverse with SFA def _inverse(self, y): num_samples = y.shape[0] if y.shape[1] != self.output_dim: er = "Serious dimensionality inconsistency:", y.shape[0], self.output_dim raise Exception(er) # input_dim = self.input_dim # De-Filtering output through SFA sfa_filtered = y if self.out_sfa_filter: s_x_2_truncated = self.out_sfa_node.inverse(sfa_filtered) else: s_x_2_truncated = sfa_filtered # De-Tuncating output_dim components s_x_2_full = numpy.zeros((num_samples, self.expanded_dim)) s_x_2_full[:, 0:self.output_dim] = s_x_2_truncated if self.use_pca or self.use_sfa: perm_2_inv = numpy.zeros(self.expanded_dim, dtype="int") # print "input_dim", input_dim # print "self.perm2", self.perm2 # print "len(self.perm2)", len(self.perm2) perm_2_inv[self.perm2] = numpy.arange(self.expanded_dim, dtype="int") # print perm_2_inv sfa_pca_x = s_x_2_full[:, perm_2_inv] sfa_pca_x /= self.magn2 ** self.exponent_variance sfa_x = sfa_pca_x[:, 0:self.num_sfa_features_preserved] n_pca_x = sfa_pca_x[:, self.num_sfa_features_preserved:] else: # sfa_x = ...? n_pca_x = s_x_2_full pca_out_dim = self.expanded_dim - self.num_sfa_features_preserved if self.use_pca and pca_out_dim > 0: pca_x = n_pca_x * self.pca_x_std + self.pca_x_mean sfa_removed_x = self.pca_node.inverse(pca_x) else: sfa_removed_x = n_pca_x if self.use_sfa: sfa_x += self.sfa_x_mean sfa_x_full = numpy.zeros((num_samples, self.expanded_dim)) sfa_x_full[:, 0:self.num_sfa_features_preserved] = sfa_x proj_sfa_x = self.sfa_node.inverse(sfa_x_full) s_x_1B = sfa_removed_x + proj_sfa_x else: s_x_1B = sfa_removed_x if self.second_weighting: perm_1B_inv = numpy.zeros(self.expanded_dim, dtype="int") perm_1B_inv[self.perm1_B] = numpy.arange(self.expanded_dim, dtype="int") s_x_1 = s_x_1B[:, perm_1B_inv] s_x_1 /= self.magn1_B ** self.exponent_variance else: s_x_1 = s_x_1B perm_1_inv = numpy.zeros(self.expanded_dim, dtype="int") perm_1_inv[self.perm1] = numpy.arange(self.expanded_dim, dtype="int") n_exp_x = s_x_1[:, perm_1_inv] n_exp_x /= self.magn1 ** self.exponent_variance exp_x = n_exp_x * self.exp_x_std + self.exp_x_mean if self.exp_node: zm_x = self.exp_node.inverse(exp_x) else: zm_x = exp_x x = zm_x + self.x_mean verbose = False if verbose: print("x[0]=", x[0]) print("zm_x[0]=", zm_x[0]) print("exp_x[0]=", exp_x[0]) print("s_x_1[0]=", s_x_1[0]) print("proj_sfa_x[0]=", proj_sfa_x[0]) print("sfa_removed_x[0]=", sfa_removed_x[0]) print("pca_x[0]=", pca_x[0]) print("n_pca_x[0]=", n_pca_x[0]) print("sfa_x[0]=", sfa_x[0]) return x def export_to_libsvm(labels_classes, features, filename): dim_features = features.shape[1] filehandle = open(filename, "wb") if len(features) != len(labels_classes): er = "number of labels_classes %d does not match number of samples %d!" 
% (len(labels_classes), len(features)) raise Exception(er) for i in range(len(features)): filehandle.write("%d" % labels_classes[i]) for j in range(dim_features): filehandle.write(" %d:%f" % (j + 1, features[i, j])) filehandle.write("\n") filehandle.close() def is_monotonic_increasing(x): prev = x[0] for curr in x[1:]: if curr <= prev: return False prev = curr return True def compute_average_labels_for_each_class(classes, labels): all_classes = numpy.unique(classes) avg_labels = numpy.zeros(len(all_classes)) for i, cl in enumerate(all_classes): avg_label = labels[classes == cl].mean() avg_labels[i] = avg_label return avg_labels def map_class_numbers_to_avg_label(all_classes, avg_labels, class_numbers): if not (is_monotonic_increasing(all_classes)): er = "Array of class numbers should be monotonically increasing:" + str(all_classes) raise Exception(er) if not (is_monotonic_increasing(avg_labels)): er = "SEVERE WARNING! Array of labels should be monotonically increasing:" + str(avg_labels) raise Exception(er) if len(all_classes) != len(avg_labels): er = "SEVERE WARNING! Array of classes should have the same length as the array of labels: %d vs. %d" % \ (len(all_classes), len(avg_labels)) raise Exception(er) indices = numpy.searchsorted(all_classes, class_numbers) return avg_labels[indices] def map_labels_to_class_number(all_classes, avg_labels, labels): if not (is_monotonic_increasing(all_classes)): er = "Array of class numbers should be monotonically increasing:", all_classes raise Exception(er) if not (is_monotonic_increasing(avg_labels)): er = "Array of labels should be monotonically increasing:", avg_labels raise Exception(er) if len(all_classes) != len(avg_labels): er = "Array of classes should have the same length as the array of labels:" + str(len(all_classes)) + \ " vs. 
" + str(len(avg_labels)) raise Exception(er) interval_midpoints = (avg_labels[1:] + avg_labels[:-1]) / 2.0 indices = numpy.searchsorted(interval_midpoints, labels) return all_classes[indices] def random_boolean_array(size): return numpy.random.randint(2, size=size) == 1 def generate_random_sigmoid_weights(input_dim, num_features): # scale_factor = 8.0 / numpy.sqrt(input_dim) scale_factor = 1.0 c = numpy.random.normal(loc=0.0, scale=scale_factor, size=(input_dim, num_features)) c2 = (numpy.abs(c) ** 1.5) # print "c2=", c2 # print "c2[0]=", c2[0] c = 4.0 * numpy.sign(c) * c2 / c2.max() # print "c=", c # print "c[0]=", c[0] l = numpy.random.normal(loc=0.0, scale=1.0, size=num_features) return c, l def extract_sigmoid_features(x, c1, l1, scale=1.0, offset=0.0, use_special_features=False): if x.shape[1] != c1.shape[0] or c1.shape[1] != len(l1): er = "Array dimensions mismatch: x.shape =" + str(x.shape) + ", c1.shape =" + str( c1.shape) + ", l1.shape=" + str(l1.shape) print(er) raise Exception(er) s = numpy.dot(x, c1) + l1 f = numpy.tanh(s) if use_special_features: # replace features with l1 = -1.0 to x^T * c1[i] # replace features with l1 = 0.8 to 0.8 expo(x^T * c1[i]) # print "f.shape=", f.shape # print "numpy.dot(x,c1[:,0]).shape=", numpy.dot(x,c1[:,0]).shape fixed = 0 for i in range(c1.shape[1]): if l1[i] == 0.8: f[:, i] = numpy.abs(numpy.dot(x, c1[:, i])) ** 0.8 fixed += 1 elif l1[i] == 1.0: # identity f[:, i] = numpy.dot(x, c1[:, i]) fixed += 1 print("Number of features adapted to either identity or 08Expo:", fixed) return f * scale + offset # sf_matrix has shape input_dim x output_dim def evaluate_coefficients(sf_matrix): # Exponentially decaying weights # weighting = numpy.e ** -numpy.arange(sf_matrix.shape[1]) weighting = 2.0 ** -numpy.arange(sf_matrix.shape[1]) weighted_relevances = numpy.abs(sf_matrix) * weighting relevances = weighted_relevances.sum(axis=1) return relevances class SFAAdaptiveNLNode(mdp.Node): """Node that implements SFA with an adaptive non-linearity. """ def __init__(self, input_dim=None, output_dim=None, pre_expansion_node_class=None, final_expanded_dim=None, initial_expansion_size=None, starting_point=None, expansion_size_decrement=None, expansion_size_increment=None, number_iterations=2, **argv): super(SFAAdaptiveNLNode, self).__init__(input_dim=input_dim, output_dim=output_dim, **argv) self.pre_expansion_node_class = pre_expansion_node_class self.pre_expansion_node = None self.final_expanded_dim = final_expanded_dim self.initial_expansion_size = initial_expansion_size self.starting_point = starting_point self.expansion_size_decrement = expansion_size_decrement self.expansion_size_increment = expansion_size_increment self.number_iterations = number_iterations self.sfa_node = None self.f1_mean = None self.f1_std = None @staticmethod def is_trainable(): return True # sfa_block_size, sfa_train_mode, etc. 
would be preferred # max_preserved_sfa=1.995 def _train(self, x, block_size=None, train_mode=None, node_weights=None, edge_weights=None, scheduler=None, n_parallel=None, **argv): self.input_dim = x.shape[1] if self.output_dim is None: self.output_dim = self.input_dim print("Training SFAAdaptiveNLNode...") print("block_size =", block_size, ", train_mode =", train_mode) print("x.shape=", x.shape, "self.starting_point=", self.starting_point) # TODO: Remove mean and normalize variance before expansion # self.x_mean = x.mean(axis=0) # x_zm=x-self.x_mean # TODO:Make this code more pretty (refactoring) if self.starting_point == "Identity": print("wrong1") c0 = numpy.identity(self.input_dim) l0 = numpy.ones(self.input_dim) * -1.0 # Code identity elif self.starting_point == "08Exp": print("good 1") c0 = numpy.concatenate((numpy.identity(self.input_dim), numpy.identity(self.input_dim)), axis=1) l0 = numpy.concatenate((numpy.ones(self.input_dim) * 1.0, numpy.ones(self.input_dim) * 0.8), axis=0) if self.starting_point == "Identity" or self.starting_point == "08Exp": print("good 2") remaining_feats = self.initial_expansion_size - c0.shape[1] print("remaining_feats =", remaining_feats) if remaining_feats < 0: er = "Error, features needed for identity or 08Exp exceeds number of features availabe" + \ "remaining_feats=%d < 0" % remaining_feats + \ ". self.initial_expansion_size=%d" % self.initial_expansion_size + \ "c0.shape[1]%d" % c0.shape[1] raise Exception(er) c2, l2 = generate_random_sigmoid_weights(self.input_dim, remaining_feats) c1 = numpy.concatenate((c0, c2), axis=1) l1 = numpy.concatenate((l0, l2), axis=0) else: print("wrong wrong") c1, l1 = generate_random_sigmoid_weights(self.input_dim, self.initial_expansion_size - self.expansion_size_increment) for num_iter in range(self.number_iterations): print("**************** Iteration %d of %d ********************" % (num_iter, self.number_iterations)) if num_iter > 0: # Only add additional features after first iteration cp, lp = generate_random_sigmoid_weights(self.input_dim, self.expansion_size_increment) c1 = numpy.append(c1, cp, axis=1) l1 = numpy.append(l1, lp, axis=0) # print "c1=", c1 # print "l1=", l1 f1 = extract_sigmoid_features(x, c1, l1, use_special_features=True) f1_mean = f1.mean(axis=0) f1 = f1 - f1_mean f1_std = f1.std(axis=0) f1 = f1 / f1_std # print "Initial features f1=", f1 print("f1.shape=", f1.shape) print("f1[0]=", f1[0]) print("f1[-1]=", f1[-1]) sfa_node = mdp.nodes.SFANode(output_dim=self.output_dim) sfa_node.train(f1, block_size=block_size, train_mode=train_mode, node_weights=node_weights, edge_weights=edge_weights, scheduler=scheduler, n_parallel=n_parallel) sfa_node.stop_training() print("self.sfa_node.d (full expanded) =", sfa_node.d) # Evaluate features based on sfa coefficient coeffs = evaluate_coefficients(sfa_node.sf) print("Scores of each feature from SFA coefficients:", coeffs) # find indices of best features. 
Largest scores first best_feat_indices = coeffs.argsort()[::-1] print("indices of best features:", best_feat_indices) # remove worst expansion_size_decrement features if num_iter < self.number_iterations - 1: # Except during last iteration best_feat_indices = best_feat_indices[:-self.expansion_size_decrement] c1 = c1[:, best_feat_indices] l1 = l1[best_feat_indices] # print "cc=", cc # print "ll=", ll if c1.shape[1] > self.final_expanded_dim: c1 = c1[:, :self.final_expanded_dim] l1 = l1[:self.final_expanded_dim] self.c1 = c1 self.l1 = l1 print("self.c1.shape=,", self.c1.shape, "self.l1.shape=,", self.l1.shape) print("Learning of non-linear features finished") f1 = extract_sigmoid_features(x, self.c1, self.l1, use_special_features=True) self.f1_mean = f1.mean(axis=0) f1 -= self.f1_mean self.f1_std = f1.std(axis=0) f1 /= self.f1_std self.sfa_node = mdp.nodes.SFANode(output_dim=self.output_dim) self.sfa_node.train(f1, block_size=block_size, train_mode=train_mode, node_weights=node_weights, edge_weights=edge_weights, scheduler=scheduler, n_parallel=n_parallel) self.sfa_node.stop_training() print("self.sfa_node.d (final features) =", self.sfa_node.d) # Evaluate features based on sfa coefficient coeffs = evaluate_coefficients(self.sfa_node.sf) print("evaluation of each features from SFA coefficients: ", coeffs) # find indices of best features. Largest scores first best_feat_indices = coeffs.argsort()[::-1] print("indices of best features:", best_feat_indices) print("f1.shape=", f1.shape) # Train linear regression node for a linear approximation to inversion self.lr_node = mdp.nodes.LinearRegressionNode() y = self.sfa_node.execute(f1) self.lr_node.train(y, x) self.lr_node.stop_training() x_app = self.lr_node.execute(y) ev_linear_inverse = compute_explained_var(x, x_app) / data_variance(x) print("EV_linear_inverse (train)=", ev_linear_inverse) self.stop_training() def _is_invertible(self): return True def _execute(self, x): num_samples = x.shape[0] f1 = extract_sigmoid_features(x, self.c1, self.l1, use_special_features=True) f1 -= self.f1_mean f1 /= self.f1_std return self.sfa_node.execute(f1) def _inverse(self, y, linear_inverse=True): x_app = self.lr_node.execute(y) return x_app # TODO:Finish this and correct it def indices_training_graph_split(num_samples, train_mode="regular", block_size=None, num_parts=1): if train_mode == "regular": indices = numpy.arange(num_samples) block_assignment = (indices * 1.0 * num_parts / num_samples).astype(int) numpy.random.shuffle(block_assignment) part_indices = [] for num_part in range(num_parts): part = indices[block_assignment == num_part] part_indices.append(part) elif train_mode in ["serial", "sequence"]: if isinstance(block_size, int): shuffled_indices = numpy.zeros(num_samples) for block in range(num_samples // block_size): shuffled_indices[block * block_size:(block + 1) * block_size] = \ (numpy.arange(block_size) * 1.0 * num_parts / block_size).astype(int) for block in range(num_samples // block_size): shuffled_indices = (numpy.arange(block_size) * 1.0 * num_parts / block_size).astype(int) numpy.random.shuffle(shuffled_indices[block * block_size:(block + 1) * block_size]) part_indices = [] for num_part in range(num_parts): part = indices[block_assignment == num_part] part_indices.append(part) else: er = "Inhomogeneous block sizes not supported for now" raise Exception(er) elif train_mode == "clustered": print("Mode unuported for now... 
FIX this!!!") # Cumulative score metric def cumulative_score(ground_truth, estimation, largest_error, integer_rounding=True): if len(ground_truth) != len(estimation): er = "ground_truth and estimation have different number of elements" raise Exception(er) if integer_rounding: _estimation = numpy.rint(estimation) else: _estimation = estimation N_e_le_j = (numpy.absolute(_estimation - ground_truth) <= largest_error).sum() return N_e_le_j * 1.0 / len(ground_truth) def compute_regression_performance(data_training, correct_labels_training, data_test, correct_labels_test, size_feature_space, starting_point=None): input_dim = data_training.shape[1] # Generate functions used for regression data_training_mean = data_training.mean(axis=0) data_training_std = data_training.std(axis=0) data_training_norm = (data_training - data_training_mean) / data_training_std data_test_norm = (data_test - data_training_mean) / data_training_std c1, l1 = generate_random_sigmoid_weights(input_dim, size_feature_space) if starting_point == "Identity": # print "adding identity coefficients to expansion" c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim) l1[0:input_dim] = numpy.ones(input_dim) * 1.0 # Code identity elif starting_point == "Sigmoids": print("Sigmoid starting point enabled") # print "adding identity coefficients to expansion" c1[0:input_dim, 0:input_dim] = numpy.identity(input_dim) l1[0:input_dim] = numpy.ones(input_dim) * 0.0 # Sigmoids of each component will be computed later elif starting_point == "08Exp": # identity included # print "adding 08Exp coefficients to expansion" c1[0:input_dim, 0:input_dim] =
numpy.identity(input_dim)
numpy.identity
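The completion above fills in numpy.identity for the "08Exp" starting point: the sigmoid expansion is seeded with an identity block (l = 1.0, plain linear features) and a 0.8-exponent magnitude block (l = 0.8) before random sigmoid features are appended. The sketch below is a simplified, self-contained version of that seeding plus the special-case evaluation done in extract_sigmoid_features; the helper names are invented for the example and the random weights are not rescaled the way generate_random_sigmoid_weights rescales them.

import numpy


def seed_08exp_expansion(input_dim, num_random, rng=numpy.random):
    # identity block (l=1.0 -> plain x.T c) followed by an |x.T c|**0.8 block (l=0.8)
    c0 = numpy.concatenate((numpy.identity(input_dim), numpy.identity(input_dim)), axis=1)
    l0 = numpy.concatenate((numpy.ones(input_dim) * 1.0, numpy.ones(input_dim) * 0.8))
    # remaining features get random weights (simplified; no rescaling here)
    c2 = rng.normal(0.0, 1.0, size=(input_dim, num_random))
    l2 = rng.normal(0.0, 1.0, size=num_random)
    return numpy.concatenate((c0, c2), axis=1), numpy.concatenate((l0, l2))


def apply_expansion(x, c, l):
    f = numpy.tanh(numpy.dot(x, c) + l)
    for i in range(c.shape[1]):
        if l[i] == 0.8:            # 0.8-exponent magnitude feature
            f[:, i] = numpy.abs(numpy.dot(x, c[:, i])) ** 0.8
        elif l[i] == 1.0:          # identity (purely linear) feature
            f[:, i] = numpy.dot(x, c[:, i])
    return f


# x = numpy.random.normal(size=(100, 5))
# c, l = seed_08exp_expansion(input_dim=5, num_random=20)
# features = apply_expansion(x, c, l)   # shape (100, 30): 5 linear + 5 magnitude + 20 sigmoid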
'''
Python3 implementation of oddball

@author: <NAME> (<EMAIL>)
'''
import numpy as np
from sklearn.linear_model import LinearRegression
from sklearn.neighbors import LocalOutlierFactor


# feature dictionary which format is {node i's id:Ni, Ei, Wi, λw,i}
def star_or_clique(featureDict):
    N = []
    E = []
    for key in featureDict.keys():
        N.append(featureDict[key][0])
        E.append(featureDict[key][1])
    # E=CN^α => log on both sides => logE=logC+αlogN
    # regard as y=b+wx to do linear regression
    # here the base of log is 2
    y_train =
np.log2(E)
numpy.log2
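The oddball snippet above fits the egonet power law E = C * N**alpha by regressing log2(E) on log2(N) with scikit-learn. A self-contained sketch of that fit on made-up per-node counts (the data and the simple deviation score are illustrative only; the full oddball anomaly score is defined elsewhere in the original code):

import numpy as np
from sklearn.linear_model import LinearRegression

# toy egonet features: Ni = number of neighbours, Ei = edges in node i's egonet
N = np.array([3, 5, 8, 13, 21, 34])
E = np.array([3, 6, 10, 18, 30, 55])

# E = C * N**alpha  =>  log2(E) = log2(C) + alpha * log2(N)
y_train = np.log2(E)
x_train = np.log2(N).reshape(-1, 1)

model = LinearRegression()
model.fit(x_train, y_train)
alpha, log2_C = model.coef_[0], model.intercept_
print("alpha =", alpha, "C =", 2.0 ** log2_C)

# a simple anomaly indicator: how far each node deviates from the fitted line
print("deviation per node:", np.abs(y_train - model.predict(x_train)))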
#encoding:UTF-8
#author:justry
import os
import numpy as np


def summary(path):
    with open(path, 'r') as f:
        lines = f.readlines()
    detect = []
    recognize = []
    total = []
    for i in lines:
        if 'web_service.post return success, time:' in i:
            index = i[-10:].find(':')
            total.append(i[1-10+index:-1])
        elif 'nature_scene_ocr.recognize line recognizer time:' in i:
            index = i[-10:].find(':')
            recognize.append(i[1-10+index:-1])
        elif 'nature_scene_ocr.recognize get boxes time:' in i:
            index = i[-10:].find(':')
            detect.append(i[1-10+index:-1])
    return (np.array(detect).astype(np.float),
            np.array(recognize).astype(np.float),
            np.array(total).astype(np.float))


if __name__ == '__main__':
    path = 'multigpu_results/logging.log'
    #path = 'singlegpu_results/logging.log'
    #path = "singlegpu_multiprocess_results/logging.log"
    detect, recognize, total = summary(path)
    print('detect time is ', np.average(detect))
    print('recognize time is ',
np.average(recognize)
numpy.average
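The log parser above leans on one trick: the timing value always sits after the last ':' within the final ten characters of the line, so i[-10:].find(':') locates it and i[1-10+index:-1] slices out the number while dropping the trailing newline. A quick standalone check of that slicing on an invented log line:

import numpy as np

line = "web_service.post return success, time:0.34521\n"
index = line[-10:].find(':')        # ':' found inside the last 10 characters
value = line[1 - 10 + index:-1]     # everything after that ':' up to the trailing '\n'
print(value)                        # -> 0.34521

# the script then averages the collected strings after casting them to float
times = np.array([value, "0.41200", "0.29871"]).astype(np.float64)
print("average time:", np.average(times))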
# Copyright 2022 The DDSP Authors. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. """Library containing DDSP output -> MIDI heurstics.""" import ddsp import gin import note_seq import numpy as np import tensorflow.compat.v2 as tf DDSP_DEFAULT_FRAME_RATE = 250 def get_active_frame_indices(piano_roll): """Create matrix of frame indices for active notes relative to onset.""" active_frame_indices = np.zeros_like(piano_roll) for frame_i in range(1, active_frame_indices.shape[0]): prev_indices = active_frame_indices[frame_i - 1, :] active_notes = piano_roll[frame_i, :] active_frame_indices[frame_i, :] = (prev_indices + 1) * active_notes return active_frame_indices def _unbatch(batch): """Splits a dictionary of batched tensors into a list of dictionaries.""" unbatched = [] for key, val in batch.items(): if isinstance(val, (tf.Tensor, np.ndarray)): if not unbatched: unbatched = [{} for _ in range(val.shape[0])] assert val.shape[0] == len( unbatched), f'batch size mismatch: {val.shape[0]} vs {len(unbatched)}' for i in range(val.shape[0]): unbatched[i][key] = val[i] elif isinstance(val, dict): sub_batch = _unbatch(val) if not unbatched: unbatched = [{} for _ in sub_batch] for i in range(len(sub_batch)): unbatched[i][key] = sub_batch[i] elif val is None: continue else: raise Exception(f'unsupported value at {key}:{val} of type {type(val)}') return unbatched @gin.configurable def segment_notes_batch(binarize_f, pick_f0_f, pick_amps_f, controls_batch, frame_rate=DDSP_DEFAULT_FRAME_RATE): """A function to split a controls dict into discrete notes. Args: binarize_f: Returns a binary vector that is True when a note is on. pick_f0_f: Returns a single f0 for a vector of f0s of a single note. pick_amps_f: Returns a single amplitude for a vector of amplidutes of a single note. controls_batch: The controls as returned from model inference, packed into a batch. frame_rate: Frame rate for the notes found. Returns: A list of NoteSequence objects, one for each element in the input batch. """ notes_batch = [] for controls in _unbatch(controls_batch): notes_batch.append( segment_notes( binarize_f=binarize_f, pick_f0_f=pick_f0_f, pick_amps_f=pick_amps_f, controls=controls, frame_rate=frame_rate)) return notes_batch def window_array(array, sr, win_len, frame_step_ratio=0.75, ax=0): """Chop up an array into overlapping frame windows.""" frame_length = int(sr * win_len) frame_step = int(sr * win_len * frame_step_ratio) pad_front = int(sr * win_len * (1 - frame_step_ratio)) padded_a = np.concatenate([np.zeros_like(array)[:pad_front], array], axis=ax) return tf.signal.frame( padded_a, frame_length=frame_length, frame_step=frame_step, pad_end=True, axis=ax).numpy() def segment_notes(binarize_f, pick_f0_f, pick_amps_f, controls, frame_rate=DDSP_DEFAULT_FRAME_RATE): """A function to split a controls dict into discrete notes. Args: binarize_f: Returns a binary vector that is True when a note is on. pick_f0_f: Returns a single f0 for a vector of f0s of a single note. 
pick_amps_f: Returns a single amplitude for a vector of amplidutes of a single note. controls: The controls as returned from model inference. frame_rate: Frame rate for the notes found. Returns: NoteSequence object with discretized note information. """ sequence = note_seq.NoteSequence() def construct_note(curr_ind, duration): note_start = curr_ind - duration f0 = pick_f0_f(controls, start=note_start, stop=curr_ind) amplitude = pick_amps_f(controls, start=note_start, stop=curr_ind) # pylint:disable=unused-variable note = sequence.notes.add() note.pitch = np.round(ddsp.core.hz_to_midi(f0)).astype(np.int32) note.start_time = note_start / frame_rate note.end_time = (note_start + duration) / frame_rate # TODO(rigeljs): convert amplitude to velocity and add to note. note.velocity = 127 binary_sample = binarize_f(controls) has_been_on = 0 for i, sample_i in enumerate(np.nditer(binary_sample)): if sample_i: has_been_on += 1 elif has_been_on > 0: construct_note(i, has_been_on) has_been_on = 0 if has_been_on > 0: construct_note(len(binary_sample), has_been_on) sequence.total_time = len(binary_sample) / frame_rate return sequence ### PICK_F0_F candidates ### @gin.register def mean_f0(controls, start, stop): f0_hz = controls['f0_hz'] return np.mean(f0_hz[start:stop]) @gin.register def median_f0(controls, start, stop): f0_hz = controls['f0_hz'] return np.median(f0_hz[start:stop]) ### PICK_AMPS_F candidates ### @gin.register def median_amps(controls, start, stop): amps = np.squeeze(controls['harmonic']['controls']['amplitudes']) return np.median(amps[start:stop]) ### BINARIZE_FN candidates ### def remove_short(is_on_vec, min_samples=20, glue_back=False): """Removes short notes and optionally reattaches them to the previous note.""" has_been_on = 0 prev_note_end = 0 for i, is_on in enumerate(np.nditer(is_on_vec, flags=('refs_ok',))): if is_on: has_been_on += 1 else: if has_been_on < min_samples: # set this "on" stretch to off if glue_back: is_on_vec[prev_note_end:i] = True else: is_on_vec[i - has_been_on:i] = False has_been_on = 0 prev_note_end = i return is_on_vec def pad_for_frame(vec, mode, frame_width, axis=0): """A helper function to pad vectors for input to tf.signal.frame. Each element in vec is the center of a frame if frame_step == 1 after padding. Args: vec: The vector to be padded along the first dimension. mode: Either 'front', 'center', or 'end'. frame_width: Width of frame to pad for. axis: Axis to pad. Returns: The padded vector of shape [vec.shape[0] + pad_size]. Raises: ValueError: If 'mode' passed in is not 'front', 'center', or 'end'. """ if mode == 'front': pad_width_arg = (frame_width - 1, 0) elif mode == 'center': # handles even and odd frame widths pad_width_arg = int(frame_width / 2), frame_width - int(frame_width / 2) - 1 elif mode == 'end': pad_width_arg = (0, frame_width - 1) else: raise ValueError(f'Unrecognized pad mode {mode}.') return np.pad( vec, pad_width_arg, mode='constant', constant_values=(int(np.take(vec, 0, axis)), int(np.take(vec, -1, axis)))) @gin.register def amp_pooled_outliers(controls, frame_width=80, num_devs=2, pad_mode='center'): """Finds amps that are n std devs below the mean of their neighbors.""" log_amps = np.log(np.squeeze(controls['harmonic']['controls']['amplitudes'])) padded_amps = pad_for_frame( log_amps, mode=pad_mode, frame_width=frame_width, axis=0) frames = tf.signal.frame(padded_amps, frame_width, 1) low_pooled =
np.mean(frames, axis=-1)
numpy.mean
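For a concrete feel of the segmentation helpers above, the toy run below exercises get_active_frame_indices (logic copied from the snippet) on a tiny hand-written piano roll; no DDSP model or audio is needed, and the example array is invented.

import numpy as np

def get_active_frame_indices(piano_roll):
    """Per-frame counters that grow while a note stays on and reset when it turns off."""
    active_frame_indices = np.zeros_like(piano_roll)
    for frame_i in range(1, active_frame_indices.shape[0]):
        prev_indices = active_frame_indices[frame_i - 1, :]
        active_notes = piano_roll[frame_i, :]
        active_frame_indices[frame_i, :] = (prev_indices + 1) * active_notes
    return active_frame_indices

# one pitch over 8 frames: off, on for 3 frames, off, on for 2 frames, off
roll = np.array([[0], [1], [1], [1], [0], [1], [1], [0]])
print(get_active_frame_indices(roll).ravel())   # -> [0 1 2 3 0 1 2 0]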
""" This hopes to recreate the functionality of the ACRG repo function footprints_data_merge """ from pandas import Timestamp from xarray import Dataset, DataArray from typing import List, Optional, Tuple, Union, Dict, Any from openghg.dataobjects import FootprintData # from openghg.dataobjects import FluxData __all__ = ["single_site_footprint", "footprints_data_merge"] def single_site_footprint( site: str, height: str, network: str, domain: str, species: str, start_date: Union[str, Timestamp], end_date: Union[str, Timestamp], resample_to: str = "coarsest", site_modifier: Optional[str] = None, platform: Optional[str] = None, instrument: Optional[str] = None, ) -> Dataset: """Creates a Dataset for a single site's measurement data and footprints Args: site: Site name height: Height of inlet in metres network: Network name resample_to: Resample the data to a given time dataset. Valid options are ["obs", "footprints", "coarsen"]. - "obs" resamples the footprints to the observation time series data - "footprints" resamples to to the footprints time series - "coarsest" resamples to the data with the coarsest time resolution site_modifier: The name of the site given in the footprints. This is useful for example if the same site footprints are run with a different met and they are named slightly differently from the obs file. E.g. site="DJI", site_modifier = "DJI-SAM" - station called DJI, footprints site called DJI-SAM platform: Observation platform used to decide whether to resample instrument: species: Species type Returns: xarray.Dataset """ from openghg.retrieve import get_obs_surface, get_footprint from openghg.util import timestamp_tzaware start_date = timestamp_tzaware(start_date) end_date = timestamp_tzaware(end_date) resample_to = resample_to.lower() resample_choices = ("obs", "footprints", "coarsest") if resample_to not in resample_choices: raise ValueError( f"Invalid resample choice {resample_to} past, please select from one of {resample_choices}" ) # As we're not retrieve any satellite data yet just set tolerance to None tolerance = None platform = None # Here we want to use get_obs_surface obs_results = get_obs_surface( site=site, inlet=height, start_date=start_date, end_date=end_date, species=species, instrument=instrument, ) obs_data = obs_results.data # Save the observation data units try: units: Union[float, None] = float(obs_data.mf.attrs["units"]) except KeyError: units = None except AttributeError: raise AttributeError("Unable to read mf attribute from observation data.") # If the site for the footprints has a different name, pass that in if site_modifier: footprint_site = site_modifier else: footprint_site = site # Try to find appropriate footprints file first with and then without species name try: footprint = get_footprint( site=footprint_site, domain=domain, height=height, start_date=start_date, end_date=end_date, species=species, ) except ValueError: footprint = get_footprint( site=footprint_site, domain=domain, height=height, start_date=start_date, end_date=end_date, ) # TODO: Add checks for particular species e.g. 
co2 and short-lived species # which should have a specific footprints available rather than the generic one # Extract dataset footprint_data = footprint.data # Align the two Datasets aligned_obs, aligned_footprint = align_datasets( obs_data=obs_data, footprint_data=footprint_data, platform=platform, resample_to=resample_to, ) combined_dataset = combine_datasets( dataset_A=aligned_obs, dataset_B=aligned_footprint, tolerance=tolerance ) # Transpose to keep time in the last dimension position in case it has been moved in resample combined_dataset = combined_dataset.transpose(..., "time") if units is not None: combined_dataset["fp"].values = combined_dataset["fp"].values / units # if HiTRes: # combined_dataset.update({"fp_HiTRes": (combined_dataset.fp_HiTRes.dims, (combined_dataset.fp_HiTRes / units))}) return combined_dataset def footprints_data_merge( site: str, height: str, network: str, domain: str, species: str, start_date: Union[str, Timestamp], end_date: Union[str, Timestamp], resample_to: str = "coarsest", site_modifier: Optional[str] = None, platform: Optional[str] = None, instrument: Optional[str] = None, load_flux: Optional[bool] = True, flux_sources: Union[str, List] = None, load_bc: Optional[bool] = True, calc_timeseries: Optional[bool] = True, calc_bc: Optional[bool] = True, time_resolution: Optional[str] = "standard", ) -> FootprintData: """ TODO - Should this be renamed? Args: site: Three letter site code height: Height of inlet in metres network: Network name domain: Domain name start_date: Start date end_date: End date resample_to: Overrides resampling to coarsest time resolution, can be one of ["coarsest", "footprints", "obs"] site_modifier: The name of the site given in the footprints. This is useful for example if the same site footprints are run with a different met and they are named slightly differently from the obs file. E.g. 
site="DJI", site_modifier = "DJI-SAM" - station called DJI, footprints site called DJI-SAM platform: Observation platform used to decide whether to resample instrument: Instrument name species: Species name load_flux: Load flux flux_sources: Flux source names load_bc: Load boundary conditions (not currently implemented) calc_timeseries: Calculate timeseries data (not currently implemented) calc_bc: Calculate boundary conditions (not currently implemented) time_resolution: One of ["standard", "high"] Returns: dict: Dictionary footprints data objects """ from openghg.retrieve import get_flux from pandas import Timedelta # First get the site data combined_dataset = single_site_footprint( site=site, height=height, network=network, domain=domain, start_date=start_date, end_date=end_date, resample_to=resample_to, site_modifier=site_modifier, platform=platform, instrument=instrument, species=species, ) # So here we iterate over the emissions types and get the fluxes flux_dict = {} if load_flux: if flux_sources is None: raise ValueError("If you want to load flux you must pass a flux source") flux_dict["standard"] = get_flux( species=species, domain=domain, sources=flux_sources, time_resolution=time_resolution, start_date=start_date, end_date=end_date, ).data if time_resolution == "high": # TODO: Check appropriate date range and file formats for other species if species == "co2": max_h_back = str(combined_dataset["H_back"][-1].values) + "H" if isinstance(start_date, str): start_date = Timestamp(start_date) start_date_hr = start_date - Timedelta(max_h_back) else: start_date_hr = start_date flux_dict["high_time_res"] = get_flux( species=species, domain=domain, sources=flux_sources, time_resolution=time_resolution, start_date=start_date_hr, end_date=end_date, ).data # Calculate model time series, if required if calc_timeseries: combined_dataset = add_timeseries(combined_dataset=combined_dataset, flux_dict=flux_dict) return FootprintData( data=combined_dataset, metadata={}, flux=flux_dict, bc={}, species=species, scales="scale", units="units", ) def combine_datasets( dataset_A: Dataset, dataset_B: Dataset, method: str = "ffill", tolerance: Optional[float] = None, ) -> Dataset: """Merges two datasets and re-indexes to the first dataset. If "fp" variable is found within the combined dataset, the "time" values where the "lat", "lon" dimensions didn't match are removed. Args: dataset_A: First dataset to merge dataset_B: Second dataset to merge method: One of None, nearest, ffill, bfill. See xarray.DataArray.reindex_like for list of options and meaning. Defaults to ffill (forward fill) tolerance: Maximum allowed tolerance between matches. 
Returns: xarray.Dataset: Combined dataset indexed to dataset_A """ import numpy as np if indexes_match(dataset_A, dataset_B): dataset_B_temp = dataset_B else: dataset_B_temp = dataset_B.reindex_like(other=dataset_A, method=method, tolerance=tolerance) # type: ignore merged_ds = dataset_A.merge(other=dataset_B_temp) if "fp" in merged_ds: if all(k in merged_ds.fp.dims for k in ("lat", "long")): flag = np.where(np.isfinite(merged_ds.fp.mean(dim=["lat", "lon"]).values)) merged_ds = merged_ds[dict(time=flag[0])] return merged_ds def indexes_match(dataset_A: Dataset, dataset_B: Dataset) -> bool: """Check if two datasets need to be reindexed_like for combine_datasets Args: dataset_A: First dataset to check dataset_B: Second dataset to check Returns: bool: True if indexes match, else False """ import numpy as np common_indices = (key for key in dataset_A.indexes.keys() if key in dataset_B.indexes.keys()) for index in common_indices: if not len(dataset_A.indexes[index]) == len(dataset_B.indexes[index]): return False # Check number of values that are not close (testing for equality with floating point) if index == "time": # For time override the default to have ~ second precision rtol = 1e-10 else: rtol = 1e-5 index_diff = np.sum( ~np.isclose( dataset_A.indexes[index].values.astype(float), dataset_B.indexes[index].values.astype(float), rtol=rtol, ) ) if not index_diff == 0: return False return True def align_datasets( obs_data: Dataset, footprint_data: Dataset, resample_to: Optional[str] = "coarsest", platform: Optional[str] = None, ) -> Tuple[Dataset, Dataset]: """Slice and resample two datasets to align along time This slices the date to the smallest time frame spanned by both the footprints and obs, then resamples the data using the mean to the one with coarsest median resolution starting from the sliced start date. 
Args: obs_data: Observations Dataset footprint_data: Footprint Dataset resample_to: Overrides resampling to coarsest time resolution, can be one of ["coarsest", "footprints", "obs"] platform: Observation platform used to decide whether to resample Returns: tuple: Two xarray.Dataset with aligned time dimensions """ import numpy as np from pandas import Timedelta if platform is not None: platform = platform.lower() # Do not apply resampling for "satellite" (but have re-included "flask" for now) if platform == "satellite": return obs_data, footprint_data # Whether sampling period is present or we need to try to infer this infer_sampling_period = False # Get the period of measurements in time obs_attributes = obs_data.attrs if "averaged_period" in obs_attributes: obs_data_period_s = float(obs_attributes["averaged_period"]) elif "sampling_period" in obs_attributes: sampling_period = obs_attributes["sampling_period"] if sampling_period == "NOT_SET": infer_sampling_period = True else: obs_data_period_s = float(sampling_period) obs_data_period_s = float(obs_attributes["sampling_period"]) elif "sampling_period_estimate" in obs_attributes: estimate = obs_attributes["sampling_period_estimate"] print(f"WARNING: Using estimated sampling period of {estimate}s for observational data") obs_data_period_s = float(estimate) else: infer_sampling_period = True if infer_sampling_period: # Attempt to derive sampling period from frequency of data obs_data_period_s = np.nanmedian((obs_data.time.data[1:] - obs_data.time.data[0:-1]) / 1e9).astype( "float32" ) obs_data_period_s_min = np.diff(obs_data.time.data).min() / 1e9 obs_data_period_s_max = np.diff(obs_data.time.data).max() / 1e9 # Check if the periods differ by more than 1 second if np.isclose(obs_data_period_s_min, obs_data_period_s_max, 1): raise ValueError("Sample period can be not be derived from observations") obs_data_timeperiod = Timedelta(seconds=obs_data_period_s) # Derive the footprints period from the frequency of the data footprint_data_period_ns = np.nanmedian( (footprint_data.time.data[1:] - footprint_data.time.data[0:-1]).astype("int64") ) footprint_data_timeperiod = Timedelta(footprint_data_period_ns, unit="ns") # Here we want timezone naive Timestamps # Add sampling period to end date to make sure resample includes these values when matching obs_startdate = Timestamp(obs_data.time[0].values) obs_enddate = Timestamp(obs_data.time[-1].values) + Timedelta(obs_data_timeperiod, unit="seconds") footprint_startdate = Timestamp(footprint_data.time[0].values) footprint_enddate = Timestamp(footprint_data.time[-1].values) + Timedelta(footprint_data_timeperiod, unit="nanoseconds") start_date = max(obs_startdate, footprint_startdate) end_date = min(obs_enddate, footprint_enddate) # Subtract half a second to ensure lower range covered start_slice = start_date - Timedelta("0.5s") # Add half a second to ensure upper range covered end_slice = end_date + Timedelta("0.5s") obs_data = obs_data.sel(time=slice(start_slice, end_slice)) footprint_data = footprint_data.sel(time=slice(start_slice, end_slice)) # Only non satellite datasets with different periods need to be resampled timeperiod_diff_s = np.abs(obs_data_timeperiod - footprint_data_timeperiod).total_seconds() tolerance = 1e-9 # seconds if timeperiod_diff_s >= tolerance: base = start_date.hour + start_date.minute / 60.0 + start_date.second / 3600.0 if resample_to == "coarsest": if obs_data_timeperiod >= footprint_data_timeperiod: resample_to = "obs" elif obs_data_timeperiod < footprint_data_timeperiod: 
resample_to = "footprints" if resample_to == "obs": resample_period = str(round(obs_data_timeperiod / np.timedelta64(1, "h"), 5)) + "H" footprint_data = footprint_data.resample(indexer={"time": resample_period}, base=base).mean() elif resample_to == "footprints": resample_period = str(round(footprint_data_timeperiod / np.timedelta64(1, "h"), 5)) + "H" obs_data = obs_data.resample(indexer={"time": resample_period}, base=base).mean() return obs_data, footprint_data def add_timeseries(combined_dataset: Dataset, flux_dict: Dict[str, Dataset]) -> Dataset: """ Add timeseries mole fraction values in footprint_data_merge Args: combined_dataset [Dataset]: output created during footprint_data_merge flux_dict [dict]: Dictionary containing flux datasets """ # TODO: Extend to include multiple sources # TODO: Improve ability to merge high time resolution footprints (e.g. species as co2) # What do we expect flux_dict to look like? for key, flux_ds in flux_dict.items(): if key == "high_time_res": mf_mod: DataArray = timeseries_HiTRes(combined_dataset, flux_ds) name = "mf_mod_high_res" # TODO: May want to reindex afterwards? But can be expensive operation. else: # flux_reindex = flux_ds.reindex_like(combined_dataset, 'ffill') # combined_dataset['mf_mod'] = DataArray((combined_dataset.fp * flux_reindex.flux).sum(["lat", "lon"]), coords={'time': combined_dataset.time}) mf_mod = timeseries_integrated(combined_dataset, flux_ds) name = "mf_mod" combined_dataset[name] = DataArray(mf_mod, coords={"time": combined_dataset.time}) return combined_dataset def timeseries_integrated(combined_dataset: Dataset, flux_ds: Dataset) -> DataArray: """ Calculate modelled mole fraction timeseries using integrated footprints data. Args: combined_dataset [Dataset]: output created during footprint_data_merge flux_ds [Dataset]: Dataset containing flux values Returns: DataArray : Modelled mole fraction timeseries, dimensions = (time) TODO: Also allow flux_mod to be returned as an option? Include flags if so. """ flux_reindex = flux_ds.reindex_like(combined_dataset, "ffill") flux_mod: DataArray = combined_dataset.fp * flux_reindex.flux timeseries: DataArray = flux_mod.sum(["lat", "lon"]) # combined_dataset['mf_mod'] = DataArray((combined_dataset.fp * flux_reindex.flux).sum(["lat", "lon"]), coords={'time': combined_dataset.time}) return timeseries def timeseries_HiTRes( combined_dataset: Dataset, flux_ds: Dataset, averaging: Optional[str] = None, output_TS: Optional[bool] = True, output_fpXflux: Optional[bool] = False, ) -> Any: """ Calculate modelled mole fraction timeseries using high time resolution footprints data and emissions data. Args: combined_dataset: output created during footprint_data_merge. Expect dataset containing "fp_HiTRes" data variable with dimensions (lat, lon, time, H_back). Where H_back represents the hourly footprints related to the footprints time. flux_ds: Dataset containing flux values. Expect dataset containing "flux" data variable with dimensions (lat, lon, time). averaging: Time resolution to use to average the time dimension. Default = None output_TS: Whether to output the modelled mole fraction timeseries DataArray. Default = True output_fpXflux: Whether to output the modelled flux map DataArray used to create the timeseries. 
Default = False Returns: DataArray / DataArray : Modelled mole fraction timeseries, dimensions = (time) Modelled flux map, dimensions = (lat, lon, time) If one of output_TS and output_fpXflux are True: DataArray is returned for the respective output If both output_TS and output_fpXflux are both True: Both DataArrays are returned. TODO: Low frequency flux values may need to be selected from the month before (currently selecting the same month). TODO: Indexing for low frequency flux should be checked to make sure this allows for crossing over the end of the year. TODO: Currently using pure dask arrays (based on Hannah's original code) but would be good to update this to add more pre-indexing using xarray and/or use dask as part of datasets. TODO: May want to update this to not rely on indexing when selecting the appropriate flux values. At the moment this solution has been chosen because selecting on a dimension, rather than indexing, can be *very* slow depending on the operations performed beforehand on the Dataset (e.g. resample and reindex) TODO: This code currently resamples the frequency to be regular. This will have no effect if the time frequency was already regular but this may not be what we want and may want to add extra code to remove any NaNs, if they are introduced or to find a way to remove this requirement. TODO: mypy having trouble with different types options and incompatible types, included as Any for now. """ import numpy as np import dask.array as da # type: ignore from tqdm import tqdm from pandas import date_range from math import gcd fp_HiTRes = combined_dataset.fp_HiTRes # Calculate time resolution for both the flux and footprints data nanosecond_to_hour = 1 / (1e9 * 60.0 * 60.0) flux_res_H = int(flux_ds.time.diff(dim="time").values.mean() * nanosecond_to_hour) fp_res_time_H = int(fp_HiTRes.time.diff(dim="time").values.mean() * nanosecond_to_hour) fp_res_Hback_H = int(fp_HiTRes["H_back"].diff(dim="H_back").values.mean()) # Define resolution on time dimension in number in hours if averaging: try: time_res_H = int(averaging) time_resolution = f"{time_res_H}H" except (ValueError, TypeError): time_res_H = int(averaging[0]) time_resolution = averaging else: # If not specified derive from time from combined dataset time_res_H = fp_res_time_H time_resolution = f"{time_res_H}H" # Resample fp timeseries to match time resolution if fp_res_time_H != time_res_H: fp_HiTRes = fp_HiTRes.resample(time=time_resolution).ffill() # Define resolution on high frequency dimension in number of hours # At the moment this is matched to the Hback dimension time_hf_res_H = fp_res_Hback_H # Only allow for high frequency resolution < 24 hours if time_hf_res_H > 24: raise ValueError(f"High frequency resolution must be <= 24 hours. Current: {time_hf_res_H}H") elif 24 % time_hf_res_H != 0 or 24 % time_hf_res_H != 0.0: raise ValueError( f"High frequency resolution must exactly divide into 24 hours. Current: {time_hf_res_H}H" ) # Find the greatest common denominator between time and high frequency resolutions. # This is needed to make sure suitable flux frequency is used to allow for indexing. # e.g. time: 1H; hf (high frequency): 2H, highest_res_H would be 1H # e.g. 
time: 2H; hf (high frequency): 3H, highest_res_H would be 1H highest_res_H = gcd(time_res_H, time_hf_res_H) highest_resolution = f"{highest_res_H}H" # create time array to loop through, with the required resolution # fp_HiTRes.time is the release time of particles into the model time_array = fp_HiTRes["time"] lat = fp_HiTRes["lat"] lon = fp_HiTRes["lon"] hback = fp_HiTRes["H_back"] ntime = len(time_array) nlat = len(lat) nlon = len(lon) # nh_back = len(hback) # Define maximum hour back max_h_back = hback.values[-1] # Define full range of dates to select from the flux input date_start = time_array[0] date_start_back = date_start - np.timedelta64(max_h_back, "h") date_end = time_array[-1] +
np.timedelta64(1, "s")
numpy.timedelta64
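A minimal, self-contained sketch of the numpy.timedelta64 arithmetic the completion above relies on; the dates and the max_h_back value here are made up for illustration and are not taken from the openghg code:

import numpy as np

# Hypothetical release times and look-back window, for illustration only.
time_array = np.array(["2020-01-01T00:00", "2020-01-01T02:00"], dtype="datetime64[ns]")
max_h_back = 24  # assumed maximum hours back

date_start_back = time_array[0] - np.timedelta64(max_h_back, "h")
date_end = time_array[-1] + np.timedelta64(1, "s")
print(date_start_back, date_end)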
import numpy as np import openmesh as om from math import cos, sin, sqrt import argparse # construct the argument parse and parse the arguments ap = argparse.ArgumentParser() ap.add_argument("-d", "--debug", required=False, help="debug mode", action="store_true") ap.add_argument("-f", "--leaf", required=False, help="build leaf", action="store_true") ap.add_argument("-r", "--leafr", required=False, help="leaf sphere radius", default=10.0, type=float) ap.add_argument("-l", "--level", required=False, help="total level of tree", default=5, type=int) ap.add_argument("-w", "--width", required=False, help="trunk width", default=10, type=float) ap.add_argument("-n", "--length", required=False, help="trunk length", default=30, type=float) ap.add_argument("-i", "--rwidth", required=False, help="trunk width ratio", default=0.7, type=float) ap.add_argument("-p", "--rlength", required=False, help="trunk length ration", default=0.7, type=float) ap.add_argument("-t", "--delta", required=False, help="angle between two branches", default=30, type=float) ap.add_argument("-e", "--theta1", required=False, help="angle of branch #1", default=30, type=float) ap.add_argument("-a", "--theta2", required=False, help="angle of branch #2", default=30, type=float) ap.add_argument("-o", "--output", required=False, help="output file name .obj", default="output", type=str) args = vars(ap.parse_args()) # generate icosphere leaf def generate_icosphere(mesh, radius, center): r = (1.0 + np.sqrt(5.0)) / 2.0 vertices = np.array([ [-1.0, r, 0.0], [ 1.0, r, 0.0], [-1.0, -r, 0.0], [ 1.0, -r, 0.0], [0.0, -1.0, r], [0.0, 1.0, r], [0.0, -1.0, -r], [0.0, 1.0, -r], [ r, 0.0, -1.0], [ r, 0.0, 1.0], [ -r, 0.0, -1.0], [ -r, 0.0, 1.0], ], dtype=float) length = np.linalg.norm(vertices, axis=1).reshape((-1, 1)) vertices = vertices / length * radius + center vv = [] for v in vertices: vv.append(mesh.add_vertex(v)) faces = np.array([ [0, 11, 5], [0, 5, 1], [0, 1, 7], [0, 7, 10], [0, 10, 11], [1, 5, 9], [5, 11, 4], [11, 10, 2], [10, 7, 6], [7, 1, 8], [3, 9, 4], [3, 4, 2], [3, 2, 6], [3, 6, 8], [3, 8, 9], [5, 4, 9], [2, 4, 11], [6, 2, 10], [8, 6, 7], [9, 8, 1], ]) for f in faces: fh0 = mesh.add_face(vv[f[0]], vv[f[1]], vv[f[2]]) # vector perpendicular to plane of p0->p1 and p1->p2 # returns the projection of p1->p3 to the plane perpendicular to p1->p2 def make_perpendicular_unit_vector(p0, p1, p3, p2): v1 = np.array([p1[0] - p0[0], p1[1] - p0[1], p1[2] - p0[2]]) v2 = np.array([p2[0] - p1[0], p2[1] - p1[1], p2[2] - p1[2]]) v3 = np.cross(v1, v2) v4 = np.array([p3[0] - p1[0], p3[1] - p1[1], p3[2] - p1[2]]) v5 = np.cross(v4, v2) v2_u = v2 / np.linalg.norm(v2) v4_u = v4 / np.linalg.norm(v4) angle = np.arccos(np.clip(np.dot(v2_u, v4_u), -1.0, 1.0)) delta = np.pi / 2 - angle rmat = rotation_matrix(v5, delta) v6 = np.dot(rmat, np.array(v4).T).T return v6 / np.linalg.norm(v6) def rotation_matrix(axis, theta): mat = np.eye(3,3) axis = axis/sqrt(np.dot(axis, axis)) a = cos(theta/2.) b, c, d = -axis*sin(theta/2.) 
return np.array([[a*a+b*b-c*c-d*d, 2*(b*c-a*d), 2*(b*d+a*c)], [2*(b*c+a*d), a*a+c*c-b*b-d*d, 2*(c*d-a*b)], [2*(b*d-a*c), 2*(c*d+a*b), a*a+d*d-b*b-c*c]]) # tree node class Node: def __init__(self, data): # edges direction check list self.edges = [] self.left = None self.right = None self.data = data def get_left(self): return self.left def get_right(self): return self.right def insert_left(self, data): if self.left is None: self.left = Node(data) def insert_right(self, data): if self.right is None: self.right = Node(data) def mesh(self, mesh, base): if len(self.edges) == 0: self.edges.append([base[1], base[2]]) self.edges.append([base[2], base[3]]) self.edges.append([base[3], base[1]]) v1 = mesh.add_vertex(mesh.point(base[1])) v2 = mesh.add_vertex(mesh.point(base[2])) v3 = mesh.add_vertex(mesh.point(base[3])) #pp1 = [mesh.point(self.data[2]), mesh.point(self.data[3]), mesh.point(self.data[4])] #pp2 = [mesh.point(base[2]), mesh.point(base[3]), mesh.point(base[4])] pp1 = [self.data[1], self.data[2], self.data[3]] #pp2 = [base[2], base[3], base[4]] pp2 = [v1, v2, v3] # find minimum distance pairs ff = self.find_min_pairs(mesh, pp1, pp2) #ff = [] #for i in [2, 3, 4]: # ii = self.find_closest_point(mesh.point(self.data[i]), pp) # ff.append([self.data[i], base[2 + ii]]) # add triangle faces #print ff[0][1], ff[1][1], ff[0][0] mesh.add_face(self.correct_edges(ff[0][1], ff[1][1], ff[0][0])) mesh.add_face(self.correct_edges(ff[1][1], ff[2][1], ff[1][0])) mesh.add_face(self.correct_edges(ff[2][1], ff[0][1], ff[2][0])) mesh.add_face(self.correct_edges(ff[2][0], ff[1][0], ff[2][1])) mesh.add_face(self.correct_edges(ff[1][0], ff[0][0], ff[1][1])) mesh.add_face(self.correct_edges(ff[0][0], ff[2][0], ff[0][1])) if self.left: self.left.mesh(mesh, self.data) if self.right: self.right.mesh(mesh, self.data) def correct_edges(self, e1, e2, e3): if [e1, e2] in self.edges or [e2, e3] in self.edges or [e3, e1] in self.edges: self.edges.append([e3, e2]) self.edges.append([e2, e1]) self.edges.append([e1, e3]) #print [e3, e2, e1] return [e3, e2, e1] else: self.edges.append([e1, e2]) self.edges.append([e2, e3]) self.edges.append([e3, e1]) #print [e1, e2, e3] return [e1, e2, e3] def find_closest_point(self, p, pp): dd = [((p[0] - p1[0]) ** 2 + (p[1] - p1[1]) ** 2 + (p[2] - p1[2]) ** 2) for p1 in pp] return np.argmin(dd) def build_branch(self, level, mesh, base, s_length, r, s_thickness, theta1, theta2, delta, leaf=False, leaf_r=0.0): a0 = np.array(base) a1 = np.array(self.data[0]) a2 = mesh.point(self.data[1]) a01 = a1 - a0 r = r * s_thickness zx = np.array([0, 0, 1]) axis1 = np.cross(a01, zx) axis2 = np.cross(axis1, a01) # make left branch rmat = rotation_matrix(axis2, theta1) a12 = np.dot(rmat, a01.T).T * s_length p0 = a1 + a12 u0 = make_perpendicular_unit_vector(a0, a1, a2, p0) * r #rmat = rotation_matrix(a12, np.pi / 6) #u0 = np.dot(rmat, np.array(u0).T).T p1 = np.array(p0) + u0 rmat = rotation_matrix(a12, np.pi * 2 / 3) u1 = np.dot(rmat, np.array(u0).T).T p2 = u1 + p0 u2 = np.dot(rmat, np.array(u1).T).T p3 = u2 + p0 v1 = mesh.add_vertex(p1) v2 = mesh.add_vertex(p2) v3 = mesh.add_vertex(p3) data2 = [p0, v1, v2, v3] self.insert_left(data2) # make right branch rmat = rotation_matrix(axis2, -theta2) a12 = np.dot(rmat, a01.T).T * s_length rmat = rotation_matrix(a01, delta) a12 = np.dot(rmat, a12.T).T p0 = a1 + a12 u0 = make_perpendicular_unit_vector(a0, a1, a2, p0) * r #rmat = rotation_matrix(a12, np.pi / 6) #u0 = np.dot(rmat, np.array(u0).T).T p1 = np.array(p0) + u0 rmat = rotation_matrix(a12, np.pi * 2 / 3) u1 = 
np.dot(rmat, np.array(u0).T).T p2 = u1 + p0 u2 = np.dot(rmat, np.array(u1).T).T p3 = u2 + p0 v1 = mesh.add_vertex(p1) v2 = mesh.add_vertex(p2) v3 = mesh.add_vertex(p3) data2 = [p0, v1, v2, v3] self.insert_right(data2) level = level - 1 if level < 1: if leaf: #print "make leaf", level generate_icosphere(mesh, leaf_r, self.left.data[0]) generate_icosphere(mesh, leaf_r, self.right.data[0]) return self.left.build_branch(level, mesh, self.data[0], s_length, r, s_thickness, theta1, theta2, delta, leaf=leaf, leaf_r=leaf_r) self.right.build_branch(level, mesh, self.data[0], s_length, r, s_thickness, theta1, theta2, delta, leaf=leaf, leaf_r=leaf_r) def find_min_pairs(self, mesh, pp1, pp2): import itertools ppp = [zip(x, range(len(pp2))) for x in itertools.permutations(range(len(pp1)),len(pp2))] dd = [] for pp in ppp: d1 = 0 for p1 in pp: i = p1[0] j = p1[1] x1 = mesh.point(pp1[i]) x2 = mesh.point(pp2[j]) d1 += np.sqrt((x1[0] - x2[0]) ** 2 + (x1[1] - x2[1]) ** 2 + (x1[2] - x2[2]) ** 2) dd.append(d1) i = np.argmin(dd) return [[pp1[i1], pp2[i2]] for i1, i2 in ppp[i]] # virtual 2-branch tree model class Tree2: def __init__(self, levels, thickness, length, scaling_length, scaling_thickness, theta1, theta2, delta, leaf=False, leaf_r=0.0): self.levels = levels self.thickness = thickness self.length = length self.base = None self.s_length = scaling_length self.s_thickness = scaling_thickness self.theta1 = theta1 self.theta2 = theta2 self.delta = delta self.node = None self.leaf = leaf self.leaf_r = leaf_r self.mesh = om.TriMesh() self.build_root() self.build_branches() def build_root(self): # make base r = self.thickness v1 = self.mesh.add_vertex([0, r, 0]) v2 = self.mesh.add_vertex([0.5 * np.sqrt(3) * r, -0.5 * r, 0]) v3 = self.mesh.add_vertex([-0.5 * np.sqrt(3) * r, -0.5 * r, 0]) self.base = [[0, 0, 0], v1, v2, v3] # make root trunk r = self.thickness * self.s_thickness h = self.length v1 = self.mesh.add_vertex([0, r, h]) v2 = self.mesh.add_vertex([0.5 * np.sqrt(3) * r, -0.5 * r, h]) v3 = self.mesh.add_vertex([-0.5 * np.sqrt(3) * r, -0.5 * r, h]) data = [[0, 0, h], v1, v2, v3] self.node = Node(data) def build_branches(self): theta1 = self.theta1 theta2 = self.theta2 delta = self.delta d = self.length * self.s_length r = self.thickness * self.s_thickness * self.s_thickness h = self.length data1 = self.node.data # initial left branch position p0 = np.array([d * np.sin(theta1), 0, h + d * np.cos(theta1)]) u0 = make_perpendicular_unit_vector(self.base[0], data1[0], self.mesh.point(data1[1]), p0) * r #rmat = rotation_matrix(p0 - np.array(data1[0]), np.pi / 6) #u0 = np.dot(rmat, np.array(u0).T).T p1 = np.array(p0) + u0 rmat = rotation_matrix(p0 - np.array(data1[0]), np.pi * 2 / 3) u1 = np.dot(rmat, np.array(u0).T).T p2 = u1 + p0 u2 = np.dot(rmat, np.array(u1).T).T p3 = u2 + p0 v1 = self.mesh.add_vertex(p1) v2 = self.mesh.add_vertex(p2) v3 = self.mesh.add_vertex(p3) data2 = [p0, v1, v2, v3] self.node.insert_left(data2) #v0 = self.mesh.add_vertex(p0) #self.node.get_left().build_branch(self.levels - 1, self.mesh, self.base[0], self.s_length, r, self.s_thickness, theta1, theta2, delta) self.node.get_left().build_branch(self.levels - 1, self.mesh, self.node.data[0], self.s_length, r, self.s_thickness, theta1, theta2, delta, leaf=self.leaf, leaf_r=self.leaf_r) # initial right brach position p0 = [-d * np.sin(theta2), 0, h + d *
np.cos(theta2)
numpy.cos
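A minimal sketch of the branch-tip geometry behind the np.cos completion above: a point placed a distance d from height h, tilted by theta from the vertical axis. The numbers and the degree-to-radian conversion are illustrative assumptions, not values from the script:

import numpy as np

d = 30.0 * 0.7          # assumed branch length (trunk length * scaling ratio)
h = 30.0                # assumed trunk length
theta = np.deg2rad(30)  # assumed branch angle, converted to radians here

p0 = np.array([d * np.sin(theta), 0.0, h + d * np.cos(theta)])
print(p0)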
# Copyright (c) Microsoft Corporation. All rights reserved. # Licensed under the MIT License. """Tests for linear_model extensions.""" import numpy as np import pytest import unittest import warnings from econml.sklearn_extensions.ensemble import SubsampledHonestForest class TestSubsampledHonestForest(unittest.TestCase): """Test SubsampledHonestForest.""" def test_y1d(self): np.random.seed(123) n = 5000 d = 5 x_grid = np.linspace(-1, 1, 10) X_test = np.hstack([x_grid.reshape(-1, 1), np.random.normal(size=(10, d - 1))]) for _ in range(3): for criterion in ['mse', 'mae']: X = np.random.normal(0, 1, size=(n, d)) y = X[:, 0] + np.random.normal(0, .1, size=(n,)) est = SubsampledHonestForest(n_estimators=100, max_depth=5, criterion=criterion, min_samples_leaf=10, verbose=0) est.fit(X, y) point = est.predict(X_test) lb, ub = est.predict_interval(X_test, alpha=0.01) np.testing.assert_allclose(point, X_test[:, 0], rtol=0, atol=.2) np.testing.assert_array_less(lb, X_test[:, 0] + .05) np.testing.assert_array_less(X_test[:, 0], ub + .05) def test_nonauto_subsample_fr(self): np.random.seed(123) n = 5000 d = 5 x_grid = np.linspace(-1, 1, 10) X_test = np.hstack([x_grid.reshape(-1, 1), np.random.normal(size=(10, d - 1))]) X = np.random.normal(0, 1, size=(n, d)) y = X[:, 0] + np.random.normal(0, .1, size=(n,)) est = SubsampledHonestForest(n_estimators=100, subsample_fr=.8, max_depth=5, min_samples_leaf=10, verbose=0) est.fit(X, y) point = est.predict(X_test) lb, ub = est.predict_interval(X_test, alpha=0.01) np.testing.assert_allclose(point, X_test[:, 0], rtol=0, atol=.2) np.testing.assert_array_less(lb, X_test[:, 0] + .05) np.testing.assert_array_less(X_test[:, 0], ub + .05) def test_y2d(self): np.random.seed(123) n = 5000 d = 5 x_grid = np.linspace(-1, 1, 10) X_test = np.hstack([x_grid.reshape(-1, 1), np.random.normal(size=(10, d - 1))]) for _ in range(3): for criterion in ['mse', 'mae']: X = np.random.normal(0, 1, size=(n, d)) y = X[:, [0, 0]] + np.random.normal(0, .1, size=(n, 2)) est = SubsampledHonestForest(n_estimators=100, max_depth=5, criterion=criterion, min_samples_leaf=10, verbose=0) est.fit(X, y) point = est.predict(X_test) lb, ub = est.predict_interval(X_test, alpha=0.01) np.testing.assert_allclose(point, X_test[:, [0, 0]], rtol=0, atol=.2) np.testing.assert_array_less(lb, X_test[:, [0, 0]] + .05)
np.testing.assert_array_less(X_test[:, [0, 0]], ub + .05)
numpy.testing.assert_array_less
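A minimal sketch of the two-sided check the paired numpy.testing.assert_array_less calls above implement: the lower bound must sit below the truth and the truth below the upper bound, each with a small slack. The interval here is made up:

import numpy as np

truth = np.linspace(-1, 1, 10)
lb, ub = truth - 0.1, truth + 0.1  # hypothetical confidence interval around the truth

np.testing.assert_array_less(lb, truth + .05)  # lower bound below truth (with slack)
np.testing.assert_array_less(truth, ub + .05)  # truth below upper bound (with slack)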
import numpy as np

from garage.core import Serializable
from garage.envs.box2d.box2d_env import Box2DEnv
from garage.envs.box2d.parser import find_body
from garage.misc import autoargs
from garage.misc.overrides import overrides


# http://mlg.eng.cam.ac.uk/pilco/
class DoublePendulumEnv(Box2DEnv, Serializable):

    @autoargs.inherit(Box2DEnv.__init__)
    def __init__(self, *args, **kwargs):
        # make sure mdp-level step is 100ms long
        kwargs["frame_skip"] = kwargs.get("frame_skip", 2)
        if kwargs.get("template_args", {}).get("noise", False):
            self.link_len = (
np.random.rand()
numpy.random.rand
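np.random.rand() draws a uniform sample from [0, 1). A hedged sketch of how such a draw could perturb a nominal link length; the noise model and nominal value are assumptions for illustration, not the garage implementation:

import numpy as np

nominal_len = 1.0  # hypothetical nominal link length
link_len = (np.random.rand() - 0.5) * 0.2 + nominal_len  # roughly +/-10% uniform noise
print(link_len)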
from deap import base
from deap import creator
from deap import cma
import numpy as np
import attr

from naturalnets.optimizers.i_optimizer import IOptimizer, registered_optimizer_classes


@attr.s(slots=True, auto_attribs=True, frozen=True, kw_only=True)
class OptimizerCmaEsDeapCfg:
    type: str
    population_size: int
    sigma: float = 1.0


class OptimizerCmaEsDeap(IOptimizer):

    def __init__(self, individual_size: int, configuration: dict):
        self.individual_size = individual_size

        config = OptimizerCmaEsDeapCfg(**configuration)

        creator.create("FitnessMax", base.Fitness, weights=(1.0,))
        creator.create("Individual", list, typecode='b', fitness=creator.FitnessMax)

        strategy = cma.Strategy(centroid=[0.0] * individual_size, sigma=config.sigma,
                                lambda_=config.population_size)

        self.toolbox = base.Toolbox()
        self.toolbox.register("generate", strategy.generate, creator.Individual)
        self.toolbox.register("update", strategy.update)

        self.population = None

    def ask(self):
        # Generate a new population
        self.population = self.toolbox.generate()

        genomes = []
        for individual in self.population:
            genomes.append(
np.array(individual)
numpy.array
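A minimal sketch of the conversion the completion above performs: DEAP's toolbox.generate() yields list-like individuals, and np.array turns each into a numeric genome vector. The population here is a stand-in, not output from the CMA-ES strategy:

import numpy as np

population = [[0.1, -0.2, 0.3], [0.0, 0.5, -0.5]]  # stand-in for toolbox.generate()
genomes = [np.array(individual) for individual in population]
print(genomes[0].shape, genomes[0].dtype)  # (3,) float64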
""" CAMERA FEATURE POINTS TRACKER USING SIFT Extracts feature points in two following images to compute the euler angles and the translation. MPSYS Project Course for HRP, group 20 """ import numpy as np import cv2 import math # Add some parameters to the SIFT-extraction. lk_params = dict(winSize=(15, 15), maxLevel=5, criteria=(cv2.TERM_CRITERIA_EPS | cv2.TERM_CRITERIA_COUNT, 10, 0.05)) feature_params = dict(maxCorners=1000, qualityLevel=0.05, minDistance=8, blockSize=8) class FeatureTracker: def __init__(self): self.track_len = 10 self.detect_interval = 5 self.tracks = [] self.frame_idx = 0 self.prev_gray = None self.euler_angles = [0, 0, 0] def isRotationMatrix(self, R): """ Checks if the rotation matrix is vaild. @param: R, rotation matrix """ Rt = np.transpose(R) shouldBeIdentity = np.dot(Rt, R) I = np.identity(3, dtype=R.dtype) n = np.linalg.norm(I - shouldBeIdentity) return n < 1e-6 def rotationMatrixToEulerAngles(self, R): """ Converts the rotation matrix to Euler angles. @param: R, rotation matrix """ assert(self.isRotationMatrix(R)) sy = math.sqrt(R[0, 0] * R[0, 0] + R[1, 0] * R[1, 0]) singular = sy < 1e-6 if not singular: x = math.atan2(R[2, 1], R[2, 2]) y = math.atan2(-R[2, 0], sy) z = math.atan2(R[1, 0], R[0, 0]) else: x = math.atan2(-R[1, 2], R[1, 1]) y = math.atan2(-R[2, 0], sy) z = 0 return np.array([x, y, z]) def smooth(self, x, window_len=10, window='blackman'): """smooth the data using a window with requested size. This method is based on the convolution of a scaled window with the signal. The signal is prepared by introducing reflected copies of the signal (with the window size) in both ends so that transient parts are minimized in the begining and end part of the output signal. input: x: the input signal window_len: the dimension of the smoothing window; should be an odd integer window: the type of window from 'flat', 'hanning', 'hamming', 'bartlett', 'blackman' flat window will produce a moving average smoothing. output: the smoothed signal example: t=linspace(-2,2,0.1) x=sin(t)+randn(len(t))*0.1 y=smooth(x) see also: numpy.hanning, numpy.hamming, numpy.bartlett, numpy.blackman, numpy.convolve scipy.signal.lfilter TODO: the window parameter could be the window itself if an array instead of a string NOTE: length(output) != length(input), to correct this: return y[(window_len/2-1):-(window_len/2)] instead of just y. """ if x.ndim != 1: pass if x.size < window_len: pass if window_len < 3: return x if not window in ['flat', 'hanning', 'hamming', 'bartlett', 'blackman']: pass s = np.r_[x[window_len - 1:0:-1], x, x[-2:-window_len - 1:-1]] # print(len(s)) if window == 'flat': # moving average w = np.ones(window_len, 'd') else: w = eval('numpy.' + window + '(window_len)') y = np.convolve(w / w.sum(), s, mode='valid') return y def run(self, curr_img): """ Computes the euler angles from two following pictures. @param: curr_img, the current image in the image stream. 
""" frame_gray = curr_img if len(self.tracks) > 0: img0, img1 = self.prev_gray, frame_gray p0 = np.float32([tr[-1] for tr in self.tracks]).reshape(-1, 1, 2) p1, st, err = cv2.calcOpticalFlowPyrLK(img0, img1, p0, None, **lk_params) p0r, st, err = cv2.calcOpticalFlowPyrLK(img1, img0, p1, None, **lk_params) d = abs(p0-p0r).reshape(-1, 2).max(-1) good = d < 1 new_tracks = [] for tr, (x, y), good_flag in zip(self.tracks, p1.reshape(-1, 2), good): if not good_flag: continue tr.append((x, y)) if len(tr) > self.track_len: del tr[0] new_tracks.append(tr) self.tracks = new_tracks if self.frame_idx % self.detect_interval == 0: mask = np.zeros_like(frame_gray) mask[:] = 255 p = cv2.goodFeaturesToTrack(frame_gray, mask=mask, **feature_params) if p is not None: for x, y in np.float32(p).reshape(-1, 2): self.tracks.append([(x, y)]) # The calibrated camera parameters, has to be integers. K = np.array([[993, 0, 673], [0, 990, 455], [0, 0, 1]]) # Begin the calculations when two of more frames are received. if self.frame_idx >= 2: # Extract the feature points that is used to compute the homography. new_points = [] old_points = [] new_points_1 = [] old_points_1 = [] for i in range(len(self.tracks)): try: vec1 = list(self.tracks[i][-1]) vec1.append(1) new_points.append(vec1) new_points_1.append(self.tracks[i][-1]) vec2 = list(self.tracks[i][-2]) vec2.append(1) old_points.append(vec2) old_points_1.append(self.tracks[i][-2]) except IndexError: continue new_points = np.array(new_points) old_points = np.array(old_points) new_points_1 = np.array(new_points_1) old_points_1 = np.array(old_points_1) try: # Extract M from the two different sets of points. M, mask = cv2.findHomography(old_points_1, new_points_1, cv2.RANSAC, 1.0) except: pass try: # Compute the rotation and translation using the M and K matrix. _, Rs, Ts, Ns = cv2.decomposeHomographyMat(M, K) # Crate a list for counting the negative depth of 3D points in each direction. neg_pts = [] old_points = np.matmul(np.invert(K), np.transpose(old_points)) new_points = np.matmul(
np.invert(K)
numpy.invert
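One point worth flagging: numpy.invert is an element-wise bitwise NOT (defined for integer and boolean arrays), not a matrix inverse. If the intent in the snippet above is to normalise pixel coordinates by the calibration matrix K, the linear-algebra inverse np.linalg.inv(K) is the usual call. A small sketch contrasting the two:

import numpy as np

K = np.array([[993, 0, 673], [0, 990, 455], [0, 0, 1]])
print(np.invert(K))          # bitwise NOT of each integer entry, e.g. 993 -> -994
print(np.linalg.inv(K) @ K)  # approximately the 3x3 identity matrix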
import numpy as np import scipy.io import Shared_Exp_Beh as seb import os.path as op import pytest data_path = op.join(seb.__path__[0], 'data/') def test_beh_analysis(): """ :return: Test results raise error """ # Test if the size of all variables of the experiment is same file_directory = data_path subject_list = ['behav_Shared_ARSubNum21'] beh_vars = seb.var_extractor(file_directory, subject_list) assert beh_vars[0]["conf_val"].shape == beh_vars[0]["conf_val"].shape == beh_vars[0]["get_rew"].shape == \ beh_vars[0]["rew_val"].shape == beh_vars[0]["sub_rt"].shape == beh_vars[0]["att_first"].shape == \ beh_vars[0]["num_tar_att"].shape # Tests of stay, winstay, and loseswitch cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert seb.Behavior.performance(cor_vec) == float(0) cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) assert seb.Behavior.performance(cor_vec) == float(100) cor_vec = np.array([1, 0, 1, 0, 1, 0, 1, 0, 1, 0]) assert seb.Behavior.performance(cor_vec) == float(50) pre_cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert seb.Behavior.prob_stay(cor_vec, pre_cor_vec) == float(1) pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) assert seb.Behavior.prob_stay(cor_vec, pre_cor_vec) == float(1) pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert seb.Behavior.prob_stay(cor_vec, pre_cor_vec) == float(0) # when all the trials are correct LoseSwitch should be nan # when all the trials are wrong WinStay should be nan pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1]) cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0]) assert seb.Behavior.prob_winstay(cor_vec, pre_cor_vec) == float(0) assert np.isnan(seb.Behavior.prob_loseswitch(cor_vec, pre_cor_vec)) pre_cor_vec =
np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
numpy.array
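A hedged illustration (not the seb.Behavior implementation) of why the asserts above expect a win-stay probability of 0 and a NaN lose-switch when every previous trial was correct and every current trial is wrong: win-stay conditions on previous wins, while lose-switch conditions on previous losses, of which there are none here:

import numpy as np

pre_cor_vec = np.array([1, 1, 1, 1, 1, 1, 1, 1, 1, 1])
cor_vec = np.array([0, 0, 0, 0, 0, 0, 0, 0, 0, 0])

wins = pre_cor_vec == 1
prob_winstay = np.nan if wins.sum() == 0 else (cor_vec[wins] == 1).mean()
print(prob_winstay)  # 0.0 here; it would be NaN if there were no previous wins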
import argparse, random, os, glob
import numpy as np
import torch
import torch.nn as nn

from dgl_model.gin_all import GIN
from dgl_model.gcn_all import GCN, GCN_dp
from torch.utils.data import DataLoader
from ogb.graphproppred.dataset_dgl import DglGraphPropPredDataset, collate_dgl
from ogb.graphproppred import Evaluator


def set_seed(seed):
    random.seed(seed)
np.random.seed(seed)
numpy.random.seed
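The prompt above cuts off inside set_seed; a common pattern for such a helper (an assumption about the rest of the body, since it is not shown) seeds the Python, NumPy and PyTorch generators together:

import random

import numpy as np
import torch

def set_seed(seed: int) -> None:
    # Seed all RNGs used by the training script for reproducibility.
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)

set_seed(42)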
''' Based on https://www.mattkeeter.com/projects/contours/ and http://www.iquilezles.org/ ''' import numpy as np from util import * import matplotlib.pyplot as plt import matplotlib.patches as patches fig1 = plt.figure() ax1 = fig1.add_subplot(111, aspect='equal') ax1.set_xlim([0, 1]) ax1.set_ylim([0, 1]) class ImplicitObject: def __init__(self, implicit_lambda_function): self.implicit_lambda_function = implicit_lambda_function def eval_point(self, two_d_point): assert two_d_point.shape == (2, 1) # not allow vectorize yet value = self.implicit_lambda_function(two_d_point[0][0], two_d_point[1][0]) return value; def is_point_inside(self, two_d_point): assert two_d_point.shape == (2, 1), "two_d_point format incorrect, {}".format(two_d_point) value = self.eval_point(two_d_point) if value <= 0: return True else: return False def union(self, ImplicitObjectInstance): return ImplicitObject(lambda x, y: min( self.eval_point(np.array([[x], [y]])), ImplicitObjectInstance.eval_point(np.array([[x], [y]])) )) def intersect(self, ImplicitObjectInstance): return ImplicitObject(lambda x, y: max( self.eval_point(np.array([[x], [y]])), ImplicitObjectInstance.eval_point(np.array([[x], [y]])) )) def negate(self): return ImplicitObject(lambda x, y: -1 * self.eval_point(np.array([[x], [y]]))) def substraction(self, ImplicitObjectInstance): # substraction ImplicitObjectInstance from self return self.intersect(ImplicitObjectInstance.negate()) # distance deformations # http://www.iquilezles.org/www/articles/smin/smin.htm # exponential smooth min (k = 32); # float smin( float a, float b, float k ) # { # float res = exp( -k*a ) + exp( -k*b ); # return -log( res )/k; # } # http://www.iquilezles.org/www/articles/smin/smin.htm # You must be carefull when using distance transformation functions, # as the field created might not be a real distance function anymore. # You will probably need to decrease your step size, # if you are using a raymarcher to sample this. # The displacement example below is using sin(20*p.x)*sin(20*p.y)*sin(20*p.z) as displacement pattern, # but you can of course use anything you might imagine. # As for smin() function in opBlend(), please read the smooth minimum article in this same site. 
def exponential_smooth_union(self, ImplicitObjectInstance): def smin(a, b, smooth_parameter = 32): res = np.exp( -smooth_parameter*a ) + np.exp( -smooth_parameter*b ); return -np.log(res)/smooth_parameter return ImplicitObject(lambda x, y: smin( self.eval_point(np.array([[x], [y]])), ImplicitObjectInstance.eval_point(np.array([[x], [y]])) )) # // polynomial smooth min (k = 0.1); # float smin( float a, float b, float k ) # { # float h = clamp( 0.5+0.5*(b-a)/k, 0.0, 1.0 ); # return mix( b, a, h ) - k*h*(1.0-h); # } def polynomial_smooth_union(self, ImplicitObjectInstance): def smin(a, b, smooth_parameter = 0.1): h = clamp(0.5+0.5*(b-a)/smooth_parameter, 0.0, 1.0 ) return mix( b, a, h ) - smooth_parameter*h*(1.0-h); return ImplicitObject(lambda x, y: smin( self.eval_point(np.array([[x], [y]])), ImplicitObjectInstance.eval_point(np.array([[x], [y]])) )) # cannot make it work # // power smooth min (k = 8); # float smin( float a, float b, float k ) # { # a = pow( a, k ); b = pow( b, k ); # return pow( (a*b)/(a+b), 1.0/k ); # } # def power_smooth_union(self, ImplicitObjectInstance): # def smin(a, b, smooth_parameter=3): # print("---------------------") # print(type(a)) # print(type(b)) # a = pow( a, smooth_parameter ) # b = pow( b, smooth_parameter ) # return np.log(a +) # # print(pow( (a*b)/(a+b), 1.0/smooth_parameter )) # # return pow( (a*b)/(a+b), 1.0/smooth_parameter ) # return ImplicitObject(lambda x, y: smin( # self.eval_point(np.array([[x], [y]])), # ImplicitObjectInstance.eval_point(np.array([[x], [y]])) # )) # distance deformations # float opDisplace( vec3 p ) # { # float d1 = primitive(p); # float d2 = displacement(p); # return d1+d2; # } def displace(self, frequency, scale): def displacement(x, y, frequency, scale): return self.eval_point(np.array([[x], [y]])) + (np.sin(frequency*x)*np.sin(frequency*y))/scale return ImplicitObject(lambda x, y: displacement(x, y, frequency, scale)) # domain deformations def derivative_at_point(self, two_d_point, epsilon = 0.001): assert two_d_point.shape == (2, 1), 'wrong data two_d_point {}'.format(two_d_point) x = two_d_point[0][0] y = two_d_point[1][0] dx = self.eval_point(np.array([[x + epsilon], [y]])) - self.eval_point(np.array([[x - epsilon], [y]])) dy = self.eval_point(np.array([[x], [y + epsilon]])) - self.eval_point(np.array([[x], [y - epsilon]])) length = np.sqrt(dx**2 + dy**2) if length <= epsilon: print('dodgy: probably error') print(two_d_point) print(dx) print(dy) print(self.eval_point(np.array([[x + epsilon], [y]]))) print(self.eval_point(np.array([[x - epsilon], [y]]))) print(self.eval_point(np.array([[x], [y + epsilon]]))) print(self.eval_point(np.array([[x], [y - epsilon]])) ) return np.array([[0],[0]]) else: assert length >= epsilon, \ 'length {} if less than epislon {} check dx {} dy {} two_d_point {}'.format( length, epsilon, dx, dy, two_d_point ) return np.array([[dx / length],[dy / length]]) def visualize_bitmap(self, xmin, xmax, ymin, ymax, num_points=200): self.visualize(xmin, xmax, ymin, ymax, 'bitmap', num_points) def visualize_distance_field(self, xmin, xmax, ymin, ymax, num_points=200): self.visualize(xmin, xmax, ymin, ymax, 'distance_field', num_points) def visualize(self, xmin, xmax, ymin, ymax, visualize_type = 'bitmap', num_points=200): assert xmin!=xmax, "incorrect usage xmin == xmax" assert ymin!=ymax, "incorrect usage ymin == ymax" assert visualize_type in ['bitmap', 'distance_field'], \ 'visualize_type should be either bitmap or distance_field, but not {}'.format(visualize_type) visualize_matrix = 
np.empty((num_points, num_points)); import matplotlib.pyplot as plt x_linspace = np.linspace(xmin, xmax, num_points) y_linspace = np.linspace(ymin, ymax, num_points) for x_counter in range(len(x_linspace)): for y_counter in range(len(y_linspace)): x = x_linspace[x_counter] y = y_linspace[y_counter] if visualize_type == 'bitmap': visualize_matrix[x_counter][y_counter] = \ not self.is_point_inside(np.array([[x],[y]])) # for mapping the color of distance_field elif visualize_type == 'distance_field': visualize_matrix[x_counter][y_counter] = self.eval_point(np.array([[x],[y]])) else: raise ValueError('Unknown visualize_type -> {}'.format(visualize_type)) visualize_matrix = np.rot90(visualize_matrix) assert(visualize_matrix.shape == (num_points, num_points)) fig = plt.figure() ax = fig.add_subplot(111) cax = ax.matshow(visualize_matrix, cmap=plt.cm.gray) plt.show() # TODO: label on x, y axis # In[12]: class ImplicitCircle(ImplicitObject): def __init__(self, x0, y0, r0): self.implicit_lambda_function = lambda x, y: np.sqrt((x - x0)**2 + (y - y0)**2) - r0 # In[15]: class Left(ImplicitObject): def __init__(self, x0): self.implicit_lambda_function = lambda x, _: x - x0 class Right(ImplicitObject): def __init__(self, x0): self.implicit_lambda_function = lambda x, _: x0 - x class Lower(ImplicitObject): def __init__(self, y0): self.implicit_lambda_function = lambda _, y: y - y0 class Upper(ImplicitObject): def __init__(self, y0): self.implicit_lambda_function = lambda _, y: y0 - y # In[17]: class ImplicitRectangle(ImplicitObject): def __init__(self, xmin, xmax, ymin, ymax): assert xmin!=xmax, "incorrect usage xmin == xmax" assert ymin!=ymax, "incorrect usage ymin == ymax" # right xmin ∩ left xmax ∩ upper ymin ∩ lower ymax self.implicit_lambda_function = (Right(xmin).intersect(Left(xmax)).intersect(Upper(ymin)).intersect(Lower(ymax))).implicit_lambda_function class ImplicitFailureStar(ImplicitObject): # http://www.iquilezles.org/live/index.htm def __init__(self, inner_radius, outer_radius, frequency, x0=0, y0=0): self. implicit_lambda_function = \ lambda x, y:inner_radius + outer_radius*np.cos(np.arctan2(y,x)*frequency) class ImplicitStar(ImplicitObject): # http://www.iquilezles.org/live/index.htm def __init__(self, inner_radius, outer_radius, frequency, x0=0, y0=0): self. implicit_lambda_function = \ lambda x, y: ImplicitStar.smoothstep( inner_radius + outer_radius*np.cos(np.arctan2(y - x0, x - y0)*frequency), inner_radius + outer_radius*np.cos(np.arctan2(y - x0, x - y0)*frequency) + 0.01, np.sqrt((x - x0)**2 + (y - y0)**2) ) @staticmethod def smoothstep(edge0, edge1, x): # https://en.wikipedia.org/wiki/Smoothstep # float smoothstep(float edge0, float edge1, float x) # { # // Scale, bias and saturate x to 0..1 range # x = clamp((x - edge0)/(edge1 - edge0), 0.0, 1.0); # // Evaluate polynomial # return x*x*(3 - 2*x); # } x = clamp((x - edge0)/(edge1 -edge0), 0.0, 1.0) return x*x*(3-2*x) class ImplicitTree(ImplicitStar): # http://www.iquilezles.org/live/index.htm def __init__(self, inner_radius=0.2, outer_radius=0.1, frequency=10, x0=0.4, y0=0.5): self.inner_radius = inner_radius self.outer_radius = outer_radius self.frequency = frequency self.x0 = x0 self.y0 = y0 self. 
implicit_lambda_function = self.implicit_lambda_function def implicit_lambda_function(self, x, y): local_x = x - self.x0 local_y = y - self.y0 r = self.inner_radius + self.outer_radius*np.cos(np.arctan2(local_y, local_x)*self.frequency + 20*local_x + 1) result = ImplicitStar.smoothstep(r, r + 0.01,np.sqrt(local_x**2 + local_y**2)) r = 0.015 r += 0.002 * np.cos(120.0 * local_y) r += np.exp(-20.0 * y) result *= 1.0 - (1.0 - ImplicitStar.smoothstep(r, r + 1, abs(local_x+ 0.2*np.sin(2.0 *local_y)))) * \ (1.0 - ImplicitStar.smoothstep(0.0, 0.1, local_y)) return result # In[19]: # h = (rectangle (0.1, 0.1) (0.25, 0.9) ∪ # rectangle (0.1, 0.1) (0.6, 0.35) ∪ # circle (0.35, 0.35) 0.25) ∩ inv # (circle (0.35, 0.35) 0.1 ∪ # rectangle (0.25, 0.1) (0.45, 0.35)) # i = rectangle (0.75, 0.1) (0.9, 0.55) ∪ # circle (0.825, 0.75) 0.1 class Tree: def __init__(self, tree_or_cell_0, tree_or_cell_1, tree_or_cell_2, tree_or_cell_3): assert (isinstance(tree_or_cell_0, Tree) | isinstance(tree_or_cell_0, Cell)) & (isinstance(tree_or_cell_1, Tree) | isinstance(tree_or_cell_1, Cell)) & (isinstance(tree_or_cell_2, Tree) | isinstance(tree_or_cell_2, Cell)) & (isinstance(tree_or_cell_3, Tree) | isinstance(tree_or_cell_3, Cell)) self.tree_or_cell_0 = tree_or_cell_0 self.tree_or_cell_1 = tree_or_cell_1 self.tree_or_cell_2 = tree_or_cell_2 self.tree_or_cell_3 = tree_or_cell_3 class Cell: def __init__(self, xmin, xmax, ymin, ymax, cell_type): assert xmin!=xmax, "incorrect usage xmin == xmax" assert ymin!=ymax, "incorrect usage ymin == ymax" assert cell_type in ['Empty', 'Full', 'Leaf', 'Root', 'NotInitialized'] self.xmin = xmin self.xmax = xmax self.ymin = ymin self.ymax = ymax self.xmid = (xmin + xmax)/2 self.ymid = (ymin + ymax)/2 self.cell_type = cell_type ''' 2 -- 3 | | 0 -- 1 ''' self.point_0 = np.array([[xmin],[ymin]]) self.point_1 = np.array([[xmax],[ymin]]) self.point_2 = np.array([[xmin],[ymax]]) self.point_3 = np.array([[xmax],[ymax]]) if self.is_Root(): self.to_Root() def xmin_xmax_ymin_ymax(self): return [self.xmin, self.xmax, self.ymin, self.ymax] def to_Root(self): assert self.cell_type != 'Root' self.cell_type = 'Root' self.cell0 = Cell(self.xmin, self.xmid, self.ymin, self.ymid, 'NotInitialized') self.cell1 = Cell(self.xmid, self.xmax, self.ymin, self.ymid, 'NotInitialized') self.cell2 = Cell(self.xmin, self.xmid, self.ymid, self.ymax, 'NotInitialized') self.cell3 = Cell(self.xmid, self.xmax, self.ymid, self.ymax, 'NotInitialized') def to_Empty(self): assert self.is_Root() self.cell_type = 'Empty' del self.cell0 del self.cell1 del self.cell2 del self.cell3 def to_Full(self): assert self.is_Root() self.cell_type = 'Full' del self.cell0 del self.cell1 del self.cell2 del self.cell3 def to_Leaf(self): assert self.is_Root() self.cell_type = 'Leaf' del self.cell0 del self.cell1 del self.cell2 del self.cell3 def check_not_initialized_exists(self): # raise Error if NotInitialized exists if self.is_Root(): self.cell0.check_not_initialized_exists() self.cell1.check_not_initialized_exists() self.cell2.check_not_initialized_exists() self.cell3.check_not_initialized_exists() else: if self.is_NotInitialized(): raise ValueError('cell should not be as cell_type {}'.format(self.cell_type)) else: pass def check_Leaf_exists(self): # raise Error if NotInitialized exists if self.is_Root(): self.cell0.check_Leaf_exists() self.cell1.check_Leaf_exists() self.cell2.check_Leaf_exists() self.cell3.check_Leaf_exists() else: if self.is_Leaf(): raise ValueError('cell should not be as cell_type {}'.format(self.cell_type)) else: pass def 
eval_type(self, implicit_object_instance): assert self.is_NotInitialized(), 'this function is only called when the cell type is not initialized' is_point0_inside = implicit_object_instance.is_point_inside(self.point_0) is_point1_inside = implicit_object_instance.is_point_inside(self.point_1) is_point2_inside = implicit_object_instance.is_point_inside(self.point_2) is_point3_inside = implicit_object_instance.is_point_inside(self.point_3) if ((is_point0_inside is True) & (is_point1_inside is True) & (is_point2_inside is True) & (is_point3_inside is True) ): self.cell_type = 'Full' elif ( (is_point0_inside is False) & (is_point1_inside is False) & (is_point2_inside is False) & (is_point3_inside is False) ): self.cell_type = 'Empty' else: # print('to Leaf') self.cell_type = 'Leaf' def add_marching_cude_points(self, edge_vectice_0, edge_vectice_1, mc_connect_indicator): assert self.is_Leaf() # self.edge_vectice_0 = edge_vectice_0 # self.edge_vectice_1 = edge_vectice_1 try: self.edge_vectices.append((edge_vectice_0, edge_vectice_1, mc_connect_indicator)) except AttributeError: self.edge_vectices = [(edge_vectice_0, edge_vectice_1, mc_connect_indicator)] def get_first_indicator(self): assert self.is_Leaf() assert len(self.edge_vectices) >= 1 return self.edge_vectices[0][2] def get_second_indicator(self): assert self.is_Leaf() assert len(self.edge_vectices) >= 1 print(self.edge_vectices[1]) print(self.edge_vectices[1][0]) print(self.edge_vectices[1][1]) return self.edge_vectices[1][2] def get_marching_cude_points(self): # remove the edges = raise NotImplementedError def debug_print(self, counter): counter += 1 if self.cell_type in ['Full', 'Empty', 'Leaf', 'NotInitialized']: # print(counter) pass else: self.cell0.debug_print(counter) self.cell1.debug_print(counter) self.cell2.debug_print(counter) self.cell3.debug_print(counter) def visualize(self, ax1): if self.cell_type in ['Empty', 'Full', 'Leaf', 'NotInitialized']: if self.is_Empty(): color = 'grey' elif self.is_Full(): color = 'black' elif self.is_Leaf(): color = 'green' elif self.is_NotInitialized(): color = 'red' else: raise ValueError('cell should not be as cell_type {}'.format(self.cell_type)) ax1.add_patch( patches.Rectangle( (self.xmin, self.ymin), # (x,y) self.xmax - self.xmin, # width self.ymax - self.ymin, # height edgecolor = color, facecolor = 'white' ) ) elif self.is_Root(): self.cell0.visualize(ax1) self.cell1.visualize(ax1) self.cell2.visualize(ax1) self.cell3.visualize(ax1) else: raise ValueError('cell should not be as cell_type {}'.format(self.cell_type)) def print_type(self): if not self.is_Root(): print(self.cell_type) else: print('Root') self.cell0.print_type() self.cell1.print_type() self.cell2.print_type() self.cell3.print_type() def initialise_cell_type(self, implicit_object_instance): if self.is_Root(): self.cell0.initialise_cell_type(implicit_object_instance) self.cell1.initialise_cell_type(implicit_object_instance) self.cell2.initialise_cell_type(implicit_object_instance) self.cell3.initialise_cell_type(implicit_object_instance) elif self.is_NotInitialized(): self.eval_type(implicit_object_instance) else: raise ValueError('There should not be any other \ cell_type when calling this function -> {}'.format(self.cell_type)) def bisection(self, two_points_contain_vectice, implicit_object_instance, epsilon = 0.0001): ''' not considering the orientation left_or_right ''' assert len(two_points_contain_vectice[0]) == 2, "two_points_contain_vectice[0] wrong format, {}".format(two_points_contain_vectice[0]) assert 
len(two_points_contain_vectice[1]) == 2, "two_points_contain_vectice[0] wrong format, {}".format(two_points_contain_vectice[0]) assert isinstance(two_points_contain_vectice, np.ndarray), 'two_points_contain_vectice has wrong type {}'.format(type(two_points_contain_vectice)) #two_points_contain_vectice = [[[ 0.125 ] # [ 0.09375]] # [[ 0.125 ] # [ 0.125 ]]] edge0_x = two_points_contain_vectice[0][0][0] edge0_y = two_points_contain_vectice[0][1][0] edge1_x = two_points_contain_vectice[1][0][0] edge1_y = two_points_contain_vectice[1][1][0] # print(edge0_x) # print(edge0_y) # print(edge1_x) # print(edge1_y) is_edge0_inside = implicit_object_instance.is_point_inside(np.array([[edge0_x], [edge0_y]])) is_edge1_inside = implicit_object_instance.is_point_inside(np.array([[edge1_x], [edge1_y]])) # TODO: find a assert to make sure two_points_contain_vectice are not the same assert is_edge0_inside != is_edge1_inside,\ 'it cannot be both points {} {}'.format( is_edge0_inside, is_edge1_inside ) edge_xmid = (edge1_x + edge0_x)/2 edge_ymid = (edge1_y + edge0_y)/2 if np.sqrt((edge1_x - edge0_x)**2 + (edge1_y - edge0_y)**2) <= epsilon: return (edge_xmid, edge_ymid) is_edge_mid_inside = implicit_object_instance.is_point_inside(np.array([[edge_xmid], [edge_ymid]])) if is_edge_mid_inside is not is_edge0_inside: return self.bisection(np.array([[[edge0_x], [edge0_y]],[[edge_xmid], [edge_ymid]]]), implicit_object_instance, epsilon = 0.01) elif is_edge_mid_inside is not is_edge1_inside: return self.bisection(np.array([[[edge1_x], [edge1_y]],[[edge_xmid], [edge_ymid]]]), implicit_object_instance, epsilon = 0.01) else: raise ValueError @staticmethod def mc_two_one_connect_h(two_vertice_first, two_type_first, two_vertice_second, two_type_second, one_vertice, one_type): # connect left to right two to one assert 'left' in one_type, 'left not in one_type {}'.format(one_type) if 'left' in one_type[0]: if two_type_first == ['bottom', 'right']: return np.array([two_vertice_first, one_vertice]) elif two_type_second == ['bottom', 'right']: return np.array([two_vertice_second, one_vertice]) else: raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second)) elif 'left' in one_type[1]: if two_type_first == ['right', 'top']: return np.array([two_vertice_first, one_vertice]) elif two_type_second == ['right', 'top']: return np.array([two_vertice_second, one_vertice]) else: raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second)) else: raise ValueError @staticmethod def mc_one_two_connect_h(one_vertice, one_type, two_vertice_first, two_type_first, two_vertice_second, two_type_second): # connect left to right one to two assert 'right' in one_type, 'right not in one_type {}'.format(one_type) if 'right' in one_type[0]: if two_type_first == ['top', 'left']: return np.array([one_vertice, two_vertice_first]) elif two_type_second == ['top', 'left']: return np.array([one_vertice, two_vertice_second]) else: raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second)) elif 'right' in one_type[1]: if two_type_first == ['left', 'bottom']: return np.array([one_vertice, two_vertice_first]) elif two_type_second == ['left', 'bottom']: return np.array([one_vertice, two_vertice_second]) else: raise ValueError('two_type_first {}, two_type_second {}'.format(two_type_first, two_type_second)) else: raise ValueError @staticmethod def mc_two_two_connect_h(two_vertice_l_first, two_type_l_first, two_vertice_l_second, two_type_l_second, 
two_vertice_r_first, two_type_r_first, two_vertice_r_second, two_type_r_second): # connect left to right two to two ''' not tested ''' if two_type_l_first == ('top', 'left'): assert two_type_l_second == ('bottom', 'right') if two_type_r_first == ('bottom', 'left'): return np.array([two_type_l_second, two_vertice_r_first]) elif two_type_r_second == ('bottom', 'left'): return np.array([two_type_l_second, two_vertice_r_second]) else: raise ValueError elif two_type_l_first == ('bottom', 'right'): assert two_type_l_second == ('top', 'left') if two_type_r_first == ('bottom', 'left'): return np.array([two_vertice_l_first, two_vertice_r_first]) elif two_type_r_second == ('bottom', 'left'): return np.array([two_vertice_l_first, two_vertice_r_second]) else: raise ValueError elif two_type_l_first == ('left', 'bottom'): assert two_type_l_second == ('right', 'top') if two_type_r_first == ('top', 'left'): return np.array([two_type_l_second, two_type_r_first]) elif two_type_r_second == ('top', 'left'): return np.array([two_type_l_second, two_type_r_second]) else: return ValueError elif two_type_l_first == ('right', 'top'): assert two_type_l_second == ('left', 'bottom') if two_type_r_first == ('top', 'left'): return np.array([two_type_l_first, two_type_r_first]) elif two_type_r_second == ('top', 'left'): return np.array([two_type_l_first, two_type_r_second]) else: return ValueError else: raise ValueError @staticmethod def mc_two_one_connect_v(two_vertice_first, two_type_first, two_vertice_second, two_type_second, one_vertice, one_type): # connect top to bottom two to one assert 'top' in one_type, 'bottom not in one_type {}'.format(one_type) if 'top' in one_type[0]: # TODO: why not == instead of in if two_type_first == ['left', 'bottom']: return np.array([two_vertice_first, one_vertice]) elif two_type_second == ['left', 'bottom']: return np.array([two_type_second, one_vertice]) else: raise ValueError elif 'top' in one_type[1]: if two_type_first == ['bottom', 'right']: return np.array([two_vertice_first, one_vertice]) elif two_type_second == ['bottom', 'right']: return np.array([two_type_second, one_vertice]) else: raise ValueError else: raise ValueError @staticmethod def mc_one_two_connect_v(one_vertice, one_type, two_vertice_first, two_type_first, two_vertice_second, two_type_second): # connect top to bottom two to one assert 'bottom' in one_type if 'bottom' in one_type[0]: # TODO: why not == instead of in if two_type_first == ['right', 'top']: return np.array([one_vertice, two_vertice_first]) elif two_type_second == ['right', 'top']: return np.array([one_vertice, two_vertice_second]) else: raise ValueError elif 'bottom' in one_type[1]: if two_type_first == ['top', 'left']: return np.array([one_vertice, two_vertice_first]) elif two_type_second == ['top', 'left']: return np.array([one_vertice, two_vertice_second]) else: raise ValueError else: raise ValueError @staticmethod def mc_two_two_connect_v(two_vertice_t_first, two_type_t_first, two_vertice_t_second, two_type_t_second, two_vertice_b_first, two_type_b_first, two_vertice_b_second, two_type_b_second): ''' not tested ''' if two_type_t_first == ['top', 'left']: assert two_type_t_second == ['bottom', 'right'] if two_type_b_first == ['right', 'top']: return np.array([two_type_t_second, two_type_b_first]) elif two_type_b_second == ['right', 'top']: return np.array([two_type_t_second, two_type_b_second]) else: raise ValueError elif two_type_t_first == ['bottom', 'right']: assert two_type_t_second == ['top', 'left'] if two_type_b_first == ['right', 'top']: 
return np.array([two_type_t_first, two_type_b_first]) elif two_type_b_second == ['right', 'top']: return np.array([two_type_t_first, two_type_b_second]) else: raise ValueError elif two_type_t_first == ['right', 'top']: assert two_type_t_second == ['left', 'bottom'] if two_type_b_first == ['top', 'left']: return np.array([two_type_t_second, two_type_b_first]) elif two_type_b_second == ['top', 'left']: return np.array([two_type_t_second, two_type_b_second]) else: raise ValueError elif two_type_t_first == ['left', 'bottom']: assert two_type_t_second == ['right', 'top'] if two_type_b_first == ['top', 'left']: return np.array([two_type_t_first, two_type_b_first]) elif two_type_b_second == ['top', 'left']: return np.array([two_type_t_first, two_type_b_second]) else: raise ValueError else: raise ValueError def marching_cube(self, implicit_object_instance, edges): def find_add_mc_edges_vertices(self, points_for_edges, edges): two_points_contain_vectice_0 = np.array(points_for_edges[0]) two_points_contain_vectice_1 = np.array(points_for_edges[1]) mc_connect_indicator = np.array(points_for_edges[2]) edge_vectice_0 = self.bisection(two_points_contain_vectice_0, implicit_object_instance) edge_vectice_1 = self.bisection(two_points_contain_vectice_1, implicit_object_instance) self.add_marching_cude_points(edge_vectice_0, edge_vectice_1, mc_connect_indicator) edges.append([edge_vectice_0, edge_vectice_1]) return [edge_vectice_0, edge_vectice_1] self.check_not_initialized_exists() # self testing # 0 to the left, 1 to the right marching_cube_edge_indicator = { (False, False, False, False): np.array([]), (False, False, True, False): np.array([[self.point_0, self.point_2],[self.point_2, self.point_3], [('top','left')]]), (False, False, False, True): np.array([[self.point_1, self.point_3],[self.point_2, self.point_3], [('right','top')]]), (False, False, True, True): np.array([[self.point_0, self.point_2],[self.point_1, self.point_3], [('right','left')]]), (False, True, False, False): np.array([[self.point_0, self.point_1],[self.point_1, self.point_3], [('bottom','right')]]), (False, True, True, False): np.array([[self.point_0, self.point_1],[self.point_1, self.point_3], [('bottom','right')], [self.point_0, self.point_2],[self.point_2, self.point_3], [('top','left')]]), (False, True, False, True): np.array([[self.point_0, self.point_1], [self.point_2, self.point_3], [('bottom','top')]]), (False, True, True, True): np.array([[self.point_0, self.point_1], [self.point_0, self.point_2], [('bottom','left')]]), (True, False, False, False): np.array([[self.point_0, self.point_1], [self.point_0, self.point_2], [('left','bottom')]]), (True, False, True, False): np.array([[self.point_0, self.point_1], [self.point_2, self.point_3], [('top','bottom')]]), (True, False, False, True): np.array([[self.point_0, self.point_1],[self.point_0, self.point_2], [('left','bottom')], [self.point_1, self.point_3],[self.point_2, self.point_3], [('right','top')]]), (True, False, True, True): np.array([[self.point_0, self.point_1],[self.point_1, self.point_3], [('right','bottom')]]), (True, True, False, False): np.array([[self.point_0, self.point_2],[self.point_1, self.point_3], [('left','right')]]), (True, True, True, False): np.array([[self.point_1, self.point_3],[self.point_2, self.point_3], [('top','right')]]), (True, True, False, True): np.array([[self.point_0, self.point_2],[self.point_2, self.point_3], [('left', 'top')]]), (True, True, True, True):np.array([]) } if self.is_Leaf(): # repeated code is_point0_inside = 
implicit_object_instance.is_point_inside(self.point_0) is_point1_inside = implicit_object_instance.is_point_inside(self.point_1) is_point2_inside = implicit_object_instance.is_point_inside(self.point_2) is_point3_inside = implicit_object_instance.is_point_inside(self.point_3) points_for_edges = marching_cube_edge_indicator[(is_point0_inside, is_point1_inside, is_point2_inside, is_point3_inside)] if len(points_for_edges) == 0: pass elif len(points_for_edges) == 3: # one edge # two_points_contain_vectice_0 = np.array(points_for_edges[0]) # two_points_contain_vectice_1 = np.array(points_for_edges[1]) # edge_vectice_0 = self.bisection(two_points_contain_vectice_0, implicit_object_instance) # edge_vectice_1 = self.bisection(two_points_contain_vectice_1, implicit_object_instance) # self.add_marching_cude_points(edge_vectice_0, edge_vectice_1) # edges.append([edge_vectice_0, edge_vectice_1]) find_add_mc_edges_vertices(self, points_for_edges, edges) elif len(points_for_edges) == 6: # two edges # two_points_contain_vectice_0 = np.array(points_for_edges[0]) # two_points_contain_vectice_1 = np.array(points_for_edges[1]) # edge_vectice_0 = self.bisection(two_points_contain_vectice_0, implicit_object_instance) # edge_vectice_1 = self.bisection(two_points_contain_vectice_1, implicit_object_instance) # self.add_marching_cude_points(edge_vectice_0, edge_vectice_1) # edges.append([edge_vectice_0, edge_vectice_1]) print('two edges--') points_for_edge = points_for_edges[:3] assert len(points_for_edge) == 3 res = find_add_mc_edges_vertices(self, points_for_edge, edges) print(res) # two_points_contain_vectice_3 = np.array(points_for_edges[3]) # two_points_contain_vectice_4 = np.array(points_for_edges[4]) # edge_vectice_3 = self.bisection(two_points_contain_vectice_3, implicit_object_instance) # edge_vectice_4 = self.bisection(two_points_contain_vectice_4, implicit_object_instance) # self.add_marching_cude_points(edge_vectice_3, edge_vectice_4) # edges.append([edge_vectice_3, edge_vectice_4]) points_for_edge = points_for_edges[3:] assert len(points_for_edge) == 3 res = find_add_mc_edges_vertices(self, points_for_edge, edges) print(res) else: raise ValueError('there should not be another value...') elif self.is_Root(): self.cell0.marching_cube(implicit_object_instance, edges) self.cell1.marching_cube(implicit_object_instance, edges) self.cell2.marching_cube(implicit_object_instance, edges) self.cell3.marching_cube(implicit_object_instance, edges) else: pass return edges def interpolate(self, two_d_point, implicit_object_instance): assert self.is_Root() x_interpolate = two_d_point[0][0] y_interpolate = two_d_point[1][0] assert self.xmin <= x_interpolate <= self.xmax assert self.ymin <= y_interpolate <= self.ymax dx = (x_interpolate - self.xmin) / (self.xmax - self.xmin) dy = (y_interpolate - self.ymin) / (self.ymax - self.ymin) ab = implicit_object_instance.eval_point(np.array([[self.xmin], [self.ymin]])) * (1 - dx) + \ implicit_object_instance.eval_point(np.array([[self.xmax], [self.ymin]])) * dx cd = implicit_object_instance.eval_point(np.array([[self.xmin], [self.ymax]])) * (1 - dx) + \ implicit_object_instance.eval_point(
np.array([[self.xmax], [self.ymax]])
numpy.array
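The quadtree cell code above locates the surface crossing on an edge whose two endpoints evaluate to opposite sides of the implicit function, halving the edge until it is shorter than epsilon. Below is a minimal standalone sketch of that bisection step, assuming a simple circle as the implicit function; the helper names are illustrative and not the class's own.

import numpy as np

def is_inside(f, p):
    # A point counts as "inside" when the implicit function is negative there.
    return f(p) < 0.0

def bisect_edge(f, p0, p1, eps=1e-6):
    """Find the surface crossing on the segment p0-p1 by repeated halving.

    Assumes exactly one sign change between the endpoints, as in the
    quadtree marching-squares code above.
    """
    p0, p1 = np.asarray(p0, float), np.asarray(p1, float)
    assert is_inside(f, p0) != is_inside(f, p1), 'endpoints must straddle the surface'
    while np.linalg.norm(p1 - p0) > eps:
        mid = 0.5 * (p0 + p1)
        # Keep the half-segment that still straddles the surface.
        if is_inside(f, mid) == is_inside(f, p0):
            p0 = mid
        else:
            p1 = mid
    return 0.5 * (p0 + p1)

# Example: unit circle centred at the origin; the crossing on this edge is ~(1, 0).
circle = lambda p: p[0]**2 + p[1]**2 - 1.0
print(bisect_edge(circle, (0.0, 0.0), (2.0, 0.0)))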
import numpy as np import numbers from scipy import sparse from scipy import linalg import scipy.sparse.linalg as spla from mesh import Vertex, Interval, HalfEdge, QuadCell, convert_to_array from function import Map, Nodal, Constant from fem import parse_derivative_info, Basis from inspect import signature import time class GaussRule(): """ Description: ------------ Gaussian Quadrature weights and nodes on reference cell """ def __init__(self, order, element=None, shape=None): """ Constructor Inputs: order: int, order of quadrature rule 1D rule: order in {1,2,3,4,5,6} 2D rule: order in {1,4,9,16,25,36} for quadrilaterals {1,3,7,13} for triangles element: Element object OR shape: str, 'interval', 'triangle', or 'quadrilateral'. """ # # Determine shape of cells # if element is None: # Shape specified directly assert shape is not None, 'Must specify either element or cell shape.' else: # Element given shape = element.cell_type() # Check if shape is supported assert shape in ['interval','triangle','quadrilateral'], \ "Use 'interval', 'triangle', or 'quadrilateral'." # Get dimension dim = 1 if shape=='interval' else 2 # # Tensorize 1D rules if cell is quadrilateral # use_tensor_product_rules = \ ( dim == 1 or shape == 'quadrilateral' ) if use_tensor_product_rules: # # Determine the order of constituent 1D rules # if dim == 1: assert order in [1,2,3,4,5,6], 'Gauss rules in 1D: 1,2,3,4,5,6.' order_1d = order elif dim == 2: assert order in [1,4,9,16,25,36], 'Gauss rules over quads in 2D: 1,4,16,25' order_1d = int(np.sqrt(order)) r = [0]*order_1d # initialize as list of zeros w = [0]*order_1d # # One Dimensional Rules # if order_1d == 1: r[0] = 0.0 w[0] = 2.0 elif order_1d == 2: # Nodes r[0] = -1.0 /np.sqrt(3.0) r[1] = -r[0] # Weights w[0] = 1.0 w[1] = 1.0 elif order_1d == 3: # Nodes r[0] =-np.sqrt(3.0/5.0) r[1] = 0.0 r[2] =-r[0] # weights w[0] = 5.0/9.0 w[1] = 8.0/9.0 w[2] = w[0] elif order_1d == 4: # Nodes r[0] =-np.sqrt((3.0+2.0*np.sqrt(6.0/5.0))/7.0) r[1] =-np.sqrt((3.0-2.0*np.sqrt(6.0/5.0))/7.0) r[2] =-r[1] r[3] =-r[0] # Weights w[0] = 0.5 - 1.0 / ( 6.0 * np.sqrt(6.0/5.0) ) w[1] = 0.5 + 1.0 / ( 6.0 * np.sqrt(6.0/5.0) ) w[2] = w[1] w[3] = w[0] elif order_1d == 5: # Nodes r[0] =-np.sqrt(5.0+4.0*np.sqrt(5.0/14.0)) / 3.0 r[1] =-np.sqrt(5.0-4.0*np.sqrt(5.0/14.0)) / 3.0 r[2] = 0.0 r[3] =-r[1] r[4] =-r[0] # Weights w[0] = 161.0/450.0-13.0/(180.0*np.sqrt(5.0/14.0)) w[1] = 161.0/450.0+13.0/(180.0*np.sqrt(5.0/14.0)) w[2] = 128.0/225.0 w[3] = w[1] w[4] = w[0] elif order_1d == 6: # Nodes r[0] = -0.2386191861 r[1] = -0.6612093865 r[2] = -0.9324695142 r[3] = - r[0] r[4] = - r[1] r[5] = - r[2] # Weights w[0] = .4679139346 w[1] = .3607615730 w[2] = .1713244924 w[3] = w[0] w[4] = w[1] w[5] = w[2] # # Transform from [-1,1] to [0,1] # r = [0.5+0.5*ri for ri in r] w = [0.5*wi for wi in w] if dim == 1: self.__nodes = np.array(r) self.__weights = np.array(w) elif dim == 2: # # Combine 1d rules into tensor product rules # nodes = [] weights = [] for i in range(len(r)): for j in range(len(r)): nodes.append((r[i],r[j])) weights.append(w[i]*w[j]) self.__nodes = np.array(nodes) self.__weights = np.array(weights) elif element.cell_type == 'triangle': # # Two dimensional rules over triangles # assert order in [1,3,7,13], 'Gauss rules on triangles in 2D: 1, 3, 7 or 13.' 
if order == 1: # # One point rule # r = [(2.0/3.0,1.0/3.0)] w = [0.5] elif order == 3: # # 3 point rule # r = [0]*order r[0] = (2.0/3.0, 1.0/6.0) r[1] = (1.0/6.0, 2.0/3.0) r[2] = (1.0/6.0, 1.0/6.0) w = [0]*order w[0] = 1.0/6.0 w[1] = w[0] w[2] = w[0] elif order == 7: # The following points correspond to a 7 point rule, # see Dunavant, IJNME, v. 21, pp. 1129-1148, 1995. # or Braess, p. 95. # # Nodes # t1 = 1.0/3.0 t2 = (6.0 + np.sqrt(15.0))/21.0 t3 = 4.0/7.0 - t2 r = [0]*order r[0] = (t1,t1) r[1] = (t2,t2) r[2] = (1.0-2.0*t2, t2) r[3] = (t2,1.0-2.0*t2) r[4] = (t3,t3) r[5] = (1.0-2.0*t3,t3) r[6] = (t3,1.0-2.0*t3) # # Weights # t1 = 9.0/80.0 t2 = ( 155.0 + np.sqrt(15.0))/2400.0 t3 = 31.0/240.0 - t2 w = [0]*order w[0] = t1 w[1] = t2 w[2] = t2 w[3] = t2 w[4] = t3 w[5] = t3 w[6] = t3 elif order == 13: r = [0]*order r1 = 0.0651301029022 r2 = 0.8697397941956 r4 = 0.3128654960049 r5 = 0.6384441885698 r6 = 0.0486903154253 r10 = 0.2603459660790 r11 = 0.4793080678419 r13 = 0.3333333333333 r[0] = (r1,r1) r[1] = (r2,r1) r[2] = (r1,r2) r[3] = (r4,r6) r[4] = (r5,r4) r[5] = (r6,r5) r[6] = (r5,r6) r[7] = (r4,r5) r[8] = (r6,r4) r[9] = (r10,r10) r[10] = (r11,r10) r[11] = (r10,r11) r[12] = (r13,r13) w = [0]*order w1 = 0.0533472356088 w4 = 0.0771137608903 w10 = 0.1756152574332 w13 = -0.1495700444677 w[0] = w1 w[1] = w1 w[2] = w1 w[3] = w4 w[4] = w4 w[5] = w4 w[6] = w4 w[7] = w4 w[8] = w4 w[9] = w10 w[10] = w10 w[11] = w10 w[12] = w13 w = [0.5*wi for wi in w] self.__nodes = np.array(r) self.__weights = np.array(w) self.__cell_type = shape self.__dim = dim def nodes(self): """ Return quadrature nodes """ return self.__nodes def weights(self): """ Return quadrature weights """ return self.__weights def n_nodes(self): """ Return the size of the rule """ return len(self.__weights) def dim(self): """ Return the dimension of the rule """ return self.__dim def shape(self): """ Return the geometric shape of the reference region """ return self.__cell_type def scale_rule(self, position, scale, nodes=None, weights=None): """ Description ----------- Scale the quadrature rule defined over the reference region to an equivalent rule over a sub-region. When mapped to a physical region, this will result in a quadrature over an appropriate sub-region thereof. This is useful when evaluating integrals over cells in which the integrands have different levels of resolution. Parameters ---------- position : double, Position of sub-region within larger region. If the region is an interval, the position is a scalar, if it is a quadrilateral, the position is a pair. scale : double >0, Shrinkage factor of the sub-region size (length) relative to that of the region. nodes: double, Nodes on the reference cell. If none are specified, the stored quadrature nodes are used. weights: double, Quadrature weights on the reference cell. Returns ------- x_scaled : double, Vector of quadrature nodes defined over the sub-region. w_scaled : double, Vector of quadrature weights over sub-region. Notes ----- 1. Use region.reference_map to generate quadrature rules over physical regions. 2. Use region.subcell_position to determine the relative position and size of a region's sub-region (Intervals, HalfEdges, QuadCells). 
TODO: Test """ if nodes is None: # Quadrature Rule quadrature = True # Get reference quadrature nodes and weights nodes, weights = self.nodes(), self.weights() else: # Evaluation quadrature = False # Scale and shift nodes as specified x_scaled = position + scale*nodes if not quadrature: # # Return only the scaled and shifted nodes # return x_scaled else: # # Adjust the quadrature weights # shape = self.shape() if shape == 'interval': # # On the unit interval # # Scale reference weights by Jacobian w_scaled = scale*weights elif shape == 'quadrilateral': # # On the unit square # assert isinstance(position,np.ndarray), \ 'Input "position" must be a numpy array.' assert position.shape == (1,2), \ 'Position must have dimensions (1,2).' assert np.dim(scale)==0, \ 'Input "scalar" should have dimension 0.' # Scale the weights w_scaled = scale**2*weights else: raise Exception('Only shapes of type "interval" or '+\ '"quadrilateral" supported.') return x_scaled, w_scaled def map_rule(self, region, nodes=None, basis=None): """ Description ----------- Maps a set of reference nodes to a physical region, adjusts the associated quadrature weights and evaluates the shape functions. Parameters ---------- region : {QuadCell, Interval, HalfEdge}, Region to which the rule (or just nodes) is mapped. nodes : double, Nodes on the reference cell. If none are specified, the stored quadrature nodes are used. basis : (list of) Basis, Basis functions to be evaluated on the given region. Returns ------- xg : double, Quadrature (or evaluation) nodes on physical region. wg : double, Quadrature weights associated with the physical region. shapes : double, Basis-indexed dictionary of arrays corresponding to the shape functions evaluated at the given nodes. Notes ----- TODO: This method replaces "mapped_rule", which can be deleted once this is done and tested. TODO: Test this method. """ if nodes is None: # Quadrature rule (default) quadrature = True # Reference nodes nodes, weights = self.nodes(), self.weights() else: # Evaluation at given nodes quadrature = False if basis is None: # # No basis specified -> No need for shape functions # if quadrature: # # Quadrature nodes (modify the weights) # # Map to physical region xg, mg = region.reference_map(nodes, jac_r2p=True) # # Update the weights using the Jacobian # jac = mg['jac_r2p'] if isinstance(region, Interval): # Interval dxdr = np.array(jac) elif isinstance(region, HalfEdge): # HalfEdge dxdr = np.array(np.linalg.norm(jac[0])) elif isinstance(region, QuadCell): # QuadCell dxdr = np.array([np.linalg.det(j) for j in jac]) else: raise Exception('Only regions of type "Interval",' + \ '"HalfEdge", or "QuadCell" supported.') # Modify the reference weights wg = weights*dxdr # Return the nodes and weights return xg, wg else: # # Nodes specified: No quadrature weights # xg = region.reference_map(nodes) # Return only the mapped nodes return xg # +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Below here, basis is not None!! 
# +++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++ # Initialize dictionary of shapes shapes = dict.fromkeys(basis,0) # # Evaluating basis functions on HalfEdges -> map onto reference cell # if isinstance(region, HalfEdge): # # Half-Edge # # Get physical cell from HalfEdge cell = region.cell() # Get reference cell from single basis function ref_cell = basis[0].dofhandler().element.reference_cell() # Determine equivalent half-edge on reference element i_he = cell.get_half_edges().index(region) ref_he = ref_cell.get_half_edge(i_he) b,h = convert_to_array(ref_he.get_vertices()) # Map 1D nodes onto reference HalfEdge nodes = np.array([b[i]+nodes*(h[i]-b[i]) for i in range(2)]).T # # Group the basis according to scales # grouped_basis = {} for b in basis: # Determine mesh-flag associated with basis function basis_meshflag = b.subforest_flag() # Add basis to list under meshflag if basis_meshflag not in grouped_basis: grouped_basis[basis_meshflag] = [b] else: grouped_basis[basis_meshflag].append(b) # # Scale nodes and weights if necessary! # # Group nodes and weights according to meshflag grouped_nodes_weights = {} for meshflag, basis in enumerate(grouped_basis): # # Determine position of region's cell relative to basis cell # if isinstance(region, HalfEdge): cell = region.cell() else: cell = region # Get cell on which basis is defined coarse_cell = cell.nearest_ancestor(meshflag) # # Determine scaling # if cell != coarse_cell: # Cell is strictly contained in coarse_cell position, scale = coarse_cell.subcell_position(cell) # # Scaled nodes and weights # if quadrature: # Quadrature nodes grouped_nodes_weights[meshflag] = \ self.scale_rule(position, scale, nodes, weights) else: # Evaluation nodes grouped_nodes_weights[meshflag] = \ self.scale_rule(position, scale, nodes) else: # # Unscaled nodes and weights # if quadrature: # Quadrature nodes grouped_nodes_weights[meshflag] = (nodes, weights) else: # Evaluation nodes grouped_nodes_weights[meshflag] = nodes # # Parse Basis for required derivatives # # Check whether we need jacobians and/or Hessians jac_p2r = any([b.derivative()[0]>=1 for b in basis]) hess_p2r = any([b.derivative()[0]==2 for b in basis]) # # Map points to physical region # if quadrature: # # Quadrature rule # x_ref, w_ref = grouped_nodes_weights[meshflag] xg, mg = region.reference_map(x_ref, jac_r2p=True, jac_p2r=jac_p2r, hess_p2r=hess_p2r) # # Update the weights using the Jacobian # jac = mg['jac_r2p'] if isinstance(region, Interval): # Interval dxdr = np.array(jac) elif isinstance(region, HalfEdge): # HalfEdge dxdr = np.array(np.linalg.norm(jac[0])) elif isinstance(region, QuadCell): # QuadCell dxdr = np.array([np.linalg.det(j) for j in jac]) else: raise Exception('Only regions of type "Interval",' + \ '"HalfEdge", or "QuadCell" supported.') # Modify the reference weights wg = w_ref*dxdr else: # # Evaluation # x_ref = grouped_nodes_weights[meshflag] xg, mg = region.reference_map(x_ref, jac_p2r=jac_p2r, hess_p2r=hess_p2r) for b in basis: # # Evaluate the basis functions at the (scaled) reference points # element = b.dofhandler().element D = b.derivative() jac_p2r = mg['jac_p2r'] if D[0] in [1,2] else None hess_p2r = mg['hess_p2r'] if D[0]==2 else None shapes[b] = \ element.shape(x_ref=x_ref, derivatives=D, jac_p2r=jac_p2r, hess_p2r=hess_p2r) if quadrature: # Quadrature return xg, wg, shapes else: # Evaluation return xg, shapes def mapped_rule(self, region, basis=[], jac_p2r=False, hess_p2r=False): """ Return the rule associated with a specific Cell, Interval, or 
HalfEdge as well as the inverse jacobians and hessians associated with the transformation. Parameters: ----------- region : object, {Interval, HalfEdge, or Cell} Region to which rule is mapped. basis : list, List of basis functions defined on the region jac_p2r, hess_p2r: bool, indicate whether the jacobian and hessian of the inverse mapping should be returned. These are useful when evaluating the gradients and second derivatives of shape functions. TODO: Move assembler.shape_eval part to here. """ # # Map quadrature rule to entity (cell/halfedge) # if isinstance(region, Interval): # # Interval # # Check compatiblity assert self.dim()==1, 'Interval requires a 1D rule.' # Get reference nodes and weights x_ref = self.nodes() w_ref = self.weights() # Map reference quadrature nodes to cell xg, mg = region.reference_map(x_ref, jac_r2p=True, jac_p2r=jac_p2r, hess_p2r=hess_p2r) # Get Jacobian of forward mapping jac = mg['jac_r2p'] # Modify the quadrature weights wg = w_ref*np.array(jac) elif isinstance(region, HalfEdge): # # Edge # # Check compatibility assert self.dim()==1, 'Half Edge requires a 1D rule.' # Get reference quadrature nodes and weights x_ref = self.nodes() w_ref = self.weights() # Map reference nodes to halfedge xg, mg = region.reference_map(x_ref, jac_r2p=True, jac_p2r=jac_p2r, hess_p2r=hess_p2r) # Get jaobian of forward mapping jac = mg['jac_r2p'] # Modify the quadrature weights wg = w_ref*np.array(np.linalg.norm(jac[0])) elif isinstance(region, QuadCell): # # Quadrilateral # # Check compatibility assert self.dim()==2, 'QuadCell requires 2D rule.' x_ref = self.nodes() w_ref = self.weights() # Map reference quadrature nodes to quadcell xg, mg = region.reference_map(x_ref, jac_r2p=True, jac_p2r=jac_p2r, hess_p2r=hess_p2r) # Get Jacobian of forward mapping jac = mg['jac_r2p'] # Modify quadrature weights wg = w_ref*np.array([np.linalg.det(j) for j in jac]) else: raise Exception('Only Intervals, HalfEdges, & QuadCells supported') # # Return Gauss nodes and weights, and Jacobian/Hessian of inverse map # if any([jac_p2r,hess_p2r]): return xg, wg, mg else: return xg, wg class Kernel(object): """ Kernel (combination of Functions) to be used in Forms """ def __init__(self, f, derivatives=None, F=None, subsample=None): """ Constructor Inputs: f: single Function, or list of Functions *f_kwargs: dict, (list of) keyword arguments to be passed to the f's *F: function, lambda function describing how the f's are combined and modified to form the kernel *subsample: int, numpy array of subsample indices """ # # Store input function(s) # if type(f) is not list: # # Single function # assert isinstance(f, Map), 'Input "f" should be a "Map" object.' f = [f] self.__f = f n_functions = len(self.__f) # # Parse function derivatives # dfdx = [] if derivatives is None: # # No derivatives specified # dfdx = [None for dummy in self.__f] elif type(derivatives) is list: # # derivatives specified in list # assert len(derivatives)==n_functions, \ 'The size of input "derivatives" incompatible with '+\ 'that of input "f".' dfdx = derivatives else: # # Single derivative applies to all functions # dfdx = parse_derivative_info(derivatives) dfdx = [dfdx for dummy in self.__f] self.__dfdx = dfdx # # Store meta function F # # Check that F takes the right number of inputs if F is None: # Store metafunction F assert n_functions == 1, \ 'If input "F" not specified, only one function allowed.' 
F = lambda f: f self.__F = F # Store function signature of F sigF = signature(F) # Figure out which of the cell_args = {} for arg in ['cell', 'region', 'phi', 'dofs']: if arg in sigF.parameters: cell_args[arg] = None bound = sigF.bind_partial(**cell_args) self.__bound = bound self.__signature = sigF # Store subsample self.set_subsample(subsample) def basis(self): """ Determine the basis functions used in the Kernel """ basis = [] for f in self.__f: if isinstance(f, Nodal): basis.append(f.basis()) return basis def set_subsample(self, subsample): """ Set kernel's subsample Input: subsample: int, numpy array specifying subsample indices Note: For stochastic functions, the default subsample is the entire range. For deterministic functions, the subsample can only be None. """ # # Parse subsample # if subsample is None: # # Check whether there is a stochastic function in the list # for f in self.__f: if f.n_samples()>1: f.set_subsample(subsample) subsample = f.subsample() break # # Set same subsample for all functions # for f in self.__f: f.set_subsample(subsample) if subsample is not None: assert np.allclose(f.subsample(),subsample), \ 'Incompatible subsample.' self.__subsample = subsample def n_subsample(self): """ Returns the subsample of functions used """ if self.__subsample is not None: return len(self.__subsample) else: return 1 def f(self): """ Returns the list of functions """ return self.__f def F(self): """ Returns the metafunction """ return self.__F def is_symmetric(self): """ Returns True if all functions in the kernel are symmetric. """ return all([f.is_symmetric() for f in self.f()]) def eval(self, x, phi=None, cell=None, region=None, dofs=None): """ Evaluate the kernel at the points stored in x Inputs: x: (n_points, dim) array of points at which to evaluate the kernel phi: basis-indexed dictionary of shape functions region: Geometric region (Cell, Interval, HalfEdge, Vertex) Included for modified kernels cell: Interval or QuadCell on which kernel is to be evaluated phi: (basis-indexed) shape functions over region Output: Kernel function evaluated at point x. TODO: FIX KERNEL! Interaction with assembler - Different mesh sizes - Derivatives vs. Basis functions. """ # # Evaluate constituent functions # f_vals = [] for f, dfdx in zip(self.__f, self.__dfdx): if isinstance(f, Nodal): phi_f = phi if phi is None else phi[f.basis()] dof_f = None if dofs is None else dofs[f.basis()] if dof_f is None or phi_f is None: fv = f.eval(x=x, derivative=dfdx, cell=cell) else: fv = f.eval(x=x, derivative=dfdx, cell=cell, phi=phi_f, dofs=dof_f) else: fv = f.eval(x=x) f_vals.append(fv) # # Combine functions using meta-function F # # Figure out which of the keyword parameters F can take signature = self.__signature bound = self.__bound cell_args = {'phi': phi, 'cell': cell, 'region':region, 'dofs':dofs} for arg, val in cell_args.items(): if arg in signature.parameters: bound.arguments[arg] = val # Evaluate F return self.__F(*f_vals, **bound.kwargs) class Form(object): """ Constant, Linear, or Bilinear forms (integrals) """ def __init__(self, kernel=None, trial=None, test=None,\ dmu='dx', flag=None, dim=None): """ Constructor Inputs: *kernel: Kernel, specifying the form's kernel *trial: Basis, basis function representing the trial space *test: Basis, basis function representing the test space *dmu: str, area of integration 'dx' - integrate over a cell 'ds' - integrate over a half-edge 'dv' - integrate over a vertex *flag: str/int/tuple cell/half_edge/vertex marker *dim: int, dimension of the domain. 
""" # # Parse test function # if test is not None: dim = test.dofhandler().element.dim() assert isinstance(test, Basis), \ 'Input "test" must be of type "Basis".' self.test = test # # Parse trial function # if trial is not None: # Check that trial is a Basis assert isinstance(trial, Basis), \ 'Input "trial" must be of type "Basis".' # Check that dimensions are compatible assert dim==trial.dofhandler().element.dim(), \ 'Test and trial functions should be defined over the same '+\ ' dimensional domain.' self.trial = trial # # Parse measure # assert dmu in ['dx', 'ds', 'dv'], \ 'Input "dmu" should be "dx", "ds", or "dv".' # # Check: ds can only be used in 2D # if dmu=='ds' and test is not None: assert dim==2, 'Measure "ds" can only be defined over 2D region.' self.dmu = dmu # # Parse kernel # if kernel is not None: # # Check that kernel is the right type # if isinstance(kernel, Map): # # Kernel entered as Map # kernel = Kernel(kernel) elif isinstance(kernel, numbers.Real): # # Kernel entered as real number # kernel = Kernel(Constant(kernel)) else: # # Otherwise, kernel must be of type Kernel # assert isinstance(kernel, Kernel), \ 'Input "kernel" must be of class "Kernel".' else: # # Default Kernel # kernel = Kernel(Constant(1)) self.kernel = kernel self.flag = flag # # Determine Form type # if self.test is None: # # Constant form # form_type = 'constant' elif self.trial is None: # # Linear form # form_type = 'linear' else: # # Bilinear form # form_type = 'bilinear' self.type = form_type def basis(self): """ Returns ======= basis: list of Basis objects, Returns a list of all the form's basis functions (trial, test, and those used to define the Kernel). """ basis = [] if self.test is not None: # # Add test basis # basis.append(self.test) if self.trial is not None: # # Add trial basis # basis.append(self.trial) # # Add basis functions from the kernel # basis.extend(self.kernel.basis()) # # Return basis list # return basis def dim(self): """ Return the dimension of the form 0 = constant 1 = linear 2 = bilinear """ if self.test is None: # # Constant # return 0 elif self.trial is None: # # Linear # return 1 else: # # Bilinear # return 2 def regions(self, cell): """ Determine the regions over which the form is integrated, using information from dmu and markers """ regions = [] dmu = self.dmu if dmu=='dx': # # Integration region is a cell # if self.flag is None or cell.is_marked(self.flag): # # Valid Cell # regions.append(cell) elif dmu=='ds': # # Integration region is a half-edge # for half_edge in cell.get_half_edges(): # # Iterate over half edges # if self.flag is None or half_edge.is_marked(self.flag): # # Valid HalfEdge # regions.append(half_edge) elif dmu=='dv': # # Integration region is a vertex # for vertex in cell.get_vertices(): # # Iterate over cell vertices # if self.flag is None or vertex.is_marked(self.flag): # # Valid vertex # regions.append(vertex) return regions def eval(self, cell, xg, wg, phi, dofs): """ Evaluates the local kernel, test, (and trial) functions of a (bi)linear form on a given entity. Inputs: cell: Cell containing subregions over which Form is defined xg: dict, Gaussian quadrature points, indexed by regions. wg: dict, Gaussian quadrature weights, indexed by regions. phi: dict, shape functions, indexed by regions -> basis dofs: dict, global degrees of freedom associated with region, indexed by region -> basis Outputs: Constant-, linear-, or bilinear forms and their associated local degrees of freedom. TODO: Explain what the output looks like! 
Note: This method should be run in conjunction with the Assembler class """ # Determine regions over which form is defined regions = self.regions(cell) # Number of samples n_samples = self.kernel.n_subsample() f_loc = None for region in regions: # Get Gauss points in region x = xg[region] # # Compute kernel, weight by quadrature weights # kernel = self.kernel Ker = kernel.eval(x=x, region=region, cell=cell, phi=phi[region], dofs=dofs[region]) # Weight kernel using quadrature weights wKer = (wg[region]*Ker.T).T if self.type=='constant': # # Constant form # # Initialize form if necessary if f_loc is None: f_loc = np.zeros((1,n_samples)) # # Update form # f_loc += np.sum(wKer, axis=0) elif self.type=='linear': # # Linear form # # Test functions evaluated at Gauss nodes n_dofs_test = self.test.dofhandler().element.n_dofs() test = phi[region][self.test] # Initialize forms if necessary if f_loc is None: if n_samples is None: f_loc = np.zeros(n_dofs_test) else: f_loc = np.zeros((n_dofs_test,n_samples)) # Update form f_loc += np.dot(test.T, wKer) elif self.type=='bilinear': # # Bilinear form # # Test functions evaluated at Gauss nodes n_dofs_test = self.test.dofhandler().element.n_dofs() test = phi[region][self.test] # Trial functions evaluated at Gauss nodes n_dofs_trial = self.trial.dofhandler().element.n_dofs() trial = phi[region][self.trial] # # Initialize local matrix if necessary # if f_loc is None: # # Initialize form # if n_samples is None: f_loc = np.zeros((n_dofs_test,n_dofs_trial)) else: f_loc = np.zeros((n_dofs_test,n_dofs_trial,n_samples)) # # Update form # if n_samples is None: # # Deterministic kernel # ''' f_loc_det = np.dot(test.T, np.dot(np.diag(wg[region]*Ker),trial)) f_loc += f_loc_det.reshape((n_dofs_test*n_dofs_trial,), order='F') ''' f_loc += np.dot(test.T, np.dot(np.diag(wg[region]*Ker),trial)) else: # # Sampled kernel # ''' f_loc_smp = [] for i in range(n_dofs_trial): f_loc_smp.append(np.dot(test.T, (trial[:,i]*wKer.T).T)) f_loc += np.concatenate(f_loc_smp, axis=0) ''' for i in range(n_dofs_trial): f_loc[:,i,:] += np.dot(test.T, (trial[:,i]*wKer.T).T) # # Initialize zero local matrix if necessary # if f_loc is None: if self.type == 'constant': # # Constant form # if n_samples is None: # # Deterministic form # f_loc = 0 else: # # Sampled form # f_loc = np.zeros(n_samples) elif self.type=='linear': # # Linear form # n_dofs_test = self.test.dofhandler().element.n_dofs() if n_samples is None: # # Deterministic form # f_loc =
np.zeros(n_dofs_test)
numpy.zeros
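The GaussRule constructor above hard-codes 1D Gauss-Legendre nodes and weights, shifts them from [-1, 1] to [0, 1], and builds quadrilateral rules as tensor products of the 1D rule. The following short sketch reproduces that construction using NumPy's leggauss tables instead of the hard-coded values; this is an assumed shortcut for illustration, not the class's implementation.

import numpy as np

def gauss_rule_unit_interval(n):
    """n-point Gauss-Legendre rule shifted from [-1, 1] to [0, 1]."""
    r, w = np.polynomial.legendre.leggauss(n)
    return 0.5 + 0.5 * r, 0.5 * w

def gauss_rule_unit_square(n):
    """Tensor-product rule with n*n points on the unit square."""
    r, w = gauss_rule_unit_interval(n)
    nodes = np.array([(ri, rj) for ri in r for rj in r])
    weights = np.array([wi * wj for wi in w for wj in w])
    return nodes, weights

# A 3-point rule integrates polynomials up to degree 5 exactly on [0, 1]:
x, w = gauss_rule_unit_interval(3)
print(np.dot(w, x**5), 1.0 / 6.0)   # both ~ 0.166666...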
# 2 DOF SYSTEM
import numpy as np
import matplotlib.pyplot as plt
import libraryTugas as lib

# 1. SYSTEM PARAMETERS
# ====================
# a. Initial conditions, blocks 1 & 2
x_init1, xDot_init1 = 0.1, 0    # [m], [m/s]
x_init2, xDot_init2 = -0.1, 0   # [m], [m/s]

# b. System parameters, blocks 1 & 2
mass1, damp1, spring1 = 1, 1, 10   # [kg], [Ns/m], [N/m]
mass2, damp2, spring2 = 2, 1, 20   # [kg], [Ns/m], [N/m]

# c. Time parameters
timeStart, timeStop, stepTime = 0, 10, 0.001   # [s]

# d. Define the system model
def systemFunction(y, t):
    # Equation-of-motion function: y = [x1, x2, xDot1, xDot2]
    Qe1 = -spring1*float(y[0]) + spring2*(float(y[1]) - float(y[0])) - damp1*float(y[2]) + damp2*(float(y[3]) - float(y[2]))
    Qe2 = -spring2*(float(y[1]) - float(y[0])) - damp2*(float(y[3]) - float(y[2]))
    externalForces_Matrix = np.array([[Qe1], [Qe2]], dtype=float)
    xDotDot =
np.dot(mass_MatInverse, externalForces_Matrix)
numpy.dot
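The script above computes the generalized forces on each block and then recovers the accelerations from xDotDot = M^-1 * Qe; the mass-matrix inverse and the time integrator come from code that is not shown (libraryTugas). The sketch below rebuilds a diagonal mass matrix and integrates the same state vector y = [x1, x2, xDot1, xDot2] with scipy's odeint, which is an assumed substitute for the missing helper, not the original's integrator.

import numpy as np
from scipy.integrate import odeint

mass1, damp1, spring1 = 1.0, 1.0, 10.0
mass2, damp2, spring2 = 2.0, 1.0, 20.0
mass_MatInverse = np.linalg.inv(np.diag([mass1, mass2]))  # diagonal mass matrix, inverted

def system_function(y, t):
    x1, x2, v1, v2 = y
    # Generalized forces on each block (same expressions as in the script above).
    Qe1 = -spring1 * x1 + spring2 * (x2 - x1) - damp1 * v1 + damp2 * (v2 - v1)
    Qe2 = -spring2 * (x2 - x1) - damp2 * (v2 - v1)
    a1, a2 = mass_MatInverse @ np.array([Qe1, Qe2])
    return [v1, v2, a1, a2]

t = np.arange(0.0, 10.0, 0.001)
y = odeint(system_function, [0.1, -0.1, 0.0, 0.0], t)   # columns: x1, x2, v1, v2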
import numpy as np def eval_relation_recall(sg_entry, roidb_entry, result_dict, mode, iou_thresh): # gt gt_inds = np.where(roidb_entry['max_overlaps'] == 1)[0] gt_boxes = roidb_entry['boxes'][gt_inds].copy().astype(float) num_gt_boxes = gt_boxes.shape[0] gt_relations = roidb_entry['gt_relations'].copy() gt_classes = roidb_entry['gt_classes'].copy() num_gt_relations = gt_relations.shape[0] if num_gt_relations == 0: return (None, None) gt_class_scores = np.ones(num_gt_boxes) gt_predicate_scores = np.ones(num_gt_relations) gt_triplets, gt_triplet_boxes, _ = _triplet(gt_relations[:,2], gt_relations[:,:2], gt_classes, gt_boxes, gt_predicate_scores, gt_class_scores) # pred box_preds = sg_entry['boxes'] num_boxes = box_preds.shape[0] predicate_preds = sg_entry['relations'] class_preds = sg_entry['scores'] predicate_preds = predicate_preds.reshape(num_boxes, num_boxes, -1) # no bg predicate_preds = predicate_preds[:, :, 1:] predicates = np.argmax(predicate_preds, 2).ravel() + 1 predicate_scores = predicate_preds.max(axis=2).ravel() relations = [] keep = [] for i in xrange(num_boxes): for j in xrange(num_boxes): if i != j: keep.append(num_boxes*i + j) relations.append([i, j]) # take out self relations predicates = predicates[keep] predicate_scores = predicate_scores[keep] relations = np.array(relations) assert(relations.shape[0] == num_boxes * (num_boxes - 1)) assert(predicates.shape[0] == relations.shape[0]) num_relations = relations.shape[0] if mode =='pred_cls': # if predicate classification task # use ground truth bounding boxes assert(num_boxes == num_gt_boxes) classes = gt_classes class_scores = gt_class_scores boxes = gt_boxes elif mode =='sg_cls': assert(num_boxes == num_gt_boxes) # if scene graph classification task # use gt boxes, but predicted classes classes = np.argmax(class_preds, 1) class_scores = class_preds.max(axis=1) boxes = gt_boxes elif mode =='sg_det': # if scene graph detection task # use preicted boxes and predicted classes classes = np.argmax(class_preds, 1) class_scores = class_preds.max(axis=1) boxes = [] for i, c in enumerate(classes): boxes.append(box_preds[i, c*4:(c+1)*4]) boxes = np.vstack(boxes) else: raise NotImplementedError('Incorrect Mode! 
%s' % mode) pred_triplets, pred_triplet_boxes, relation_scores = \ _triplet(predicates, relations, classes, boxes, predicate_scores, class_scores) sorted_inds = np.argsort(relation_scores)[::-1] # compue recall for k in result_dict[mode + '_recall']: this_k = min(k, num_relations) keep_inds = sorted_inds[:this_k] recall = _relation_recall(gt_triplets, pred_triplets[keep_inds,:], gt_triplet_boxes, pred_triplet_boxes[keep_inds,:], iou_thresh) result_dict[mode + '_recall'][k].append(recall) # for visualization return pred_triplets[sorted_inds, :], pred_triplet_boxes[sorted_inds, :] def _triplet(predicates, relations, classes, boxes, predicate_scores, class_scores): # format predictions into triplets assert(predicates.shape[0] == relations.shape[0]) num_relations = relations.shape[0] triplets = np.zeros([num_relations, 3]).astype(np.int32) triplet_boxes = np.zeros([num_relations, 8]).astype(np.int32) triplet_scores = np.zeros([num_relations]).astype(np.float32) for i in xrange(num_relations): triplets[i, 1] = predicates[i] sub_i, obj_i = relations[i,:2] triplets[i, 0] = classes[sub_i] triplets[i, 2] = classes[obj_i] triplet_boxes[i, :4] = boxes[sub_i, :] triplet_boxes[i, 4:] = boxes[obj_i, :] # compute triplet score score = class_scores[sub_i] score *= class_scores[obj_i] score *= predicate_scores[i] triplet_scores[i] = score return triplets, triplet_boxes, triplet_scores def _relation_recall(gt_triplets, pred_triplets, gt_boxes, pred_boxes, iou_thresh): # compute the R@K metric for a set of predicted triplets num_gt = gt_triplets.shape[0] num_correct_pred_gt = 0 for gt, gt_box in zip(gt_triplets, gt_boxes): keep = np.zeros(pred_triplets.shape[0]).astype(bool) for i, pred in enumerate(pred_triplets): if gt[0] == pred[0] and gt[1] == pred[1] and gt[2] == pred[2]: keep[i] = True if not np.any(keep): continue boxes = pred_boxes[keep,:] sub_iou = iou(gt_box[:4], boxes[:,:4]) obj_iou = iou(gt_box[4:], boxes[:,4:]) inds = np.intersect1d(np.where(sub_iou >= iou_thresh)[0], np.where(obj_iou >= iou_thresh)[0]) if inds.size > 0: num_correct_pred_gt += 1 return float(num_correct_pred_gt) / float(num_gt) def iou(gt_box, pred_boxes): # computer Intersection-over-Union between two sets of boxes ixmin = np.maximum(gt_box[0], pred_boxes[:,0]) iymin = np.maximum(gt_box[1], pred_boxes[:,1]) ixmax =
np.minimum(gt_box[2], pred_boxes[:,2])
numpy.minimum
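The truncated iou helper above scores one ground-truth box against an (N, 4) array of predicted boxes so that _relation_recall can require both the subject and the object box of a triplet to clear the IoU threshold. Here is a self-contained version of that overlap computation in continuous coordinates; whether the original adds a +1 pixel convention is not visible in the truncated snippet, so that choice is an assumption.

import numpy as np

def iou(gt_box, pred_boxes):
    """Intersection-over-union of one [xmin, ymin, xmax, ymax] box vs. an (N, 4) array."""
    ixmin = np.maximum(gt_box[0], pred_boxes[:, 0])
    iymin = np.maximum(gt_box[1], pred_boxes[:, 1])
    ixmax = np.minimum(gt_box[2], pred_boxes[:, 2])
    iymax = np.minimum(gt_box[3], pred_boxes[:, 3])
    iw = np.maximum(ixmax - ixmin, 0.0)   # clamp negative overlaps to zero
    ih = np.maximum(iymax - iymin, 0.0)
    inter = iw * ih
    area_gt = (gt_box[2] - gt_box[0]) * (gt_box[3] - gt_box[1])
    area_pred = (pred_boxes[:, 2] - pred_boxes[:, 0]) * (pred_boxes[:, 3] - pred_boxes[:, 1])
    return inter / (area_gt + area_pred - inter)

boxes = np.array([[0, 0, 10, 10], [5, 5, 15, 15], [20, 20, 30, 30]], dtype=float)
print(iou(np.array([0, 0, 10, 10], dtype=float), boxes))   # [1.0, ~0.143, 0.0]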
from sciapp.action import ImageTool
from sciapp.object import mark2shp
import numpy as np

def draw(img, lines):
    if len(lines) < 2:
        return
    lines =
np.array(lines)
numpy.array
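The fragment above is cut off right after the guard clause and the list-to-array conversion, so what mark2shp receives is not visible. Purely as an illustration of why converting collected click points to an ndarray is useful (this is not sciapp's API), here is the kind of vectorized polyline geometry it enables:

import numpy as np

clicks = [(10, 12), (40, 30), (55, 80)]      # points collected from the tool (made up)
lines = np.array(clicks, dtype=float)        # shape (N, 2)

segments = np.diff(lines, axis=0)            # vector of each polyline segment
lengths = np.linalg.norm(segments, axis=1)   # per-segment length
print(lengths, lengths.sum())                # individual and total polyline length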
# ================================================================================== # # Copyright (c) 2019, <NAME> # # Permission is hereby granted, free of charge, to any person obtaining a copy # of this software and associated documentation files (the "Software"), to deal # in the Software without restriction, including without limitation the rights # to use, copy, modify, merge, publish, distribute, sublicense, and/or sell # copies of the Software, and to permit persons to whom the Software is # furnished to do so, subject to the following conditions: # # The above copyright notice and this permission notice shall be included in all # copies or substantial portions of the Software. # # THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR # IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, # FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE # AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER # LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, # OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE # SOFTWARE. # # ================================================================================== # TODO: # > Implement exact Legendre polynomials # > Add references for the used polynomials # > Apply (multiply) a gaussian function in the polynomial in order to # give a localized nature (similar to Gabor wavelet). import numpy as _np # from abc import ABC as _ABC # from abc import abstractmethod as _abstractmethod # ===== POLYNOMIAL INTERFACE ======================================================== # # it would be useful to have some interface for enforcing specific # # polynomial functionality and design. Currently, the following # # code runs, but don't work (does not enforce the interface design). # # class _iPolynomial(_ABC): # @staticmethod # @_abstractmethod # def WeightScheme(upToDegree, *argv): # """Calculates polynomials weights. # Parameters # ---------- # upToDegree: int # The maximum degree for which we desire to calculate the polynomial values. # *argv: additional arguments # Represents whatever parameter may be of need for specific weighting schemes. # Returns # ------- # ndarray # The resulted array is an 1d ndarray, where its rows represent a weight for # the corresponding polynomial values. # """ # pass # @staticmethod # @_abstractmethod # def Poly(upToDegree, x): # """Calculates the n-th degree polynomial. # Parameters # ---------- # upToDegree: int # The maximum degree for which we desire to calculate the polynomial values. # x: ndarray # The variable of the polynomial. It must be a 1D ndarray of the disired length. # Returns # ------- # ndarray # The resulted array is a 2d ndarray, where the rows represent the degree # and the columns the polynomial values. # """ # pass # ----------------------------------------------------------------------------------- def GetKernel(family, krnLen): """It is responsible for returning the selected kernel's calculation function along with the coresponding weights and some other stuff. Parameters ---------- family: str The 'family' parameter determines the kind of desired kernel that will be used in a moment transform. 
Currently, the supported kernel families are: * 'chebyshev1': the Chebyshev polynomials of the first kind, * 'chebyshevD': the Discrete Chebyshev (or else Tchebichef) polynomials, * 'geometric' : the geometric monomials (Pn(x) = x**n), * 'central' : the central monomials (Pn(x; xm) = (x-xm)**n) krnLen: int It represents the kernel's length. Each kernel is a vector of values, which are calculated by the selected 'family' of functions. Returns ------- tuple: (kernel, weights, xi, isOrthogonal) The first element of the resulted tuple is the kernel's function, the second the the coresponding weight function, the third is the variables values that are used as input in the kernel function and finally the fourth element is a flag (boolean) which infroms whether or not the kernel is orthogonal. Examples -------- from pymoms import Kernel import matplotlib.pyplot as plt import numpy as np def myplot(x, y, title, ylabel): plt.figure(figsize=(15,5)) plt.plot(x, y) plt.grid('on') plt.title(title) plt.xlabel('x') plt.ylabel(ylabel) plt.show() upToOrder = 5 krnLen = 100 # Get the Chebyshev Polynomial of the first kind as Kernel. # Actually, for the particular polynomial family, the resulted xi is given by: # xi = np.cos(np.pi*(np.arange(0, krnLen)+0.5)/float(krnLen)) kernel, weights, xi, isOrthogonal = Kernel.GetKernel('chebyshev1', krnLen) # Currently, the supported kernel families are: # * 'chebyshev1': the Chebyshev polynomials of the first kind, # * 'chebyshevD': the Discrete Chebyshev (or else Tchebichef) polynomials, # * 'geometric' : the geometric monomials (Pn(x) = x**n), # * 'central' : the central monomials (Pn(x; xm) = (x-xm)**n) # Calculate the polynomial values and the corresponding weights P = kernel(upToOrder, xi) Wp = weights(upToOrder, krnLen) P_norm = np.zeros(P.shape) # normalized the polynomial values using the weights for idx, p in enumerate(P): P_norm[idx,:] = P[idx,:] * Wp[idx] myplot(xi, P.T, 'Chebyshef Polynomial of first kind', '$P_{n}(x)$') myplot(xi, P_norm.T, 'Norm. Chebyshef Polynomial of first kind', '$\overline{P}_{n}(x)$') # let us define a different xi: xi = np.linspace(-1, 1, krnLen) myplot(xi, P.T, 'Chebyshef Polynomial of first kind', '$P_{n}(x)$') """ kernels = { 'chebyshev1': { # continuous Chebychev of 1st kind 'poly' : ConOrthoPoly.Chebyshev1st.Poly, 'weights' : ConOrthoPoly.Chebyshev1st.WeightScheme, 'x' : _np.cos(_np.pi*(_np.arange(0, krnLen)+0.5)/float(krnLen)), 'isOrthogonal': True, }, 'chebyshevD': { # discrete Chebyshef (or else Tchebichef) 'poly' : DisOrthoPoly.Chebyshev.Poly, 'weights' : DisOrthoPoly.Chebyshev.WeightScheme, 'x' :
_np.arange(0, krnLen)
numpy.arange
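GetKernel above pairs each polynomial family with its natural evaluation points, e.g. the Chebyshev-Gauss nodes cos(pi*(k+0.5)/N) for the first-kind polynomials. The sketch below evaluates those polynomials with the standard three-term recurrence T0 = 1, T1 = x, T_{n+1} = 2x*T_n - T_{n-1}; the exact normalization used inside ConOrthoPoly.Chebyshev1st.Poly is not shown, so this is only an assumed stand-in.

import numpy as np

def chebyshev1_poly(up_to_degree, x):
    """Rows 0..up_to_degree of first-kind Chebyshev polynomials evaluated at points x."""
    x = np.asarray(x, dtype=float)
    P = np.empty((up_to_degree + 1, x.size))
    P[0] = 1.0
    if up_to_degree >= 1:
        P[1] = x
    for n in range(1, up_to_degree):
        P[n + 1] = 2.0 * x * P[n] - P[n - 1]   # T_{n+1} = 2x T_n - T_{n-1}
    return P

krn_len = 100
xi = np.cos(np.pi * (np.arange(krn_len) + 0.5) / krn_len)   # same nodes as above
P = chebyshev1_poly(5, xi)                                   # shape (6, 100)
# At these nodes T_n(x_k) = cos(n * pi * (k + 0.5) / N), which gives a quick sanity check:
print(np.allclose(P[3], np.cos(3 * np.pi * (np.arange(krn_len) + 0.5) / krn_len)))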
import numpy as np import pandas as pd from collections import defaultdict import datetime import math import os.path from sklearn.preprocessing import StandardScaler def feature_engineering(feature): # confirmed, death, confirmed_diff, death_diff, confirmed_square, death_square diff = [0 for _ in range(12)] squared = [0 for _ in range(14)] for idx in range(2): for i in range(1, 7): diff[idx * 6 + i - 1] = feature[idx * 7 + i] - feature[idx * 7 + i - 1] feature.extend(diff) for i in range(14): squared[i] = feature[i] * feature[i] feature.extend(squared) return feature class PreprocessForNN(object): def __init__(self): self.deathData = None self.confirmedData = None self.features = [] self.icu_beds = defaultdict(int) self.staffed_beds = defaultdict(int) self.licensed_beds = defaultdict(int) self.total_population = defaultdict(int) self.population_over_sixty = defaultdict(int) self.policies = defaultdict(lambda: [0 for _ in range(8)]) self.scaler_feature = StandardScaler() self.scaler_label = StandardScaler() self.valid_FIPS = set() self.upper = 50.0 self.mid = 10.0 self.lower = 1.0 def load_policies(self): ''' dict[key][0] = stay at home dict[key][1] = >50 gathering dict[key][2] = >500 gathering dict[key][3] = public schools dict[key][4] = restaurant dine-in dict[key][5] = entertainment/gym dict[key][6] = federal guidelines dict[key][7] = foreign travel ban ''' policy = pd.read_csv(filepath_or_buffer='data/us/other/policies.csv') label_names = ['stay at home', '>50 gatherings', '>500 gatherings', 'public schools', 'restaurant dine-in', 'entertainment/gym'] mean_times = [0 for _ in range(len(label_names))] ranges = [0 for _ in range(len(label_names))] min_times = [0 for _ in range(len(label_names))] for idx, label in enumerate(label_names): times = policy[label].values[~np.isnan(policy[label])] mean_times[idx] = np.mean(times) ranges[idx] = max(1, np.max(times) - np.min(times)) min_times[idx] = np.min(times) for item in policy.iterrows(): fips = item[1]['FIPS'] for idx, label in enumerate(label_names): if not math.isnan(item[1][label]): scaled = 1 - (item[1][label] - min_times[idx]) / ranges[idx] self.policies[fips][idx] = scaled def load_data(self): self.load_beds_dict() self.load_population_dict() self.load_policies() def fetch_none_zero_data(self): last_date = self.deathData.columns[-1] countyNoneZero = self.deathData.loc[self.deathData[last_date] != 0] countyNoneZero.reset_index(drop=True) self.valid_FIPS = set(self.deathData['countyFIPS']) self.deathData = countyNoneZero keep_flag = [] for FIPS in self.confirmedData['countyFIPS']: keep_flag.append(int(FIPS) in self.valid_FIPS) self.confimedData = self.confirmedData.loc[keep_flag, :] @staticmethod def add_window(FIPS, death_list, confirmed_list, mode): window_size_7 = 7 window_size_14 = 14 count = len(death_list) - window_size_7 - window_size_14 + 1 output = [] FIPS_list = [] label_list = [] feature_list = [] if mode == 'train': for i in range(count): feature = confirmed_list[i:i+window_size_7] + death_list[i:i+window_size_7] feature = feature_engineering(feature) label = death_list[i+window_size_7:i+window_size_7+window_size_14] if sum(label) == 0: continue FIPS_list.append(FIPS) label_list.append(label) feature_list.append(feature) dict = {'FIPS': FIPS_list, 'label': label_list, 'feature': feature_list} else: feature = confirmed_list[-window_size_7:] + death_list[-window_size_7:] feature = feature_engineering(feature) FIPS_list.append(FIPS) feature_list.append(feature) dict = {'FIPS': FIPS_list, 'feature': feature_list} output = 
pd.DataFrame(dict) return output def load_death_and_confirmed(self, mode='train'): # return dataframe ['FIPS', 'label', 'feature'] path = './processed_data/' death_path = path + 'daily_death_from_nyu.csv' confirmed_path = path + 'daily_confirmed_from_nyu.csv' if not os.path.isfile(death_path): self.transform_format('death') self.transform_format('confirmed') self.deathData = pd.read_csv(death_path) self.confirmedData = pd.read_csv(confirmed_path) # get part of data # self.deathData = self.deathData.iloc[:, :-7] # self.confirmedData = self.confirmedData.iloc[:, :-7] self.valid_FIPS = set(self.deathData['countyFIPS']) # self.fetch_none_zero_data() output = None for FIPS in self.valid_FIPS: if FIPS == '0': continue death_list = list(self.deathData.loc[self.deathData['countyFIPS'] == FIPS].values[0][4:]) confirmed_list = list(self.confirmedData.loc[self.confirmedData['countyFIPS'] == FIPS].values[0][4:]) cur_res = self.add_window(FIPS, death_list, confirmed_list, mode) if output is None: output = cur_res else: output = pd.concat([output, cur_res]) return output def load_beds_dict(self): # load number of beds from 'beds_by_county.csv' beds = pd.read_csv(filepath_or_buffer='data/us/hospitals/beds_by_county.csv') for item in beds.iterrows(): self.icu_beds[item[1]['FIPS']] = item[1]['icu_beds'] self.staffed_beds[item[1]['FIPS']] = item[1]['staffed_beds'] self.licensed_beds[item[1]['FIPS']] = item[1]['licensed_beds'] def load_population_dict(self): # load population from 'county_populations.csv' population = pd.read_csv(filepath_or_buffer='data/us/demographics/county_populations.csv') for item in population.iterrows(): self.total_population[item[1]['FIPS']] = item[1]['total_pop'] self.population_over_sixty[item[1]['FIPS']] = item[1]['60plus'] def generate_training_data(self, mode='outbreak'): self.load_data() data = self.load_death_and_confirmed() confirmed_death_feature_key = 'feature' death_key = 'label' feature, label = [], [] for item in data.iterrows(): FIPS = item[1]['FIPS'] point = item[1][confirmed_death_feature_key] point.append(self.icu_beds[FIPS]) point.append(self.staffed_beds[FIPS]) point.append(self.licensed_beds[FIPS]) point.append(self.total_population[FIPS]) point.append(self.population_over_sixty[FIPS]) point.extend(self.policies[FIPS]) feature.append(point) label.append(item[1][death_key]) outbreak_feature, outbreak_label = [], [] mid2_feature, mid2_label = [], [] mid_feature, mid_label = [], [] burning_feature, burning_label = [], [] for i in range(len(feature)): if self.upper <= np.mean(label[i]): outbreak_feature.append(feature[i]) outbreak_label.append(label[i]) elif self.mid <= np.mean(label[i]): mid2_feature.append(feature[i]) mid2_label.append(label[i]) elif self.lower <= np.mean(label[i]): mid_feature.append(feature[i]) mid_label.append(label[i]) else: burning_feature.append(feature[i]) burning_label.append(label[i]) if mode == 'outbreak': scalered_feature = np.array(outbreak_feature) scalered_label = np.array(outbreak_label) scalered_feature = self.scaler_feature.fit_transform(scalered_feature) scalered_label = self.scaler_label.fit_transform(scalered_label) return scalered_feature, scalered_label elif mode == 'mid2': scalered_feature = np.array(mid2_feature) scalered_label = np.array(mid2_label) scalered_feature = self.scaler_feature.fit_transform(scalered_feature) scalered_label = self.scaler_label.fit_transform(scalered_label) return scalered_feature, scalered_label elif mode == 'mid': scalered_feature = np.array(mid_feature) scalered_label = np.array(mid_label) 
scalered_feature = self.scaler_feature.fit_transform(scalered_feature) scalered_label = self.scaler_label.fit_transform(scalered_label) return scalered_feature, scalered_label else: scalered_feature = np.array(burning_feature) scalered_label = np.array(burning_label) scalered_feature = self.scaler_feature.fit_transform(scalered_feature) scalered_label = self.scaler_label.fit_transform(scalered_label) return scalered_feature, scalered_label def generate_testing_data(self, mode='outbreak'): self.load_data() data = self.load_death_and_confirmed('test') confirmed_death_feature_key = 'feature' feature, FIPS_list = [], [] for item in data.iterrows(): FIPS = item[1]['FIPS'] point = item[1][confirmed_death_feature_key] point.append(self.icu_beds[FIPS]) point.append(self.staffed_beds[FIPS]) point.append(self.licensed_beds[FIPS]) point.append(self.total_population[FIPS]) point.append(self.population_over_sixty[FIPS]) point.extend(self.policies[FIPS]) feature.append(point) FIPS_list.append(FIPS) outbreak_feature, outbreak_FIPS = [], [] mid_feature, mid_FIPS = [], [] mid2_feature, mid2_FIPS = [], [] burning_feature, burning_FIPS = [], [] for i in range(len(feature)): if self.upper <= np.mean(feature[i][7:14]): outbreak_feature.append(feature[i]) outbreak_FIPS.append(FIPS_list[i]) elif self.mid <= np.mean(feature[i][7:14]): mid2_feature.append(feature[i]) mid2_FIPS.append(FIPS_list[i]) elif self.lower <=
np.mean(feature[i][7:14])
numpy.mean
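add_window above slides a 7-day window of confirmed and death counts over each county's series and pairs it with the following 14 days of deaths as the training label. Below is a stripped-down version of that windowing on a single toy series; the window lengths follow the code above, while the series itself is made up.

import numpy as np

def sliding_windows(series, in_len=7, out_len=14):
    """Return (features, labels): each feature is in_len days, each label the next out_len days."""
    X, y = [], []
    for i in range(len(series) - in_len - out_len + 1):
        X.append(series[i:i + in_len])
        y.append(series[i + in_len:i + in_len + out_len])
    return np.array(X), np.array(y)

deaths = np.arange(30.0)                 # toy daily series, 30 days
X, y = sliding_windows(deaths)
print(X.shape, y.shape)                  # (10, 7) (10, 14)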
#!/usr/bin/env python # stdlib imports import struct import os.path import sys # third party imports import numpy as np from scipy.io import netcdf from .grid2d import Grid2D from .dataset import DataSetException from .geodict import GeoDict import h5py '''Grid2D subclass for reading, writing, and manipulating GMT format grids. Usage: :: gmtgrid = GMTGrid.load(gmtfilename) gmtgrid.getGeoDict() This class supports reading and writing of all three GMT formats: NetCDF, HDF, and the GMT "native" format. ''' NETCDF_TYPES = {'B': np.uint8, 'b': np.int8, 'h': np.int16, 'i': np.int32, 'f': np.float32, 'd': np.float64} INVERSE_NETCDF_TYPES = {'uint8': 'B', 'int8': 'b', 'int16': 'h', 'int32': 'i', 'float32': 'f', 'float64': 'd'} def subsetArray(data, data_range, fgeodict): iulx1 = data_range['iulx1'] iuly1 = data_range['iuly1'] ilrx1 = data_range['ilrx1'] ilry1 = data_range['ilry1'] data1 = data[iuly1:ilry1, iulx1:ilrx1] ymax1, xmin1 = fgeodict.getLatLon(iuly1, iulx1) ymin1, xmax1 = fgeodict.getLatLon(ilry1-1, ilrx1-1) if 'iulx2' in data_range: iulx2 = data_range['iulx2'] iuly2 = data_range['iuly2'] ilrx2 = data_range['ilrx2'] ilry2 = data_range['ilry2'] data2 = data[iuly2:ilry2, iulx2:ilrx2] data = np.hstack((data1, data2)).copy() ymax2, xmin2 = fgeodict.getLatLon(iuly2, iulx2) ymin2, xmax2 = fgeodict.getLatLon(ilry2-1, ilrx2-1) ny, nx = data.shape ymin2, xmax2 = fgeodict.getLatLon(ilry2-1, ilrx2-1) xmax = xmax2 else: data = data1.copy() xmax = xmax1 ny, nx = data.shape geodict = GeoDict({'xmin': xmin1, 'xmax': xmax, 'ymin': ymin1, 'ymax': ymax1, 'dx': fgeodict.dx, 'dy': fgeodict.dy, 'nx': nx, 'ny': ny}) return (data, geodict) def sub2ind(shape, subtpl): """ Convert 2D subscripts into 1D index. @param shape: Tuple indicating size of 2D array. @param subtpl: Tuple of (possibly) numpy arrays of row,col values. @return: 1D array of indices. """ if len(shape) != 2 or len(shape) != len(subtpl): raise IndexError("Input size and subscripts must have length 2 and " "be equal in length") row, col = subtpl ny, nx = shape ind = nx*row + col return ind def indexArray(array, shp, i1, i2, j1, j2): if not isinstance(j1, int) and len(j1): j1 = j1[0] if not isinstance(j2, int) and len(j2): j2 = j2[0] if not isinstance(i1, int) and len(i1): i1 = i1[0] if not isinstance(i2, int) and len(i2): i2 = i2[0] if len(array.shape) == 1: ny = i2-i1 nx = j2-j1 if hasattr(array, 'dtype'): data = np.zeros((ny, nx), dtype=array.dtype) else: typecode = array.typecode() dtype = NETCDF_TYPES[typecode] data = np.zeros((ny, nx), dtype=dtype) rowidx = np.arange(i1, i2) i = 0 for row in rowidx: idx1 = sub2ind(shp, (row, j1)) idx2 = sub2ind(shp, (row, j2)) data[i, :] = array[idx1:idx2] i += 1 else: ny, nx = array.shape i1r = ny-i1 i2r = ny-i2 data = array[i2r:i1r, j1:j2].copy() return data def createSampleXRange(M, N, filename, bounds=None, dx=None, dy=None): if dx is None: dx = 1.0 if dy is None: dy = 1.0 if bounds is None: xmin = 0.5 xmax = xmin + (N-1)*dx ymin = 0.5 ymax = ymin + (M-1)*dy else: xmin, xmax, ymin, ymax = bounds data =
np.arange(0, M*N)
numpy.arange
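sub2ind above converts (row, col) subscripts of the grid into flat indices via ind = nx*row + col, which indexArray then uses to slice 1-D NetCDF variables row by row. The same row-major conversion is what np.ravel_multi_index computes, so a quick cross-check (the test values are arbitrary):

import numpy as np

def sub2ind(shape, subtpl):
    """Row-major (row, col) -> flat index, as in the GMTGrid helper above."""
    row, col = subtpl
    ny, nx = shape
    return nx * row + col

shape = (4, 5)
rows = np.array([0, 1, 3])
cols = np.array([2, 4, 0])
print(sub2ind(shape, (rows, cols)))               # [ 2  9 15]
print(np.ravel_multi_index((rows, cols), shape))  # same result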
""" Read and parse hdf4 files produced at Omega-60 and Omega-EP. author: <NAME> history: 2017-05-19 basic support for p510_data summary hdf 2017-04-13 created """ import os.path as osp import sys import numpy as np import pyhdf.SD as sd # lazy import: won't import until needed (gives tiny speedup on startup) plt = None # import matplotlib.pyplot as plt class Hdf(): """Hdf file data from Omega-60 and Omega-EP data sources.""" KNOWN_DATA_SOURCES = ('p510', 'asbo', 'sop', 'pxrdip') # known hdf file sources PXRDIP_SCAN_ORDER_DEFAULT = ('U', 'L', 'D', 'R', 'B') # default pxrdip scan order PXRDIP_DISPLAY_ORDER = { # display order for PXRDIP plates on omega-60 or ep 'omega-60': ('R', 'D', 'L', 'U', 'B'), 'omega-ep': ('L', 'U', 'R', 'D', 'B'), } def __init__(self, handle, pathlist=None, hint=None): self.handle = handle # filename or (TODO: implement) other keyword self.pathlist = pathlist # list of possible paths self.hint = hint # hint to preferred filename self.filename = self.handle#self.find_filename(self.handle, pathlist) self.title, self.fileext = osp.splitext(osp.split(self.filename)[1]) self.facility = 'omega-ep' # TODO: omega-60 or NIF or other or None self.attributes = None self.datasets = None self.datasource = self.determine_datasource() self.read_file() def determine_datasource(self): """Determine data type of given file based on filename.""" filename = self.filename.lower() for datasource in self.KNOWN_DATA_SOURCES: if datasource in filename: return datasource def read_file(self): """Read datafile at self.filename.""" if self.fileext == '.hdf': filedata = sd.SD(self.filename, sd.SDC.READ) self.parse_filedata(filedata) print_hdf_info(filedata) filedata.end() # close hdf file def parse_filedata(self, filedata): self.attributes = filedata.attributes() self.datasets = filedata.datasets() if self.fileext == '.hdf' and self.datasource in self.KNOWN_DATA_SOURCES: parse_method = getattr(self, "parse_" + self.datasource) parse_method(filedata) def parse_p510(self, filedata): """Parse Omega p510 summary file (laser power results).""" self.shotnum = self.attributes['LOG_NUM'] self.time = filedata.select('SUMMARY_DATA_TIME').get() # time [ns] self.power = filedata.select('SUMMARY_DATA_AVERAGE') self.beam_power = filedata.select('SUMMARY_DATA_UV').get() # self.beams = self.power.attributes()['BEAMS'] # list of beams self.power = self.power.get() # per-beam powers [TW] def parse_asbo(self, filedata): """Parse Omega ASBO hdf (Active Shock BreakOut, = Omega VISAR).""" dat = filedata.select("Streak_array") ref = filedata.select("Reference") att = self.attributes self.shotnum = "" self.location = att['Location'] self.unit = att['Unit'] att_d = dat.attributes() self.sweep_setting = att_d['SweepSpeed'] self.timestamp = att_d['TimeStamp'] A = dat.get().astype(float) self.sig = np.rot90(A[0] - A[1], 2) self.ref = np.rot90(ref.get().astype(float) - A[1], 2) self.bg = np.rot90(A[1], 2) def parse_sop(self, filedata): """Parse Omega SOP hdf (SOP = streaked optical pyrometer).""" dat = filedata.select("Streak_array") att = self.attributes self.shotnum = "" self.location = att['Location'] self.unit = att['Unit'] att_d = dat.attributes() self.sweep_setting = att_d['SweepSpeed'] self.timestamp = att_d['TimeStamp'] A = dat.get().astype(float) self.sig = np.rot90(A[0] - A[1], 2) self.bg =
np.rot90(A[1], 2)
numpy.rot90
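The completion above finishes parse_sop with np.rot90(A[1], 2), mirroring the 180-degree rotation already applied to the background-subtracted signal. A hedged, self-contained illustration of that numpy.rot90 pattern on a synthetic array; the shapes below are made up, only the rotate-by-two-quarter-turns call matches the code above.

import numpy as np

# Synthetic stand-in for the streak array A: frame 0 = signal, frame 1 = background.
A = np.arange(2 * 3 * 4, dtype=float).reshape(2, 3, 4)

sig = np.rot90(A[0] - A[1], 2)   # background-subtracted signal, rotated 180 degrees
bg = np.rot90(A[1], 2)           # background frame, rotated the same way

assert sig.shape == bg.shape == (3, 4)
assert np.allclose(np.rot90(bg, 2), A[1])   # two more quarter turns undo the rotation

Rotating both the signal and the background by the same amount keeps the two frames aligned, which is presumably why the parser applies the identical np.rot90(..., 2) to each.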
import os import datetime import sys import torch import numpy as np import imageio import json import random import time from tqdm import tqdm, trange from scipy.spatial import KDTree import indirect_utils from load_osf import load_osf_data from intersect import compute_object_intersect_tensors from ray_utils import transform_rays from run_osf_helpers import * from scatter import scatter_coarse_and_fine import shadow_utils import VisionNet_utils import AudioNet_utils import AudioNet_model import TouchNet_utils import TouchNet_model from scipy.io.wavfile import write import librosa import imageio device = torch.device("cuda" if torch.cuda.is_available() else "cpu") def config_parser(): import configargparse parser = configargparse.ArgParser( config_file_parser_class=configargparse.YAMLConfigFileParser ) #parser = configargparse.ArgumentParser() parser.add_argument("--object_file_path", type=str, required=True, help='ObjectFile path') parser.add_argument('--config', is_config_file=True, help='config file path') # VisionNet options parser.add_argument("--vision_test_file_path", default='data/vision_demo.npy', help='The path of the testing file for vision, which should be a npy file.') parser.add_argument("--vision_results_dir", type=str, default='./results/vision/', help='The path of the vision results directory to save rendered images.') parser.add_argument("--chunk", type=int, default=1024*32, help='number of rays processed in parallel, decrease if running out of memory') parser.add_argument("--netchunk", type=int, default=1024*64, help='number of pts sent through network in parallel, decrease if running out of memory') # AudioNet options parser.add_argument('--audio_vertices_file_path', default='./data/audio_demo_vertices.npy', help='The path of the testing vertices file for audio, which should be a npy file.') parser.add_argument('--audio_forces_file_path', default='./data/forces.npy', help='The path of forces file for audio, which should be a npy file.') parser.add_argument('--audio_batchSize', type=int, default=10000, help='input batch size') parser.add_argument('--audio_results_dir', type=str, default='./results/audio/', help='The path of audio results directory to save rendered impact sounds as .wav files.') # TouchNet options parser.add_argument('--touch_vertices_file_path', default='./data/touch_demo_vertices.npy', help='The path of the testing vertices file for touch, which should be a npy file.') parser.add_argument('--touch_batchSize', type=int, default=10000, help='input batch size') parser.add_argument('--touch_results_dir', type=str, default='./results/touch/', help='The path of the touch results directory to save rendered tactile RGB images.') return parser def VisionNet_eval(args): args.secondary_chunk = args.chunk metadata, render_metadata = None, None near = 0.01 far = 4 poses, hwf, i_split, metadata = load_osf_data(args.vision_test_file_path) i_test = i_split[0] render_poses = np.array(poses[i_test]) # Create dummy metadata if not loaded from dataset. 
if metadata is None: metadata = torch.tensor([[0, 0, 1]] * len(images), dtype=torch.float) # [N, 3] if render_metadata is None: render_metadata = metadata # Cast intrinsics to right types H, W, focal = hwf H, W = int(H), int(W) hwf = [H, W, focal] # Create nerf model render_kwargs_train, render_kwargs_test, start, grad_vars, models, optimizer = VisionNet_utils.create_nerf( args, metadata, render_metadata) global_step = start bds_dict = { 'near': torch.tensor(near).float(), 'far': torch.tensor(far).float(), } render_kwargs_train.update(bds_dict) render_kwargs_test.update(bds_dict) render_kwargs_test['render_metadata'] = metadata # Move testing data to GPU render_poses = torch.Tensor(render_poses).to(device) with torch.no_grad(): images = None testsavedir = args.vision_results_dir os.makedirs(testsavedir, exist_ok=True) print('Begin rendering images in ', testsavedir) rgbs, _ = VisionNet_utils.render_path(render_poses, hwf, args.chunk, render_kwargs_test, gt_imgs=images, savedir=testsavedir, c2w_staticcam=None, render_start=None, render_end=None) print('Done rendering images in ', testsavedir) def AudioNet_eval(args): dim = 32 audio_sampling_rate = 16000 audio_window_size = 400 audio_hop_size = 160 audio_stft_time_dim = 201 audio_stft_freq_dim = 257 audio_network_depth = 8 xyz = np.load(args.audio_vertices_file_path) forces = np.load(args.audio_forces_file_path) xyz = xyz[0:1,:] forces = forces[0:1] N = xyz.shape[0] print(N) #N: number of data #D: number of dimension #C: number of channels, real and img #F: number of frequency #T: number of timestamps N, D, C, F, T = N, 3, 2, audio_stft_freq_dim, audio_stft_time_dim checkpoint = torch.load(args.object_file_path) normalizer_dic = checkpoint['AudioNet']['normalizer'] voxel_vertex = checkpoint['AudioNet']['voxel_vertex'] vert_tree = KDTree(voxel_vertex) translation = checkpoint['AudioNet']['translation'] scale = checkpoint['AudioNet']['scale'] k = 4 # Average over 4 nearest neighbors xyz_in_voxel = np.zeros((4, N, 3)) for i in range(N): obj_coordinates = xyz[i] binvox_coordinates = AudioNet_utils.transform_mesh_collision_binvox(obj_coordinates, translation, scale) coordinates_in_voxel = binvox_coordinates * dim voxel_verts_index = vert_tree.query(coordinates_in_voxel, k)[1] for j in range(k): xyz_in_voxel[j, i] = voxel_vertex[voxel_verts_index[j]] xyz_in_voxel = np.repeat(xyz_in_voxel.reshape((4, N, 1, 3)), F * T, axis=2) #normalize xyz_in_voxel to [-1, 1] xyz_in_voxel_min = xyz_in_voxel.min() xyz_in_voxel_max = xyz_in_voxel.max() xyz_in_voxel = (xyz_in_voxel - xyz_in_voxel_min) / (xyz_in_voxel_max - xyz_in_voxel_min) spec_comps_f1_min = normalizer_dic['f1_min'] spec_comps_f1_max = normalizer_dic['f1_max'] spec_comps_f2_min = normalizer_dic['f2_min'] spec_comps_f2_max = normalizer_dic['f2_max'] spec_comps_f3_min = normalizer_dic['f3_min'] spec_comps_f3_max = normalizer_dic['f3_max'] #initialize frequency and time features freq_feats = np.repeat(np.repeat(np.arange(F).reshape((F, 1)), T, axis=1).reshape((1, 1, F, T)), N, axis=0) time_feats = np.repeat(np.repeat(np.arange(T).reshape((1, T)), F, axis=0).reshape((1, 1, F, T)), N, axis=0) #normalize frequency and time features to [-1, 1] freq_feats_min = freq_feats.min() freq_feats_max = freq_feats.max() time_feats_min = time_feats.min() time_feats_max = time_feats.max() freq_feats = (freq_feats - freq_feats_min) / (freq_feats_max - freq_feats_min) time_feats = (time_feats - time_feats_min) / (time_feats_max - time_feats_min) data_x = np.concatenate((freq_feats, time_feats), axis=1) data_y = 
np.concatenate((freq_feats, time_feats), axis=1) data_z = np.concatenate((freq_feats, time_feats), axis=1) data_x = np.transpose(data_x.reshape((N, 2, -1)), axes = [0, 2, 1]) data_y = np.transpose(data_y.reshape((N, 2, -1)), axes = [0, 2, 1]) data_z = np.transpose(data_z.reshape((N, 2, -1)), axes = [0, 2, 1]) data_x = np.repeat(data_x.reshape((1, N, -1, 2)), k, axis=0) data_y = np.repeat(data_y.reshape((1, N, -1, 2)), k, axis=0) data_z = np.repeat(data_z.reshape((1, N, -1, 2)), k, axis=0) #Now concatenate xyz and feats to get final feats matrix as [x, y, z, f, t, real, img] feats_x = np.concatenate((xyz_in_voxel, data_x), axis=3).reshape((-1, 5)) feats_y = np.concatenate((xyz_in_voxel, data_y), axis=3).reshape((-1, 5)) feats_z = np.concatenate((xyz_in_voxel, data_z), axis=3).reshape((-1, 5)) embed_fn, input_ch = AudioNet_model.get_embedder(10, 0) model = AudioNet_model.AudioNeRF(D = audio_network_depth, input_ch = input_ch) state_dic = checkpoint['AudioNet']["model_state_dict"] state_dic = AudioNet_utils.strip_prefix_if_present(state_dic, 'module.') model.load_state_dict(state_dic) model = nn.DataParallel(model).to(device) model.eval() loss_fn = torch.nn.MSELoss(reduction='mean') start_time = time.time() preds_x = np.zeros((feats_x.shape[0], 2)) preds_y = np.zeros((feats_y.shape[0], 2)) preds_z =
np.zeros((feats_z.shape[0], 2))
numpy.zeros
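The numpy.zeros completion above preallocates the z-axis prediction buffer preds_z with one (real, imaginary) pair per feature row, alongside the already-allocated preds_x and preds_y. A small sketch of that preallocate-then-fill-by-batch pattern, with illustrative shapes and a trivial column copy standing in for the torch forward pass used by the real code.

import numpy as np

# Illustrative feature matrix: rows of [x, y, z, freq, time] features (shapes are made up).
feats_z = np.random.random((1000, 5))
batch_size = 256

# Preallocate the prediction buffer: one (real, imag) pair per feature row.
preds_z = np.zeros((feats_z.shape[0], 2))

for start in range(0, feats_z.shape[0], batch_size):
    batch = feats_z[start:start + batch_size]
    # Placeholder for the network forward pass; here we simply copy two feature columns.
    preds_z[start:start + batch.shape[0]] = batch[:, :2]

assert preds_z.shape == (feats_z.shape[0], 2)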
# utils/test_kronecker.py """Tests for rom_operator_inference.utils._kronecker.""" import pytest import numpy as np import rom_operator_inference as opinf # Index generation for fast self-product kronecker evaluation ================= def test_kron2c_indices(n_tests=100): """Test utils._kronecker.kron2c_indices().""" mask = opinf.utils.kron2c_indices(4) assert np.all(mask == np.array([[0, 0], [1, 0], [1, 1], [2, 0], [2, 1], [2, 2], [3, 0], [3, 1], [3, 2], [3, 3]], dtype=int)) submask = opinf.utils.kron2c_indices(3) assert np.allclose(submask, mask[:6]) r = 10 _r2 = r * (r + 1) // 2 mask = opinf.utils.kron2c_indices(r) assert mask.shape == (_r2, 2) assert np.all(mask[0] == 0) assert np.all(mask[-1] == r - 1) assert mask.sum(axis=0)[0] == sum(i*(i+1) for i in range(r)) # Ensure consistency with utils.kron2c(). for _ in range(n_tests): x = np.random.random(r) assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron2c(x)) def test_kron3c_indices(n_tests=100): """Test utils._kronecker.kron3c_indices().""" mask = opinf.utils.kron3c_indices(2) assert np.all(mask == np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [1, 1, 1]], dtype=int)) r = 10 mask = opinf.utils.kron3c_indices(r) _r3 = r * (r + 1) * (r + 2) // 6 mask = opinf.utils.kron3c_indices(r) assert mask.shape == (_r3, 3) assert np.all(mask[0] == 0) assert np.all(mask[-1] == r - 1) # Ensure consistency with utils.kron3c(). for _ in range(n_tests): x = np.random.random(r) assert np.allclose(np.prod(x[mask], axis=1), opinf.utils.kron3c(x)) # Kronecker (Khatri-Rao) products ============================================= # utils.kron2c() -------------------------------------------------------------- def _test_kron2c_single_vector(n): """Do one vector test of utils._kronecker.kron2c().""" x = np.random.random(n) x2 = opinf.utils.kron2c(x) assert x2.ndim == 1 assert x2.shape[0] == n*(n+1)//2 for i in range(n): assert np.allclose(x2[i*(i+1)//2:(i+1)*(i+2)//2], x[i]*x[:i+1]) def _test_kron2c_single_matrix(n): """Do one matrix test of utils._kronecker.kron2c().""" X = np.random.random((n,n)) X2 = opinf.utils.kron2c(X) assert X2.ndim == 2 assert X2.shape[0] == n*(n+1)//2 assert X2.shape[1] == n for i in range(n): assert np.allclose(X2[i*(i+1)//2:(i+1)*(i+2)//2], X[i]*X[:i+1]) def test_kron2c(n_tests=100): """Test utils._kronecker.kron2c().""" # Try with bad input. with pytest.raises(ValueError) as exc: opinf.utils.kron2c(np.random.random((3,3,3)), checkdim=True) assert exc.value.args[0] == "x must be one- or two-dimensional" # Correct inputs. for n in np.random.randint(2, 100, n_tests): _test_kron2c_single_vector(n) _test_kron2c_single_matrix(n) # utils.kron3c() -------------------------------------------------------------- def _test_kron3c_single_vector(n): """Do one vector test of utils._kronecker.kron3c().""" x = np.random.random(n) x3 = opinf.utils.kron3c(x) assert x3.ndim == 1 assert x3.shape[0] == n*(n+1)*(n+2)//6 for i in range(n): assert np.allclose(x3[i*(i+1)*(i+2)//6:(i+1)*(i+2)*(i+3)//6], x[i]*opinf.utils.kron2c(x[:i+1])) def _test_kron3c_single_matrix(n): """Do one matrix test of utils._kronecker.kron3c().""" X = np.random.random((n,n)) X3 = opinf.utils.kron3c(X) assert X3.ndim == 2 assert X3.shape[0] == n*(n+1)*(n+2)//6 assert X3.shape[1] == n for i in range(n): assert np.allclose(X3[i*(i+1)*(i+2)//6:(i+1)*(i+2)*(i+3)//6], X[i]*opinf.utils.kron2c(X[:i+1])) def test_kron3c(n_tests=50): """Test utils._kronecker.kron3c().""" # Try with bad input. 
with pytest.raises(ValueError) as exc: opinf.utils.kron3c(np.random.random((2,4,3)), checkdim=True) assert exc.value.args[0] == "x must be one- or two-dimensional" # Correct inputs. for n in np.random.randint(2, 30, n_tests): _test_kron3c_single_vector(n) _test_kron3c_single_matrix(n) # Matricized tensor management ================================================ # utils.expand_quadratic() ---------------------------------------------------- def _test_expand_quadratic_single(r): """Do one test of utils._kronecker.expand_quadratic().""" x = np.random.random(r) # Do a valid expand_quadratic() calculation and check dimensions. s = r*(r+1)//2 Hc = np.random.random((r,s)) H = opinf.utils.expand_quadratic(Hc) assert H.shape == (r,r**2) # Check that Hc(x^2) == H(x⊗x). Hxx = H @ np.kron(x,x) assert np.allclose(Hc @ opinf.utils.kron2c(x), Hxx) # Check properties of the tensor for H. Htensor = H.reshape((r,r,r)) assert np.allclose(Htensor @ x @ x, Hxx) for subH in H: assert np.allclose(subH, subH.T) def test_expand_quadratic(n_tests=100): """Test utils._kronecker.expand_quadratic().""" # Try to do expand_quadratic() with a bad second dimension. r = 5 sbad = r*(r+3)//2 Hc =
np.random.random((r, sbad))
numpy.random.random
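In the test above, numpy.random.random builds a quadratic operator matrix Hc whose second dimension is deliberately sbad = r*(r+3)//2 rather than the compact r*(r+1)//2, so that expand_quadratic() can be expected to reject it. A short sketch of the two shapes involved; the library call itself is not exercised here.

import numpy as np

r = 5
s_good = r * (r + 1) // 2   # compact quadratic dimension expand_quadratic() expects
sbad = r * (r + 3) // 2     # deliberately wrong second dimension used by the test

Hc_good = np.random.random((r, s_good))
Hc_bad = np.random.random((r, sbad))

assert Hc_good.shape == (5, 15)
assert Hc_bad.shape == (5, 20)   # the mismatch the real test expects to raise ValueError

The compact width r*(r+1)//2 counts the unique entries of a symmetric quadratic interaction (the same count used by kron2c earlier in the file), which is why a mismatched width makes a natural negative test.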
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Created on Thu Apr 01 10:00:58 2021 @author: <NAME> """ #------------------------------------------------------------------# # # # # # Imports # # # # # #------------------------------------------------------------------# from math import e import numpy as np import pandas as pd import os import time import glob import itertools from joblib import Parallel, delayed from generate_files import GenerateFiles import matplotlib.pyplot as plt from mpl_toolkits.axes_grid1 import make_axes_locatable from matplotlib.colors import LogNorm import seaborn as sns import matplotlib.style as style style.use('seaborn-poster') #sets the size of the charts style.use('ggplot') from scipy import ndimage from astropy.io import fits from astropy.wcs import WCS from astropy.utils.data import get_pkg_data_filename from astropy.coordinates import SkyCoord, match_coordinates_sky import astropy.units as u from astropy.stats import mad_std import astrotools.healpytools as hpt import astropy_healpix as ahp from astropy.coordinates import ICRS from tqdm import tqdm from collections import Counter import warnings warnings.filterwarnings('ignore') import healpy as hp from hpproj import CutSky, to_coord import logging cs_logger = logging.getLogger('cutsky') cs_logger.setLevel(logging.WARNING) cs_logger.propagate = False hpproj_logger = logging.getLogger('hpproj') hpproj_logger.setLevel(logging.WARNING) mpl_logger = logging.getLogger('matplotlib') mpl_logger.setLevel(logging.WARNING) #------------------------------------------------------------------# # # # # # Functions # # # # # #------------------------------------------------------------------# class MakeData(object): """Class to create and preprocess input/output files from full sky-maps. """ def __init__(self, dataset, npix, loops, planck_path, milca_path, disk_radius=None, output_path=None): """ Args: dataset (str): file name for the cluster catalog that will used. Options are 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50'. bands (list): list of full sky-maps that will be used for the input file. loops (int): number of times the dataset containing patches with at least one cluster within will be added again to training set with random variations (translations/rotations). Options are 100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', and 'y-map'. More full sky-maps will be added later on (e.g. CO2, X-ray, density maps). planck_path (str): path to directory containing planck HFI 6 frequency maps. Files should be named as following 'HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits', 'HFI_SkyMap_545-field-Int_2048_R3.00_full.fits', 'HFI_SkyMap_857-field-Int_2048_R3.00_full.fits'. milca_path (str): path to directory containing MILCA full sky map. File should be named 'milca_ymaps.fits'. disk_radius (float, optional): Disk radius that will be used to create segmentation masks for output files. Defaults to None. output_path (str, optional): Path to output directory. Output directory needs be created beforehand using 'python xcluster.py -m True' selecting same output directory in 'params.py'. If None, xcluster path will be used. Defaults to None. 
""" self.path = os.getcwd() + '/' self.dataset = dataset # 'planck_z', 'planck_z_no-z', 'MCXC', 'RM30', 'RM50' self.bands = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz','y-map','CO','p-noise'] self.loops = loops self.n_labels = 2 maps = [] self.freq = 1022 self.planck_freq = 126 if '100GHz' in self.bands: maps.append((planck_path + "HFI_SkyMap_100-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 100', 'docontour': True})) # self.freq += 2 # self.planck_freq += 2 if '143GHz' in self.bands: maps.append((planck_path + "HFI_SkyMap_143-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 143', 'docontour': True})) # self.freq += 4 # self.planck_freq += 4 if '217GHz' in self.bands: maps.append((planck_path + "HFI_SkyMap_217-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 217', 'docontour': True})) # self.freq += 8 # self.planck_freq += 8 if '353GHz' in self.bands: maps.append((planck_path + "HFI_SkyMap_353-psb-field-IQU_2048_R3.00_full.fits", {'legend': 'HFI 353', 'docontour': True})) # self.freq += 16 # self.planck_freq += 16 if '545GHz' in self.bands: maps.append((planck_path + "HFI_SkyMap_545-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 545', 'docontour': True})) # self.freq += 32 # self.planck_freq += 32 if '857GHz' in self.bands: maps.append((planck_path + "HFI_SkyMap_857-field-Int_2048_R3.00_full.fits", {'legend': 'HFI 857', 'docontour': True})) # self.freq += 64 # self.planck_freq += 64 if 'y-map' in self.bands: maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True})) # self.freq += 128 if 'CO' in self.bands: maps.append((planck_path + "COM_CompMap_CO21-commander_2048_R2.00.fits", {'legend': 'CO', 'docontour': True})) # self.freq += 256 if 'p-noise' in self.bands: maps.append((planck_path + 'COM_CompMap_Compton-SZMap-milca-stddev_2048_R2.00.fits', {'legend': 'noise', 'docontour': True})) # self.freq += 512 maps.append((milca_path + "milca_ymaps.fits", {'legend': 'MILCA y-map', 'docontour': True})) #used for plots only self.maps = maps self.temp_path = self.path + 'to_clean/' self.disk_radius = disk_radius self.npix = npix #in pixels self.pixsize = 1.7 #in arcmin self.ndeg = (self.npix * self.pixsize)/60 #in deg self.nside = 2 if output_path is None: self.output_path = self.path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/") else: self.output_path = output_path + 'output/' + self.dataset + time.strftime("/%Y-%m-%d/") self.dataset_path = self.path + 'datasets/' + self.dataset + '/' self.planck_path = planck_path self.milca_path = milca_path self.test_regions = [[0, 360, 90, 70], [0, 120, 70, 40], [120, 240, 70, 40], [240, 360, 70, 40], [0, 120, 40, 18], [120, 240, 40, 18], [240, 360, 40, 18], [0, 120, -18, -40], [120, 240, -18, -40], [240, 360, -18, -40], [0, 120, -40, -70], [120, 240, -40, -70], [240, 360, -40, -70], [0, 360, -70, -90]] self.val_regions = [[0, 180, -20, -40], [0, 180, -20, -40], [0, 180, -20, -40], [0, 180, -20, -40], [0, 360, -40, -60], [0, 360, -40, -60], [0, 360, -40, -60], [0, 360, 60, 40], [0, 360, 60, 40], [0, 360, 60, 40], [0, 180, 40, 20], [0, 180, 40, 20], [0, 180, 40, 20], [0, 180, 40, 20]] def plot_psz2_clusters(self, healpix_path): """Saves plots containing patches for planck frequency maps and y-map. Function is deprecated and will be removed in later versions. Args: healpix_path (str): output path for plots (deprecated). 
""" maps = self.maps PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits') glon = PSZ2[1].data['GLON'] glat = PSZ2[1].data['GLAT'] freq = ['100GHz','143GHz','217GHz','353GHz','545GHz','857GHz', 'y-map'] for j in range(len(glon)): fig = plt.figure(figsize=(21,14), tight_layout=False) fig.suptitle(r'$glon=$ {:.2f} $^\circ$, $glat=$ {:.2f} $^\circ$'.format(glon[j], glat[j]), y=0.92, fontsize=20) cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False) coord = to_coord([glon[j], glat[j]]) result = cutsky.cut_fits(coord) for i,nu in enumerate(freq): ax = fig.add_subplot(3,4,1+i) divider = make_axes_locatable(ax) cax = divider.append_axes('right', size='5%', pad=0.05) HDU = result[i]['fits'] im = ax.imshow(HDU.data, origin="lower") w = WCS(HDU.header) sky = w.world_to_pixel_values(glon[j], glat[j]) segmentation = plt.Circle((sky[0], sky[1]), 2.5/1.7, color='white', alpha=0.1) ax.add_patch(segmentation) ax.axvline(sky[0], ymin=0, ymax=(self.npix//2-10)/self.npix, color='white', linestyle='--') ax.axvline(sky[0], ymin=(self.npix//2+10)/self.npix, ymax=1, color='white', linestyle='--') ax.axhline(sky[1], xmin=0, xmax=(self.npix//2-10)/self.npix, color='white', linestyle='--') ax.axhline(sky[1], xmin=(self.npix//2+10)/self.npix, xmax=1, color='white', linestyle='--') # ax.scatter(sky[0], sky[1], color='red') ax.set_title(r'%s'%nu) fig.colorbar(im, cax=cax, orientation='vertical') plt.savefig(healpix_path + 'PSZ2/PSZ2_skycut_%s.png'%j, bbox_inches='tight', transparent=False) plt.show() plt.close() def create_catalogs(self, plot=False): """Creates the following catalogs using 'PSZ2v1.fits', 'MCXC-Xray-clusters.fits', and 'redmapper_dr8_public_v6.3_catalog.fits' (see <NAME> 2018 for more details): planck_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with known redshift: 'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC', 'Z' planck_no_z (pd.DataFrame): dataframe with the following columns for PSZ2 clusters with unknown redshift: 'RA', 'DEC', 'GLON', 'GLAT', 'M500', 'R500', 'Y5R500', 'REDMAPPER', 'MCXC' MCXC_no_planck (pd.DataFrame): dataframe with the following columns for MCXC clusters: 'RA', 'DEC', 'R500', 'M500', 'Z' RM50_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>50: 'RA', 'DEC', 'LAMBDA', 'Z' RM30_no_planck (pd.DataFrame): dataframe with the following columns for RedMaPPer clusters with lambda>30: 'RA', 'DEC', 'LAMBDA', 'Z' Catalogs are saved in output_path + /catalogs/. Input catalogs are in planck_path. Args: plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False. 
""" PSZ2 = fits.open(self.planck_path + 'PSZ2v1.fits') df_psz2 = pd.DataFrame(data={'RA': PSZ2[1].data['RA'].tolist(), 'DEC': PSZ2[1].data['DEC'].tolist(), 'GLON': PSZ2[1].data['GLON'].tolist(), 'GLAT':PSZ2[1].data['GLAT'].tolist(), 'M500': PSZ2[1].data['MSZ'].tolist(), 'R500': PSZ2[1].data['Y5R500'].tolist(), 'REDMAPPER': PSZ2[1].data['REDMAPPER'].tolist(), 'MCXC': PSZ2[1].data['MCXC'].tolist(), 'Z': PSZ2[1].data['REDSHIFT'].tolist()}) df_psz2 = df_psz2.replace([-1, -10, -99], np.nan) planck_no_z = df_psz2.query('Z.isnull()', engine='python') planck_z = df_psz2.query('Z.notnull()', engine='python') # planck_no_z = planck_no_z[['RA', 'DEC']].copy() # planck_z = planck_z[['RA', 'DEC']].copy() planck_no_z.to_csv(self.path + 'catalogs/planck_no-z' + '.csv', index=False) planck_z.to_csv(self.path + 'catalogs/planck_z' + '.csv', index=False) MCXC = fits.open(self.planck_path + 'MCXC-Xray-clusters.fits') MCXC_skycoord = SkyCoord(ra=MCXC[1].data['RA'].tolist(), dec=MCXC[1].data['DEC'].tolist(), unit=u.degree) MCXC_GLON = list(MCXC_skycoord.galactic.l.degree) MCXC_GLAT = list(MCXC_skycoord.galactic.b.degree) df_MCXC = pd.DataFrame(data={'RA': MCXC[1].data['RA'].tolist(), 'DEC': MCXC[1].data['DEC'].tolist(), 'R500': MCXC[1].data['RADIUS_500'].tolist(), 'M500': MCXC[1].data['MASS_500'].tolist(), 'GLON': MCXC_GLON, 'GLAT': MCXC_GLAT, 'Z': MCXC[1].data['REDSHIFT'].tolist()}) REDMAPPER = fits.open(self.planck_path + 'redmapper_dr8_public_v6.3_catalog.fits') REDMAPPER_skycoord = SkyCoord(ra=REDMAPPER[1].data['RA'].tolist(), dec=REDMAPPER[1].data['DEC'].tolist(), unit=u.degree) REDMAPPER_GLON = list(REDMAPPER_skycoord.galactic.l.degree) REDMAPPER_GLAT = list(REDMAPPER_skycoord.galactic.b.degree) df_REDMAPPER = pd.DataFrame(data={'RA': REDMAPPER[1].data['RA'].tolist(), 'DEC': REDMAPPER[1].data['DEC'].tolist(), 'LAMBDA': REDMAPPER[1].data['LAMBDA'].tolist(), 'GLON': REDMAPPER_GLON, 'GLAT': REDMAPPER_GLAT, 'Z': REDMAPPER[1].data['Z_SPEC'].tolist()}) df_REDMAPPER_30 = df_REDMAPPER.query("LAMBDA > 30") df_REDMAPPER_50 = df_REDMAPPER.query("LAMBDA > 50") ACT = fits.open(self.planck_path + 'sptecs_catalog_oct919_forSZDB.fits') SPT = fits.open(self.planck_path + 'DR5_cluster-catalog_v1.1_forSZDB.fits') df_act = pd.DataFrame(data={'RA': list(ACT[1].data['RA']), 'DEC': list(ACT[1].data['DEC']), 'GLON': list(ACT[1].data['GLON']), 'GLAT': list(ACT[1].data['GLAT'])}) df_spt = pd.DataFrame(data={'RA': list(SPT[1].data['RA']), 'DEC': list(SPT[1].data['DEC']), 'GLON': list(SPT[1].data['GLON']), 'GLAT': list(SPT[1].data['GLAT'])}) self.remove_duplicates_on_radec(df_MCXC, df_psz2, output_name='MCXC_no_planck', plot=plot) self.remove_duplicates_on_radec(df_REDMAPPER_30, df_psz2, output_name='RM30_no_planck', plot=plot) self.remove_duplicates_on_radec(df_REDMAPPER_50, df_psz2, output_name='RM50_no_planck', plot=plot) self.remove_duplicates_on_radec(df_act, df_psz2, output_name='ACT_no_planck', plot=plot) self.remove_duplicates_on_radec(df_spt, df_psz2, output_name='SPT_no_planck', plot=plot) PSZ2.close() MCXC.close() MCXC.close() REDMAPPER.close() ACT.close() SPT.close() def create_fake_source_catalog(self): PGCC = fits.open(self.planck_path + 'HFI_PCCS_GCC_R2.02.fits') df_pgcc = pd.DataFrame(data={'RA': list(PGCC[1].data['RA']), 'DEC': list(PGCC[1].data['DEC']), 'GLON': list(PGCC[1].data['GLON']), 'GLAT': list(PGCC[1].data['GLAT'])}) PGCC.close() df_pgcc.to_csv(self.path + 'catalogs/' + 'PGCC' + '.csv', index=False) df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT']) bands = ['100GHz', '143GHz', '217GHz', '353GHz', 
'545GHz', '857GHz'] cs_100 = fits.open(self.planck_path + 'COM_PCCS_100_R2.01.fits') cs_143 = fits.open(self.planck_path + 'COM_PCCS_143_R2.01.fits') cs_217 = fits.open(self.planck_path + 'COM_PCCS_217_R2.01.fits') cs_353 = fits.open(self.planck_path + 'COM_PCCS_353_R2.01.fits') cs_545 = fits.open(self.planck_path + 'COM_PCCS_545_R2.01.fits') cs_857 = fits.open(self.planck_path + 'COM_PCCS_857_R2.01.fits') df_cs_100 = pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])}) df_cs_100.to_csv(self.path + 'catalogs/' + 'cs_100' + '.csv', index=False) df_cs_143 = pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])}) df_cs_143.to_csv(self.path + 'catalogs/' + 'cs_143' + '.csv', index=False) df_cs_217 = pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])}) df_cs_217.to_csv(self.path + 'catalogs/' + 'cs_217' + '.csv', index=False) df_cs_353 = pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])}) df_cs_353.to_csv(self.path + 'catalogs/' + 'cs_353' + '.csv', index=False) df_cs_545 = pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])}) df_cs_545.to_csv(self.path + 'catalogs/' + 'cs_545' + '.csv', index=False) df_cs_857 = pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])}) df_cs_857.to_csv(self.path + 'catalogs/' + 'cs_857' + '.csv', index=False) freq = 0 if '100GHz' in bands: freq += 2 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])}))) if '143GHz' in bands: freq += 4 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])}))) if '217GHz' in bands: freq += 8 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])}))) if '353GHz' in bands: freq += 16 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])}))) if '545GHz' in bands: freq += 32 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])}))) if '857GHz' in bands: freq += 64 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])}))) df = pd.concat((df_pgcc, df)) df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2) df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False) df = pd.DataFrame(columns=['RA','DEC','GLON','GLAT']) for L in range(1, len(bands)): for subset in 
tqdm(itertools.combinations(bands, L)): freq = 0 if '100GHz' in subset: freq += 2 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_100[1].data['RA']), 'DEC': list(cs_100[1].data['DEC']), 'GLON': list(cs_100[1].data['GLON']), 'GLAT': list(cs_100[1].data['GLAT'])}))) if '143GHz' in subset: freq += 4 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_143[1].data['RA']), 'DEC': list(cs_143[1].data['DEC']), 'GLON': list(cs_143[1].data['GLON']), 'GLAT': list(cs_143[1].data['GLAT'])}))) if '217GHz' in subset: freq += 8 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_217[1].data['RA']), 'DEC': list(cs_217[1].data['DEC']), 'GLON': list(cs_217[1].data['GLON']), 'GLAT': list(cs_217[1].data['GLAT'])}))) if '353GHz' in subset: freq += 16 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_353[1].data['RA']), 'DEC': list(cs_353[1].data['DEC']), 'GLON': list(cs_353[1].data['GLON']), 'GLAT': list(cs_353[1].data['GLAT'])}))) if '545GHz' in subset: freq += 32 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_545[1].data['RA']), 'DEC': list(cs_545[1].data['DEC']), 'GLON': list(cs_545[1].data['GLON']), 'GLAT': list(cs_545[1].data['GLAT'])}))) if '857GHz' in subset: freq += 64 df = pd.concat((df, pd.DataFrame(data={'RA': list(cs_857[1].data['RA']), 'DEC': list(cs_857[1].data['DEC']), 'GLON': list(cs_857[1].data['GLON']), 'GLAT': list(cs_857[1].data['GLAT'])}))) df = pd.concat((df_pgcc, df)) df = self.remove_duplicates_on_radec(df, with_itself=True, tol=2) df.to_csv(self.path + 'catalogs/' + 'False_SZ_catalog_f%s'%freq + '.csv', index=False) cs_100.close() cs_143.close() cs_217.close() cs_353.close() cs_545.close() cs_857.close() def remove_duplicates_on_radec(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=5, plot=False): """"Takes two different dataframes with columns 'RA' & 'DEC' and performs a spatial coordinate match with a tol=5 arcmin tolerance. Saves a .csv file containing df_main without objects in common from df_with_dup. Args: df_main (pd.DataFrame): main dataframe. df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None. output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None. with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False. tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 5. plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False. 
""" if with_itself == True: scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg') idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2) ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d}) df_main['ismatched'], df_main['ID'] = ismatched, idx df_main.query("ismatched == False", inplace=True) df_main.drop(columns=['ismatched', 'ID'], inplace=True) df_main = df_main.replace([-1, -10, -99], np.nan) if output_name is not None: df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False) elif with_itself == False: assert df_with_dup is not None ID = np.arange(0, len(df_with_dup)) df_with_dup = df_with_dup[['RA', 'DEC']].copy() df_with_dup.insert(loc=0, value=ID, column='ID') scatalog_sub = SkyCoord(ra=df_main['RA'].values, dec=df_main['DEC'].values, unit='deg') pcatalog_sub = SkyCoord(ra=df_with_dup['RA'].values, dec=df_with_dup['DEC'].values, unit='deg') idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1) ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d}) df_main['ismatched'], df_main['ID'] = ismatched, idx df_with_dup.drop(columns=['RA', 'DEC'], inplace=True) df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1) df_wo_dup.query("ismatched == False", inplace=True) df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True) df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan) if output_name is not None: df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False) df_main = df_wo_dup.copy() if plot == True and output_name is not None: fig = plt.figure(figsize=(8,8), tight_layout=False) ax = fig.add_subplot(111) ax.set_facecolor('white') ax.grid(True, color='grey', lw=0.5) ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20) ax.set_ylabel(output_name, fontsize=20) ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400) ax.axvline(tol, color='k', linestyle='--') ax.set_xlim(0, 2*tol) plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False) plt.show() plt.close() return df_main def remove_duplicates_on_lonlat(self, df_main, df_with_dup=None, output_name=None, with_itself=False, tol=2, plot=False): """"Takes two different dataframes with columns 'GLON' & 'GLAT' and performs a spatial coordinate match with a tol=2 arcmin tolerance. Saves a .csv file containing df_main without objects in common from df_with_dup. Args: df_main (pd.DataFrame): main dataframe. output_name (str): name that will be used in the saved/plot file name. If None, no file will be saved. Defaults to None. df_with_dup (pd.DataFrame): dataframe that contains objects from df_main. Defaults to None. with_itself (bool, optional): If True, the spatial coordinates match will be performed with df_main. Defaults to False. tol (int, optional): tolerance for spatial coordinates match in arcmin. Defaults to 2. plot (bool, optional): If True, will save duplicates distance from each other distribution plots. Defaults to False. 
""" if with_itself == True: scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic') idx, d2d, _ = match_coordinates_sky(scatalog_sub, scatalog_sub, nthneighbor=2) ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d}) df_main['ismatched'], df_main['ID'] = ismatched, idx df_main.query("ismatched == False", inplace=True) df_main.drop(columns=['ismatched', 'ID'], inplace=True) df_main = df_main.replace([-1, -10, -99], np.nan) if output_name is not None: df_main.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False) elif with_itself == False: assert df_with_dup is not None ID = np.arange(0, len(df_with_dup)) df_with_dup = df_with_dup[['GLON', 'GLAT']].copy() df_with_dup.insert(loc=0, value=ID, column='ID') scatalog_sub = SkyCoord(df_main['GLON'].values, df_main['GLAT'].values, unit='deg', frame='galactic') pcatalog_sub = SkyCoord(df_with_dup['GLON'].values, df_with_dup['GLAT'].values, unit='deg', frame='galactic') idx, d2d, _ = match_coordinates_sky(scatalog_sub, pcatalog_sub, nthneighbor=1) ismatched = d2d < tol*u.arcminute #threshold to consider whether or not two galaxies are the same df_d2d = pd.DataFrame(data={'ismatched': ismatched, 'idx': idx, 'd2d': d2d}) df_main['ismatched'], df_main['ID'] = ismatched, idx df_with_dup.drop(columns=['GLON', 'GLAT'], inplace=True) df_wo_dup = pd.merge(df_main, df_with_dup, indicator=True, on='ID', how='outer').query('_merge=="both"').drop('_merge', axis=1) df_wo_dup.query("ismatched == False", inplace=True) df_wo_dup.drop(columns=['ismatched', 'ID'], inplace=True) df_wo_dup = df_wo_dup.replace([-1, -10, -99], np.nan) if output_name is not None: df_wo_dup.to_csv(self.path + 'catalogs/' + output_name + '.csv', index=False) df_main = df_wo_dup.copy() if plot == True and output_name is not None: fig = plt.figure(figsize=(8,8), tight_layout=False) ax = fig.add_subplot(111) ax.set_facecolor('white') ax.grid(True, color='grey', lw=0.5) ax.ticklabel_format(style='sci', axis='y', scilimits=(0,0)) ax.set_xlabel(r'$\mathrm{angular\;distance\;\left(arcmin\right)}$', fontsize=20) ax.set_ylabel(output_name, fontsize=20) ax.hist(np.array(df_d2d['d2d'].values)*60, bins = 400) ax.axvline(tol, color='k', linestyle='--') ax.set_xlim(0, 2*tol) plt.savefig(self.output_path + 'figures/' + 'd2d_' + output_name + '.png', bbox_inches='tight', transparent=False) plt.show() plt.close() return df_main def create_circular_mask(self, h, w, center, ang_center, radius): """Takes a list of center positions and returns a segmentation mask with circulat masks at the center's position. Args: h (int): patch height. w (int): patch width. center (list of tuples): In pixels. List of tupples containing center coordinates to mask. ang_center (list of tuples): In ICRS. Same as center radius ([type]): In arcmin. Disk radius for mask Returns: np.ndarray: ndarray with shape (h,w) filled with zeros except at centers position where circular masks with size radius are equal to one. 
""" if radius is None: size_distribution = fits.open(self.path + 'catalogs/exp_joined_ami_carma_plck_psz1_psz2_act_spt_YT.fits')[1].data['T500'] heights, bins = np.histogram(size_distribution, bins=8, density=False, range=[0,15]) heights = heights/sum(heights) bins = bins[1:] radius = np.random.choice(bins, p=heights)/self.pixsize else: radius = radius/self.pixsize Y, X = np.ogrid[:h, :w] mask = np.zeros((h,w)) count = 0 ra, dec = [], [] for i,c in enumerate(center): if np.isnan(c[0]): continue elif np.isnan(c[1]): continue else: dist_from_center = np.sqrt((X - int(c[0]))**2 + (Y - int(c[1]))**2) mask += (dist_from_center <= radius).astype(int) is_all_zero = np.all(((dist_from_center <= radius).astype(int) == 0)) if is_all_zero == False: count += 1 ra.append(ang_center[i][0]) dec.append(ang_center[i][1]) return np.where(mask > 1, 1, mask), count, ra, dec def return_coord_catalog(self): """ Returns coordinate catalogs Returns: DataFrame: cluster coordinate catalog DataFrame: other sources coordinate catalog """ if self.dataset == 'planck_z': planck_z = pd.read_csv(self.path + 'catalogs/planck_z' + '.csv') coord_catalog = planck_z[['RA', 'DEC', 'GLON', 'GLAT']].copy() elif self.dataset == 'planck_no-z': planck_z = pd.read_csv(self.path + 'catalogs/planck_z' + '.csv') planck_no_z = pd.read_csv(self.path + 'catalogs/planck_no-z' + '.csv') coord_catalog = pd.concat([planck_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), planck_no_z[['RA', 'DEC', 'GLON', 'GLAT']].copy()], ignore_index=True) elif self.dataset == 'MCXC': planck_z = pd.read_csv(self.path + 'catalogs/planck_z' + '.csv') planck_no_z = pd.read_csv(self.path + 'catalogs/planck_no-z' + '.csv') MCXC = pd.read_csv(self.path + 'catalogs/MCXC_no_planck' + '.csv') coord_catalog = pd.concat([planck_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), planck_no_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), MCXC[['RA', 'DEC', 'GLON', 'GLAT']].copy()], ignore_index=True) elif self.dataset == 'RM30': planck_z = pd.read_csv(self.path + 'catalogs/planck_z' + '.csv') planck_no_z = pd.read_csv(self.path + 'catalogs/planck_no-z' + '.csv') MCXC = pd.read_csv(self.path + 'catalogs/MCXC_no_planck' + '.csv') RM30 = pd.read_csv(self.path + 'catalogs/RM30_no_planck' + '.csv') coord_catalog = pd.concat([planck_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), planck_no_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), MCXC[['RA', 'DEC', 'GLON', 'GLAT']].copy(), RM30[['RA', 'DEC']].copy()], ignore_index=True) elif self.dataset == 'RM50': planck_z = pd.read_csv(self.path + 'catalogs/planck_z' + '.csv') planck_no_z = pd.read_csv(self.path + 'catalogs/planck_no-z' + '.csv') MCXC = pd.read_csv(self.path + 'catalogs/MCXC_no_planck' + '.csv') RM50 = pd.read_csv(self.path + 'catalogs/RM50_no_planck' + '.csv') coord_catalog = pd.concat([planck_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), planck_no_z[['RA', 'DEC', 'GLON', 'GLAT']].copy(), MCXC[['RA', 'DEC', 'GLON', 'GLAT']].copy(), RM50[['RA', 'DEC']].copy()], ignore_index=True) false_catalog = pd.read_csv(self.path + 'catalogs/False_SZ_catalog_f%s.csv'%self.planck_freq) cold_cores = pd.read_csv(self.path + 'catalogs/PGCC.csv') # cs_100 = pd.read_csv(self.path + 'catalogs/cs_100.csv') # cs_143 = pd.read_csv(self.path + 'catalogs/cs_143.csv') # cs_217 = pd.read_csv(self.path + 'catalogs/cs_217.csv') # cs_343 = pd.read_csv(self.path + 'catalogs/cs_353.csv') # cs_545 = pd.read_csv(self.path + 'catalogs/cs_545.csv') # cs_857 = pd.read_csv(self.path + 'catalogs/cs_857.csv') return coord_catalog, false_catalog, cold_cores#, cs_100 def rotate(self, origin, point, 
angle): """ Rotate a point clockwise by a given angle around a given origin. The angle should be given in radians. Args: origin ([type]): [description] point ([type]): [description] angle ([type]): [description] Returns: [type]: [description] """ angle = -np.radians(angle) #transform in radians, - sign is there to ensure clockwise ox, oy = origin px, py = point qx = ox + np.cos(angle) * (px - ox) - np.sin(angle) * (py - oy) qy = oy + np.sin(angle) * (px - ox) + np.cos(angle) * (py - oy) return qx, qy def rotate_patch(self, p, i, band, patch_rot, coord_catalog, random_angle, n_rot): """[summary] Args: p (int): [description] i (int): [description] band (int): [description] patch_rot ([type]): [description] coord_catalog ([type]): [description] random_angle ([type]): [description] n_rot ([type]): [description] Returns: [type]: [description] """ HDU_rot = patch_rot[band]['fits'] HDU_rot_data = ndimage.rotate(np.array(HDU_rot.data), random_angle, reshape=False) wcs_rot = WCS(HDU_rot.header) x_rot,y_rot = wcs_rot.world_to_pixel_values(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]) x_rot, y_rot = self.rotate(origin=(0.5*self.npix*np.sqrt(2), 0.5*self.npix*np.sqrt(2)), point=(x_rot,y_rot), angle=random_angle) x_rot, y_rot = x_rot-int(0.5*self.npix*(np.sqrt(2)-1)), y_rot-int(0.5*self.npix*(np.sqrt(2)-1)) if x_rot < 0 or x_rot > self.npix or y_rot < 0 or y_rot > self.npix: np.random.seed(p+i+200) random_int = np.random.randint(300,400) random_index = 0 while x_rot < 0 or x_rot > self.npix or y_rot < 0 or y_rot > self.npix: np.random.seed(random_int+random_index) random_angle = 360*float(np.random.rand(1)) HDU_rot_data = ndimage.rotate(np.array(HDU_rot.data), random_angle, reshape=False) wcs_rot = WCS(HDU_rot.header) x_rot,y_rot = wcs_rot.world_to_pixel_values(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]) x_rot, y_rot = self.rotate(origin=(0.5*self.npix*np.sqrt(2), 0.5*self.npix*np.sqrt(2)), point=(x_rot,y_rot), angle=random_angle) x_rot, y_rot = x_rot-int(0.5*self.npix*(np.sqrt(2)-1)), y_rot-int(0.5*self.npix*(np.sqrt(2)-1)) random_index += 1 if len(HDU_rot_data[n_rot:-n_rot, n_rot:-n_rot]) != self.npix: return HDU_rot_data[n_rot:-n_rot-1, n_rot:-n_rot-1] else: return HDU_rot_data[n_rot:-n_rot, n_rot:-n_rot] def center_of_mass(self, input): """ Calculate the center of mass of the values of a coordinate array. Args: input (ndarray): Data from which to calculate center-of-mass. Returns: tuple: Coordinates of centers-of-mass. 
""" input_mod_sphere = np.zeros_like(input) for i,coordinate in enumerate(input): if np.abs(coordinate[0]) > 360 - 0.1*self.ndeg: input_mod_sphere[i,0] = coordinate[0] - 360 else: input_mod_sphere[i,0] = coordinate[0] if coordinate[1] < -90 + 0.1*self.ndeg: input_mod_sphere[i,1] = coordinate[1] + 180 else: input_mod_sphere[i,1] = coordinate[1] results = np.mean(input_mod_sphere, axis=0) if np.isscalar(results[0]): if results[0] > 360 and results[1] > 90: return tuple((results[1]-360, results[0]-180)) elif results[0] > 360 and results[1] < 90: return tuple((results[1]-360, results[0])) elif results[0] < 360 and results[1] > 90: return tuple((results[1], results[0]-180)) elif results[0] < 360 and results[1] < 90: return tuple((results[0], results[1])) else: raise ValueError("coordinates are not in the right format") else: raise ValueError("input has wrong dimensions") def neighbours(self, catalog_list, coord_list): assert len(catalog_list) == len(coord_list) close_neighbours_id_list, close_coord_neighbours_list, coord_neighbours_list, cluster_density_list = [], [], [], [] for h in range(len(catalog_list)): close_neighbours_id, close_coord_neighbours, coord_neighbours, cluster_density = [], [], [], [] for j in range(len(catalog_list)): idx_cluster_list = [] idx_false_cluster_list = [] if h == j: ini = 2 else: ini = 1 for k in range(ini,100): idx, _, _ = match_coordinates_sky(coord_list[h], coord_list[j], nthneighbor=k) idx_cluster_list.append(idx) for i in range(len(catalog_list[h])): close_neighb_id = [i] close_neighb = [[catalog_list[h]['RA'].values[i], catalog_list[j]['DEC'].values[i]]] ## Match between galaxy clusters and themselves k = 0 idx = idx_cluster_list[k] ra_diff = np.abs(catalog_list[h]['RA'].values[i] - catalog_list[j]['RA'].values[idx[i]]) if np.abs(np.abs(catalog_list[h]['RA'].values[i] - catalog_list[j]['RA'].values[idx[i]])-360) < ra_diff: ra_diff = np.abs(np.abs(catalog_list[h]['RA'].values[i] - catalog_list[j]['RA'].values[idx[i]]) - 360) dec_diff = np.abs(catalog_list[h]['DEC'].values[i] - catalog_list[j]['DEC'].values[idx[i]]) if np.abs(np.abs(catalog_list[h]['DEC'].values[i] - catalog_list[j]['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(catalog_list[h]['DEC'].values[i] - catalog_list[j]['DEC'].values[idx[i]]) - 180) if ra_diff < 0.7*self.ndeg and dec_diff < 0.7*self.ndeg: close_neighb_id.append(idx[i]) close_neighb.append([catalog_list[h]['RA'].values[idx[i]], catalog_list[j]['DEC'].values[idx[i]]]) k += 1 neighb = [[catalog_list[h]['RA'].values[idx[i]], catalog_list[j]['DEC'].values[idx[i]]]] while ra_diff < 1.5*self.ndeg and dec_diff < 1.5*self.ndeg: # idx, _, _ = match_coordinates_sky(coords_ns, coords_ns, nthneighbor=k) idx = idx_cluster_list[k] ra_diff = np.abs(catalog_list[h]['RA'].values[i] - catalog_list[j]['RA'].values[idx[i]]) if np.abs(np.abs(catalog_list[h]['RA'].values[i] - catalog_list[j]['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(catalog_list[h]['RA'].values[i] - catalog_list[j]['RA'].values[idx[i]]) - 360) dec_diff = np.abs(catalog_list[h]['DEC'].values[i] - catalog_list[j]['DEC'].values[idx[i]]) if np.abs(np.abs(catalog_list[h]['DEC'].values[i] - catalog_list[j]['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(catalog_list[h]['DEC'].values[i] - catalog_list[j]['DEC'].values[idx[i]]) - 180) if ra_diff < 0.7*self.ndeg and dec_diff < 0.7*self.ndeg: close_neighb_id.append(idx[i]) close_neighb.append([catalog_list[h]['RA'].values[idx[i]], catalog_list[j]['DEC'].values[idx[i]]]) 
neighb.append([catalog_list[h]['RA'].values[idx[i]], catalog_list[j]['DEC'].values[idx[i]]]) k += 1 close_neighbours_id.append(close_neighb_id) close_coord_neighbours.append(close_neighb) coord_neighbours.append(neighb) cluster_density.append(k-ini+1) close_neighbours_id_list.append(close_neighbours_id) close_coord_neighbours_list.append(close_coord_neighbours) coord_neighbours_list.append(coord_neighbours) cluster_density_list.append(cluster_density) return close_neighbours_id_list, close_coord_neighbours_list, coord_neighbours_list, cluster_density_list ## Match between galaxy clusters and cold cores # idx, _, _ = match_coordinates_sky(coords_ns, cold_cores_coords, nthneighbor=1) k = 0 idx = idx_false_cluster_list[k] ra_diff = np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) if np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) k += 1 neighb = [[cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]] while ra_diff < 1.5*self.ndeg and dec_diff < 1.5*self.ndeg: idx = idx_false_cluster_list[k] # idx, _, _ = match_coordinates_sky(coords_ns, cold_cores_coords, nthneighbor=k) ra_diff = np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) if np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) neighb.append([cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]) k += 1 false_coord_neighbours.append(neighb) false_cluster_density.append(k-1) def make_input(self, p, cold_cores=False, label_only=False, save_files=True, plot=False, verbose=False): """ Creates input/output datasets for all clusters in the selected cluster catalog. Patches contain at least one cluster that underwent a random translation. It also saves a .npz file containing a list of str in which training, validation and test belonging is specified. Args: p (int): loop number. plot (bool, optional): If True, will plot the number of potential objects per patch. Defaults to False. verbose (bool, optional): If True, will print additional information. Defaults to False. 
""" #------------------------------------------------------------------# # # # # # Create common catalog # # # # # #------------------------------------------------------------------# if p != 0: plot = False coord_catalog, false_catalog, cold_cores_catalog = self.return_coord_catalog() false_catalog_list = [cold_cores_catalog]#, cs_100] input_size = len(coord_catalog) coords_ns = SkyCoord(ra=coord_catalog['RA'].values, dec=coord_catalog['DEC'].values, unit='deg') #------------------------------------------------------------------# # # # # # Check for potential neighbours # # # # # #------------------------------------------------------------------# # false_coords = SkyCoord(ra=false_catalog['RA'].values, dec=false_catalog['DEC'].values, unit='deg') cold_cores_coords = SkyCoord(ra=cold_cores_catalog['RA'].values, dec=cold_cores_catalog['DEC'].values, unit='deg') # cs_100_coords = SkyCoord(ra=cs_100['RA'].values, dec=cs_100['DEC'].values, unit='deg') false_coords_list = [cold_cores_coords]#, cs_100_coords] cluster_density = [] false_cluster_density = [] coord_neighbours = [] close_coord_neighbours = [] false_coord_neighbours = [] close_neighbours_id_list = [] idx_cluster_list = [] idx_false_cluster_list = [] for k in range(2,10): idx, _, _ = match_coordinates_sky(coords_ns, coords_ns, nthneighbor=k) idx_cluster_list.append(idx) for k in range(1,100): idx, _, _ = match_coordinates_sky(coords_ns, cold_cores_coords, nthneighbor=k) idx_false_cluster_list.append(idx) for i in range(input_size): close_neighbours_id = [i] close_neighb = [[coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]]] ## Match between galaxy clusters and themselves # idx, _, _ = match_coordinates_sky(coords_ns, coords_ns, nthneighbor=2) k = 0 idx = idx_cluster_list[k] ra_diff = np.abs(coord_catalog['RA'].values[i] - coord_catalog['RA'].values[idx[i]]) if np.abs(np.abs(coord_catalog['RA'].values[i] - coord_catalog['RA'].values[idx[i]])-360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[i] - coord_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[i] - coord_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(coord_catalog['DEC'].values[i] - coord_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[i] - coord_catalog['DEC'].values[idx[i]]) - 180) if ra_diff < 0.7*self.ndeg and dec_diff < 0.7*self.ndeg: close_neighbours_id.append(idx[i]) close_neighb.append([coord_catalog['RA'].values[idx[i]], coord_catalog['DEC'].values[idx[i]]]) k += 1 neighb = [[coord_catalog['RA'].values[idx[i]], coord_catalog['DEC'].values[idx[i]]]] while ra_diff < 1.5*self.ndeg and dec_diff < 1.5*self.ndeg: # idx, _, _ = match_coordinates_sky(coords_ns, coords_ns, nthneighbor=k) idx = idx_cluster_list[k] ra_diff = np.abs(coord_catalog['RA'].values[i] - coord_catalog['RA'].values[idx[i]]) if np.abs(np.abs(coord_catalog['RA'].values[i] - coord_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[i] - coord_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[i] - coord_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(coord_catalog['DEC'].values[i] - coord_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[i] - coord_catalog['DEC'].values[idx[i]]) - 180) if ra_diff < 0.7*self.ndeg and dec_diff < 0.7*self.ndeg: close_neighbours_id.append(idx[i]) close_neighb.append([coord_catalog['RA'].values[idx[i]], 
coord_catalog['DEC'].values[idx[i]]]) neighb.append([coord_catalog['RA'].values[idx[i]], coord_catalog['DEC'].values[idx[i]]]) k += 1 close_neighbours_id_list.append(close_neighbours_id) close_coord_neighbours.append(close_neighb) coord_neighbours.append(neighb) cluster_density.append(k) ## Match between galaxy clusters and cold cores # idx, _, _ = match_coordinates_sky(coords_ns, cold_cores_coords, nthneighbor=1) k = 0 idx = idx_false_cluster_list[k] ra_diff = np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) if np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) k += 1 neighb = [[cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]] while ra_diff < 1.5*self.ndeg and dec_diff < 1.5*self.ndeg: idx = idx_false_cluster_list[k] # idx, _, _ = match_coordinates_sky(coords_ns, cold_cores_coords, nthneighbor=k) ra_diff = np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) if np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) neighb.append([cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]) k += 1 false_coord_neighbours.append(neighb) false_cluster_density.append(k-1) if cold_cores: idx_cluster_list = [] idx_false_cluster_list = [] for k in range(1,10): idx, _, _ = match_coordinates_sky(cold_cores_coords, coords_ns, nthneighbor=k) idx_cluster_list.append(idx) for k in range(2,200): idx, _, _ = match_coordinates_sky(cold_cores_coords, cold_cores_coords, nthneighbor=k) idx_false_cluster_list.append(idx) for i in range(len(cold_cores_catalog)): # close_neighbours_id = [i] # close_neighb = [[coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]]] ## Match between galaxy clusters and themselves # idx, _, _ = match_coordinates_sky(cold_cores_coords, coords_ns, nthneighbor=1) k = 0 idx = idx_cluster_list[k] ra_diff = np.abs(coord_catalog['RA'].values[idx[i]] - cold_cores_catalog['RA'].values[i]) if np.abs(np.abs(coord_catalog['RA'].values[idx[i]] - cold_cores_catalog['RA'].values[i])-360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[idx[i]] - cold_cores_catalog['RA'].values[i]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) if np.abs(np.abs(coord_catalog['DEC'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) - 180) k += 1 neighb = [[coord_catalog['RA'].values[idx[i]], coord_catalog['DEC'].values[idx[i]]]] while ra_diff < 1.5*self.ndeg and dec_diff < 1.5*self.ndeg: # 
idx, _, _ = match_coordinates_sky(cold_cores_coords, coords_ns, nthneighbor=k) idx = idx_cluster_list[k] ra_diff = np.abs(coord_catalog['RA'].values[idx[i]] - cold_cores_catalog['RA'].values[i]) if np.abs(np.abs(coord_catalog['RA'].values[idx[i]] - cold_cores_catalog['RA'].values[i]) - 360) < ra_diff: ra_diff = np.abs(np.abs(coord_catalog['RA'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) - 360) dec_diff = np.abs(coord_catalog['DEC'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) if np.abs(np.abs(coord_catalog['DEC'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) - 180) < dec_diff: dec_diff = np.abs(np.abs(coord_catalog['DEC'].values[idx[i]] - cold_cores_catalog['DEC'].values[i]) - 180) neighb.append([coord_catalog['RA'].values[idx[i]], coord_catalog['DEC'].values[idx[i]]]) k += 1 coord_neighbours.append(neighb) cluster_density.append(k-1) ## Match between galaxy clusters and cold cores close_neighbours_id = [i] close_neighb = [[cold_cores_catalog['RA'].values[i], cold_cores_catalog['DEC'].values[i]]] k = 0 idx = idx_false_cluster_list[k] # idx, _, _ = match_coordinates_sky(cold_cores_coords, cold_cores_coords, nthneighbor=2) ra_diff = np.abs(cold_cores_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) if np.abs(np.abs(cold_cores_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(cold_cores_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(cold_cores_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(cold_cores_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(cold_cores_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) if ra_diff < 0.5*self.ndeg and dec_diff < 0.5*self.ndeg: close_neighbours_id.append(idx[i]) close_neighb.append([cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]) k += 1 neighb = [[cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]] while ra_diff < 1.5*self.ndeg and dec_diff < 1.5*self.ndeg: # idx, _, _ = match_coordinates_sky(cold_cores_coords, cold_cores_coords, nthneighbor=k) idx = idx_false_cluster_list[k] ra_diff = np.abs(cold_cores_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) if np.abs(np.abs(cold_cores_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) < ra_diff: ra_diff = np.abs(np.abs(cold_cores_catalog['RA'].values[i] - cold_cores_catalog['RA'].values[idx[i]]) - 360) dec_diff = np.abs(cold_cores_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) if np.abs(np.abs(cold_cores_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) < dec_diff: dec_diff = np.abs(np.abs(cold_cores_catalog['DEC'].values[i] - cold_cores_catalog['DEC'].values[idx[i]]) - 180) if ra_diff < 0.5*self.ndeg and dec_diff < 0.5*self.ndeg: close_neighbours_id.append(idx[i]) close_neighb.append([cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]) neighb.append([cold_cores_catalog['RA'].values[idx[i]], cold_cores_catalog['DEC'].values[idx[i]]]) k += 1 close_neighbours_id_list.append(close_neighbours_id) close_coord_neighbours.append(close_neighb) false_coord_neighbours.append(neighb) false_cluster_density.append(k) #------------------------------------------------------------------------------------------------# # # # # # Replace cluster coords that are too close 
form each other with center of mass # # # # # #------------------------------------------------------------------------------------------------# df = coord_catalog.copy() df = pd.concat((df, cold_cores_catalog), ignore_index=True) df = pd.concat((df, pd.DataFrame(data={'coord_neighbours': coord_neighbours, 'cluster_density': cluster_density, 'false_coord_neighbours': false_coord_neighbours, 'false_cluster_density': false_cluster_density})), axis=1) df_clusters = df.iloc[:input_size,:].copy() if cold_cores: df_cold_cores = df.iloc[input_size:,:].copy() df_cold_cores.reset_index(drop=True, inplace=True) removed_index_list = [] for i in range(input_size): if i in removed_index_list: continue else: if len(close_neighbours_id_list[i]) > 1: ra, dec = self.center_of_mass(close_coord_neighbours[i]) df_clusters.loc[close_neighbours_id_list[i][0],'RA'] = ra df_clusters.loc[close_neighbours_id_list[i][0],'DEC'] = dec for j,row in enumerate(close_neighbours_id_list[i]): if j > 0: try: df_clusters.drop(row, inplace=True) removed_index_list.append(row) except: pass print(df_clusters['false_cluster_density'].isnull().sum()) print(len(df_clusters)) print(len(set(removed_index_list))) print(df_clusters.head(10)) if cold_cores: removed_index_list = [] skip_index_list = [] for i in range(input_size, len(cold_cores_catalog)+input_size): if i in removed_index_list: continue else: if len(close_neighbours_id_list[i]) > 1 and close_neighbours_id_list[i][0] not in skip_index_list: skip_index_list.append(close_neighbours_id_list[i][0]) ra, dec = self.center_of_mass(close_coord_neighbours[i]) df_cold_cores.loc[close_neighbours_id_list[i][0],'RA'] = ra df_cold_cores.loc[close_neighbours_id_list[i][0],'DEC'] = dec for j,row in enumerate(close_neighbours_id_list[i]): if j > 0: try: removed_index_list.append(row) except: pass df_cold_cores.drop(removed_index_list, inplace=True) print(df_cold_cores['false_cluster_density'].isnull().sum()) print(len(df_cold_cores)) print(len(set(removed_index_list))) print(df_cold_cores.head(10)) df_cold_cores = df_cold_cores.sample(frac=0.2, random_state=p) df = pd.concat((df_clusters, df_cold_cores)) if not cold_cores: df = df_clusters.copy() coord_catalog = df[['RA', 'DEC']].copy() coord_catalog.reset_index(drop=True, inplace=True) coord_neighbours = df['coord_neighbours'].copy() coord_neighbours.reset_index(drop=True, inplace=True) cluster_density = df['cluster_density'].copy() cluster_density.reset_index(drop=True, inplace=True) false_coord_neighbours = df['false_coord_neighbours'].copy() false_coord_neighbours.reset_index(drop=True, inplace=True) false_cluster_density = df['false_cluster_density'].copy() false_cluster_density.reset_index(drop=True, inplace=True) input_size = len(coord_catalog) # if plot == True: # fig = plt.figure(figsize=(7,7), tight_layout=False) # ax = fig.add_subplot(111) # ax.set_facecolor('white') # ax.grid(True, color='grey', lw=0.5) # ax.set_xlabel('Neighbours per patch', fontsize=20) # ax.set_ylabel('Cluster number', fontsize=20) # ax.hist(cluster_density) # ax.set_yscale('log') # plt.savefig(self.output_path + 'figures/' + 'cluster_density' + '.png', bbox_inches='tight', transparent=False) # plt.show() # plt.close() # fig = plt.figure(figsize=(7,7), tight_layout=False) # ax = fig.add_subplot(111) # ax.set_facecolor('white') # ax.grid(True, color='grey', lw=0.5) # ax.set_xlabel('Neighbours per patch', fontsize=20) # ax.set_ylabel('False cluster number', fontsize=20) # ax.hist(false_cluster_density, bins=14) # ax.set_yscale('log') # 
plt.savefig(self.output_path + 'figures/' + 'false_cluster_density' + '.png', bbox_inches='tight', transparent=False) # plt.show() # plt.close() #------------------------------------------------------------------# # # # # # Create ramdon coordinate translations # # # # # #------------------------------------------------------------------# np.random.seed(p) random_coord_x = 2*np.random.rand(1, input_size).flatten() - np.ones_like(np.random.rand(1, input_size).flatten()) np.random.seed(p+100) random_coord_y = 2*np.random.rand(1, input_size).flatten() - np.ones_like(np.random.rand(1, input_size).flatten()) coords = SkyCoord(ra=coord_catalog['RA'].values + 0.5*(self.ndeg-0.2)*random_coord_x, dec=coord_catalog['DEC'].values + 0.5*(self.ndeg-0.2)*random_coord_y, unit='deg') #------------------------------------------------------------------# # # # # # Create patch & masks # # # # # #------------------------------------------------------------------# maps = self.maps cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False) cutsky_rot = CutSky(maps, npix=int(np.sqrt(2)*self.npix), pixsize=self.pixsize, low_mem=False) # skip_regions_up = [[0, 360, -60, -62], # [0, 360, -60, -62], [0, 360, -60, -62], # [0, 360, -60, -62], [0, 360, -60, -62], [0, 360, -60, -62], # [0, 360, 62, 60], [0, 360, 62, 60], [0, 360, 62, 60], # [0, 360, 62, 60], [0, 360, 62, 60], # [0, 360, 62, 60]] # skip_regions_down = [[0, 360, -38, -40], # [0, 360, -38, -40], [0, 360, -38, -40], # [0,0,0,0], [0,0,0,0], [0,0,0,0], # [0,0,0,0], [0,0,0,0], [0,0,0,0], # [0, 360, 42, 40], [0, 360, 42, 40], # [0, 360, 42, 40]] ## Skip for test set is done within the region itself, not in skip_regions for region,(x_left, x_right, y_up, y_down) in enumerate(self.test_regions): if not label_only: inputs = np.ndarray((input_size,self.npix,self.npix,len(self.bands))) labels = np.ndarray((input_size,self.npix,self.npix,self.n_labels)) milca = np.ndarray((input_size,self.npix,self.npix,1)) dataset_type = [] sum_cluster, sum_false = 0, 0 for i, coord in enumerate(coords): if coord.galactic.l.degree > x_left and coord.galactic.l.degree < x_right and coord.galactic.b.degree < y_up and coord.galactic.b.degree > y_down : dataset_type.append('test') labels[i,:,:,0] = np.zeros((self.npix, self.npix)) labels[i,:,:,1] = np.zeros((self.npix, self.npix)) milca[i,:,:,0] = np.zeros((self.npix, self.npix)) if not label_only: for j in range(len(self.bands)): inputs[i,:,:,j] = np.zeros((self.npix, self.npix)) continue elif coord.galactic.l.degree > self.val_regions[region][0] and coord.galactic.l.degree < self.val_regions[region][1] and coord.galactic.b.degree < self.val_regions[region][2] and coord.galactic.b.degree > self.val_regions[region][3]: dataset_type.append('val') # elif coord.galactic.l.degree > skip_regions_up[region][0] and coord.galactic.l.degree < skip_regions_up[region][1] and coord.galactic.b.degree < skip_regions_up[region][2] and coord.galactic.b.degree > skip_regions_up[region][3]: # dataset_type.append('skip') # labels[i,:,:,0] = np.zeros((self.npix, self.npix)) # labels[i,:,:,1] = np.zeros((self.npix, self.npix)) # milca[i,:,:,0] = np.zeros((self.npix, self.npix)) # if not label_only: # for j in range(len(self.bands)): # inputs[i,:,:,j] = np.zeros((self.npix, self.npix)) # continue # elif coord.galactic.l.degree > skip_regions_down[region][0] and coord.galactic.l.degree < skip_regions_down[region][1] and coord.galactic.b.degree < skip_regions_down[region][2] and coord.galactic.b.degree > skip_regions_down[region][3]: # 
dataset_type.append('skip') # labels[i,:,:,0] = np.zeros((self.npix, self.npix)) # labels[i,:,:,1] = np.zeros((self.npix, self.npix)) # milca[i,:,:,0] = np.zeros((self.npix, self.npix)) # if not label_only: # for j in range(len(self.bands)): # inputs[i,:,:,j] = np.zeros((self.npix, self.npix)) # continue else: dataset_type.append('train') #------------------------------------------------------------------# # # # # # Rotations # # # # # #------------------------------------------------------------------# np.random.seed(p+i+200) random_angle = 360*float(np.random.rand(1)) patch = cutsky.cut_fits(coord) HDU = patch[-1]['fits'] wcs = WCS(HDU.header) x,y = wcs.world_to_pixel_values(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]) patch_rot = cutsky_rot.cut_fits(coord) HDU_rot = patch_rot[-1]['fits'] HDU_rot_data = ndimage.rotate(np.array(HDU_rot.data), random_angle, reshape=False) HDU_rot_data = ndimage.rotate(np.array(HDU_rot.data), random_angle, reshape=False) wcs_rot = WCS(HDU_rot.header) x_rot,y_rot = wcs_rot.world_to_pixel_values(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]) x_rot, y_rot = self.rotate(origin=(0.5*self.npix*np.sqrt(2), 0.5*self.npix*np.sqrt(2)), point=(x_rot,y_rot), angle=random_angle) x_rot, y_rot = x_rot-int(0.5*self.npix*(np.sqrt(2)-1)), y_rot-int(0.5*self.npix*(np.sqrt(2)-1)) if x_rot < 0 or x_rot > self.npix or y_rot < 0 or y_rot > self.npix: np.random.seed(p+i+200) random_int = np.random.randint(300,400) random_index = 0 while x_rot < 0 or x_rot > self.npix or y_rot < 0 or y_rot > self.npix: np.random.seed(random_int+random_index) random_angle = 360*float(np.random.rand(1)) HDU_rot_data = ndimage.rotate(np.array(HDU_rot.data), random_angle, reshape=False) wcs_rot = WCS(HDU_rot.header) x_rot,y_rot = wcs_rot.world_to_pixel_values(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i]) x_rot, y_rot = self.rotate(origin=(0.5*self.npix*np.sqrt(2), 0.5*self.npix*np.sqrt(2)), point=(x_rot,y_rot), angle=random_angle) x_rot, y_rot = x_rot-int(0.5*self.npix*(np.sqrt(2)-1)), y_rot-int(0.5*self.npix*(np.sqrt(2)-1)) random_index += 1 h, w = self.npix, self.npix if plot == True: if i < len(df_clusters): center = [(x,y)] ang_center = [(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i])] else: center = [] ang_center = [] if cluster_density[i] == 0: mask = np.zeros((self.npix, self.npix)) elif cluster_density[i] == 1: mask, _, _, _ = self.create_circular_mask(h, w, center=center, ang_center= ang_center, radius=self.disk_radius) else: for j in range(int(cluster_density[i])-1): center.append(wcs.world_to_pixel_values(coord_neighbours[i][j][0], coord_neighbours[i][j][1])) ang_center.append((coord_neighbours[i][j][0], coord_neighbours[i][j][1])) mask, _, _, _ = self.create_circular_mask(h, w, center=center, ang_center= ang_center, radius=self.disk_radius) ## CLUSTERS if i < len(df_clusters): center = [(x_rot,y_rot)] ang_center = [(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i])] else: center = [] ang_center = [] if cluster_density[i] == 0: mask_rot = np.zeros((self.npix, self.npix)) labels[i,:,:,0] = mask_rot.astype(int) elif cluster_density[i] == 1: mask_rot, _, _, _ = self.create_circular_mask(h, w, center=center, ang_center= ang_center, radius=self.disk_radius) labels[i,:,:,0] = mask_rot.astype(int) else: for j in range(int(cluster_density[i])-1): x_rotn, y_rotn = wcs_rot.world_to_pixel_values(coord_neighbours[i][j][0], coord_neighbours[i][j][1]) x_rotn, y_rotn = self.rotate(origin=(0.5*self.npix*np.sqrt(2), 
0.5*self.npix*np.sqrt(2)), point=(x_rotn,y_rotn), angle=random_angle) x_rotn, y_rotn = x_rotn-int(0.5*self.npix*(np.sqrt(2)-1)), y_rotn-int(0.5*self.npix*(np.sqrt(2)-1)) center.append((x_rotn, y_rotn)) ang_center.append((coord_neighbours[i][j][0], coord_neighbours[i][j][1])) mask_rot, _, _, _ = self.create_circular_mask(h, w, center=center, ang_center= ang_center, radius=self.disk_radius) labels[i,:,:,0] = mask_rot.astype(int) ## FALSE CLUSTERS if i >= len(df_clusters): center = [(x_rot,y_rot)] ang_center = [(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i])] else: center = [] ang_center = [] if false_cluster_density[i] == 0: mask_rot_false = np.zeros((self.npix, self.npix)) labels[i,:,:,1] = mask_rot_false else: for j in range(int(false_cluster_density[i])): x_rotn, y_rotn = wcs_rot.world_to_pixel_values(false_coord_neighbours[i][j][0], false_coord_neighbours[i][j][1]) x_rotn, y_rotn = self.rotate(origin=(0.5*self.npix*np.sqrt(2), 0.5*self.npix*np.sqrt(2)), point=(x_rotn,y_rotn), angle=random_angle) x_rotn, y_rotn = x_rotn-int(0.5*self.npix*(np.sqrt(2)-1)), y_rotn-int(0.5*self.npix*(np.sqrt(2)-1)) center.append((x_rotn, y_rotn)) ang_center.append((false_coord_neighbours[i][j][0], false_coord_neighbours[i][j][1])) mask_rot_false, _, _, _ = self.create_circular_mask(h, w, center=center, ang_center= ang_center, radius=self.disk_radius) labels[i,:,:,1] = mask_rot_false.astype(int) if verbose: print('\n') print(i) print('cluster density: %s'%cluster_density[i]) print('coords no shift: {:.2f}, {:.2f}'.format(coord_catalog['RA'].values[i], coord_catalog['DEC'].values[i])) print('coords shift: {:.2f}, {:.2f}'.format(coord_catalog['RA'].values[i] -30*self.pixsize/60 + (60*self.pixsize/60)*random_coord_x[i], coord_catalog['DEC'].values[i] -30*self.pixsize/60 + (60*self.pixsize/60)*random_coord_y[i])) print(coord_neighbours[i]) print(center) print('\n') n_rot = int(0.5*self.npix*(np.sqrt(2)-1)) if len(HDU_rot_data[n_rot:-n_rot, n_rot:-n_rot]) != self.npix: milca[i,:,:,0] = HDU_rot_data[n_rot:-n_rot-1, n_rot:-n_rot-1] else: milca[i,:,:,0] = HDU_rot_data[n_rot:-n_rot, n_rot:-n_rot] if not label_only: for j in range(len(self.bands)): inputs[i,:,:,j] = self.rotate_patch(p, i, j, patch_rot, coord_catalog, random_angle, n_rot) #------------------------------------------------------------------# # # # # # Plots # # # # # #------------------------------------------------------------------# if plot == True: fig = plt.figure(figsize=(25,5), tight_layout=False) ax = fig.add_subplot(151) im = ax.imshow(HDU.data, origin='lower') if i < len(df_clusters): ax.scatter(x,y) ax.set_title('x={:.2f}, y={:.2f}'.format(x,y)) ax = fig.add_subplot(152) im = ax.imshow(mask, origin='lower') ax.set_title('x={:.2f}, y={:.2f}'.format(x,y)) ax = fig.add_subplot(153) im = ax.imshow(milca[i,:,:,0], origin='lower') #[n_rot:-n_rot, n_rot:-n_rot] if i < len(df_clusters): ax.scatter(x_rot,y_rot) ax.set_title('xrot={:.2f}, yrot={:.2f}'.format(x_rot,y_rot)) ax = fig.add_subplot(154) im = ax.imshow(labels[i,:,:,0], origin='lower') ax.set_title('Potential clusters: {:.0f}'.format(cluster_density[i])) ax = fig.add_subplot(155) im = ax.imshow(labels[i,:,:,1], origin='lower') ax.set_title('Potential sources: {:.0f}'.format(false_cluster_density[i])) GenerateFiles.make_directory(self, path_to_file = self.temp_path + 'training_set_r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores))) plt.savefig(self.temp_path + 'training_set_r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, 
int(cold_cores)) + 'training_%s'%(i) + '.png', bbox_inches='tight', transparent=False) plt.show() plt.close() sum_cluster += cluster_density[i] sum_false += false_cluster_density[i] print(sum_cluster, sum_false) #------------------------------------------------------------------# # # # # # Save files # # # # # #------------------------------------------------------------------# assert len(coords) == len(dataset_type) # if plot == True: # counts = Counter(dataset_type) # df = pd.DataFrame.from_dict(counts, orient='index') # ax = df.plot(kind='bar') # ax.figure.savefig(self.output_path + 'figures/' + 'dataset_type_density_r%s_f%s_d%s'%(region, self.freq, self.disk_radius) + '.png', bbox_inches='tight', transparent=False) if save_files: GenerateFiles.make_directory(self, path_to_file = self.output_path + 'files/' + 'r%s_f%s_d%s_s%s_c%s'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores))) if not label_only: np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'input_n%s_f%s_'%(p, self.freq) + self.dataset, inputs) np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'milca_n%s_f%s_'%(p, self.freq) + self.dataset, milca) np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'type_n%s_f%s_'%(p, self.freq) + self.dataset, np.array(dataset_type)) if p == 0: np.savez_compressed(self.dataset_path + 'type_test_r%s_f%s_c%s_'%(region, self.freq, int(cold_cores)) + self.dataset, np.array(dataset_type)) np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'label_n%s_f%s_'%(p, self.freq) + self.dataset, labels) def test_coords(self, x_left, x_right, y_up, y_down): width = np.abs(x_right - x_left) width_contour = width%self.ndeg n_width = width//self.ndeg height = np.abs(y_up - y_down) height_contour = height%self.ndeg n_height = height//self.ndeg l, b = [], [] x = x_left + 0.5*width_contour + 0.5*self.ndeg y = y_down + 0.5*height_contour + 0.5*self.ndeg l.append(x) b.append(y) for i in range(int(n_height)): if i > 0: x = x_left + 0.5*width_contour + 0.5*self.ndeg y += self.ndeg for j in range(int(n_width)): x += self.ndeg if i > 0 or j > 0: l.append(x) b.append(y) print(self.ndeg, n_height, n_width, n_height * n_width, len(l)) assert int(n_height * n_width) == len(l) coords = SkyCoord(l, b, unit='deg', frame='galactic') catalog = pd.DataFrame(data={'GLON': l, 'GLAT': b}) # print(catalog.head(60)) return coords, catalog def make_test_input(self, region, x_left, x_right, y_up, y_down, cold_cores=False, label_only=False, plot=False, verbose=False): test_coords, test_catalog = self.test_coords(x_left, x_right, y_up, y_down) input_size = len(test_coords) maps = self.maps cutsky = CutSky(maps, npix=self.npix, pixsize=self.pixsize, low_mem=False) if not label_only: inputs = np.ndarray((len(test_coords),self.npix,self.npix,len(self.bands))) labels = np.ndarray((len(test_coords),self.npix,self.npix,2)) milca = np.ndarray((len(test_coords),self.npix,self.npix,1)) #------------------------------------------------------------------# # # # # # Check for potential neighbours # # # # # #------------------------------------------------------------------# coord_catalog, false_catalog, cold_cores_catalog = self.return_coord_catalog() # false_catalog_list = [cold_cores_catalog] 
cold_cores_catalog.query("GLAT > %s"%y_down, inplace=True) cold_cores_catalog.query("GLAT < %s"%y_up, inplace=True) cold_cores_catalog.query("GLON > %s"%x_left, inplace=True) cold_cores_catalog.query("GLON < %s"%x_right, inplace=True) # false_coords = SkyCoord(false_catalog['GLON'].values, false_catalog['GLAT'].values, unit='deg', frame='galactic') cold_cores_coords = SkyCoord(cold_cores_catalog['GLON'].values, cold_cores_catalog['GLAT'].values, unit='deg', frame='galactic') # false_coords_list = [cold_cores_catalog] coords_ns = SkyCoord(coord_catalog['GLON'].values, coord_catalog['GLAT'].values, unit='deg', frame='galactic') cluster_density = [] false_cluster_density = [] coord_neighbours = [] false_coord_neighbours = [] for i in range(input_size): ## Match between test patch center and galaxy clusters idx, _, _ = match_coordinates_sky(test_coords, coords_ns, nthneighbor=1) l_diff = np.abs(test_catalog['GLON'].values[i] - coord_catalog['GLON'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLON'].values[i] - coord_catalog['GLON'].values[idx[i]]) - 360) < l_diff: l_diff = np.abs(np.abs(test_catalog['GLON'].values[i] - coord_catalog['GLON'].values[idx[i]]) - 360) b_diff = np.abs(test_catalog['GLAT'].values[i] - coord_catalog['GLAT'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLAT'].values[i] - coord_catalog['GLAT'].values[idx[i]]) - 180) < b_diff: b_diff = np.abs(np.abs(test_catalog['GLAT'].values[i] - coord_catalog['GLAT'].values[idx[i]]) - 180) k = 2 neighb = [[coord_catalog['GLON'].values[idx[i]], coord_catalog['GLAT'].values[idx[i]]]] while l_diff <= 0.5*self.ndeg and b_diff <= 0.5*self.ndeg: idx, _, _ = match_coordinates_sky(test_coords, coords_ns, nthneighbor=k) l_diff = np.abs(test_catalog['GLON'].values[i] - coord_catalog['GLON'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLON'].values[i] - coord_catalog['GLON'].values[idx[i]]) - 360) < l_diff: l_diff = np.abs(np.abs(test_catalog['GLON'].values[i] - coord_catalog['GLON'].values[idx[i]]) - 360) b_diff = np.abs(test_catalog['GLAT'].values[i] - coord_catalog['GLAT'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLAT'].values[i] - coord_catalog['GLAT'].values[idx[i]]) - 180) < b_diff: b_diff = np.abs(np.abs(test_catalog['GLAT'].values[i] - coord_catalog['GLAT'].values[idx[i]]) - 180) neighb.append([coord_catalog['GLON'].values[idx[i]], coord_catalog['GLAT'].values[idx[i]]]) k += 1 coord_neighbours.append(neighb) cluster_density.append(k-2) ## Match between test patch center and false clusters idx, _, _ = match_coordinates_sky(test_coords, cold_cores_coords, nthneighbor=1) l_diff = np.abs(test_catalog['GLON'].values[i] - cold_cores_catalog['GLON'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLON'].values[i] - cold_cores_catalog['GLON'].values[idx[i]]) - 360) < l_diff: l_diff = np.abs(np.abs(test_catalog['GLON'].values[i] - cold_cores_catalog['GLON'].values[idx[i]]) - 360) b_diff = np.abs(test_catalog['GLAT'].values[i] - cold_cores_catalog['GLAT'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLAT'].values[i] - cold_cores_catalog['GLAT'].values[idx[i]]) - 180) < b_diff: b_diff = np.abs(np.abs(test_catalog['GLAT'].values[i] - cold_cores_catalog['GLAT'].values[idx[i]]) - 180) k = 2 neighb = [[cold_cores_catalog['GLON'].values[idx[i]], cold_cores_catalog['GLAT'].values[idx[i]]]] while l_diff <= 0.5*self.ndeg and b_diff <= 0.5*self.ndeg: idx, _, _ = match_coordinates_sky(test_coords, cold_cores_coords, nthneighbor=k) l_diff = np.abs(test_catalog['GLON'].values[i] - cold_cores_catalog['GLON'].values[idx[i]]) if 
np.abs(np.abs(test_catalog['GLON'].values[i] - cold_cores_catalog['GLON'].values[idx[i]]) - 360) < l_diff: l_diff = np.abs(np.abs(test_catalog['GLON'].values[i] - cold_cores_catalog['GLON'].values[idx[i]]) - 360) b_diff = np.abs(test_catalog['GLAT'].values[i] - cold_cores_catalog['GLAT'].values[idx[i]]) if np.abs(np.abs(test_catalog['GLAT'].values[i] - cold_cores_catalog['GLAT'].values[idx[i]]) - 180) < b_diff: b_diff = np.abs(np.abs(test_catalog['GLAT'].values[i] - cold_cores_catalog['GLAT'].values[idx[i]]) - 180) neighb.append([cold_cores_catalog['GLON'].values[idx[i]], cold_cores_catalog['GLAT'].values[idx[i]]]) k += 1 false_coord_neighbours.append(neighb) false_cluster_density.append(k-2) sum_cluster, sum_false = 0, 0 ra_list, dec_list = [], [] for i, coord in enumerate(test_coords): patch = cutsky.cut_fits(coord) HDU = patch[-1]['fits'] wcs = WCS(HDU.header) center = [] ang_center = [] if cluster_density[i] == 0: mask = np.zeros((self.npix, self.npix)) labels[i,:,:,0] = mask else: for j in range(cluster_density[i]): center.append(wcs.world_to_pixel_values(coord_neighbours[i][j][0], coord_neighbours[i][j][1])) ang_center.append((coord_neighbours[i][j][0], coord_neighbours[i][j][1])) mask, _, ra, dec = self.create_circular_mask(self.npix, self.npix, center=center, ang_center= ang_center, radius=self.disk_radius) ra_list = np.concatenate((ra_list, ra)) dec_list = np.concatenate((dec_list, dec)) labels[i,:,:,0] = mask.astype(int) center = [] ang_center = [] if false_cluster_density[i] == 0: mask_false = np.zeros((self.npix, self.npix)) labels[i,:,:,1] = mask_false else: for j in range(false_cluster_density[i]): center.append(wcs.world_to_pixel_values(false_coord_neighbours[i][j][0], false_coord_neighbours[i][j][1])) ang_center.append((false_coord_neighbours[i][j][0], false_coord_neighbours[i][j][1])) mask_false, _, ra, dec = self.create_circular_mask(self.npix, self.npix, center=center, ang_center= ang_center, radius=self.disk_radius) ra_list = np.concatenate((ra_list, ra)) dec_list = np.concatenate((dec_list, dec)) labels[i,:,:,1] = mask_false.astype(int) milca[i,:,:,0] = patch[-1]['fits'].data if not label_only: for j in range(len(self.bands)): inputs[i,:,:,j] = patch[j]['fits'].data if verbose: print('\n') print(i) print('cluster density: %s'%cluster_density[i]) print('false cluster density: %s'%false_cluster_density[i]) print('test centers: {:.2f}, {:.2f}'.format(test_catalog['GLON'].values[i], test_catalog['GLAT'].values[i])) print(false_coord_neighbours[i]) print(coord_neighbours[i]) print('\n') if plot == True: fig = plt.figure(figsize=(12,5), tight_layout=False) ax = fig.add_subplot(131) im = ax.imshow(HDU.data, origin='lower') ax.set_title('%s: '%i + 'l={:.2f}, b={:.2f}'.format(test_catalog['GLON'].values[i], test_catalog['GLAT'].values[i])) ax = fig.add_subplot(132) im = ax.imshow(mask, origin='lower') ax.set_title('potential clusters: %s'%cluster_density[i]) ax = fig.add_subplot(133) im = ax.imshow(mask_false, origin='lower') ax.set_title('potential sources: %s'%false_cluster_density[i]) GenerateFiles.make_directory(self, path_to_file = self.temp_path + 'test_set_r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores))) plt.savefig(self.temp_path + 'test_set_r%s_f%s_d%s_s%s/'%(region, self.freq, self.disk_radius, self.npix) + 'test_%s'%i + '.png', bbox_inches='tight', transparent=False) plt.show() plt.close() sum_cluster += cluster_density[i] sum_false += false_cluster_density[i] # cluster_coords = np.array([ra_list, dec_list]) print(sum_cluster, 
sum_false) # np.savez_compressed(self.dataset_path + 'test_cluster_coordinates_f%s_'%self.freq + self.dataset, cluster_coords) if not label_only: np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'input_test_f%s_'%(self.freq) + self.dataset, inputs) np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'milca_test_f%s_'%(self.freq) + self.dataset, milca) np.savez_compressed(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + 'label_test_f%s_'%(self.freq) + self.dataset, labels) def test_data_generator(self, cold_cores=False, label_only=False, n_jobs=1, plot=False, verbose=False): Parallel(n_jobs=n_jobs)(delayed(self.make_test_input)(region, x_left, x_right, y_up, y_down, cold_cores=cold_cores, label_only=label_only, plot=plot, verbose=verbose) for region,(x_left, x_right, y_up, y_down) in tqdm(enumerate(self.test_regions))) # for region,(x_left, x_right, y_up, y_down) in tqdm(enumerate(self.test_regions)): # self.make_test_input(region, x_left, x_right, y_up, y_down, label_only, plot, verbose) def train_data_generator(self, loops, cold_cores=False, label_only=False, n_jobs = 1, plot=False): """Calls make_input n=loops times to create input/output datasets for all clusters in the selected cluster catalog. Patches contain at least one cluster and for each loop, patches undergo random translations. Function saves the following datasets as .npz files: - dataset_type: list of str containing either 'train', 'val' or 'test'. - inputs: np.ndarray with shape (loops*len(self.dataset), self.npix, self.npix, len(self.bands)) containing input patches - labels: np.ndarray with shape (loops*len(self.dataset), self.npix, self.npix, 1) containing segmentation masks - milca: np.ndarray with shape (loops*len(self.dataset), self.npix, self.npix, 1) containing milca patches. Args: loops (int): number of times the dataset containing patches with at least one cluster within will be added again to training set with random variations (translations) n_jobs (int, optional): Core numbers that will be used. Core numbers cannot exceed loops. Defaults to 1. plot (bool, optional): If True, will plot the number of potential objects per patch. Defaults to True. 
""" for region in range(0,len(self.test_regions)): all_files = glob.glob(os.path.join(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/*.npz'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)))) for f in all_files: os.remove(f) Parallel(n_jobs=n_jobs)(delayed(self.make_input)(p, cold_cores=cold_cores, label_only=label_only, plot=plot) for p in tqdm(range(loops))) for region in range(0,len(self.test_regions)): ## dataset type to know if data is in training, validation or test set all_type = glob.glob(os.path.join(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)), "type_n*.npz")) X = [] for f in all_type: X.append(np.load(f)['arr_0']) dataset_type = np.concatenate(X, axis=0) np.savez_compressed(self.dataset_path + 'type_r%s_f%s_d%s_s%s_c%s_'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)) + self.dataset, dataset_type) ## dataset type bar plot # if plot == True: # counts = Counter(dataset_type) # df = pd.DataFrame.from_dict(counts, orient='index') # ax = df.plot(kind='bar') # ax.figure.savefig(self.output_path + 'figures/' + 'dataset_type_density' + '.png', bbox_inches='tight', transparent=False) ## input file if not label_only: all_type = glob.glob(os.path.join(self.output_path + 'files/r%s_f%s_d%s_s%s_c%s/'%(region, self.freq, self.disk_radius, self.npix, int(cold_cores)), "input_n*.npz")) X = [] for f in all_type: X.append(
np.load(f)
numpy.load
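The row above ends where the per-loop .npz shards written with np.savez_compressed are read back and stacked into one training array. As a hedged illustration only (the directory, file names, and array shapes below are invented, not the ones used in the source), the round-trip looks like this:

import glob
import os
import tempfile

import numpy as np

# Write a few compressed .npz shards, as the patch generator above does once per loop.
tmp_dir = tempfile.mkdtemp()
for p in range(3):
    patches = np.random.rand(4, 64, 64, 6).astype(np.float32)  # (n_patches, npix, npix, n_bands)
    np.savez_compressed(os.path.join(tmp_dir, 'input_n%s.npz' % p), patches)

# Read every shard back with np.load and concatenate along the patch axis.
shards = []
for f in sorted(glob.glob(os.path.join(tmp_dir, 'input_n*.npz'))):
    shards.append(np.load(f)['arr_0'])  # 'arr_0' is the default key for a positional array
inputs = np.concatenate(shards, axis=0)
print(inputs.shape)  # (12, 64, 64, 6)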
# (ytz): eigenvalue problem solvers come from # ported from Numerical diagonalization of 3x3 matrcies (sic), v1.1 # https://www.mpi-hd.mpg.de/personalhomes/globes/3x3/index.html import jax import numpy as onp import jax.numpy as np DBL_EPSILON = 2.2204460492503131e-16 def dsyevc3(A): de = A[0][1] * A[1][2] dd = onp.square(A[0][1]) ee = onp.square(A[1][2]) ff = onp.square(A[0][2]) m = A[0][0] + A[1][1] + A[2][2] c1 = (A[0][0] * A[1][1] + A[0][0] * A[2][2] + A[1][1] * A[2][2]) - (dd + ee + ff) c0 = A[2][2] * dd + A[0][0] * ee + A[1][1] * ff - A[0][0] * A[1][1] * A[2][2] - 2.0 * A[0][2] * de p = onp.square(m) - 3.0 * c1 q = m * (p - (3.0 / 2.0) * c1) - (27.0 / 2.0) * c0 sqrt_p = onp.sqrt(onp.fabs(p)) phi = 27.0 * (0.25 * onp.square(c1) * (p - c1) + c0 * (q + 27.0 / 4.0 * c0)) phi = (1.0 / 3.0) * onp.arctan2(onp.sqrt(onp.fabs(phi)), q) c = sqrt_p * onp.cos(phi) s = (1.0 / onp.sqrt(3)) * sqrt_p * onp.sin(phi) w = onp.zeros(3) w[1] = (1.0 / 3.0) * (m - c) w[2] = w[1] + s w[0] = w[1] + c w[1] -= s return onp.sort(w) def dsyevv3(input_tensor): A = onp.asarray(input_tensor).copy() w = dsyevc3(A) Q = onp.zeros((3, 3)) # column eigenvectors wmax = onp.fabs(w[0]) if onp.fabs(w[1]) > wmax: wmax = onp.fabs(w[1]) if onp.fabs(w[2]) > wmax: wmax = onp.fabs(w[2]) thresh = onp.square(8.0 * DBL_EPSILON * wmax) # # Prepare calculation of eigenvectors n0tmp = onp.square(A[0][1]) + onp.square(A[0][2]) n1tmp = onp.square(A[0][1]) + onp.square(A[1][2]) Q[0][1] = A[0][1] * A[1][2] - A[0][2] * A[1][1] Q[1][1] = A[0][2] * A[0][1] - A[1][2] * A[0][0] Q[2][1] = onp.square(A[0][1]) # # Calculate first eigenvector by the formula # # v[0] = (A - w[0]).e1 x (A - w[0]).e2 A[0][0] -= w[0] A[1][1] -= w[0] Q[0][0] = Q[0][1] + A[0][2] * w[0] Q[1][0] = Q[1][1] + A[1][2] * w[0] Q[2][0] = A[0][0] * A[1][1] - Q[2][1] norm = onp.square(Q[0][0]) + onp.square(Q[1][0]) + onp.square(Q[2][0]) n0 = n0tmp + onp.square(A[0][0]) n1 = n1tmp + onp.square(A[1][1]) error = n0 * n1 if n0 <= thresh: # If the first column is zero, then (1,0,0) is an eigenvector Q[0][0] = 1.0 Q[1][0] = 0.0 Q[2][0] = 0.0 elif n1 <= thresh: # If the second column is zero, then (0,1,0) is an eigenvector Q[0][0] = 0.0 Q[1][0] = 1.0 Q[2][0] = 0.0 elif norm < onp.square(64.0 * DBL_EPSILON) * error: # If angle between A[0] and A[1] is too small, don't use # (ytz): don't handle this assert 0 t = onp.square(A[0][1]) # cross product, but calculate v ~ (1, -A0/A1, 0) f = -A[0][0] / A[0][1] if onp.square(A[1][1]) > t: t = onp.square(A[1][1]) f = -A[0][1] / A[1][1] if onp.square(A[1][2]) > t: f = -A[0][2] / A[1][2] norm = 1.0 / onp.sqrt(1 + onp.square(f)) Q[0][0] = norm Q[1][0] = f * norm Q[2][0] = 0.0 else: # This is the standard branch norm = onp.sqrt(1.0 / norm) for j in range(3): Q[j][0] = Q[j][0] * norm # Prepare calculation of second eigenvector t = w[0] - w[1] if onp.fabs(t) > 8.0 * DBL_EPSILON * wmax: # For non-degenerate eigenvalue, calculate second eigenvector by the formula # v[1] = (A - w[1]).e1 x (A - w[1]).e2 A[0][0] += t A[1][1] += t Q[0][1] = Q[0][1] + A[0][2] * w[1] Q[1][1] = Q[1][1] + A[1][2] * w[1] Q[2][1] = A[0][0] * A[1][1] - Q[2][1] norm = onp.square(Q[0][1]) + onp.square(Q[1][1]) + onp.square(Q[2][1]) n0 = n0tmp + onp.square(A[0][0]) n1 = n1tmp + onp.square(A[1][1]) error = n0 * n1 if n0 <= thresh: # If the first column is zero, then (1,0,0) is an eigenvector Q[0][1] = 1.0 Q[1][1] = 0.0 Q[2][1] = 0.0 elif n1 <= thresh: # If the second column is zero, then (0,1,0) is an eigenvector Q[0][1] = 0.0 Q[1][1] = 1.0 Q[2][1] = 0.0 elif norm < onp.square(64.0 * 
DBL_EPSILON) * error: t =
onp.square(A[0][1])
numpy.square
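The eigenvalue routine in the row above (dsyevc3) computes the roots of the 3x3 characteristic polynomial in closed form. A minimal sanity check, assuming the dsyevc3 function from that row is defined in the same session, is to compare its output against numpy.linalg.eigvalsh on random symmetric matrices:

import numpy as onp

def check_dsyevc3(trials=100):
    """Compare the analytic 3x3 eigenvalues against numpy's LAPACK-backed solver."""
    rng = onp.random.default_rng(0)
    for _ in range(trials):
        m = rng.standard_normal((3, 3))
        a = 0.5 * (m + m.T)                # symmetrize so both solvers see the same input
        w_analytic = onp.sort(dsyevc3(a))  # closed-form roots, sorted ascending
        w_lapack = onp.linalg.eigvalsh(a)  # returned in ascending order
        assert onp.allclose(w_analytic, w_lapack, atol=1e-8)
    return True

print(check_dsyevc3())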
# -*- coding: utf-8 -*- from __future__ import print_function from collections import OrderedDict import os import sys import cPickle as pickle import codecs import time import numpy as np import theano from lasagne.updates import adam from nltk.translate.bleu_score import corpus_bleu from nltk.tokenize import word_tokenize sys.path.append('./src/data') theano.config.floatX = 'float32' from data_processing import chunker from SETTINGS import * class RunTranslation(object): def __init__(self, solver, solver_kwargs, recognition_model, generative_model, valid_vocab_x, valid_vocab_y, out_dir, dataset_path_x, dataset_path_y, load_param_dir=None, restrict_max_length=None, train_prop=0.95): """ :param solver: solver class that handles sgvb training and updating :param solver_kwargs: kwargs for solver :param recognition_model: instance of the recognition model class :param generative_model: instance of the generative model class :param valid_vocab_x: valid vocabulary for x :param valid_vocab_y: valid vocabulary for y :param out_dir: path to out directory :param dataset_path_x: path to dataset of x :param dataset_path_y: path to dataset of y :param load_param_dir: path to directory of saved variables. If None, train from start :param restricted_max_length: restrict the max lengths of the sentences :param train_prop: how much of the original data should be split into training/test set """ # set all attributes self.solver = solver # solver kwargs are the following # generative_model # recognition_model # max_len_x # max_len_y # vocab_size_x # vocab_size_y # num_time_steps # gen_nn_kwargs # rec_nn_kwargs # z_dim # z_dist_gen # x_dist_gen # y_dist_gen # z_dist_rec self.solver_kwargs = solver_kwargs self.recognition_model = recognition_model self.generative_model = generative_model self.valid_vocab_x = valid_vocab_x self.valid_vocab_y = valid_vocab_y self.out_dir = out_dir self.dataset_path_x = dataset_path_x self.dataset_path_y = dataset_path_y self.load_param_dir = load_param_dir self.restrict_max_length = restrict_max_length self.train_prop = train_prop # data sets self.x_train, self.x_test, self.y_train, self.y_test, self.L_x_train, self.L_x_test, self.L_y_train, self.L_y_test = self.load_data(train_prop, restrict_max_length) print('All data sets loaded') print('#data points (train): {}, #data points (Test): {}'.format(len(self.L_x_train), len(self.L_x_test))) # Number of training and test examples # Might need to use validation dataset as well self.train_size = len(self.L_x_train) self.test_size = len(self.L_x_test) # # max_length from the actual data set and instantiate the solver self.max_length_x = np.concatenate((self.x_train, self.x_test), axis=0).shape[1] self.max_length_y = np.concatenate((self.y_train, self.y_test), axis=0).shape[1] # self.sgvb = solver(max_length=self.max_length, **self.solver_kwargs) print('Maximum length of sentence (x, y): ({}, {})'.format(self.max_length_x, self.max_length_x)) # initialise sgvb solver (Check how it is done now) self.sgvb = self.solver(max_len_x=self.max_length_x, max_len_y=self.max_length_y, **self.solver_kwargs) # if pretrained, load saved parameters of the model and set # the parameters of the recognition/generative models if load_param_dir is not None: with open(os.path.join(self.load_param_dir, 'recog_params.save'), 'rb') as f: self.sgvb.recognition_model.set_param_values(pickle.load(f)) with open(os.path.join(self.load_param_dir, 'gen_params_x.save'), 'rb') as f: self.sgvb.generative_model_x.set_param_values(pickle.load(f)) with 
open(os.path.join(self.load_param_dir, 'gen_params_y.save'), 'rb') as f: self.sgvb.generative_model_y.set_param_values(pickle.load(f)) with open(os.path.join(self.load_param_dir, 'all_embeddings_x.save'), 'rb') as f: self.sgvb.all_embeddings_x.set_value(pickle.load(f)) with open(os.path.join(self.load_param_dir, 'all_embeddings_y.save'), 'rb') as f: self.sgvb.all_embeddings_y.set_value(pickle.load(f)) print('Parameters loaded and set.') def load_data(self, train_prop, restrict_max_length): """Load data set to use for training and testing :param train_prop: (float) float in [0, 1] indicating proportion of train/test split :param restrict_max_length: (int) upper restriction on the max lengths of sentences""" # We load the lists from the pickle files # datasets is of the form of list of lists, # each list consist of numbers from index of the # vocabulary. So N * max(L) list of lists of int. with open(self.dataset_path_x) as f: dataset_x = pickle.load(f) with open(self.dataset_path_y) as f: dataset_y = pickle.load(f) # words are interpreted abstractly (can be chars or words) words_x = [] words_y = [] # iterate over sentences if restrict_max_length is not None: for sent_x, sent_y in zip(dataset_x, dataset_y): # filtnner out the sentences that are longer than restrict_max_length if len(sent_x) <= restrict_max_length and len(sent_y) <= restrict_max_length: words_x.append(sent_x) words_y.append(sent_y) else: words_x = dataset_x words_y = dataset_y # lengths of all of the words in source and target dataset L_x = np.array([len(sent_x) for sent_x in words_x]) L_y = np.array([len(sent_y) for sent_y in words_y]) # Numpy broadcasting to create a mask N * max(L) # the mask is such that it is True when the index # has a valid character, False when the original sentence # is done (When we have gone into the padding) pad_x = L_x[:, None] > np.arange(max(L_x)) pad_y = L_y[:, None] > np.arange(max(L_y)) # padd the sentences with zeros after they have ended words_to_return_x = np.full(pad_x.shape, 0, dtype='int') words_to_return_x[pad_x] = np.concatenate(words_x) words_to_return_y =
np.full(pad_y.shape, 0, dtype='int')
numpy.full
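The completion above allocates the padded target matrix with np.full before scattering the ragged token lists into it through a broadcast boolean mask. The same trick in isolation (toy token ids, pad id 0) looks like:

import numpy as np

# Toy "sentences" of token ids with different lengths.
sentences = [[5, 2, 9], [7, 1], [3, 8, 4, 6, 2]]

lengths = np.array([len(s) for s in sentences])
# Broadcasting builds an (N, max_len) boolean mask: True where a real token exists.
mask = lengths[:, None] > np.arange(lengths.max())

# np.full allocates the padded matrix; indexing with the mask scatters the
# flattened tokens into it row by row, leaving zeros after each sentence ends.
padded = np.full(mask.shape, 0, dtype='int')
padded[mask] = np.concatenate(sentences)

print(padded)
# [[5 2 9 0 0]
#  [7 1 0 0 0]
#  [3 8 4 6 2]]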
import numpy as np class HMC(): def __init__(self, log_prob, grad_log_prob, invmetric_diag=None): self.log_prob, self.grad_log_prob = log_prob, grad_log_prob self.V = lambda x : self.log_prob(x)*-1. #self.V_g = lambda x : self.grad_log_prob(x)*-1. self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0 if invmetric_diag is None: self.invmetric_diag = 1. else: self.invmetric_diag = invmetric_diag self.metricstd = self.invmetric_diag**-0.5 self.KE = lambda p: 0.5*(p**2 * self.invmetric_diag).sum() self.KE_g = lambda p: p * self.invmetric_diag def V_g(self, x): self.Vgcount += 1 return self.grad_log_prob(x)*-1. def unit_norm_KE(self, p): return 0.5 * (p**2).sum() def unit_norm_KE_g(self, p): return p def H(self, q,p): self.Hcount += 1 return self.V(q) + self.KE(p) def leapfrog(self, q, p, N, step_size): self.leapcount += 1 q0, p0 = q, p try: p = p - 0.5*step_size * self.V_g(q) for i in range(N-1): q = q + step_size * self.KE_g(p) p = p - step_size * self.V_g(q) q = q + step_size * self.KE_g(p) p = p - 0.5*step_size * self.V_g(q) return q, p except Exception as e: print(e) return q0, p0 def leapfrog1(self, q, p, step_size, Vgq=None): #This needs to be optimized to not estimate V_g again and again self.leapcount += 1 q0, p0 = q, p try: if Vgq is None: Vgq = self.V_g(q) p = p - 0.5*step_size * Vgq q = q + step_size * self.KE_g(p) p = p - 0.5*step_size * self.V_g(q) return q, p, Vgq except Exception as e: print(e) return q0, p0, Vgq def metropolis(self, qp0, qp1): q0, p0 = qp0 q1, p1 = qp1 H0 = self.H(q0, p0) H1 = self.H(q1, p1) prob = np.exp(H0 - H1) #prob = min(1., np.exp(H0 - H1)) if np.isnan(prob) or np.isinf(prob) or (q0-q1).sum()==0: return q0, p0, 2., [H0, H1] elif np.random.uniform(0., 1., size=1) > min(1., prob): return q0, p0, 0., [H0, H1] else: return q1, p1, 1., [H0, H1] def hmc_step(self, q, N, step_size): '''Single hmc iteration Parameters: ---------- q: initial position N: number of leapfrog steps step_size: step size for leapfrog iteration Returns: -------- A tuple of- q p accepted (0/1/2) acceptance probability list of [Hcounts, Vcounts, nleapfrogs] ''' self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0 p = np.random.normal(size=q.size).reshape(q.shape) * self.metricstd q1, p1 = self.leapfrog(q, p, N, step_size) q, p, accepted, prob = self.metropolis([q, p], [q1, p1]) return q, p, accepted, prob, [self.Hcount, self.Vgcount, self.leapcount] ###################### class AdHMC_eps0(HMC): def __init__(self, log_prob, grad_log_prob, invmetric_diag=None): super().__init__(log_prob, grad_log_prob, invmetric_diag) def get_stepsize(self, q0, p0, smin=0.01, smax=1.0, ntry=20, logspace=True, nsteps=1, eps=None): H0 = self.H(q0, p0) Hs = np.zeros(ntry) if logspace: steps = np.logspace(np.log10(smin), np.log10(smax), ntry) else: steps = np.linspace(smin, smax, ntry) pwts = steps.copy()**0.5 #np.linspace(0.9, 1.1, steps.size) for iss, ss in enumerate(steps): #nsteps = int(steps.max()/ss)+1 q1, p1 = self.leapfrog(q0, p0, nsteps, ss) Hs[iss] = self.H(q1, p1) pp = np.exp(H0 - Hs) * pwts pp[np.isnan(pp)] = 0 pp[np.isinf(pp)] = 0 pp /= pp.sum() cdf = np.cumsum(pp) if eps is None: sx = np.random.uniform(low=cdf.min()) isx = np.where(sx > cdf)[0][-1] sx2 = np.random.uniform(steps[isx], steps[isx+1]) prob = pp[isx+1] # * 1/(steps[isx+1]-steps[isx+1]) return sx2, pp[isx+1] else: prob = pp[np.where(steps > eps)[0][0]] return prob def hmc_step(self, q0, Nleap, smin=0.01, smax=1.0, Tint=0, ntry=10, nsteps=1): '''Single hmc iteration Parameters: ---------- q: initial position N: number of leapfrog steps 
step_size: step size for leapfrog iteration smin: Minimum allowed step size smin: Maximum allowed step size Tint: Time of integration ntry: Number of points to try for estimating first step size nsteps: Number of steps per try for estimating first step size Returns: -------- A tuple of- q p accepted (0/1/2) acceptance probability array of [pfactor denominator, pfactor numberator, stepsize] list of [Hcounts, Vcounts, nleapfrogs] ''' self.leapcount, self.Vgcount, self.Hcount = 0, 0, 0 p0 = np.random.normal(size=q0.size).reshape(q0.shape) * self.metricstd H0 = self.H(q0, p0) if (Tint == 0) and (Nleap == 0): print("Tint and Nleap cannot be both zeros") import sys sys.exit() elif (Tint != 0) and (Nleap != 0): print("Tint and Nleap both given and are inconsistent") import sys sys.exit() #First step is drawn from a distribution ss, pf_den = self.get_stepsize(q0, p0, smin, smax, ntry=ntry, nsteps=nsteps) eps = ss if Tint == 0: N = Nleap else: N = int(Tint/eps) + 1 #print("Steps size is %0.2f, and number of steps is %d"%(eps, N)) q1, p1 = self.leapfrog(q0, p0, N, ss) H1 = self.H(q1, p1) pb_num = self.get_stepsize(q1, -p1, smin=smin, smax=smax, eps=ss, ntry=ntry, nsteps=nsteps) hastings_factor = pb_num/pf_den prob = np.exp(H0 - H1) * hastings_factor #print("prb, fac, metrop : ", prob, adfac, prob/adfac, pb_num, pf_den) toret = [[prob, prob/hastings_factor, hastings_factor], np.stack([pf_den, pb_num, eps]), [self.Hcount, self.Vgcount, self.leapcount]] if
np.isnan(prob)
numpy.isnan
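The completion above guards the Metropolis acceptance ratio against numerically invalid values. A compact, self-contained version of that guard (toy energies, not the HMC class from the row above) could be:

import numpy as np

def metropolis_accept(h0, h1, rng=np.random.default_rng(0)):
    """Return True if the move from energy h0 to energy h1 is accepted.

    exp(h0 - h1) can overflow to inf or become nan when the integrator
    diverges, so those cases are rejected before drawing a uniform variate.
    """
    prob = np.exp(h0 - h1)
    if np.isnan(prob) or np.isinf(prob):
        return False
    return rng.uniform() < min(1.0, prob)

print(metropolis_accept(10.0, 9.5))   # downhill move: prob > 1, always accepted
print(metropolis_accept(10.0, 1e4))   # exp underflows to 0.0 -> rejected
print(metropolis_accept(1e4, 10.0))   # exp overflows to inf -> rejected by the guard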
#!/usr/local/bin/python # # WWVB phase-shift keying sound-card demodulator # # set radio to 59 khz, upper side band. # radio must be within 10 hz of correct frequency. # # my setup uses a Z10024A low-pass filter to keep out AM broadcast. # an outdoor dipole or indoor W1VLF antenna work well. # # <NAME>, AB1HL # import numpy import wave import weakaudio import weakcat import weakutil import weakargs import weakaudio import scipy import scipy.signal import sys import os import math import time import calendar import subprocess import argparse # a[] and b[] are -1/0/1 bit sequences. # in how many bits are they identical? def bitmatch(a, b): n = 0 i = 0 while i < len(a) and i < len(b): if a[i] != 0 and b[i] != 0 and a[i] == b[i]: n += 1 i += 1 return n # invert a -1/1 bit sequence. def invert(a): b = a[:] for i in range(0, len(b)): b[i] *= -1 return b # part of wwvb checksum work. # tm[] is 0/1 array of the 26 time bits. # a[] is the array of indices of bits to xor. def xsum(tm, a): z = 0 for i in range(0, len(a)): b = tm[a[i]] z ^= b if z == 0: return -1 else: return 1 # http://gordoncluster.wordpress.com/2014/02/13/python-numpy-how-to-generate-moving-averages-efficiently-part-2/ def smooth(values, window): weights = numpy.repeat(1.0, window)/window sma = numpy.convolve(values, weights, 'valid') return sma class WWVB: center = 1000 # 60 khz shifted to here in audio filterwidth = 20 # bandpass filter width in hertz searchhz = 10 # only look for WWVB at +/- searchhz # set these to True in order to search for the signal in # time and frequency. set them to False if the PC clock is # correct, the radio frequency is accurate, and the goal # is to measure reception quality rather than to learn the time. searchtime = True searchfreq = True debug = False filter = None c2filter = None c3filter = None samples = numpy.array([0]) offset = 0 flywheel = 0 flyfreq = None # carrier frequency of last good ecc # remember all the good CRC offset/minute pairs, # to try to guess the most likely correct time for # any given minute with a bad CRC. timepairs = numpy.zeros([0,2]) # each is [ offset, minute ] def __init__(self): pass def openwav(self, filename): self.wav = wave.open(filename) self.wav_channels = self.wav.getnchannels() self.wav_width = self.wav.getsampwidth() self.rate = self.wav.getframerate() # for guess1() / weakutil.freq_from_fft(). weakutil.init_freq_from_fft(59 * self.rate) weakutil.fft_sizes([ 59 * self.rate ]) def readwav(self, chan): z = self.wav.readframes(1024) if self.wav_width == 1: zz = numpy.fromstring(z, numpy.int8) elif self.wav_width == 2: if (len(z) % 2) == 1: return numpy.array([]) zz = numpy.fromstring(z, numpy.int16) else: sys.stderr.write("oops wave_width %d" % (self.wav_width)) sys.exit(1) if self.wav_channels == 1: return zz elif self.wav_channels == 2: return zz[chan::2] # chan 0/1 => left/right else: sys.stderr.write("oops wav_channels %d" % (self.wav_channels)) sys.exit(1) def gowav(self, filename, chan): self.openwav(filename) while True: buf = self.readwav(chan) if buf.size < 1: break self.gotsamples(buf, 0) while self.process(False): pass self.process(True) def opencard(self, desc): self.rate = 8000 self.audio = weakaudio.new(desc, self.rate) # for guess1() / weakutil.freq_from_fft(). 
weakutil.init_freq_from_fft(59 * self.rate) weakutil.fft_sizes([ 59 * self.rate ]) def gocard(self): while True: [ buf, buf_time ] = self.audio.read() if len(buf) > 0: mx = numpy.max(numpy.abs(buf)) if mx > 30000: sys.stderr.write("!") self.gotsamples(buf, buf_time) while self.process(False): pass else: time.sleep(0.2) def gotsamples(self, buf, time_of_last): # the band-pass filter. if self.filter == None: self.filter = weakutil.butter_bandpass(self.center - self.filterwidth/2, self.center + self.filterwidth/2, self.rate, 3) self.zi = scipy.signal.lfiltic(self.filter[0], self.filter[1], [0]) zi = scipy.signal.lfilter(self.filter[0], self.filter[1], buf, zi=self.zi) self.samples = numpy.concatenate((self.samples, zi[0])) self.zi = zi[1] # remember time of self.sample[0] # XXX off by filter delay self.samples_time = time_of_last - len(self.samples) / float(self.rate) def guess1(self, a, center, width): fx = weakutil.freq_from_fft(a, self.rate, center - width/2, center + width/2) return fx # guess the frequency of the WWVB carrier. # only looks +/- 10 hz. def guess(self): # apply FFT to abs(samples) then divide by two, # since psk has no energy at "carrier". sa = numpy.abs(self.samples) n = 0 fx = 0 sz = 59*self.rate while (n+1)*sz <= len(sa) and (n+1)*sz <= 60*self.rate: xx = self.guess1(sa[n*sz:(n+1)*sz], 2*self.center, self.searchhz * 2.0) fx += xx n += 1 fx /= n return fx / 2.0 # guess what the minute must be for a given sample offset, # based on past decoded minutes and their sample offsets. def guessminute(self, offset): if len(self.timepairs) < 1: return -1 offsets = self.timepairs[:,0] minutes = self.timepairs[:,1] xx = numpy.subtract(offset, offsets) xx = numpy.divide(xx, self.rate * 60.0) guesses =
numpy.add(minutes, xx)
numpy.add
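The completion above turns previously decoded (sample offset, minute) pairs into a minute guess for a new offset by converting the offset difference into elapsed minutes. A standalone numeric sketch of that arithmetic, with a made-up sample rate and made-up pairs, is:

import numpy as np

rate = 8000                      # samples per second, as in the sound-card path above
timepairs = np.array([           # [sample offset, decoded minute]
    [0,      600],
    [480000, 601],               # 60 s later: 60 * 8000 samples
])

def guess_minute(offset):
    offsets, minutes = timepairs[:, 0], timepairs[:, 1]
    elapsed = np.subtract(offset, offsets) / (rate * 60.0)  # sample offsets -> elapsed minutes
    guesses = np.add(minutes, elapsed)                      # shift each past minute forward
    # A real decoder would vote or take a robust estimate; the mean is enough here.
    return guesses.mean()

print(guess_minute(960000))      # ~602.0: two minutes after the first decoded pair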
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ @author: <NAME> """ import numpy as np import os import warnings warnings.filterwarnings("ignore") import scipy.io import random random.seed(2) from sklearn.metrics import confusion_matrix import csv import pickle from numpy import load import math import numpy as np import pandas as pd pd.options.display.max_columns = None pd.options.display.max_rows = None import os from sklearn import preprocessing from sklearn.linear_model import LogisticRegressionCV,LogisticRegression from sklearn.svm import LinearSVC from sklearn.model_selection import GridSearchCV from sklearn.ensemble import RandomForestClassifier import lightgbm as lgb from scipy import stats import statsmodels.api as sm from sklearn.linear_model import LinearRegression # import pylab as pl import json import copy # Functions ###################=================---------------------- map_folder='./Project/features/att_map_esm/' label_folder='./Project/lables/dist_map/' def Att_map_Data_Generation(att_map,label_map): att_map=att_map.transpose(0,2,3,1)[0,:,:,:] att_map=att_map[1:-1,1:-1,:] Data_Att_map_X = [] Data_Att_map_Y = [] for i in range(att_map.shape[0]): for j in range(i+1,att_map.shape[1]): P1 = list(att_map[i,j,:]) Data_Att_map_X.append(P1) Data_Att_map_Y.append(label_map[i,j]) return np.array(Data_Att_map_X),np.array(Data_Att_map_Y) def my_train(X_train, y_train, model='LR', penalty='l1', cv=5, scoring='f1', class_weight= 'balanced',seed=2020): if model=='SVM': svc=LinearSVC(penalty=penalty, class_weight= class_weight, dual=False, max_iter=10000)#, tol=0.0001 parameters = {'C':[0.001,0.01,0.1,1,10,100,1000]} #'kernel':('linear', 'rbf'), gsearch = GridSearchCV(svc, parameters, cv=cv, scoring=scoring) elif model=='LGB': param_grid = { 'num_leaves': range(2,15,4), 'n_estimators': [50,100,500,1000], 'colsample_bytree': [ 0.1,0.5, 0.9]#[0.6, 0.75, 0.9] } lgb_estimator = lgb.LGBMClassifier(boosting_type='gbdt', objective='binary', learning_rate=0.1, random_state=seed)# eval_metric='auc' num_boost_round=2000, gsearch = GridSearchCV(estimator=lgb_estimator, param_grid=param_grid, cv=cv,n_jobs=-1, scoring=scoring) elif model=='RF': rfc=RandomForestClassifier(random_state=seed, class_weight= class_weight, n_jobs=-1) param_grid = { 'max_features':[0.05,0.3, 0.5, 0.7, 1],#, 0.4, 0.5, 0.6, 0.7, 0.8 [ 'sqrt', 'log2',15],#'auto' 1.0/3, 'n_estimators': [50, 100,500,1000], 'max_depth' : range(2,10,1)#[2, 10] } gsearch = GridSearchCV(estimator=rfc, param_grid=param_grid, cv=cv, scoring=scoring) else: LR = LogisticRegression(penalty=penalty, class_weight= class_weight,solver='liblinear', random_state=seed) parameters = {'C':[0.01,0.1,1,10,100,1000] } gsearch = GridSearchCV(LR, parameters, cv=cv, scoring=scoring) # clf = LogisticRegressionCV(Cs=[10**-1,10**0, 10], penalty=penalty, class_weight= class_weight,solver='liblinear', cv=cv, scoring=scoring, random_state=seed)#, tol=0.01 gsearch.fit(X_train, y_train) clf=gsearch.best_estimator_ if model=='LGB' or model=='RF': print('Best parameters found by grid search are:', gsearch.best_params_) print('train set accuracy:', gsearch.best_score_) return clf def Prediction_Func(Test_X,reg,TOP_Num): att_map=Test_X.transpose(0,2,3,1)[0,:,:,:] att_map=att_map[1:-1,1:-1,:] att_map = att_map[:,:,(660-TOP_Num):] Predicted_Att_map = np.ones((att_map.shape[0],att_map.shape[1]))*100 for i in range(att_map.shape[0]): for j in range(i+1,att_map.shape[1]): P1= reg.predict(np.array(att_map[i,j,:]).reshape(1, -1)) Predicted_Att_map[i,j] = Predicted_Att_map[j,i] = 
P1 return Predicted_Att_map def score(distance_mat, predicted_mat): L = len(predicted_mat) results = [] for seq_sep in [5, 24]: for num_top in [5, 10, int(L/5), int(L/2), int(L)]: assert len(distance_mat) == len(predicted_mat) #L = len(predicted_mat) indices_upper_tri = np.triu_indices(L, seq_sep) df_data = pd.DataFrame() df_data['residue_i'] = indices_upper_tri[0] + 1 df_data['residue_j'] = indices_upper_tri[1] + 1 df_data['confidence'] = predicted_mat[indices_upper_tri] df_data['distance'] = distance_mat[indices_upper_tri] df_data['contact'] = ((df_data.distance < 8) * 1).tolist() df_data.sort_values(by='confidence', ascending=False, inplace=True) sub_true = (df_data.query('distance > 0').head(num_top).query('contact > 0')).shape[0] sub_false = (df_data.query('distance > 0').head(num_top).query('contact < 1')).shape[0] precision = 100 * sub_true / (sub_true + sub_false) results.append([seq_sep, num_top, precision]) df_results = pd.DataFrame(data = results, columns = ['seq_sep', 'num_top', 'precision']) return df_results # Model Training ###################=================---------------------- Training_Flag = 1 TOP_Num = 20 # Top 20 # load array Layer_SCORES =
load('Heads_Scores.npy')
numpy.load
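The completion above reads a plain .npy array back with numpy.load. Unlike the compressed .npz archives earlier in this section, a single-array .npy file round-trips without a key lookup; a minimal sketch (the file name and shape below are illustrative) is:

import numpy as np

scores = np.random.rand(33, 20)          # e.g. per-layer, per-head attention scores
np.save('head_scores_demo.npy', scores)  # writes a single-array .npy file

loaded = np.load('head_scores_demo.npy') # returns the array directly, no ['arr_0'] needed
assert loaded.shape == (33, 20)
assert np.allclose(loaded, scores)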
import cv2 import numpy as np import dlib from image_utils import draw_triangle, draw_square, draw_points, draw_triangles def get_faces(img_gray): detector = dlib.get_frontal_face_detector() faces = detector(img_gray) if len(faces) == 0: print("No faces found") quit() return faces def get_landmarks_points(img_gray, face): # predictor_path = 'models/shape_predictor_68_face_landmarks.dat' predictor_path = 'models/shape_predictor_81_face_landmarks.dat' predictor = dlib.shape_predictor(predictor_path) landmarks = predictor(img_gray, face) landmarks_points = [] for n in range(0, landmarks.num_parts): x = landmarks.part(n).x y = landmarks.part(n).y landmarks_points.append((x, y)) return landmarks_points def get_faceImg_and_mask(img, img_gray, convexhull): mask = np.zeros_like(img_gray) # The pixels of the face are white, rest of the image is black cv2.fillConvexPoly(mask, convexhull, 255) # Only colored face (applied mask) face_image_1 = cv2.bitwise_and(img, img, mask=mask) return face_image_1, mask def extract_index_nparray(nparray): index = None for num in nparray[0]: index = num break return index def get_delaunay_triangulation(landmarks_points, convexhull): points = np.array(landmarks_points, np.int32) # Delaunay triangulation rect = cv2.boundingRect(convexhull) subdiv = cv2.Subdiv2D(rect) subdiv.insert(landmarks_points) triangles = subdiv.getTriangleList() triangles = np.array(triangles, dtype=np.int32) indexes_triangles = [] for t in triangles: pt1 = (t[0], t[1]) pt2 = (t[2], t[3]) pt3 = (t[4], t[5]) index_pt1 = np.where((points == pt1).all(axis=1)) index_pt1 = extract_index_nparray(index_pt1) index_pt2 = np.where((points == pt2).all(axis=1)) index_pt2 = extract_index_nparray(index_pt2) index_pt3 = np.where((points == pt3).all(axis=1)) index_pt3 = extract_index_nparray(index_pt3) if index_pt1 is not None and index_pt2 is not None and index_pt3 is not None: triangle = [index_pt1, index_pt2, index_pt3] indexes_triangles.append(triangle) return indexes_triangles def get_new_face(img, img2, landmarks_points, landmarks_points2, indexes_triangles): img_new_face = np.zeros(img2.shape, np.uint8) # Triangulation of both faces for triangle_index in indexes_triangles: # Triangulation of the first face tr1_pt1 = landmarks_points[triangle_index[0]] tr1_pt2 = landmarks_points[triangle_index[1]] tr1_pt3 = landmarks_points[triangle_index[2]] triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32) rect1 = cv2.boundingRect(triangle1) (x, y, w, h) = rect1 cropped_triangle = img[y: y + h, x: x + w] cropped_tr1_mask = np.zeros((h, w), np.uint8) points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y], [tr1_pt2[0] - x, tr1_pt2[1] - y], [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32) cv2.fillConvexPoly(cropped_tr1_mask, points, 255) # Triangulation of second face tr2_pt1 = landmarks_points2[triangle_index[0]] tr2_pt2 = landmarks_points2[triangle_index[1]] tr2_pt3 = landmarks_points2[triangle_index[2]] triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32) rect2 = cv2.boundingRect(triangle2) (x, y, w, h) = rect2 cropped_tr2_mask = np.zeros((h, w), np.uint8) points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y], [tr2_pt2[0] - x, tr2_pt2[1] - y], [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32) cv2.fillConvexPoly(cropped_tr2_mask, points2, 255) # Warp triangles points = np.float32(points) points2 = np.float32(points2) M = cv2.getAffineTransform(points, points2) warped_triangle = cv2.warpAffine(cropped_triangle, M, (w, h)) warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=cropped_tr2_mask) # 
Reconstructing destination face img_new_face_rect_area = img_new_face[y: y + h, x: x + w] img_new_face_rect_area_gray = cv2.cvtColor(img_new_face_rect_area, cv2.COLOR_BGR2GRAY) _, mask_triangles_designed = cv2.threshold(img_new_face_rect_area_gray, 1, 255, cv2.THRESH_BINARY_INV) warped_triangle = cv2.bitwise_and(warped_triangle, warped_triangle, mask=mask_triangles_designed) img_new_face_rect_area = cv2.add(img_new_face_rect_area, warped_triangle) img_new_face[y: y + h, x: x + w] = img_new_face_rect_area return img_new_face def change_face(img, convexhull, new_face): # Face swapped (putting new_face into convexhull) img_face_mask = np.zeros_like(img[:, :, 0]) img_head_mask = cv2.fillConvexPoly(img_face_mask, convexhull, 255) img_face_mask = cv2.bitwise_not(img_head_mask) img2_head_noface = cv2.bitwise_and(img, img, mask=img_face_mask) result = cv2.add(img2_head_noface, new_face) # Applying color filter (x, y, w, h) = cv2.boundingRect(convexhull) center_face2 = (int((x + x + w) / 2), int((y + y + h) / 2)) seamlessclone = cv2.seamlessClone(result, img, img_head_mask, center_face2, cv2.NORMAL_CLONE) return seamlessclone def swap_faces(img, img2=None): img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) if img2 is not None: img_gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) face = get_faces(img_gray)[0] face2 = get_faces(img_gray2)[0] landmarks_points = get_landmarks_points(img_gray, face) landmarks_points2 = get_landmarks_points(img_gray2, face2) convexhull = cv2.convexHull(np.array(landmarks_points, np.int32)) convexhull2 = cv2.convexHull(np.array(landmarks_points2, np.int32)) indexes_triangles = get_delaunay_triangulation(landmarks_points, convexhull) indexes_triangles2 = get_delaunay_triangulation(landmarks_points2, convexhull2) img2_new_face = get_new_face(img, img2, landmarks_points, landmarks_points2, indexes_triangles) img_new_face = get_new_face(img2, img, landmarks_points2, landmarks_points, indexes_triangles2) img2_changed_face = change_face(img2, convexhull2, img2_new_face) img_changed_face = change_face(img, convexhull, img_new_face) return img_changed_face, img2_changed_face else: faces = get_faces(img_gray) face = faces[0] face2 = faces[1] landmarks_points = get_landmarks_points(img_gray, face) landmarks_points2 = get_landmarks_points(img_gray, face2) convexhull = cv2.convexHull(np.array(landmarks_points, np.int32)) convexhull2 = cv2.convexHull(np.array(landmarks_points2, np.int32)) indexes_triangles = get_delaunay_triangulation(landmarks_points, convexhull) indexes_triangles2 = get_delaunay_triangulation(landmarks_points2, convexhull2) img_new_face = get_new_face(img, img, landmarks_points2, landmarks_points, indexes_triangles2) img2_new_face = get_new_face(img, img, landmarks_points, landmarks_points2, indexes_triangles) img_changed_face1 = change_face(img, convexhull2, img2_new_face) img_changed_faces = change_face(img_changed_face1, convexhull, img_new_face) return img_changed_faces def get_process(img, img2): img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) face = get_faces(img_gray)[0] face2 = get_faces(img_gray2)[0] landmarks_points = get_landmarks_points(img_gray, face) landmarks_points2 = get_landmarks_points(img_gray2, face2) convexhull = cv2.convexHull(np.array(landmarks_points, np.int32)) convexhull2 = cv2.convexHull(np.array(landmarks_points2, np.int32)) indexes_triangles = get_delaunay_triangulation(landmarks_points, convexhull) indexes_triangles2 = get_delaunay_triangulation(landmarks_points2, convexhull2) 
img_new_face = get_new_face(img2, img, landmarks_points2, landmarks_points, indexes_triangles2) img2_new_face = get_new_face(img, img2, landmarks_points, landmarks_points2, indexes_triangles) img_changed_face_filtered = change_face(img, convexhull, img_new_face) img2_changed_face_filtered = change_face(img2, convexhull2, img2_new_face) color = (0, 255, 0) thickness = 2 # RESULTS 1 img_face = draw_square(img.copy(), [(face.left(), face.top()), (face.right(), face.bottom())], color=color, thickness=thickness) img_landmarks = draw_points(img.copy(), landmarks_points, color=color, thickness=thickness) img_face_contour = cv2.polylines(img.copy(), [convexhull], True, color, thickness) face_image, face_mask = get_faceImg_and_mask(img, img_gray, convexhull) # White mask for showing results white_mask = cv2.fillConvexPoly(np.ones_like(img) * 255, convexhull, 0) face_image = white_mask + face_image triangles_points = get_triangles_points(landmarks_points, indexes_triangles) face_triangles = draw_triangles(face_image.copy(), triangles_points, color=color, thickness=thickness) # Face swap without applying filter img_face_mask = np.zeros_like(img[:, :, 0]) img_head_mask = cv2.fillConvexPoly(img_face_mask, convexhull, 255) img_face_mask = cv2.bitwise_not(img_head_mask) img_head_noface = cv2.bitwise_and(img, img, mask=img_face_mask) img_changed_face = cv2.add(img_head_noface, img_new_face) # White mask for showing results img_face_mask_white = np.ones_like(img[:, :, :])*255 img_head_mask_white = cv2.fillConvexPoly(img_face_mask_white, convexhull, 0) # RESULTS 2 img_face2 = draw_square(img2.copy(), [(face2.left(), face2.top()), (face2.right(), face2.bottom())], color=color, thickness=thickness) img_landmarks2 = draw_points(img2.copy(), landmarks_points2, color=color, thickness=thickness) img_face_contour2 = cv2.polylines(img2.copy(), [convexhull2], True, color, thickness) face_image2, face_mask2 = get_faceImg_and_mask(img2, img_gray2, convexhull2) # White mask for showing results white_mask2 = cv2.fillConvexPoly(np.ones_like(img2) * 255, convexhull2, 0) face_image2 = white_mask2 + face_image2 triangles_points2 = get_triangles_points(landmarks_points2, indexes_triangles2) face_triangles2 = draw_triangles(face_image2.copy(), triangles_points2, color=color, thickness=thickness) # Face swap without applying filter img2_face_mask = np.zeros_like(img2[:, :, 0]) img2_head_mask = cv2.fillConvexPoly(img2_face_mask, convexhull2, 255) img2_face_mask = cv2.bitwise_not(img2_head_mask) img2_head_noface = cv2.bitwise_and(img2, img2, mask=img2_face_mask) img2_changed_face = cv2.add(img2_head_noface, img2_new_face) # White mask for showing results img2_face_mask_white = np.ones_like(img2[:, :, :]) * 255 img2_head_mask_white = cv2.fillConvexPoly(img2_face_mask_white, convexhull2, 0) result = [ (img, img_face, img_landmarks, img_face_contour, face_image, face_triangles), (img2, img_face2, img_landmarks2, img_face_contour2, face_image2, face_triangles2), (img_new_face + img_head_mask_white, img_changed_face, img_changed_face_filtered), (img2_new_face + img2_head_mask_white, img2_changed_face, img2_changed_face_filtered) ] return result def get_triangles_points(points, indexes): result_points = [] for index in indexes: tr2_pt1 = points[index[0]] tr2_pt2 = points[index[1]] tr2_pt3 = points[index[2]] result_points.append([tr2_pt1, tr2_pt2, tr2_pt3]) return result_points def get_triangles_img(img, img2): img_gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY) img_gray2 = cv2.cvtColor(img2, cv2.COLOR_BGR2GRAY) face = 
get_faces(img_gray)[0] face2 = get_faces(img_gray2)[0] landmarks_points = get_landmarks_points(img_gray, face) landmarks_points2 = get_landmarks_points(img_gray2, face2) convexhull = cv2.convexHull(np.array(landmarks_points, np.int32)) convexhull2 = cv2.convexHull(np.array(landmarks_points2, np.int32)) indexes_triangles = get_delaunay_triangulation(landmarks_points, convexhull) img_new_face = np.zeros(img2.shape, np.uint8) # Triangulation of both faces for triangle_index in indexes_triangles: # Triangulation of the first face tr1_pt1 = landmarks_points[triangle_index[0]] tr1_pt2 = landmarks_points[triangle_index[1]] tr1_pt3 = landmarks_points[triangle_index[2]] triangle1 = np.array([tr1_pt1, tr1_pt2, tr1_pt3], np.int32) rect1 = cv2.boundingRect(triangle1) (x, y, w, h) = rect1 cropped_triangle = img[y: y + h, x: x + w] cropped_tr1_mask = np.zeros((h, w), np.uint8) points = np.array([[tr1_pt1[0] - x, tr1_pt1[1] - y], [tr1_pt2[0] - x, tr1_pt2[1] - y], [tr1_pt3[0] - x, tr1_pt3[1] - y]], np.int32) cv2.fillConvexPoly(cropped_tr1_mask, points, 255) # Triangulation of second face tr2_pt1 = landmarks_points2[triangle_index[0]] tr2_pt2 = landmarks_points2[triangle_index[1]] tr2_pt3 = landmarks_points2[triangle_index[2]] triangle2 = np.array([tr2_pt1, tr2_pt2, tr2_pt3], np.int32) rect2 = cv2.boundingRect(triangle2) (x, y, w, h) = rect2 cropped_tr2_mask = np.zeros((h, w), np.uint8) points2 = np.array([[tr2_pt1[0] - x, tr2_pt1[1] - y], [tr2_pt2[0] - x, tr2_pt2[1] - y], [tr2_pt3[0] - x, tr2_pt3[1] - y]], np.int32) cv2.fillConvexPoly(cropped_tr2_mask, points2, 255) # Warp triangles points = np.float32(points) points2 =
np.float32(points2)
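# --- Added usage sketch (illustrative only; not part of the original module). ---
# It assumes the dlib landmark model referenced in get_landmarks_points() is
# available on disk, and the two image paths below are hypothetical placeholders.
if __name__ == "__main__":
    img_a = cv2.imread("person_a.jpg")   # hypothetical input image
    img_b = cv2.imread("person_b.jpg")   # hypothetical input image
    swapped_a, swapped_b = swap_faces(img_a, img_b)
    cv2.imwrite("person_a_swapped.jpg", swapped_a)
    cv2.imwrite("person_b_swapped.jpg", swapped_b)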
from datetime import date as dt import numpy as np import pandas as pd import pytest import talib import os from finance_tools_py.simulation import Simulation from finance_tools_py.simulation.callbacks import talib as cb_talib from finance_tools_py.simulation import callbacks @pytest.fixture def init_global_data(): pytest.global_code = '000001' pytest.global_data = pd.DataFrame({ 'code': [pytest.global_code for x in range(1998, 2020)], 'date': [dt(y, 1, 1) for y in range(1998, 2020)], 'close': np.random.random((len(list(range(1998, 2020))), )), 'high': np.random.random((len(list(range(1998, 2020))), )), 'low': np.random.random((len(list(range(1998, 2020))), )), }) @pytest.fixture def mock_data(): pytest.mock_code = '600036' if "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true": pytest.mock_data = pd.read_csv('tests/data/600036.csv', index_col=None) else: pytest.mock_data = pd.read_csv('data/600036.csv', index_col=None) pytest.mock_data['date'] = pd.to_datetime(pytest.mock_data['date']) def _mock_data(size): return pd.DataFrame({ 'close': np.random.random((len(list(range(size))), )), 'high': np.random.random((len(list(range(size))), )), 'low': np.random.random((len(list(range(size))), )), }) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. This is an example.") def test_example_BBANDS(): print('>>> from finance_tools_py.simulation.callbacks.talib import BBANDS') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import BBANDS print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)]})") data = pd.DataFrame({'close': [y for y in np.arange(0.0, 8.0)]}) print(">>> print(data['close'].values)") print(data['close'].values) t = 3 u = 2.4 d = 2.7 print('>>> t = {}'.format(t)) print('>>> u = {}'.format(u)) print('>>> d = {}'.format(d)) print(">>> s = Simulation(data,'',callbacks=[BBANDS(t, u, d)])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[BBANDS(t, u, d)]) s.simulate() print(">>> cols = [col for col in data.columns if 'bbands' in col]") cols = [col for col in s.data.columns if 'bbands' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col, np.round(s.data[col].values, 2))) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. 
This is an example.") def test_example_WILLR(init_global_data): print('>>> from finance_tools_py.simulation.callbacks.talib import WILLR') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import WILLR print(">>> data = pd.DataFrame({'close': [y for y in range(0, 8)],\n\ 'high': [y for y in range(0.1, 8.2)],\n\ 'low': [y for y in range(0.2, 8.2)]})") data = pd.DataFrame({ 'close': [y for y in np.arange(0.0, 8.0)], 'high': [y for y in np.arange(0.1, 8.1)], 'low': [y for y in np.arange(0.2, 8.2)] }) print(">>> print(data)") print(data) t = 3 print('>>> t={}'.format(t)) print(">>> s = Simulation(data,'',callbacks=[WILLR(t)])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[WILLR(t)]) s.simulate() print(">>> cols = [col for col in data.columns if 'willr' in col]") cols = [col for col in s.data.columns if 'willr' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col, np.round(s.data[col].values, 2))) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. This is an example.") def test_example_CCI(init_global_data): print('>>> from finance_tools_py.simulation.callbacks.talib import CCI') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import CCI print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\ 'high': [y for y in range(10.1,15.0)],\n\ 'low': [y for y in range(0.0, 4.9)]})") data = pd.DataFrame({ 'close': [y for y in np.arange(5.0, 10.0)], 'high': [y for y in np.arange(10.1, 15.0)], 'low': [y for y in np.arange(0.0, 4.9)] }) print(">>> print(data)") print(data) t = 3 print('>>> t = {}'.format(t)) print(">>> s = Simulation(data,'',callbacks=[CCI(t)])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[CCI(t)]) s.simulate() print(">>> cols = [col for col in data.columns if 'cci' in col]") cols = [col for col in s.data.columns if 'cci' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col, np.round(s.data[col].values, 2))) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. 
This is an example.") def test_example_ATR(init_global_data): print('>>> from finance_tools_py.simulation.callbacks.talib import ATR') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import ATR print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\ 'high': [y for y in range(10.1,15.0)],\n\ 'low': [y for y in range(0.0, 4.9)]})") data = pd.DataFrame({ 'close': [y for y in np.arange(5.0, 10.0)], 'high': [y for y in np.arange(10.1, 15.0)], 'low': [y for y in np.arange(0.0, 4.9)] }) print(">>> print(data)") print(data) t = 3 print('>>> t = {}'.format(t)) print(">>> s = Simulation(data,'',callbacks=[ATR(t)])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[ATR(t)]) s.simulate() print(">>> cols = [col for col in data.columns if 'atr' in col]") cols = [col for col in s.data.columns if 'atr' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col, np.round(s.data[col].values, 2))) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. This is an example.") def test_example_LINEARREG_SLOPE(init_global_data): print('>>> from finance_tools_py.simulation.callbacks.talib import ATR') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import LINEARREG_SLOPE print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)]})") data = pd.DataFrame({ 'close': [y for y in np.arange(5.0, 10.0)] }) print(">>> print(data)") print(data) t = 3 print('>>> t = {}'.format(t)) print(">>> s = Simulation(data,'',callbacks=[LINEARREG_SLOPE('close',t)])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[LINEARREG_SLOPE('close',t)]) s.simulate() print(">>> cols = [col for col in data.columns if 'lineSlope' in col]") cols = [col for col in s.data.columns if 'lineSlope' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col, np.round(s.data[col].values, 2))) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. This is an example.") def test_example_TRANGE(init_global_data): print('>>> from finance_tools_py.simulation.callbacks.talib import TRANGE') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import TRANGE print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\ 'high': [y for y in range(10.1,15.0)],\n\ 'low': [y for y in range(0.0, 4.9)]})") data = pd.DataFrame({ 'close': [y for y in np.arange(5.0, 10.0)], 'high': [y for y in np.arange(10.1, 15.0)], 'low': [y for y in np.arange(0.0, 4.9)] }) print(">>> print(data)") print(data) print(">>> s = Simulation(data,'',callbacks=[TRANGE()])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[TRANGE()]) s.simulate() print(">>> cols = [col for col in data.columns if 'trange' in col]") cols = [col for col in s.data.columns if 'trange' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col, np.round(s.data[col].values, 2))) @pytest.mark.skipif( "TRAVIS" in os.environ and os.environ["TRAVIS"] == "true", reason="Skipping this test on Travis CI. 
This is an example.") def test_example_NATR(init_global_data): print('>>> from finance_tools_py.simulation.callbacks.talib import NATR') print('>>> from finance_tools_py.simulation import Simulation') from finance_tools_py.simulation.callbacks.talib import NATR print(">>> data = pd.DataFrame({'close': [y for y in range(5.0, 10.0)],\n\ 'high': [y for y in range(10.1,15.0)],\n\ 'low': [y for y in range(0.0, 4.9)]})") data = pd.DataFrame({ 'close': [y for y in np.arange(5.0, 10.0)], 'high': [y for y in np.arange(10.1, 15.0)], 'low': [y for y in np.arange(0.0, 4.9)] }) print(">>> print(data)") print(data) t = 3 print('>>> t = {}'.format(t)) print(">>> s = Simulation(data,'',callbacks=[NATR(t)])") print('>>> s.simulate()') s = Simulation(data, '', callbacks=[NATR(t)]) s.simulate() print(">>> cols = [col for col in data.columns if 'atr' in col]") cols = [col for col in s.data.columns if 'natr' in col] print(">>> for col in cols:") print(">>> print('{}:{}'.format(col,np.round(s.data[col].values,2)))") for col in cols: print('{}:{}'.format(col,
np.round(s.data[col].values, 2)
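# --- Added note (illustrative only; not part of the original test module). ---
# The functions above are example-style tests; each one can be run individually
# with pytest's keyword selection, keeping the demonstration print() output
# visible, e.g.:
#
#     pytest -s -k test_example_BBANDS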
import matplotlib.pyplot as plt
import numpy as np
from scipy.ndimage import gaussian_filter
import scipy.stats as st


def whist(x, smooth=True, kde_n=512, kde_range=None, bins='auto', plot=None,
          kde_kwargs=None, hist_kwargs=None, **kwargs):
    """
    Turn an array of samples, x, into an estimate of probability density at a
    discrete set of x values, possibly with some weights for the samples, and
    possibly doing some smoothing.

    Return value is a dictionary with keys 'x' and 'density'.

    Calls scipy.stats.gaussian_kde if smooth=True, numpy.histogram otherwise.
    If smooth=False or None, does no smoothing. If smooth is a positive integer,
    do fixed-kernel Gaussian smoothing.

    Additional options:
        kde_n (kde) - number of points to evaluate at (linearly covers kde_range)
        kde_range (kde) - range of kde evaluation (defaults to range of x)
        bins (histogram) - number of bins to use or array of bin edges
        plot (either) - if not None, plot the thing to this matplotlib device
        kde_kwargs - dictionary of options to gaussian_kde
        hist_kwargs - dictionary of options to histogram
        **kwargs - additional options valid for EITHER gaussian_kde or histogram,
                   especially `weights`
    """
    if kde_kwargs is None:
        kde_kwargs = {}
    if hist_kwargs is None:
        hist_kwargs = {}
    if plot is not None:
        plot.hist(x, bins=bins, density=True, fill=False, **kwargs)
    if smooth is True:
        if kde_range is None:
            kde_range = (x.min(), x.max())
        h = {'x': np.linspace(kde_range[0], kde_range[1], kde_n)}
        h['density'] = st.gaussian_kde(x, **kde_kwargs, **kwargs)(h['x'])
    else:
        hi = np.histogram(x, bins=bins, density=True, **hist_kwargs, **kwargs)
        nb = len(hi[0])
        h = {'x': 0.5*(hi[1][range(1, nb+1)] + hi[1][range(nb)]),
             'density': hi[0]}
        if smooth is False or smooth is None or smooth == 0:
            pass
        else:
            h['density'] = gaussian_filter(h['density'], smooth, mode='constant', cval=0.0)
    if plot is not None:
        plot.plot(h['x'], h['density'], 'b-')
    return h


def ci1D_plot(h, ci, plot, plot_mode=True, plot_levels=True, plot_ci=True, fill_ci=True,
              pdf_kwargs=None, mode_kwargs=None, level_kwargs=None, ci_kwargs=None,
              fill_kwargs=None, fill_colors=None):
    """
    `h` is a dictionary with keys 'x' and 'density' (e.g. from `whist`).
    `ci` is output from whist_ci

    plot_mode, plot_levels, plot_ci, fill_ci - bells and whistles to include

    If `ci` has a 'color' entry, this will override `fill_colors` for shading of
    the intervals. This can be useful if there are multiply connected CI's, which
    is a pain for upstream programs to test for.
    """
    if pdf_kwargs is None:
        pdf_kwargs = {}
    if mode_kwargs is None:
        mode_kwargs = {}
    if level_kwargs is None:
        level_kwargs = {}
    if ci_kwargs is None:
        ci_kwargs = {}
    if fill_kwargs is None:
        fill_kwargs = {}
    if fill_ci:
        for i in range(len(ci['low'])-1, -1, -1):
            kw = {'color': str(ci['level'][i])}
            for k in fill_kwargs.keys():
                kw[k] = fill_kwargs[k]
            if fill_colors is not None:
                kw['color'] = fill_colors[i]
            try:
                kw['color'] = ci['color'][i]
            except KeyError:
                pass
            j = np.where(np.logical_and(h['x'] >= ci['min'][i], h['x'] <= ci['max'][i]))[0]
            plot.fill(np.concatenate(([ci['min'][i]], h['x'][j], [ci['max'][i]])),
                      np.concatenate(([0.0], h['density'][j], [0.0]))
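# --- Added usage sketch (illustrative only; not part of the original module). ---
# Draws Gaussian samples, estimates their density with whist(), and plots both
# the histogram and the KDE curve on one axis.
if __name__ == "__main__":
    rng = np.random.default_rng(42)
    samples = rng.normal(loc=0.0, scale=1.0, size=2000)
    fig, ax = plt.subplots()
    h = whist(samples, smooth=True, plot=ax)
    # h['x'] is the evaluation grid, h['density'] the estimated density
    plt.show()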
# -*- coding: utf-8 -*- """ @author: <NAME> @name: Bootstrap Controller @summary: This module provides functions that will control the bootstrapping procedure. """ from __future__ import absolute_import import sys import time import itertools from copy import deepcopy import numpy as np import pandas as pd # Import the following module for progressbar displays from tqdm import tqdm, tqdm_notebook from .display_names import model_type_to_display_name from . import bootstrap_sampler as bs from . import bootstrap_calcs as bc from . import bootstrap_abc as abc from .bootstrap_mle import retrieve_point_est from .bootstrap_utils import ensure_samples_is_ndim_ndarray from .construct_estimator import create_estimation_obj try: # Python 3.x does not natively support xrange from past.builtins import xrange except ImportError: pass def is_kernel(): """ Determines whether or not one's code is executed inside of an ipython notebook environment. """ if any([x in sys.modules for x in ['ipykernel', 'IPython']]): return True else: return False # Create a progressbar iterable based on wehther we are in ipython or not. PROGRESS = tqdm_notebook if is_kernel() else tqdm def get_param_names(model_obj): """ Extracts all the names to be displayed for the estimated parameters. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. Returns ------- all_names : list of strings. There will be one element for each estimated parameter. The order of the parameter names will be `['nest_parameters', 'shape_parameters', 'outside_intercepts', 'index_coefficients']`. """ # Get the index coefficient names all_names = deepcopy(model_obj.ind_var_names) # Add the intercept names if any exist if model_obj.intercept_names is not None: all_names = model_obj.intercept_names + all_names # Add the shape names if any exist if model_obj.shape_names is not None: all_names = model_obj.shape_names + all_names # Add the nest names if any exist if model_obj.nest_names is not None: all_names = model_obj.nest_names + all_names return all_names def get_param_list_for_prediction(model_obj, replicates): """ Create the `param_list` argument for use with `model_obj.predict`. Parameters ---------- model_obj : an instance of an MNDC object. Should have the following attributes: `['ind_var_names', 'intercept_names', 'shape_names', 'nest_names']`. This model should have already undergone a complete estimation process. I.e. its `fit_mle` method should have been called without `just_point=True`. replicates : 2D ndarray. Should represent the set of parameter values that we now wish to partition for use with the `model_obj.predict` method. Returns ------- param_list : list. Contains four elements, each being a numpy array. Either all of the arrays should be 1D or all of the arrays should be 2D. If 2D, the arrays should have the same number of columns. Each column being a particular set of parameter values that one wants to predict with. The first element in the list should be the index coefficients. The second element should contain the 'outside' intercept parameters if there are any, or None otherwise. The third element should contain the shape parameters if there are any or None otherwise. The fourth element should contain the nest coefficients if there are any or None otherwise. Default == None. 
""" # Check the validity of the passed arguments ensure_samples_is_ndim_ndarray(replicates, ndim=2, name='replicates') # Determine the number of index coefficients, outside intercepts, # shape parameters, and nest parameters num_idx_coefs = len(model_obj.ind_var_names) intercept_names = model_obj.intercept_names num_outside_intercepts =\ 0 if intercept_names is None else len(intercept_names) shape_names = model_obj.shape_names num_shapes = 0 if shape_names is None else len(shape_names) nest_names = model_obj.nest_names num_nests = 0 if nest_names is None else len(nest_names) parameter_numbers =\ [num_nests, num_shapes, num_outside_intercepts, num_idx_coefs] current_idx = 0 param_list = [] for param_num in parameter_numbers: if param_num == 0: param_list.insert(0, None) continue upper_idx = current_idx + param_num param_list.insert(0, replicates[:, current_idx:upper_idx].T) current_idx += param_num return param_list def ensure_replicates_kwarg_validity(replicate_kwarg): """ Ensures `replicate_kwarg` is either 'bootstrap' or 'jackknife'. Raises a helpful ValueError otherwise. """ if replicate_kwarg not in ['bootstrap', 'jackknife']: msg = "`replicates` MUST be either 'bootstrap' or 'jackknife'." raise ValueError(msg) return None class Boot(object): """ Class to perform bootstrap resampling and to store and display its results. Parameters ---------- model_obj : an instance or sublcass of the MNDC class. mle_params : 1D ndarray. Should contain the desired model's maximum likelihood point estimate. """ def __init__(self, model_obj, mle_params): # Store the model object. self.model_obj = model_obj # Determine the parameter names param_names = get_param_names(model_obj) # Store the MLE parameters self.mle_params = pd.Series(mle_params, index=param_names) # Initialize the attributes that will be used later on. desired_attributes =\ ["bootstrap_replicates", "jackknife_replicates", "percentile_interval", "bca_interval", "abc_interval", "all_intervals", "jackknife_log_likehoods", "bootstrap_log_likelihoods"] for attr_name in desired_attributes: setattr(self, attr_name, None) return None def generate_bootstrap_replicates(self, num_samples, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None, boot_seed=None, weights=None): """ Generates the bootstrap replicates for one's given model and dataset. Parameters ---------- num_samples : positive int. Specifies the number of bootstrap samples that are to be drawn. mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. 
Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. boot_seed = non-negative int or None, optional. Denotes the random seed to be used when generating the bootstrap samples. If None, the sample generation process will generally be non-reproducible. Default == None. weights : 1D ndarray or None, optional. Allows for the calculation of weighted log-likelihoods. The weights can represent various things. In stratified samples, the weights may be the proportion of the observations in a given strata for a sample in relation to the proportion of observations in that strata in the population. In latent class models, the weights may be the probability of being a particular class. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute. """ print("Generating Bootstrap Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Check the passed arguments for validity. # Create an array of the observation ids obs_id_array = self.model_obj.data[self.model_obj.obs_id_col].values # Alias the alternative IDs and the Choice Array alt_id_array = self.model_obj.alt_IDs choice_array = self.model_obj.choices # Determine how many parameters are being estimated. num_params = self.mle_params.shape[0] # Figure out which observations are in each bootstrap sample. obs_id_per_sample =\ bs.create_cross_sectional_bootstrap_samples(obs_id_array, alt_id_array, choice_array, num_samples, seed=boot_seed) # Get the dictionary of sub-dataframes for each observation id dfs_by_obs_id =\ bs.create_deepcopied_groupby_dict(self.model_obj.data, self.model_obj.obs_id_col) # Create a column name for the bootstrap id columns. boot_id_col = "bootstrap_id" # Initialize an array to store the bootstrapped point estimates. point_estimates = np.empty((num_samples, num_params), dtype=float) # Get keyword arguments for final model estimation with new data. 
fit_kwargs = {"print_res": print_res, "method": method, "loss_tol": loss_tol, "gradient_tol": gradient_tol, "maxiter": maxiter, "ridge": ridge, "constrained_pos": constrained_pos, "just_point": True} # Get the specification and name dictionary of the MNL model. mnl_spec = None if mnl_obj is None else mnl_obj.specification mnl_names = None if mnl_obj is None else mnl_obj.name_spec # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(num_samples), desc="Creating Bootstrap Replicates", total=num_samples) # Iterate through the bootstrap samples and perform the MLE for row in iterable_for_iteration: # Get the bootstrapped dataframe bootstrap_df =\ bs.create_bootstrap_dataframe(self.model_obj.data, self.model_obj.obs_id_col, obs_id_per_sample[row, :], dfs_by_obs_id, boot_id_col=boot_id_col) # Go through the necessary estimation routine to bootstrap the MLE. current_results =\ retrieve_point_est(self.model_obj, bootstrap_df, boot_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs) # Store the bootstrapped point estimate. point_estimates[row] = current_results["x"] # Store the point estimates as a pandas dataframe self.bootstrap_replicates =\ pd.DataFrame(point_estimates, columns=self.mle_params.index) # Print a 'finished' message for users print("Finished Generating Bootstrap Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) return None def generate_jackknife_replicates(self, mnl_obj=None, mnl_init_vals=None, mnl_fit_kwargs=None, extract_init_vals=None, print_res=False, method="BFGS", loss_tol=1e-06, gradient_tol=1e-06, maxiter=1000, ridge=None, constrained_pos=None): """ Generates the jackknife replicates for one's given model and dataset. Parameters ---------- mnl_obj : an instance of pylogit.MNL or None, optional. Should be the MNL model object that is used to provide starting values for the final model being estimated. If None, then one's final model should be an MNL model. Default == None. mnl_init_vals : 1D ndarray or None, optional. If the model that is being estimated is not an MNL, then `mnl_init_val` should be passed. Should contain the values used to begin the estimation process for the MNL model that is used to provide starting values for our desired model. Default == None. mnl_fit_kwargs : dict or None. If the model that is being estimated is not an MNL, then `mnl_fit_kwargs` should be passed. extract_init_vals : callable or None, optional. Should accept 3 arguments, in the following order. First, it should accept `orig_model_obj`. Second, it should accept a pandas Series of estimated parameters from the MNL model. The Series' index will be the names of the coefficients from `mnl_names`. Thirdly, it should accept an int denoting the number of parameters in the final choice model. The callable should return a 1D ndarray of starting values for the final choice model. Default == None. print_res : bool, optional. Determines whether the timing and initial and final log likelihood results will be printed as they they are determined. Default `== True`. method : str, optional. Should be a valid string for scipy.optimize.minimize. Determines the optimization algorithm that is used for this problem. Default `== 'bfgs'`. loss_tol : float, optional. Determines the tolerance on the difference in objective function values from one iteration to the next that is needed to determine convergence. Default `== 1e-06`. gradient_tol : float, optional. 
Determines the tolerance on the difference in gradient values from one iteration to the next which is needed to determine convergence. Default `== 1e-06`. maxiter : int, optional. Determines the maximum number of iterations used by the optimizer. Default `== 1000`. ridge : int, float, long, or None, optional. Determines whether or not ridge regression is performed. If a scalar is passed, then that scalar determines the ridge penalty for the optimization. The scalar should be greater than or equal to zero. Default `== None`. constrained_pos : list or None, optional. Denotes the positions of the array of estimated parameters that are not to change from their initial values. If a list is passed, the elements are to be integers where no such integer is greater than `init_vals.size.` Default == None. Returns ------- None. Will store the bootstrap replicates on the `self.bootstrap_replicates` attribute. """ print("Generating Jackknife Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) sys.stdout.flush() # Take note of the observation id column that is to be used obs_id_col = self.model_obj.obs_id_col # Get the array of original observation ids orig_obs_id_array =\ self.model_obj.data[obs_id_col].values # Get an array of the unique observation ids. unique_obs_ids = np.sort(np.unique(orig_obs_id_array)) # Determine how many observations are in one's dataset. num_obs = unique_obs_ids.size # Determine how many parameters are being estimated. num_params = self.mle_params.size # Get keyword arguments for final model estimation with new data. fit_kwargs = {"print_res": print_res, "method": method, "loss_tol": loss_tol, "gradient_tol": gradient_tol, "maxiter": maxiter, "ridge": ridge, "constrained_pos": constrained_pos, "just_point": True} # Get the specification and name dictionary of the MNL model. mnl_spec = None if mnl_obj is None else mnl_obj.specification mnl_names = None if mnl_obj is None else mnl_obj.name_spec # Initialize the array of jackknife replicates point_replicates = np.empty((num_obs, num_params), dtype=float) # Create an iterable for iteration iterable_for_iteration = PROGRESS(enumerate(unique_obs_ids), desc="Creating Jackknife Replicates", total=unique_obs_ids.size) # Populate the array of jackknife replicates for pos, obs_id in iterable_for_iteration: # Create the dataframe without the current observation new_df = self.model_obj.data.loc[orig_obs_id_array != obs_id] # Get the point estimate for this new dataset current_results =\ retrieve_point_est(self.model_obj, new_df, obs_id_col, num_params, mnl_spec, mnl_names, mnl_init_vals, mnl_fit_kwargs, extract_init_vals=extract_init_vals, **fit_kwargs) # Store the estimated parameters point_replicates[pos] = current_results['x'] # Store the jackknife replicates as a pandas dataframe self.jackknife_replicates =\ pd.DataFrame(point_replicates, columns=self.mle_params.index) # Print a 'finished' message for users print("Finished Generating Jackknife Replicates") print(time.strftime("%a %m-%d-%Y %I:%M%p")) return None def calc_log_likes_for_replicates(self, replicates='bootstrap', num_draws=None, seed=None): """ Calculate the log-likelihood value of one's replicates, given one's dataset. Parameters ---------- replicates : str in {'bootstrap', 'jackknife'}. Denotes which set of replicates should have their log-likelihoods calculated. num_draws : int greater than zero or None, optional. Denotes the number of random draws for mixed logit estimation. If None, then no random draws will be made. Default == None. 
seed : int greater than zero or None, optional. Denotes the random seed to be used for mixed logit estimation. If None, then no random seed will be set. Default == None. Returns ------- log_likelihoods : 1D ndarray. Each element stores the log-likelihood of the associated parameter values on the model object's dataset. The log-likelihoods are also stored on the `replicates + '_log_likelihoods'` attribute. """ # Check the validity of the kwargs ensure_replicates_kwarg_validity(replicates) # Get the desired type of replicates replicate_vec = getattr(self, replicates + "_replicates").values # Determine the choice column choice_col = self.model_obj.choice_col # Split the control flow based on whether we're using a Nested Logit current_model_type = self.model_obj.model_type non_2d_predictions =\ [model_type_to_display_name["Nested Logit"], model_type_to_display_name["Mixed Logit"]] if current_model_type not in non_2d_predictions: # Get the param list for this set of replicates param_list =\ get_param_list_for_prediction(self.model_obj, replicate_vec) # Get the 'chosen_probs' using the desired set of replicates chosen_probs =\ self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col) else: # Initialize a list of chosen probs chosen_probs_list = [] # Create an iterable for iteration iterable_for_iteration = PROGRESS(xrange(replicate_vec.shape[0]), desc="Calculate Gradient Norms", total=replicate_vec.shape[0]) # Populate the list of chosen probabilities for each vector of # parameter values for idx in iterable_for_iteration: # Get the param list for this set of replicates param_list =\ get_param_list_for_prediction(self.model_obj, replicate_vec[idx][None, :]) # Use 1D parameters in the prediction function param_list =\ [x.ravel() if x is not None else x for x in param_list] # Get the 'chosen_probs' using the desired set of replicates chosen_probs =\ self.model_obj.predict(self.model_obj.data, param_list=param_list, return_long_probs=False, choice_col=choice_col, num_draws=num_draws, seed=seed) # store those chosen prob_results chosen_probs_list.append(chosen_probs[:, None]) # Get the final array of chosen probs chosen_probs = np.concatenate(chosen_probs_list, axis=1) # Calculate the log_likelihood log_likelihoods =
np.log(chosen_probs)
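# --- Added usage sketch (illustrative only; not part of the original module). ---
# `fitted_model` stands in for an already-estimated pylogit model object and
# `mle_estimates` for its 1D array of MLE point estimates; neither is defined in
# this excerpt.
#
#     boot = Boot(fitted_model, mle_estimates)
#     boot.generate_bootstrap_replicates(num_samples=200, boot_seed=601)
#     boot.calc_log_likes_for_replicates(replicates='bootstrap')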
""" """ from __future__ import absolute_import, division, print_function import numpy as np import pytest from astropy.utils.misc import NumpyRNGContext from ..mean_los_velocity_vs_rp import mean_los_velocity_vs_rp from ...tests.cf_helpers import generate_locus_of_3d_points __all__ = ('test_mean_los_velocity_vs_rp_correctness1', 'test_mean_los_velocity_vs_rp_correctness2', 'test_mean_los_velocity_vs_rp_correctness3', 'test_mean_los_velocity_vs_rp_correctness4', 'test_mean_los_velocity_vs_rp_parallel', 'test_mean_los_velocity_vs_rp_auto_consistency', 'test_mean_los_velocity_vs_rp_cross_consistency') fixed_seed = 43 def pure_python_mean_los_velocity_vs_rp( sample1, velocities1, sample2, velocities2, rp_min, rp_max, pi_max, Lbox=None): """ Brute force pure python function calculating mean los velocities in a single bin of separation. """ if Lbox is None: xperiod, yperiod, zperiod = np.inf, np.inf, np.inf else: xperiod, yperiod, zperiod = Lbox, Lbox, Lbox npts1, npts2 = len(sample1), len(sample2) running_tally = [] for i in range(npts1): for j in range(npts2): dx = sample1[i, 0] - sample2[j, 0] dy = sample1[i, 1] - sample2[j, 1] dz = sample1[i, 2] - sample2[j, 2] dvz = velocities1[i, 2] - velocities2[j, 2] if dx > xperiod/2.: dx = xperiod - dx elif dx < -xperiod/2.: dx = -(xperiod + dx) if dy > yperiod/2.: dy = yperiod - dy elif dy < -yperiod/2.: dy = -(yperiod + dy) if dz > zperiod/2.: dz = zperiod - dz zsign_flip = -1 elif dz < -zperiod/2.: dz = -(zperiod + dz) zsign_flip = -1 else: zsign_flip = 1 d_rp = np.sqrt(dx*dx + dy*dy) if (d_rp > rp_min) & (d_rp < rp_max) & (abs(dz) < pi_max): if abs(dz) > 0: vlos = dvz*dz*zsign_flip/abs(dz) else: vlos = dvz running_tally.append(vlos) if len(running_tally) > 0: return np.mean(running_tally) else: return 0. def test_mean_radial_velocity_vs_r_vs_brute_force_pure_python(): """ This function tests that the `~halotools.mock_observables.mean_radial_velocity_vs_r` function returns results that agree with a brute force pure python implementation for a random distribution of points, both with and without PBCs. """ npts = 99 with NumpyRNGContext(fixed_seed): sample1 = np.random.random((npts, 3)) sample2 =
np.random.random((npts, 3))
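# --- Added usage sketch (illustrative only; not part of the original test module). ---
# Calls the brute-force helper directly on two small random point samples with
# random velocities inside a unit periodic box.
if __name__ == "__main__":
    with NumpyRNGContext(fixed_seed):
        pts1 = np.random.random((50, 3))
        pts2 = np.random.random((50, 3))
        vels1 = np.random.normal(size=(50, 3))
        vels2 = np.random.normal(size=(50, 3))
    mean_vlos = pure_python_mean_los_velocity_vs_rp(
        pts1, vels1, pts2, vels2, rp_min=0.05, rp_max=0.25, pi_max=0.25, Lbox=1.0)
    print(mean_vlos)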
#
# Packt Publishing
# Hands-on Tensorflow Lite for Intelligent Mobile Apps
# @author: <NAME>
#
# Section 5: Gesture recognition
# Video 5-3: Parameter study and data augmentation
#

from PIL import Image
import numpy as np
import scipy.misc
import os


def hotvector(vector, classes):
    '''
    This function will transform a vector of labels into a vector of
    one-hot vectors.
    '''
    import numpy as np
    result = np.zeros((vector.shape[0], classes))
    for i in range(vector.shape[0]):
        result[i][vector[i]] = 1
    return result


def splitDataset(data, splits):
    import numpy as np
    import random
    '''
    This function will generate Training, Testing and Validation sets.

    Data must be an np object array where the first element is the data and
    the second element are the labels. I.e:
    >> data = np.empty(2,dtype=object)
    >> data[0] = actualInputData
    >> data[1] = labels

    Other example:
    >> totalData = getDataset("emnist")
    >> data = divideDatasets(totalData,[0.8,0.2,0])

    Splits is a list of percentages, i.e. [0.7,0.2,0.1], corresponding to
    70% training, 20% testing and 10% validation data respectively.

    At first, it collects the data equally spaced from "data".
    Finally, it shuffles it.
    '''
    if np.sum(np.array(splits)*10) != 10:
        print(np.sum(splits))
        raise Exception("The splits are percentages and must sum 1 in total. Example: [0.7,0.2,0.1]")
    if len(splits) != 3:
        raise Exception("There must be 3 elements in 'splits' corresponding to training, testing and validation splits. If you don't need validation, write 0, [0.8,0.2,0]")

    rows = data[1].shape[0]
    cut1 = int(splits[0]*rows)
    cut2 = int(splits[1]*rows)
    cut3 = int(splits[2]*rows)

    totalIndices = np.array([x for x in range(rows)])
    indicesTraining = np.array([i*rows//cut1 + rows//(2*cut1) for i in range(cut1)])
    restIndices = np.array(sorted(list(set(totalIndices)-set(indicesTraining))))

    if cut3 != 0:
        indicesTesting = np.array([i*len(restIndices)//cut2 + len(restIndices)//(2*cut2) for i in range(cut2)])
        indicesTesting = restIndices[indicesTesting]
        indicesValidating = np.array(sorted(list(set(restIndices)-set(indicesTesting))))
        random.shuffle(indicesValidating)
    else:
        indicesTesting = restIndices

    random.shuffle(indicesTraining)
    random.shuffle(indicesTesting)

    newdata = {}
    newdata["train"] = np.empty(2, dtype=object)
    newdata["test"] = np.empty(2, dtype=object)
    newdata["train"][0] = data[0][indicesTraining]
    newdata["train"][1] = data[1][indicesTraining]
    newdata["test"][0] = data[0][indicesTesting]
    newdata["test"][1] = data[1][indicesTesting]
    if cut3 != 0:
        newdata["validation"] = np.empty(2, dtype=object)
        newdata["validation"][0] = data[0][indicesValidating]
        newdata["validation"][1] = data[1][indicesValidating]
    return newdata


def img2np(image, channels=1):
    # np.frombuffer replaces the deprecated np.fromstring for raw byte input;
    # the copy below (np.array with dtype=int) makes the result writable.
    im_arr = np.frombuffer(image.tobytes(), dtype=np.uint8)
    if channels == 1:
        im_arr = im_arr.reshape((image.size[1], image.size[0]))
    else:
        im_arr = im_arr.reshape((image.size[1], image.size[0], channels))
    im_arr = np.array(im_arr, dtype=int)
    return im_arr


def autocrop(matrix):
    # Detect top, bottom, left and right borders
    for i in range(matrix.shape[0]):
        if np.sum(matrix[i, :] == 1) > 0:
            topVal = i
            break
    for i in range(matrix.shape[0]-1, 0, -1):
        if np.sum(matrix[i, :] == 1) > 0:
            bottomVal = i
            break
    for j in range(matrix.shape[1]):
        if np.sum(matrix[:, j] == 1) > 0:
            leftVal = j
            break
    for j in range(matrix.shape[1]-1, 0, -1):
        if np.sum(matrix[:, j] == 1) > 0:
            rightVal = j
            break
    # Crop hand
    matrix = matrix[topVal:bottomVal, leftVal:rightVal]
    maxDist = matrix.shape[0] if matrix.shape[0] > matrix.shape[1] else matrix.shape[1]
    new_matrix = np.zeros((maxDist, maxDist))
    # Integer division so the offsets can be used as slice indices
    # (true division, as in the original, raises a TypeError in Python 3)
    offsetX = (maxDist-matrix.shape[1])//2
    offsetY = (maxDist-matrix.shape[0])//2
    # Make a square matrix
    new_matrix[offsetY:offsetY+matrix.shape[0], offsetX:offsetX+matrix.shape[1]] = matrix
    return new_matrix


def getDataset(TARGET_DIM, folder):
    data = []
    labels = []
    im_back = img2np(Image.open(folder+"back.jpg").convert("RGBA"), 4)[:, :, :-1]
    mapLabels = [0, 0, 0, 1, 1, 1, 2, 2, 2]
    for imi in range(1, len(os.listdir(folder))):
        print("Image: "+str(imi))
        im_fore = img2np(Image.open(folder+str(imi)+".jpg").convert("RGBA"), 4)[:, :, :-1]
        res = np.zeros((im_back.shape[0], im_back.shape[1]))
        for i in range(res.shape[0]):
            for j in range(res.shape[1]):
                if np.sum(np.abs(im_back[i, j, :]-im_fore[i, j, :])) > 50:
                    res[i, j] = 1
        # Autocrop (get only the hand)
        matrix = autocrop(res)
        # Convert to Image
        im_orig = Image.fromarray(matrix).convert("L")
        for deg in range(-45, 50, 5):
            # Rotate
            im = im_orig.rotate(deg, expand=1)
            # Convert to matrix
            res = img2np(im)
            # Autocrop again (rotating will add more pixels)
            matrix = autocrop(res)
            # Resize to fit the model
            im = im.resize((TARGET_DIM, TARGET_DIM))
            im = img2np(im)
            data.append(im)
            labels.append(mapLabels[imi-1])
    total = np.empty(2, dtype=object)
    total[0] = np.array(data)
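# --- Added usage sketch (illustrative only; not part of the original script). ---
# One-hot encodes a small label vector with hotvector().
if __name__ == "__main__":
    labels = np.array([0, 2, 1, 1])
    print(hotvector(labels, 3))
    # [[1. 0. 0.]
    #  [0. 0. 1.]
    #  [0. 1. 0.]
    #  [0. 1. 0.]]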
#!/usr/bin/env python3

import numpy as np

from . import tshark


def get_result(input_files, filter):
    time_list = []
    for file in input_files:
        cmd_result = tshark.fields(file, filter, ['frame.time_delta_displayed'])
        time_list.extend([float(result) for result in cmd_result])
    if len(time_list) > 0:
        freq, intervals = np.histogram(time_list, 100)
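# --- Added usage sketch (illustrative only; the module above is truncated, and
# --- the capture file name and display filter below are hypothetical placeholders). ---
#
#     get_result(["capture.pcapng"], "udp.port == 5683")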
""" """ import warnings import numpy as np from jax import jit as jjit from jax import grad from jax import numpy as jnp from .stars import ( _get_unbounded_sfr_params, calculate_sm_sfr_fstar_history_from_mah, DEFAULT_SFR_PARAMS, compute_fstar, _get_bounded_sfr_params_vmap, calculate_histories_batch, ) from .quenching import ( DEFAULT_Q_PARAMS, _get_bounded_qt, _get_unbounded_q_params, _get_bounded_lg_drop, _get_unbounded_qrejuv, _get_bounded_q_params_vmap, ) from .utils import _sigmoid from diffmah.individual_halo_assembly import _calc_halo_history import os import h5py T_FIT_MIN = 1.0 # Only fit snapshots above this threshold. Gyr units. DLOGM_CUT = 3.5 # Only fit SMH within this dex of the present day stellar mass. MIN_MASS_CUT = 7.0 # Only fit SMH above this threshold. Log10(Msun) units. FSTAR_TIME_DELAY = 1.0 # Time period of averaged SFH (aka fstar). Gyr units. SSFRH_FLOOR = 1e-12 # Clip SFH to this minimum sSFR value. 1/yr units. def get_header(): out = """\ # halo_id \ lgmcrit lgy_at_mcrit indx_lo indx_hi tau_dep \ qt qs q_drop q_rejuv \ loss success\n\ """ return out def load_diffstar_data( run_name, t_sim, fstar_tdelay, mah_params, data_drn, ): """Calculate Diffstar histories and best-fit parameters. Parameters ---------- run_name : string Filename where the Diffstar best-fit parameters are stored. t_sim : ndarray of shape (n_times, ) Cosmic time of each simulated snapshot in Gyr fstar_tdelay: float Time interval in Gyr for fstar definition. fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay[Gyr] mah_params : ndarray of shape (ng, 6, ) Best fit diffmah halo parameters. Includes (t0, logmp, logtc, k, early, late) data_drn : string Filepath where the Diffstar best-fit parameters are stored. Returns ------- hists : tuple of shape (5, ) with MAH and SFH histories that contains mstar : ndarray of shape (ng, n_times) Cumulative stellar mass history in units of Msun assuming h=1. sfr : ndarray of shape (ng, n_times) Star formation rate history in units of Msun/yr assuming h=1. fstar : ndarray of shape (ng, n_times_fstar) SFH averaged over timescale fstar_tdelay in units of Msun/yr assuming h=1. dmhdt : ndarray of shape (ng, n_times) Mass accretion rate in units of Msun/yr assuming h=1. log_mah : ndarray of shape (ng, n_times) Base-10 log of cumulative peak halo mass in units of Msun assuming h=1. fit_params : ndarray of shape (ng, 9) Best fit bounded Diffstar parameters. u_fit_params : ndarray of shape (ng, 9) Best fit unbounded Diffstar parameters. loss : ndarray of shape (ng, ) Best-fit loss value for each halo. success : ndarray of shape (ng, ) Success string for each halo. Successful : "L-BFGS-B" Unsuccessful: "Fail" """ sfr_fitdata = dict() fn = os.path.join(data_drn, run_name) with h5py.File(fn, "r") as hdf: for key in hdf.keys(): sfr_fitdata[key] = hdf[key][...] 
colnames = get_header()[1:].strip().split() sfr_colnames = colnames[1:6] q_colnames = colnames[6:10] u_fit_params = np.array([sfr_fitdata[key] for key in sfr_colnames + q_colnames]).T u_sfr_fit_params = np.array([sfr_fitdata[key] for key in sfr_colnames]).T u_q_fit_params = np.array([sfr_fitdata[key] for key in q_colnames]).T sfr_fit_params = _get_bounded_sfr_params_vmap(*u_sfr_fit_params.T) q_fit_params = _get_bounded_q_params_vmap(*u_q_fit_params.T) sfr_fit_params = np.array([np.array(x) for x in sfr_fit_params]).T q_fit_params = np.array([np.array(x) for x in q_fit_params]).T fit_params = np.concatenate((sfr_fit_params, q_fit_params), axis=1) success_names = np.array(["Adam", "L-BFGS-B", "Fail"]) sfr_fitdata["success"] = success_names[sfr_fitdata["success"].astype(int)] hists = calculate_histories_batch( t_sim, mah_params, u_sfr_fit_params, u_q_fit_params, fstar_tdelay, ) mstars_fit, sfrs_fit, fstars_fit, dmhdts_fit, log_mahs_fit = hists print(hists[0].shape, np.unique(sfr_fitdata["success"], return_counts=True)) return hists, fit_params, u_fit_params, sfr_fitdata["loss"], sfr_fitdata["success"] def get_mah_params(data_path, runname): """Load the Diffmah parameters. Parameters ---------- data_path : string Filepath where the Diffmah best-fit parameters are stored. runname : string Filename where the Diffmah best-fit parameters are stored. Returns ------- mah_params : ndarray of shape (ng, 6) Best fit diffmah halo parameters. Includes (t0, logmp, logtc, k, early, late) """ fitting_data = dict() fn = f"{data_path}{runname}" with h5py.File(fn, "r") as hdf: for key in hdf.keys(): if key == "halo_id": fitting_data[key] = hdf[key][...] else: fitting_data["fit_" + key] = hdf[key][...] mah_params = np.array( [ np.log10(fitting_data["fit_t0"]), fitting_data["fit_logmp_fit"], fitting_data["fit_mah_logtc"], fitting_data["fit_mah_k"], fitting_data["fit_early_index"], fitting_data["fit_late_index"], ] ).T return mah_params def get_weights( t_sim, log_smah_sim, log_fstar_sim, fstar_indx_high, dlogm_cut, t_fit_min, mass_fit_min, ): """Calculate weights to mask target SMH and fstar target data. Parameters ---------- t_sim : ndarray of shape (nt, ) Cosmic time of each simulated snapshot in Gyr units. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun units. log_fstar_sim : ndarray of shape (nt, ) Base-10 log of SFH averaged over a time period in Msun/yr units. fstar_indx_high: ndarray of shape (n_times_fstar, ) Indices from np.searchsorted(t, t - fstar_tdelay)[index_select] dlogm_cut : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. Snapshots will not be used when log_smah_sim falls below log_smah_sim[-1] - dlogm_cut. t_fit_min : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value of t_fit_min defines the minimum cosmic time in Gyr used to define the target SFH. mass_fit_min : float Quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value mass_fit_min is the base-10 log of the minimum stellar mass in the SFH used as target data. The final mass_fit_min cut is equal to min(log_smah_sim[-1] - 0.5, mass_fit_min). Returns ------- weight : ndarray of shape (nt, ) Weight for each snapshot, to effectively remove from the fit the SMH snapshots that fall below the threshold mass. 
weight_fstar : ndarray of shape (n_times_fstar, ) Weight for each snapshot, to effectively remove from the fit the SFH snapshots that fall below the threshold mass. """ mass_fit_min = min(log_smah_sim[-1] - 0.5, mass_fit_min) mask = log_smah_sim > (log_smah_sim[-1] - dlogm_cut) mask &= log_smah_sim > mass_fit_min mask &= t_sim >= t_fit_min weight = np.ones_like(t_sim) weight[~mask] = 1e10 weight[log_smah_sim[-1] - log_smah_sim < 0.1] = 0.5 weight = jnp.array(weight) weight_fstar = np.ones_like(t_sim) weight_fstar[~mask] = 1e10 weight_fstar = weight_fstar[fstar_indx_high] weight_fstar[log_fstar_sim.max() - log_fstar_sim < 0.1] = 0.5 weight_fstar[weight_fstar == -10.0] = 1e10 return weight, weight_fstar ################# # free parameters # ################# @jjit def loss_free(params, loss_data): """ MSE loss function for fitting individual stellar mass histories. The parameter k is fixed. """ ( lgt, dt, dmhdt, log_mah, sm_target, log_sm_target, sfr_target, fstar_target, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, ) = loss_data sfr_params = params[0:5] q_params = params[5:9] _res = calculate_sm_sfr_fstar_history_from_mah( lgt, dt, dmhdt, log_mah, sfr_params, q_params, index_select, fstar_indx_high, fstar_tdelay, ) mstar, sfr, fstar = _res mstar = jnp.log10(mstar) fstar = jnp.log10(fstar) sfr_res = 1e8 * (sfr - sfr_target) / sm_target sfr_res = jnp.clip(sfr_res, -1.0, 1.0) loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2) loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2) loss += jnp.mean((sfr_res / weight) ** 2) qt = _get_bounded_qt(q_params[0]) loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0) return loss loss_free_deriv = jjit(grad(loss_free, argnums=(0))) def loss_free_deriv_np(params, data): return np.array(loss_free_deriv(params, data)).astype(float) def get_loss_data_free( t_sim, dt, sfrh, log_smah_sim, logmp, mah_params, dlogm_cut=DLOGM_CUT, t_fit_min=T_FIT_MIN, mass_fit_min=MIN_MASS_CUT, fstar_tdelay=FSTAR_TIME_DELAY, ssfrh_floor=SSFRH_FLOOR, ): """Retrieve the target data passed to the optimizer when fitting the halo SFH model for the case in which the parameter k is fixed. Parameters ---------- t_sim : ndarray of shape (nt, ) Cosmic time of each simulated snapshot in Gyr units. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr units. sfrh : ndarray of shape (nt, ) Star formation history of simulated snapshots in Msun/yr units. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun units. logmp : float Base-10 log present day halo mass in Msun units. mah_params : ndarray of shape (4, ) Best fit diffmah halo parameters. Includes (logtc, k, early, late). dlogm_cut : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. Snapshots will not be used when log_smah_sim falls below log_smah_sim[-1] - dlogm_cut. Default is set as global at top of module. t_fit_min : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value of t_fit_min defines the minimum cosmic time in Gyr used to define the target SFH. Default is set as global at top of module. mass_fit_min : float Quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value mass_fit_min is the base-10 log of the minimum stellar mass in the SFH used as target data. 
The final mass_fit_min cut is equal to min(log_smah_sim[-1] - 0.5, mass_fit_min). Default is set as global at top of module. fstar_tdelay : float Time interval in Gyr for fstar definition. fstar = mstar(t) - mstar(t-fstar_tdelay) Default is set as global at top of module. ssfrh_floor : float Lower bound value of star formation history used in the fits. SFH(t) = max(SFH(t), SMH(t) * ssfrh_floor) Default is set as global at top of module. Returns ------- p_init : ndarray of shape (5, ) Initial guess at the unbounded value of the best-fit parameter. Here we have p_init = (u_lgm, u_lgy, u_l, u_h, u_dt) loss_data : sequence consisting of the following data logt: ndarray of shape (nt, ) Base-10 log of cosmic time of each simulated snapshot in Gyr. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr dmhdt : ndarray of shape (nt, ) Diffmah halo mass accretion rate in units of Msun/yr. log_mah : ndarray of shape (nt, ) Diffmah halo mass accretion history in units of Msun. smh : ndarray of shape (nt, ) Cumulative stellar mass history in Msun. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun. sfrh : ndarray of shape (nt, ) Star formation history in Msun/yr. log_fstar_sim : ndarray of shape (nt, ) Base-10 log of cumulative SFH averaged over a timescale in Msun/yr. index_select: ndarray of shape (n_times_fstar, ) Snapshot indices used in fstar computation. fstar_indx_high: ndarray of shape (n_times_fstar, ) Indices of np.searchsorted(t, t - fstar_tdelay)[index_select] fstar_tdelay: float Time interval in Gyr for fstar definition. fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay[Gyr] ssfrh_floor : float Lower bound value of star formation history used in the fits. weight : ndarray of shape (nt, ) Weight for each snapshot, to effectively remove from the fit the SMH snapshots that fall below the threshold mass. weight_fstar : ndarray of shape (n_times_fstar, ) Weight for each snapshot, to effectively remove from the fit the SFH snapshots that fall below the threshold mass. t_fstar_max : float Base-10 log of the cosmic time where SFH target history peaks. 
""" fstar_indx_high = np.searchsorted(t_sim, t_sim - fstar_tdelay) _mask = t_sim > fstar_tdelay + fstar_tdelay / 2.0 index_select = np.arange(len(t_sim))[_mask] fstar_indx_high = fstar_indx_high[_mask] smh = 10 ** log_smah_sim fstar_sim = compute_fstar(t_sim, smh, index_select, fstar_indx_high, fstar_tdelay) with warnings.catch_warnings(): warnings.simplefilter("ignore") ssfrh = fstar_sim / smh[index_select] ssfrh = np.clip(ssfrh, ssfrh_floor, np.inf) fstar_sim = ssfrh * smh[index_select] log_fstar_sim = np.where( fstar_sim == 0.0, np.log10(fstar_sim.max()) - 3.0, np.log10(fstar_sim) ) logt = jnp.log10(t_sim) logtmp = np.log10(t_sim[-1]) dmhdt, log_mah = _calc_halo_history(logt, logtmp, logmp, *mah_params) weight, weight_fstar = get_weights( t_sim, log_smah_sim, log_fstar_sim, fstar_indx_high, dlogm_cut, t_fit_min, mass_fit_min, ) t_fstar_max = logt[index_select][np.argmax(log_fstar_sim)] default_sfr_params = np.array(list(DEFAULT_SFR_PARAMS.values())) default_sfr_params[0] = np.clip(0.3 * (logmp - 11.0) + 11.4, 11.0, 13.0) default_sfr_params[1] = np.clip(0.2 * (logmp - 11.0) - 0.7, -1.5, -0.2) default_sfr_params[2] = np.clip(0.7 * (logmp - 11.0) - 0.3, 0.2, 3.0) default_sfr_params[4] = np.clip(-8.0 * (logmp - 11.0) + 15, 2.0, 15.0) u_default_sfr_params = np.array(_get_unbounded_sfr_params(*default_sfr_params)) sfr_ms_params = u_default_sfr_params sfr_ms_params_err = np.array([0.5, 0.5, 1.0, 0.3, 3.0]) default_q_params = np.array(list(DEFAULT_Q_PARAMS.values())) default_q_params[0] = np.clip(-0.5 * (logmp - 11.0) + 1.5, 0.7, 1.5) default_q_params[2] = -2.0 q_params = np.array(_get_unbounded_q_params(*default_q_params)) q_params_err = np.array([0.3, 0.5, 0.3, 0.3]) loss_data = ( logt, dt, dmhdt, log_mah, smh, log_smah_sim, sfrh, log_fstar_sim, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, ) p_init = ( np.concatenate((sfr_ms_params, q_params)), np.concatenate((sfr_ms_params_err, q_params_err)), ) return p_init, loss_data def get_outline_free(halo_id, loss_data, p_best, loss_best, success): """Return the string storing fitting results that will be written to disk""" sfr_params = p_best[0:5] q_params = p_best[5:9] _d = np.concatenate((sfr_params, q_params)).astype("f4") data_out = (*_d, float(loss_best)) out = str(halo_id) + " " + " ".join(["{:.5e}".format(x) for x in data_out]) out = out + " " + str(success) return out + "\n" ################# # fixed_noquench # ################# @jjit def loss_fixed_noquench(params, loss_data): """ MSE loss function for fitting individual stellar mass histories. The parameter k is fixed. There is no quenching. 
""" ( lgt, dt, dmhdt, log_mah, sm_target, log_sm_target, sfr_target, fstar_target, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, q_params, ) = loss_data sfr_params = params _res = calculate_sm_sfr_fstar_history_from_mah( lgt, dt, dmhdt, log_mah, sfr_params, q_params, index_select, fstar_indx_high, fstar_tdelay, ) mstar, sfr, fstar = _res mstar = jnp.log10(mstar) fstar = jnp.log10(fstar) sfr_res = 1e8 * (sfr - sfr_target) / sm_target sfr_res = jnp.clip(sfr_res, -1.0, 1.0) loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2) loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2) loss += jnp.mean((sfr_res / weight) ** 2) qt = _get_bounded_qt(q_params[0]) loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0) return loss loss_fixed_noquench_deriv = jjit(grad(loss_fixed_noquench, argnums=(0))) def loss_fixed_noquench_deriv_np(params, data): return np.array(loss_fixed_noquench_deriv(params, data)).astype(float) def get_loss_data_fixed_noquench( t_sim, dt, sfrh, log_smah_sim, logmp, mah_params, dlogm_cut=DLOGM_CUT, t_fit_min=T_FIT_MIN, mass_fit_min=MIN_MASS_CUT, fstar_tdelay=FSTAR_TIME_DELAY, ssfrh_floor=SSFRH_FLOOR, ): """Retrieve the target data passed to the optimizer when fitting the halo SFH model for the case in which k parameter is fixed and there is no quenching. Parameters ---------- t_sim : ndarray of shape (nt, ) Cosmic time of each simulated snapshot in Gyr units. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr units. sfrh : ndarray of shape (nt, ) Star formation history of simulated snapshots in Msun/yr units. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun units. logmp : float Base-10 log present day halo mass in Msun units. mah_params : ndarray of shape (4, ) Best fit diffmah halo parameters. Includes (logtc, k, early, late). dlogm_cut : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. Snapshots will not be used when log_smah_sim falls below log_smah_sim[-1] - dlogm_cut. Default is set as global at top of module. t_fit_min : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value of t_fit_min defines the minimum cosmic time in Gyr used to define the target SFH. Default is set as global at top of module. mass_fit_min : float Quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value mass_fit_min is the base-10 log of the minimum stellar mass in the SFH used as target data. The final mass_fit_min cut is equal to min(log_smah_sim[-1] - 0.5, mass_fit_min). Default is set as global at top of module. fstar_tdelay : float Time interval in Gyr for fstar definition. fstar = mstar(t) - mstar(t-fstar_tdelay) Default is set as global at top of module. ssfrh_floor : float Lower bound value of star formation history used in the fits. SFH(t) = max(SFH(t), SMH(t) * ssfrh_floor) Default is set as global at top of module. Returns ------- p_init : ndarray of shape (5, ) Initial guess at the unbounded value of the best-fit parameter. Here we have p_init = (u_lgm, u_lgy, u_l, u_h, u_dt) loss_data : sequence consisting of the following data logt: ndarray of shape (nt, ) Base-10 log of cosmic time of each simulated snapshot in Gyr. 
dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr dmhdt : ndarray of shape (nt, ) Diffmah halo mass accretion rate in units of Msun/yr. log_mah : ndarray of shape (nt, ) Diffmah halo mass accretion history in units of Msun. smh : ndarray of shape (nt, ) Cumulative stellar mass history in Msun. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun. sfrh : ndarray of shape (nt, ) Star formation history in Msun/yr. log_fstar_sim : ndarray of shape (nt, ) Base-10 log of cumulative SFH averaged over a timescale in Msun/yr. index_select: ndarray of shape (n_times_fstar, ) Snapshot indices used in fstar computation. fstar_indx_high: ndarray of shape (n_times_fstar, ) Indices of np.searchsorted(t, t - fstar_tdelay)[index_select] fstar_tdelay: float Time interval in Gyr for fstar definition. fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay[Gyr] ssfrh_floor : float Lower bound value of star formation history used in the fits. weight : ndarray of shape (nt, ) Weight for each snapshot, to effectively remove from the fit the SMH snapshots that fall below the threshold mass. weight_fstar : ndarray of shape (n_times_fstar, ) Weight for each snapshot, to effectively remove from the fit the SFH snapshots that fall below the threshold mass. t_fstar_max : float Base-10 log of the cosmic time where SFH target history peaks. q_params : ndarray of shape (4, ) Fixed values of the unbounded quenching parameters """ fstar_indx_high = np.searchsorted(t_sim, t_sim - fstar_tdelay) _mask = t_sim > fstar_tdelay + fstar_tdelay / 2.0 index_select = np.arange(len(t_sim))[_mask] fstar_indx_high = fstar_indx_high[_mask] smh = 10 ** log_smah_sim fstar_sim = compute_fstar(t_sim, smh, index_select, fstar_indx_high, fstar_tdelay) with warnings.catch_warnings(): warnings.simplefilter("ignore") ssfrh = fstar_sim / smh[index_select] ssfrh = np.clip(ssfrh, ssfrh_floor, np.inf) fstar_sim = ssfrh * smh[index_select] log_fstar_sim = np.where( fstar_sim == 0.0, np.log10(fstar_sim.max()) - 3.0, np.log10(fstar_sim) ) logt = jnp.log10(t_sim) logtmp = np.log10(t_sim[-1]) dmhdt, log_mah = _calc_halo_history(logt, logtmp, logmp, *mah_params) weight, weight_fstar = get_weights( t_sim, log_smah_sim, log_fstar_sim, fstar_indx_high, dlogm_cut, t_fit_min, mass_fit_min, ) t_fstar_max = logt[index_select][np.argmax(log_fstar_sim)] default_sfr_params = np.array(list(DEFAULT_SFR_PARAMS.values())) default_sfr_params[0] = np.clip(0.3 * (logmp - 11.0) + 11.4, 11.0, 13.0) default_sfr_params[1] = np.clip(0.2 * (logmp - 11.0) - 0.7, -1.5, -0.2) default_sfr_params[2] = np.clip(0.7 * (logmp - 11.0) - 0.3, 0.2, 3.0) default_sfr_params[4] = np.clip(-8.0 * (logmp - 11.0) + 15, 2.0, 15.0) u_default_sfr_params = np.array(_get_unbounded_sfr_params(*default_sfr_params)) sfr_ms_params = u_default_sfr_params sfr_ms_params_err = np.array([0.5, 0.5, 1.0, 0.3, 3.0]) default_q_params = np.array(list(DEFAULT_Q_PARAMS.values())) default_q_params[0] = 1.8 default_q_params[1] = -2.0 q_params = np.array(_get_unbounded_q_params(*default_q_params)) loss_data = ( logt, dt, dmhdt, log_mah, smh, log_smah_sim, sfrh, log_fstar_sim, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, q_params, ) p_init = ( sfr_ms_params, sfr_ms_params_err, ) return p_init, loss_data def get_outline_fixed_noquench(halo_id, loss_data, p_best, loss_best, success): """Return the string storing fitting results that will be written to disk""" q_params = loss_data[-1] sfr_params = p_best _d 
= np.concatenate((sfr_params, q_params)).astype("f4") data_out = (*_d, float(loss_best)) out = str(halo_id) + " " + " ".join(["{:.5e}".format(x) for x in data_out]) out = out + " " + str(success) return out + "\n" ################# # fixed_hi # ################# @jjit def loss_fixed_hi(params, loss_data): """ MSE loss function for fitting individual stellar mass histories. The parameters k, indx_hi are fixed. """ ( lgt, dt, dmhdt, log_mah, sm_target, log_sm_target, sfr_target, fstar_target, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, fixed_hi, ) = loss_data sfr_params = [*params[0:3], fixed_hi, params[3]] q_params = params[4:8] _res = calculate_sm_sfr_fstar_history_from_mah( lgt, dt, dmhdt, log_mah, sfr_params, q_params, index_select, fstar_indx_high, fstar_tdelay, ) mstar, sfr, fstar = _res mstar = jnp.log10(mstar) fstar = jnp.log10(fstar) sfr_res = 1e8 * (sfr - sfr_target) / sm_target sfr_res = jnp.clip(sfr_res, -1.0, 1.0) loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2) loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2) loss += jnp.mean((sfr_res / weight) ** 2) qt = _get_bounded_qt(q_params[0]) loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0) return loss loss_fixed_hi_deriv = jjit(grad(loss_fixed_hi, argnums=(0))) def loss_fixed_hi_deriv_np(params, data): return np.array(loss_fixed_hi_deriv(params, data)).astype(float) def get_loss_data_fixed_hi( t_sim, dt, sfrh, log_smah_sim, logmp, mah_params, dlogm_cut=DLOGM_CUT, t_fit_min=T_FIT_MIN, mass_fit_min=MIN_MASS_CUT, fstar_tdelay=FSTAR_TIME_DELAY, ssfrh_floor=SSFRH_FLOOR, ): """Retrieve the target data passed to the optimizer when fitting the halo SFH model for the case in which the parameters k, indx_hi are fixed. Parameters ---------- t_sim : ndarray of shape (nt, ) Cosmic time of each simulated snapshot in Gyr units. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr units. sfrh : ndarray of shape (nt, ) Star formation history of simulated snapshots in Msun/yr units. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun units. logmp : float Base-10 log present day halo mass in Msun units. mah_params : ndarray of shape (4, ) Best fit diffmah halo parameters. Includes (logtc, k, early, late). dlogm_cut : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. Snapshots will not be used when log_smah_sim falls below log_smah_sim[-1] - dlogm_cut. Default is set as global at top of module. t_fit_min : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value of t_fit_min defines the minimum cosmic time in Gyr used to define the target SFH. Default is set as global at top of module. mass_fit_min : float Quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value mass_fit_min is the base-10 log of the minimum stellar mass in the SFH used as target data. The final mass_fit_min cut is equal to min(log_smah_sim[-1] - 0.5, mass_fit_min). Default is set as global at top of module. fstar_tdelay : float Time interval in Gyr for fstar definition. fstar = mstar(t) - mstar(t-fstar_tdelay) Default is set as global at top of module. ssfrh_floor : float Lower bound value of star formation history used in the fits. SFH(t) = max(SFH(t), SMH(t) * ssfrh_floor) Default is set as global at top of module. 
Returns ------- p_init : ndarray of shape (5, ) Initial guess at the unbounded value of the best-fit parameter. Here we have p_init = (u_lgm, u_lgy, u_l, u_h, u_dt) loss_data : sequence consisting of the following data logt: ndarray of shape (nt, ) Base-10 log of cosmic time of each simulated snapshot in Gyr. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr dmhdt : ndarray of shape (nt, ) Diffmah halo mass accretion rate in units of Msun/yr. log_mah : ndarray of shape (nt, ) Diffmah halo mass accretion history in units of Msun. smh : ndarray of shape (nt, ) Cumulative stellar mass history in Msun. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun. sfrh : ndarray of shape (nt, ) Star formation history in Msun/yr. log_fstar_sim : ndarray of shape (nt, ) Base-10 log of cumulative SFH averaged over a timescale in Msun/yr. index_select: ndarray of shape (n_times_fstar, ) Snapshot indices used in fstar computation. fstar_indx_high: ndarray of shape (n_times_fstar, ) Indices of np.searchsorted(t, t - fstar_tdelay)[index_select] fstar_tdelay: float Time interval in Gyr for fstar definition. fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay[Gyr] ssfrh_floor : float Lower bound value of star formation history used in the fits. weight : ndarray of shape (nt, ) Weight for each snapshot, to effectively remove from the fit the SMH snapshots that fall below the threshold mass. weight_fstar : ndarray of shape (n_times_fstar, ) Weight for each snapshot, to effectively remove from the fit the SFH snapshots that fall below the threshold mass. t_fstar_max : float Base-10 log of the cosmic time where SFH target history peaks. fixed_hi : float Fixed value of the unbounded diffstar parameter indx_hi """ fstar_indx_high = np.searchsorted(t_sim, t_sim - fstar_tdelay) _mask = t_sim > fstar_tdelay + fstar_tdelay / 2.0 index_select = np.arange(len(t_sim))[_mask] fstar_indx_high = fstar_indx_high[_mask] smh = 10 ** log_smah_sim fstar_sim = compute_fstar(t_sim, smh, index_select, fstar_indx_high, fstar_tdelay) with warnings.catch_warnings(): warnings.simplefilter("ignore") ssfrh = fstar_sim / smh[index_select] ssfrh = np.clip(ssfrh, ssfrh_floor, np.inf) fstar_sim = ssfrh * smh[index_select] log_fstar_sim = np.where( fstar_sim == 0.0, np.log10(fstar_sim.max()) - 3.0, np.log10(fstar_sim) ) logt = jnp.log10(t_sim) logtmp = np.log10(t_sim[-1]) dmhdt, log_mah = _calc_halo_history(logt, logtmp, logmp, *mah_params) weight, weight_fstar = get_weights( t_sim, log_smah_sim, log_fstar_sim, fstar_indx_high, dlogm_cut, t_fit_min, mass_fit_min, ) t_fstar_max = logt[index_select][np.argmax(log_fstar_sim)] default_sfr_params = np.array(list(DEFAULT_SFR_PARAMS.values())) default_sfr_params[0] = np.clip(0.3 * (logmp - 11.0) + 11.4, 11.0, 13.0) default_sfr_params[1] = np.clip(0.2 * (logmp - 11.0) - 0.7, -1.5, -0.2) default_sfr_params[2] = np.clip(0.7 * (logmp - 11.0) - 0.3, 0.2, 3.0) default_sfr_params[4] = np.clip(-8.0 * (logmp - 11.0) + 15, 2.0, 15.0) u_default_sfr_params = np.array(_get_unbounded_sfr_params(*default_sfr_params)) sfr_ms_params = np.zeros(4) sfr_ms_params[0:3] = u_default_sfr_params[0:3] sfr_ms_params[3] = u_default_sfr_params[4] fixed_hi = u_default_sfr_params[3] sfr_ms_params_err = np.array([0.5, 0.5, 1.0, 3.0]) default_q_params = np.array(list(DEFAULT_Q_PARAMS.values())) default_q_params[0] = np.clip(-0.5 * (logmp - 11.0) + 1.5, 0.7, 1.5) default_q_params[2] = -2.0 q_params = np.array(_get_unbounded_q_params(*default_q_params)) 
q_params_err = np.array([0.3, 0.5, 0.3, 0.3]) loss_data = ( logt, dt, dmhdt, log_mah, smh, log_smah_sim, sfrh, log_fstar_sim, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, fixed_hi, ) p_init = ( np.concatenate((sfr_ms_params, q_params)), np.concatenate((sfr_ms_params_err, q_params_err)), ) return p_init, loss_data def get_outline_fixed_hi(halo_id, loss_data, p_best, loss_best, success): """Return the string storing fitting results that will be written to disk""" fixed_hi = loss_data[-1] sfr_params = np.zeros(5) sfr_params[0:3] = p_best[0:3] sfr_params[3] = fixed_hi sfr_params[4] = p_best[3] q_params = p_best[4:8] _d = np.concatenate((sfr_params, q_params)).astype("f4") data_out = (*_d, float(loss_best)) out = str(halo_id) + " " + " ".join(["{:.5e}".format(x) for x in data_out]) out = out + " " + str(success) return out + "\n" loss_default = loss_fixed_hi loss_grad_default_np = loss_fixed_hi_deriv_np get_loss_data_default = get_loss_data_fixed_hi get_outline_default = get_outline_fixed_hi ################# # fixed_hi_rej # ################# @jjit def loss_fixed_hi_rej(params, loss_data): """ MSE loss function for fitting individual stellar mass histories. The parameters indx_hi are fixed. Rejuvenation is deactivated. """ ( lgt, dt, dmhdt, log_mah, sm_target, log_sm_target, sfr_target, fstar_target, index_select, fstar_indx_high, fstar_tdelay, ssfrh_floor, weight, weight_fstar, t_fstar_max, fixed_hi, ) = loss_data sfr_params = [*params[0:3], fixed_hi, params[3]] u_lg_drop = params[6] lg_drop = _get_bounded_lg_drop(u_lg_drop) u_lg_rejuv = _get_unbounded_qrejuv(lg_drop + 0.01, lg_drop) q_params = [*params[4:7], u_lg_rejuv] _res = calculate_sm_sfr_fstar_history_from_mah( lgt, dt, dmhdt, log_mah, sfr_params, q_params, index_select, fstar_indx_high, fstar_tdelay, ) mstar, sfr, fstar = _res mstar = jnp.log10(mstar) fstar = jnp.log10(fstar) sfr_res = 1e8 * (sfr - sfr_target) / sm_target sfr_res = jnp.clip(sfr_res, -1.0, 1.0) loss = jnp.mean(((mstar - log_sm_target) / weight) ** 2) loss += jnp.mean(((fstar - fstar_target) / weight_fstar) ** 2) loss += jnp.mean((sfr_res / weight) ** 2) qt = _get_bounded_qt(q_params[0]) loss += _sigmoid(qt - t_fstar_max, 0.0, 50.0, 100.0, 0.0) return loss loss_fixed_hi_rej_deriv = jjit(grad(loss_fixed_hi_rej, argnums=(0))) def loss_fixed_hi_rej_deriv_np(params, data): return np.array(loss_fixed_hi_rej_deriv(params, data)).astype(float) def get_loss_data_fixed_hi_rej( t_sim, dt, sfrh, log_smah_sim, logmp, mah_params, dlogm_cut=DLOGM_CUT, t_fit_min=T_FIT_MIN, mass_fit_min=MIN_MASS_CUT, fstar_tdelay=FSTAR_TIME_DELAY, ssfrh_floor=SSFRH_FLOOR, ): """Retrieve the target data passed to the optimizer when fitting the halo SFH model for the case in which the parameters indx_hi are fixed. There is no rejuvenation. Parameters ---------- t_sim : ndarray of shape (nt, ) Cosmic time of each simulated snapshot in Gyr units. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr units. sfrh : ndarray of shape (nt, ) Star formation history of simulated snapshots in Msun/yr units. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun units. logmp : float Base-10 log present day halo mass in Msun units. mah_params : ndarray of shape (4, ) Best fit diffmah halo parameters. Includes (logtc, k, early, late). dlogm_cut : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. 
Snapshots will not be used when log_smah_sim falls below log_smah_sim[-1] - dlogm_cut. Default is set as global at top of module. t_fit_min : float, optional Additional quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value of t_fit_min defines the minimum cosmic time in Gyr used to define the target SFH. Default is set as global at top of module. mass_fit_min : float Quantity used to place a cut on which simulated snapshots are used to define the target halo SFH. The value mass_fit_min is the base-10 log of the minimum stellar mass in the SFH used as target data. The final mass_fit_min cut is equal to min(log_smah_sim[-1] - 0.5, mass_fit_min). Default is set as global at top of module. fstar_tdelay : float Time interval in Gyr for fstar definition. fstar = mstar(t) - mstar(t-fstar_tdelay) Default is set as global at top of module. ssfrh_floor : float Lower bound value of star formation history used in the fits. SFH(t) = max(SFH(t), SMH(t) * ssfrh_floor) Default is set as global at top of module. Returns ------- p_init : ndarray of shape (5, ) Initial guess at the unbounded value of the best-fit parameter. Here we have p_init = (u_lgm, u_lgy, u_l, u_h, u_dt) loss_data : sequence consisting of the following data logt: ndarray of shape (nt, ) Base-10 log of cosmic time of each simulated snapshot in Gyr. dt : ndarray of shape (nt, ) Cosmic time steps between each simulated snapshot in Gyr dmhdt : ndarray of shape (nt, ) Diffmah halo mass accretion rate in units of Msun/yr. log_mah : ndarray of shape (nt, ) Diffmah halo mass accretion history in units of Msun. smh : ndarray of shape (nt, ) Cumulative stellar mass history in Msun. log_smah_sim : ndarray of shape (nt, ) Base-10 log of cumulative stellar mass in Msun. sfrh : ndarray of shape (nt, ) Star formation history in Msun/yr. log_fstar_sim : ndarray of shape (nt, ) Base-10 log of cumulative SFH averaged over a timescale in Msun/yr. index_select: ndarray of shape (n_times_fstar, ) Snapshot indices used in fstar computation. fstar_indx_high: ndarray of shape (n_times_fstar, ) Indices of np.searchsorted(t, t - fstar_tdelay)[index_select] fstar_tdelay: float Time interval in Gyr for fstar definition. fstar = (mstar(t) - mstar(t-fstar_tdelay)) / fstar_tdelay[Gyr] ssfrh_floor : float Lower bound value of star formation history used in the fits. weight : ndarray of shape (nt, ) Weight for each snapshot, to effectively remove from the fit the SMH snapshots that fall below the threshold mass. weight_fstar : ndarray of shape (n_times_fstar, ) Weight for each snapshot, to effectively remove from the fit the SFH snapshots that fall below the threshold mass. t_fstar_max : float Base-10 log of the cosmic time where SFH target history peaks. 
fixed_hi : float Fixed value of the unbounded diffstar parameter indx_hi """ fstar_indx_high = np.searchsorted(t_sim, t_sim - fstar_tdelay) _mask = t_sim > fstar_tdelay + fstar_tdelay / 2.0 index_select = np.arange(len(t_sim))[_mask] fstar_indx_high = fstar_indx_high[_mask] smh = 10 ** log_smah_sim fstar_sim = compute_fstar(t_sim, smh, index_select, fstar_indx_high, fstar_tdelay) with warnings.catch_warnings(): warnings.simplefilter("ignore") ssfrh = fstar_sim / smh[index_select] ssfrh = np.clip(ssfrh, ssfrh_floor, np.inf) fstar_sim = ssfrh * smh[index_select] log_fstar_sim = np.where( fstar_sim == 0.0, np.log10(fstar_sim.max()) - 3.0, np.log10(fstar_sim) ) logt = jnp.log10(t_sim) logtmp = np.log10(t_sim[-1]) dmhdt, log_mah = _calc_halo_history(logt, logtmp, logmp, *mah_params) weight, weight_fstar = get_weights( t_sim, log_smah_sim, log_fstar_sim, fstar_indx_high, dlogm_cut, t_fit_min, mass_fit_min, ) t_fstar_max = logt[index_select][np.argmax(log_fstar_sim)] default_sfr_params = np.array(list(DEFAULT_SFR_PARAMS.values())) default_sfr_params[0] = np.clip(0.3 * (logmp - 11.0) + 11.4, 11.0, 13.0) default_sfr_params[1] = np.clip(0.2 * (logmp - 11.0) - 0.7, -1.5, -0.2) default_sfr_params[2] = np.clip(0.7 * (logmp - 11.0) - 0.3, 0.2, 3.0) default_sfr_params[4] = np.clip(-8.0 * (logmp - 11.0) + 15, 2.0, 15.0) u_default_sfr_params = np.array(_get_unbounded_sfr_params(*default_sfr_params)) sfr_ms_params = np.zeros(4) sfr_ms_params[0:3] = u_default_sfr_params[0:3] sfr_ms_params[3] = u_default_sfr_params[4] fixed_hi = u_default_sfr_params[3] sfr_ms_params_err =
np.array([0.5, 0.5, 1.0, 3.0])
numpy.array
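# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the fitting module above; names are
# hypothetical): the masking trick used by get_weights() assigns snapshots
# that fail the mass/time cuts a huge weight (1e10), so dividing residuals by
# the weights effectively removes those snapshots from the MSE loss while
# keeping array shapes fixed, which is convenient for the jit-compiled losses.
import numpy as np

def toy_weights(t, log_smah, dlogm_cut=2.5, t_fit_min=1.0, mass_fit_min=7.0):
    """Return per-snapshot weights: ~1 for usable data, 1e10 for masked data."""
    mass_fit_min = min(log_smah[-1] - 0.5, mass_fit_min)
    mask = log_smah > (log_smah[-1] - dlogm_cut)
    mask &= log_smah > mass_fit_min
    mask &= t >= t_fit_min
    weight = np.ones_like(t)
    weight[~mask] = 1e10
    return weight

def toy_weighted_mse(log_sm_model, log_sm_target, weight):
    """Weighted MSE: masked snapshots contribute ~0 because of the 1e10 weight."""
    return np.mean(((log_sm_model - log_sm_target) / weight) ** 2)

# Example: early low-mass snapshots are suppressed, late snapshots dominate.
_t_toy = np.linspace(0.5, 13.8, 20)
_log_smah_toy = np.linspace(6.0, 10.5, 20)
print(toy_weighted_mse(_log_smah_toy + 0.1, _log_smah_toy,
                       toy_weights(_t_toy, _log_smah_toy)))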
import sys import numpy as np from scipy.linalg import block_diag from scipy.sparse import csr_matrix from scipy.special import psi import pytest from sklearn.decomposition import LatentDirichletAllocation from sklearn.decomposition._online_lda import (_dirichlet_expectation_1d, _dirichlet_expectation_2d) from sklearn.utils.testing import assert_allclose from sklearn.utils.testing import assert_true from sklearn.utils.testing import assert_equal from sklearn.utils.testing import assert_array_almost_equal from sklearn.utils.testing import assert_almost_equal from sklearn.utils.testing import assert_greater_equal from sklearn.utils.testing import assert_raises_regexp from sklearn.utils.testing import if_safe_multiprocessing_with_blas from sklearn.exceptions import NotFittedError from sklearn.externals.six.moves import xrange from sklearn.externals.six import StringIO def _build_sparse_mtx(): # Create 3 topics and each topic has 3 distinct words. # (Each word only belongs to a single topic.) n_components = 3 block = np.full((3, 3), n_components, dtype=np.int) blocks = [block] * n_components X = block_diag(*blocks) X = csr_matrix(X) return (n_components, X) def test_lda_default_prior_params(): # default prior parameter should be `1 / topics` # and verbose params should not affect result n_components, X = _build_sparse_mtx() prior = 1. / n_components lda_1 = LatentDirichletAllocation(n_components=n_components, doc_topic_prior=prior, topic_word_prior=prior, random_state=0) lda_2 = LatentDirichletAllocation(n_components=n_components, random_state=0) topic_distr_1 = lda_1.fit_transform(X) topic_distr_2 = lda_2.fit_transform(X) assert_almost_equal(topic_distr_1, topic_distr_2) def test_lda_fit_batch(): # Test LDA batch learning_offset (`fit` method with 'batch' learning) rng =
np.random.RandomState(0)
numpy.random.RandomState
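# ---------------------------------------------------------------------------
# Illustrative sketch (not part of the test module above): the block-diagonal
# corpus built by _build_sparse_mtx() gives each topic a disjoint vocabulary,
# so a correctly fitted LDA should assign each document block one dominant
# topic.  A minimal end-to-end check, assuming only scipy and scikit-learn:
import numpy as np
from scipy.linalg import block_diag
from scipy.sparse import csr_matrix
from sklearn.decomposition import LatentDirichletAllocation

_n_topics = 3
_X_toy = csr_matrix(block_diag(*[np.full((3, 3), _n_topics)] * _n_topics))
_lda_toy = LatentDirichletAllocation(n_components=_n_topics,
                                     learning_method='batch', random_state=0)
_doc_topic = _lda_toy.fit_transform(_X_toy)
# Each row of _doc_topic should be strongly peaked on a single topic index.
print(_doc_topic.argmax(axis=1))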
import os import sys class RedirectStdStreams(object): def __init__(self, stdout=None, stderr=None): self._stdout = stdout or sys.stdout self._stderr = stderr or sys.stderr def __enter__(self): self.old_stdout, self.old_stderr = sys.stdout, sys.stderr self.old_stdout.flush(); self.old_stderr.flush() sys.stdout, sys.stderr = self._stdout, self._stderr def __exit__(self, exc_type, exc_value, traceback): self._stdout.flush(); self._stderr.flush() sys.stdout = self.old_stdout sys.stderr = self.old_stderr devnull = open(os.devnull, 'w') # with RedirectStdStreams(stdout=devnull, stderr=devnull): import random import numpy as np
np.set_printoptions(threshold=sys.maxsize)  # np.nan is rejected as a threshold by modern NumPy; sys.maxsize prints arrays in full
numpy.set_printoptions
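# ---------------------------------------------------------------------------
# Illustrative usage sketch for the RedirectStdStreams context manager defined
# above: temporarily send stdout/stderr to /dev/null (e.g. around a chatty
# import or third-party call), then restore the original streams on exit.
def _noisy_call():
    print("this line is swallowed by devnull")

with RedirectStdStreams(stdout=devnull, stderr=devnull):
    _noisy_call()
print("streams restored")  # printed normally again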
"""The top-level ETC algorithm. """ import time import json import pathlib import datetime import multiprocessing try: import multiprocessing.shared_memory shared_memory_available = True except ImportError: # We will complain about this later if we actually need it. shared_memory_available = False try: import DOSlib.logger as logging except ImportError: # Fallback when we are not running as a DOS application. import logging import numpy as np import fitsio import desietc.sky import desietc.gfa import desietc.gmm import desietc.accum import desietc.util import desietc.plot class ETCAlgorithm(object): SECS_PER_DAY = 86400 BUFFER_NAME = 'ETC_{0}_buffer' FFRAC_NOM = dict(PSF=0.56198, ELG=0.41220, BGS=0.18985) def __init__(self, sky_calib, gfa_calib, psf_pixels=25, guide_pixels=31, max_dither=7, num_dither=1200, Ebv_coef=2.165, X_coef=0.114, ffrac_ref=0.56, nbad_threshold=100, nll_threshold=100, avg_secs=60, avg_min_values=3, grid_resolution=0.5, min_exptime_secs=0, parallel=True): """Initialize once per session. Parameters ---------- sky_calib : str Path to the SKY camera calibration file to use. gfa_calib : str Path to the GFA camera calibration file to use. psf_pixels : int Size of stamp to use for acquisition image PSF measurements. Must be odd. guide_pixels : int Size of stamp to use for guide star measurements. Must be odd. max_dither : float Maximum dither to use in pixels relative to PSF centroid. num_dither : int Number of (x,y) dithers to use, extending out to max_dither with decreasing density. Ebv_coef : float Coefficient to use for converting Ebv into an equivalent MW transparency via 10 ** (-coef * Ebv / 2.5) X_coef : float Coefficient to use to convert observed transparency at airmass X into an equivalent X=1 value via 10 ** (-coef * (X-1) / 2.5) ffrac_ref : float Reference value of the fiber fraction that defines nominal conditions. nbad_threshold : int Maximum number of allowed bad overscan pixel values before a GFA is flagged as noisy. nll_threshold : float Maximum allowed GMM fit NLL value before a PSF fit is flagged as potentially bad. avg_secs : float Compute running averages over this time interval in seconds. avg_min_values : int A running average requires at least this many values. grid_resolution : float Resolution of grid (in seconds) to use for SNR calculations. min_exptime_secs : float Minimum allowed spectrograph exposure time in seconds. A stop or split request will never be issued until this interval has elapsed after the spectrograph shutters open. parallel : bool Process GFA images in parallel when True. """ self.Ebv_coef = Ebv_coef self.X_coef = X_coef self.nbad_threshold = nbad_threshold self.nll_threshold = nll_threshold self.avg_secs = avg_secs self.avg_min_values = avg_min_values self.gfa_calib = gfa_calib # Capture a git description of the code we are running. self.git = desietc.util.git_describe() if self.git: logging.info(f'Running desietc {self.git}') else: logging.warning('Could not determine git info.') # Initialize PSF fitting. if psf_pixels % 2 == 0: raise ValueError('psf_pixels must be odd.') psf_grid = np.arange(psf_pixels + 1) - psf_pixels / 2 self.GMMpsf = desietc.gmm.GMMFit(psf_grid, psf_grid) self.psf_pixels = psf_pixels psf_stacksize = desietc.gfa.GFACamera.psf_stacksize if self.psf_pixels > psf_stacksize: raise ValueError(f'psf_pixels must be <= {psf_stacksize}.') # Use a smaller stamp for PSF measurements. 
ntrim = (psf_stacksize - self.psf_pixels) // 2 self.psf_inset = slice(ntrim, ntrim + self.psf_pixels) self.measure = desietc.util.PSFMeasure(psf_stacksize) # Initialize guide star analysis. if guide_pixels % 2 == 0: raise ValueError('guide_pixels must be odd.') self.guide_pixels = guide_pixels guide_grid = np.arange(guide_pixels + 1) - guide_pixels / 2 self.GMMguide = desietc.gmm.GMMFit(guide_grid, guide_grid) self.xdither, self.ydither = desietc.util.diskgrid(num_dither, max_dither, alpha=2) # Create the mask of background pixels. bg_radius = 11 # pixels BGprofile = lambda x, y: 1.0 * ((x ** 2 + y ** 2 > bg_radius ** 2)) self.BGmask = (desietc.util.make_template(guide_pixels, BGprofile, dx=0, dy=0, normalized=False) > 0.5) # Initialize analysis results. self.exp_data = {} self.num_guide_frames = 0 self.num_sky_frames = 0 self.acquisition_data = None self.guide_stars = None self.image_path = None # Initialize observing conditions updated after each GFA or SKY frame. self.seeing = None self.ffrac_psf = None self.rel_ffrac_sbprof = None self.transp_obs = None self.transp_zenith = None self.skylevel = None # Initialize variables used for telemetry. self.ffrac_avg = None self.transp_avg = None self.thru_avg = None self.ffrac_psf = None self.ffrac_elg = None self.ffrac_bgs = None self.speed_dark = None self.speed_dark_nts = None self.speed_bright = None self.speed_bright_nts = None self.speed_backup = None self.speed_backup_nts = None # Initialize call counters. self.reset_counts() # Initialize per-exposure summaries. self.exposure_summary = {} # How many GUIDE and SKY cameras do we expect? self.ngfa = len(desietc.gfa.GFACamera.guide_names) self.nsky = len(desietc.sky.SkyCamera.sky_names) # Initialize buffers to record our signal- and sky-rate measurements. self.thru_measurements = desietc.util.MeasurementBuffer( maxlen=1000, default_value=1, aux_dtype=[ ('ffrac_gfa', np.float32, (self.ngfa,)), # fiber fraction measured from single GFA ('transp_gfa', np.float32, (self.ngfa,)), # transparency measured from single GFA ('dx_gfa', np.float32, (self.ngfa,)), # mean x shift of centroid from single GFA in pixels ('dy_gfa', np.float32, (self.ngfa,)), # mean y shift of centroid from single GFA in pixels ('transp_obs', np.float32), # Observed transparency averaged over all guide stars ('transp_zenith', np.float32), # Zenith transparency averaged over all guide stars ('ffrac_psf', np.float32), # FFRAC for PSF profile averaged over all guide stars ('ffrac_elg', np.float32), # FFRAC for ELG profile averaged over all guide stars ('ffrac_bgs', np.float32), # FFRAC for BGS profile averaged over all guide stars ('thru_psf', np.float32), # TRANSP*FFRAC for PSF profile ('thru_elg', np.float32), # TRANSP*FFRAC for ELG profile ('thru_bgs', np.float32), # TRANSP*FFRAC for BGS profile ('speed_dark', np.float32), # Speed for ELG profile with 2-min averaging ('speed_bright', np.float32), # Speed for BGS profile with 2-min averaging ('speed_backup', np.float32), # Speed for PSF profile with 2-min averaging ('speed_dark_nts', np.float32), # Speed for ELG profile with 20-min averaging ('speed_bright_nts', np.float32), # Speed for BGS profile with 20-min averaging ('speed_backup_nts', np.float32), # Speed for PSF profile with 20-min averaging ]) self.sky_measurements = desietc.util.MeasurementBuffer( maxlen=200, default_value=1, padding=900, aux_dtype=[ ('flux', np.float32, (self.nsky,)), # sky flux meausured from a single SKYCAM. ('dflux', np.float32, (self.nsky,)), # sky flux uncertainty meausured from a single SKYCAM. 
('ndrop', np.int32, (self.nsky,)), # number of fibers dropped from the camera flux estimate. ]) # Initialize exposure accumulator. self.accum = desietc.accum.Accumulator( sig_buffer=self.thru_measurements, bg_buffer=self.sky_measurements, grid_resolution=grid_resolution, min_exptime_secs=min_exptime_secs) # Initialize the SKY camera processor. self.SKY = desietc.sky.SkyCamera(calib_name=sky_calib) # Prepare for parallel processing if necessary. self.GFAs = {} self.parallel = parallel if parallel: # Check that shared mem is available. if not shared_memory_available: raise RuntimeError('Python >= 3.8 required for the parallel ETC option.') # Initialize unallocated resources. self.shared_mem = {camera: None for camera in desietc.gfa.GFACamera.guide_names} self.pipes = {camera: None for camera in desietc.gfa.GFACamera.guide_names} self.processes = {camera: None for camera in desietc.gfa.GFACamera.guide_names} else: # All GFAs use the same GFACamera object. GFA = desietc.gfa.GFACamera(calib_name=self.gfa_calib) for camera in desietc.gfa.GFACamera.guide_names: self.GFAs[camera] = GFA def start(self): """Perform startup resource allocation. Must be paired with a call to shutdown(). """ if not self.parallel: return logging.info('Starting up ETC...') # Initialize the GFA camera processor(s). # Allocate shared-memory buffers for each guide camera's GFACamera object. bufsize = desietc.gfa.GFACamera.buffer_size for camera in desietc.gfa.GFACamera.guide_names: if self.shared_mem[camera] is not None: logging.warning(f'Shared memory already allocated for {camera}?') bufname = self.BUFFER_NAME.format(camera) try: self.shared_mem[camera] = multiprocessing.shared_memory.SharedMemory( name=bufname, size=bufsize, create=True) except FileExistsError: logging.error(f'Reconnecting to previously allocated shared memory for {camera}...') self.shared_mem[camera] = multiprocessing.shared_memory.SharedMemory( name=bufname, size=bufsize, create=False) self.GFAs[camera] = desietc.gfa.GFACamera( calib_name=self.gfa_calib, buffer=self.shared_mem[camera].buf) nbytes = bufsize * len(self.GFAs) logging.info(f'Allocated {nbytes/2**20:.1f}Mb of shared memory.') # Initialize per-GFA processes, each with its own pipe. context = multiprocessing.get_context(method='spawn') for camera in desietc.gfa.GFACamera.guide_names: if self.pipes[camera] is not None and self.processes[camera] is not None and self.processes[camera].is_alive(): logging.error(f'Process already running for {camera}. Will not restart...') else: self.pipes[camera], child = context.Pipe() self.processes[camera] = context.Process( target=ETCAlgorithm.gfa_process, daemon=True, args=( camera, self.gfa_calib, self.GMMpsf, self.psf_inset, self.measure, child)) self.processes[camera].start() logging.info(f'Initialized {len(self.GFAs)} GFA processes.') def shutdown(self, timeout=5): """Release any resources allocated in our constructor. """ if not self.parallel: return logging.info('Shutting down ETC...') # Shutdown the process and release the shared memory allocated for each GFA. 
for camera in desietc.gfa.GFACamera.guide_names: logging.info(f'Releasing {camera} resources') try: if self.processes[camera] is not None and self.processes[camera].is_alive(): self.pipes[camera].send('quit') self.processes[camera].join(timeout=timeout) logging.info(f'Shutdown pipe and process for {camera}.') else: logging.warning(f'Process for {camera} is not alive.') except Exception as e: logging.error(f'Failed to shutdown pipe and process for {camera}: {e}') self.pipes[camera] = None self.processes[camera] = None try: if self.shared_mem[camera] is not None: self.shared_mem[camera].close() self.shared_mem[camera].unlink() logging.info(f'Released shared memory for {camera}.') else: logging.warning(f'No shared memory allocated for {camera}.') except Exception as e: logging.error(f'Failed to released shared memory for {camera}: {e}') self.shared_mem[camera] = None self.GFAs = {} def set_image_path(self, image_path, create=False): """Set the path where future images should be written or None to prevent saving images. """ if image_path is not None: self.image_path = pathlib.Path(image_path) # Try to create if requested and necessary. if create: try: self.image_path.mkdir(parents=True, exist_ok=True) except OSError as e: logging.error(f'Failed to create {self.image_path}: {e}') self.image_path = None return False else: if not self.image_path.exists(): logging.error(f'Non-existent image path: {self.image_path}') self.image_path = None return False logging.info(f'Images will be written in: {image_path}.') else: logging.info('Images will no longer be written.') self.image_path = None def reset_counts(self): self.total_gfa_count = 0 self.total_sky_count = 0 self.total_desi_count = 0 def check_top_header(self, header, source): """Check EXPID in the top-level GUIDER or SKY header of an exposure. """ if 'EXPID' not in header: logging.error(f'Missing EXPID keyword in {source}') elif 'expid' in self.exp_data: expid = header['EXPID'] expected = self.exp_data['expid'] if expid != expected: logging.error(f'Got EXPID {expid} from {source} but expected {expected}.') def process_camera_header(self, header, source): """Check the header for a single camera. The expected keywords are: MJD-OBS, EXPTIME. Return True if it is possible to keep going, i.e. MJD-OBS and EXPTIME are both present with reasonable values. """ if 'MJD-OBS' not in header: logging.error(f'Missing MJD-OBS keyword in {source}.') return False mjd_obs = header['MJD-OBS'] if mjd_obs is None or mjd_obs < 58484: # 1-1-2019 logging.error(f'Invalid MJD-OBS {mjd_obs} from {source}.') return False if 'EXPTIME' not in header: logging.error(f'Missing EXPTIME keyword in {source}.') return False exptime = header['EXPTIME'] if exptime is None or exptime <= 0: logging.error(f'Invalid EXPTIME {exptime} from {source}.') return False self.mjd_obs = mjd_obs self.exptime = exptime return True @staticmethod def gfa_process(camera, calib_name, GMM, inset, measure, pipe): """Parallel process entry point for a single GFA. """ # Create a GFACamera object that shares its data and ivar arrays # with the parent process. bufname = ETCAlgorithm.BUFFER_NAME.format(camera) shared_mem = multiprocessing.shared_memory.SharedMemory(name=bufname) GFA = desietc.gfa.GFACamera(calib_name=calib_name, buffer=shared_mem.buf) # Command handling loop. while True: action = pipe.recv() if action == 'quit': shared_mem.close() pipe.send('bye') pipe.close() break # Handle other actions here... 
elif action == 'measure_psf': camera_result, psf_stack = ETCAlgorithm.measure_psf(GFA, GMM, inset, measure) pipe.send((camera_result, psf_stack)) @staticmethod def measure_psf(thisGFA, GMM, inset, measure): """Measure the PSF for a single GFA. This static method can either called from :meth:`gfa_process` or from the main thread. """ camera_result = {} # Find PSF-like objects. nstar = thisGFA.get_psfs() camera_result['nstar'] = nstar T, WT = thisGFA.psf_stack if T is None: return camera_result, None # Measure the FWHM and FFRAC of the stacked PSF. fwhm, ffrac = measure.measure(T, WT) camera_result['fwhm'] = fwhm if fwhm > 0 else np.nan camera_result['ffrac'] = ffrac if ffrac > 0 else np.nan # Use a smaller size for GMM fitting. T, WT = T[inset, inset], WT[inset, inset] # Save a copy of the cropped stacked image. psf_stack = np.stack((T, WT)) # Fit the PSF to a Gaussian mixture model. This is the slow step... gmm_params = GMM.fit(T, WT, maxgauss=3) if gmm_params is None: camera_result.update(dict(gmm=[], nll=0, ngauss=0)) else: camera_result.update(dict(gmm=gmm_params, nll=GMM.best_nll, ngauss=GMM.ngauss)) return camera_result, psf_stack def process_acquisition(self, data): """Process the initial GFA acquisition images. """ ncamera = 0 start = time.time() logging.info(f'Processing acquisition image for {self.exptag}.') self.check_top_header(data['GUIDER']['header'], 'acquisition image') # Pass 1: reduce the raw GFA data and measure the PSF. self.acquisition_data = {} self.psf_stack = {} self.noisy_gfa = set() pending = [] # Use the first per-camera header for these values since the top header is missing EXPTIME # and has MJD-OBS a few seconds earlier (the request time?) need_hdr = True hdr = {'MJD-OBS': None, 'EXPTIME': None, 'MOUNTAZ': None, 'MOUNTEL': 1., 'MOUNTHA': 0.} acq_mjd, acq_exptime, acq_airmass = None, None, None for camera in desietc.gfa.GFACamera.guide_names: if camera not in data: logging.warning(f'No acquisition image for {camera}.') continue if not self.process_camera_header(data[camera]['header'], f'{camera} acquisition image'): continue if need_hdr: for key in hdr: value = data[camera]['header'].get(key, None) if value is None: logging.error(f'Acquisition header missing "{key}": using default {hdr[key]}.') else: hdr[key] = value need_hdr = False if not self.preprocess_gfa(camera, data[camera], f'{camera} acquisition image'): continue if self.parallel: self.pipes[camera].send('measure_psf') pending.append(camera) else: self.acquisition_data[camera], self.psf_stack[camera] = self.measure_psf( self.GFAs[camera], self.GMMpsf, self.psf_inset, self.measure) ncamera += 1 # Calculate the atmospheric extintion factor to use. X = desietc.util.cos_zenith_to_airmass(np.sin(np.deg2rad(hdr['MOUNTEL']))) self.atm_extinction = 10 ** (-self.X_coef * (X - 1) / 2.5) logging.info(f'Using atmospheric extinction {self.atm_extinction:.4f} at X={X:.3f}.') # Save the header info. self.exp_data.update(dict( acq_mjd=hdr['MJD-OBS'], acq_exptime=hdr['EXPTIME'], hour_angle=hdr['MOUNTHA'], elevation=hdr['MOUNTEL'], azimuth=hdr['MOUNTAZ'], airmass=np.float32(X), atm_extinction=
np.float32(self.atm_extinction)
numpy.float32
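# ---------------------------------------------------------------------------
# Illustrative sketch (hypothetical names, not ETCAlgorithm code) of the
# parallel pattern used above: one long-lived worker process per camera,
# commanded over a Pipe, reading frame data from a shared-memory buffer so
# large images never have to be pickled between processes.
import multiprocessing
import multiprocessing.shared_memory
import numpy as np

def _toy_worker(bufname, shape, pipe):
    shm = multiprocessing.shared_memory.SharedMemory(name=bufname)
    frame = np.ndarray(shape, dtype=np.float32, buffer=shm.buf)
    while True:
        action = pipe.recv()
        if action == 'quit':
            shm.close()
            pipe.send('bye')
            break
        elif action == 'mean':
            pipe.send(float(frame.mean()))

if __name__ == '__main__':
    _shape = (32, 32)
    _shm = multiprocessing.shared_memory.SharedMemory(
        name='TOY_buffer', size=4 * 32 * 32, create=True)
    np.ndarray(_shape, dtype=np.float32, buffer=_shm.buf)[:] = 1.0
    _ctx = multiprocessing.get_context(method='spawn')
    _pipe, _child = _ctx.Pipe()
    _proc = _ctx.Process(target=_toy_worker, daemon=True,
                         args=('TOY_buffer', _shape, _child))
    _proc.start()
    _pipe.send('mean'); print(_pipe.recv())   # -> 1.0
    _pipe.send('quit'); _pipe.recv(); _proc.join()
    _shm.close(); _shm.unlink()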
import collections import itertools import numpy as np from qecsim import paulitools as pt import matplotlib.pyplot as plt import qecsim from qecsim import app from qecsim.models.generic import PhaseFlipErrorModel,DepolarizingErrorModel,BiasedDepolarizingErrorModel #from qecsim.models.planar import PlanarCode, PlanarMPSDecoder from qecsim.models.rotatedplanar import RotatedPlanarCode from qecsim.models.toric import ToricCode, ToricMWPMDecoder from _planarmpsdecoder_def import PlanarMPSDecoder_def from _rotatedplanarmpsdecoder_def import RotatedPlanarMPSDecoder_def from _toricmwpmdecoder_def import ToricMWPMDecoder_def import app_def import _rotatedplanarmpsdecoder_def import _toricmwpmdecoder_def import importlib as imp imp.reload(app_def) imp.reload(_rotatedplanarmpsdecoder_def) imp.reload(_toricmwpmdecoder_def) import os, time import multiprocessing as mp from functools import partial from random import randint from sklearn.utils.random import sample_without_replacement def random_coords(dims, nsamp): idx = sample_without_replacement(np.prod(dims), nsamp) return np.vstack(np.unravel_index(idx, dims)).T def parallel_step_p(code,hadamard_mat,hadamard_vec,XYperm_mat,XYperm_vec,ZYperm_mat,ZYperm_vec,error_model, decoder, max_runs, error_probability): result= app_def.run_def(code,hadamard_mat,hadamard_vec,XYperm_mat,XYperm_vec,ZYperm_mat,ZYperm_vec, error_model, decoder, error_probability, max_runs) return result def square(a): return a**2 vsquare=np.vectorize(square) """ --|----(0,0,0)----|----(0,0,1)----|----(0,0,2)-- | | | (1,0,0) (1,0,1) (1,0,2) | | | --|----(0,1,0)----|----(0,1,1)----|----(0,1,2)-- | | | (1,1,0) (1,1,1) (1,1,2) | | | --|----(0,2,0)----|----(0,2,1)----|----(0,2,2)-- | | | (1,2,0) (1,2,1) (1,2,2) | | | --|---- --0 ----|---- 1 ----|-------2----- | | | (1,0,0) (1,0,1) (1,0,2) | | | --|----(0,1,0)----|----(0,1,1)----|----(0,1,2)-- | | | (1,1,0) (1,1,1) (1,1,2) | | | --|----(0,2,0)----|----(0,2,1)----|----(0,2,2)-- | | | (1,2,0) (1,2,1) (1,2,2) | | | """ # set models sizes= range(7,13,4) #choose odd sizes since we have defined hadamard_mat for odd sizes codes_and_size = [ToricCode(*(size,size)) for size in sizes] bias_list=[10000] layout_name="rot" code_name="CSS" code_name="random" code_name="optimal" code_name="XZZX" if (code_name=="random"): realizations=30 else: realizations=1 # set physical error probabilities error_probability_min, error_probability_max = 0.05, 0.5 error_probabilities = np.linspace(error_probability_min, error_probability_max, 18) # set max_runs for each probability biasstr_list=['Z'] for biasstr in biasstr_list: timestr = time.strftime("%Y%m%d-%H%M%S ") #record current date and time dirname="./data/"+timestr+layout_name+code_name+'_bias='+biasstr os.mkdir(dirname) for bias in bias_list: error_model = BiasedDepolarizingErrorModel(bias,biasstr) decoder = _toricmwpmdecoder_def.ToricMWPMDecoder_def() # print run parameters print('layout:', layout_name) print('codes_and_size:', [code.label for code in codes_and_size]) print('Error model:', error_model.label) print('Decoder:', decoder.label) print('Error probabilities:', error_probabilities) # print('Maximum runs:', max_runs) pL_list_rand =np.zeros((len(codes_and_size),realizations,len(error_probabilities))) std_list_rand=np.zeros((len(codes_and_size),realizations,len(error_probabilities))) pL_list =np.zeros((len(codes_and_size),len(error_probabilities))) std_list=np.zeros((len(codes_and_size),len(error_probabilities))) log_pL_list =np.zeros((len(codes_and_size),len(error_probabilities))) 
log_std_list=np.zeros((len(codes_and_size),len(error_probabilities))) for code_index,code in enumerate(codes_and_size): hadamard_mat=np.zeros((sizes[code_index],sizes[code_index])) XYperm_mat,ZYperm_mat=hadamard_mat,hadamard_mat nrows, ncols=hadamard_mat.shape n_h_qubits=nrows*ncols n_v_qubits=n_h_qubits hadamard_vec=np.zeros(2*np.prod((nrows,ncols))) XYperm_vec,ZYperm_vec=hadamard_vec,hadamard_vec if code_name=="random": pH=0.5 pXY=0 pZY=0 max_runs =500 for realization_index in range(realizations): for i,j,k in np.ndindex(hadamard_mat.shape): if(np.random.rand(1,1))<pH: hadamard_mat[i,j,k]=1 for i,j,k in np.ndindex(hadamard_mat.shape): hadamard_vec[i*n_h_qubits+j*ncols+k]=hadamard_mat[i,j,k] p=mp.Pool() func=partial(parallel_step_p,code,hadamard_mat,hadamard_vec,XYperm_mat,XYperm_vec,ZYperm_mat,ZYperm_vec,error_model, decoder, max_runs) result=p.map(func, error_probabilities) #print(result) p.close() p.join() for i in range(len(result)): pL_list_rand[code_index][realization_index][i]=result[i][0] std_list_rand[code_index][realization_index][i]=result[i][1] pL_list[code_index] = np.sum(pL_list_rand[code_index],axis=0)/realizations std_list[code_index] = np.sqrt(np.sum(vsquare(std_list_rand[code_index]),axis=0))/realizations for i in range(len(pL_list[code_index])): log_pL_list[code_index][i]=-np.log(pL_list[code_index][i]) log_std_list[code_index][i]=std_list[code_index][i]/(pL_list[code_index][i]*
np.log(10)
numpy.log
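# ---------------------------------------------------------------------------
# Illustrative sketch of the parallelisation pattern used above: freeze all
# fixed simulation arguments with functools.partial and map the one free
# parameter (the physical error probability) over a process pool.  toy_run is
# hypothetical and only stands in for parallel_step_p / app_def.run_def.
import numpy as np
import multiprocessing as mp
from functools import partial

def toy_run(max_runs, error_probability):
    """Pretend decoder: logical failure rate grows with the physical error rate."""
    rng = np.random.default_rng(0)
    fails = rng.random(max_runs) < error_probability ** 2
    p_fail = fails.mean()
    return p_fail, np.sqrt(p_fail * (1.0 - p_fail) / max_runs)

if __name__ == '__main__':
    _error_probabilities = np.linspace(0.05, 0.5, 10)
    _func = partial(toy_run, 500)
    with mp.Pool() as _pool:
        _results = _pool.map(_func, _error_probabilities)
    print(_results[0], _results[-1])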
import os import os.path as osp import sys import pickle import numpy as np import mmcv cur_dir = osp.dirname(osp.abspath(__file__)) sys.path.insert(0, osp.join(cur_dir, '.')) sys.path.insert(0, osp.join(cur_dir, '..')) sys.path.insert(0, osp.join(cur_dir, '../..')) def voc_ap(rec, prec, use_07_metric=False): """ ap = voc_ap(rec, prec, [use_07_metric]) Compute VOC AP given precision and recall. If use_07_metric is true, uses the VOC 07 11 point method (default:False). """ ap11_cls = [] # this is where changed if use_07_metric: # 11 point metric # average precision when recall is 0.0,0.1,0.2...0.9..1.0 (11 points) ap = 0.0 for t in np.arange(0.0, 1.1, 0.1): if np.sum(rec >= t) == 0: p = 0 else: p = np.max(prec[rec >= t]) ap = ap + p / 11.0 ap11_cls.append(p) # if use_07_metric is false, use area mode else: # correct AP calculation # first append sentinel values at the end mrec = np.concatenate(([0.0], rec, [1.0])) mpre = np.concatenate(([0.0], prec, [0.0])) # compute the precision envelope for i in range(mpre.size - 1, 0, -1): mpre[i - 1] = np.maximum(mpre[i - 1], mpre[i]) # to calculate area under PR curve, look for points # where X axis (recall) changes value i = np.where(mrec[1:] != mrec[:-1])[0] # and sum (\Delta recall) * prec ap = np.sum((mrec[i + 1] - mrec[i]) * mpre[i + 1]) ap11_cls = np.zeros(11) return ap, ap11_cls def parse_tencent_youtu_cache(annopath): ann = mmcv.load(annopath) recs = {} for image_path in ann.keys(): image_name = '/'.join(image_path.split('/')[-2:]) instances = ann[image_path] objects = [] for ins in instances: cls_name = ins.class_label x1 = ins.x_top_left y1 = ins.y_top_left x2 = x1 + ins.width - 1 y2 = y1 + ins.height - 1 difficult = ins.difficult # bool # truncated = ins.truncated # bool obj_struct = dict(name=cls_name, difficult=difficult, bbox=[x1,y1,x2,y2]) objects.append(obj_struct) recs[image_name] = objects return recs def get_gt(annopath, cachedir): # load gt if not os.path.isdir(cachedir): os.mkdir(cachedir) cachefile = os.path.join(cachedir, "annots_from_tencent_youtu_cache.pkl") # load annotations -------------------------------- if not os.path.isfile(cachefile): # load annot from tencent youtu cached format # parse the cached annotation ============= recs = parse_tencent_youtu_cache(annopath) # save print("Saving cached annotations to {:s}".format(cachefile)) with open(cachefile, "wb") as f: pickle.dump(recs, f) else: # load cached annotations print("****load cached annotaion: {}****".format(cachefile)) with open(cachefile, "rb") as f: recs = pickle.load(f) return recs # calculate recall and precision for one class def voc_eval( detpath, annots, classname, ovthresh=0.5, use_07_metric=False, ): """rec, prec, ap = voc_eval(detpath, annopath, imagesetfile, classname, [ovthresh], [use_07_metric]) Top level function that does the PASCAL VOC evaluation. detpath: Path to detections # results/<comp_id>_det_test_xxx.txt detpath.format(classname) should produce the detection results file. 
annopath: Path to cached annotation, tencent youtu cached format (brambox) classname: Category name (duh) cachedir: Directory for caching the annotations [ovthresh]: Overlap threshold (default = 0.5) [use_07_metric]: Whether to use VOC07's 11 point AP computation (default False) """ imagenames = ['/'.join(key.split('/')[-2:]) for key in annots.keys()] imagenames = sorted(imagenames) # extract gt objects for this class # # annotations of current class class_recs = {} # npos: number of objects npos = 0 for imagename in imagenames: # only get the annotations of the specific class in recs R = [obj for obj in annots[imagename] if obj["name"] == classname] bbox = np.array([x["bbox"] for x in R]) # if no difficult, all are 0. difficult = np.array([x["difficult"] for x in R]).astype(np.bool) # len(R) number of gt_bboxes for this class_det: whether is detected, initialized to False det = [False] * len(R) # add number of non-difficult gt bbox npos = npos + sum(~difficult) class_recs[imagename] = {"bbox": bbox, "difficult": difficult, "det": det} # read dets (read detection results) ------------------------------- detfile = detpath.format(classname) with open(detfile, "r") as f: lines = f.readlines() splitlines = [x.strip().split(" ") for x in lines] # TODO: fix this image_ids = ['/'.join(x[0].split('/')[-2:]) for x in splitlines] # image_ids = [os.path.basename(x[0]).split('.')[0] for x in splitlines] confidence = np.array([float(x[1]) for x in splitlines]) BB = np.array([[float(z) for z in x[2:]] for x in splitlines]) sorted_ind = np.argsort(-confidence) sorted_scores = np.sort(-confidence) BB = BB[sorted_ind, :] image_ids = [image_ids[x] for x in sorted_ind] # go down dets and mark TPs and FPs nd = len(image_ids) tp =
np.zeros(nd)
numpy.zeros
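# ---------------------------------------------------------------------------
# Illustrative usage sketch for voc_ap() defined above (toy recall/precision
# values, not real detections): the area mode integrates the precision
# envelope over recall, while the VOC07 mode averages precision sampled at
# recall = 0.0, 0.1, ..., 1.0.
import numpy as np

_rec_toy = np.array([0.1, 0.2, 0.4, 0.7, 1.0])
_prec_toy = np.array([1.0, 0.9, 0.8, 0.6, 0.5])
_ap_area, _ = voc_ap(_rec_toy, _prec_toy, use_07_metric=False)
_ap_11pt, _ap11_points = voc_ap(_rec_toy, _prec_toy, use_07_metric=True)
print(_ap_area, _ap_11pt, len(_ap11_points))  # the 11-point list has one entry per recall threshold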
import os import glob import copy import random import numpy as np import numpy.ma as ma import cv2 from PIL import Image import matplotlib.pyplot as plt import scipy.io as scio import argparse ####################################### ####################################### import affpose.YCB_Aff.cfg as config from affpose.YCB_Aff.utils import helper_utils from affpose.YCB_Aff.utils.dataset import ycb_aff_dataset_utils from affpose.YCB_Aff.utils.pose.load_obj_part_ply_files import load_obj_part_ply_files from affpose.YCB_Aff.utils.bbox.extract_bboxs_from_label import get_bbox, get_obj_part_bbox ####################################### ####################################### def main(): ################################### # Load Ply files ################################### cld, cld_obj_centered, cld_obj_part_centered, \ obj_classes, obj_part_classes, \ obj_ids, TRAIN_OBJ_PART_IDS = load_obj_part_ply_files() ################################## ################################## # image_files = open('{}'.format(config.TRAIN_FILE), "r") image_files = open('{}'.format(config.TEST_FILE), "r") image_files = image_files.readlines() print("Loaded Files: {}".format(len(image_files))) # select random test images np.random.seed(1) num_files = 25 random_idx = np.random.choice(np.arange(0, int(len(image_files)), 1), size=int(num_files), replace=False) image_files = np.array(image_files)[random_idx] print("Chosen Files: {}".format(len(image_files))) for image_idx, image_addr in enumerate(image_files): image_addr = image_addr.rstrip() print('\n{}/{}, image_addr:{}'.format(image_idx+1, len(image_files), image_addr)) rgb_addr = config.AFF_DATASET_ROOT_PATH + image_addr + config.RGB_EXT depth_addr = config.AFF_DATASET_ROOT_PATH + image_addr + config.DEPTH_EXT label_addr = config.AFF_DATASET_ROOT_PATH + image_addr + config.AFF_LABEL_EXT rgb = np.array(Image.open(rgb_addr)) depth = np.array(Image.open(depth_addr)) aff_label = np.array(Image.open(label_addr)) # gt pose meta_addr = config.AFF_DATASET_ROOT_PATH + image_addr + config.META_EXT meta = scio.loadmat(meta_addr) ####################################### ####################################### color_aff_label = ycb_aff_dataset_utils.colorize_aff_mask(aff_label) color_obj_label = cv2.addWeighted(rgb, 0.5, color_aff_label, 0.5, 0) cv2_obj_parts_img = color_obj_label.copy() ####################################### ####################################### obj = meta['cls_indexes'].flatten().astype(np.int32) for idx in range(len(obj)): obj_id = obj[idx] print("Object:", obj_classes[int(obj_id) - 1]) ####################################### ####################################### if image_addr.split('/')[0] != 'data_syn' and int(image_addr.split('/')[1]) >= 60: cam_cx = config.CAM_CX_2 cam_cy = config.CAM_CY_2 cam_fx = config.CAM_FX_2 cam_fy = config.CAM_FY_2 else: cam_cx = config.CAM_CX_1 cam_cy = config.CAM_CY_1 cam_fx = config.CAM_FX_1 cam_fy = config.CAM_FY_1 ####################################### ####################################### obj_r = meta['poses'][:, :, idx][:, 0:3] obj_t = np.array([meta['poses'][:, :, idx][:, 3:4].flatten()]) obj_meta_idx = str(1000 + obj_id)[1:] # cmin, rmin, cmax, rmax obj_bbox = meta['obj_bbox_' + np.str(obj_meta_idx)].flatten() cmin, rmin, cmax, rmax = obj_bbox[0], obj_bbox[1], obj_bbox[2], obj_bbox[3] ####################################### # PROJECT TO SCREEN ####################################### obj_color = ycb_aff_dataset_utils.obj_color_map(obj_id) cam_mat = np.array([[cam_fx, 0, cam_cx], [0, cam_fy, cam_cy], [0, 0, 1]]) 
cam_dist = np.array([0.0, 0.0, 0.0, 0.0, 0.0])  # api: numpy.array
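# The intrinsic matrix cam_mat and zero-distortion vector cam_dist assembled above are
# exactly what the "PROJECT TO SCREEN" step needs. Below is a minimal, illustrative sketch
# of that projection, assuming OpenCV's cv2.Rodrigues / cv2.projectPoints; the function
# name draw_obj_points and the (N, 3) model_points argument are hypothetical and not part
# of the dataset script.
import cv2
import numpy as np

def draw_obj_points(img, model_points, obj_r, obj_t, cam_mat, cam_dist, color=(255, 0, 0)):
    # model_points: (N, 3) object-model vertices in the object frame
    rvec, _ = cv2.Rodrigues(obj_r)                      # 3x3 rotation -> Rodrigues vector
    img_pts, _ = cv2.projectPoints(model_points, rvec, obj_t, cam_mat, cam_dist)
    img_pts = np.squeeze(img_pts).astype(int)           # (N, 2) pixel coordinates
    for u, v in img_pts:
        if 0 <= v < img.shape[0] and 0 <= u < img.shape[1]:
            img[v, u] = color                           # mark the projected vertex
    return img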
"""Data Management Structures These classes are responsible for storing the aerodynamic and structural time step information and relevant variables. """ import copy import ctypes as ct import numpy as np import sharpy.utils.algebra as algebra import sharpy.utils.multibody as mb class AeroTimeStepInfo(object): """ Aerodynamic Time step class. Contains the relevant aerodynamic attributes for a single time step. All variables should be expressed in ``G`` FoR unless otherwise stated. Attributes: ct_dimensions: Pointer to ``dimensions`` to interface the C++ library `uvlmlib`` ct_dimensions_star: Pointer to ``dimensions_star`` to interface the C++ library `uvlmlib`` dimensions (np.ndarray): Matrix defining the dimensions of the vortex grid on solid surfaces ``[num_surf x chordwise panels x spanwise panels]`` dimensions_star (np.ndarray): Matrix defining the dimensions of the vortex grid on wakes ``[num_surf x streamwise panels x spanwise panels]`` n_surf (int): Number of aerodynamic surfaces on solid bodies. Each aerodynamic surface on solid bodies will have an associted wake. zeta (list(np.ndarray): Location of solid grid vertices ``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]`` zeta_dot (list(np.ndarray)): Time derivative of ``zeta`` normals (list(np.ndarray)): Normal direction to panels at the panel center ``[n_surf][3 x chordwise nodes x spanwise nodes]`` forces (list(np.ndarray)): Forces not associated to time derivatives on grid vertices ``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]`` dynamic_forces (list(np.ndarray)): Forces associated to time derivatives on grid vertices ``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]`` zeta_star (list(np.ndarray): Location of wake grid vertices ``[n_surf][3 x (streamwise nodes + 1) x (spanwise nodes + 1)]`` u_ext (list(np.ndarray)): Background flow velocity on solid grid nodes ``[n_surf][3 x (chordwise nodes + 1) x (spanwise nodes + 1)]`` u_ext_star (list(np.ndarray)): Background flow velocity on wake grid nodes ``[n_surf][3 x (streamwise nodes + 1) x (spanwise nodes + 1)]`` gamma (list(np.ndarray)): Circulation associated to solid panels ``[n_surf][3 x chordwise nodes x spanwise nodes]`` gamma_star (list(np.ndarray)): Circulation associated to wake panels ``[n_surf][3 x streamwise nodes x spanwise nodes]`` gamma_dot (list(np.ndarray)): Time derivative of ``gamma`` inertial_steady_forces (list(np.ndarray)): Total aerodynamic steady forces in ``G`` FoR ``[n_surf x 6]`` body_steady_forces (list(np.ndarray)): Total aerodynamic steady forces in ``A`` FoR ``[n_surf x 6]`` inertial_unsteady_forces (list(np.ndarray)): Total aerodynamic unsteady forces in ``G`` FoR ``[n_surf x 6]`` body_unsteady_forces (list(np.ndarray)): Total aerodynamic unsteady forces in ``A`` FoR ``[n_surf x 6]`` postproc_cell (dict): Variables associated to cells to be postprocessed postproc_node (dict): Variables associated to nodes to be postprocessed in_global_AFoR (bool): ``True`` if the variables are stored in the global A FoR. ``False`` if they are stored in the local A FoR of each body. Always ``True`` for single-body simulations. Currently not used. control_surface_deflection (np.ndarray): Deflection of the control surfaces, in `rad` and if fitted. 
Args: dimensions (np.ndarray): Matrix defining the dimensions of the vortex grid on solid surfaces ``[num_surf x chordwise panels x spanwise panels]`` dimensions_star (np.ndarray): Matrix defining the dimensions of the vortex grid on wakes ``[num_surf x streamwise panels x spanwise panels]`` """ def __init__(self, dimensions, dimensions_star): self.ct_dimensions = None self.ct_dimensions_star = None self.dimensions = dimensions.copy() self.dimensions_star = dimensions_star.copy() self.n_surf = self.dimensions.shape[0] # generate placeholder for aero grid zeta coordinates self.zeta = [] for i_surf in range(self.n_surf): self.zeta.append(np.zeros((3, dimensions[i_surf, 0] + 1, dimensions[i_surf, 1] + 1), dtype=ct.c_double)) self.zeta_dot = [] for i_surf in range(self.n_surf): self.zeta_dot.append(np.zeros((3, dimensions[i_surf, 0] + 1, dimensions[i_surf, 1] + 1), dtype=ct.c_double)) # panel normals self.normals = [] for i_surf in range(self.n_surf): self.normals.append(np.zeros((3, dimensions[i_surf, 0], dimensions[i_surf, 1]), dtype=ct.c_double)) # panel forces self.forces = [] for i_surf in range(self.n_surf): self.forces.append(np.zeros((6, dimensions[i_surf, 0] + 1, dimensions[i_surf, 1] + 1), dtype=ct.c_double)) # panel forces self.dynamic_forces = [] for i_surf in range(self.n_surf): self.dynamic_forces.append(np.zeros((6, dimensions[i_surf, 0] + 1, dimensions[i_surf, 1] + 1), dtype=ct.c_double)) # generate placeholder for aero grid zeta_star coordinates self.zeta_star = [] for i_surf in range(self.n_surf): self.zeta_star.append(np.zeros((3, dimensions_star[i_surf, 0] + 1, dimensions_star[i_surf, 1] + 1), dtype=ct.c_double)) # placeholder for external velocity self.u_ext = [] for i_surf in range(self.n_surf): self.u_ext.append(np.zeros((3, dimensions[i_surf, 0] + 1, dimensions[i_surf, 1] + 1), dtype=ct.c_double)) self.u_ext_star = [] for i_surf in range(self.n_surf): self.u_ext_star.append(np.zeros((3, dimensions_star[i_surf, 0] + 1, dimensions_star[i_surf, 1] + 1), dtype=ct.c_double)) # allocate gamma and gamma star matrices self.gamma = [] for i_surf in range(self.n_surf): self.gamma.append(np.zeros((dimensions[i_surf, 0], dimensions[i_surf, 1]), dtype=ct.c_double)) self.gamma_star = [] for i_surf in range(self.n_surf): self.gamma_star.append(np.zeros((dimensions_star[i_surf, 0], dimensions_star[i_surf, 1]), dtype=ct.c_double)) self.gamma_dot = [] for i_surf in range(self.n_surf): self.gamma_dot.append(np.zeros((dimensions[i_surf, 0], dimensions[i_surf, 1]), dtype=ct.c_double)) # Distance from the trailing edge of the wake vertices self.dist_to_orig = [] for i_surf in range(self.n_surf): self.dist_to_orig.append(np.zeros((dimensions_star[i_surf, 0] + 1, dimensions_star[i_surf, 1] + 1), dtype=ct.c_double)) # total forces - written by AeroForcesCalculator self.inertial_steady_forces = np.zeros((self.n_surf, 6)) self.body_steady_forces = np.zeros((self.n_surf, 6)) self.inertial_unsteady_forces = np.zeros((self.n_surf, 6)) self.body_unsteady_forces = np.zeros((self.n_surf, 6)) self.postproc_cell = dict() self.postproc_node = dict() # Multibody variables self.in_global_AFoR = True self.control_surface_deflection = np.array([]) def copy(self): """ Returns a copy of a deepcopy of a :class:`~sharpy.utils.datastructures.AeroTimeStepInfo` """ copied = AeroTimeStepInfo(self.dimensions, self.dimensions_star) # generate placeholder for aero grid zeta coordinates for i_surf in range(copied.n_surf): copied.zeta[i_surf] = self.zeta[i_surf].astype(dtype=ct.c_double, copy=True, order='C') for i_surf 
in range(copied.n_surf): copied.zeta_dot[i_surf] = self.zeta_dot[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # panel normals for i_surf in range(copied.n_surf): copied.normals[i_surf] = self.normals[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # panel forces for i_surf in range(copied.n_surf): copied.forces[i_surf] = self.forces[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # panel forces for i_surf in range(copied.n_surf): copied.dynamic_forces[i_surf] = self.dynamic_forces[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # generate placeholder for aero grid zeta_star coordinates for i_surf in range(copied.n_surf): copied.zeta_star[i_surf] = self.zeta_star[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # placeholder for external velocity for i_surf in range(copied.n_surf): copied.u_ext[i_surf] = self.u_ext[i_surf].astype(dtype=ct.c_double, copy=True, order='C') for i_surf in range(copied.n_surf): copied.u_ext_star[i_surf] = self.u_ext_star[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # allocate gamma and gamma star matrices for i_surf in range(copied.n_surf): copied.gamma[i_surf] = self.gamma[i_surf].astype(dtype=ct.c_double, copy=True, order='C') for i_surf in range(copied.n_surf): copied.gamma_dot[i_surf] = self.gamma_dot[i_surf].astype(dtype=ct.c_double, copy=True, order='C') for i_surf in range(copied.n_surf): copied.gamma_star[i_surf] = self.gamma_star[i_surf].astype(dtype=ct.c_double, copy=True, order='C') for i_surf in range(copied.n_surf): copied.dist_to_orig[i_surf] = self.dist_to_orig[i_surf].astype(dtype=ct.c_double, copy=True, order='C') # total forces copied.inertial_steady_forces = self.inertial_steady_forces.astype(dtype=ct.c_double, copy=True, order='C') copied.body_steady_forces = self.body_steady_forces.astype(dtype=ct.c_double, copy=True, order='C') copied.inertial_unsteady_forces = self.inertial_unsteady_forces.astype(dtype=ct.c_double, copy=True, order='C') copied.body_unsteady_forces = self.body_unsteady_forces.astype(dtype=ct.c_double, copy=True, order='C') copied.postproc_cell = copy.deepcopy(self.postproc_cell) copied.postproc_node = copy.deepcopy(self.postproc_node) copied.control_surface_deflection = self.control_surface_deflection.astype(dtype=ct.c_double, copy=True) return copied def generate_ctypes_pointers(self): """ Generates the pointers to aerodynamic variables used to interface the C++ library ``uvlmlib`` """ self.ct_dimensions = self.dimensions.astype(dtype=ct.c_uint, copy=True) self.ct_dimensions_star = self.dimensions_star.astype(dtype=ct.c_uint, copy=True) n_surf = len(self.dimensions) from sharpy.utils.constants import NDIM self.ct_zeta_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM): self.ct_zeta_list.append(self.zeta[i_surf][i_dim, :, :].reshape(-1)) self.ct_zeta_dot_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM): self.ct_zeta_dot_list.append(self.zeta_dot[i_surf][i_dim, :, :].reshape(-1)) self.ct_zeta_star_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM): self.ct_zeta_star_list.append(self.zeta_star[i_surf][i_dim, :, :].reshape(-1)) self.ct_u_ext_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM): self.ct_u_ext_list.append(self.u_ext[i_surf][i_dim, :, :].reshape(-1)) self.ct_u_ext_star_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM): self.ct_u_ext_star_list.append(self.u_ext_star[i_surf][i_dim, :, :].reshape(-1)) self.ct_gamma_list = [] for i_surf in range(self.n_surf): 
self.ct_gamma_list.append(self.gamma[i_surf][:, :].reshape(-1)) self.ct_gamma_dot_list = [] for i_surf in range(self.n_surf): self.ct_gamma_dot_list.append(self.gamma_dot[i_surf][:, :].reshape(-1)) self.ct_gamma_star_list = [] for i_surf in range(self.n_surf): self.ct_gamma_star_list.append(self.gamma_star[i_surf][:, :].reshape(-1)) self.ct_normals_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM): self.ct_normals_list.append(self.normals[i_surf][i_dim, :, :].reshape(-1)) self.ct_forces_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM*2): self.ct_forces_list.append(self.forces[i_surf][i_dim, :, :].reshape(-1)) self.ct_dynamic_forces_list = [] for i_surf in range(self.n_surf): for i_dim in range(NDIM*2): self.ct_dynamic_forces_list.append(self.dynamic_forces[i_surf][i_dim, :, :].reshape(-1)) self.ct_dist_to_orig_list = [] for i_surf in range(self.n_surf): self.ct_dist_to_orig_list.append(self.dist_to_orig[i_surf][:, :].reshape(-1)) try: self.postproc_cell['incidence_angle'] except KeyError: with_incidence_angle = False else: with_incidence_angle = True if with_incidence_angle: self.ct_incidence_list = [] for i_surf in range(self.n_surf): self.ct_incidence_list.append(self.postproc_cell['incidence_angle'][i_surf][:, :].reshape(-1)) self.ct_p_dimensions = ((ct.POINTER(ct.c_uint)*n_surf) (* np.ctypeslib.as_ctypes(self.ct_dimensions))) self.ct_p_dimensions_star = ((ct.POINTER(ct.c_uint)*n_surf) (* np.ctypeslib.as_ctypes(self.ct_dimensions_star))) self.ct_p_zeta = ((ct.POINTER(ct.c_double)*len(self.ct_zeta_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_zeta_list])) self.ct_p_zeta_dot = ((ct.POINTER(ct.c_double)*len(self.ct_zeta_dot_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_zeta_dot_list])) self.ct_p_zeta_star = ((ct.POINTER(ct.c_double)*len(self.ct_zeta_star_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_zeta_star_list])) self.ct_p_u_ext = ((ct.POINTER(ct.c_double)*len(self.ct_u_ext_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_u_ext_list])) self.ct_p_u_ext_star = ((ct.POINTER(ct.c_double)*len(self.ct_u_ext_star_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_u_ext_star_list])) self.ct_p_gamma = ((ct.POINTER(ct.c_double)*len(self.ct_gamma_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_gamma_list])) self.ct_p_gamma_dot = ((ct.POINTER(ct.c_double)*len(self.ct_gamma_dot_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_gamma_dot_list])) self.ct_p_gamma_star = ((ct.POINTER(ct.c_double)*len(self.ct_gamma_star_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_gamma_star_list])) self.ct_p_normals = ((ct.POINTER(ct.c_double)*len(self.ct_normals_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_normals_list])) self.ct_p_forces = ((ct.POINTER(ct.c_double)*len(self.ct_forces_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_forces_list])) self.ct_p_dynamic_forces = ((ct.POINTER(ct.c_double)*len(self.ct_dynamic_forces_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_dynamic_forces_list])) self.ct_p_dist_to_orig = ((ct.POINTER(ct.c_double)*len(self.ct_dist_to_orig_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_dist_to_orig_list])) if with_incidence_angle: self.postproc_cell['incidence_angle_ct_pointer'] = ((ct.POINTER(ct.c_double)*len(self.ct_incidence_list)) (* [np.ctypeslib.as_ctypes(array) for array in self.ct_incidence_list])) def remove_ctypes_pointers(self): """ Removes the pointers to aerodynamic 
variables used to interface the C++ library ``uvlmlib`` """ try: del self.ct_p_dimensions except AttributeError: pass try: del self.ct_p_dimensions_star except AttributeError: pass try: del self.ct_p_zeta except AttributeError: pass try: del self.ct_p_zeta_star except AttributeError: pass try: del self.ct_p_zeta_dot except AttributeError: pass try: del self.ct_p_u_ext except AttributeError: pass try: del self.ct_p_u_ext_star except AttributeError: pass try: del self.ct_p_gamma except AttributeError: pass try: del self.ct_p_gamma_dot except AttributeError: pass try: del self.ct_p_gamma_star except AttributeError: pass try: del self.ct_p_normals except AttributeError: pass try: del self.ct_p_forces except AttributeError: pass try: del self.ct_p_dynamic_forces except AttributeError: pass try: del self.ct_p_dist_to_orig except AttributeError: pass for k in list(self.postproc_cell.keys()): if 'ct_list' in k: del self.postproc_cell[k] elif 'ct_pointer' in k: del self.postproc_cell[k] def init_matrix_structure(dimensions, with_dim_dimension, added_size=0): matrix = [] for i_surf in range(len(dimensions)): if with_dim_dimension: matrix.append(np.zeros((3, dimensions[i_surf, 0] + added_size, dimensions[i_surf, 1] + added_size), dtype=ct.c_double)) else: matrix.append(np.zeros((dimensions[i_surf, 0] + added_size, dimensions[i_surf, 1] + added_size), dtype=ct.c_double)) return matrix def standalone_ctypes_pointer(matrix): ct_list = [] n_surf = len(matrix) if len(matrix[0].shape) == 2: # [i_surf][m, n], like gamma for i_surf in range(n_surf): ct_list.append(matrix[i_surf][:, :].reshape(-1)) elif len(matrix[0].shape) == 3: # [i_surf][i_dim, m, n], like zeta for i_surf in range(n_surf): n_dim = matrix[i_surf].shape[0] for i_dim in range(n_dim): ct_list.append(matrix[i_surf][i_dim, :, :].reshape(-1)) ct_pointer = ((ct.POINTER(ct.c_double)*len(ct_list)) (* [np.ctypeslib.as_ctypes(array) for array in ct_list])) return ct_list, ct_pointer class StructTimeStepInfo(object): """ Structural Time Step Class. Contains the relevant attributes for the structural description of a single time step. Attributes: in_global_AFoR (bool): ``True`` if the variables are stored in the global A FoR. ``False'' if they are stored in the local A FoR of each body. Always ``True`` for single-body simulations num_node (int): Number of nodes num_elem (int): Number of elements num_node_elem (int): Number of nodes per element pos (np.ndarray): Displacements. ``[num_node x 3]`` containing the vector of ``x``, ``y`` and ``z`` coordinates (in ``A`` frame) of the beam nodes. pos_dot (np.ndarray): Velocities. Time derivative of ``pos``. pos_ddot (np.ndarray): Accelerations. Time derivative of ``pos_dot`` psi (np.ndarray): Cartesian Rotation Vector. ``[num_elem x num_node_elem x 3]`` CRV for each node in each element. psi_dot (np.ndarray): Time derivative of ``psi``. psi_ddot (np.ndarray): Time derivative of ``psi_dot``. quat (np.ndarray): Quaternion expressing the transformation between the ``A`` and ``G`` frames. for_pos (np.ndarray): ``A`` frame of reference position (with respect to the `G`` frame of reference). for_vel (np.ndarray): ``A`` frame of reference velocity. Expressed in A FoR for_acc (np.ndarray): ``A`` frame of reference acceleration. Expressed in A FoR steady_applied_forces (np.ndarray): Forces applied to the structure not associated to time derivatives ``[num_nodes x 6]``. Expressed in B FoR unsteady_applied_forces (np.ndarray): Forces applied to the structure associated to time derivatives ``[num_node x 6]``. 
Expressed in B FoR runtime_generated_forces (np.ndarray): Forces generated at runtime through runtime generators ``[num_node x 6]``. Expressed in B FoR gravity_forces (np.ndarray): Gravity forces at nodes ``[num_node x 6]``. Expressed in A FoR total_gravity_forces (np.ndarray): Total gravity forces on the structure ``[6]``. Expressed in A FoR total_forces (np.ndarray): Total forces applied to the structure ``[6]``. Expressed in A FoR q (np.ndarray): State vector associated to the structural system of equations ``[num_dof + 10]`` dqdt (np.ndarray): Time derivative of ``q`` dqddt (np.ndarray): Time derivative of ``dqdt`` postproc_cell (dict): Variables associated to cells to be postprocessed postproc_node (dict): Variables associated to nodes to be postprocessed mb_FoR_pos (np.ndarray): Position of the local A FoR of each body ``[num_bodies x 6]`` mb_FoR_vel (np.ndarray): Velocity of the local A FoR of each body ``[num_bodies x 6]`` mb_FoR_acc (np.ndarray): Acceleration of the local A FoR of each body ``[num_bodies x 6]`` mb_quat (np.ndarray): Quaternion of the local A FoR of each body ``[num_bodies x 4]`` mb_dquatdt (np.ndarray): Time derivative of ``mb_quat`` forces_constraints_nodes (np.ndarray): Forces associated to Lagrange Constraints on nodes ``[num_node x 6]`` forces_constraints_FoR (np.ndarray): Forces associated to Lagrange Contraints on frames of reference ``[num_bodies x 10]`` mb_dict (np.ndarray): Dictionary with the multibody information. It comes from the file ``case.mb.h5`` """ def __init__(self, num_node, num_elem, num_node_elem=3, num_dof=None, num_bodies=1): self.in_global_AFoR = True self.num_node = num_node self.num_elem = num_elem self.num_node_elem = num_node_elem # generate placeholder for node coordinates self.pos = np.zeros((self.num_node, 3), dtype=ct.c_double, order='F') self.pos_dot = np.zeros((self.num_node, 3), dtype=ct.c_double, order='F') self.pos_ddot = np.zeros((self.num_node, 3), dtype=ct.c_double, order='F') # placeholder for CRV self.psi = np.zeros((self.num_elem, num_node_elem, 3), dtype=ct.c_double, order='F') self.psi_dot = np.zeros((self.num_elem, num_node_elem, 3), dtype=ct.c_double, order='F') self.psi_ddot = np.zeros((self.num_elem, num_node_elem, 3), dtype=ct.c_double, order='F') # FoR data self.quat = np.array([1., 0, 0, 0], dtype=ct.c_double, order='F') self.for_pos = np.zeros((6,), dtype=ct.c_double, order='F') self.for_vel = np.zeros((6,), dtype=ct.c_double, order='F') self.for_acc = np.zeros((6,), dtype=ct.c_double, order='F') self.steady_applied_forces = np.zeros((self.num_node, 6), dtype=ct.c_double, order='F') self.unsteady_applied_forces = np.zeros((self.num_node, 6), dtype=ct.c_double, order='F') self.runtime_generated_forces = np.zeros((self.num_node, 6), dtype=ct.c_double, order='F') self.gravity_forces = np.zeros((self.num_node, 6), dtype=ct.c_double, order='F') self.total_gravity_forces = np.zeros((6,), dtype=ct.c_double, order='F') self.total_forces = np.zeros((6,), dtype=ct.c_double, order='F') if num_dof is None: # For backwards compatibility num_dof = (self.num_node.value - 1)*6 self.q = np.zeros((num_dof.value + 6 + 4,), dtype=ct.c_double, order='F') self.dqdt = np.zeros((num_dof.value + 6 + 4,), dtype=ct.c_double, order='F') self.dqddt = np.zeros((num_dof.value + 6 + 4,), dtype=ct.c_double, order='F') self.postproc_cell = dict() self.postproc_node = dict() # Multibody self.mb_FoR_pos = np.zeros((num_bodies,6), dtype=ct.c_double, order='F') self.mb_FoR_vel = np.zeros((num_bodies,6), dtype=ct.c_double, order='F') self.mb_FoR_acc 
= np.zeros((num_bodies,6), dtype=ct.c_double, order='F') self.mb_quat = np.zeros((num_bodies,4), dtype=ct.c_double, order='F') self.mb_dquatdt = np.zeros((num_bodies, 4), dtype=ct.c_double, order='F') self.forces_constraints_nodes = np.zeros((self.num_node, 6), dtype=ct.c_double, order='F') self.forces_constraints_FoR = np.zeros((num_bodies, 10), dtype=ct.c_double, order='F') self.mb_dict = None def copy(self): """ Returns a copy of a deepcopy of a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` """ copied = StructTimeStepInfo(self.num_node, self.num_elem, self.num_node_elem, ct.c_int(len(self.q)-10), self.mb_quat.shape[0]) copied.in_global_AFoR = self.in_global_AFoR copied.num_node = self.num_node copied.num_elem = self.num_elem copied.num_node_elem = self.num_node_elem # generate placeholder for node coordinates copied.pos = self.pos.astype(dtype=ct.c_double, order='F', copy=True) copied.pos_dot = self.pos_dot.astype(dtype=ct.c_double, order='F', copy=True) copied.pos_ddot = self.pos_ddot.astype(dtype=ct.c_double, order='F', copy=True) # placeholder for CRV copied.psi = self.psi.astype(dtype=ct.c_double, order='F', copy=True) copied.psi_dot = self.psi_dot.astype(dtype=ct.c_double, order='F', copy=True) copied.psi_ddot = self.psi_ddot.astype(dtype=ct.c_double, order='F', copy=True) # FoR data copied.quat = self.quat.astype(dtype=ct.c_double, order='F', copy=True) copied.for_pos = self.for_pos.astype(dtype=ct.c_double, order='F', copy=True) copied.for_vel = self.for_vel.astype(dtype=ct.c_double, order='F', copy=True) copied.for_acc = self.for_acc.astype(dtype=ct.c_double, order='F', copy=True) copied.steady_applied_forces = self.steady_applied_forces.astype(dtype=ct.c_double, order='F', copy=True) copied.unsteady_applied_forces = self.unsteady_applied_forces.astype(dtype=ct.c_double, order='F', copy=True) copied.runtime_generated_forces = self.runtime_generated_forces.astype(dtype=ct.c_double, order='F', copy=True) copied.gravity_forces = self.gravity_forces.astype(dtype=ct.c_double, order='F', copy=True) copied.total_gravity_forces = self.total_gravity_forces.astype(dtype=ct.c_double, order='F', copy=True) copied.total_forces = self.total_forces.astype(dtype=ct.c_double, order='F', copy=True) copied.q = self.q.astype(dtype=ct.c_double, order='F', copy=True) copied.dqdt = self.dqdt.astype(dtype=ct.c_double, order='F', copy=True) copied.dqddt = self.dqddt.astype(dtype=ct.c_double, order='F', copy=True) copied.postproc_cell = copy.deepcopy(self.postproc_cell) copied.postproc_node = copy.deepcopy(self.postproc_node) #if not self.mb_quat is None: copied.mb_FoR_pos = self.mb_FoR_pos.astype(dtype=ct.c_double, order='F', copy=True) copied.mb_FoR_vel = self.mb_FoR_vel.astype(dtype=ct.c_double, order='F', copy=True) copied.mb_FoR_acc = self.mb_FoR_acc.astype(dtype=ct.c_double, order='F', copy=True) copied.mb_quat = self.mb_quat.astype(dtype=ct.c_double, order='F', copy=True) copied.mb_dquatdt = self.mb_dquatdt.astype(dtype=ct.c_double, order='F', copy=True) copied.forces_constraints_nodes = self.forces_constraints_nodes.astype(dtype=ct.c_double, order='F', copy=True) copied.forces_constraints_FoR = self.forces_constraints_FoR.astype(dtype=ct.c_double, order='F', copy=True) copied.mb_dict = copy.deepcopy(self.mb_dict) return copied def glob_pos(self, include_rbm=True): """ Returns the position of the nodes in ``G`` FoR """ coords = self.pos.copy() c = self.cga() for i_node in range(self.num_node): coords[i_node, :] = np.dot(c, coords[i_node, :]) if include_rbm: coords[i_node, :] += 
self.for_pos[0:3] return coords def cga(self): return algebra.quat2rotation(self.quat) def cag(self): return self.cga().T def euler_angles(self): """ Returns the 3 Euler angles (roll, pitch, yaw) for a given time step. :returns: `np.array` (roll, pitch, yaw) in radians. """ return algebra.quat2euler(self.quat) def get_body(self, beam, num_dof_ibody, ibody): """ get_body Extract the body number ``ibody`` from a multibody system This function returns a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` class (``ibody_StructTimeStepInfo``) that only includes the body number ``ibody`` of the original multibody system ``self`` Args: beam(:class:`~sharpy.structure.models.beam.Beam`): beam information of the multibody system num_dof_ibody (int): Number of degrees of freedom associated to the ``ibody`` ibody(int): body number to be extracted Returns: StructTimeStepInfo: timestep information of the isolated body """ # Define the nodes and elements belonging to the body ibody_elems, ibody_nodes = mb.get_elems_nodes_list(beam, ibody) ibody_num_node = len(ibody_nodes) ibody_num_elem = len(ibody_elems) ibody_first_dof = 0 for index_body in range(ibody - 1): aux_elems, aux_nodes = mb.get_elems_nodes_list(beam, index_body) ibody_first_dof += np.sum(beam.vdof[aux_nodes] > -1)*6 # Initialize the new StructTimeStepInfo ibody_StructTimeStepInfo = StructTimeStepInfo(ibody_num_node, ibody_num_elem, self.num_node_elem, num_dof = num_dof_ibody, num_bodies = beam.num_bodies) # Assign all the variables ibody_StructTimeStepInfo.quat = self.mb_quat[ibody, :].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.for_pos = self.mb_FoR_pos[ibody, :].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.for_vel = self.mb_FoR_vel[ibody, :] ibody_StructTimeStepInfo.for_acc = self.mb_FoR_acc[ibody, :] ibody_StructTimeStepInfo.pos = self.pos[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.pos_dot = self.pos_dot[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.pos_ddot = self.pos_ddot[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.psi = self.psi[ibody_elems,:,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.psi_dot = self.psi_dot[ibody_elems,:,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.psi_ddot = self.psi_ddot[ibody_elems,:,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.steady_applied_forces = self.steady_applied_forces[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.unsteady_applied_forces = self.unsteady_applied_forces[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.runtime_generated_forces = self.runtime_generated_forces[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.gravity_forces = self.gravity_forces[ibody_nodes,:].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.total_gravity_forces = self.total_gravity_forces.astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.q[0:num_dof_ibody.value] = self.q[ibody_first_dof:ibody_first_dof+num_dof_ibody.value].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.dqdt[0:num_dof_ibody.value] = self.dqdt[ibody_first_dof:ibody_first_dof+num_dof_ibody.value].astype(dtype=ct.c_double, order='F', copy=True) 
ibody_StructTimeStepInfo.dqddt[0:num_dof_ibody.value] = self.dqddt[ibody_first_dof:ibody_first_dof+num_dof_ibody.value].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.dqdt[-10:-4] = ibody_StructTimeStepInfo.for_vel.astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.dqddt[-10:-4] = ibody_StructTimeStepInfo.for_acc.astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.dqdt[-4:] = self.quat.astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.dqddt[-4:] = self.mb_dquatdt[ibody, :].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.mb_dquatdt[ibody, :] = self.mb_dquatdt[ibody, :].astype(dtype=ct.c_double, order='F', copy=True) ibody_StructTimeStepInfo.mb_quat = None ibody_StructTimeStepInfo.mb_FoR_pos = None ibody_StructTimeStepInfo.mb_FoR_vel = None ibody_StructTimeStepInfo.mb_FoR_acc = None return ibody_StructTimeStepInfo def change_to_local_AFoR(self, for0_pos, for0_vel, quat0): """ change_to_local_AFoR Reference a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` to the local A frame of reference Args: for0_pos (np.ndarray): Position of the global A FoR for0_vel (np.ndarray): Velocity of the global A FoR quat0 (np.ndarray): Quaternion of the global A FoR """ # Define the rotation matrices between the different FoR CAslaveG = algebra.quat2rotation(self.quat).T CGAmaster = algebra.quat2rotation(quat0) Csm = np.dot(CAslaveG, CGAmaster) delta_vel_ms = np.zeros((6,)) delta_pos_ms = self.for_pos[0:3] - for0_pos[0:3] delta_vel_ms[0:3] = np.dot(CAslaveG.T, self.for_vel[0:3]) - np.dot(CGAmaster, for0_vel[0:3]) delta_vel_ms[3:6] = np.dot(CAslaveG.T, self.for_vel[3:6]) - np.dot(CGAmaster, for0_vel[3:6]) # Modify position for inode in range(self.pos.shape[0]): pos_previous = self.pos[inode,:] + np.zeros((3,),) self.pos[inode,:] = np.dot(Csm,self.pos[inode,:]) - np.dot(CAslaveG,delta_pos_ms[0:3]) # self.pos_dot[inode,:] = np.dot(Csm,self.pos_dot[inode,:]) - np.dot(CAslaveG,delta_vel_ms[0:3]) self.pos_dot[inode,:] = (np.dot(Csm, self.pos_dot[inode,:]) - np.dot(CAslaveG, delta_vel_ms[0:3]) - np.dot(algebra.skew(np.dot( CAslaveG, self.for_vel[3:6])), self.pos[inode,:]) + np.dot(Csm, np.dot(algebra.skew(np.dot(CGAmaster.T, for0_vel[3:6])), pos_previous))) self.gravity_forces[inode,0:3] = np.dot(Csm, self.gravity_forces[inode,0:3]) self.gravity_forces[inode,3:6] = np.dot(Csm, self.gravity_forces[inode,3:6]) # Modify local rotations for ielem in range(self.psi.shape[0]): for inode in range(3): psi_previous = self.psi[ielem,inode,:] + np.zeros((3,),) self.psi[ielem,inode,:] = algebra.rotation2crv(np.dot(Csm,algebra.crv2rotation(self.psi[ielem,inode,:]))) self.psi_dot[ielem, inode, :] = np.dot(np.dot(algebra.crv2tan(self.psi[ielem,inode,:]),Csm), (np.dot(algebra.crv2tan(psi_previous).T,self.psi_dot[ielem,inode,:]) - np.dot(CGAmaster.T,delta_vel_ms[3:6]))) def change_to_global_AFoR(self, for0_pos, for0_vel, quat0): """ Reference a :class:`~sharpy.utils.datastructures.StructTimeStepInfo` to the global A frame of reference Args: for0_pos (np.ndarray): Position of the global A FoR for0_vel (np.ndarray): Velocity of the global A FoR quat0 (np.ndarray): Quaternion of the global A FoR """ # Define the rotation matrices between the different FoR CAslaveG = algebra.quat2rotation(self.quat).T CGAmaster = algebra.quat2rotation(quat0) Csm = np.dot(CAslaveG, CGAmaster) delta_vel_ms = np.zeros((6,)) delta_pos_ms = self.for_pos[0:3] - for0_pos[0:3] delta_vel_ms[0:3] = np.dot(CAslaveG.T, self.for_vel[0:3]) - 
np.dot(CGAmaster, for0_vel[0:3]) delta_vel_ms[3:6] = np.dot(CAslaveG.T, self.for_vel[3:6]) - np.dot(CGAmaster, for0_vel[3:6]) for inode in range(self.pos.shape[0]): pos_previous = self.pos[inode,:] + np.zeros((3,),) self.pos[inode,:] = (np.dot(
np.transpose(Csm)  # api: numpy.transpose
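# Minimal sketch of what StructTimeStepInfo.glob_pos computes: rotate the nodal coordinates
# from the A frame into the G frame with the quaternion and add the A-frame origin.
# quat_to_rotation is the standard scalar-first quaternion formula and is assumed to match
# sharpy.utils.algebra.quat2rotation; it is written out here only to keep the sketch
# self-contained.
import numpy as np

def quat_to_rotation(q):
    w, x, y, z = q / np.linalg.norm(q)
    return np.array([
        [1 - 2 * (y * y + z * z), 2 * (x * y - w * z),     2 * (x * z + w * y)],
        [2 * (x * y + w * z),     1 - 2 * (x * x + z * z), 2 * (y * z - w * x)],
        [2 * (x * z - w * y),     2 * (y * z + w * x),     1 - 2 * (x * x + y * y)],
    ])

def global_positions(pos, quat, for_pos, include_rbm=True):
    c_ga = quat_to_rotation(quat)          # A -> G rotation matrix
    coords = pos @ c_ga.T                  # rotate every node at once
    if include_rbm:
        coords = coords + for_pos[0:3]     # add the rigid-body translation
    return coords

# the identity quaternion [1, 0, 0, 0] leaves the coordinates unchanged
pos = np.array([[1.0, 0.0, 0.0], [0.0, 2.0, 0.0]])
print(global_positions(pos, np.array([1.0, 0.0, 0.0, 0.0]), np.zeros(6)))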
# Author: <NAME> <<EMAIL>> # My imports from . import constants # Regular imports from datetime import datetime from copy import deepcopy from scipy import signal import numpy as np import warnings import librosa import random import torch # TODO - torch Tensor compatibility # TODO - try to ensure these won't break if extra dimensions (e.g. batch) are included # TODO - make sure there are no hard assignments (make return copies instead of original where necessary) ################################################## # TO BATCH-FRIENDLY NOTES # ################################################## def notes_to_batched_notes(pitches, intervals): """ Convert loose note groups into batch-friendly storage. Parameters ---------- pitches : ndarray (N) Array of pitches corresponding to notes N - number of notes intervals : ndarray (N x 2) Array of onset-offset time pairs corresponding to notes N - number of notes Returns ---------- batched_notes : ndarray (N x 3) Array of note intervals and pitches by row N - number of notes """ # Default the batched notes to an empty array of the correct shape batched_notes = np.empty([0, 3]) if len(pitches) > 0: # Add an extra dimension to the pitches to match dimensionality of intervals pitches = np.expand_dims(pitches, axis=-1) # Concatenate the loose arrays to obtain ndarray([[onset, offset, pitch]]) batched_notes = np.concatenate((intervals, pitches), axis=-1) return batched_notes def batched_notes_to_hz(batched_notes): """ Convert batched notes from MIDI to Hertz. Parameters ---------- batched_notes : ndarray (N x 3) Array of note intervals and MIDI pitches by row N - number of notes Returns ---------- batched_notes : ndarray (N x 3) Array of note intervals and Hertz pitches by row N - number of notes """ # Convert pitch column to Hertz batched_notes[..., 2] = librosa.midi_to_hz(batched_notes[..., 2]) return batched_notes def batched_notes_to_midi(batched_notes): """ Convert batched notes from Hertz to MIDI. Parameters ---------- batched_notes : ndarray (N x 3) Array of note intervals and Hertz pitches by row N - number of notes Returns ---------- batched_notes : ndarray (N x 3) Array of note intervals and MIDI pitches by row N - number of notes """ # Convert pitch column to MIDI batched_notes[..., 2] = librosa.hz_to_midi(batched_notes[..., 2]) return batched_notes def slice_batched_notes(batched_notes, start_time, stop_time): """ Remove note entries occurring outside of time window. Parameters ---------- batched_notes : ndarray (N x 3) Array of note intervals and pitches by row N - number of notes start_time : float Beginning of time window stop_time : float End of time window Returns ---------- batched_notes : ndarray (N x 3) Array of note intervals and pitches by row N - number of notes """ # Remove notes with offsets before the slice start time batched_notes = batched_notes[batched_notes[:, 1] > start_time] # Remove notes with onsets after the slice stop time batched_notes = batched_notes[batched_notes[:, 0] < stop_time] # Clip onsets at the slice start time batched_notes[:, 0] = np.maximum(batched_notes[:, 0], start_time) # Clip offsets at the slice stop time batched_notes[:, 1] = np.minimum(batched_notes[:, 1], stop_time) return batched_notes ################################################## # TO NOTES # ################################################## def batched_notes_to_notes(batched_notes): """ Convert batch-friendly notes into loose note groups. 
Parameters ---------- batched_notes : ndarray (N x 3) Array of note intervals and pitches by row N - number of notes Returns ---------- pitches : ndarray (N) Array of pitches corresponding to notes N - number of notes intervals : ndarray (N x 2) Array of onset-offset time pairs corresponding to notes N - number of notes """ # Split along the final dimension into the loose groups pitches, intervals = batched_notes[..., 2], batched_notes[:, :2] return pitches, intervals def stacked_notes_to_notes(stacked_notes): """ Convert a dictionary of stacked notes into a single representation. Parameters ---------- stacked_notes : dict Dictionary containing (slice -> (pitches, intervals)) pairs Returns ---------- pitches : ndarray (N) Array of pitches corresponding to notes N - number of notes intervals : ndarray (N x 2) Array of onset-offset time pairs corresponding to notes N - number of notes """ # Obtain the note pairs from the dictionary values note_pairs = list(stacked_notes.values()) # Extract the pitches and intervals respectively pitches = np.concatenate([pair[0] for pair in note_pairs]) intervals = np.concatenate([pair[1] for pair in note_pairs]) # Sort the notes by onset pitches, intervals = sort_notes(pitches, intervals) return pitches, intervals def notes_to_hz(pitches): """ Convert note pitches from MIDI to Hertz. Array of corresponding intervals does not change and is assumed to be managed outside of the function. Parameters ---------- pitches : ndarray (N) Array of MIDI pitches corresponding to notes N - number of notes Returns ---------- pitches : ndarray (N) Array of Hertz pitches corresponding to notes N - number of notes """ # Convert to Hertz pitches = librosa.midi_to_hz(pitches) return pitches def notes_to_midi(pitches): """ Convert note pitches from Hertz to MIDI. Array of corresponding intervals does not change and is assumed to be managed outside of the function. Parameters ---------- pitches : ndarray (N) Array of Hertz pitches corresponding to notes N - number of notes Returns ---------- pitches : ndarray (N) Array of MIDI pitches corresponding to notes N - number of notes """ # Convert to MIDI pitches = librosa.hz_to_midi(pitches) return pitches ################################################## # TO STACKED NOTES # ################################################## def notes_to_stacked_notes(pitches, intervals, i=0): """ Convert a collection of notes into a dictionary of stacked notes. Parameters ---------- pitches : ndarray (N) Array of pitches corresponding to notes N - number of notes intervals : ndarray (N x 2) Array of onset-offset time pairs corresponding to notes N - number of notes i : int Slice key to use Returns ---------- stacked_notes : dict Dictionary containing (slice -> (pitches, intervals)) pairs """ # Initialize a dictionary to hold the notes stacked_notes = dict() # Add the pitch-interval pairs to the stacked notes dictionary under the slice key stacked_notes[i] = sort_notes(pitches, intervals) return stacked_notes def stacked_notes_to_hz(stacked_notes): """ Convert stacked notes from MIDI to Hertz. 
Parameters ---------- stacked_notes : dict Dictionary containing (slice -> (pitches (MIDI), intervals)) pairs Returns ---------- stacked_notes : dict Dictionary containing (slice -> (pitches (Hertz), intervals)) pairs """ # Make a copy of the stacked notes for conversion stacked_notes = deepcopy(stacked_notes) # Loop through the stack of notes for slc in stacked_notes.keys(): # Get the pitches from the slice pitches, intervals = stacked_notes[slc] # Convert the pitches to Hertz pitches = notes_to_hz(pitches) # Add converted slice back to stack stacked_notes[slc] = pitches, intervals return stacked_notes def stacked_notes_to_midi(stacked_notes): """ Convert stacked notes from Hertz to MIDI. Parameters ---------- stacked_notes : dict Dictionary containing (slice -> (pitches (Hertz), intervals)) pairs Returns ---------- stacked_notes : dict Dictionary containing (slice -> (pitches (MIDI), intervals)) pairs """ # Make a copy of the stacked notes for conversion stacked_notes = deepcopy(stacked_notes) # Loop through the stack of notes for slc in stacked_notes.keys(): # Get the pitches from the slice pitches, intervals = stacked_notes[slc] # Convert the pitches to MIDI pitches = notes_to_midi(pitches) # Add converted slice back to stack stacked_notes[slc] = pitches, intervals return stacked_notes ################################################## # TO PITCH LIST # ################################################## def stacked_pitch_list_to_pitch_list(stacked_pitch_list): """ Convert a dictionary of stacked pitch lists into a single representation. Parameters ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list)) pairs Returns ---------- times : ndarray (N) Time in seconds of beginning of each frame N - number of time samples (frames) pitch_list : list of ndarray (N x [...]) Array of pitches corresponding to notes N - number of pitch observations (frames) """ # Obtain the time-pitch list pairs from the dictionary values pitch_list_pairs = list(stacked_pitch_list.values()) # Collapse the times from each pitch_list into one array times = np.unique(np.concatenate([pair[0] for pair in pitch_list_pairs])) # Initialize empty pitch arrays for each time entry pitch_list = [np.empty(0)] * times.size # Loop through each pitch list for slice_times, slice_pitch_arrays in pitch_list_pairs: # Loop through the pitch list entries for entry in range(len(slice_pitch_arrays)): # Determine where this entry belongs in the new pitch list idx = np.where(times == slice_times[entry])[0].item() # Insert the frequencies at the corresponding time pitch_list[idx] = np.append(pitch_list[idx], slice_pitch_arrays[entry]) # Sort the time-pitch array pairs by time times, pitch_list = sort_pitch_list(times, pitch_list) return times, pitch_list def multi_pitch_to_pitch_list(multi_pitch, profile): """ Convert a multi pitch array into a pitch list. Array of corresponding times does not change and is assumed to be managed outside of the function. 
Parameters ---------- multi_pitch : ndarray (F x T) Discrete pitch activation map F - number of discrete pitches T - number of frames profile : InstrumentProfile (instrument.py) Instrument profile detailing experimental setup Returns ---------- pitch_list : list of ndarray (T x [...]) Array of pitches corresponding to notes T - number of pitch observations (frames) """ # Determine the number of frames in the multi pitch array num_frames = multi_pitch.shape[-1] # Initialize empty pitch arrays for each time entry pitch_list = [np.empty(0)] * num_frames # Determine which frames contain pitch activity non_silent_frames = np.where(np.sum(multi_pitch, axis=-2) > 0)[-1] # Loop through the frames containing pitch activity for i in list(non_silent_frames): # Determine the MIDI pitches active in the frame and add to the list pitch_list[i] = profile.low + np.where(multi_pitch[..., i])[-1] return pitch_list def pitch_list_to_hz(pitch_list): """ Convert pitch list from MIDI to Hertz. Array of corresponding times does not change and is assumed to be managed outside of the function. Parameters ---------- pitch_list : list of ndarray (T x [...]) Array of MIDI pitches corresponding to notes T - number of pitch observations (frames) Returns ---------- pitch_list : list of ndarray (T x [...]) Array of Hertz pitches corresponding to notes T - number of pitch observations (frames) """ # Convert to Hertz pitch_list = [librosa.midi_to_hz(pitch_list[i]) for i in range(len(pitch_list))] return pitch_list def pitch_list_to_midi(pitch_list): """ Convert pitch list from Hertz to MIDI. Array of corresponding times does not change and is assumed to be managed outside of the function. Parameters ---------- pitch_list : list of ndarray (T x [...]) Array of Hertz pitches corresponding to notes T - number of pitch observations (frames) Returns ---------- pitch_list : list of ndarray (T x [...]) Array of MIDI pitches corresponding to notes T - number of pitch observations (frames) """ # Convert to MIDI pitch_list = [librosa.hz_to_midi(pitch_list[i]) for i in range(len(pitch_list))] return pitch_list ################################################## # TO STACKED PITCH LIST # ################################################## def pitch_list_to_stacked_pitch_list(times, pitch_list, i=0): """ Convert a pitch list into a dictionary of stacked pitch lists. Parameters ---------- times : ndarray (N) Time in seconds of beginning of each frame N - number of time samples (frames) pitch_list : list of ndarray (N x [...]) Array of pitches corresponding to notes N - number of pitch observations (frames) i : int Slice key to use Returns ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list)) pairs """ # Initialize a dictionary to hold the pitch_list stacked_pitch_list = dict() # Add the time-pitch array pairs to the stacked notes dictionary under the slice key stacked_pitch_list[i] = sort_pitch_list(times, pitch_list) return stacked_pitch_list def stacked_multi_pitch_to_stacked_pitch_list(stacked_multi_pitch, times, profile): """ Convert a stack of multi pitch arrays into a stack of pitch lists. 
Parameters ---------- stacked_multi_pitch : ndarray (S x F x T) Array of multiple discrete pitch activation maps S - number of slices in stack F - number of discrete pitches T - number of frames times : ndarray (T) Time in seconds of beginning of each frame T - number of time samples (frames) profile : InstrumentProfile (instrument.py) Instrument profile detailing experimental setup Returns ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list)) pairs """ # Determine the number of slices in the stacked multi pitch array stack_size = stacked_multi_pitch.shape[-3] # Initialize a dictionary to hold the pitch lists stacked_pitch_list = dict() # Loop through the slices of the stack for slc in range(stack_size): # Extract the multi pitch array pertaining to this slice slice_multi_pitch = stacked_multi_pitch[slc] # Convert the multi pitch array to a pitch list slice_pitch_list = multi_pitch_to_pitch_list(slice_multi_pitch, profile) # Add the pitch list to the stacked pitch list dictionary under the slice key stacked_pitch_list.update(pitch_list_to_stacked_pitch_list(times, slice_pitch_list, slc)) return stacked_pitch_list def stacked_pitch_list_to_hz(stacked_pitch_list): """ Convert stacked pitch list from MIDI to Hertz. Parameters ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list (MIDI))) pairs Returns ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list (Hertz)) pairs """ # Make a copy of the stacked pitch lists for conversion stacked_pitch_list = deepcopy(stacked_pitch_list) # Loop through the stack of pitch lists for slc in stacked_pitch_list.keys(): # Get the pitch list from the slice times, pitch_list = stacked_pitch_list[slc] # Convert the pitches to Hertz pitch_list = pitch_list_to_hz(pitch_list) # Add converted slice back to stack stacked_pitch_list[slc] = times, pitch_list return stacked_pitch_list def stacked_pitch_list_to_midi(stacked_pitch_list): """ Convert stacked pitch list from Hertz to MIDI. Parameters ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list (Hertz))) pairs Returns ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list (MIDI)) pairs """ # Make a copy of the stacked pitch lists for conversion stacked_pitch_list = deepcopy(stacked_pitch_list) # Loop through the stack of pitch lists for slc in stacked_pitch_list.keys(): # Get the pitches from the slice times, pitch_list = stacked_pitch_list[slc] # Convert the pitches to MIDI pitch_list = pitch_list_to_midi(pitch_list) # Add converted slice back to stack stacked_pitch_list[slc] = times, pitch_list return stacked_pitch_list ################################################## # TO MULTI PITCH # ################################################## def notes_to_multi_pitch(pitches, intervals, times, profile): """ Convert loose MIDI note groups into a multi pitch array. 
Parameters ---------- pitches : ndarray (N) Array of pitches corresponding to notes in MIDI format N - number of notes intervals : ndarray (N x 2) Array of onset-offset time pairs corresponding to notes N - number of notes times : ndarray (N) Time in seconds of beginning of each frame N - number of time samples (frames) profile : InstrumentProfile (instrument.py) Instrument profile detailing experimental setup Returns ---------- multi_pitch : ndarray (F x T) Discrete pitch activation map F - number of discrete pitches T - number of frames """ # Determine the dimensionality of the multi pitch array num_pitches = profile.get_range_len() num_frames = len(times) # Initialize an empty multi pitch array multi_pitch = np.zeros((num_pitches, num_frames)) # Convert the pitches to number of semitones from lowest note pitches = np.round(pitches - profile.low).astype(constants.UINT) # Duplicate the array of times for each note and stack along a new axis times = np.concatenate([[times]] * max(1, len(pitches)), axis=0) # Determine the frame where each note begins and ends onsets = np.argmin((times <= intervals[..., :1]), axis=1) - 1 offsets = np.argmin((times < intervals[..., 1:]), axis=1) - 1 # Clip all offsets at last frame - they will end up at -1 from # previous operation if they occurred beyond last frame time offsets[offsets == -1] = num_frames - 1 # Loop through each note for i in range(len(pitches)): # Populate the multi pitch array with activations for the note multi_pitch[pitches[i], onsets[i] : offsets[i] + 1] = 1 return multi_pitch def pitch_list_to_multi_pitch(times, pitch_list, profile, tolerance=0.5): """ Convert a MIDI pitch list into a dictionary of stacked pitch lists. Parameters ---------- times : ndarray (N) Time in seconds of beginning of each frame N - number of time samples (frames) pitch_list : list of ndarray (N x [...]) Array of pitches corresponding to notes N - number of pitch observations (frames) profile : InstrumentProfile (instrument.py) Instrument profile detailing experimental setup tolerance : float Amount of semitone deviation allowed Returns ---------- multi_pitch : ndarray (F x T) Discrete pitch activation map F - number of discrete pitches T - number of frames """ # Determine the dimensionality of the multi pitch array num_pitches = profile.get_range_len() num_frames = len(times) # Initialize an empty multi pitch array multi_pitch = np.zeros((num_pitches, num_frames)) # Loop through each note for i in range(len(pitch_list)): # Calculate the pitch semitone difference from the lowest note difference = pitch_list[i] - profile.low # Determine the amount of semitone deviation for each pitch deviation = difference % 1 deviation[deviation > 0.5] -= 1 deviation = np.abs(deviation) # Convert the pitches to number of semitones from lowest note pitches = np.round(difference[deviation < tolerance]).astype(constants.UINT) # Populate the multi pitch array with activations multi_pitch[pitches, i] = 1 return multi_pitch def stacked_multi_pitch_to_multi_pitch(stacked_multi_pitch): """ Collapse stacked multi pitch arrays into a single representation. 
Parameters ---------- stacked_multi_pitch : ndarray (S x F x T) Array of multiple discrete pitch activation maps S - number of slices in stack F - number of discrete pitches T - number of frames Returns ---------- multi_pitch : ndarray (F x T) Discrete pitch activation map F - number of discrete pitches T - number of frames """ # Collapse the stacked arrays into one using the max operation multi_pitch = np.max(stacked_multi_pitch, axis=-3) return multi_pitch ################################################## # TO STACKED MULTI PITCH # ################################################## def stacked_notes_to_stacked_multi_pitch(stacked_notes, times, profile): """ Convert a dictionary of MIDI note groups into a stack of multi pitch arrays. Parameters ---------- stacked_notes : dict Dictionary containing (slice -> (pitches, intervals)) pairs times : ndarray (N) Time in seconds of beginning of each frame N - number of time samples (frames) profile : InstrumentProfile (instrument.py) Instrument profile detailing experimental setup Returns ---------- stacked_multi_pitch : ndarray (S x F x T) Array of multiple discrete pitch activation maps S - number of slices in stack F - number of discrete pitches T - number of frames """ # Initialize an empty list to hold the multi pitch arrays stacked_multi_pitch = list() # Loop through the slices of notes for slc in range(len(stacked_notes)): # Get the pitches and intervals from the slice pitches, intervals = stacked_notes[slc] # Convert to multi pitch and add to the list slice_multi_pitch = notes_to_multi_pitch(pitches, intervals, times, profile) stacked_multi_pitch.append(multi_pitch_to_stacked_multi_pitch(slice_multi_pitch)) # Collapse the list into an array stacked_multi_pitch = np.concatenate(stacked_multi_pitch) return stacked_multi_pitch def stacked_pitch_list_to_stacked_multi_pitch(stacked_pitch_list, profile): """ Convert a stacked MIDI pitch list into a stack of multi pitch arrays. Parameters ---------- stacked_pitch_list : dict Dictionary containing (slice -> (times, pitch_list)) pairs profile : InstrumentProfile (instrument.py) Instrument profile detailing experimental setup Returns ---------- stacked_multi_pitch : ndarray (S x F x T) Array of multiple discrete pitch activation maps S - number of slices in stack F - number of discrete pitches T - number of frames """ # Initialize an empty list to hold the multi pitch arrays stacked_multi_pitch = list() # Loop through the slices of notes for slc in range(len(stacked_pitch_list)): # Get the pitches and intervals from the slice times, pitch_list = stacked_pitch_list[slc] multi_pitch = pitch_list_to_multi_pitch(times, pitch_list, profile) stacked_multi_pitch.append(multi_pitch_to_stacked_multi_pitch(multi_pitch)) # Collapse the list into an array stacked_multi_pitch = np.concatenate(stacked_multi_pitch) return stacked_multi_pitch def multi_pitch_to_stacked_multi_pitch(multi_pitch): """ Convert a multi pitch array into a stacked representation. 
Parameters ---------- multi_pitch : ndarray (F x T) Discrete pitch activation map F - number of discrete pitches T - number of frames Returns ---------- stacked_multi_pitch : ndarray (S x F x T) Array of multiple discrete pitch activation maps S - number of slices in stack F - number of discrete pitches T - number of frames """ # Add an extra dimension for slice stacked_multi_pitch = np.expand_dims(multi_pitch, axis=-3) return stacked_multi_pitch def tablature_to_stacked_multi_pitch(tablature, profile): """ Convert a tablature representation into a stacked multi pitch array. Array of corresponding times does not change and is assumed to be managed outside of the function. Parameters ---------- tablature : ndarray (S x T) Array of class membership for multiple degrees of freedom (e.g. strings) S - number of strings or degrees of freedom T - number of frames profile : TablatureProfile (instrument.py) Tablature instrument profile detailing experimental setup Returns ---------- stacked_multi_pitch : ndarray (S x F x T) Array of multiple discrete pitch activation maps S - number of slices in stack F - number of discrete pitches T - number of frames """ # Determine the number of degrees of freedom and frames num_dofs, num_frames = tablature.shape # Determine the total number of pitches to be incldued num_pitches = profile.get_range_len() # Initialize and empty stacked multi pitch array stacked_multi_pitch = np.zeros((num_dofs, num_pitches, num_frames)) # Obtain the tuning for the tablature (lowest note for each degree of freedom) tuning = profile.get_midi_tuning() # Determine the place in the stacked multi pitch array where each degree of freedom begins dof_start =
np.expand_dims(tuning - profile.low, -1)  # api: numpy.expand_dims
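# Tiny end-to-end illustration of the batched-note helpers defined above (synthetic values,
# not taken from any dataset): stack pitches and intervals into an (N x 3) array, then keep
# only the notes overlapping a 1-2 s window, mirroring notes_to_batched_notes and
# slice_batched_notes.
import numpy as np

pitches = np.array([60.0, 64.0, 67.0])                        # MIDI pitches
intervals = np.array([[0.0, 1.5], [1.0, 2.5], [3.0, 4.0]])    # onset/offset pairs in seconds

batched = np.concatenate((intervals, pitches[:, None]), axis=-1)   # rows of [onset, offset, pitch]

start_time, stop_time = 1.0, 2.0
batched = batched[batched[:, 1] > start_time]            # drop notes that end before the window
batched = batched[batched[:, 0] < stop_time]             # drop notes that start after the window
batched[:, 0] = np.maximum(batched[:, 0], start_time)    # clip onsets to the window
batched[:, 1] = np.minimum(batched[:, 1], stop_time)     # clip offsets to the window
print(batched)   # only the first two notes survive, clipped to [1.0, 1.5] and [1.0, 2.0]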
# Utility functions for the course Robot Modelling # <NAME> (<EMAIL>), sept. 2016 # # Additional functions added for more functionality # <NAME> (<EMAIL>), sept. 2018 # <NAME> (<EMAIL>), sept. 2018 ############################################################################### import numpy as np from numpy import cos, sin # Checks if a matrix is a valid rotation matrix. def isRotationMatrix(R): """ Check if input is a correct matrix :param R: :return: """ Rt = np.transpose(R.copy()) shouldBeIdentity = np.dot(Rt, R) I = np.identity(3, dtype = R.dtype) n = np.linalg.norm(I - shouldBeIdentity) return n < 1e-6 def inverse_kinematics_wrist(R): """ Calculates the inverse kinematics of the wrist of the robot :param R: :return: """ minplus = 1 t5 = np.arctan2(minplus * np.sqrt(1 - (R[2, 2]**2)), R[2, 2]) t4 = np.arctan2(minplus * R[1, 2], minplus * R[0, 2]) t6 = np.arctan2(minplus * R[2, 1], minplus * -R[2, 0]) R_check = np.array([[cos(t4) * cos(t5) * cos(t6) - sin(t4) * sin(t6) - R[0, 0], -cos(t4) * cos(t5) * sin(t6) -
sin(t4)  # api: numpy.sin
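# Quick check of the isRotationMatrix helper above on a rotation about the z axis; the
# angle 0.3 rad is arbitrary and used only for illustration.
import numpy as np
from numpy import cos, sin

theta = 0.3
Rz = np.array([[cos(theta), -sin(theta), 0.0],
               [sin(theta),  cos(theta), 0.0],
               [0.0,         0.0,        1.0]])

print(np.allclose(Rz.T @ Rz, np.identity(3)))   # True: columns are orthonormal
print(np.linalg.det(Rz))                        # ~1.0: a proper rotation, so isRotationMatrix(Rz) is True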
import numpy as np from xrcnn.util import bbox as B class Anchor: def __init__(self, config): # def __init__(self, base_size=16, # anchor_ratios=[ # (1. / math.sqrt(2), 2. / math.sqrt(2)), # (1., 1.), # (2. / math.sqrt(2), 1. / math.sqrt(2))], # anchor_scales=[128 / 4, 256 / 4, 512 / 4], # backbone_shape=[64 / 4, 64 / 4]): """RoI予測の基準となるアンカーを生成する。 アンカーの基準となる値を指定する。 Args: base_size (number): アンカーを適用する特徴マップ1ピクセルが、入力画像において何ピクセルに値するか。 anchor_ratios (list of float): アンカーのアスペクト比。 :math:`[(h, w), ...]` anchor_scales (list of numbers): アンカーのサイズ(入力画像におけるサイズ)。 このサイズの正方形をアンカーの領域とする。 anchor_ratios (list of numbers): アンカーのアスペクト比 """ self.base_size = config.stride_per_base_nn_feature self.backbone_shape = config.backbone_shape self.anchor_ratios = config.anchor_box_aspect_ratios self.anchor_scales = config.anchor_box_scales self.bbox_refinement_std = config.bbox_refinement_std self.anchor_base = self._anchor_base( self.base_size, self.anchor_ratios, self.anchor_scales) self.anchors = self._generate_anchors(self.backbone_shape) def generate_gt_offsets(self, bbox_gt, img_size, pos_iou_thresh=0.5, neg_iou_thresh=0.3, n_max_sample=256, pos_ratio=0.5): """anchorにGroud truthなBBoxを適用し、anchor毎に最もIoUが大きいBBoxを特定し、そのBBoxとのオフセットを得る。 IoU値により、各アンカーを以下に分類する。 0.7以上:オブジェクト →0.5にする。 0.7だとVOCdevkit/VOC2007/Annotations/007325.xmlにあるようなサイズのBboxが GTとして得られなかったため。 0.3未満:非オブジェクト それ以外:評価対象外。つまり、トレーニングには使わないアンカー。 Args: bbox_gt (array): Ground truthなBBox Its shape is :math:`(R, 4)`. img_size (h,w): 入力画像の高さと幅のタプル. pos_iou_thresh: この値以上のIoUをclass=1とする。 pos_iou_thresh: この値未満のIoUをclass=0とする。 n_max_sample: 評価対象とする(classが1or0である)オフセットの上限 pos_ratio: 評価対象サンプル中のPositiveの割合 n_max_sample, pos_ratioは論文中の以下への対応。 考慮無しではNegativeサンプルが支配的になる。学習効率も考慮し、このような処理を行うものと思われる。 Each mini-batch arises from a single image that contains many positive and negative example anchors. It is possible to optimize for the loss functions of all anchors, but this will bias towards negative samples as they are dominate. Instead, we randomly sample 256 anchors in an image to compute the loss function of a mini-batch, where the sampled positive and negative anchors have a ratio of up to 1:1. If there are fewer than 128 positive samples in an image, we pad the mini-batch with negative ones. Returns: (offsets, obj_flags): offsets (array) : 各アンカーとGround TruthなBBoxとのオフセット。 Its shape is :math:`(S, 4)`. 2軸目の内容は以下の通り。 (x, y ,h, w) objects (array): 各アンカーがオブジェクトか否か。 Its shape is :math:`(S, 1)`. 
2軸目の内容は以下の通り。 1:オブジェクト 0:非オブジェクト −1:評価対象外 """ h, w = img_size anchor = self.anchors n_anchor_initial = len(anchor) # 入力領域をはみ出すアンカーを除外 index_inside = np.where( (anchor[:, 0] >= 0) & (anchor[:, 1] >= 0) & (anchor[:, 2] <= h) & (anchor[:, 3] <= w) )[0] anchor = anchor[index_inside] # 各アンカー毎にGTとのIoUを算出し、最大か0.7以上のIoUを残す。 # IoU >= 0.7はオブジェクト候補とする(class = 1) # IoU < 0.3は非オブジェクト候補とする(class = 0) # それ以外のIoUは評価対象外とする(class = -1) argmax_ious, objects = self._create_label(anchor, bbox_gt, pos_iou_thresh, neg_iou_thresh, n_max_sample, pos_ratio) # アンカーとGroud truthのオフセットを得る。 offsets = B.get_offset(anchor, bbox_gt[argmax_ious]) # 既存実装に合わせた精度向上 offsets /= np.array(self.bbox_refinement_std) # 元の形状に戻す。 # index_insideに削減した1次元目の次元数をn_anchor_initialに戻す。 # 復元した座標は評価対象外なので、ラベルは−1、オフセットは0を設定して無効な状態に。 objects = self._unmap(objects, n_anchor_initial, index_inside, fill=-1) offsets = self._unmap(offsets, n_anchor_initial, index_inside, fill=0) return offsets, objects def _create_label(self, anchor, bbox, pos_iou_thresh, neg_iou_thresh, n_max_sample, pos_ratio): """ anchorとbboxのIoUを算出し、それぞれオブジェクト候補か否かを得る。 IoU >= 0.7はオブジェクト候補とする(class = 1) IoU < 0.3は非オブジェクト候補とする(class = 0) それ以外のIoUは評価対象外とする(class = -1) anchor毎に全bboxについてのIoUを算出する。 つまり、(len(anchor), len(bbox))のマトリクスになる。 このマトリクスから、anchor毎に最大のIoUを含むbboxのindexを得る。 Args: anchor (tensor): アンカー Its shape is :math:`(R, 4)`. bbox (tensor): Ground truthなBBox Its shape is :math:`(S, 4)`. pos_iou_thresh: この値以上のIoUをclass=1とする。 pos_iou_thresh: この値未満のIoUをclass=0とする。 n_max_sample: 評価対象とする(classが1or0である)オフセットの上限 pos_ratio: 評価対象サンプル中のPositiveの割合 Returns: (index_max_iou_per_anchor, label) index_max_iou_per_anchor: anchor毎のIoUが最大となるbboxのIndex。 Its shape is :math:`(R, 1)`. label:anchor毎のオブジェクト/非オブジェクト Its shape is :math:`(R, 1)`. """ # 評価対象外の−1で初期化 label = np.full((len(anchor)), -1) # アンカー毎にIoUが最大となるbboxの列Indexとその値、最大のIoUを含むアンカーのIndexを得る。 index_max_iou_per_anchor, max_ious, gt_argmax_ious = self._calc_ious( anchor, bbox) # 最大のIoUを含むアンカーはPositive label[gt_argmax_ious] = 1 # 閾値以上のIoUはPositive label[max_ious >= pos_iou_thresh] = 1 # 閾値未満のIoUはNegative label[max_ious < neg_iou_thresh] = 0 # Positiveのサンプル数を上限以内に抑える n_pos_max = int(pos_ratio * n_max_sample) pos_index = np.where(label == 1)[0] if len(pos_index) > n_pos_max: # n_pos_maxを超える場合は、Positiveをランダムに評価対象外にする disable_index = np.random.choice( pos_index, size=(len(pos_index) - n_pos_max), replace=False) label[disable_index] = -1 # Negativeサンプルも同様に上限以内に抑える n_neg = n_max_sample -
np.sum(label == 1)  # api: numpy.sum
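# Minimal sketch of the positive/negative balancing performed in _create_label above: cap
# the positives at pos_ratio * n_max_sample, pad the rest of the mini-batch with negatives,
# and mark the surplus as -1 (ignored). The labels here are synthetic and the helper name
# subsample_labels is illustrative.
import numpy as np

def subsample_labels(label, n_max_sample=256, pos_ratio=0.5):
    label = label.copy()
    n_pos_max = int(pos_ratio * n_max_sample)
    pos_index = np.where(label == 1)[0]
    if len(pos_index) > n_pos_max:
        disable = np.random.choice(pos_index, size=len(pos_index) - n_pos_max, replace=False)
        label[disable] = -1
    n_neg_max = n_max_sample - np.sum(label == 1)        # fill the batch with negatives
    neg_index = np.where(label == 0)[0]
    if len(neg_index) > n_neg_max:
        disable = np.random.choice(neg_index, size=len(neg_index) - n_neg_max, replace=False)
        label[disable] = -1
    return label

label = np.random.choice([-1, 0, 1], size=5000, p=[0.2, 0.7, 0.1])
balanced = subsample_labels(label)
print(np.sum(balanced == 1), np.sum(balanced == 0))   # at most 128 positives, 256 samples in total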