Dataset columns (name, type, string length range):

  prompt       string   lengths 19 to 879k
  completion   string   lengths 3 to 53.8k
  api          string   lengths 8 to 59
from cytopy.tests import assets from ..data.population import Population from ..data.project import Project from ..data.experiment import FileGroup from mongoengine.connection import connect, disconnect import pandas as pd import numpy as np import pytest import shutil import sys import os @pytest.fixture(scope='session', autouse=True) def setup(): """ Setup testing database Yields ------- None """ sys.path.append("/home/ross/CytoPy") os.mkdir(f"{os.getcwd()}/test_data") connect("test", host="mongomock://localhost", alias="core") yield shutil.rmtree(f"{os.getcwd()}/test_data", ignore_errors=True) disconnect(alias="core") @pytest.fixture def example_populated_experiment(): """ Generate an example Experiment populated with a single FileGroup "test sample" Yields ------- Experiment """ test_project = Project(project_id="test", data_directory=f"{os.getcwd()}/test_data") exp = test_project.add_experiment(experiment_id="test experiment", panel_definition=f"{assets.__path__._path[0]}/test_panel.xlsx") exp.add_fcs_files(sample_id="test sample", primary=f"{assets.__path__._path[0]}/test.FCS", controls={"test_ctrl": f"{assets.__path__._path[0]}/test.FCS"}, compensate=False) yield exp test_project.reload() test_project.delete() os.mkdir(f"{os.getcwd()}/test_data") def reload_filegroup(project_id: str, exp_id: str, sample_id: str): """ Reload a FileGroup Parameters ---------- project_id: str exp_id: str sample_id: str Returns ------- FileGroup """ fg = (Project.objects(project_id=project_id) .get() .get_experiment(exp_id) .get_sample(sample_id)) return fg def create_example_populations(filegroup: FileGroup, n_populations: int = 3): """ Given a FileGroup add the given number of example populations. Parameters ---------- filegroup: FileGroup n_populations: int (default=3) Total number of populations to generate (must be at least 2) Returns ------- FileGroup """ for pname, parent in zip([f"pop{i + 1}" for i in range(n_populations)], ["root"] + [f"pop{i + 1}" for i in range(n_populations - 1)]): parent_df = filegroup.load_population_df(population=parent, transform="logicle") x = parent_df["FS Lin"].median() idx = parent_df[parent_df["FS Lin"] >= x].index.values p = Population(population_name=pname, n=len(idx), parent=parent, index=idx, source="gate") filegroup.add_population(population=p) filegroup.save() return filegroup def create_logicle_like(u: list, s: list, size: list): assert len(u) == len(s), "s and u should be equal length" lognormal = [np.random.lognormal(mean=u[i], sigma=s[i], size=int(size[i])) for i in range(len(u))] return np.concatenate(lognormal) def create_linear_data(): x = np.concatenate([np.random.normal(loc=3.2, scale=0.8, size=100000), np.random.normal(loc=0.95, scale=1.1, size=100000)]) y = np.concatenate([
np.random.normal(loc=3.1, scale=0.85, size=100000)
numpy.random.normal
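To make the row layout concrete, here is a minimal sketch using only the fields of the first row above: the prompt ends mid-expression (`y = np.concatenate([`), the completion supplies the next call verbatim, and `api` records that call's fully qualified name.

```python
# Fields copied from the first row above (prompt tail truncated for brevity).
prompt_tail = "y = np.concatenate(["
completion = "np.random.normal(loc=3.1, scale=0.85, size=100000)"
api = "numpy.random.normal"

# Concatenating prompt and completion continues the original source code.
print(prompt_tail + completion)

# The api column is simply the completion's callable, fully qualified.
assert completion.split("(")[0].replace("np.", "numpy.") == api
```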
#!/usr/bin/env python """The WaveBlocks Project This file contains code for computing the eigenstates of a given potential in terms of Hagedorn wavepackets. @author: <NAME> @copyright: Copyright (C) 2012, 2013, 2014, 2016 <NAME> @license: Modified BSD License """ import argparse import os from numpy import (argsort, atleast_1d, atleast_2d, complexfloating, conjugate, dot, ones, zeros, real, identity, squeeze, sum, transpose, zeros_like, argmax, angle, abs, pi) from scipy.optimize import fmin from scipy.linalg import sqrtm, inv, eigh, norm from WaveBlocksND import BlockFactory from WaveBlocksND import GradientHAWP from WaveBlocksND import IOManager from WaveBlocksND import ParameterLoader def compute_eigenstate(parameters, filename="eigenstates.hdf5", computepq=True, computePQ=True): r""" Special variables necessary in configuration: * eigenstate_of_level (default: 0) * eigenstates_indices (default: [0]) * starting_point (default: (2, ..., 2)) * hawp_template * innerproduct """ D = parameters["dimension"] if "eigenstate_of_level" in parameters: N = parameters["eigenstate_of_level"] else: # Upper-most potential surface N = 0 # Create output file now, in case this fails we did not waste computation time IOM = IOManager() IOM.create_file(filename) # Save the simulation parameters IOM.add_parameters() IOM.save_parameters(parameters) gid = IOM.create_group() BF = BlockFactory() # Create the potential V = BF.create_potential(parameters) V.calculate_local_quadratic() # Compute position and momentum if computepq: # Minimize the potential to find q0 f = lambda x: real((squeeze(V.evaluate_at(x)[N]))) # Start with an offset because exact 0.0 values can give # issues, especially with the Hessian evaluation. This way # the minimizer will always stay away from zero a tiny bit. # The current starting point can give issues if the potential # is stationary at the point (2, ..., 2) but that is less likely. 
if "starting_point" in parameters: x0 = atleast_1d(parameters["starting_point"]) else: x0 = 0.5 * ones(D) q0 = fmin(f, x0, xtol=1e-12) q0 = q0.reshape((D, 1)) # We are at the minimum with no momentum p0 = zeros_like(q0) else: if "q0" in parameters: q0 = atleast_2d(parameters["q0"]) else: q0 = zeros((D, 1)) if "p0" in parameters: p0 = atleast_2d(parameters["p0"]) else: p0 = zeros((D, 1)) # Compute spreads if computePQ: # Q_0 = H^(-1/4) H = V.evaluate_hessian_at(q0) Q0 = inv(sqrtm(sqrtm(H))) # P_0 = i Q_0^(-1) P0 = 1.0j * inv(Q0) else: if "Q0" in parameters: Q0 = atleast_2d(parameters["Q0"]) else: Q0 = identity(D) if "P0" in parameters: P0 = atleast_2d(parameters["P0"]) else: P0 = 1.0j * inv(Q0) # The parameter set Pi print(70 * "-") print("Parameter values are:") print("---------------------") print(" q0:") print(str(q0)) print(" p0:") print(str(p0)) print(" Q0:") print(str(Q0)) print(" P0:") print(str(P0)) # Consistency check print(" Consistency check:") print(" P^T Q - Q^T P =?= 0") print(dot(P0.T, Q0) - dot(Q0.T, P0)) print(" Q^H P - P^H Q =?= 2i") print(dot(transpose(conjugate(Q0)), P0) - dot(transpose(conjugate(P0)), Q0)) # Next find the new coefficients c' HAWP = BF.create_wavepacket(parameters["hawp_template"]) # Set the parameter values Pi = HAWP.get_parameters() Pi[0] = q0 Pi[1] = p0 Pi[2] = Q0 Pi[3] = P0 HAWP.set_parameters(Pi) # Next compute the matrix M_ij = <phi_i | T + V | phi_j> # The potential part HQ = BF.create_inner_product(parameters["innerproduct"]) opV = lambda x, q, entry: V.evaluate_at(x, entry=entry) MV = HQ.build_matrix(HAWP, operator=opV) # The kinetic part MT = zeros_like(MV, dtype=complexfloating) GR = GradientHAWP() BS = HAWP.get_basis_shapes(component=N) vects = {} for i in BS: z = zeros_like(HAWP.get_coefficient_vector(), dtype=complexfloating) HAWP.set_coefficient_vector(z) HAWP.set_coefficient(N, i, 1.0) Kn, cnew = GR.apply_gradient(HAWP, component=N, as_packet=False) vects[i] = cnew for j in BS: for k in BS: cj = vects[j] ck = vects[k] entry = 0.5 * squeeze(sum(conjugate(cj) * ck)) MT[BS[j], BS[k]] = entry # Find eigenvalues and eigenvectors of the whole matrix M = MT + MV ew, ev = eigh(M) ind = argsort(ew) # Build the requested energy levels and states if "eigenstates_indices" in parameters: states = parameters["eigenstates_indices"] else: # Groundstate only states = [0] BS = HAWP.get_basis_shapes(component=0) KEY = ("q", "p", "Q", "P", "S", "adQ") print(70 * "-") for state in states: if state > BS.get_basis_size(): print("Warning: can not compute energy level {} with basis size of {}".format((state, BS))) continue index = ind[state] coeffs = ev[:, index] energy = ew[index] # Try to resolve ambiguities in sign imax = argmax(
abs(coeffs)
numpy.abs
from keras.callbacks import LearningRateScheduler, Callback from keras.models import Model, load_model from keras.preprocessing import sequence from keras.preprocessing.text import Tokenizer, text_to_word_sequence from keras.utils import Sequence from keras import backend as K from .utils import chargen_encode_cat import numpy as np from .model import chargen_model from keras.models import Model, load_model from keras.preprocessing import sequence from .utils import * def train(text_filepath, textgen, num_epochs=50, gen_epochs=1, batch_size=1024, dropout=0.05, train_size=0.8, verbose=1, validation=True, gen_text_length=500, train_new_model=True, **kwargs): """Trains new model as well as generates samples and saves weights after a specified number of epochs. :param text_filepath: the filepath of the text to be trained :param textgen: the CharGen instance :param num_epochs: number of epochs that model should be trained for (default 50) :param gen_epochs: number of epochs after which it generates samples at different temperatures (default 1) :param batch_size: number of training examples used in one iteration (default 1024) :param dropout: fraction of neurons to be ignored in a single forward and backward pass (default 0.05) :param train_size: fraction of the data to be used for training (default .8) :param verbose: integer. 0, 1, or 2. Verbosity mode. 0 = silent, 1 = progress bar, 2 = one line per epoch. :param validation: Boolean. Specifies whether or not to conduct validation (default True) :param gen_text_length: max length of the generated text (default 500) :param train_new_model: Boolean. Specify whether training a new model or not (default True) :param kwargs: :return: None """ with open(text_filepath, 'r', encoding='utf8', errors='ignore') as f: texts = [f.read()] print("Training a {}LSTM model with {}-layers each with {} cells".format( 'Bidirectional ' if textgen.config['bidirectional'] else '', textgen.config['rnn_layers'], textgen.config['rnn_size'] )) if train_new_model: print('Training a new model...') if textgen.vocab_filepath is None: textgen.build_vocab(texts) textgen.model = chargen_model(textgen.num_of_classes, dropout=dropout, cfg=textgen.config) textgen.save_files() # calculate all of the combinations of token indices and text indices list_of_indices = [np.meshgrid(np.array(i), np.arange( len(text) + 1)) for i, text in enumerate(texts)] list_of_indices =
np.block(list_of_indices)
numpy.block
# ---------------------------------------------------------------------------- # Copyright (c) 2013--, scikit-bio development team. # # Distributed under the terms of the Modified BSD License. # # The full license is in the file COPYING.txt, distributed with this software. # ---------------------------------------------------------------------------- from __future__ import absolute_import, division, print_function try: # future >= 0.12 from future.backports.test.support import import_fresh_module except ImportError: from future.standard_library.test.support import import_fresh_module import unittest import warnings import numpy as np import numpy.testing as npt from skbio.stats import isubsample cy_subsample = import_fresh_module('skbio.stats._subsample', fresh=['skbio.stats.__subsample']) py_subsample = import_fresh_module('skbio.stats._subsample', blocked=['skbio.stats.__subsample']) def setup(): """Ignore warnings during tests.""" warnings.simplefilter("ignore") def teardown(): """Clear the list of warning filters, so that no filters are active.""" warnings.resetwarnings() class SubsampleCountsTests(object): def test_subsample_counts_nonrandom(self): a = np.array([0, 5, 0]) # Subsample same number of items that are in input (without # replacement). npt.assert_equal(self.module.subsample_counts(a, 5), a) # Can only choose from one bin. exp = np.array([0, 2, 0]) npt.assert_equal(self.module.subsample_counts(a, 2), exp) npt.assert_equal( self.module.subsample_counts(a, 2, replace=True), exp) # Subsample zero items. a = [3, 0, 1] exp = np.array([0, 0, 0]) npt.assert_equal(self.module.subsample_counts(a, 0), exp) npt.assert_equal(self.module.subsample_counts(a, 0, replace=True), exp) def test_subsample_counts_without_replacement(self): # Selecting 2 counts from the vector 1000 times yields each of the two # possible results at least once each. a =
np.array([2, 0, 1])
numpy.array
# -*- coding: utf-8 -*- """ Created on Wed Jun 17 12:06:45 2020 @author: biomet """ import numpy as np import pandas as pd import scipy as sp from statsmodels.stats.multitest import fdrcorrection import itertools from scipy import interp from itertools import cycle from sklearn.utils import check_random_state, check_X_y from sklearn.preprocessing import label_binarize from sklearn.model_selection import train_test_split from sklearn.metrics import confusion_matrix from sklearn.metrics import precision_score from sklearn.metrics import recall_score from sklearn.metrics import f1_score from sklearn import metrics from sklearn.metrics import roc_curve, auc from sklearn.metrics import accuracy_score from sklearn.model_selection import cross_val_score from sklearn.model_selection import cross_validate from sklearn.model_selection import StratifiedKFold from sklearn.model_selection import KFold from sklearn.ensemble import RandomForestClassifier from sklearn.model_selection import TimeSeriesSplit, GridSearchCV, RandomizedSearchCV import pickle import PCA_Analysis as pca import RF_Analysis_Multiclass as rfc import RF_Analysis_Binary as rfb from Auto_ML_Multiclass import AutoML_classification # ============================================================================= # Read data set # ============================================================================= cdi_meta = pd.read_csv("cdi_meta.csv").set_index("sample_id") cdi_microbiome = pd.read_csv("cdi_OTUs.csv").set_index("index") microbiome = cdi_microbiome y = cdi_meta["DiseaseState"] y = cdi_meta["DiseaseState"].apply(lambda x: 0 if x == "CDI" else 1 if x == "ignore-nonCDI" else 2) class_name = ["CDI", "ignore-nonCDI", "Health"] X_train, X_test, y_train, y_test = train_test_split(microbiome, y, test_size=0.3, random_state=42) # ============================================================================= # Step 1 - Run Auto_ML # ============================================================================= automl = AutoML_classification() result = automl.fit(X_train, y_train, X_test, y_test) # ============================================================================= # Step 2 - Run selected models # ============================================================================= rf_best, _, _, _, _ = automl.Random_Forest(X_train, y_train, X_test, y_test) evaluate_rf = automl.evaluate_multiclass(rf_best, X_train, y_train, X_test, y_test, model = "Random Forest", num_class=3, top_features=20, class_name = class_name) # ============================================================================= # Main function # ============================================================================= def _get_importance_value(X_train, y_train, n_estimators): """ Parameters ---------- X_train : TYPE DESCRIPTION. y_train : TYPE DESCRIPTION. Returns ------- imp : TYPE DESCRIPTION. 
""" """ # Numer of trees are used n_estimators = [5, 10, 50, 100, 150, 200, 250, 300] # Maximum depth of each tree max_depth = [5, 10, 25, 50, 75, 100] # Minimum number of samples per leaf min_samples_leaf = [1, 2, 4, 8, 10] # Minimum number of samples to split a node min_samples_split = [2, 4, 6, 8, 10] # Maximum numeber of features to consider for making splits max_features = ["auto", "sqrt", "log2", None] criterion = ["gini", "entropy"] hyperparameter = {'n_estimators': n_estimators, 'max_depth': max_depth, 'min_samples_leaf': min_samples_leaf, 'min_samples_split': min_samples_split, 'max_features': max_features, 'criterion': criterion} n_folds = 10 my_cv = TimeSeriesSplit(n_splits = n_folds).split(X_train) base_model_rf = RandomForestClassifier(random_state=42) rsearch_cv = RandomizedSearchCV(estimator=base_model_rf, random_state=42, param_distributions=hyperparameter, n_iter=30, cv=my_cv, scoring="f1_macro", n_jobs=-1) rsearch_cv.fit(X_train, y_train) rb_best = rsearch_cv.best_estimator_ rb_best.fit(X_train, y_train) imp = rb_best.feature_importances_ """ clf = RandomForestClassifier(n_estimators = n_estimators, random_state=42) clf.fit(X_train, y_train) imp = clf.feature_importances_ return imp def _get_tree_num(n_feat): depth = 10 f_repr = 100 multi = ((n_feat * 2) / (np.sqrt(n_feat * 2) * depth)) n_estimators = int(multi * f_repr) return n_estimators def _get_shuffle(seq): random_state = check_random_state(42) random_state.shuffle(seq) return seq def _add_shadows_get_imps(X_train, y_train, dec_reg): """ Expands the information system with newly built random attributes and calculates the importance value Parameters ---------- X_train : array-like The training input samples. y_train : array-like The target values. dec_reg : array-like Holds the decision about each feature 0 - default state = tentative in orginal code 1 - accepted in original code -1 - rejected in original code Returns ------- imp_real : The importance value of real values imp_sha : The importance value of shadow values """ # find features that tentative still x_cur_ind = np.where(dec_reg >= 0)[0] x_cur = np.copy(X_train[:, x_cur_ind]) x_cur_w = x_cur.shape[1] x_sha = np.copy(x_cur) # There must be at least 5 random attributes while (x_sha.shape[1] < 5): x_sha = np.hstack((x_sha, x_sha)) # Now, we permute values in each attribute x_sha = np.apply_along_axis(_get_shuffle, 0, x_sha) not_rejected = np.where(dec_reg >= 0)[0].shape[0] n_tree = _get_tree_num(not_rejected) # Get importance values from new shadow input data imp = _get_importance_value(np.hstack((x_cur, x_sha)), y_train, 500) # Separate importances value of real and shadow features imp_sha = imp[x_cur_w:] imp_real = np.zeros(X_train.shape[1]) imp_real[:] = np.nan imp_real[x_cur_ind] = imp[:x_cur_w] return imp_real, imp_sha def _assign_hits(hit_reg, cur_imp, imp_sha_max): """ Register which the importance value of features is more than the max value of shadows """ cur_imp_no_nan = cur_imp[0] cur_imp_no_nan[np.isnan(cur_imp_no_nan)] = 0 hits = np.where(cur_imp_no_nan > imp_sha_max)[0] hit_reg[hits] += 1 return hit_reg def _fdrcorrection(pvals, alpha=0.05): """ Benjamini/Hochberg p-value correction for false discovery rate in statsmodels package """ pvals = np.asarray(pvals) pvals_sortind = np.argsort(pvals) pvals_sorted = np.take(pvals, pvals_sortind) nobs = len(pvals_sorted) ecdffactor = np.arange(1, nobs+1) / float(nobs) reject = pvals_sorted <= ecdffactor * alpha if reject.any(): rejectmax = max(np.nonzero(reject)[0]) reject[:rejectmax] = True 
pvals_corrected_raw = pvals_sorted / ecdffactor pvals_corrected = np.minimum.accumulate(pvals_corrected_raw[::-1])[::-1] pvals_corrected[pvals_corrected > 1] = 1 # Reorder p-values and rejection mask to original order of pvals pvals_corrected_ = np.empty_like(pvals_corrected) pvals_corrected_[pvals_sortind] = pvals_corrected reject_ = np.empty_like(reject) reject_[pvals_sortind] = reject return reject_, pvals_corrected_ def _nan_rank_data(X, axis=1): """ Replaces bottleneck's nanrankdata with scipy and numpy alternative """ ranks = sp.stats.mstats.rankdata(X, axis=axis) ranks[
np.isnan(X)
numpy.isnan
''' 01_RandomWalk_alpha.py : Replication of Figure 9.2 <NAME>, 2020 MIT License ''' import numpy as np import pylab as pl from IRL.environments.ToyExamples import RandomWalk from IRL.agents.TemporalDifferenceApproximation import nStepSemiGradientTDPrediction from IRL.utils.FeatureTransformations import stateAggregation from IRL.utils.ApproximationFunctions import linearTransform, dLinearTransform def generateTrajectories(nEpisodes, env): trajectories_all = [] for e in range(nEpisodes): done = False state = env.reset() trajectories = [{}] while not done: trajectories[-1]['state']= state trajectories[-1]['done']= done new_state, reward, done = env.step() experience = {} experience['state'] = new_state experience['reward'] = reward experience['done'] = done trajectories.append(experience) state = new_state trajectories_all.append(trajectories) return trajectories_all def runExperiment(trajectories, agent, nStates): for e, trajectory in enumerate(trajectories): for t in range(len(trajectory)-1): agent.update(trajectory[t:t+2]) estimatedValues = [agent.getValue(state) for state in range(nStates)] return np.array(estimatedValues) if __name__=="__main__": nExperiments = 10 nEpisodes = 10 # Environment nStatesOneSide = 500 specialRewards = {nStatesOneSide*2:1.0, 0:-1.0} groundTruth =
np.zeros(nStatesOneSide*2+1)
numpy.zeros
import numpy as np import paddle.fluid as fluid import paddle.fluid.layers as layers class LearningRateScheduler(object): """ Wrapper for learning rate scheduling as described in the Transformer paper. LearningRateScheduler adapts the learning rate externally and the adapted learning rate will be feeded into the main_program as input data. """ def __init__(self, d_model, warmup_steps, place, learning_rate=0.001, current_steps=0, name="learning_rate"): self.current_steps = current_steps self.warmup_steps = warmup_steps self.d_model = d_model self.learning_rate = layers.create_global_var( name=name, shape=[1], value=float(learning_rate), dtype="float32", persistable=True) self.place = place def update_learning_rate(self, data_input): self.current_steps += 1 lr_value = np.power(self.d_model, -0.5) * np.min([ np.power(self.current_steps, -0.5), np.power(self.warmup_steps, -1.5) * self.current_steps ]) lr_tensor = fluid.LoDTensor() lr_tensor.set(
np.array([lr_value], dtype="float32")
numpy.array
# -*- coding: utf-8 -*- import numpy as np from ..fun.cal import nanfunc __all__ = ['mean', 'percentile'] def mean(sample1, sample2, axis=0, sample_size=130, borders=0, max_sample=1460, ratio=True, median=False, **kwargs): """ Adjustment method using mean differences or ratios ratio=False dataset[sampleout] + (MEAN(dataset[sample1]) - MEAN(dataset[sample2])) ratio=True dataset[sampleout] * (MEAN(dataset[sample1]) / MEAN(dataset[sample2])) Args: sample1 (np.ndarray): reference sample2 (np.ndarray): sample axis (int): date axis sample_size (int): minimum sample size ratio (bool): use ratio or difference? median (bool): use median instead of mean? borders (int): around breakpoint max_sample (int): maximum sample size Returns: np.ndarray : mean adjusted dataset """ # minimum sample size, maximum sample size if median: s1 = nanfunc(sample1, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanmedian, borders=borders) s2 = nanfunc(sample2, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanmedian, borders=borders, flip=True) else: s1 = nanfunc(sample1, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanmean, borders=borders) s2 = nanfunc(sample2, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanmean, borders=borders, flip=True) if ratio: # Todo factor amplifies extreme values dep = s1 / s2 dep = np.where(np.isfinite(dep), dep, 1.) # replace NaN with 1 sample2 *= dep else: dep = s1 - s2 sample2 += dep return sample2 def meanvar(sample1, sample2, axis=0, sample_size=130, borders=0, max_sample=1460, **kwargs): """ Adjustment method using mean differences or ratios dataset[sampleout] + (MEAN(dataset[sample1]) - MEAN(dataset[sample2])) Args: sample1 (np.ndarray): reference sample2 (np.ndarray): sample axis (int): date axis sample_size (int): minimum sample size borders (int): around breakpoint max_sample (int): maximum sample size Returns: np.ndarray : mean adjusted dataset """ s1 = nanfunc(sample1, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanmean, borders=borders) s2 = nanfunc(sample2, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanmean, borders=borders, flip=True) s1v = nanfunc(sample1, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanvar, borders=borders) s2v = nanfunc(sample2, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanvar, borders=borders, flip=True) # MEAN dep = s1 - s2 # VAR fac = np.divide(s1v, s2v, out=np.ones(s2v.shape), where=s2v != 0) sample2 += (dep * fac) return sample2 def percentile(sample1, sample2, percentiles, axis=0, sample_size=130, borders=0, max_sample=1460, ratio=True, apply=None, noise=False,**kwargs): """ Adjustment method using percentile differences or ratios ratio=False dataset[sample1] + ( percentiles(dataset[sample1]) - percentiles(dataset[sample2]) ) ratio=True dataset[sample1] * ( percentiles(dataset[sample1]) / percentiles(dataset[sample2]) ) Args: sample1 (np.ndarray): reference sample2 (np.ndarray): sample percentiles (list): percentiles to use axis (int): date axis sample_size (int): minimum sample size ratio (bool): use ratio or difference? borders (int): around breakpoint max_sample (int): maximum sample size Returns: np.ndarray : percentile adjusted dataset """ # Add 0 and 100, and remove them percentiles = np.unique(np.concatenate([[0], percentiles, [100]])) percentiles = percentiles[1:-1] # remove 0 and 100 # Sample sizes are enough? 
# nsample1 = np.isfinite(dataset[sample1]).sum(axis=axis) > sample_size # nsample2 = np.isfinite(dataset[sample2]).sum(axis=axis) > sample_size # Percentiles of the samples # if special: #s1 = np.rollaxis(np.nanpercentile(sample1, percentiles, axis=axis),0, axis) #s2 = np.rollaxis(np.nanpercentile(sample2, percentiles, axis=axis),0, axis) #print(s1.shape) #print(s1[:, 0, 5]) #print(s2[:, 0, 5]) # else: # # Percentiles can be duplicated (because DPD might be integers) # limit calculations by sample_size, max_sample, borders # # (part A) | (part B) # break # >borders< # <max sample max sample> # # (part B) s1 = nanfunc(sample1, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanpercentile, borders=borders, fargs=(percentiles,)) # flip means that beginning from the back (part A) s2 = nanfunc(sample2, axis=axis, n=sample_size, nmax=max_sample, ffunc=np.nanpercentile, borders=borders, fargs=(percentiles,), flip=True) # print(s1.shape) # print(s1[0, :, 5]) # print(s2[0, :, 5]) if ratio: dep = np.divide(s1, s2, where=(s2 != 0), out=np.full(s2.shape, 1.)) dep = np.where(np.isfinite(dep), dep, 1.) # replace NaN else: dep = s1 - s2 dep = np.where(np.isfinite(dep), dep, 0.) # Interpolate adjustments if apply is None: apply = sample2.copy() else: apply = apply.copy() dep = apply_percentile_adjustments(apply, s2, dep, axis=axis, noise=noise) if ratio: dep = np.where(np.isfinite(dep), dep, 1.) apply *= dep else: dep = np.where(np.isfinite(dep), dep, 0.) apply += dep return apply # # Helper functions # def apply_percentile_adjustments(data, percentiles, adjustment, axis=0, noise=False): """ Helper Function for applying percentile adjustments Args: data (np.ndarray): dataset percentiles (np.ndarray): percentiles, points of adjustments adjustment (np.ndarray): adjustments to be interpolated axis (int): axis of datetime Returns: np.ndarray : interpolated adjustment, same shape as dataset """ in_dims = list(range(data.ndim)) # last dim == axis, Last dim should be time/date # print(data.shape) data = np.transpose(data, in_dims[:axis] + in_dims[axis + 1:] + [axis]) # print(data.shape) percentiles =
np.transpose(percentiles, in_dims[:axis] + in_dims[axis + 1:] + [axis])
numpy.transpose
from tqdm import tqdm import numpy as np import torch from torch.distributions import Normal from neuralpredictors.measures import corr from .model_evaluation import ( get_conditional_means, get_conditional_variances, spearman_corr, ) def get_conditional_means_and_variances_flowfa( dataloader, flowfa_model, data_key, use_torch=True ): C, psi_diag = flowfa_model.C_and_psi_diag R = psi_diag.diag().cpu().data.numpy() C = C.cpu().data.numpy() transformed_responses, predicted_means = [], [] for b in dataloader: transformed_responses.append( flowfa_model.sample_transform(b[1])[0].cpu().data.numpy() ) predicted_means.append(flowfa_model(*b, data_key=data_key).cpu().data.numpy()) transformed_responses = np.concatenate(transformed_responses) predicted_means =
np.concatenate(predicted_means)
numpy.concatenate
"""Various utility classes and functions.""" from os import PathLike from pathlib import Path from typing import ( Any, Callable, Container, Dict, Iterable, List, Optional, Sequence, Tuple, TypeVar, Union, overload, ) import click import joblib import numpy as np import tqdm import yaml from sklearn.base import BaseEstimator, TransformerMixin from sklearn.metrics import get_scorer from sklearn.model_selection import ( BaseCrossValidator, GroupKFold, GroupShuffleSplit, LeaveOneGroupOut, StratifiedKFold, StratifiedShuffleSplit, LeaveOneOut, ) PathOrStr = Union[PathLike, str] # Class adapted from user394430's answer here: # https://stackoverflow.com/a/61900501/10044861 # Licensed under CC BY-SA 4.0 class TqdmParallel(joblib.Parallel): """Convenience class that acts identically to joblib.Parallel except it uses a tqdm progress bar. """ def __init__( self, total: int = 1, desc: str = "", unit: str = "it", leave: bool = True, **kwargs, ): self.total = total self.tqdm_args = {"desc": desc, "unit": unit, "leave": leave, "disable": None} kwargs["verbose"] = 0 super().__init__(**kwargs) def __call__(self, iterable): with tqdm.tqdm(total=self.total, **self.tqdm_args) as self.pbar: return super().__call__(iterable) def print_progress(self): self.pbar.n = self.n_completed_tasks self.pbar.refresh() class PathlibPath(click.Path): """Convenience class that acts identically to `click.Path` except it converts the value to a `pathlib.Path` object. """ def convert(self, value, param, ctx) -> Path: return Path(super().convert(value, param, ctx)) T1 = TypeVar("T1") T2 = TypeVar("T2") def itmap(s: Callable[[T1], T2]): """Returns a new map function that additionally maps tuples to tuples and lists to lists. """ @overload def _map(x: T1) -> T2: ... @overload def _map(x: List[T1]) -> List[T2]: ... @overload def _map(x: Tuple[T1, ...]) -> Tuple[T2, ...]: ... def _map(x): if isinstance(x, (list, tuple)): return type(x)(s(y) for y in x) else: return s(x) return _map def ordered_intersect(a: Iterable, b: Container) -> List: """Returns a list of the intersection of `a` and `b`, in the order elements appear in `a`. """ return [x for x in a if x in b] def get_arg_mapping_multi(s: str) -> Dict[str, List[Any]]: """Given a string mapping from the command-line, returns a dict representing that mapping. The string form of the mapping is: key:value[,key:value]+ Duplicate keys will be mapped to a list of values. Args: ----- s: str String representing the mapping. It cannot contain spaces or shell symbols (unless escaped). Returns: -------- mapping: dict A dictionary mapping keys to lists of values from the string. """ mapping: Dict[str, List[str]] = {} for cls in s.split(","): key, val = cls.split(":") if key in mapping: mapping[key].append(val) else: mapping[key] = [val] return mapping def get_arg_mapping(s: Union[Path, str]) -> Dict[str, Any]: """Given a mapping on the command-line, returns a dict representing that mapping. Mapping can be a string or a more complex YAML file. The string form of the mapping is: key:value[,key:value]+ Args: ----- s: PathLike or str String representing the mapping or path to YAML containing mapping. If a string, it cannot contain spaces or shell symbols (unless escaped). Returns: -------- mapping: dict A dictionary mapping keys to values from the string. 
""" if isinstance(s, Path) or Path(s).exists(): with open(s) as fid: return yaml.safe_load(fid) or {} return {k: v[0] if len(v) == 1 else v for k, v in get_arg_mapping_multi(s).items()} def flat_to_inst(x: np.ndarray, slices: np.ndarray) -> np.ndarray: """Takes a concatenated 2D data array and converts it to either a contiguous 2D/3D array or a variable-length 3D array, with one feature vector/matrix per instance. """ if len(x) == len(slices): # 2-D contiguous array return x elif all(x == slices[0] for x in slices): # 3-D contiguous array assert len(x) % len(slices) == 0 seq_len = len(x) // len(slices) return np.reshape(x, (len(slices), seq_len, x[0].shape[-1])) else: # 3-D variable length array start_idx = np.cumsum(slices)[:-1] return np.array(np.split(x, start_idx, axis=0), dtype=object) def inst_to_flat(x: np.ndarray) -> Tuple[np.ndarray, np.ndarray]: """The inverse of flat_to_inst(). Takes an instance matrix and converts to a "flattened" 2D matrix. """ slices = np.ones(len(x), dtype=int) if len(x.shape) != 2: slices = np.array([len(_x) for _x in x]) x = np.concatenate(x) assert sum(slices) == len(x) return x, slices def check_3d(arrays: Union[Sequence[np.ndarray], np.ndarray]): """Checks if an array is 3D or each array in a list is 2D. Raises an exception if this isn't the case. """ if any(len(x.shape) != 2 for x in arrays): raise ValueError("arrays must be 3D (contiguous or vlen).") def frame_arrays( arrays: Union[List[np.ndarray], np.ndarray], frame_size: int = 640, frame_shift: int = 160, num_frames: Optional[int] = None, ): """Creates sequences of frames from the given arrays. Each input array is a 1-D or L x 1 time domain signal. Each corresponding output array is a 2-D array of frames of shape (num_frames, frame_size). """ # TODO: Make option for vlen output if num_frames is None: max_len = max(len(x) for x in arrays) num_frames = (max_len - frame_size) // frame_shift + 1 _arrs = [] for seq in arrays: seq = np.squeeze(seq) arr = np.zeros((num_frames, frame_size), dtype=np.float32) for i in range(0, len(seq), frame_shift): idx = i // frame_shift if idx >= num_frames: break maxl = min(len(seq) - i, frame_size) arr[idx, :maxl] = seq[i : i + frame_size] _arrs.append(arr) arrs = np.array(_arrs) assert tuple(arrs.shape) == (len(arrays), num_frames, frame_size) return arrs def pad_arrays(arrays: Union[List[np.ndarray], np.ndarray], pad: int = 32): """Pads each array to the nearest multiple of `pad` greater than the array size. Assumes axis 0 of each sub-array, or axis 1 of x, is the time axis. """ if isinstance(arrays, np.ndarray) and len(arrays.shape) > 1: padding = int(np.ceil(arrays.shape[1] / pad)) * pad - arrays.shape[1] extra_dims = tuple((0, 0) for _ in arrays.shape[2:]) return np.pad(arrays, ((0, 0), (0, padding)) + extra_dims) new_arrays = [] for x in arrays: padding = int(np.ceil(x.shape[0] / pad)) * pad - x.shape[0] new_arrays.append(np.pad(x, ((0, padding), (0, 0)))) if isinstance(arrays, np.ndarray): if all(x.shape == new_arrays[0].shape for x in new_arrays): return np.array(new_arrays) return np.array(new_arrays, dtype=object) return new_arrays def clip_arrays( arrays: Union[List[np.ndarray], np.ndarray], length: int, copy: bool = True ): """Clips each array to the specified maximum length.""" if isinstance(arrays, np.ndarray): if len(arrays.shape) > 1: return arrays[:, :length, ...].copy() if copy else arrays[:, :length, ...] 
new_arrays = [x[:length].copy() if copy else x[:length] for x in arrays] if all(x.shape == new_arrays[0].shape for x in new_arrays): # Return contiguous array return np.stack(new_arrays) return np.array(new_arrays, dtype=object) return [x[:length].copy() if copy else x[:length] for x in arrays] def transpose_time(arrays: Union[List[np.ndarray], np.ndarray]): """Transpose the time and feature axis of each array. Requires each array be 2-D. NOTE: This function modifies the arrays in-place. """ check_3d(arrays) if isinstance(arrays, np.ndarray) and len(arrays.shape) == 3: arrays = arrays.transpose(0, 2, 1) else: for i in range(len(arrays)): arrays[i] = arrays[i].transpose() assert all(x.shape[0] == arrays[0].shape[0] for x in arrays) return arrays def shuffle_multiple(*arrays: np.ndarray, numpy_indexing: bool = True): """Shuffles multiple arrays or lists in sync. Useful for shuffling the data and labels in a dataset separately while keeping them synchronised. Parameters: ----------- arrays, iterable of array-like The arrays to shuffle. They must all have the same size of first dimension. numpy_indexing: bool, default = True Whether to use NumPy-style indexing or list comprehension. Returns: shuffled_arrays: iterable of array-like The shuffled arrays. """ if any(len(arrays[0]) != len(x) for x in arrays): raise ValueError("Not all arrays have equal first dimension.") perm = np.random.default_rng().permutation(len(arrays[0])) new_arrays = [ array[perm] if numpy_indexing else [array[i] for i in perm] for array in arrays ] return new_arrays def batch_arrays( arrays_x: Union[np.ndarray, List[np.ndarray]], y: np.ndarray, batch_size: int = 32, shuffle: bool = True, uniform_batch_size: bool = False, ) -> Tuple[np.ndarray, np.ndarray]: """Batches a list of arrays of different sizes, grouping them by size. This is designed for use with variable length sequences. Each batch will have a maximum of batch_size arrays, but may have less if there are fewer arrays of the same length. It is recommended to use the pad_arrays() method of the LabelledDataset instance before using this function, in order to quantise the lengths. Parameters: ----- arrays_x: list of ndarray A list of N-D arrays, possibly of different lengths, to batch. The assumption is that all the arrays have the same rank and only axis 0 differs in length. y: ndarray The labels for each of the arrays in arrays_x. batch_size: int Arrays will be grouped together by size, up to a maximum of batch_size, after which a new batch will be created. Thus each batch produced will have between 1 and batch_size items. shuffle: bool, default = True Whether to shuffle array order in a batch. uniform_batch_size: bool, default = False Whether to keep all batches the same size, batch_size, and pad with zeros if necessary, or have batches of different sizes if there aren't enough sequences to group together. Returns: -------- x_list: ndarray, The batched arrays. x_list[i] is the i'th batch, having between 1 and batch_size items, each of length lengths[i]. y_list: ndarray The batched labels corresponding to sequences in x_list. y_list[i] has the same length as x_list[i]. 
""" if isinstance(arrays_x, list): arrays_x = np.array(arrays_x, dtype=object) if shuffle: arrays_x, y = shuffle_multiple(arrays_x, y, numpy_indexing=False) fixed_shape = arrays_x[0].shape[1:] lengths = [x.shape[0] for x in arrays_x] unique_len = np.unique(lengths) x_dtype = arrays_x[0].dtype y_dtype = y.dtype xlist = [] ylist = [] for length in unique_len: idx = np.nonzero(lengths == length)[0] for b in range(0, len(idx), batch_size): batch_idx = idx[b : b + batch_size] size = batch_size if uniform_batch_size else len(batch_idx) _x =
np.zeros((size, length) + fixed_shape, dtype=x_dtype)
numpy.zeros
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import unittest import numpy as np import paddle import copy np.random.seed(10) paddle.seed(10) class TestNormalAPI(unittest.TestCase): def setUp(self): self.mean = 1.0 self.std = 0.0 self.shape = None self.repeat_num = 2000 self.set_attrs() self.dtype = self.get_dtype() self.place=paddle.CUDAPlace(0) \ if paddle.fluid.core.is_compiled_with_cuda() \ else paddle.CPUPlace() def set_attrs(self): self.shape = [8, 12] def get_shape(self): if isinstance(self.mean, np.ndarray): shape = self.mean.shape elif isinstance(self.std, np.ndarray): shape = self.std.shape else: shape = self.shape return list(shape) def get_dtype(self): if isinstance(self.mean, np.ndarray): return self.mean.dtype elif isinstance(self.std, np.ndarray): return self.std.dtype else: return 'float32' def static_api(self): shape = self.get_shape() ret_all_shape = copy.deepcopy(shape) ret_all_shape.insert(0, self.repeat_num) ret_all = np.zeros(ret_all_shape, self.dtype) if isinstance(self.mean, np.ndarray) \ and isinstance(self.std, np.ndarray): with paddle.static.program_guard(paddle.static.Program()): mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype) std = paddle.fluid.data('Std', self.std.shape, self.std.dtype) out = paddle.normal(mean, std, self.shape) exe = paddle.static.Executor(self.place) for i in range(self.repeat_num): ret = exe.run(feed={ 'Mean': self.mean, 'Std': self.std.reshape(shape) }, fetch_list=[out]) ret_all[i] = ret[0] return ret_all elif isinstance(self.mean, np.ndarray): with paddle.static.program_guard(paddle.static.Program()): mean = paddle.fluid.data('Mean', self.mean.shape, self.mean.dtype) out = paddle.normal(mean, self.std, self.shape) exe = paddle.static.Executor(self.place) for i in range(self.repeat_num): ret = exe.run(feed={'Mean': self.mean}, fetch_list=[out]) ret_all[i] = ret[0] return ret_all elif isinstance(self.std, np.ndarray): with paddle.static.program_guard(paddle.static.Program()): std = paddle.fluid.data('Std', self.std.shape, self.std.dtype) out = paddle.normal(self.mean, std, self.shape) exe = paddle.static.Executor(self.place) for i in range(self.repeat_num): ret = exe.run(feed={'Std': self.std}, fetch_list=[out]) ret_all[i] = ret[0] return ret_all else: with paddle.static.program_guard(paddle.static.Program()): out = paddle.normal(self.mean, self.std, self.shape) exe = paddle.static.Executor(self.place) for i in range(self.repeat_num): ret = exe.run(fetch_list=[out]) ret_all[i] = ret[0] return ret_all def dygraph_api(self): paddle.disable_static(self.place) shape = self.get_shape() ret_all_shape = copy.deepcopy(shape) ret_all_shape.insert(0, self.repeat_num) ret_all = np.zeros(ret_all_shape, self.dtype) mean = paddle.to_tensor(self.mean) \ if isinstance(self.mean, np.ndarray) else self.mean std = paddle.to_tensor(self.std) \ if isinstance(self.std, np.ndarray) else self.std for i in range(self.repeat_num): out = paddle.normal(mean, std, self.shape) ret_all[i] = out.numpy() 
paddle.enable_static() return ret_all def test_api(self): ret_static = self.static_api() ret_dygraph = self.dygraph_api() for ret in [ret_static, ret_dygraph]: shape_ref = self.get_shape() self.assertEqual(shape_ref, list(ret[0].shape)) ret = ret.flatten().reshape([self.repeat_num, -1]) mean = np.mean(ret, axis=0) std = np.std(ret, axis=0) mean_ref=self.mean.reshape([1, -1]) \ if isinstance(self.mean, np.ndarray) else self.mean std_ref=self.std.reshape([1, -1]) \ if isinstance(self.std, np.ndarray) else self.std self.assertTrue(np.allclose(mean_ref, mean, 0.2, 0.2)) self.assertTrue(np.allclose(std_ref, std, 0.2, 0.2)) class TestNormalAPI_mean_is_tensor(TestNormalAPI): def set_attrs(self): self.mean = np.random.uniform(-2, -1, [2, 3, 4, 5]).astype('float64') class TestNormalAPI_std_is_tensor(TestNormalAPI): def set_attrs(self): self.std = np.random.uniform(0.7, 1, [2, 3, 17]).astype('float64') class TestNormalAPI_mean_std_are_tensor(TestNormalAPI): def set_attrs(self): self.mean =
np.random.uniform(1, 2, [1, 100])
numpy.random.uniform
from __future__ import division import numpy as np import copy from pysph.base.nnps import LinkedListNNPS from pysph.base.utils import get_particle_array, get_particle_array_wcsph from cyarray.api import UIntArray from numpy.linalg import norm, matrix_power from pysph.sph.equation import Equation from pysph.tools.sph_evaluator import SPHEvaluator from pysph.base.particle_array import ParticleArray def distance(point1, point2=np.array([0.0, 0.0, 0.0])): return np.sqrt(sum((point1 - point2) * (point1 - point2))) def distance_2d(point1, point2=np.array([0.0, 0.0])): return np.sqrt(sum((point1 - point2) * (point1 - point2))) def matrix_exp(matrix): """ Exponential of a matrix. Finds the exponential of a square matrix of any order using the formula exp(A) = I + (A/1!) + (A**2/2!) + (A**3/3!) + ......... Parameters ---------- matrix : numpy matrix of order nxn (square) filled with numbers Returns ------- result : numpy matrix of the same order Examples -------- >>>A = np.matrix([[1, 2],[2, 3]]) >>>matrix_exp(A) matrix([[19.68002699, 30.56514746], [30.56514746, 50.24517445]]) >>>B = np.matrix([[0, 0],[0, 0]]) >>>matrix_exp(B) matrix([[1., 0.], [0., 1.]]) """ matrix = np.asarray(matrix) tol = 1.0e-16 result = matrix_power(matrix, 0) n = 1 condition = True while condition: adding = matrix_power(matrix, n) / (1.0 * np.math.factorial(n)) result += adding residue = np.sqrt(np.sum(np.square(adding)) / np.sum(np.square(result))) condition = (residue > tol) n += 1 return result def extrude(x, y, dx=0.01, extrude_dist=1.0, z_center=0.0): """ Extrudes a 2d geometry. Takes a 2d geometry with x, y values and extrudes it in z direction by the amount extrude_dist with z_center as center Parameters ---------- x : 1d array object with numbers y : 1d array object with numbers dx : a number extrude_dist : a number z_center : a number x, y should be of the same length and no x, y pair should be the same Returns ------- x_new : 1d numpy array object with new x values y_new : 1d numpy array object with new y values z_new : 1d numpy array object with z values x_new, y_new, z_new are of the same length Examples -------- >>>x = np.array([0.0]) >>>y = np.array([0.0]) >>>extrude(x, y, 0.1, 0.2, 0.0) (array([ 0., 0., 0.]), array([ 0., 0., 0.]), array([-0.1, 0., 0.1])) """ z = np.arange(z_center - extrude_dist / 2., z_center + (extrude_dist + dx) / 2., dx) x_new = np.tile(np.asarray(x), len(z)) y_new = np.tile(np.asarray(y), len(z)) z_new = np.repeat(z, len(x)) return x_new, y_new, z_new def translate(x, y, z, x_translate=0.0, y_translate=0.0, z_translate=0.0): """ Translates set of points in 3d cartisean space. Takes set of points and translates each and every point by some mentioned amount in all the 3 directions. 
Parameters ---------- x : 1d array object with numbers y : 1d array object with numbers z : 1d array object with numbers x_translate : a number y_translate : a number z_translate : a number Returns ------- x_new : 1d numpy array object with new x values y_new : 1d numpy array object with new y values z_new : 1d numpy array object with new z values Examples -------- >>>x = np.array([0.0, 1.0, 2.0]) >>>y = np.array([-1.0, 0.0, 1.5]) >>>z = np.array([0.5, -1.5, 0.0]) >>>translate(x, y, z, 1.0, -0.5, 2.0) (array([ 1., 2., 3.]), array([-1.5, -0.5, 1.]), array([2.5, 0.5, 2.])) """ x_new = np.asarray(x) + x_translate y_new = np.asarray(y) + y_translate z_new = np.asarray(z) + z_translate return x_new, y_new, z_new def rotate(x, y, z, axis=np.array([0.0, 0.0, 1.0]), angle=90.0): """ Rotates set of points in 3d cartisean space. Takes set of points and rotates each point with some angle w.r.t a mentioned axis. Parameters ---------- x : 1d array object with numbers y : 1d array object with numbers z : 1d array object with numbers axis : 1d array with 3 numbers angle(in degrees) : number Returns ------- x_new : 1d numpy array object with new x values y_new : 1d numpy array object with new y values z_new : 1d numpy array object with new z values Examples -------- >>>x = np.array([0.0, 1.0, 2.0]) >>>y = np.array([-1.0, 0.0, 1.5]) >>>z = np.array([0.5, -1.5, 0.0]) >>>axis = np.array([0.0, 0.0, 1.0]) >>>rotate(x, y, z, axis, 90.0) (array([ 1.00000000e+00, 4.25628483e-17, -1.50000000e+00]), array([-4.25628483e-17, 1.00000000e+00, 2.00000000e+00]), array([ 0.5, -1.5, 0. ])) """ theta = angle * np.pi / 180.0 unit_vector = np.asarray(axis) / norm(np.asarray(axis)) matrix = np.cross(np.eye(3), unit_vector * theta) rotation_matrix = matrix_exp(matrix) new_points = [] for xi, yi, zi in zip(np.asarray(x), np.asarray(y), np.asarray(z)): point =
np.array([xi, yi, zi])
numpy.array
import cv2 import numpy as np import random import os from helper import add_noise, add_shadow, apply_motion_blur, add_spot_light, add_parallel_light class backgroundOverlayer(object): """ Overlay's april tag on the background image """ def __init__(self, apriltag_generator , mx_tags): self.generator = apriltag_generator self.mx_tags = mx_tags def __call__(self, background_img): corners_collection = [] tags_to_overlay = 50 out_response = np.zeros(background_img.shape[:2], dtype = np.uint8) real_out_response = np.full((background_img.shape[0],background_img.shape[1], 5),0, dtype = np.uint8) real_out_response[:,:,-1] = 255 really_real_out_response = np.full((background_img.shape[0],background_img.shape[1], 5),0, dtype = np.uint8) really_real_out_response[:,:,-1] = 255 id_real_out_response = np.full((background_img.shape[0],background_img.shape[1], 2),0, dtype = np.uint8) #It attemps to generate as many tags as possible till the upper_limit tags_to_overlay, but sometimes two might overlap it will just remove the later one for tag in range(tags_to_overlay): index = random.randrange(len(self.generator)) # index= random.choice([27,28, 29,30,31,32, 33, 34, 35,36, 37, 38, 38, 39, 40,41, 42, 43, 44]) # index = random.randrange(500) # index = 27 result = self.generator[index] response = result["response"] response_in_use = result["response_in_use"] mask = result["mask"] tag_img = result["image"] corners_coords = result["corners_uv"] # mask = np.maximum(mask, tag_img) _, mask = cv2.threshold(mask, 254, 255, cv2.THRESH_BINARY) mask_inv = cv2.bitwise_not(mask) width = tag_img.shape[1] height = tag_img.shape[0] if background_img.shape[1] < width or background_img.shape[0] < height: continue x_offset = random.randrange(background_img.shape[1] - width + 1) y_offset = random.randrange(background_img.shape[0] - height + 1) out_response_view = out_response[y_offset:y_offset + height, x_offset:x_offset + width] real_out_response_view = real_out_response[y_offset:y_offset + height , x_offset:x_offset + width] really_real_out_response_view = real_out_response[y_offset:y_offset + height , x_offset:x_offset + width] if cv2.bitwise_and(out_response_view, mask).any(): continue #Merge with the image background_img_view = background_img[y_offset:y_offset + height , x_offset:x_offset + width] img_masked = cv2.bitwise_and(background_img_view, background_img_view, mask=mask_inv) tag_img = cv2.cvtColor(tag_img, cv2.COLOR_GRAY2BGR) tag_img = np.clip(tag_img, random.randint(0,10)*10, 255) tag_img_masked = cv2.bitwise_and(tag_img, tag_img, mask = mask) #Find light if np.random.uniform(0, 1, 1)[0] > 0.1: background_img_view_lab = cv2.cvtColor(background_img_view, cv2.COLOR_BGR2LAB) tag_img_view_lab = cv2.cvtColor(tag_img_masked, cv2.COLOR_BGR2LAB) light_background = background_img_view_lab[:, :,0].mean() light_tag = tag_img_view_lab[:,:,0].sum()/ np.count_nonzero(mask) # kernel = np.ones((5,5),np.float32)/25 # light_tag = cv2.filter2D(light_tag,-1,kernel) w_light = (( light_background/(light_tag + 0.0001))) # w_light = np.ones((height, width), dtype = np.float32)*w_light # w_light = (w_light +np.random.normal(0, 0.1, w_light.shape)) tag_img_view_lab[:, :, 0] = np.clip(np.multiply(tag_img_view_lab[:,:,0] ,w_light), 0, 255); if np.random.uniform(0, 1, 1)[0] > 1.7: tag_img_view_lab[:, :,0] = add_spot_light(tag_img_view_lab[:,:,0][..., np.newaxis]) tag_img_view_lab[:, :,0] = add_parallel_light(tag_img_view_lab[:,:,0][..., np.newaxis]) tag_img_masked= cv2.cvtColor(tag_img_view_lab, cv2.COLOR_LAB2BGR) tag_img_masked = 
cv2.bitwise_and(tag_img_masked, tag_img, mask = mask) background_img_view = cv2.add(img_masked, tag_img_masked) #make sure no overlaps out_response_view = out_response[y_offset:y_offset + height, x_offset:x_offset + width] real_out_response_view = real_out_response[y_offset:y_offset + height , x_offset:x_offset + width] id_real_out_response_view = id_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, 0] really_real_out_response_view = really_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width] if not cv2.bitwise_and(out_response_view, mask).any(): if np.random.uniform(0, 1, 1)[0] > 0.8: blurred_background_img_view = cv2.GaussianBlur(background_img_view, (5, 5), 0) contours, hierarchy = cv2.findContours(mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE) tmp_mask = np.zeros(background_img_view.shape, dtype = np.uint8) cv2.drawContours(tmp_mask, contours, -1, (255,255, 255),5) background_img_view = np.where(tmp_mask==np.array([255, 255, 255]), blurred_background_img_view, background_img_view) background_img[y_offset:y_offset + height , x_offset:x_offset + width] = background_img_view out_response[y_offset:y_offset + height , x_offset:x_offset + width] = cv2.bitwise_or(out_response_view, mask) real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, :-1] = np.maximum(response[:,:,:-1], real_out_response_view[:,:,:-1]) real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, -1] = np.minimum(response[:,:,-1], real_out_response_view[:,:,-1]) id_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, 0] = np.maximum(id_real_out_response_view, mask/255*index) really_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, :-1] = np.maximum(response_in_use[:,:,:-1], really_real_out_response_view[:,:,:-1]) really_real_out_response[y_offset:y_offset + height , x_offset:x_offset + width, -1] = np.minimum(response_in_use[:,:,-1], really_real_out_response_view[:,:,-1]) corners_collection.append([np.array([x_offset, y_offset])+corners_coords ]) if np.random.uniform(0, 1, 1)[0] > 1.8: background_img[:,:,0] = cv2.equalizeHist(background_img[:,:,0]); background_img[:,:,1] = cv2.equalizeHist(background_img[:,:,1]); background_img[:,:,2] = cv2.equalizeHist(background_img[:,:,2]); if np.random.uniform(0, 1, 1)[0] > 1.5: background_img = add_shadow(background_img, random.randrange(2)) if np.random.uniform(0, 1, 1)[0] > 1.5: background_img = add_spot_light(background_img) if np.random.uniform(0, 1, 1)[0] > 1.5: background_img = add_parallel_light(background_img) if np.random.uniform(0, 1, 1)[0] > 1.5: background_img = add_noise(background_img, "gauss") if np.random.uniform(0, 1, 1)[0] > 1.8: background_img = add_noise(background_img, "s&p") if np.random.uniform(0, 1, 1)[0] > 1.8: background_img = add_noise(background_img, "speckle") # Motion blur if np.random.uniform(0, 1, 1)[0] > 1.8 : size = np.random.randint(3, 7) deg = np.random.randint(-180, 180) background_img = apply_motion_blur(background_img, size, deg) return background_img, out_response, np.clip(real_out_response,0,255),
np.clip(really_real_out_response,0,255)
numpy.clip
#! /usr/bin/env python3 # -*- coding: utf-8 -*- import sys import numpy as np import blosum as bl from random import randint def load_pssm(namefile, aa) : """ Lecture d'un fichier .aamtx et renvoie matrice PSSM """ with open (namefile, 'r') as f : for line in f : if line[0] == '>' : name = line[1:-1] elif line[0] in aa : seq = line[:-1] pssm = [] else : pssm.append([float(j) for j in line[:-1].split(' ') if j]) return(name, list(seq), pssm) def calc_score(vect1, vect2) : """ Calcule du score d'alignement entre 2 aa pour une position donnée """ s = 0 n1, n2 = len(vect1), len(vect2) for i in range(n1) : for j in range(n2) : s += vect1[i]*vect2[j] return(s) def calc_gap_optimal(nq, nt, pssmQ, pssmT, blosum): """ Fonction qui calcule et renvoie les pénalités d'ouverture (po) et d'extension de gap (pe) optimales """ gaps = np.arange(0.1, 10, 0.1) A = [] scores = [] blsmean = blosum.stack().mean() blsstd = blosum.stack().std() for gap in gaps: A.append((gap - blsmean) / blsstd) # Scores - random PSSM for i in range(nq): for j in range(nt): ri, rj = randint(0, nq-1), randint(0, nt-1) #sélection des indices de la pssm de manière aléatoires scores.append(calc_score(pssmQ[ri], pssmT[rj])) smean =
np.array(scores)
numpy.array
""" Generating sample data to try cohorts package """ import os import numpy as np import pandas as pd file_path = os.path.dirname(os.path.realpath(__file__)) out_dir = file_path+'/../sample_data/' #####helper functions##### def make_replicates_from_samples(samples,n_replicates=2): tmp = [s + "_Rep" for s in samples] replicates_bare = np.repeat(tmp,n_replicates) order = np.arange(1,n_replicates + 1,1) rep_nums = np.tile(order,n_samples) replicates = [] for i, num in enumerate(rep_nums): replicates.append(replicates_bare[i] + str(num)) return replicates def make_df_replicates(markers,replicates,loc1=2,loc2=20): mat_num = len(markers) * len(replicates) marker_values_ref = np.random.exponential(loc1,mat_num//2) marker_values_trt = np.random.exponential(loc2,mat_num//2) df_replicates_ref = pd.DataFrame(marker_values_ref.reshape(len(markers),len(replicates)//2), index = markers, columns = replicates[:len(replicates)//2]) df_replicates_trt = pd.DataFrame(marker_values_trt.reshape(len(markers),len(replicates)//2), index = markers, columns = replicates[len(replicates)//2:len(replicates)]) df_replicates = df_replicates_ref.join(df_replicates_trt) return df_replicates def make_df_samples(markers,samples,loc1=2,loc2=20): mat_num = len(markers) * len(samples) marker_values_ref = np.random.exponential(loc1,mat_num//2) marker_values_trt = np.random.exponential(loc2,mat_num//2) df_samples_ref = pd.DataFrame(marker_values_ref.reshape(len(markers),len(samples)//2), index = markers, columns = samples[:len(samples)//2]) df_samples_trt = pd.DataFrame(marker_values_trt.reshape(len(markers),len(samples)//2), index = markers, columns = samples[len(samples)//2:len(samples)]) df_samples = df_samples_ref.join(df_samples_trt) return df_samples def make_df_sample_groups(samples): n_samples = len(samples) trt_status = np.repeat([1,0],np.floor(n_samples/2).astype(int)) ref_status = np.repeat([0,1],np.ceil(n_samples/2).astype(int)) n_cov = 2 tmp = np.repeat(1,n_cov) cov_status = np.pad(tmp,(0,n_samples-n_cov),mode="constant") df_sample_groups = pd.DataFrame( [ trt_status, ref_status,cov_status ], columns = samples,index = ["trt","ref","cov"]) return df_sample_groups ########## #Set markers - here, I picked the marker to be proteins, so I just picked random Uniprot identifiers proteins = [ 'E7EX29', 'P62191', 'Q99460', 'P52209', 'P08253', 'P27338', 'P15144', 'P05067', 'Q9UJX5', 'Q8N302', 'P03950', 'Q9Y5C1'] #####SAMPLE DATA##### #Make Samples n_samples = 10 samples = ["S"+ str(i) for i in np.arange(1,n_samples + 1,1)] #Make Replicates replicates = make_replicates_from_samples(samples) #Make df_replicates dataframes df_replicates = make_df_replicates(markers=proteins,replicates=replicates,loc1=2,loc2=20) df_replicates.to_csv(out_dir+"df_replicates.tsv",sep="\t") #Make df_samples dataframe df_samples = make_df_samples(markers=proteins,samples=samples,loc1=2,loc2=20) df_samples.to_csv(out_dir+"df_samples.tsv",sep="\t") #Make df_sample_groups dataframe df_sample_groups = make_df_sample_groups(samples) df_sample_groups.to_csv(out_dir+"df_sample_groups.tsv",sep="\t") ########## ref_loc = 5 trt_loc = 25 #####PATIENT COHORT 1##### out_dir = file_path+'/../sample_data/patient_cohort_1/' #Make Samples n_samples = 10 samples = ["P1-"+ str(i) for i in np.arange(1,n_samples + 1,1)] #Make Replicates replicates = make_replicates_from_samples(samples) #Make df_replicates dataframes df_replicates = make_df_replicates(markers=proteins,replicates=replicates,loc1=ref_loc,loc2=trt_loc) 
df_replicates.to_csv(out_dir+"df_replicates.tsv",sep="\t") #Make df_samples dataframe df_samples = make_df_samples(markers=proteins,samples=samples,loc1=ref_loc,loc2=trt_loc) df_samples.to_csv(out_dir+"df_samples.tsv",sep="\t") #Make df_sample_groups dataframe df_sample_groups = make_df_sample_groups(samples) df_sample_groups.to_csv(out_dir+"df_sample_groups.tsv",sep="\t") #####PATIENT COHORT 2##### out_dir = file_path+'/../sample_data/patient_cohort_2/' #Make Samples n_samples = 10 samples = ["P2-"+ str(i) for i in np.arange(1,n_samples + 1,1)] #Make Replicates replicates = make_replicates_from_samples(samples) #Make df_replicates dataframes df_replicates = make_df_replicates(markers=proteins,replicates=replicates,loc1=ref_loc,loc2=trt_loc) df_replicates.to_csv(out_dir+"df_replicates.tsv",sep="\t") #Make df_samples dataframe df_samples = make_df_samples(markers=proteins,samples=samples,loc1=ref_loc,loc2=trt_loc) df_samples.to_csv(out_dir+"df_samples.tsv",sep="\t") #Make df_sample_groups dataframe df_sample_groups = make_df_sample_groups(samples) df_sample_groups.to_csv(out_dir+"df_sample_groups.tsv",sep="\t") #####PATIENT COHORT 3##### out_dir = file_path+'/../sample_data/patient_cohort_3/' #Make Samples n_samples = 10 samples = ["P3-"+ str(i) for i in
np.arange(1,n_samples + 1,1)
numpy.arange
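# A small illustration of the replicate-naming convention produced by
# make_replicates_from_samples above, written as a single comprehension.
# The sample IDs here are made up for the example.
import numpy as np

samples = ["S1", "S2"]
n_replicates = 2
replicates = [s + "_Rep" + str(k)
              for s in samples
              for k in np.arange(1, n_replicates + 1, 1)]
print(replicates)   # ['S1_Rep1', 'S1_Rep2', 'S2_Rep1', 'S2_Rep2']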
""" Implements lambert's method for two topocentric radius vectors at different times. Supports both Earth-centered.""" import numpy as np from poliastro.core.stumpff import c2, c3 from astropy import units as uts from astropy import constants as cts # declare astronomical constants in appropriate units mu_Earth = cts.GM_earth.to(uts.Unit('km3 / s2')).value def F_z_i(z, t, r1, r2, A): """ Function F for Newton's method :param z: :param t: :param r1: :param r2: :param A: :return: F: function """ mu = mu_Earth C_z_i = c2(z) S_z_i = c3(z) y_z = r1 + r2 + A * (z * S_z_i - 1.0) /
np.sqrt(C_z_i)
numpy.sqrt
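# Stand-alone check of the y(z) expression used in F_z_i, with the Stumpff
# functions C(z) and S(z) written out explicitly for z > 0 (elliptic case),
# so it runs without poliastro.  All numeric inputs are illustrative only.
import numpy as np

def stumpff_c2(z):
    return (1.0 - np.cos(np.sqrt(z))) / z

def stumpff_c3(z):
    sz = np.sqrt(z)
    return (sz - np.sin(sz)) / sz**3

r1, r2, A, z = 7000.0, 9000.0, 8000.0, 1.5   # hypothetical km-scale values
y_z = r1 + r2 + A * (z * stumpff_c3(z) - 1.0) / np.sqrt(stumpff_c2(z))
print(y_z)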
import numpy as np import pandas as pd import matplotlib.pyplot as plt from tqdm import tqdm from cleverhans.attacks import FastGradientMethod, DeepFool, MadryEtAl from cleverhans.utils_keras import KerasModelWrapper from keras.datasets import cifar10 from keras.models import load_model from keras.utils import to_categorical from keras import backend as K import os os.environ["CUDA_VISIBLE_DEVICES"]="0" K.set_learning_phase(0) def batches(lst, n): """Yield successive n-sized chunks from lst.""" for i in range(0, len(lst), n): yield lst[i:i + n] # Data (x_train, y_train), (x_test, y_test) = cifar10.load_data() x_train = x_train / 255.0 x_test = x_test / 255.0 y_test_tmp = np.squeeze(y_test) y_train = to_categorical(y_train) y_test = to_categorical(y_test) # Models vgglike = load_model('Saved_models/good_models/cifar10/cifar10_vgg_like_175.h5') resnet20 = load_model('Saved_models/good_models/cifar10/cifar10_ResNet20v2_model.176.h5') resnet56 = load_model('Saved_models/good_models/cifar10/cifar10_ResNet56v2_model.136.h5') densenet = load_model('Saved_models/good_models/cifar10/cifar10_densenet121_083.h5') vgglike.name = 'Deep_CNN' resnet20.name = 'ResNet20' resnet56.name = 'ResNet56' densenet.name = 'Densenet121' train_loss_acc_clean = [vgglike.evaluate(x_train, y_train), resnet20.evaluate(x_train, y_train), resnet56.evaluate(x_train, y_train), densenet.evaluate(x_train, y_train)] train_loss_clean = [x[0] for x in train_loss_acc_clean] train_acc_clean = [x[1] for x in train_loss_acc_clean] test_loss_acc_clean = [vgglike.evaluate(x_test, y_test), resnet20.evaluate(x_test, y_test), resnet56.evaluate(x_test, y_test), densenet.evaluate(x_test, y_test)] test_loss_clean = [x[0] for x in test_loss_acc_clean] test_acc_clean = [x[1] for x in test_loss_acc_clean] row_names = ["Deep_CNN", "ResNet20", "ResNet56", "Densenet121"] col_names = [('Train', 'Loss'), ('Train', 'Acc'), ('Test', 'Loss'), ('Test', 'Acc')] df = pd.DataFrame([train_loss_clean, train_acc_clean, test_loss_clean, test_acc_clean], columns=row_names).T df.to_csv('csvs/cifar_models_loss_acc.csv') models = [vgglike, resnet20, resnet56, densenet] eps_list = [1.0/255, 3.0/255, 8.0/255, 13.0/255] for eps in eps_list: fgsm_df = pd.DataFrame() L1_avgs = [] L2_avgs = [] Linf_avgs = [] for model in tqdm(models, desc='Model'): ch_model = KerasModelWrapper(model) fgsm = FastGradientMethod(ch_model, sess=K.get_session()) fgsm_params = {'eps': eps, 'clip_min': 0.0, 'clip_max': 1.0} # Batch to avoid ResourceExhaustedError adv = [] for batch in tqdm(batches(x_test, 100), desc='Make adv'): adv.append(fgsm.generate_np(batch, **fgsm_params)) adv = np.concatenate(adv) # Get loss/acc for adversarial examples test_loss_acc_adv = [vgglike.evaluate(adv, y_test), resnet20.evaluate(adv, y_test), resnet56.evaluate(adv, y_test), densenet.evaluate(adv, y_test)] # Debug print(model.name) print(test_loss_acc_adv) test_loss_adv = [x[0] for x in test_loss_acc_adv] test_acc_adv = [x[1] for x in test_loss_acc_adv] L1 = [] L2 = [] Linf = [] for i in range(x_test.shape[0]): vector = (adv[i] - x_test[i]).copy() vector = vector.flatten() L1.append(np.linalg.norm(vector, ord=1)) L2.append(np.linalg.norm(vector)) Linf.append(np.linalg.norm(vector, ord=np.inf)) L1 = np.array(L1) L2 = np.array(L2) Linf = np.array(Linf) L1_avgs.append(L1.mean()) L2_avgs.append(L2.mean()) Linf_avgs.append(Linf.mean()) # Make df # use list instead of dict to preserve order tmp = pd.DataFrame([test_loss_adv, test_acc_adv], columns=row_names, index=[(model.name, 'Loss'), (model.name, 'Acc')]).T 
#tmp.columns = pd.MultiIndex.from_tuples(tmp.columns) fgsm_df = pd.concat([fgsm_df, tmp], axis=1) L_df = pd.DataFrame([L1_avgs, L2_avgs, Linf_avgs], columns=row_names, index=['L1', 'L2', 'L_inf']).T fgsm_df = pd.concat([L_df, fgsm_df], axis=1) fgsm_df.to_csv('csvs/cifar10_fgsm_' + str(eps) + ".csv") deepfool_df = pd.DataFrame() L1_avgs = [] L2_avgs = [] Linf_avgs = [] for model in tqdm(models, desc='Model'): ch_model = KerasModelWrapper(model) deepfool = DeepFool(ch_model, sess=K.get_session()) # Batch to avoid ResourceExhaustedError adv = [] for batch in tqdm(batches(x_test, 100), desc='Make adv'): adv.append(deepfool.generate_np(batch)) adv = np.concatenate(adv) # Get loss/acc for adversarial examples test_loss_acc_adv = [vgglike.evaluate(adv, y_test), resnet20.evaluate(adv, y_test), resnet56.evaluate(adv, y_test), densenet.evaluate(adv, y_test)] # Debug print(model.name) print(test_loss_acc_adv) test_loss_adv = [x[0] for x in test_loss_acc_adv] test_acc_adv = [x[1] for x in test_loss_acc_adv] L1 = [] L2 = [] Linf = [] for i in range(x_test.shape[0]): vector = (adv[i] - x_test[i]).copy() vector = vector.flatten() L1.append(np.linalg.norm(vector, ord=1)) L2.append(np.linalg.norm(vector)) Linf.append(np.linalg.norm(vector, ord=np.inf)) L1 = np.array(L1) L2 = np.array(L2) Linf = np.array(Linf) L1_avgs.append(L1.mean()) L2_avgs.append(L2.mean()) Linf_avgs.append(Linf.mean()) # Make df # use list instead of dict to preserve order tmp = pd.DataFrame([test_loss_adv, test_acc_adv], columns=row_names, index=[(model.name, 'Loss'), (model.name, 'Acc')]).T #tmp.columns = pd.MultiIndex.from_tuples(tmp.columns) deepfool_df = pd.concat([deepfool_df, tmp], axis=1) L_df = pd.DataFrame([L1_avgs, L2_avgs, Linf_avgs], columns=row_names, index=['L1', 'L2', 'L_inf']).T deepfool_df = pd.concat([L_df, deepfool_df], axis=1) deepfool_df.to_csv('csvs/cifar10_deepfool.csv') eps_list = [1.0/255, 3.0/255, 8.0/255, 13.0/255] for eps in eps_list: madry_df = pd.DataFrame() L1_avgs = [] L2_avgs = [] Linf_avgs = [] for model in tqdm(models, desc='Model'): ch_model = KerasModelWrapper(model) madry = MadryEtAl(ch_model, sess=K.get_session()) madry_params = {'eps': eps, 'clip_min': 0.0, 'clip_max': 1.0} # Batch to avoid ResourceExhaustedError adv = [] for batch in tqdm(batches(x_test, 100), desc='Make adv'): adv.append(madry.generate_np(batch, **madry_params)) adv = np.concatenate(adv) # Get loss/acc for adversarial examples test_loss_acc_adv = [vgglike.evaluate(adv, y_test), resnet20.evaluate(adv, y_test), resnet56.evaluate(adv, y_test), densenet.evaluate(adv, y_test)] # Debug print(model.name) print(test_loss_acc_adv) test_loss_adv = [x[0] for x in test_loss_acc_adv] test_acc_adv = [x[1] for x in test_loss_acc_adv] L1 = [] L2 = [] Linf = [] for i in range(x_test.shape[0]): vector = (adv[i] - x_test[i]).copy() vector = vector.flatten() L1.append(
np.linalg.norm(vector, ord=1)
numpy.linalg.norm
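# Toy version of the perturbation-size bookkeeping above: the same three norms
# computed on a small made-up "adversarial minus clean" difference vector.
import numpy as np

diff = np.array([0.01, -0.03, 0.02, 0.0])
l1 = np.linalg.norm(diff, ord=1)         # sum of absolute values
l2 = np.linalg.norm(diff)                # Euclidean norm (default for vectors)
linf = np.linalg.norm(diff, ord=np.inf)  # largest absolute entry
print(l1, l2, linf)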
""" =============================================================================== | process_data.py | =============================================================================== | A collection of scripts to read data from exodus files using the yt library.| | Note: A good way to generate the functions is to: | | from functools import partial | | fxns = [partial(f,*args,**keywords) for args,keywords in ...] | | | | This makes independent functions which can be given different args | | and keywords. | =============================================================================== """ #Import modules import yt import numpy as np import functools def get_dof_coordinate_data(data_set, dof, meshname = 'connect1'): """ ================================= | get_dof_coordinate_data | ================================= Get the degree of freedom data and the x,y,z coordinates at which that data is defined. note that dof can either be a string or a list of strings. """ if type(dof)==str: dof_data = [[float(d) for d in data] for data in data_set.all_data()[meshname, dof]] elif type(dof)==list: _dof_data = [[[float(d) for d in data] for data in data_set.all_data()[meshname, _dof]] for _dof in dof] dof_data = [zip(*v) for v in zip(*_dof_data)] coord_x = data_set.all_data()[meshname, 'vertex_x'] coord_y = data_set.all_data()[meshname, 'vertex_y'] coord_z = data_set.all_data()[meshname, 'vertex_z'] return [[(d,float(_x),float(_y),float(_z)) for d,_x,_y,_z in zip(data,x,y,z)] for data,x,y,z in zip(dof_data, coord_x, coord_y, coord_z)] def evaluate_functions_at_coordinates(list_of_functions,coordinates): """ =========================================== | evaluate_functions_at_coordinates | =========================================== Evaluate functions at the given coordinate points. Functions should be of the form v = f(x,y,z) """ return [tuple([f(x,y,z) for f in list_of_functions]) for x,y,z in coordinates] def evaluate_manufactured_solution_result_at_step(filename,dof,list_of_functions,step=-1,meshname='connect1',rtol=1e-5): """ ======================================================= | evaluate_manufactured_solution_result_at_step | ======================================================= Evaluate the manufactured solution result and return a true-false statement if the solution has converged at a given step. Defaults to the last step of the simulation. """ data_set = yt.load(filename,step=-1) simulation_results = get_dof_coordinate_data(data_set, dof, meshname = meshname); flat_simulation_results = [item for sublist in simulation_results for item in sublist] manufactured_solution = evaluate_functions_at_coordinates(list_of_functions,[sr[1:] for sr in flat_simulation_results]) if(len(flat_simulation_results) != len(manufactured_solution)): print("Error: there aren't as many simulation results as manufactured solutions") return False result = all([np.allclose(a,b,rtol=rtol) for a,b in zip([r[0] for r in flat_simulation_results],manufactured_solution)]) if(result==False): print("Result failed. 
Computing maximum differences...\n") diffs = np.array([np.array(a)-np.array(b) for a,b in zip([r[0] for r in flat_simulation_results],manufactured_solution)]) print("maximum abs differences: {0}".format(np.max(np.abs(diffs),axis=0))) return result ### UTILITY FUNCTIONS ### def const_fxn(x,y,z,v=0.): return v def linear_fxn(x,y,z,a=0.,b=0.,c=0.,d=0.): return a*x + b*y + c*z + d def generate_linear_functions(n,bounds=[1.0,-1.0],seed=123): """ =================================== | generate_linear_functions | =================================== Generate linear functions with random coefficients """ np.random.seed(seed) coefs = [(bounds[1] - bounds[0])*np.random.rand(4) + bounds[0] for _ in range(n)] strings = ["{0}*x+{1}*y+{2}*z+{3}".format(coef[0],coef[1],coef[2],coef[3]) for coef in coefs] return [functools.partial(linear_fxn,a=coef[0],b=coef[1],c=coef[2],d=coef[3]) for coef in coefs],coefs,strings def generate_random_phis(stretch_scale_bounds = [0.5,2.0], theta_bounds = [-0.5*np.pi,0.5*np.pi],seed=123): """ ============================== | generate_random_phis | ============================== Generate random values of phi that will be physical. """ np.random.seed(seed) S = np.diag([(b-a)*np.random.rand(3) + a])*np.eye(3) thetas = (theta_bounds[1]-theta_bounds[0])*np.random.rand(3) + theta_bounds[0] Rx = np.array([[ 1, 0, 0],\ [ 0, np.cos(thetas[0]), -np.sin(thetas[0])],\ [ 0, np.sin(thetas[0]), np.cos(thetas[0])]]) Ry = np.array([[ np.cos(thetas[1]), 0, np.sin(thetas[1])],\ [ 0, 1, 0],\ [-np.sin(thetas[1]), 0, np.cos(thetas[1])]]) Rz = np.array([[ np.cos(thetas[2]),-np.sin(thetas[2]), 0],\ [ np.sin(thetas[2]), np.cos(thetas[2]), 0],\ [ 0, 0, 1]]) phi = Rx*Ry*Rz*S def rotate_matrix(A,thetas): """ ======================= | rotate_matrix | ======================= Rotate the given matrix by the provided angles. The order with which these are applied are x rotation, then y, then z. thetas = [theta_x, theta_y, theta_z] """ Rx = np.array([[ 1, 0, 0],\ [ 0, np.cos(thetas[0]), -np.sin(thetas[0])],\ [ 0, np.sin(thetas[0]), np.cos(thetas[0])]]) Ry = np.array([[ np.cos(thetas[1]), 0, np.sin(thetas[1])],\ [ 0, 1, 0],\ [-np.sin(thetas[1]), 0,
np.cos(thetas[1])
numpy.cos
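# Quick sanity check on the rotation matrices defined above.  Note that for
# numpy arrays '*' is elementwise, so composing rotations (Rx, Ry, Rz) needs
# the matrix product '@' (or np.dot); a proper rotation also satisfies
# R @ R.T == I.  The angle below is arbitrary.
import numpy as np

theta = 0.3
Rz = np.array([[np.cos(theta), -np.sin(theta), 0.0],
               [np.sin(theta),  np.cos(theta), 0.0],
               [0.0,            0.0,           1.0]])
print(np.allclose(Rz @ Rz.T, np.eye(3)))   # True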
import pandas as pd import numpy as np import scipy import os, sys, time, json, math import matplotlib.pyplot as plt import seaborn as sns from functools import reduce from os.path import join from datetime import datetime from scipy.integrate import odeint from numpy import loadtxt from scipy.optimize import minimize rootDir = os.path.abspath(os.path.curdir) print(rootDir) sys.path.insert(0, os.path.join(rootDir, 'lib')) ## use JD's optimizer #from systemSolver import optimizer as optimizer from optimizer import Differential_Evolution from getPatientData import getPatientData import copy from matplotlib.font_manager import FontProperties #riskConfig = json.load(open('amgen-risk-model/amgen-risk-model/config/riskModel_3y_LipidCxoOptimized.json')) # classConfig = json.load(open(riskConfig['patientClassConfig'])) classConfig = json.load(open('../config/lipidoptimizing.json')) def differentialequations(I, t, p): ''' This function has the differential equations of the lipids (LDL, Total Cholesterol, Triglyceride, HDL) Inputs: I: Initial conditions t: timepoints p: parameters ''' try: # Initial conditions Cldl, Cchol, Ctrig, Chdl = I # Parameters adherence, dose, Imaxldl, Imaxchol, Imaxtrig, Imaxhdl, Ic50, n, dx, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl = p t = np.round(t) t = t.astype(int) # print dose.shape if t > (dose.shape[0] - 1): t = (dose.shape[0] - 1) div = (Ic50+(dose[t]*adherence[t])**n) h0 = ((dose[t] * adherence[t])**n) # llipid equation dCldldt = (Sx0ldl * (1 - np.sum((Imaxldl*h0)/div))) - (dx*Cldl) dCcholdt = (Sx0chol * (1 - np.sum((Imaxchol*h0)/div))) - (dx*Cchol) dCtrigdt = (Sx0trig * (1 - np.sum((Imaxtrig*h0)/div))) - (dx*Ctrig) dChdldt = (Sx0hdl * (1 + np.sum((Imaxhdl*h0)/div))) - (dx*Chdl) f = [dCldldt, dCcholdt, dCtrigdt, dChdldt] return f except Exception as e: # print 'There was some problem with the differentialequations function: {}'.format(e) print(dose.shape, t) raise def differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose): ''' This function solves the differential equations with odeint Inputs: adherence: patient's adherence for all the statins, 2-d numpy array t: timepoints Sx0: synthesis terms for all the lipids C0: baseline values for all the lipids dose: doses for all the statins, 2-d numpy array ''' try: dx = math.log(2)/14 ldl_eff = np.load('../data/final/Efficacy/ldl_efficacy.npy') chol_eff = np.load('../data/final/Efficacy/tc_efficacy.npy') trig_eff = np.load('../data/final/Efficacy/trig_efficacy.npy') hdl_eff = np.load('../data/final/Efficacy/hdl_efficacy.npy') Imaxldl = ldl_eff[0] Imaxchol = chol_eff[0] Imaxtrig = trig_eff[0] Imaxhdl = hdl_eff[0] # Imaxldl, Imaxchol, Imaxtrig, Imaxhdl = np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]), np.array([0,0,0,0,0,0]) Ic50 = ldl_eff[1] n = 0.7 I0 = [Cldl0, Cchol0, Ctrig0, Chdl0] p = [adherence, dose, Imaxldl, Imaxchol, Imaxtrig, Imaxhdl, Ic50, n, dx, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl] sol = odeint(differentialequations, I0, t, args = (p,)) # print(sol) Cldl = [] Cchol = [] Ctrig = [] Chdl = [] for s1 in sol: Cldl.append(s1[0]) Cchol.append(s1[1]) Ctrig.append(s1[2]) Chdl.append(s1[3]) # print(Cldl) return Cldl, Cchol, Ctrig, Chdl except Exception as e: # print('There was some problem with the differential_solve function: {}'.format(e)) raise def adherence_coding(adherence, periods): ''' This function takes the adherence and identifies where it is -1 and returns the pairs of rows and columns, number of windows and the flag Parameters ---------- 
adhrenece : {2-d numpy array for each patient} It has the adherence values for all the medications for each day periods_total : {1-d numpy array} It has the Returns ------- [type] [description] ''' try: # print(periods_total) period_nonzero = periods[periods!=0] row, col = np.where(adherence==-1) pairs = list(map(list, zip(row, col))) windows = len(np.where(np.roll(period_nonzero,1)!=period_nonzero)[0]) if windows == 0: windows = 1 else: windows = windows return pairs, windows, period_nonzero except Exception as e: print('There was some problem with the adherence_coding function: {}'.format(e)) def adherence_guess(adherence, pairs, values, flag): try: for i in range(len(flag)): l = pairs[i] adherence[l[0]][l[1]] = values[flag[i]-1] return adherence except Exception as e: # print 'There was some problem with the adherence_guess function: {}'.format(e) raise def h0_cal(dose, Imax, Ic50, n, adherence): try: h0 = (Imax*((dose*adherence)**n))/(Ic50 + ((dose*adherence)**n)) if all(np.isnan(h0)): h0[:] = 0 h0_dictionary = {'Atorvastatin':h0[0], 'Fluvastatin':h0[1], 'Lovastatin':h0[2], 'Pravastatin':h0[3], 'Rosuvastatin':h0[4], 'Simvastatin':h0[5]} # print(h0_dictionary) return h0_dictionary except Exception as e: print('There was some problem with the h0_cal function: {}'.format(e)) def rmse_function(real_data,real_time,max_value, t, ode_solution): try: real_time = np.array(real_time) weight = (1/max_value)**2 indices = [] for j in real_time: k = np.where(t == j)[0][0] # print(k) indices.append(k) ode_final_values = np.array(ode_solution)[indices] # print(indices) # quit() # print(ode_final_values) rmse = np.average(weight*((ode_final_values - np.array(real_data))**2)) return rmse except Exception as e: print('There was some problem with the rmse_function function: {}'.format(e)) def get_total_rmse_nonNorm(adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t): try: ldl_max = max(ldl) tc_max = max(tc) # if len(trig)>0: # trig_max = max(trig) # else: # trig_max = 1 trig_max = 1 # max(trig) hdl_max = 1 # max(hdl) Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose) rmse_ldl = rmse_function(ldl, t_ldl, 1, t, Cldl) rmse_tc = rmse_function(tc, t_tc, 1, t, Cchol) # rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig) rmse_trig = 0 rmse_hdl = 0 #rmse_function(hdl, t_hdl, 1, t, Chdl) rmse_total = rmse_ldl + rmse_tc + (rmse_trig * 0) + rmse_hdl return rmse_total except Exception as e: # print 'There was some problem with the get_total_rmse function: {}'.format(e) raise def get_total_rmse(x, pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t,count, biomarker,pre_adherence,prestatin, statintype, statin_dose): try: values_adherence = x[0:windows] if count > 0: values_biomarker = x[windows:] for i in range(count): if biomarker[i] == 'ldl': Cldl0 = values_biomarker[i] if biomarker[i] == 'chol': Cchol0 = values_biomarker[i] if biomarker[i] == 'trig': Ctrig0 = values_biomarker[i] if biomarker[i] == 'hdl': Chdl0 = values_biomarker[i] if biomarker[i] == 'pre_adherence': pre_adherence = values_biomarker[i] if biomarker[i] == 'alpha': alpha = values_biomarker[i] if 'alpha' in biomarker: Cldl0 = Cldl0 * alpha Cchol0 = Cchol0 * alpha Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, 
prestatin, statintype, statin_dose, pre_adherence) adherence = adherence_guess(adherence, pairs, values_adherence, period_nonzero) ldl_max = max(ldl) tc_max = max(tc) # if len(trig)>0: # trig_max = max(trig) # else: # trig_max = 1 trig_max = 1 #max(trig) hdl_max = 1 #max(hdl) Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose) rmse_ldl = rmse_function(ldl, t_ldl, ldl_max, t, Cldl) rmse_tc = rmse_function(tc, t_tc, tc_max, t, Cchol) # rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig) rmse_trig = 0 rmse_hdl = 0 #rmse_function(hdl, t_hdl, hdl_max, t, Chdl) rmse_total = (1.2 * rmse_ldl) + rmse_tc + (rmse_trig * 0) +rmse_hdl return rmse_total except Exception as e: # print 'There was some problem with the get_total_rmse function: {}'.format(e) raise def synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence): try: ldl_eff = np.load('../data/final/Efficacy/ldl_efficacy.npy') chol_eff = np.load('../data/final/Efficacy/tc_efficacy.npy') trig_eff = np.load('../data/final/Efficacy/trig_efficacy.npy') hdl_eff = np.load('../data/final/Efficacy/hdl_efficacy.npy') n = 0.7 dx = math.log(2)/14 if pd.isnull(Cldl0) | pd.isnull(Cchol0) | pd.isnull(Ctrig0) | pd.isnull(Chdl0): print(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose) Cldl0, Cchol0, Ctrig0, Chdl0 = baseline_map(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose) if prestatin: Sx0ldl = (dx*Cldl0)/(1-h0_cal(statin_dose, ldl_eff[0], ldl_eff[1], n, pre_adherence)[statintype]) Sx0chol = (dx*Cchol0)/(1-h0_cal(statin_dose, chol_eff[0], chol_eff[1], n, pre_adherence)[statintype]) Sx0trig = (dx*Ctrig0)/(1-h0_cal(statin_dose, trig_eff[0], trig_eff[1], n, pre_adherence)[statintype]) Sx0hdl = (dx*Chdl0)/(1-h0_cal(statin_dose, hdl_eff[0], hdl_eff[1], n, pre_adherence)[statintype]) else: Sx0ldl = (dx*Cldl0) Sx0chol = (dx*Cchol0) Sx0trig = (dx*Ctrig0) Sx0hdl = (dx*Chdl0) # print(Cldl0, Cchol0, Ctrig0, Chdl0) return Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 except Exception as e: # print 'There was some problem with the synthesis_calculation function: {}'.format(e) raise def baseline_map(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose): try: ldl = {'Atorvastatin': {'5': 0.31, '10': 0.37, '15': 0.40, '20': 0.43, '30': 0.46,'40': 0.49, '45': 0.50, '50': 0.51, '60': 0.52, '70': np.nan, '80': 0.55}, 'Fluvastatin': {'5': 0.10, '10': 0.15, '15': np.nan, '20': 0.21, '30': np.nan, '40': 0.27, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.33}, 'Lovastatin': {'5': np.nan, '10': 0.21 , '15': np.nan, '20': 0.29, '30': 0.33, '40': 0.37, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.45}, 'Pravastatin': {'5': 0.15, '10': 0.2, '15': np.nan, '20': 0.24, '30': 0.27, '40': 0.29, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.33}, 'Rosuvastatin': {'5': 0.38, '10': 0.43, '15': 0.46, '20': 0.48, '30': 0.51, '40': 0.53, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.58}, 'Simvastatin': {'5': 0.23, '10': 0.27, '15': 0.3, '20': 0.32, '30': 0.35, '40': 0.37, '45': 0.38, '50': 0.38, '60': 0.4, '70': 0.41, '80': 0.42}} tc = {'Atorvastatin': {'5': 0.24, '10': 0.29, '15': 0.31, '20': 0.33, '30': 0.36, '40': 0.38, '45': 0.39, '50': 0.39, '60': 0.4, '70': np.nan, '80': 0.43}, 'Fluvastatin': {'5': 0.07, '10': 0.12, '15': np.nan, '20': 0.17, '30': np.nan, '40': 0.21, '45': np.nan, '50': np.nan, '60': np.nan, '70':np.nan, '80': 
0.26}, 'Lovastatin': {'5': np.nan, '10': 0.17, '15': np.nan, '20': 0.23, '30': 0.26, '40': 0.29, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.35}, 'Pravastatin': {'5': 0.12, '10': 0.15, '15': np.nan, '20': 0.19, '30': 0.21, '40': 0.22, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.26}, 'Rosuvastatin': {'5': 0.3, '10': 0.34, '15': 0.36, '20': 0.38, '30': 0.39, '40': 0.41, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.45}, 'Simvastatin': {'5': 0.17, '10': 0.21, '15': 0.23, '20': 0.25, '30': 0.27, '40': 0.29, '45': np.nan, '50': 0.3, '60': 0.31, '70': 0.32, '80': 0.33}} trig = {'Atorvastatin': {'5': 0.16, '10': 0.19, '15': 0.2, '20': 0.21, '30': 0.23, '40': 0.25, '45': 0.25, '50': 0.25, '60': 0.26, '70': np.nan, '80': 0.27}, 'Fluvastatin': {'5': 0.05, '10': 0.08, '15': np.nan, '20': 0.11, '30': np.nan, '40': 0.14, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.16}, 'Lovastatin': {'5': np.nan, '10': 0.11, '15': np.nan, '20': 0.15, '30': 0.16, '40': 0.18, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.22}, 'Pravastatin': {'5': 0.08, '10': 0.10, '15': np.nan, '20': 0.12, '30': 0.13, '40': 0.14, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.17}, 'Rosuvastatin': {'5': 0.19, '10': 0.22, '15': 0.23, '20': 0.24, '30': 0.25, '40': 0.27, '45': np.nan, '50': np.nan, '60': np.nan, '70': np.nan, '80': 0.29}, 'Simvastatin': {'5': 0.11, '10': 0.14, '15': 0.15, '20': 0.16, '30': 0.17, '40': 0.18, '45': np.nan, '50': 0.19, '60': 0.20, '70': 0.20, '80': 0.21}} hdl = {'Atorvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70':1.0, '80': 1.0}, 'Fluvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}, 'Lovastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}, 'Pravastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}, 'Rosuvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}, 'Simvastatin': {'5': 1.0, '10': 1.0, '15': 1.0, '20': 1.0, '30': 1.0, '40': 1.0, '45': 1.0, '50': 1.0, '60': 1.0, '70': 1.0, '80': 1.0}} Cldl_prestatin = 4.78407034 Cchol_prestatin = 6.77527799 Ctrig_prestatin = 4.65168793 Chdl_prestatin = 1.81018878 if prestatin == False: if pd.isnull(Cldl0): Cldl0 = Cldl_prestatin if pd.isnull(Cchol0): Cchol0 = Cchol_prestatin if pd.isnull(Ctrig0): Ctrig0 = Ctrig_prestatin if pd.isnull(Chdl0): Chdl0 = Chdl_prestatin if prestatin: if ~(pd.isnull(statin_dose)): statin_dose = str(int(statin_dose)) if pd.isnull(Cldl0): Cldl0 = Cldl_prestatin * ldl[statintype][statin_dose] if pd.isnull(Cchol0): Cchol0 = Cchol_prestatin * tc[statintype][statin_dose] if pd.isnull(Ctrig0): Ctrig0 = Ctrig_prestatin * trig[statintype][statin_dose] if pd.isnull(Chdl0): Chdl0 = Chdl_prestatin * hdl[statintype][statin_dose] return Cldl0, Cchol0, Ctrig0, Chdl0 except Exception as e: print('There was some problem with the baseline_map function: {}'.format(e)) def optimize_callback(x, pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t,count, biomarker,pre_adherence,prestatin, statintype, statin_dose): try: values_adherence = x[0:windows] if count > 0: 
values_biomarker = x[windows:] for i in range(count): if biomarker[i] == 'ldl': Cldl0 = values_biomarker[i] if biomarker[i] == 'chol': Cchol0 = values_biomarker[i] if biomarker[i] == 'trig': Ctrig0 = values_biomarker[i] if biomarker[i] == 'hdl': Chdl0 = values_biomarker[i] if biomarker[i] == 'pre_adherence': pre_adherence = values_biomarker[i] if biomarker[i] == 'alpha': alpha = values_biomarker[i] if 'alpha' in biomarker: Cldl0 = Cldl0 * alpha Cchol0 = Cchol0 * alpha Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence) adherence = adherence_guess(adherence, pairs, values_adherence, period_nonzero) ldl_max = max(ldl) tc_max = max(tc) # if len(trig)>0: # trig_max = max(trig) # else: # trig_max = 1 trig_max = max(trig) hdl_max = max(hdl) Cldl, Cchol, Ctrig, Chdl = differential_solve(adherence, t, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose) rmse_ldl = rmse_function(ldl, t_ldl, ldl_max, t, Cldl) rmse_tc = rmse_function(tc, t_tc, tc_max, t, Cchol) # rmse_trig = rmse_function(trig, t_trig, trig_max, t, Ctrig) rmse_trig = 0 rmse_hdl = 0 #rmse_function(hdl, t_hdl, hdl_max, t, Chdl) rmse_total = (1.2 * rmse_ldl) + rmse_tc + (rmse_trig * 0) +rmse_hdl print(rmse_total) return rmse_total except Exception as e: # print 'There was some problem with the get_total_rmse function: {}'.format(e) raise def optimize_params(pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl,t, count, biomarker, pre_adherence, prestatin, statintype, statin_dose): print('optimize_params') try: if (('ldl' not in biomarker) and ('chol' not in biomarker) and ('hdl' not in biomarker)): alpha_lower = np.nanmax((0.1, Chdl0 / (Cchol0 - Cldl0))) else: print('else statement') alpha_lower = 0.1 alpha_upper = 3.0 optimal_range = {'ldl' : {'lo': 1.292, 'hi':5.171}, 'chol' : {'lo': 2.585, 'hi':9.05}, 'trig' : {'lo': 1.129, 'hi':5.645}, 'hdl' : {'lo': 0.775, 'hi':1.81}, 'pre_adherence' : {'lo': 0.01, 'hi': 1.0}, 'alpha' : {'lo': alpha_lower, 'hi': alpha_upper} } print('optimal range done') low = [] high = [] for name in biomarker: low.append(optimal_range[name]['lo']) high.append(optimal_range[name]['hi']) print('setting bounds') npar = windows+count bounds = np.zeros([npar,2]) bounds[:,0] = [0.01]*windows + low bounds[:,1] = [1]*windows + high # Convert bounds to list of tuples boundsList = [tuple(bounds[i,:]) for i in range(bounds.shape[0])] #solver = minimize(get_total_rmse, x0=np.mean(bounds, axis=1).tolist(), bounds=boundsList, # args = (pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, # Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, # t, count, biomarker, pre_adherence, prestatin, statintype, statin_dose)) #best_rmse = np.inf #all_vals = {} #for i in range(1): # result = solver.fun # vals = solver.x # if result < best_rmse: # best_rmse, best_vals = result, vals # all_vals[i] = {} # all_vals[i]['Error'] = result # all_vals[i]['params'] = list(vals) solver = Differential_Evolution(obj_fun=get_total_rmse, bounds=bounds, parallel= True, npar=npar, npool=npar*8, CR=0.85, strategy=2, fmin=0, fmax=2) print(solver.parameter_number) result = solver.optimize(args = [pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0 , Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, t, count, 
biomarker, pre_adherence, prestatin, statintype, statin_dose]) best_rmse, best_vals = result[:2] return best_rmse, best_vals except Exception as e: # print 'There was some problem with the optimize_params function: {}'.format(e) raise def plotting(t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, Cldl_f, Cchol_f, Ctrig_f, Chdl_f, t, p, tmax=1095): try: # fontP = FontProperties() plt.style.use(['seaborn-white', 'seaborn-talk']) sns.set_style('ticks', {'font.family': ['Times New Roman'], 'font.size': ['18']}) sns.set_context('talk', font_scale=1) # fontP.set_size('24') fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(t_ldl, ldl, '*', color='teal', label='Real data', markersize = '24') ax.plot(t, Cldl_f, color='darkblue', label='ODE Simulation') ax.set_xlabel('Days from baseline') ax.set_ylabel('LDL, mmol/L') ax.set_xlim(0, tmax) ax.legend(frameon=True, framealpha=0.7, fontsize = '18') fig.tight_layout() outdir = os.path.join(classConfig['outputPath'], 'LDL_Simulation') if not os.path.exists(outdir): os.makedirs(outdir) fig.savefig('{}/Patient{}'.format(outdir, p)) fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(t_tc, tc, '*', color='teal', label='Real data', markersize = '24') ax.plot(t, Cchol_f, color='darkblue', label='ODE Simulation') ax.set_xlabel('Days from baseline') ax.set_ylabel('Total cholesterol, mmol/L') ax.set_xlim(0, tmax) ax.legend(frameon=True, framealpha=0.7, fontsize = '18') fig.tight_layout() outdir = os.path.join(classConfig['outputPath'], 'Chol_Simulation') if not os.path.exists(outdir): os.makedirs(outdir) fig.savefig('{}/Patient{}'.format(outdir, p)) # fig = plt.figure(figsize=(12,8)) # ax = fig.add_subplot(111) # ax.plot(t_trig, trig, '*', color='teal', label='Real data', markersize = '24') # ax.plot(t, Ctrig_f, color='darkblue', label='ODE Simulation') # ax.set_xlabel('Days from baseline') # ax.set_ylabel('Triglycerides, mmol/L') # ax.set_xlim(0, 730) # ax.legend(frameon=True, framealpha=0.7, fontsize = '18') # fig.tight_layout() # outdir = os.path.join(classConfig['outputPath'], 'Trig_Simulation') # if not os.path.exists(outdir): # os.makedirs(outdir) # fig.savefig('{}/Patient{}'.format(outdir, p)) fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) ax.plot(t_hdl, hdl, '*', color='teal', label='Real data', markersize = '24') ax.plot(t, Chdl_f, color='darkblue', label='ODE Simulation') ax.set_xlabel('Days from baseline') ax.set_ylabel('HDL, mmol/L') ax.set_xlim(0, tmax) ax.legend(frameon=True, framealpha=0.7, fontsize = '18') fig.tight_layout() outdir = os.path.join(classConfig['outputPath'], 'HDL_Simulation') if not os.path.exists(outdir): os.makedirs(outdir) fig.savefig('{}/Patient{}'.format(outdir, p)) plt.close('all') except Exception as e: # print 'There was some problem with the plotting function: {}'.format(e) raise def plotAdherence(adhData, scatterPoints, labels, outputFileName, tmax=1095): try: plt.style.use(['seaborn-white', 'seaborn-talk']) sns.set_style('ticks', {'font.family': ['Times New Roman']}) sns.set_context('talk', font_scale=1) fig = plt.figure(figsize=(12,8)) ax = fig.add_subplot(111) for m in np.arange(0, adhData.shape[1]): ax.plot(adhData[:,m], label=labels[m]) ax.scatter(np.arange(0, scatterPoints[:,m].shape[0]), scatterPoints[:,m]) ax.set_xlabel('Days from baseline') ax.set_ylabel('Adherence') ax.set_xlim(0, tmax) ax.set_ylim(bottom=0) ax.legend(frameon=True, framealpha=0.7) fig.tight_layout() outdir = os.path.join(classConfig['outputPath'], 'Adherence') if not os.path.exists(outdir): 
os.makedirs(outdir) fig.savefig('{}/Adherence_{}.png'.format(outdir, outputFileName)) plt.close('all') except Exception as e: raise def main_simulate(patientListPickleFile, **kwargs): try: tic = time.time() patientsList = pd.read_pickle(str(patientListPickleFile)) patientList = np.array(patientsList['NRIC_X'].astype(str)) #Note the difference in names of the above two patSN = pd.read_pickle('../../data/intermediate/patientSN_info.pkl') patSN_dict = dict(patSN.apply(lambda x: tuple(x), axis=1).values) # patientsToRun = ['1841', '1993', '2022', '2134', '2272', '2457', '2682', '3088', '3606', '3670', # '2341', '2360', '2466', '2534', '2743', '2787', '2849', '3198', '4267', '4347'] # patientsToRun = ['2326'] rmseFinal = pd.DataFrame(columns=['NRIC_X', 'PatientSN', 'TotalRMSE_nonNorm']) for p in patientList: try: print(p, patSN_dict) p1 = patSN_dict[p] # if p1 not in patientsToRun: # continue print('Loading data for patient {}'.format(p1)) myPatient = getPatientData(p, **kwargs) myPatient.patientSN = p1 myPatient.loadOrigMedications() adherence = myPatient.origMeds['statin']['adherence'] periods = myPatient.origMeds['statin']['periods'] dose = myPatient.origMeds['statin']['dose'] t_ldl, ldl = myPatient.biomarker(['LDL']) t_tc, tc = myPatient.biomarker(['Cholesterol']) t_trig, trig = myPatient.biomarker(['Triglycerides']) t_hdl, hdl = myPatient.biomarker(['HDL']) t_ldl_1 = [] ldl_1 = [] for i in range(len(t_ldl)): t_ldl_1.append(t_ldl[i][0]) ldl_1.append(ldl[i][0]) t_tc_1 = [] tc_1 = [] for i in range(len(t_tc)): t_tc_1.append(t_tc[i][0]) tc_1.append(tc[i][0]) t_trig_1 = [] trig_1 = [] for i in range(len(t_trig)): t_trig_1.append(t_trig[i][0]) trig_1.append(trig[i][0]) t_hdl_1 = [] hdl_1 = [] for i in range(len(t_hdl)): t_hdl_1.append(t_hdl[i][0]) hdl_1.append(hdl[i][0]) t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl = t_ldl_1, ldl_1, t_tc_1, tc_1, t_trig_1, trig_1, t_hdl_1, hdl_1 # print(t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl) print('Loading data for patient {}'.format(p1)) Cldl0 = myPatient.baseline(['LDL'])[0][0] Cchol0 = myPatient.baseline(['Cholesterol'])[0][0] Ctrig0 = myPatient.baseline(['Triglycerides'])[0][0] Chdl0 = myPatient.baseline(['HDL'])[0][0] # print(Cldl0, Chdl0, Cchol0, Ctrig0) prestatin = myPatient.baseline(['Statin_Prior'])[0][0] statintype = myPatient.baseline(['Statin_Prior_Type'])[0][0] statin_dose = myPatient.baseline(['Statin_Prior_Dose'])[0][0] # pre_adherence = myPatient.baseline(['Statin_Pre_Adherence'])[0][0] pre_adherence = 0 ldl_pre = int(pd.isnull(Cldl0)) chol_pre = int(pd.isnull(Cchol0)) # trig_pre = int(pd.isnull(Ctrig0)) hdl_pre = int(pd.isnull(Chdl0)) if prestatin == 1: pre_adherence = 1 # Added this so that lipid sim will be evaluated for each day # t = np.sort(reduce(np.union1d, [t_ldl, t_tc, t_trig, t_hdl])) t_max = 1095 t = np.arange(0, (t_max+1), 1) # Load optimized values myPatient.loadOptimizedMedications() currAdherence = myPatient.optimizedMeds['statin']['adherence'] currDose = myPatient.optimizedMeds['statin']['dose'] currSxo = myPatient.optimizedMeds['statin']['Sxo'] currCxo = myPatient.optimizedMeds['statin']['Cxo'] total_rmse_nonNorm = get_total_rmse_nonNorm(currAdherence, currSxo['LDL'], currSxo['Cholesterol'], currSxo['Triglycerides'], currSxo['HDL'], currCxo['LDL'], currCxo['Cholesterol'], currCxo['Triglycerides'], currCxo['HDL'], currDose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, t) rmseFinal = rmseFinal.append({'NRIC_X': p, 'PatientSN': p1, 'TotalRMSE_nonNorm': total_rmse_nonNorm}, ignore_index=True) except Exception as e: 
print('Patient cannot be processed: {}\n'.format(e)) continue # raise rmseFinal.to_pickle('{}/{}/TotalRMSE.pkl'.format(classConfig['savePath']['final'], classConfig['savePath']['optimizedMeds'])) except Exception as e: print('There was some problem with the main function: {}'.format(e)) # raise def main(patientList, **kwargs): try: tic = time.time() #patientsList = pd.read_pickle(str(patientListPickleFile)) #print('patientsList', patientsList) #patientList = [np.array(patientsList['NRIC_X'].astype(str))] #Note the difference in names of the above two #patSN = pd.read_pickle('../../data/intermediate/patientSN_info.pkl') #patSN_dict = dict(patSN.apply(lambda x: tuple(x), axis=1).values) # patientsToRun = ['1841', '1993', '2022', '2134', '2272', '2457', '2682', '3088', '3606', '3670', # '2341', '2360', '2466', '2534', '2743', '2787', '2849', '3198', '4267', '4347'] # patientsToRun = ['2326'] #print(patSN_dict) print(patientList) for p in patientList: try: print('p iterated') p1 = p # if p1 not in patientsToRun: # continue print('Loading data for patient {}'.format(p1)) myPatient = getPatientData(p, **kwargs) print('initiated patient data') myPatient.patientSN = p1 print('initiated patient SN') myPatient.loadOrigMedications() print('loadedOrigMeds') print('Loaded patient {} medications'.format(p1)) adherence = myPatient.origMeds['statin']['adherence'] periods = myPatient.origMeds['statin']['periods'] dose = myPatient.origMeds['statin']['dose'] print(adherence.shape) print('Loading biomarkers for patient {}'.format(p1)) print(myPatient.biomarker(['LDL'])) t_ldl, ldl = myPatient.biomarker(['LDL']) t_tc, tc = myPatient.biomarker(['Cholesterol']) t_trig, trig = myPatient.biomarker(['Triglycerides']) t_hdl, hdl = myPatient.biomarker(['HDL']) print('Loaded biomarkers for patient{}'.format(p1)) t_ldl_1 = [] ldl_1 = [] for i in range(len(t_ldl)): t_ldl_1.append(t_ldl[i][0]) ldl_1.append(ldl[i][0]) print('loaded ldl') t_tc_1 = [] tc_1 = [] for i in range(len(t_tc)): t_tc_1.append(t_tc[i][0]) tc_1.append(tc[i][0]) print('loaded tc') t_trig_1 = [] trig_1 = [] for i in range(len(t_trig)): t_trig_1.append(t_trig[i][0]) trig_1.append(trig[i][0]) print('loaded trig') t_hdl_1 = [] hdl_1 = [] for i in range(len(t_hdl)): t_hdl_1.append(t_hdl[i][0]) hdl_1.append(hdl[i][0]) print('loaded hdl') t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl = t_ldl_1, ldl_1, t_tc_1, tc_1, t_trig_1, trig_1, t_hdl_1, hdl_1 # print(t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl) print('Loading data for patient {} for baseline'.format(p1)) Cldl0 = myPatient.baseline(['LDL'])[0][0] print('Cldl0', Cldl0) Cchol0 = myPatient.baseline(['Cholesterol'])[0][0] print('Cchol0', Cchol0) Ctrig0 = myPatient.baseline(['Triglycerides'])[0][0] print('Ctrig0', Ctrig0) Chdl0 = myPatient.baseline(['HDL'])[0][0] print('Chdl0', Chdl0) # print(Cldl0, Chdl0, Cchol0, Ctrig0) print('loading statins') prestatin = myPatient.baseline(['Statin_Prior'])[0][0] statintype = myPatient.baseline(['Statin_Prior_Type'])[0][0] statin_dose = myPatient.baseline(['Statin_Prior_Dose'])[0][0] # pre_adherence = myPatient.baseline(['Statin_Pre_Adherence'])[0][0] pre_adherence = 0 print('loaded statins') print('loading prebiomarkers') ldl_pre = int(pd.isnull(Cldl0)) chol_pre = int(pd.isnull(Cchol0)) # trig_pre = int(pd.isnull(Ctrig0)) hdl_pre = int(pd.isnull(Chdl0)) print('loaded prebiomarkers') if prestatin == 1: pre_adherence = 1 # If baseline values are not present, don't optimize alpha if ((ldl_pre==1) & (chol_pre==1) & (hdl_pre==1)): optAlpha = 0 else: optAlpha = 1 
print('optimizing') optimize_dict = { 'ldl': 0, #ldl_pre 'chol': 0, #chol_pre, 'trig': 0, 'hdl': 0, #hdl_pre, 'pre_adherence': 0, #pre_adherence, 'alpha': 0 #optAlpha } alpha = 1 print('loading optimizing dict iterms') count = 0 biomarker = [] for bio, opt in optimize_dict.items(): if opt == 1: count += 1 biomarker.append(bio) # Added this so that lipid sim will be evaluated for each day # t = np.sort(reduce(np.union1d, [t_ldl, t_tc, t_trig, t_hdl])) t_max = 1300 t = np.arange(0, (t_max+1), 1) print('adherence coding') pairs, windows, period_nonzero = adherence_coding(adherence, periods) print('adherence coding done \nsynthesis calculation ') Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence) print('synthesis calculation done') print('Starting optimisation: {} minutes elapsed'.format((time.time()-tic)/60.0)) total_rmse, best_vals = optimize_params(pairs, windows, period_nonzero, adherence, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, t, count, biomarker, pre_adherence, prestatin, statintype, statin_dose) print('Total RMSE = {}'.format(total_rmse)) values_adherence = best_vals[0:windows] if count > 0: values_biomarker = best_vals[windows:] for i in range(count): if biomarker[i] == 'ldl': Cldl0 = values_biomarker[i] if biomarker[i] == 'chol': Cchol0 = values_biomarker[i] if biomarker[i] == 'trig': Ctrig0 = values_biomarker[i] if biomarker[i] == 'hdl': Chdl0 = values_biomarker[i] if biomarker[i] == 'pre_adherence': pre_adherence = values_biomarker[i] if biomarker[i] == 'alpha': alpha = values_biomarker[i] if 'ldl' not in biomarker: Cldl0 = Cldl0 * alpha if 'chol' not in biomarker: Cchol0 = Cchol0 * alpha adherence_final = adherence_guess(adherence, pairs, best_vals[0:windows], period_nonzero) Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0 = synthesis_calculation(Cldl0, Cchol0, Ctrig0, Chdl0, prestatin, statintype, statin_dose, pre_adherence) total_rmse_nonNorm = get_total_rmse_nonNorm(adherence_final, Sx0ldl, Sx0chol, Sx0trig, Sx0hdl, Cldl0, Cchol0, Ctrig0, Chdl0, dose, t_ldl, ldl, t_tc, tc, t_trig, trig, t_hdl, hdl, t) t_plotting =
np.arange(0, t_max)
numpy.arange
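# A reduced, self-contained sketch of the same kind of turnover model solved
# with odeint above: dC/dt = S - dx*C with the half-life-based elimination
# rate dx = ln(2)/14.  The synthesis rate and baseline below are made up.
import math
import numpy as np
from scipy.integrate import odeint

def turnover(C, t, S, dx):
    return S - dx * C

dx = math.log(2) / 14
S = 0.2                      # hypothetical synthesis rate
C0 = 3.0                     # hypothetical baseline concentration
t = np.arange(0, 365)
C = odeint(turnover, C0, t, args=(S, dx))
print(C[-1, 0])              # approaches the steady state S/dx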
import os import sys import numpy as np import pickle import matplotlib.pylab as plt sys.path.append(os.pardir) from dataset.mnist import load_mnist (x_train, t_train), (x_test, t_test) = load_mnist(flatten=True, normalize=True, one_hot_label=False) network = pickle.load(open("../ch03/sample_weight.pkl", "rb")) W1, W2, W3 = network["W1"], network["W2"], network["W3"] b1, b2, b3 = network["b1"], network["b2"], network["b3"] def sigmoid(x): return 1 / (1 + np.exp(-x)) def softmax(x): c = np.max(x) return np.exp(x - c) / np.sum(np.exp(x - c)) def predict(x): a1 =
np.dot(x, W1)
numpy.dot
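# Toy forward pass through one dense layer, mirroring the start of predict()
# above but with random weights of made-up shapes instead of the values
# loaded from sample_weight.pkl.
import numpy as np

rng = np.random.default_rng(0)
x = rng.random(784)            # one flattened MNIST-sized input
W1 = rng.random((784, 50))
b1 = rng.random(50)
a1 = np.dot(x, W1) + b1
z1 = 1 / (1 + np.exp(-a1))     # sigmoid activation
print(z1.shape)                # (50,)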
import pytest import numpy as np from ardent.utilities import _validate_scalar_to_multi from ardent.utilities import _validate_ndarray from ardent.utilities import _validate_xyz_resolution from ardent.utilities import _compute_axes from ardent.utilities import _compute_coords from ardent.utilities import _multiply_by_affine # TODO: write test for this function. """ Test _validate_scalar_to_multi. """ def test__validate_scalar_to_multi(): # Test proper use. kwargs = dict(value=1, size=1, dtype=float) correct_output = np.array([1], float) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) kwargs = dict(value=1, size=0, dtype=int) correct_output = np.array([], int) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) kwargs = dict(value=9.5, size=4, dtype=int) correct_output = np.full(4, 9, int) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) kwargs = dict(value=[1, 2, 3.5], size=3, dtype=float) correct_output = np.array([1, 2, 3.5], float) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) kwargs = dict(value=[1, 2, 3.5], size=3, dtype=int) correct_output = np.array([1, 2, 3], int) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) kwargs = dict(value=(1, 2, 3), size=3, dtype=int) correct_output = np.array([1, 2, 3], int) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) kwargs = dict(value=np.array([1, 2, 3], float), size=3, dtype=int) correct_output = np.array([1, 2, 3], int) assert np.array_equal(_validate_scalar_to_multi(**kwargs), correct_output) # Test improper use. kwargs = dict(value=[1, 2, 3, 4], size='size: not an int', dtype=float) expected_exception = TypeError match = "size must be interpretable as an integer." with pytest.raises(expected_exception, match=match): _validate_scalar_to_multi(**kwargs) kwargs = dict(value=[], size=-1, dtype=float) expected_exception = ValueError match = "size must be non-negative." with pytest.raises(expected_exception, match=match): _validate_scalar_to_multi(**kwargs) kwargs = dict(value=[1, 2, 3, 4], size=3, dtype=int) expected_exception = ValueError match = "The length of value must either be 1 or it must match size." with pytest.raises(expected_exception, match=match): _validate_scalar_to_multi(**kwargs) kwargs = dict(value=np.arange(3*4, dtype=int).reshape(3,4), size=3, dtype=float) expected_exception = ValueError match = "value must not have more than 1 dimension." with pytest.raises(expected_exception, match=match): _validate_scalar_to_multi(**kwargs) kwargs = dict(value=[1, 2, 'c'], size=3, dtype=int) expected_exception = ValueError match = "value and dtype are incompatible with one another." with pytest.raises(expected_exception, match=match): _validate_scalar_to_multi(**kwargs) kwargs = dict(value='c', size=3, dtype=int) expected_exception = ValueError match = "value and dtype are incompatible with one another." with pytest.raises(expected_exception, match=match): _validate_scalar_to_multi(**kwargs) """ Test _validate_ndarray. """ def test__validate_ndarray(): # Test proper use. 
kwargs = dict(array=np.arange(3, dtype=int), dtype=float) correct_output = np.arange(3, dtype=float) assert np.array_equal(_validate_ndarray(**kwargs), correct_output) kwargs = dict(array=[[0,1,2], [3,4,5]], dtype=float) correct_output = np.arange(2*3, dtype=float).reshape(2,3) assert np.array_equal(_validate_ndarray(**kwargs), correct_output) kwargs = dict(array=np.array([0,1,2]), broadcast_to_shape=(2,3)) correct_output = np.array([[0,1,2], [0,1,2]]) assert np.array_equal(_validate_ndarray(**kwargs), correct_output) kwargs = dict(array=np.array(7), required_ndim=1) correct_output = np.array([7]) assert np.array_equal(_validate_ndarray(**kwargs), correct_output) # Test improper use. # Validate arguments. kwargs = dict(array=np.arange(3), minimum_ndim=1.5) expected_exception = TypeError match = "minimum_ndim must be of type int." with pytest.raises(expected_exception, match=match): _validate_ndarray(**kwargs) kwargs = dict(array=np.arange(3), minimum_ndim=-1) expected_exception = ValueError match = "minimum_ndim must be non-negative." with pytest.raises(expected_exception, match=match): _validate_ndarray(**kwargs) kwargs = dict(array=np.arange(3), required_ndim=1.5) expected_exception = TypeError match = "required_ndim must be either None or of type int." with pytest.raises(expected_exception, match=match): _validate_ndarray(**kwargs) kwargs = dict(array=np.arange(3), required_ndim=-1) expected_exception = ValueError match = "required_ndim must be non-negative." with pytest.raises(expected_exception, match=match): _validate_ndarray(**kwargs) kwargs = dict(array=np.arange(3), dtype="not of type type") expected_exception = TypeError match = "dtype must be either None or a valid type." with pytest.raises(expected_exception, match=match): _validate_ndarray(**kwargs) # Validate array. kwargs = dict(array=np.array(print), dtype=int) expected_exception = TypeError match = "array is of a type that is incompatible with dtype." with pytest.raises(expected_exception, match=match): _validate_ndarray(**kwargs) kwargs = dict(array=
np.array('string that is not an int')
numpy.array
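# The broadcast_to_shape behaviour exercised in the test above can be
# reproduced directly with numpy broadcasting, e.g.:
import numpy as np

row = np.array([0, 1, 2])
print(np.broadcast_to(row, (2, 3)))
# [[0 1 2]
#  [0 1 2]]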
import cv2 as cv import numpy as np import pickle from homography_ransac import homography_ransac from match import Match from camera import Camera class Matcher: ''' Matcher class for finding parwise matches between images by finding corresponding SIFT keypoints between image ''' def __init__(self, imgs): ''' imgs = [Image] ''' self._imgs = imgs self._matches = None self._cameras = [Camera(img) for img in self._imgs] @property def matches(self): return self._matches def pairwise_match(self): ''' 1. Extract SIFT keypoints 2. Match keypoints 3. Find good matches (RANSAC) 4. Order matches by confidence ''' print(f'Images count: {len(self._imgs)}') try: pairwise_matches = pickle.load(open(f'pairwise_matches_{len(self._imgs)}.p', 'rb')) print('Loaded previous pairwise_matches') except (OSError, IOError): for img in self._imgs: img.extract_sift_features() paired, potential_pairs_matches = self._match_keypoints() pairwise_matches = self._pairwise_match_images(paired, potential_pairs_matches) pickle.dump(pairwise_matches, open(f'pairwise_matches_{len(self._imgs)}.p', 'wb')) self._matches = pairwise_matches return pairwise_matches def _match_keypoints(self): ''' Use an approx KD tree to find the best matches ''' # Initialise approx KD tree FLANN_INDEX_KDTREE = 0 index_params = dict(algorithm = FLANN_INDEX_KDTREE, trees = 5) search_params = dict(checks = 50) flann = cv.FlannBasedMatcher(index_params, search_params) # Find good matches all_keypoints = [] all_descriptors = [] for img in self._imgs: all_keypoints.append(img.keypoints) all_descriptors.append(img.descriptors) # Find matches for the descriptors of one image paired = [] potential_pairs_matches = [] for i in range(0, len(self._imgs)): flann.clear() train_descriptors = [x for j,x in enumerate(all_descriptors) if j != i] query_descriptors = all_descriptors[i] flann.add(train_descriptors) flann.train() # might be included in the knnMatch method, so may be able to remove... 
matches = flann.knnMatch(query_descriptors, k=4) # print(f'len(matches): {len(matches)}') # print(f'len(query_descriptors): {len(query_descriptors)}') potential_pairs = np.empty((len(self._imgs), len(query_descriptors)), dtype=int) potential_pairs.fill(-1) for (j, nearest_neighbours) in enumerate(matches): # potential_pairs[[n.imgIdx if n.imgIdx < i else n.imgIdx + 1 for n in nearest_neighbours]] += 1 # Reverse so that closest overrides further points for n in reversed(nearest_neighbours): query_img_index = n.imgIdx if n.imgIdx < i else n.imgIdx + 1 potential_pairs[query_img_index][j] = n.trainIdx # Take 6 best matching pairs' indexes potential_pairs_positive_count = np.sum(np.array(potential_pairs) >= 0, axis=1) # print(f'potential_pairs_nonzero_count: {potential_pairs_nonzero_count}') pairs = np.argsort(potential_pairs_positive_count)[::-1][:6] # print(f'pairs: {pairs}') paired.append(pairs.tolist()) potential_pairs_matches.append(potential_pairs) return paired, potential_pairs_matches def _pairwise_match_images(self, paired, potential_pairs_matches): confirmed_matches = [] all_keypoints = [img.keypoints for img in self._imgs] for (query_img_index, img_pair_indexes) in enumerate(paired): for pair_index in img_pair_indexes: match_names = [(match.cam_from.image.filename, match.cam_to.image.filename) for match in confirmed_matches] pair_filename = self._imgs[pair_index].filename query_img_filename = self._imgs[query_img_index].filename if ((query_img_filename, pair_filename) in match_names or (pair_filename, query_img_filename) in match_names): continue if query_img_index == pair_index: continue query_keypoints = np.take(
np.array(all_keypoints[query_img_index])
numpy.array
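# Minimal FLANN k-NN matching sketch using the same index/search parameters
# as above, with random float32 arrays standing in for SIFT descriptors
# (illustrative only; requires opencv-python).
import numpy as np
import cv2 as cv

des1 = np.random.rand(10, 128).astype(np.float32)
des2 = np.random.rand(15, 128).astype(np.float32)
flann = cv.FlannBasedMatcher(dict(algorithm=0, trees=5), dict(checks=50))
matches = flann.knnMatch(des1, des2, k=2)
print(len(matches), matches[0][0].trainIdx)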
from unittest import TestCase import pytaf import numpy as np class TestResampleN(TestCase): # Both source and target are 1-d. def test_resample_n_1d(self): slat = np.arange(12, dtype=np.float64) slon = np.arange(12, dtype=np.float64) sdata = slat * -333 tlat = np.arange(12, dtype=np.float64) tlon = np.arange(12, dtype=np.float64) r = 5555 g = pytaf.resample_n(slat, slon, tlat, tlon, sdata, r) h = np.array([-0., -333., -666., -999., -1332., -1665., -1998.,-2331., -999., -2997., -3330., -3663.]) try: np.testing.assert_array_equal(g, h) res = True except AssertionError as err: res = False print(err) self.assertTrue(res) # Both source and target are 2-d. def test_resample_n_2d(self): slat = np.arange(12, dtype=np.float64).reshape((3,4)) slon = np.arange(12, dtype=np.float64).reshape((3,4)) sdata = slat * -333 tlat = np.arange(12, dtype=np.float64).reshape((3,4)) tlon = np.arange(12, dtype=np.float64).reshape((3,4)) r = 5555 g = pytaf.resample_n(slat, slon, tlat, tlon, sdata, r) h = np.array([[-0., -333., -666., -999.], [-1332., -1665., -1998.,-2331.], [-999, -2997., -3330., -3663.]]) try: np.testing.assert_array_equal(g, h) res = True except AssertionError as err: res = False print(err) self.assertTrue(res) # Source is 2-d and target is 1-d. def test_resample_n_2d_to_1d(self): slat = np.arange(12, dtype=np.float64).reshape((3,4)) slon = np.arange(12, dtype=np.float64).reshape((3,4)) sdata = slat * -333 tlat = np.arange(12, dtype=np.float64) tlon = np.arange(12, dtype=np.float64) r = 5555 g = pytaf.resample_n(slat, slon, tlat, tlon, sdata, r) h = np.array([-0., -333., -666., -999., -1332., -1665., -1998.,-2331., -999., -2997., -3330., -3663.]) try: np.testing.assert_array_equal(g, h) res = True except AssertionError as err: res = False print(err) self.assertTrue(res) # Source is 1-d and target is 2-d. def test_resample_n_1d_to_2d(self): slat = np.arange(12, dtype=np.float64) slon = np.arange(12, dtype=np.float64) sdata = slat * -333 tlat = np.arange(12, dtype=np.float64).reshape((3,4)) tlon = np.arange(12, dtype=np.float64).reshape((3,4)) r = 5555 g = pytaf.resample_n(slat, slon, tlat, tlon, sdata, r) h = np.array([[-0., -333., -666., -999.], [-1332., -1665., -1998.,-2331.], [-999, -2997., -3330., -3663.]]) try:
np.testing.assert_array_equal(g, h)
numpy.testing.assert_array_equal
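# np.testing.assert_array_equal raises an AssertionError with a readable diff
# when arrays differ; the try/except blocks above just turn that into a bool.
# Tiny demonstration on made-up arrays:
import numpy as np

a = np.array([1.0, 2.0, 3.0])
np.testing.assert_array_equal(a, np.array([1.0, 2.0, 3.0]))   # passes silently
try:
    np.testing.assert_array_equal(a, np.array([1.0, 2.0, 4.0]))
except AssertionError:
    print("arrays differ")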
# -*- coding: utf-8 -*- import numpy as np from scipy import interpolate from .pattern import Pattern from .utility import convert_density_to_atoms_per_cubic_angstrom, calculate_incoherent_scattering, \ calculate_f_mean_squared, calculate_f_squared_mean, extrapolate_to_zero_linear from .calc import calculate_normalization_factor_raw, calculate_sq_raw, calculate_fr, calculate_gr_raw from .optimization import optimize_sq class GlassureCalculator(object): def __init__(self, original_pattern, background_pattern, composition, density, r=np.linspace(0, 10, 1000)): self.original_pattern = original_pattern self.background_pattern = background_pattern self.sample_pattern = self.original_pattern - self.background_pattern self.elemental_abundances = composition self.density = density self.atomic_density = convert_density_to_atoms_per_cubic_angstrom(composition, density) q, _ = self.sample_pattern.data self.incoherent_scattering = calculate_incoherent_scattering(composition, q) self.f_mean_squared = calculate_f_mean_squared(composition, q) self.f_squared_mean = calculate_f_squared_mean(composition, q) self.sq_pattern = None self.fr_pattern = None self.gr_pattern = None self.r = r self.calculate_transforms(r) def calculate_transforms(self, r): self.sq_pattern = self.calc_sq() self.fr_pattern = self.calc_fr(r) self.gr_pattern = self.calc_gr() def update_density(self, density): self.density = density self.atomic_density = convert_density_to_atoms_per_cubic_angstrom(self.elemental_abundances, density) self.calculate_transforms() def get_normalization_factor(self): raise NotImplementedError def calc_sq(self): raise NotImplementedError def calc_fr(self, r): raise NotImplementedError def calc_gr(self): raise NotImplementedError def optimize_sq(self, r): raise NotImplementedError class StandardCalculator(GlassureCalculator): def __init__(self, original_pattern, background_pattern, composition, density, r=np.linspace(0, 10, 1000), normalization_attenuation_factor=0.001, use_modification_fcn=False, extrapolation_method=None, extrapolation_parameters=None): self.attenuation_factor = normalization_attenuation_factor self.use_modification_fcn = use_modification_fcn self.extrapolation_method = extrapolation_method self.extrapolation_parameters = extrapolation_parameters super(StandardCalculator, self).__init__(original_pattern, background_pattern, composition, density, r) def get_normalization_factor(self): return calculate_normalization_factor_raw(self.sample_pattern, self.atomic_density, self.f_squared_mean, self.f_mean_squared, self.incoherent_scattering, self.attenuation_factor) def calc_sq(self): n = self.get_normalization_factor() q, structure_factor = calculate_sq_raw(self.sample_pattern, self.f_squared_mean, self.f_mean_squared, self.incoherent_scattering, n).data if self.extrapolation_method is None: return Pattern(q, structure_factor) else: step = q[1] - q[0] q_low = np.arange(step, min(q), step) if self.extrapolation_method == 'linear': return extrapolate_to_zero_linear(Pattern(q, structure_factor)) elif self.extrapolation_method == 'spline': q_low_cutoff = np.arange(step, self.extrapolation_parameters['cutoff'], step) intensity_low_cutoff = np.zeros(q_low_cutoff.shape) ind_to_q_max = np.where(q <= self.extrapolation_parameters['q_max']) q_spline = np.concatenate((q_low_cutoff, q[ind_to_q_max])) int_spline = np.concatenate((intensity_low_cutoff, structure_factor[ind_to_q_max])) tck = interpolate.splrep(q_spline, int_spline) sq_low = interpolate.splev(q_low, tck) return Pattern(
np.concatenate((q_low, q))
numpy.concatenate
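A short, hedged sketch of the call the completion above supplies: np.concatenate joins a synthetic low-q grid onto the measured grid before the extrapolated pattern is built. The grid spacing and values below are made up for illustration and do not come from the prompt.

import numpy as np

q = np.linspace(0.5, 10.0, 96)            # measured q grid (invented values)
structure_factor = np.ones_like(q)        # stand-in for S(q) on that grid
step = q[1] - q[0]
q_low = np.arange(step, q.min(), step)    # extrapolated points below q_min
sq_low = np.zeros_like(q_low)             # placeholder intensities at low q

q_full = np.concatenate((q_low, q))       # 1-D arrays joined end to end
sq_full = np.concatenate((sq_low, structure_factor))
assert q_full.size == q_low.size + q.size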
# Copyright (c) 2014, <NAME>. # Licensed under the BSD 3-clause license (see LICENSE.txt) import numpy as np from scipy.special import wofz from .kern import Kern from ...core.parameterization import Param from ...core.parameterization.transformations import Logexp from ...util.caching import Cache_this class EQ_ODE2(Kern): """ Covariance function for second order differential equation driven by an exponentiated quadratic covariance. This outputs of this kernel have the form .. math:: \frac{\text{d}^2y_j(t)}{\text{d}^2t} + C_j\frac{\text{d}y_j(t)}{\text{d}t} + B_jy_j(t) = \sum_{i=1}^R w_{j,i} u_i(t) where :math:`R` is the rank of the system, :math:`w_{j,i}` is the sensitivity of the :math:`j`th output to the :math:`i`th latent function, :math:`d_j` is the decay rate of the :math:`j`th output and :math:`f_i(t)` and :math:`g_i(t)` are independent latent Gaussian processes goverened by an exponentiated quadratic covariance. :param output_dim: number of outputs driven by latent function. :type output_dim: int :param W: sensitivities of each output to the latent driving function. :type W: ndarray (output_dim x rank). :param rank: If rank is greater than 1 then there are assumed to be a total of rank latent forces independently driving the system, each with identical covariance. :type rank: int :param C: damper constant for the second order system. :type C: array of length output_dim. :param B: spring constant for the second order system. :type B: array of length output_dim. """ #This code will only work for the sparseGP model, due to limitations in models for this kernel def __init__(self, input_dim=2, output_dim=1, rank=1, W=None, lengthscale=None, C=None, B=None, active_dims=None, name='eq_ode2'): #input_dim should be 1, but kern._slice_X is not returning index information required to evaluate kernels assert input_dim == 2, "only defined for 1 input dims" super(EQ_ODE2, self).__init__(input_dim=input_dim, active_dims=active_dims, name=name) self.rank = rank self.output_dim = output_dim if lengthscale is None: lengthscale = .5+np.random.rand(self.rank) else: lengthscale = np.asarray(lengthscale) assert lengthscale.size in [1, self.rank], "Bad number of lengthscales" if lengthscale.size != self.rank: lengthscale = np.ones(self.input_dim)*lengthscale if W is None: #W = 0.5*np.random.randn(self.output_dim, self.rank)/np.sqrt(self.rank) W = np.ones((self.output_dim, self.rank)) else: assert W.shape == (self.output_dim, self.rank) if C is None: C = np.ones(self.output_dim) if B is None: B = np.ones(self.output_dim) self.C = Param('C', C, Logexp()) self.B = Param('B', B, Logexp()) self.lengthscale = Param('lengthscale', lengthscale, Logexp()) self.W = Param('W', W) self.link_parameters(self.lengthscale, self.C, self.B, self.W) @Cache_this(limit=2) def K(self, X, X2=None): #This way is not working, indexes are lost after using k._slice_X #index = np.asarray(X, dtype=np.int) #index = index.reshape(index.size,) if hasattr(X, 'values'): X = X.values index = np.int_(X[:, 1]) index = index.reshape(index.size,) X_flag = index[0] >= self.output_dim if X2 is None: if X_flag: #Calculate covariance function for the latent functions index -= self.output_dim return self._Kuu(X, index) else: raise NotImplementedError else: #This way is not working, indexes are lost after using k._slice_X #index2 = np.asarray(X2, dtype=np.int) #index2 = index2.reshape(index2.size,) if hasattr(X2, 'values'): X2 = X2.values index2 = np.int_(X2[:, 1]) index2 = index2.reshape(index2.size,) X2_flag = index2[0] >= self.output_dim 
#Calculate cross-covariance function if not X_flag and X2_flag: index2 -= self.output_dim return self._Kfu(X, index, X2, index2) #Kfu else: index -= self.output_dim return self._Kfu(X2, index2, X, index).T #Kuf #Calculate the covariance function for diag(Kff(X,X)) def Kdiag(self, X): #This way is not working, indexes are lost after using k._slice_X #index = np.asarray(X, dtype=np.int) #index = index.reshape(index.size,) if hasattr(X, 'values'): X = X.values index = np.int_(X[:, 1]) index = index.reshape(index.size,) #terms that move along t t = X[:, 0].reshape(X.shape[0], 1) d = np.unique(index) #Output Indexes B = self.B.values[d] C = self.C.values[d] S = self.W.values[d, :] #Index transformation indd = np.arange(self.output_dim) indd[d] = np.arange(d.size) index = indd[index] #Check where wd becomes complex wbool = C*C >= 4.*B B = B.reshape(B.size, 1) C = C.reshape(C.size, 1) alpha = .5*C C2 = C*C wbool2 = wbool[index] ind2t = np.where(wbool2) ind3t = np.where(np.logical_not(wbool2)) #Terms that move along q lq = self.lengthscale.values.reshape(1, self.lengthscale.size) S2 = S*S kdiag = np.empty((t.size, )) indD = np.arange(B.size) #(1) When wd is real if np.any(np.logical_not(wbool)): #Indexes of index and t related to (2) t1 = t[ind3t] ind = index[ind3t] d = np.asarray(np.where(np.logical_not(wbool))[0]) #Selection of outputs indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms S2lq = S2[d]*(.5*lq) c0 = S2lq*np.sqrt(np.pi) w = .5*np.sqrt(4.*B[d] - C2[d]) alphad = alpha[d] w2 = w*w gam = alphad + 1j*w gamc = alphad - 1j*w c1 = .5/(alphad*w2) c2 = .5/(gam*w2) c = c1 - c2 #DxQ terms nu = lq*(gam*.5) K01 = c0*c #Nx1 terms gamt = -gam[ind]*t1 gamct = -gamc[ind]*t1 egamt = np.exp(gamt) ec = egamt*c2[ind] - np.exp(gamct)*c1[ind] #NxQ terms t_lq = t1/lq # Upsilon Calculations # Using wofz wnu = wofz(1j*nu) lwnu = np.log(wnu) t2_lq2 = -t_lq*t_lq upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind])))) upm[t1[:, 0] == 0, :] = 0. nu2 = nu*nu z1 = nu[ind] - t_lq indv1 = np.where(z1.real >= 0.) indv2 = np.where(z1.real < 0.) upv = -np.exp(lwnu[ind] + gamt) if indv1[0].shape > 0: upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]))) if indv2[0].shape > 0: upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.))\ - np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2]))) upv[t1[:, 0] == 0, :] = 0. #Covariance calculation kdiag[ind3t] = np.sum(np.real(K01[ind]*upm), axis=1) kdiag[ind3t] += np.sum(np.real((c0[ind]*ec)*upv), axis=1) #(2) When w_d is complex if np.any(wbool): t1 = t[ind2t] ind = index[ind2t] #Index transformation d = np.asarray(np.where(wbool)[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms S2lq = S2[d]*(lq*.25) c0 = S2lq*np.sqrt(np.pi) w = .5*np.sqrt(C2[d] - 4.*B[d]) alphad = alpha[d] gam = alphad - w gamc = alphad + w w2 = -w*w c1 = .5/(alphad*w2) c21 = .5/(gam*w2) c22 = .5/(gamc*w2) c = c1 - c21 c2 = c1 - c22 #DxQ terms K011 = c0*c K012 = c0*c2 nu = lq*(.5*gam) nuc = lq*(.5*gamc) #Nx1 terms gamt = -gam[ind]*t1 gamct = -gamc[ind]*t1 egamt = np.exp(gamt) egamct = np.exp(gamct) ec = egamt*c21[ind] - egamct*c1[ind] ec2 = egamct*c22[ind] - egamt*c1[ind] #NxQ terms t_lq = t1/lq #Upsilon Calculations using wofz t2_lq2 = -t_lq*t_lq #Required when using wofz wnu = wofz(1j*nu).real lwnu = np.log(wnu) upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind])).real)) upm[t1[:, 0] == 0., :] = 0. nu2 = nu*nu z1 = nu[ind] - t_lq indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) 
upv = -np.exp(lwnu[ind] + gamt) if indv1[0].shape > 0: upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.))\ - np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real)) upv[t1[:, 0] == 0, :] = 0. wnuc = wofz(1j*nuc).real lwnuc = np.log(wnuc) upmc = wnuc[ind] - np.exp(t2_lq2 + gamct + np.log(wofz(1j*(t_lq + nuc[ind])).real)) upmc[t1[:, 0] == 0., :] = 0. nuc2 = nuc*nuc z1 = nuc[ind] - t_lq indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) upvc = - np.exp(lwnuc[ind] + gamct) if indv1[0].shape > 0: upvc[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: upvc[indv2] += np.exp(nuc2[ind[indv2[0]], indv2[1]] + gamct[indv2[0], 0] + np.log(2.))\ - np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real)) upvc[t1[:, 0] == 0, :] = 0. #Covariance calculation kdiag[ind2t] = np.sum(K011[ind]*upm + K012[ind]*upmc + (c0[ind]*ec)*upv + (c0[ind]*ec2)*upvc, axis=1) return kdiag def update_gradients_full(self, dL_dK, X, X2 = None): #index = np.asarray(X, dtype=np.int) #index = index.reshape(index.size,) if hasattr(X, 'values'): X = X.values self.B.gradient = np.zeros(self.B.shape) self.C.gradient = np.zeros(self.C.shape) self.W.gradient = np.zeros(self.W.shape) self.lengthscale.gradient = np.zeros(self.lengthscale.shape) index = np.int_(X[:, 1]) index = index.reshape(index.size,) X_flag = index[0] >= self.output_dim if X2 is None: if X_flag: #Kuu or Kmm index -= self.output_dim tmp = dL_dK*self._gkuu_lq(X, index) for q in np.unique(index): ind = np.where(index == q) self.lengthscale.gradient[q] = tmp[np.ix_(ind[0], ind[0])].sum() else: raise NotImplementedError else: #Kfu or Knm #index2 = np.asarray(X2, dtype=np.int) #index2 = index2.reshape(index2.size,) if hasattr(X2, 'values'): X2 = X2.values index2 = np.int_(X2[:, 1]) index2 = index2.reshape(index2.size,) X2_flag = index2[0] >= self.output_dim if not X_flag and X2_flag: index2 -= self.output_dim else: dL_dK = dL_dK.T #so we obtaing dL_Kfu indtemp = index - self.output_dim Xtemp = X X = X2 X2 = Xtemp index = index2 index2 = indtemp glq, gSdq, gB, gC = self._gkfu(X, index, X2, index2) tmp = dL_dK*glq for q in np.unique(index2): ind = np.where(index2 == q) self.lengthscale.gradient[q] = tmp[:, ind].sum() tmpB = dL_dK*gB tmpC = dL_dK*gC tmp = dL_dK*gSdq for d in np.unique(index): ind = np.where(index == d) self.B.gradient[d] = tmpB[ind, :].sum() self.C.gradient[d] = tmpC[ind, :].sum() for q in np.unique(index2): ind2 = np.where(index2 == q) self.W.gradient[d, q] = tmp[np.ix_(ind[0], ind2[0])].sum() def update_gradients_diag(self, dL_dKdiag, X): #index = np.asarray(X, dtype=np.int) #index = index.reshape(index.size,) if hasattr(X, 'values'): X = X.values self.B.gradient = np.zeros(self.B.shape) self.C.gradient = np.zeros(self.C.shape) self.W.gradient = np.zeros(self.W.shape) self.lengthscale.gradient = np.zeros(self.lengthscale.shape) index = np.int_(X[:, 1]) index = index.reshape(index.size,) glq, gS, gB, gC = self._gkdiag(X, index) tmp = dL_dKdiag.reshape(index.size, 1)*glq self.lengthscale.gradient = tmp.sum(0) #TODO: Avoid the reshape by a priori knowing the shape of dL_dKdiag tmpB = dL_dKdiag*gB.reshape(dL_dKdiag.shape) tmpC = dL_dKdiag*gC.reshape(dL_dKdiag.shape) tmp = dL_dKdiag.reshape(index.size, 1)*gS for d in np.unique(index): ind = np.where(index == d) self.B.gradient[d] = tmpB[ind].sum() self.C.gradient[d] = tmpC[ind].sum() self.W.gradient[d, :] = tmp[ind].sum(0) def gradients_X(self, 
dL_dK, X, X2=None): #index = np.asarray(X, dtype=np.int) #index = index.reshape(index.size,) if hasattr(X, 'values'): X = X.values index = np.int_(X[:, 1]) index = index.reshape(index.size,) X_flag = index[0] >= self.output_dim #If input_dim == 1, use this #gX = np.zeros((X.shape[0], 1)) #Cheat to allow gradient for input_dim==2 gX = np.zeros(X.shape) if X2 is None: #Kuu or Kmm if X_flag: index -= self.output_dim gX[:, 0] = 2.*(dL_dK*self._gkuu_X(X, index)).sum(0) return gX else: raise NotImplementedError else: #Kuf or Kmn #index2 = np.asarray(X2, dtype=np.int) #index2 = index2.reshape(index2.size,) if hasattr(X2, 'values'): X2 = X2.values index2 = np.int_(X2[:, 1]) index2 = index2.reshape(index2.size,) X2_flag = index2[0] >= self.output_dim if X_flag and not X2_flag: #gradient of Kuf(Z, X) wrt Z index -= self.output_dim gX[:, 0] = (dL_dK*self._gkfu_z(X2, index2, X, index).T).sum(1) return gX else: raise NotImplementedError #---------------------------------------# # Helper functions # #---------------------------------------# #Evaluation of squared exponential for LFM def _Kuu(self, X, index): index = index.reshape(index.size,) t = X[:, 0].reshape(X.shape[0],) lq = self.lengthscale.values.reshape(self.rank,) lq2 = lq*lq #Covariance matrix initialization kuu = np.zeros((t.size, t.size)) #Assign 1. to diagonal terms kuu[np.diag_indices(t.size)] = 1. #Upper triangular indices indtri1, indtri2 = np.triu_indices(t.size, 1) #Block Diagonal indices among Upper Triangular indices ind = np.where(index[indtri1] == index[indtri2]) indr = indtri1[ind] indc = indtri2[ind] r = t[indr] - t[indc] r2 = r*r #Calculation of covariance function kuu[indr, indc] = np.exp(-r2/lq2[index[indr]]) #Completation of lower triangular part kuu[indc, indr] = kuu[indr, indc] return kuu #Evaluation of cross-covariance function def _Kfu(self, X, index, X2, index2): #terms that move along t t = X[:, 0].reshape(X.shape[0], 1) d = np.unique(index) #Output Indexes B = self.B.values[d] C = self.C.values[d] S = self.W.values[d, :] #Index transformation indd = np.arange(self.output_dim) indd[d] = np.arange(d.size) index = indd[index] #Check where wd becomes complex wbool = C*C >= 4.*B #Output related variables must be column-wise C = C.reshape(C.size, 1) B = B.reshape(B.size, 1) C2 = C*C #Input related variables must be row-wise z = X2[:, 0].reshape(1, X2.shape[0]) lq = self.lengthscale.values.reshape((1, self.rank)) #print np.max(z), np.max(z/lq[0, index2]) alpha = .5*C wbool2 = wbool[index] ind2t = np.where(wbool2) ind3t = np.where(np.logical_not(wbool2)) kfu = np.empty((t.size, z.size)) indD = np.arange(B.size) #(1) when wd is real if np.any(np.logical_not(wbool)): #Indexes of index and t related to (2) t1 = t[ind3t] ind = index[ind3t] #Index transformation d = np.asarray(np.where(np.logical_not(wbool))[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms w = .5*np.sqrt(4.*B[d] - C2[d]) alphad = alpha[d] gam = alphad - 1j*w #DxQ terms Slq = (S[d]/w)*(.5*lq) c0 = Slq*np.sqrt(np.pi) nu = gam*(.5*lq) #1xM terms z_lq = z/lq[0, index2] #NxQ terms t_lq = t1/lq #NxM terms zt_lq = z_lq - t_lq[:, index2] # Upsilon Calculations #Using wofz tz = t1-z fullind = np.ix_(ind, index2) zt_lq2 = -zt_lq*zt_lq z_lq2 = -z_lq*z_lq gamt = -gam[ind]*t1 upsi = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind])))) z1 = zt_lq + nu[fullind] indv1 = np.where(z1.real >= 0.) indv2 = np.where(z1.real < 0.) 
if indv1[0].shape > 0: upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]))) if indv2[0].shape > 0: nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2 upsi[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\ - np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]))) upsi[t1[:, 0] == 0., :] = 0. #Covariance calculation kfu[ind3t] = c0[fullind]*upsi.imag #(2) when wd is complex if np.any(wbool): #Indexes of index and t related to (2) t1 = t[ind2t] ind = index[ind2t] #Index transformation d = np.asarray(np.where(wbool)[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms w = .5*np.sqrt(C2[d] - 4.*B[d]) alphad = alpha[d] gam = alphad - w gamc = alphad + w #DxQ terms Slq = S[d]*(lq*.25) c0 = -Slq*(np.sqrt(np.pi)/w) nu = gam*(lq*.5) nuc = gamc*(lq*.5) #1xM terms z_lq = z/lq[0, index2] #NxQ terms t_lq = t1/lq[0, index2] #NxM terms zt_lq = z_lq - t_lq # Upsilon Calculations tz = t1-z z_lq2 = -z_lq*z_lq zt_lq2 = -zt_lq*zt_lq gamt = -gam[ind]*t1 gamct = -gamc[ind]*t1 fullind = np.ix_(ind, index2) upsi = np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind])).real))\ - np.exp(z_lq2 + gamct + np.log(wofz(1j*(z_lq + nuc[fullind])).real)) z1 = zt_lq + nu[fullind] indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) if indv1[0].shape > 0: upsi[indv1] -= np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2 upsi[indv2] -= np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\ - np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real)) z1 = zt_lq + nuc[fullind] indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) if indv1[0].shape > 0: upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: nuac2 = nuc[ind[indv2[0]], index2[indv2[1]]]**2 upsi[indv2] += np.exp(nuac2 - gamc[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\ - np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real)) upsi[t1[:, 0] == 0., :] = 0. 
kfu[ind2t] = c0[np.ix_(ind, index2)]*upsi return kfu #Gradient of Kuu wrt lengthscale def _gkuu_lq(self, X, index): t = X[:, 0].reshape(X.shape[0],) index = index.reshape(X.shape[0],) lq = self.lengthscale.values.reshape(self.rank,) lq2 = lq*lq #Covariance matrix initialization glq = np.zeros((t.size, t.size)) #Upper triangular indices indtri1, indtri2 = np.triu_indices(t.size, 1) #Block Diagonal indices among Upper Triangular indices ind = np.where(index[indtri1] == index[indtri2]) indr = indtri1[ind] indc = indtri2[ind] r = t[indr] - t[indc] r2 = r*r r2_lq2 = r2/lq2[index[indr]] #Calculation of covariance function er2_lq2 = np.exp(-r2_lq2) #Gradient wrt lq c = 2.*r2_lq2/lq[index[indr]] glq[indr, indc] = er2_lq2*c #Complete the lower triangular glq[indc, indr] = glq[indr, indc] return glq #Be careful this derivative should be transpose it def _gkuu_X(self, X, index): #Diagonal terms are always zero t = X[:, 0].reshape(X.shape[0],) index = index.reshape(index.size,) lq = self.lengthscale.values.reshape(self.rank,) lq2 = lq*lq #Covariance matrix initialization gt = np.zeros((t.size, t.size)) #Upper triangular indices indtri1, indtri2 = np.triu_indices(t.size, 1) #Offset of 1 from the diagonal #Block Diagonal indices among Upper Triangular indices ind = np.where(index[indtri1] == index[indtri2]) indr = indtri1[ind] indc = indtri2[ind] r = t[indr] - t[indc] r2 = r*r r2_lq2 = r2/(-lq2[index[indr]]) #Calculation of covariance function er2_lq2 = np.exp(r2_lq2) #Gradient wrt t c = 2.*r/lq2[index[indr]] gt[indr, indc] = er2_lq2*c #Complete the lower triangular gt[indc, indr] = -gt[indr, indc] return gt #Gradients for Diagonal Kff def _gkdiag(self, X, index): index = index.reshape(index.size,) #terms that move along t d = np.unique(index) B = self.B[d].values C = self.C[d].values S = self.W[d, :].values #Index transformation indd = np.arange(self.output_dim) indd[d] = np.arange(d.size) index = indd[index] #Check where wd becomes complex wbool = C*C >= 4.*B #Output related variables must be column-wise t = X[:, 0].reshape(X.shape[0], 1) B = B.reshape(B.size, 1) C = C.reshape(C.size, 1) alpha = .5*C C2 = C*C S2 = S*S wbool2 = wbool[index] ind2t = np.where(wbool2) ind3t = np.where(np.logical_not(wbool2)) #Input related variables must be row-wise lq = self.lengthscale.values.reshape(1, self.rank) lq2 = lq*lq gB = np.empty((t.size,)) gC = np.empty((t.size,)) glq = np.empty((t.size, lq.size)) gS = np.empty((t.size, lq.size)) indD = np.arange(B.size) #(1) When wd is real if np.any(np.logical_not(wbool)): #Indexes of index and t related to (1) t1 = t[ind3t] ind = index[ind3t] #Index transformation d = np.asarray(np.where(np.logical_not(wbool))[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms S2lq = S2[d]*(.5*lq) c0 = S2lq*np.sqrt(np.pi) w = .5*np.sqrt(4.*B[d] - C2[d]) alphad = alpha[d] alpha2 = alphad*alphad w2 = w*w gam = alphad + 1j*w gam2 = gam*gam gamc = alphad - 1j*w c1 = 0.5/alphad c2 = 0.5/gam c = c1 - c2 #DxQ terms c0 = c0/w2 nu = (.5*lq)*gam #Nx1 terms gamt = -gam[ind]*t1 gamct = -gamc[ind]*t1 egamt = np.exp(gamt) egamct = np.exp(gamct) ec = egamt*c2[ind] - egamct*c1[ind] #NxQ terms t_lq = t1/lq t2_lq2 = -t_lq*t_lq t_lq2 = t_lq/lq et2_lq2 = np.exp(t2_lq2) etlq2gamt = np.exp(t2_lq2 + gamt) ##Upsilon calculations #Using wofz wnu = wofz(1j*nu) lwnu = np.log(wnu) t2_lq2 = -t_lq*t_lq upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind])))) upm[t1[:, 0] == 0, :] = 0. nu2 = nu*nu z1 = nu[ind] - t_lq indv1 = np.where(z1.real >= 0.) 
indv2 = np.where(z1.real < 0.) upv = -np.exp(lwnu[ind] + gamt) if indv1[0].shape > 0: upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]))) if indv2[0].shape > 0: upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.))\ - np.exp(t2_lq2[indv2] + np.log(wofz(-1j*z1[indv2]))) upv[t1[:, 0] == 0, :] = 0. #Gradient wrt S Slq = S[d]*lq #For grad wrt S c0_S = Slq*np.sqrt(np.pi)/w2 K01 = c0_S*c gS[ind3t] = np.real(K01[ind]*upm) + np.real((c0_S[ind]*ec)*upv) #For B and C upmd = etlq2gamt - 1. upvd = egamt - et2_lq2 # gradient wrt B dw_dB = 0.5/w dgam_dB = 1j*dw_dB Ba1 = c0*(0.5*dgam_dB/gam2 + (0.5*lq2*gam*dgam_dB - 2.*dw_dB/w)*c) Ba2_1 = c0*(dgam_dB*(0.5/gam2 - 0.25*lq2) + dw_dB/(w*gam)) Ba2_2 = c0*dgam_dB/gam Ba3 = c0*(-0.25*lq2*gam*dgam_dB/alphad + dw_dB/(w*alphad)) Ba4_1 = (S2lq*lq)*dgam_dB/w2 Ba4 = Ba4_1*c gB[ind3t] = np.sum(np.real(Ba1[ind]*upm) - np.real(((Ba2_1[ind] + Ba2_2[ind]*t1)*egamt - Ba3[ind]*egamct)*upv)\ + np.real(Ba4[ind]*upmd) + np.real((Ba4_1[ind]*ec)*upvd), axis=1) # gradient wrt C dw_dC = - alphad*dw_dB dgam_dC = 0.5 + 1j*dw_dC Ca1 = c0*(-0.25/alpha2 + 0.5*dgam_dC/gam2 + (0.5*lq2*gam*dgam_dC - 2.*dw_dC/w)*c) Ca2_1 = c0*(dgam_dC*(0.5/gam2 - 0.25*lq2) + dw_dC/(w*gam)) Ca2_2 = c0*dgam_dC/gam Ca3_1 = c0*(0.25/alpha2 - 0.25*lq2*gam*dgam_dC/alphad + dw_dC/(w*alphad)) Ca3_2 = 0.5*c0/alphad Ca4_1 = (S2lq*lq)*dgam_dC/w2 Ca4 = Ca4_1*c gC[ind3t] = np.sum(np.real(Ca1[ind]*upm) - np.real(((Ca2_1[ind] + Ca2_2[ind]*t1)*egamt - (Ca3_1[ind] + Ca3_2[ind]*t1)*egamct)*upv)\ + np.real(Ca4[ind]*upmd) + np.real((Ca4_1[ind]*ec)*upvd), axis=1) #Gradient wrt lengthscale #DxQ terms la = (1./lq + nu*gam)*c0 la1 = la*c c0l = (S2[d]/w2)*lq la3 = c0l*c gam_2 = .5*gam glq[ind3t] = (la1[ind]*upm).real + ((la[ind]*ec)*upv).real\ + (la3[ind]*(-gam_2[ind] + etlq2gamt*(-t_lq2 + gam_2[ind]))).real\ + ((c0l[ind]*ec)*(-et2_lq2*(t_lq2 + gam_2[ind]) + egamt*gam_2[ind])).real #(2) When w_d is complex if np.any(wbool): t1 = t[ind2t] ind = index[ind2t] #Index transformation d = np.asarray(np.where(wbool)[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms S2lq = S2[d]*(.25*lq) c0 = S2lq*np.sqrt(np.pi) w = .5*np.sqrt(C2[d]-4.*B[d]) w2 = -w*w alphad = alpha[d] alpha2 = alphad*alphad gam = alphad - w gamc = alphad + w gam2 = gam*gam gamc2 = gamc*gamc c1 = .5/alphad c21 = .5/gam c22 = .5/gamc c = c1 - c21 c2 = c1 - c22 #DxQ terms c0 = c0/w2 nu = .5*lq*gam nuc = .5*lq*gamc #Nx1 terms gamt = -gam[ind]*t1 gamct = -gamc[ind]*t1 egamt = np.exp(gamt) egamct = np.exp(gamct) ec = egamt*c21[ind] - egamct*c1[ind] ec2 = egamct*c22[ind] - egamt*c1[ind] #NxQ terms t_lq = t1/lq t2_lq2 = -t_lq*t_lq et2_lq2 = np.exp(t2_lq2) etlq2gamct = np.exp(t2_lq2 + gamct) etlq2gamt = np.exp(t2_lq2 + gamt) #Upsilon Calculations using wofz t2_lq2 = -t_lq*t_lq #Required when using wofz wnu = np.real(wofz(1j*nu)) lwnu = np.log(wnu) upm = wnu[ind] - np.exp(t2_lq2 + gamt + np.log(wofz(1j*(t_lq + nu[ind])).real)) upm[t1[:, 0] == 0., :] = 0. nu2 = nu*nu z1 = nu[ind] - t_lq indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) upv = -np.exp(lwnu[ind] + gamt) if indv1[0].shape > 0: upv[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: upv[indv2] += np.exp(nu2[ind[indv2[0]], indv2[1]] + gamt[indv2[0], 0] + np.log(2.)) - np.exp(t2_lq2[indv2]\ + np.log(wofz(-1j*z1[indv2]).real)) upv[t1[:, 0] == 0, :] = 0. wnuc = wofz(1j*nuc).real upmc = wnuc[ind] - np.exp(t2_lq2 + gamct + np.log(wofz(1j*(t_lq + nuc[ind])).real)) upmc[t1[:, 0] == 0., :] = 0. 
lwnuc = np.log(wnuc) nuc2 = nuc*nuc z1 = nuc[ind] - t_lq indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) upvc = -np.exp(lwnuc[ind] + gamct) if indv1[0].shape > 0: upvc[indv1] += np.exp(t2_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: upvc[indv2] += np.exp(nuc2[ind[indv2[0]], indv2[1]] + gamct[indv2[0], 0] + np.log(2.)) - np.exp(t2_lq2[indv2]\ + np.log(wofz(-1j*z1[indv2]).real)) upvc[t1[:, 0] == 0, :] = 0. #Gradient wrt S #NxQ terms c0_S = (S[d]/w2)*(lq*(np.sqrt(np.pi)*.5)) K011 = c0_S*c K012 = c0_S*c2 gS[ind2t] = K011[ind]*upm + K012[ind]*upmc + (c0_S[ind]*ec)*upv + (c0_S[ind]*ec2)*upvc #Is required to cache this, C gradient also required them upmd = -1. + etlq2gamt upvd = -et2_lq2 + egamt upmdc = -1. + etlq2gamct upvdc = -et2_lq2 + egamct # Gradient wrt B dgam_dB = 0.5/w dgamc_dB = -dgam_dB Ba1 = c0*(0.5*dgam_dB/gam2 + (0.5*lq2*gam*dgam_dB - 1./w2)*c) Ba3 = c0*(-0.25*lq2*gam*dgam_dB/alphad + 0.5/(w2*alphad)) Ba4_1 = (S2lq*lq)*dgam_dB/w2 Ba4 = Ba4_1*c Ba2_1 = c0*(dgam_dB*(0.5/gam2 - 0.25*lq2) + 0.5/(w2*gam)) Ba2_2 = c0*dgam_dB/gam Ba1c = c0*(0.5*dgamc_dB/gamc2 + (0.5*lq2*gamc*dgamc_dB - 1./w2)*c2) Ba3c = c0*(-0.25*lq2*gamc*dgamc_dB/alphad + 0.5/(w2*alphad)) Ba4_1c = (S2lq*lq)*dgamc_dB/w2 Ba4c = Ba4_1c*c2 Ba2_1c = c0*(dgamc_dB*(0.5/gamc2 - 0.25*lq2) + 0.5/(w2*gamc)) Ba2_2c = c0*dgamc_dB/gamc gB[ind2t] = np.sum(Ba1[ind]*upm - ((Ba2_1[ind] + Ba2_2[ind]*t1)*egamt - Ba3[ind]*egamct)*upv\ + Ba4[ind]*upmd + (Ba4_1[ind]*ec)*upvd\ + Ba1c[ind]*upmc - ((Ba2_1c[ind] + Ba2_2c[ind]*t1)*egamct - Ba3c[ind]*egamt)*upvc\ + Ba4c[ind]*upmdc + (Ba4_1c[ind]*ec2)*upvdc, axis=1) ##Gradient wrt C dw_dC = 0.5*alphad/w dgam_dC = 0.5 - dw_dC dgamc_dC = 0.5 + dw_dC S2lq2 = S2lq*lq Ca1 = c0*(-0.25/alpha2 + 0.5*dgam_dC/gam2 + (0.5*lq2*gam*dgam_dC + alphad/w2)*c) Ca2_1 = c0*(dgam_dC*(0.5/gam2 - 0.25*lq2) - 0.5*alphad/(w2*gam)) Ca2_2 = c0*dgam_dC/gam Ca3_1 = c0*(0.25/alpha2 - 0.25*lq2*gam*dgam_dC/alphad - 0.5/w2) Ca3_2 = 0.5*c0/alphad Ca4_1 = S2lq2*(dgam_dC/w2) Ca4 = Ca4_1*c Ca1c = c0*(-0.25/alpha2 + 0.5*dgamc_dC/gamc2 + (0.5*lq2*gamc*dgamc_dC + alphad/w2)*c2) Ca2_1c = c0*(dgamc_dC*(0.5/gamc2 - 0.25*lq2) - 0.5*alphad/(w2*gamc)) Ca2_2c = c0*dgamc_dC/gamc Ca3_1c = c0*(0.25/alpha2 - 0.25*lq2*gamc*dgamc_dC/alphad - 0.5/w2) Ca3_2c = 0.5*c0/alphad Ca4_1c = S2lq2*(dgamc_dC/w2) Ca4c = Ca4_1c*c2 gC[ind2t] = np.sum(Ca1[ind]*upm - ((Ca2_1[ind] + Ca2_2[ind]*t1)*egamt - (Ca3_1[ind] + Ca3_2[ind]*t1)*egamct)*upv\ + Ca4[ind]*upmd + (Ca4_1[ind]*ec)*upvd\ + Ca1c[ind]*upmc - ((Ca2_1c[ind] + Ca2_2c[ind]*t1)*egamct - (Ca3_1c[ind] + Ca3_2c[ind]*t1)*egamt)*upvc\ + Ca4c[ind]*upmdc + (Ca4_1c[ind]*ec2)*upvdc, axis=1) #Gradient wrt lengthscale #DxQ terms la = (1./lq + nu*gam)*c0 lac = (1./lq + nuc*gamc)*c0 la1 = la*c la1c = lac*c2 t_lq2 = t_lq/lq c0l = (S2[d]/w2)*(.5*lq) la3 = c0l*c la3c = c0l*c2 gam_2 = .5*gam gamc_2 = .5*gamc glq[ind2t] = la1c[ind]*upmc + (lac[ind]*ec2)*upvc\ + la3c[ind]*(-gamc_2[ind] + etlq2gamct*(-t_lq2 + gamc_2[ind]))\ + (c0l[ind]*ec2)*(-et2_lq2*(t_lq2 + gamc_2[ind]) + egamct*gamc_2[ind])\ + la1[ind]*upm + (la[ind]*ec)*upv\ + la3[ind]*(-gam_2[ind] + etlq2gamt*(-t_lq2 + gam_2[ind]))\ + (c0l[ind]*ec)*(-et2_lq2*(t_lq2 + gam_2[ind]) + egamt*gam_2[ind]) return glq, gS, gB, gC def _gkfu(self, X, index, Z, index2): index = index.reshape(index.size,) #TODO: reduce memory usage #terms that move along t d = np.unique(index) B = self.B[d].values C = self.C[d].values S = self.W[d, :].values #Index transformation indd = np.arange(self.output_dim) indd[d] = np.arange(d.size) index = indd[index] #Check where wd 
becomes complex wbool = C*C >= 4.*B #t column t = X[:, 0].reshape(X.shape[0], 1) C = C.reshape(C.size, 1) B = B.reshape(B.size, 1) C2 = C*C #z row z = Z[:, 0].reshape(1, Z.shape[0]) index2 = index2.reshape(index2.size,) lq = self.lengthscale.values.reshape((1, self.rank)) lq2 = lq*lq alpha = .5*C wbool2 = wbool[index] ind2t = np.where(wbool2) ind3t = np.where(np.logical_not(wbool2)) #kfu = np.empty((t.size, z.size)) glq = np.empty((t.size, z.size)) gSdq = np.empty((t.size, z.size)) gB = np.empty((t.size, z.size)) gC = np.empty((t.size, z.size)) indD = np.arange(B.size) #(1) when wd is real if np.any(np.logical_not(wbool)): #Indexes of index and t related to (2) t1 = t[ind3t] ind = index[ind3t] #Index transformation d = np.asarray(np.where(np.logical_not(wbool))[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms w = .5*np.sqrt(4.*B[d] - C2[d]) alphad = alpha[d] gam = alphad - 1j*w gam_2 = .5*gam S_w = S[d]/w S_wpi = S_w*(.5*np.sqrt(np.pi)) #DxQ terms c0 = S_wpi*lq #lq*Sdq*sqrt(pi)/(2w) nu = gam*lq nu2 = 1.+.5*(nu*nu) nu *= .5 #1xM terms z_lq = z/lq[0, index2] z_lq2 = -z_lq*z_lq #NxQ terms t_lq = t1/lq #DxM terms gamt = -gam[ind]*t1 #NxM terms zt_lq = z_lq - t_lq[:, index2] zt_lq2 = -zt_lq*zt_lq ezt_lq2 = -np.exp(zt_lq2) ezgamt = np.exp(z_lq2 + gamt) # Upsilon calculations fullind = np.ix_(ind, index2) upsi = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind])))) tz = t1-z z1 = zt_lq + nu[fullind] indv1 = np.where(z1.real >= 0.) indv2 = np.where(z1.real < 0.) if indv1[0].shape > 0: upsi[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]))) if indv2[0].shape > 0: nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2 upsi[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\ - np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]))) upsi[t1[:, 0] == 0., :] = 0. 
#Gradient wrt S #DxQ term Sa1 = lq*(.5*np.sqrt(np.pi))/w gSdq[ind3t] = Sa1[np.ix_(ind, index2)]*upsi.imag #Gradient wrt lq la1 = S_wpi*nu2 la2 = S_w*lq uplq = ezt_lq2*(gam_2[ind]) uplq += ezgamt*(-z_lq/lq[0, index2] + gam_2[ind]) glq[ind3t] = (la1[np.ix_(ind, index2)]*upsi).imag glq[ind3t] += la2[np.ix_(ind, index2)]*uplq.imag #Gradient wrt B #Dx1 terms dw_dB = .5/w dgam_dB = -1j*dw_dB #DxQ terms Ba1 = -c0*dw_dB/w #DXQ Ba2 = c0*dgam_dB #DxQ Ba3 = lq2*gam_2 #DxQ Ba4 = (dgam_dB*S_w)*(.5*lq2) #DxQ gB[ind3t] = ((Ba1[np.ix_(ind, index2)] + Ba2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi).imag\ + (Ba4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt)).imag #Gradient wrt C (it uses some calculations performed in B) #Dx1 terms dw_dC = -.5*alphad/w dgam_dC = 0.5 - 1j*dw_dC #DxQ terms Ca1 = -c0*dw_dC/w #DXQ Ca2 = c0*dgam_dC #DxQ Ca4 = (dgam_dC*S_w)*(.5*lq2) #DxQ gC[ind3t] = ((Ca1[np.ix_(ind, index2)] + Ca2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi).imag\ + (Ca4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt)).imag #(2) when wd is complex if np.any(wbool): #Indexes of index and t related to (2) t1 = t[ind2t] ind = index[ind2t] #Index transformation d = np.asarray(np.where(wbool)[0]) indd = indD.copy() indd[d] = np.arange(d.size) ind = indd[ind] #Dx1 terms w = .5*np.sqrt(C2[d] - 4.*B[d]) w2 = w*w alphad = alpha[d] gam = alphad - w gamc = alphad + w #DxQ terms S_w= -S[d]/w #minus is given by j*j S_wpi = S_w*(.25*np.sqrt(np.pi)) c0 = S_wpi*lq gam_2 = .5*gam gamc_2 = .5*gamc nu = gam*lq nuc = gamc*lq nu2 = 1.+.5*(nu*nu) nuc2 = 1.+.5*(nuc*nuc) nu *= .5 nuc *= .5 #1xM terms z_lq = z/lq[0, index2] z_lq2 = -z_lq*z_lq #Nx1 gamt = -gam[ind]*t1 gamct = -gamc[ind]*t1 #NxQ terms t_lq = t1/lq[0, index2] #NxM terms zt_lq = z_lq - t_lq zt_lq2 = -zt_lq*zt_lq ezt_lq2 = -np.exp(zt_lq2) ezgamt = np.exp(z_lq2 + gamt) ezgamct = np.exp(z_lq2 + gamct) # Upsilon calculations fullind = np.ix_(ind, index2) upsi1 = - np.exp(z_lq2 + gamct + np.log(wofz(1j*(z_lq + nuc[fullind])).real)) tz = t1-z z1 = zt_lq + nuc[fullind] indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) if indv1[0].shape > 0: upsi1[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: nuac2 = nuc[ind[indv2[0]], index2[indv2[1]]]**2 upsi1[indv2] += np.exp(nuac2 - gamc[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\ - np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real)) upsi1[t1[:, 0] == 0., :] = 0. upsi2 = - np.exp(z_lq2 + gamt + np.log(wofz(1j*(z_lq + nu[fullind])).real)) z1 = zt_lq + nu[fullind] indv1 = np.where(z1 >= 0.) indv2 = np.where(z1 < 0.) if indv1[0].shape > 0: upsi2[indv1] += np.exp(zt_lq2[indv1] + np.log(wofz(1j*z1[indv1]).real)) if indv2[0].shape > 0: nua2 = nu[ind[indv2[0]], index2[indv2[1]]]**2 upsi2[indv2] += np.exp(nua2 - gam[ind[indv2[0]], 0]*tz[indv2] + np.log(2.))\ - np.exp(zt_lq2[indv2] + np.log(wofz(-1j*z1[indv2]).real)) upsi2[t1[:, 0] == 0., :] = 0. 
#Gradient wrt lq la1 = S_wpi*nu2 la1c = S_wpi*nuc2 la2 = S_w*(.5*lq) uplq = ezt_lq2*(gamc_2[ind]) + ezgamct*(-z_lq/lq[0, index2] + gamc_2[ind])\ - ezt_lq2*(gam_2[ind]) - ezgamt*(-z_lq/lq[0, index2] + gam_2[ind]) glq[ind2t] = la1c[np.ix_(ind, index2)]*upsi1 - la1[np.ix_(ind, index2)]*upsi2\ + la2[np.ix_(ind, index2)]*uplq #Gradient wrt S Sa1 = (lq*(-.25*np.sqrt(np.pi)))/w gSdq[ind2t] = Sa1[np.ix_(ind, index2)]*(upsi1 - upsi2) #Gradient wrt B #Dx1 terms dgam_dB = .5/w dgamc_dB = -dgam_dB #DxQ terms Ba1 = .5*(c0/w2) Ba2 = c0*dgam_dB Ba3 = lq2*gam_2 Ba4 = (dgam_dB*S_w)*(.25*lq2) Ba2c = c0*dgamc_dB Ba3c = lq2*gamc_2 Ba4c = (dgamc_dB*S_w)*(.25*lq2) gB[ind2t] = (Ba1[np.ix_(ind, index2)] + Ba2c[np.ix_(ind, index2)]*(Ba3c[np.ix_(ind, index2)] - (t1-z)))*upsi1\ + Ba4c[np.ix_(ind, index2)]*(ezt_lq2 + ezgamct)\ - (Ba1[np.ix_(ind, index2)] + Ba2[np.ix_(ind, index2)]*(Ba3[np.ix_(ind, index2)] - (t1-z)))*upsi2\ - Ba4[np.ix_(ind, index2)]*(ezt_lq2 + ezgamt) #Gradient wrt C #Dx1 terms dgam_dC = 0.5 - .5*(alphad/w) dgamc_dC = 0.5 + .5*(alphad/w) #DxQ terms Ca1 = -c0*(.5*alphad/w2) Ca2 = c0*dgam_dC Ca4 = (dgam_dC*S_w)*(.25*lq2) Ca2c = c0*dgamc_dC Ca4c = (dgamc_dC*S_w)*(.25*lq2) gC[ind2t] = (Ca1[np.ix_(ind, index2)] + Ca2c[np.ix_(ind, index2)]*(Ba3c[np.ix_(ind, index2)] - (t1-z)))*upsi1\ + Ca4c[np.ix_(ind, index2)]*(ezt_lq2 + ezgamct)\ - (Ca1[np.ix_(ind, index2)] + Ca2[
np.ix_(ind, index2)
numpy.ix_
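A hedged illustration of the indexing idiom the completion above relies on: np.ix_ turns two 1-D index vectors into an open mesh, so a D x Q coefficient table can be expanded to one row per data point and one column per latent force. The table and index vectors here are invented stand-ins.

import numpy as np

c0 = np.arange(12.0).reshape(3, 4)       # stand-in for a D x Q coefficient table
ind = np.array([0, 2, 2, 1])             # output index for each data point
index2 = np.array([3, 0, 1])             # latent-force index for each column

# np.ix_ builds an open mesh, so c0[np.ix_(ind, index2)] has shape
# (len(ind), len(index2)) and picks c0[ind[i], index2[j]] at entry (i, j).
block = c0[np.ix_(ind, index2)]
assert block.shape == (4, 3)
assert block[1, 2] == c0[2, 1]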
""" Blending module. Check Blending_ section of W3C recommendation for blending mode definitions. .. _Blending: https://www.w3.org/TR/compositing/#blending """ from __future__ import absolute_import, unicode_literals import logging from psd_tools.utils import new_registry from psd_tools.constants import BlendMode from psd_tools.terminology import Enum logger = logging.getLogger(__name__) BLEND_FUNCTIONS, register = new_registry() def blend(backdrop, image, offset, mode=None): from PIL import Image, ImageChops, ImageMath # Align the canvas size. if offset[0] < 0: if image.width <= -offset[0]: return backdrop image = image.crop((-offset[0], 0, image.width, image.height)) offset = (0, offset[1]) if offset[1] < 0: if image.height <= -offset[1]: return backdrop image = image.crop((0, -offset[1], image.width, image.height)) offset = (offset[0], 0) # Operations must happen in RGBA in Pillow. image_ = Image.new(image.mode, backdrop.size) image_.paste(image, offset) image = image_.convert('RGBA') target_mode = backdrop.mode if target_mode != 'RGBA': backdrop = backdrop.convert('RGBA') # Composite blended image. if mode not in (BlendMode.NORMAL, Enum.Normal, None): blend_func = BLEND_FUNCTIONS.get(mode, _normal) image = _blend_image(backdrop, image, blend_func) backdrop = Image.alpha_composite(backdrop, image) if target_mode != 'RGBA': backdrop = backdrop.convert(target_mode) return backdrop def _blend_image(backdrop, source, blend_fn): from PIL import Image import numpy as np Cb = np.asarray(backdrop.convert('RGB')).astype(np.float) / 255. Cs = np.asarray(source.convert('RGB')).astype(np.float) / 255. Ab = np.asarray(backdrop.getchannel('A')).astype(np.float) / 255. Ab = np.expand_dims(Ab, axis=2) Cr = (1. - Ab) * Cs + Ab * blend_fn(Cs, Cb) result = Image.fromarray((Cr * 255).round().astype(np.uint8), mode='RGB') result.putalpha(source.getchannel('A')) return result @register(BlendMode.NORMAL) @register(Enum.Normal) def _normal(Cs, Cb): return Cs @register(BlendMode.MULTIPLY) @register(Enum.Multiply) def _multiply(Cs, Cb): return Cs * Cb @register(BlendMode.SCREEN) @register(Enum.Screen) def _screen(Cs, Cb): return Cb + Cs - (Cb * Cs) @register(BlendMode.OVERLAY) @register(Enum.Overlay) def _overlay(Cs, Cb): return _hard_light(Cb, Cs) @register(BlendMode.DARKEN) @register(Enum.Darken) def _darken(Cs, Cb): import numpy as np return np.minimum(Cb, Cs) @register(BlendMode.LIGHTEN) @register(Enum.Lighten) def _lighten(Cs, Cb): import numpy as np return np.maximum(Cb, Cs) @register(BlendMode.COLOR_DODGE) @register(Enum.ColorDodge) def _color_dodge(Cs, Cb, s=1.0): import numpy as np B =
np.zeros_like(Cs)
numpy.zeros_like
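A small sketch of the API named above as it might be used inside a blend function: np.zeros_like allocates the output with the same shape and dtype as the source channel array. The layers are random toy data, and the dodge formula shown is one common simplification, not necessarily the library's exact branch logic.

import numpy as np

Cs = np.random.rand(4, 4, 3)             # toy source layer, floats in [0, 1)
Cb = np.random.rand(4, 4, 3)             # toy backdrop layer

B = np.zeros_like(Cs)                    # same shape and dtype as Cs, filled with 0
mask = Cs < 1.0
# One common colour-dodge formulation: Cb / (1 - Cs), clipped to 1, where Cs < 1.
B[mask] = np.minimum(1.0, Cb[mask] / (1.0 - Cs[mask]))
B[~mask] = 1.0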
import seaborn as sns
import pandas as pd
import numpy as np
import matplotlib as mpl
import matplotlib.pyplot as plt
import test_rnn_realtime as net
import traning as tr
import time

# Definition of the approximation data
high_value = 1.0
low_value = 0.0
category_num = 2
steps_num = 3


def make_input_4data(high_value, low_value, category_num, steps_num):
    one_cate_data = np.empty((0, 0, category_num))
    high_element = np.array([[high_value]])
    low_element = np.array([[low_value]])
    much = np.array([1, 0])
    non_much = np.array([0, 1])
    input_data = np.empty((0, steps_num, category_num))
    target_data = np.empty((0, 2))
    for q in range(category_num):
        for i in range(category_num):
            for j in range(category_num):
                for k in range(category_num):
                    one_cate_data = np.empty((0, category_num))
                    one_low_data = np.empty((1, 0))
                    for l in range(category_num):
                        if l == q:
                            one_low_data = np.hstack(
                                (one_low_data, high_element))
                        else:
                            one_low_data = np.hstack(
                                (one_low_data, low_element))
                    one_cate_data = np.vstack((one_cate_data, one_low_data))
                    one_low_data = np.empty((1, 0))
                    for l in range(category_num):
                        if l == i:
                            one_low_data = np.hstack(
                                (one_low_data, high_element))
                        else:
                            one_low_data = np.hstack(
                                (one_low_data, low_element))
                    one_cate_data = np.vstack((one_cate_data, one_low_data))
                    one_low_data =
np.empty((1, 0))
numpy.empty
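A brief sketch of the completion above: np.empty((1, 0)) creates a zero-width row that np.hstack can grow one column at a time, which is how the one-hot rows are assembled in the prompt. The flag sequence below is invented for illustration.

import numpy as np

high_element = np.array([[1.0]])
low_element = np.array([[0.0]])

# np.empty((1, 0)) allocates a 1 x 0 array without initialising any values;
# it serves purely as a seed that np.hstack can extend column by column.
row = np.empty((1, 0))
for flag in (True, False, True):
    row = np.hstack((row, high_element if flag else low_element))
assert row.shape == (1, 3)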
# Copyright 2014-2019 The PySCF Developers. All Rights Reserved. # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. # # Author: <NAME> <<EMAIL>> # <NAME> <<EMAIL>> # ''' Restricted algebraic diagrammatic construction ''' import time import numpy as np import pyscf.ao2mo as ao2mo from pyscf import lib from pyscf.lib import logger from pyscf.adc import radc_ao2mo from pyscf.adc import dfadc from pyscf import __config__ from pyscf import df from pyscf import symm def kernel(adc, nroots=1, guess=None, eris=None, verbose=None): adc.method = adc.method.lower() if adc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(adc.method) cput0 = (time.clock(), time.time()) log = logger.Logger(adc.stdout, adc.verbose) if adc.verbose >= logger.WARN: adc.check_sanity() adc.dump_flags() if eris is None: eris = adc.transform_integrals() imds = adc.get_imds(eris) matvec, diag = adc.gen_matvec(imds, eris) guess = adc.get_init_guess(nroots, diag, ascending = True) conv, adc.E, U = lib.linalg_helper.davidson_nosym1(lambda xs : [matvec(x) for x in xs], guess, diag, nroots=nroots, verbose=log, tol=adc.conv_tol, max_cycle=adc.max_cycle, max_space=adc.max_space,tol_residual=adc.tol_residual) adc.U = np.array(U).T.copy() if adc.compute_properties: adc.P,adc.X = adc.get_properties(nroots) nfalse = np.shape(conv)[0] - np.sum(conv) str = ("\n*************************************************************" "\n ADC calculation summary" "\n*************************************************************") logger.info(adc, str) if nfalse >= 1: logger.warn(adc, "Davidson iterations for " + str(nfalse) + " root(s) not converged\n") for n in range(nroots): print_string = ('%s root %d | Energy (Eh) = %14.10f | Energy (eV) = %12.8f ' % (adc.method, n, adc.E[n], adc.E[n]*27.2114)) if adc.compute_properties: print_string += ("| Spec factors = %10.8f " % adc.P[n]) print_string += ("| conv = %s" % conv[n]) logger.info(adc, print_string) log.timer('ADC', *cput0) return adc.E, adc.U, adc.P, adc.X def compute_amplitudes_energy(myadc, eris, verbose=None): t1, t2, myadc.imds.t2_1_vvvv = myadc.compute_amplitudes(eris) e_corr = myadc.compute_energy(t2, eris) return e_corr, t1, t2 def compute_amplitudes(myadc, eris): cput0 = (time.clock(), time.time()) log = logger.Logger(myadc.stdout, myadc.verbose) if myadc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(myadc.method) nocc = myadc._nocc nvir = myadc._nvir eris_oooo = eris.oooo eris_ovoo = eris.ovoo eris_oovv = eris.oovv eris_ovvo = eris.ovvo # Compute first-order doubles t2 (tijab) v2e_oovv = eris_ovvo[:].transpose(0,3,1,2).copy() e = myadc.mo_energy d_ij = e[:nocc][:,None] + e[:nocc] d_ab = e[nocc:][:,None] + e[nocc:] D2 = d_ij.reshape(-1,1) - d_ab.reshape(-1) D2 = D2.reshape((nocc,nocc,nvir,nvir)) D1 = e[:nocc][:None].reshape(-1,1) - e[nocc:].reshape(-1) D1 = D1.reshape((nocc,nvir)) t2_1 = v2e_oovv/D2 if not isinstance(eris.oooo, np.ndarray): t2_1 = radc_ao2mo.write_dataset(t2_1) del v2e_oovv del D2 cput0 = log.timer_debug1("Completed t2_1 amplitude calculation", 
*cput0) # Compute second-order singles t1 (tij) if isinstance(eris.ovvv, type(None)): chnk_size = radc_ao2mo.calculate_chunk_size(myadc) else: chnk_size = nocc a = 0 t1_2 = np.zeros((nocc,nvir)) for p in range(0,nocc,chnk_size): if getattr(myadc, 'with_df', None): eris_ovvv = dfadc.get_ovvv_df(myadc, eris.Lov, eris.Lvv, p, chnk_size).reshape(-1,nvir,nvir,nvir) else : eris_ovvv = radc_ao2mo.unpack_eri_1(eris.ovvv, nvir) k = eris_ovvv.shape[0] t1_2 += 0.5*lib.einsum('kdac,ikcd->ia',eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_2 -= 0.5*lib.einsum('kdac,kicd->ia',eris_ovvv,t2_1[a:a+k,:],optimize=True) t1_2 -= 0.5*lib.einsum('kcad,ikcd->ia',eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_2 += 0.5*lib.einsum('kcad,kicd->ia',eris_ovvv,t2_1[a:a+k,:],optimize=True) t1_2 += lib.einsum('kdac,ikcd->ia',eris_ovvv,t2_1[:,a:a+k],optimize=True) del eris_ovvv a += k t1_2 -= 0.5*lib.einsum('lcki,klac->ia',eris_ovoo,t2_1[:],optimize=True) t1_2 += 0.5*lib.einsum('lcki,lkac->ia',eris_ovoo,t2_1[:],optimize=True) t1_2 -= 0.5*lib.einsum('kcli,lkac->ia',eris_ovoo,t2_1[:],optimize=True) t1_2 += 0.5*lib.einsum('kcli,klac->ia',eris_ovoo,t2_1[:],optimize=True) t1_2 -= lib.einsum('lcki,klac->ia',eris_ovoo,t2_1[:],optimize=True) t1_2 = t1_2/D1 cput0 = log.timer_debug1("Completed t1_2 amplitude calculation", *cput0) t2_2 = None t1_3 = None t2_1_vvvv = None if (myadc.method == "adc(2)-x" or myadc.method == "adc(3)"): # Compute second-order doubles t2 (tijab) eris_oooo = eris.oooo eris_ovvo = eris.ovvo if isinstance(eris.vvvv, np.ndarray): eris_vvvv = eris.vvvv temp = t2_1.reshape(nocc*nocc,nvir*nvir) t2_1_vvvv = np.dot(temp,eris_vvvv.T).reshape(nocc,nocc,nvir,nvir) elif isinstance(eris.vvvv, list): t2_1_vvvv = contract_ladder(myadc,t2_1[:],eris.vvvv) else: t2_1_vvvv = contract_ladder(myadc,t2_1[:],eris.Lvv) if not isinstance(eris.oooo, np.ndarray): t2_1_vvvv = radc_ao2mo.write_dataset(t2_1_vvvv) t2_2 = t2_1_vvvv[:].copy() t2_2 += lib.einsum('kilj,klab->ijab',eris_oooo,t2_1[:],optimize=True) t2_2 += lib.einsum('kcbj,kica->ijab',eris_ovvo,t2_1[:],optimize=True) t2_2 -= lib.einsum('kcbj,ikca->ijab',eris_ovvo,t2_1[:],optimize=True) t2_2 += lib.einsum('kcbj,ikac->ijab',eris_ovvo,t2_1[:],optimize=True) t2_2 -= lib.einsum('kjbc,ikac->ijab',eris_oovv,t2_1[:],optimize=True) t2_2 -= lib.einsum('kibc,kjac->ijab',eris_oovv,t2_1[:],optimize=True) t2_2 -= lib.einsum('kjac,ikcb->ijab',eris_oovv,t2_1[:],optimize=True) t2_2 += lib.einsum('kcai,kjcb->ijab',eris_ovvo,t2_1[:],optimize=True) t2_2 -= lib.einsum('kcai,jkcb->ijab',eris_ovvo,t2_1[:],optimize=True) t2_2 += lib.einsum('kcai,kjcb->ijab',eris_ovvo,t2_1[:],optimize=True) t2_2 -= lib.einsum('kiac,kjcb->ijab',eris_oovv,t2_1[:],optimize=True) D2 = d_ij.reshape(-1,1) - d_ab.reshape(-1) D2 = D2.reshape((nocc,nocc,nvir,nvir)) t2_2 = t2_2/D2 if not isinstance(eris.oooo, np.ndarray): t2_2 = radc_ao2mo.write_dataset(t2_2) del D2 cput0 = log.timer_debug1("Completed t2_2 amplitude calculation", *cput0) if (myadc.method == "adc(3)"): eris_ovoo = eris.ovoo t1_3 = lib.einsum('d,ilad,ld->ia',e[nocc:],t2_1[:],t1_2,optimize=True) t1_3 -= lib.einsum('d,liad,ld->ia',e[nocc:],t2_1[:],t1_2,optimize=True) t1_3 += lib.einsum('d,ilad,ld->ia',e[nocc:],t2_1[:],t1_2,optimize=True) t1_3 -= lib.einsum('l,ilad,ld->ia',e[:nocc],t2_1[:], t1_2,optimize=True) t1_3 += lib.einsum('l,liad,ld->ia',e[:nocc],t2_1[:], t1_2,optimize=True) t1_3 -= lib.einsum('l,ilad,ld->ia',e[:nocc],t2_1[:],t1_2,optimize=True) t1_3 += 0.5*lib.einsum('a,ilad,ld->ia',e[nocc:],t2_1[:], t1_2,optimize=True) t1_3 -= 
0.5*lib.einsum('a,liad,ld->ia',e[nocc:],t2_1[:], t1_2,optimize=True) t1_3 += 0.5*lib.einsum('a,ilad,ld->ia',e[nocc:],t2_1[:],t1_2,optimize=True) t1_3 -= 0.5*lib.einsum('i,ilad,ld->ia',e[:nocc],t2_1[:], t1_2,optimize=True) t1_3 += 0.5*lib.einsum('i,liad,ld->ia',e[:nocc],t2_1[:], t1_2,optimize=True) t1_3 -= 0.5*lib.einsum('i,ilad,ld->ia',e[:nocc],t2_1[:],t1_2,optimize=True) t1_3 += lib.einsum('ld,iadl->ia',t1_2,eris_ovvo,optimize=True) t1_3 -= lib.einsum('ld,ladi->ia',t1_2,eris_ovvo,optimize=True) t1_3 += lib.einsum('ld,iadl->ia',t1_2,eris_ovvo,optimize=True) t1_3 += lib.einsum('ld,ldai->ia',t1_2,eris_ovvo ,optimize=True) t1_3 -= lib.einsum('ld,liad->ia',t1_2,eris_oovv ,optimize=True) t1_3 += lib.einsum('ld,ldai->ia',t1_2,eris_ovvo,optimize=True) t1_3 -= 0.5*lib.einsum('lmad,mdli->ia',t2_2[:],eris_ovoo,optimize=True) t1_3 += 0.5*lib.einsum('mlad,mdli->ia',t2_2[:],eris_ovoo,optimize=True) t1_3 += 0.5*lib.einsum('lmad,ldmi->ia',t2_2[:],eris_ovoo,optimize=True) t1_3 -= 0.5*lib.einsum('mlad,ldmi->ia',t2_2[:],eris_ovoo,optimize=True) t1_3 -= lib.einsum('lmad,mdli->ia',t2_2[:],eris_ovoo,optimize=True) if isinstance(eris.ovvv, type(None)): chnk_size = radc_ao2mo.calculate_chunk_size(myadc) else : chnk_size = nocc a = 0 for p in range(0,nocc,chnk_size): if getattr(myadc, 'with_df', None): eris_ovvv = dfadc.get_ovvv_df(myadc, eris.Lov, eris.Lvv, p, chnk_size).reshape(-1,nvir,nvir,nvir) else : eris_ovvv = radc_ao2mo.unpack_eri_1(eris.ovvv, nvir) k = eris_ovvv.shape[0] t1_3 += 0.5*lib.einsum('ilde,lead->ia', t2_2[:,a:a+k],eris_ovvv,optimize=True) t1_3 -= 0.5*lib.einsum('lide,lead->ia', t2_2[a:a+k],eris_ovvv,optimize=True) t1_3 -= 0.5*lib.einsum('ilde,ldae->ia', t2_2[:,a:a+k],eris_ovvv,optimize=True) t1_3 += 0.5*lib.einsum('lide,ldae->ia', t2_2[a:a+k],eris_ovvv,optimize=True) t1_3 -= lib.einsum('ildf,mefa,lmde->ia',t2_1[:], eris_ovvv, t2_1[:,a:a+k] ,optimize=True) t1_3 += lib.einsum('ildf,mefa,mlde->ia',t2_1[:], eris_ovvv, t2_1[a:a+k] ,optimize=True) t1_3 += lib.einsum('lidf,mefa,lmde->ia',t2_1[:], eris_ovvv, t2_1[:,a:a+k] ,optimize=True) t1_3 -= lib.einsum('lidf,mefa,mlde->ia',t2_1[:], eris_ovvv, t2_1[a:a+k] ,optimize=True) t1_3 += lib.einsum('ildf,mafe,lmde->ia',t2_1[:], eris_ovvv, t2_1[:,a:a+k] ,optimize=True) t1_3 -= lib.einsum('ildf,mafe,mlde->ia',t2_1[:], eris_ovvv, t2_1[a:a+k] ,optimize=True) t1_3 -= lib.einsum('lidf,mafe,lmde->ia',t2_1[:], eris_ovvv, t2_1[:,a:a+k] ,optimize=True) t1_3 += lib.einsum('lidf,mafe,mlde->ia',t2_1[:], eris_ovvv, t2_1[a:a+k] ,optimize=True) t1_3 += lib.einsum('ilfd,mefa,mled->ia', t2_1[:],eris_ovvv, t2_1[a:a+k],optimize=True) t1_3 -= lib.einsum('ilfd,mafe,mled->ia', t2_1[:],eris_ovvv, t2_1[a:a+k],optimize=True) t1_3 += 0.5*lib.einsum('ilaf,mefd,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 -= 0.5*lib.einsum('ilaf,mefd,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3 -= 0.5*lib.einsum('liaf,mefd,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 += 0.5*lib.einsum('liaf,mefd,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3 -= 0.5*lib.einsum('ilaf,mdfe,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 += 0.5*lib.einsum('ilaf,mdfe,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3 += 0.5*lib.einsum('liaf,mdfe,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 -= 0.5*lib.einsum('liaf,mdfe,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3[a:a+k] += 0.5*lib.einsum('lmdf,iaef,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 
0.5*lib.einsum('lmdf,iaef,mlde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.5*lib.einsum('mldf,iaef,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.5*lib.einsum('mldf,iaef,mlde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.5*lib.einsum('lmdf,ifea,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.5*lib.einsum('lmdf,ifea,mlde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.5*lib.einsum('mldf,ifea,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.5*lib.einsum('mldf,ifea,mlde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += lib.einsum('mlfd,iaef,mled->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= lib.einsum('mlfd,ifea,mled->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.25*lib.einsum('lmef,iedf,lmad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.25*lib.einsum('lmef,iedf,mlad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.25*lib.einsum('mlef,iedf,lmad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.25*lib.einsum('mlef,iedf,mlad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.25*lib.einsum('lmef,ifde,lmad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.25*lib.einsum('lmef,ifde,mlad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.25*lib.einsum('mlef,ifde,lmad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.25*lib.einsum('mlef,ifde,mlad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3 += 0.5*lib.einsum('ilaf,mefd,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 -= 0.5*lib.einsum('ilaf,mefd,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3 -= 0.5*lib.einsum('ilaf,mdfe,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 += 0.5*lib.einsum('ilaf,mdfe,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3 -= lib.einsum('ildf,mafe,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3 += lib.einsum('ilaf,mefd,mled->ia',t2_1[:],eris_ovvv,t2_1[a:a+k],optimize=True) t1_3[a:a+k] += 0.5*lib.einsum('lmdf,iaef,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.5*lib.einsum('lmdf,iaef,mlde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= 0.5*lib.einsum('mldf,iaef,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += 0.5*lib.einsum('mldf,iaef,mlde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] += lib.einsum('lmdf,iaef,lmde->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3[a:a+k] -= lib.einsum('lmef,iedf,lmad->ia',t2_1[:],eris_ovvv,t2_1[:],optimize=True) t1_3 += lib.einsum('ilde,lead->ia',t2_2[:,a:a+k],eris_ovvv,optimize=True) t1_3 -= lib.einsum('ildf,mefa,lmde->ia',t2_1[:],eris_ovvv, t2_1[:,a:a+k],optimize=True) t1_3 += lib.einsum('lidf,mefa,lmde->ia',t2_1[:],eris_ovvv, t2_1[:,a:a+k],optimize=True) t1_3 += lib.einsum('ilfd,mefa,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k] ,optimize=True) t1_3 -= lib.einsum('ilfd,mefa,mlde->ia',t2_1[:],eris_ovvv,t2_1[a:a+k] ,optimize=True) t1_3 += lib.einsum('ilaf,mefd,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) t1_3 -= lib.einsum('liaf,mefd,lmde->ia',t2_1[:],eris_ovvv,t2_1[:,a:a+k],optimize=True) del eris_ovvv a += k t1_3 += 0.25*lib.einsum('inde,lamn,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.25*lib.einsum('inde,lamn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.25*lib.einsum('nide,lamn,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 
0.25*lib.einsum('nide,lamn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.25*lib.einsum('inde,maln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.25*lib.einsum('inde,maln,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.25*lib.einsum('nide,maln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.25*lib.einsum('nide,maln,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('inde,lamn,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('inad,lemn,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,lemn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('niad,lemn,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('niad,lemn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,meln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('inad,meln,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('niad,meln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('niad,meln,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,lemn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('niad,lemn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,meln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('niad,meln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,lemn,lmed->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,meln,mled->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('inad,lemn,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,lemn,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('inad,meln,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('inad,meln,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('lnde,ianm,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('lnde,ianm,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('nlde,ianm,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('nlde,ianm,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('lnde,naim,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('lnde,naim,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5 * lib.einsum('nlde,naim,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5 * lib.einsum('nlde,naim,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('nled,ianm,mled->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('nled,naim,mled->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5*lib.einsum('lnde,ianm,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5*lib.einsum('lnde,ianm,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 0.5*lib.einsum('nlde,ianm,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= 0.5*lib.einsum('nlde,ianm,mlde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('lnde,ianm,lmde->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('lnde,ienm,lmad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('lnde,ienm,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += 
lib.einsum('nlde,ienm,lmad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('nlde,ienm,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('lnde,neim,lmad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('lnde,neim,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('nlde,neim,lmad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('nlde,neim,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('lnde,neim,lmad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('lnde,neim,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('nled,ienm,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('nled,neim,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('lned,ienm,lmad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 -= lib.einsum('lnde,neim,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 += lib.einsum('nlde,neim,mlad->ia',t2_1[:],eris_ovoo,t2_1[:],optimize=True) t1_3 = t1_3/D1 cput0 = log.timer_debug1("Completed amplitude calculation", *cput0) t1 = (t1_2, t1_3) t2 = (t2_1, t2_2) return t1, t2, t2_1_vvvv def compute_energy(myadc, t2, eris): cput0 = (time.clock(), time.time()) log = logger.Logger(myadc.stdout, myadc.verbose) if myadc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(myadc.method) nocc = myadc._nocc nvir = myadc._nvir eris_ovvo = eris.ovvo t2_new = t2[0][:].copy() if (myadc.method == "adc(3)"): t2_new += t2[1][:] #Compute MP2 correlation energy e_mp = 0.5 * lib.einsum('ijab,iabj', t2_new, eris_ovvo,optimize=True) e_mp -= 0.5 * lib.einsum('ijab,ibaj', t2_new, eris_ovvo,optimize=True) e_mp -= 0.5 * lib.einsum('jiab,iabj', t2_new, eris_ovvo,optimize=True) e_mp += 0.5 * lib.einsum('jiab,ibaj', t2_new, eris_ovvo,optimize=True) e_mp += lib.einsum('ijab,iabj', t2_new, eris_ovvo,optimize=True) del t2_new return e_mp def contract_ladder(myadc,t_amp,vvvv): log = logger.Logger(myadc.stdout, myadc.verbose) nocc = myadc._nocc nvir = myadc._nvir t_amp = np.ascontiguousarray(t_amp.reshape(nocc*nocc,nvir*nvir).T) t = np.zeros((nvir,nvir, nocc*nocc)) chnk_size = radc_ao2mo.calculate_chunk_size(myadc) a = 0 if isinstance(vvvv, list): for dataset in vvvv: k = dataset.shape[0] dataset = dataset[:].reshape(-1,nvir*nvir) t[a:a+k] = np.dot(dataset,t_amp).reshape(-1,nvir,nocc*nocc) a += k elif getattr(myadc, 'with_df', None): for p in range(0,nvir,chnk_size): vvvv_p = dfadc.get_vvvv_df(myadc, vvvv, p, chnk_size) k = vvvv_p.shape[0] vvvv_p = vvvv_p.reshape(-1,nvir*nvir) t[a:a+k] = np.dot(vvvv_p,t_amp).reshape(-1,nvir,nocc*nocc) del vvvv_p a += k else : raise Exception("Unknown vvvv type") del t_amp t = np.ascontiguousarray(t.transpose(2,0,1)).reshape(nocc, nocc, nvir, nvir) return t def density_matrix(myadc, T=None): if T is None: T = RADCIP(myadc).get_trans_moments() nocc = myadc._nocc nvir = myadc._nvir n_singles = nocc n_doubles = nvir * nocc * nocc ij_ind = np.tril_indices(nocc, k=-1) s1 = 0 f1 = n_singles s2 = f1 f2 = s2 + n_doubles T_doubles = T[:,n_singles:] T_doubles = T_doubles.reshape(-1,nvir,nocc,nocc) T_doubles_transpose = T_doubles.transpose(0,1,3,2).copy() T_bab = (2/3)*T_doubles + (1/3)*T_doubles_transpose T_aaa = T_bab - T_bab.transpose(0,1,3,2) T_a = T[:,s1:f1] T_bab = T_bab.reshape(-1,n_doubles) T_aaa = T_aaa.reshape(-1,n_doubles) dm = 2 * np.dot(T_a,T_a.T) + np.dot(T_aaa, T_aaa.T) + 2 * np.dot(T_bab, T_bab.T) return dm def analyze(myadc): str = 
("\n*************************************************************" "\n Eigenvector analysis summary" "\n*************************************************************") logger.info(myadc, str) myadc.analyze_eigenvector() if myadc.compute_properties: str = ("\n*************************************************************" "\n Spectroscopic factors analysis summary" "\n*************************************************************") logger.info(myadc, str) myadc.analyze_spec_factor() def compute_dyson_mo(myadc): X = myadc.X if X is None: nroots = myadc.U.shape[1] P,X = myadc.get_properties(nroots) nroots = X.shape[1] dyson_mo = np.dot(myadc.mo_coeff,X) return dyson_mo class RADC(lib.StreamObject): '''Ground state calculations Attributes: verbose : int Print level. Default value equals to :class:`Mole.verbose` max_memory : float or int Allowed memory in MB. Default value equals to :class:`Mole.max_memory` incore_complete : bool Avoid all I/O. Default is False. method : string nth-order ADC method. Options are : ADC(2), ADC(2)-X, ADC(3). Default is ADC(2). >>> mol = gto.M(atom = 'H 0 0 0; F 0 0 1.1', basis = 'ccpvdz') >>> mf = scf.RHF(mol).run() >>> myadc = adc.RADC(mf).run() Saved results e_corr : float MPn correlation correction e_tot : float Total energy (HF + correlation) t1, t2 : T amplitudes t1[i,a], t2[i,j,a,b] (i,j in occ, a,b in virt) ''' incore_complete = getattr(__config__, 'adc_radc_RADC_incore_complete', False) async_io = getattr(__config__, 'adc_radc_RADC_async_io', True) blkmin = getattr(__config__, 'adc_radc_RADC_blkmin', 4) memorymin = getattr(__config__, 'adc_radc_RADC_memorymin', 2000) def __init__(self, mf, frozen=0, mo_coeff=None, mo_occ=None): from pyscf import gto if 'dft' in str(mf.__module__): raise NotImplementedError('DFT reference for UADC') if mo_coeff is None: mo_coeff = mf.mo_coeff if mo_occ is None: mo_occ = mf.mo_occ self.mol = mf.mol self._scf = mf self.verbose = self.mol.verbose self.stdout = self.mol.stdout self.max_memory = mf.max_memory self.max_space = getattr(__config__, 'adc_radc_RADC_max_space', 12) self.max_cycle = getattr(__config__, 'adc_radc_RADC_max_cycle', 50) self.conv_tol = getattr(__config__, 'adc_radc_RADC_conv_tol', 1e-12) self.tol_residual = getattr(__config__, 'adc_radc_RADC_tol_res', 1e-6) self.scf_energy = mf.e_tot self.frozen = frozen self.incore_complete = self.incore_complete or self.mol.incore_anyway self.mo_coeff = mo_coeff self.mo_occ = mo_occ self.e_corr = None self.t1 = None self.t2 = None self.imds = lambda:None self._nocc = mf.mol.nelectron//2 self._nmo = mo_coeff.shape[1] self._nvir = self._nmo - self._nocc self.mo_energy = mf.mo_energy self.chkfile = mf.chkfile self.method = "adc(2)" self.method_type = "ip" self.with_df = None self.compute_properties = True self.evec_print_tol = 0.1 self.spec_factor_print_tol = 0.1 self.E = None self.U = None self.P = None self.X = None keys = set(('tol_residual','conv_tol', 'e_corr', 'method', 'mo_coeff', 'mol', 'mo_energy', 'max_memory', 'incore_complete', 'scf_energy', 'e_tot', 't1', 'frozen', 'chkfile', 'max_space', 't2', 'mo_occ', 'max_cycle')) self._keys = set(self.__dict__.keys()).union(keys) compute_amplitudes = compute_amplitudes compute_energy = compute_energy transform_integrals = radc_ao2mo.transform_integrals_incore make_rdm1 = density_matrix def dump_flags(self, verbose=None): logger.info(self, '') logger.info(self, '******** %s ********', self.__class__) logger.info(self, 'max_space = %d', self.max_space) logger.info(self, 'max_cycle = %d', self.max_cycle) logger.info(self, 
'conv_tol = %s', self.conv_tol) logger.info(self, 'max_memory %d MB (current use %d MB)', self.max_memory, lib.current_memory()[0]) return self def dump_flags_gs(self, verbose=None): logger.info(self, '') logger.info(self, '******** %s ********', self.__class__) logger.info(self, 'max_memory %d MB (current use %d MB)', self.max_memory, lib.current_memory()[0]) return self def kernel_gs(self): assert(self.mo_coeff is not None) assert(self.mo_occ is not None) self.method = self.method.lower() if self.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(self.method) if self.verbose >= logger.WARN: self.check_sanity() self.dump_flags_gs() nmo = self._nmo nao = self.mo_coeff.shape[0] nmo_pair = nmo * (nmo+1) // 2 nao_pair = nao * (nao+1) // 2 mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6 mem_now = lib.current_memory()[0] if getattr(self, 'with_df', None) or getattr(self._scf, 'with_df', None): if getattr(self, 'with_df', None): self.with_df = self.with_df else : self.with_df = self._scf.with_df def df_transform(): return radc_ao2mo.transform_integrals_df(self) self.transform_integrals = df_transform elif (self._scf._eri is None or (mem_incore+mem_now >= self.max_memory and not self.incore_complete)): def outcore_transform(): return radc_ao2mo.transform_integrals_outcore(self) self.transform_integrals = outcore_transform eris = self.transform_integrals() self.e_corr, self.t1, self.t2 = compute_amplitudes_energy(self, eris=eris, verbose=self.verbose) self._finalize() return self.e_corr, self.t1, self.t2 def kernel(self, nroots=1, guess=None, eris=None): assert(self.mo_coeff is not None) assert(self.mo_occ is not None) self.method = self.method.lower() if self.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(self.method) if self.verbose >= logger.WARN: self.check_sanity() self.dump_flags_gs() nmo = self._nmo nao = self.mo_coeff.shape[0] nmo_pair = nmo * (nmo+1) // 2 nao_pair = nao * (nao+1) // 2 mem_incore = (max(nao_pair**2, nmo**4) + nmo_pair**2) * 8/1e6 mem_now = lib.current_memory()[0] if getattr(self, 'with_df', None) or getattr(self._scf, 'with_df', None): if getattr(self, 'with_df', None): self.with_df = self.with_df else : self.with_df = self._scf.with_df def df_transform(): return radc_ao2mo.transform_integrals_df(self) self.transform_integrals = df_transform elif (self._scf._eri is None or (mem_incore+mem_now >= self.max_memory and not self.incore_complete)): def outcore_transform(): return radc_ao2mo.transform_integrals_outcore(self) self.transform_integrals = outcore_transform eris = self.transform_integrals() self.e_corr, self.t1, self.t2 = compute_amplitudes_energy(self, eris=eris, verbose=self.verbose) self._finalize() self.method_type = self.method_type.lower() if(self.method_type == "ea"): e_exc, v_exc, spec_fac, x, adc_es = self.ea_adc(nroots=nroots, guess=guess, eris=eris) elif(self.method_type == "ip"): e_exc, v_exc, spec_fac, x, adc_es = self.ip_adc(nroots=nroots, guess=guess, eris=eris) else: raise NotImplementedError(self.method_type) self._adc_es = adc_es return e_exc, v_exc, spec_fac, x def _finalize(self): '''Hook for dumping results and clearing up the object.''' logger.note(self, 'E_corr = %.8f', self.e_corr) return self def ea_adc(self, nroots=1, guess=None, eris=None): adc_es = RADCEA(self) e_exc, v_exc, spec_fac, x = adc_es.kernel(nroots, guess, eris) return e_exc, v_exc, spec_fac, x, adc_es def ip_adc(self, nroots=1, guess=None, eris=None): adc_es = RADCIP(self) e_exc, v_exc, spec_fac, x = 
adc_es.kernel(nroots, guess, eris) return e_exc, v_exc, spec_fac, x, adc_es def density_fit(self, auxbasis=None, with_df = None): if with_df is None: self.with_df = df.DF(self._scf.mol) self.with_df.max_memory = self.max_memory self.with_df.stdout = self.stdout self.with_df.verbose = self.verbose if auxbasis is None: self.with_df.auxbasis = self._scf.with_df.auxbasis else : self.with_df.auxbasis = auxbasis else : self.with_df = with_df return self def analyze(self): self._adc_es.analyze() def compute_dyson_mo(self): return self._adc_es.compute_dyson_mo() def get_imds_ea(adc, eris=None): cput0 = (time.clock(), time.time()) log = logger.Logger(adc.stdout, adc.verbose) if adc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(adc.method) method = adc.method t1 = adc.t1 t2 = adc.t2 t1_2 = t1[0] eris_ovvo = eris.ovvo nocc = adc._nocc nvir = adc._nvir e_occ = adc.mo_energy[:nocc].copy() e_vir = adc.mo_energy[nocc:].copy() idn_occ = np.identity(nocc) idn_vir = np.identity(nvir) if eris is None: eris = adc.transform_integrals() # a-b block # Zeroth-order terms M_ab = lib.einsum('ab,a->ab', idn_vir, e_vir) # Second-order terms t2_1 = t2[0][:] M_ab += lib.einsum('l,lmad,lmbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab -= lib.einsum('l,lmad,mlbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab -= lib.einsum('l,mlad,lmbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab += lib.einsum('l,mlad,mlbd->ab',e_occ ,t2_1, t2_1,optimize=True) M_ab += lib.einsum('l,lmad,lmbd->ab',e_occ,t2_1, t2_1,optimize=True) M_ab += lib.einsum('l,mlad,mlbd->ab',e_occ,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,lmad,lmbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab += 0.5 * lib.einsum('d,lmad,mlbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab += 0.5 * lib.einsum('d,mlad,lmbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,mlad,mlbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,lmad,lmbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab -= 0.5 * lib.einsum('d,mlad,mlbd->ab',e_vir,t2_1, t2_1,optimize=True) M_ab_t = lib.einsum('lmad,lmbd->ab', t2_1,t2_1, optimize=True) M_ab -= 1 * lib.einsum('a,ab->ab',e_vir,M_ab_t,optimize=True) M_ab -= 1 * lib.einsum('b,ab->ab',e_vir,M_ab_t,optimize=True) M_ab_t = lib.einsum('lmad,mlbd->ab', t2_1,t2_1, optimize=True) M_ab += 0.5 * lib.einsum('a,ab->ab',e_vir,M_ab_t,optimize=True) M_ab += 0.5 * lib.einsum('b,ab->ab',e_vir,M_ab_t,optimize=True) del M_ab_t M_ab -= 0.5 * lib.einsum('lmad,lbdm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('mlad,lbdm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('lmad,ldbm->ab',t2_1, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('mlad,ldbm->ab',t2_1, eris_ovvo,optimize=True) M_ab -= lib.einsum('lmad,lbdm->ab',t2_1, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('lmbd,ladm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('mlbd,ladm->ab',t2_1, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('lmbd,ldam->ab',t2_1, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('mlbd,ldam->ab',t2_1, eris_ovvo,optimize=True) M_ab -= lib.einsum('lmbd,ladm->ab',t2_1, eris_ovvo,optimize=True) del t2_1 cput0 = log.timer_debug1("Completed M_ab second-order terms ADC(2) calculation", *cput0) #Third-order terms if(method =='adc(3)'): eris_oovv = eris.oovv eris_oooo = eris.oooo if isinstance(eris.ovvv, type(None)): chnk_size = radc_ao2mo.calculate_chunk_size(adc) else : chnk_size = nocc a = 0 for p in range(0,nocc,chnk_size): if getattr(adc, 'with_df', None): eris_ovvv = dfadc.get_ovvv_df(adc, eris.Lov, 
eris.Lvv, p, chnk_size).reshape(-1,nvir,nvir,nvir) else : eris_ovvv = radc_ao2mo.unpack_eri_1(eris.ovvv, nvir) k = eris_ovvv.shape[0] M_ab += 4. * lib.einsum('ld,ldab->ab',t1_2[a:a+k], eris_ovvv,optimize=True) M_ab -= lib.einsum('ld,lbad->ab',t1_2[a:a+k], eris_ovvv,optimize=True) M_ab -= lib.einsum('ld,ladb->ab',t1_2[a:a+k], eris_ovvv,optimize=True) del eris_ovvv a += k cput0 = log.timer_debug1("Completed M_ab ovvv ADC(3) calculation", *cput0) t2_2 = t2[1][:] M_ab -= 0.5 * lib.einsum('lmad,lbdm->ab',t2_2, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('mlad,lbdm->ab',t2_2, eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('lmad,ldbm->ab',t2_2, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('mlad,ldbm->ab',t2_2, eris_ovvo,optimize=True) M_ab -= lib.einsum('lmad,lbdm->ab',t2_2, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('lmbd,ladm->ab',t2_2,eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('mlbd,ladm->ab',t2_2,eris_ovvo,optimize=True) M_ab += 0.5 * lib.einsum('lmbd,ldam->ab',t2_2, eris_ovvo,optimize=True) M_ab -= 0.5 * lib.einsum('mlbd,ldam->ab',t2_2, eris_ovvo,optimize=True) M_ab -= lib.einsum('lmbd,ladm->ab',t2_2,eris_ovvo,optimize=True) t2_1 = t2[0][:] M_ab += lib.einsum('l,lmbd,lmad->ab',e_occ, t2_1, t2_2, optimize=True) M_ab -= lib.einsum('l,lmbd,mlad->ab',e_occ, t2_1, t2_2, optimize=True) M_ab -= lib.einsum('l,mlbd,lmad->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,mlbd,mlad->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,lmbd,lmad->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,mlbd,mlad->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,lmad,lmbd->ab',e_occ, t2_1, t2_2, optimize=True) M_ab -= lib.einsum('l,lmad,mlbd->ab',e_occ, t2_1, t2_2, optimize=True) M_ab -= lib.einsum('l,mlad,lmbd->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,mlad,mlbd->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,lmad,lmbd->ab',e_occ, t2_1, t2_2, optimize=True) M_ab += lib.einsum('l,mlad,mlbd->ab',e_occ, t2_1, t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,lmbd,lmad->ab', e_vir, t2_1 ,t2_2, optimize=True) M_ab += 0.5*lib.einsum('d,lmbd,mlad->ab', e_vir, t2_1 ,t2_2, optimize=True) M_ab += 0.5*lib.einsum('d,mlbd,lmad->ab', e_vir, t2_1 ,t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,mlbd,mlad->ab', e_vir, t2_1 ,t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,lmbd,lmad->ab', e_vir, t2_1 ,t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,mlbd,mlad->ab', e_vir, t2_1 ,t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,lmad,lmbd->ab', e_vir, t2_1, t2_2, optimize=True) M_ab += 0.5*lib.einsum('d,lmad,mlbd->ab', e_vir, t2_1, t2_2, optimize=True) M_ab += 0.5*lib.einsum('d,mlad,lmbd->ab', e_vir, t2_1, t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,mlad,mlbd->ab', e_vir, t2_1, t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,lmad,lmbd->ab', e_vir, t2_1, t2_2, optimize=True) M_ab -= 0.5*lib.einsum('d,mlad,mlbd->ab', e_vir, t2_1, t2_2, optimize=True) M_ab_t = lib.einsum('lmbd,lmad->ab', t2_1,t2_2, optimize=True) M_ab -= 1. * lib.einsum('a,ab->ab',e_vir, M_ab_t, optimize=True) M_ab -= 1. * lib.einsum('a,ba->ab',e_vir, M_ab_t, optimize=True) M_ab -= 1. * lib.einsum('b,ab->ab',e_vir, M_ab_t, optimize=True) M_ab -= 1. 
* lib.einsum('b,ba->ab',e_vir, M_ab_t, optimize=True) del M_ab_t M_ab_t_1 = lib.einsum('lmbd,mlad->ab', t2_1,t2_2, optimize=True) del t2_2 M_ab += 0.5 * lib.einsum('a,ab->ab',e_vir, M_ab_t_1, optimize=True) M_ab += 0.5 * lib.einsum('a,ba->ab',e_vir, M_ab_t_1, optimize=True) M_ab += 0.5 * lib.einsum('b,ab->ab',e_vir, M_ab_t_1, optimize=True) M_ab += 0.5 * lib.einsum('b,ba->ab',e_vir, M_ab_t_1, optimize=True) del M_ab_t_1 log.timer_debug1("Starting the small integrals calculation") temp_t2_v_1 = lib.einsum('lned,mlbd->nemb',t2_1, t2_1,optimize=True) M_ab -= lib.einsum('nemb,nmae->ab',temp_t2_v_1, eris_oovv, optimize=True) M_ab -= lib.einsum('mbne,nmae->ab',temp_t2_v_1, eris_oovv, optimize=True) M_ab += lib.einsum('nemb,maen->ab',temp_t2_v_1, eris_ovvo, optimize=True) M_ab += lib.einsum('mbne,maen->ab',temp_t2_v_1, eris_ovvo, optimize=True) M_ab += lib.einsum('nemb,neam->ab',temp_t2_v_1, eris_ovvo, optimize=True) M_ab -= lib.einsum('name,nmeb->ab',temp_t2_v_1, eris_oovv, optimize=True) M_ab -= lib.einsum('mena,nmeb->ab',temp_t2_v_1, eris_oovv, optimize=True) M_ab += 2. * lib.einsum('name,nbem->ab',temp_t2_v_1, eris_ovvo, optimize=True) M_ab += 2. * lib.einsum('mena,nbem->ab',temp_t2_v_1, eris_ovvo, optimize=True) M_ab += lib.einsum('nbme,mean->ab',temp_t2_v_1, eris_ovvo, optimize=True) del temp_t2_v_1 temp_t2_v_2 = lib.einsum('nled,mlbd->nemb',t2_1, t2_1,optimize=True) M_ab += 2. * lib.einsum('nemb,nmae->ab',temp_t2_v_2, eris_oovv, optimize=True) M_ab -= 2. * lib.einsum('nemb,maen->ab',temp_t2_v_2, eris_ovvo, optimize=True) M_ab -= lib.einsum('nemb,neam->ab',temp_t2_v_2, eris_ovvo, optimize=True) M_ab += 2. * lib.einsum('mena,nmeb->ab',temp_t2_v_2, eris_oovv, optimize=True) M_ab -= 4. * lib.einsum('mena,nbem->ab',temp_t2_v_2, eris_ovvo, optimize=True) M_ab -= lib.einsum('nemb,neam->ab',temp_t2_v_2, eris_ovvo, optimize=True) del temp_t2_v_2 temp_t2_v_3 = lib.einsum('lned,lmbd->nemb',t2_1, t2_1,optimize=True) M_ab -= lib.einsum('nemb,maen->ab',temp_t2_v_3, eris_ovvo, optimize=True) M_ab += 2. * lib.einsum('nemb,nmae->ab',temp_t2_v_3, eris_oovv, optimize=True) M_ab += 2. * lib.einsum('mena,nmeb->ab',temp_t2_v_3, eris_oovv, optimize=True) M_ab -= lib.einsum('mena,nbem->ab',temp_t2_v_3, eris_ovvo, optimize=True) del temp_t2_v_3 temp_t2_v_4 = lib.einsum('lnae,nmde->lmad',t2_1, eris_oovv,optimize=True) M_ab -= lib.einsum('mlbd,lmad->ab',t2_1, temp_t2_v_4,optimize=True) M_ab += 2. * lib.einsum('lmbd,lmad->ab',t2_1, temp_t2_v_4,optimize=True) del temp_t2_v_4 temp_t2_v_5 = lib.einsum('nlae,nmde->lamd',t2_1, eris_oovv,optimize=True) M_ab += 2. * lib.einsum('mlbd,lamd->ab',t2_1, temp_t2_v_5, optimize=True) M_ab -= lib.einsum('lmbd,lamd->ab',t2_1, temp_t2_v_5, optimize=True) del temp_t2_v_5 temp_t2_v_6 = lib.einsum('lnae,nedm->ladm',t2_1, eris_ovvo,optimize=True) M_ab += 2. * lib.einsum('mlbd,ladm->ab',t2_1, temp_t2_v_6, optimize=True) M_ab -= 4. * lib.einsum('lmbd,ladm->ab',t2_1, temp_t2_v_6, optimize=True) del temp_t2_v_6 temp_t2_v_7 = lib.einsum('nlae,nedm->ladm',t2_1, eris_ovvo,optimize=True) M_ab -= lib.einsum('mlbd,ladm->ab',t2_1, temp_t2_v_7, optimize=True) M_ab += 2. 
* lib.einsum('lmbd,ladm->ab',t2_1, temp_t2_v_7, optimize=True) del temp_t2_v_7 temp_t2_v_8 = lib.einsum('lned,mled->mn',t2_1, t2_1,optimize=True) M_ab += 2.* lib.einsum('mn,nmab->ab',temp_t2_v_8, eris_oovv, optimize=True) M_ab -= lib.einsum('mn,nbam->ab', temp_t2_v_8, eris_ovvo, optimize=True) del temp_t2_v_8 temp_t2_v_9 = lib.einsum('nled,mled->mn',t2_1, t2_1,optimize=True) M_ab -= 4.* lib.einsum('mn,nmab->ab',temp_t2_v_9, eris_oovv, optimize=True) M_ab += 2. * lib.einsum('mn,nbam->ab',temp_t2_v_9, eris_ovvo, optimize=True) del temp_t2_v_9 temp_t2_v_10 = lib.einsum('noad,nmol->mlad',t2_1, eris_oooo,optimize=True) M_ab -= 0.25*lib.einsum('mlbd,mlad->ab',t2_1, temp_t2_v_10, optimize=True) M_ab += 0.25*lib.einsum('lmbd,mlad->ab',t2_1, temp_t2_v_10, optimize=True) M_ab += 0.25*lib.einsum('mlbd,lmad->ab',t2_1, temp_t2_v_10, optimize=True) M_ab -= 0.25*lib.einsum('lmbd,lmad->ab',t2_1, temp_t2_v_10, optimize=True) M_ab -= lib.einsum('mlbd,mlad->ab',t2_1, temp_t2_v_10, optimize=True) del temp_t2_v_10 temp_t2_v_11 = lib.einsum('onad,nmol->mlad',t2_1, eris_oooo,optimize=True) M_ab += 0.25*lib.einsum('mlbd,mlad->ab',t2_1, temp_t2_v_11, optimize=True) M_ab -= 0.25*lib.einsum('lmbd,mlad->ab',t2_1, temp_t2_v_11, optimize=True) M_ab -= 0.25*lib.einsum('mlbd,lmad->ab',t2_1, temp_t2_v_11, optimize=True) M_ab += 0.25*lib.einsum('lmbd,lmad->ab',t2_1, temp_t2_v_11, optimize=True) del temp_t2_v_11 log.timer_debug1("Completed M_ab ADC(3) small integrals calculation") log.timer_debug1("Starting M_ab vvvv ADC(3) calculation") if isinstance(eris.vvvv, np.ndarray): temp_t2 = adc.imds.t2_1_vvvv M_ab -= 0.25*lib.einsum('mlaf,mlbf->ab',t2_1, temp_t2, optimize=True) M_ab += 0.25*lib.einsum('mlaf,lmbf->ab',t2_1, temp_t2, optimize=True) M_ab += 0.25*lib.einsum('lmaf,mlbf->ab',t2_1, temp_t2, optimize=True) M_ab -= 0.25*lib.einsum('lmaf,lmbf->ab',t2_1, temp_t2, optimize=True) M_ab += 0.25*lib.einsum('mlaf,mlfb->ab',t2_1, temp_t2, optimize=True) M_ab -= 0.25*lib.einsum('mlaf,lmfb->ab',t2_1, temp_t2, optimize=True) M_ab -= 0.25*lib.einsum('lmaf,mlfb->ab',t2_1, temp_t2, optimize=True) M_ab += 0.25*lib.einsum('lmaf,lmfb->ab',t2_1, temp_t2, optimize=True) M_ab -= lib.einsum('mlaf,mlbf->ab',t2_1, temp_t2, optimize=True) M_ab -= 0.25*lib.einsum('mlad,mlbd->ab', temp_t2, t2_1, optimize=True) M_ab += 0.25*lib.einsum('mlad,lmbd->ab', temp_t2, t2_1, optimize=True) M_ab += 0.25*lib.einsum('lmad,mlbd->ab', temp_t2, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('lmad,lmbd->ab', temp_t2, t2_1, optimize=True) M_ab -= lib.einsum('mlad,mlbd->ab', temp_t2, t2_1, optimize=True) M_ab += 0.25*lib.einsum('lmad,mlbd->ab',temp_t2, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('lmad,lmbd->ab',temp_t2, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('mlad,mlbd->ab',temp_t2, t2_1, optimize=True) M_ab += 0.25*lib.einsum('mlad,lmbd->ab',temp_t2, t2_1, optimize=True) del temp_t2 eris_vvvv = eris.vvvv eris_vvvv = eris_vvvv.reshape(nvir,nvir,nvir,nvir) M_ab -= lib.einsum('mldf,mled,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab += lib.einsum('mldf,lmed,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab += lib.einsum('lmdf,mled,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab -= lib.einsum('lmdf,lmed,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab += 0.5*lib.einsum('mldf,mled,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab -= 0.5*lib.einsum('mldf,lmed,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab -= 0.5*lib.einsum('lmdf,mled,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab += 0.5*lib.einsum('lmdf,lmed,aefb->ab',t2_1, t2_1, 
eris_vvvv, optimize=True) M_ab += 2.*lib.einsum('mlfd,mled,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) M_ab -= lib.einsum('mlfd,mled,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) eris_vvvv = eris_vvvv.reshape(nvir*nvir,nvir*nvir) else: temp_t2_vvvv = adc.imds.t2_1_vvvv[:] M_ab -= 0.25*lib.einsum('mlaf,mlbf->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab += 0.25*lib.einsum('mlaf,lmbf->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab += 0.25*lib.einsum('lmaf,mlbf->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab -= 0.25*lib.einsum('lmaf,lmbf->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab += 0.25*lib.einsum('mlaf,mlfb->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab -= 0.25*lib.einsum('mlaf,lmfb->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab -= 0.25*lib.einsum('lmaf,mlfb->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab += 0.25*lib.einsum('lmaf,lmfb->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab -= lib.einsum('mlaf,mlbf->ab',t2_1, temp_t2_vvvv, optimize=True) M_ab += 0.25*lib.einsum('lmad,mlbd->ab',temp_t2_vvvv, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('lmad,lmbd->ab',temp_t2_vvvv, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('mlad,mlbd->ab',temp_t2_vvvv, t2_1, optimize=True) M_ab += 0.25*lib.einsum('mlad,lmbd->ab',temp_t2_vvvv, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('mlad,mlbd->ab', temp_t2_vvvv, t2_1, optimize=True) M_ab += 0.25*lib.einsum('mlad,lmbd->ab', temp_t2_vvvv, t2_1, optimize=True) M_ab += 0.25*lib.einsum('lmad,mlbd->ab', temp_t2_vvvv, t2_1, optimize=True) M_ab -= 0.25*lib.einsum('lmad,lmbd->ab', temp_t2_vvvv, t2_1, optimize=True) M_ab -= lib.einsum('mlad,mlbd->ab', temp_t2_vvvv, t2_1, optimize=True) del temp_t2_vvvv chnk_size = radc_ao2mo.calculate_chunk_size(adc) a = 0 temp = np.zeros((nvir,nvir)) if isinstance(eris.vvvv, list): for dataset in eris.vvvv: k = dataset.shape[0] eris_vvvv = dataset[:].reshape(-1,nvir,nvir,nvir) temp[a:a+k] -= lib.einsum('mldf,mled,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] += lib.einsum('mldf,lmed,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] += lib.einsum('lmdf,mled,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] -= lib.einsum('lmdf,lmed,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] += 0.5*lib.einsum('mldf,mled,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] -= 0.5*lib.einsum('mldf,lmed,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] -= 0.5*lib.einsum('lmdf,mled,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] += 0.5*lib.einsum('lmdf,lmed,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] += 2.*lib.einsum('mlfd,mled,aebf->ab',t2_1, t2_1, eris_vvvv, optimize=True) temp[a:a+k] -= lib.einsum('mlfd,mled,aefb->ab',t2_1, t2_1, eris_vvvv, optimize=True) del eris_vvvv a += k else : for p in range(0,nvir,chnk_size): vvvv = dfadc.get_vvvv_df(adc, eris.Lvv, p, chnk_size).reshape(-1,nvir,nvir,nvir) k = vvvv.shape[0] temp[a:a+k] -= lib.einsum('mldf,mled,aebf->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] += lib.einsum('mldf,lmed,aebf->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] += lib.einsum('lmdf,mled,aebf->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] -= lib.einsum('lmdf,lmed,aebf->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] += 0.5*lib.einsum('mldf,mled,aefb->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] -= 0.5*lib.einsum('mldf,lmed,aefb->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] -= 0.5*lib.einsum('lmdf,mled,aefb->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] += 0.5*lib.einsum('lmdf,lmed,aefb->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] += 
2.*lib.einsum('mlfd,mled,aebf->ab',t2_1, t2_1, vvvv, optimize=True) temp[a:a+k] -= lib.einsum('mlfd,mled,aefb->ab',t2_1, t2_1, vvvv, optimize=True) del vvvv a += k M_ab += temp del temp del t2_1 cput0 = log.timer_debug1("Completed M_ab ADC(3) calculation", *cput0) return M_ab def get_imds_ip(adc, eris=None): cput0 = (time.clock(), time.time()) log = logger.Logger(adc.stdout, adc.verbose) if adc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(adc.method) method = adc.method t1 = adc.t1 t2 = adc.t2 t1_2 = t1[0] nocc = adc._nocc nvir = adc._nvir e_occ = adc.mo_energy[:nocc] e_vir = adc.mo_energy[nocc:] idn_occ = np.identity(nocc) idn_vir = np.identity(nvir) if eris is None: eris = adc.transform_integrals() eris_ovvo = eris.ovvo # i-j block # Zeroth-order terms M_ij = lib.einsum('ij,j->ij', idn_occ ,e_occ) # Second-order terms t2_1 = t2[0][:] M_ij += lib.einsum('d,ilde,jlde->ij',e_vir,t2_1, t2_1, optimize=True) M_ij -= lib.einsum('d,ilde,ljde->ij',e_vir,t2_1, t2_1, optimize=True) M_ij -= lib.einsum('d,lide,jlde->ij',e_vir,t2_1, t2_1, optimize=True) M_ij += lib.einsum('d,lide,ljde->ij',e_vir,t2_1, t2_1, optimize=True) M_ij += lib.einsum('d,ilde,jlde->ij',e_vir,t2_1, t2_1, optimize=True) M_ij += lib.einsum('d,iled,jled->ij',e_vir,t2_1, t2_1, optimize=True) M_ij -= 0.5 * lib.einsum('l,ilde,jlde->ij',e_occ,t2_1, t2_1, optimize=True) M_ij += 0.5 * lib.einsum('l,ilde,ljde->ij',e_occ,t2_1, t2_1, optimize=True) M_ij += 0.5 * lib.einsum('l,lide,jlde->ij',e_occ,t2_1, t2_1, optimize=True) M_ij -= 0.5 * lib.einsum('l,lide,ljde->ij',e_occ,t2_1, t2_1, optimize=True) M_ij -= 0.5*lib.einsum('l,ilde,jlde->ij',e_occ,t2_1, t2_1, optimize=True) M_ij -= 0.5*lib.einsum('l,ilde,jlde->ij',e_occ,t2_1, t2_1, optimize=True) M_ij_t = lib.einsum('ilde,jlde->ij', t2_1,t2_1, optimize=True) M_ij -= lib.einsum('i,ij->ij',e_occ,M_ij_t, optimize=True) M_ij -= lib.einsum('j,ij->ij',e_occ,M_ij_t, optimize=True) M_ij_t = lib.einsum('ilde,ljde->ij', t2_1,t2_1, optimize=True) M_ij += 0.5 * lib.einsum('i,ij->ij',e_occ,M_ij_t, optimize=True) M_ij += 0.5 * lib.einsum('j,ij->ij',e_occ,M_ij_t, optimize=True) del M_ij_t M_ij += 0.5 * lib.einsum('ilde,jdel->ij',t2_1, eris_ovvo,optimize=True) M_ij -= 0.5 * lib.einsum('lide,jdel->ij',t2_1, eris_ovvo,optimize=True) M_ij -= 0.5 * lib.einsum('ilde,jedl->ij',t2_1, eris_ovvo,optimize=True) M_ij += 0.5 * lib.einsum('lide,jedl->ij',t2_1, eris_ovvo,optimize=True) M_ij += lib.einsum('ilde,jdel->ij',t2_1, eris_ovvo,optimize=True) M_ij += 0.5 * lib.einsum('jlde,idel->ij',t2_1, eris_ovvo,optimize=True) M_ij -= 0.5 * lib.einsum('ljde,idel->ij',t2_1, eris_ovvo,optimize=True) M_ij -= 0.5 * lib.einsum('jlde,ldei->ij',t2_1, eris_ovvo,optimize=True) M_ij += 0.5 * lib.einsum('ljde,ldei->ij',t2_1, eris_ovvo,optimize=True) M_ij += lib.einsum('jlde,idel->ij',t2_1, eris_ovvo,optimize=True) del t2_1 cput0 = log.timer_debug1("Completed M_ij second-order terms ADC(2) calculation", *cput0) # Third-order terms if (method == "adc(3)"): eris_oovv = eris.oovv eris_ovoo = eris.ovoo eris_oooo = eris.oooo M_ij += lib.einsum('ld,ldji->ij',t1_2, eris_ovoo,optimize=True) M_ij -= lib.einsum('ld,jdli->ij',t1_2, eris_ovoo,optimize=True) M_ij += lib.einsum('ld,ldji->ij',t1_2, eris_ovoo,optimize=True) M_ij += lib.einsum('ld,ldij->ij',t1_2, eris_ovoo,optimize=True) M_ij -= lib.einsum('ld,idlj->ij',t1_2, eris_ovoo,optimize=True) M_ij += lib.einsum('ld,ldij->ij',t1_2, eris_ovoo,optimize=True) t2_2 = t2[1][:] M_ij += 0.5* lib.einsum('ilde,jdel->ij',t2_2, eris_ovvo,optimize=True) M_ij -= 0.5* 
lib.einsum('lide,jdel->ij',t2_2, eris_ovvo,optimize=True) M_ij -= 0.5* lib.einsum('ilde,jedl->ij',t2_2, eris_ovvo,optimize=True) M_ij += 0.5* lib.einsum('lide,jedl->ij',t2_2, eris_ovvo,optimize=True) M_ij += lib.einsum('ilde,jdel->ij',t2_2, eris_ovvo,optimize=True) M_ij += 0.5* lib.einsum('jlde,ledi->ij',t2_2, eris_ovvo,optimize=True) M_ij -= 0.5* lib.einsum('ljde,ledi->ij',t2_2, eris_ovvo,optimize=True) M_ij -= 0.5* lib.einsum('jlde,iedl->ij',t2_2, eris_ovvo,optimize=True) M_ij += 0.5* lib.einsum('ljde,iedl->ij',t2_2, eris_ovvo,optimize=True) M_ij += lib.einsum('jlde,ledi->ij',t2_2, eris_ovvo,optimize=True) t2_1 = t2[0][:] M_ij += lib.einsum('d,ilde,jlde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij -= lib.einsum('d,ilde,ljde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij -= lib.einsum('d,lide,jlde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,lide,ljde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,ilde,jlde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,iled,jled->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,jlde,ilde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij -= lib.einsum('d,jlde,lide->ij',e_vir,t2_1, t2_2,optimize=True) M_ij -= lib.einsum('d,ljde,ilde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,ljde,lide->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,jlde,ilde->ij',e_vir,t2_1, t2_2,optimize=True) M_ij += lib.einsum('d,jled,iled->ij',e_vir,t2_1, t2_2,optimize=True) M_ij -= 0.5 * lib.einsum('l,ilde,jlde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij += 0.5 * lib.einsum('l,ilde,ljde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij += 0.5 * lib.einsum('l,lide,jlde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5 * lib.einsum('l,lide,ljde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5*lib.einsum('l,ilde,jlde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5*lib.einsum('l,ilde,jlde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5 * lib.einsum('l,jlde,ilde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij += 0.5 * lib.einsum('l,jlde,lide->ij',e_occ,t2_1, t2_2,optimize=True) M_ij += 0.5 * lib.einsum('l,ljde,ilde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5 * lib.einsum('l,ljde,lide->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5*lib.einsum('l,jlde,ilde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij -= 0.5*lib.einsum('l,jlde,ilde->ij',e_occ,t2_1, t2_2,optimize=True) M_ij_t = lib.einsum('ilde,jlde->ij', t2_1,t2_2, optimize=True) M_ij -= 1. * lib.einsum('i,ij->ij',e_occ, M_ij_t, optimize=True) M_ij -= 1. * lib.einsum('i,ji->ij',e_occ, M_ij_t, optimize=True) M_ij -= 1. * lib.einsum('j,ij->ij',e_occ, M_ij_t, optimize=True) M_ij -= 1. 
* lib.einsum('j,ji->ij',e_occ, M_ij_t, optimize=True) del M_ij_t M_ij_t_1 = lib.einsum('ilde,ljde->ij', t2_1,t2_2, optimize=True) del t2_2 M_ij += 0.5 * lib.einsum('i,ij->ij',e_occ, M_ij_t_1, optimize=True) M_ij += 0.5 * lib.einsum('i,ji->ij',e_occ, M_ij_t_1, optimize=True) M_ij += 0.5 * lib.einsum('j,ij->ij',e_occ, M_ij_t_1, optimize=True) M_ij += 0.5 * lib.einsum('j,ji->ij',e_occ, M_ij_t_1, optimize=True) del M_ij_t_1 temp_t2_vvvv = adc.imds.t2_1_vvvv[:] M_ij += 0.25*lib.einsum('ilde,jlde->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij -= 0.25*lib.einsum('ilde,ljde->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij -= 0.25*lib.einsum('lide,jlde->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij += 0.25*lib.einsum('lide,ljde->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij -= 0.25*lib.einsum('ilde,jled->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij += 0.25*lib.einsum('ilde,ljed->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij += 0.25*lib.einsum('lide,jled->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij -= 0.25*lib.einsum('lide,ljed->ij',t2_1, temp_t2_vvvv, optimize = True) M_ij +=lib.einsum('ilde,jlde->ij',t2_1, temp_t2_vvvv, optimize = True) del temp_t2_vvvv log.timer_debug1("Starting the small integrals calculation") temp_t2_v_1 = lib.einsum('lmde,jldf->mejf',t2_1, t2_1,optimize=True) M_ij -= 2 * lib.einsum('mejf,mefi->ij',temp_t2_v_1, eris_ovvo,optimize = True) M_ij -= 2 * lib.einsum('jfme,mefi->ij',temp_t2_v_1, eris_ovvo,optimize = True) M_ij += lib.einsum('mejf,mife->ij',temp_t2_v_1, eris_oovv,optimize = True) M_ij += lib.einsum('jfme,mife->ij',temp_t2_v_1, eris_oovv,optimize = True) M_ij -= 2 * lib.einsum('meif,mefj->ij',temp_t2_v_1, eris_ovvo ,optimize = True) M_ij -= 2 * lib.einsum('ifme,mefj->ij',temp_t2_v_1, eris_ovvo ,optimize = True) M_ij += lib.einsum('meif,mjfe->ij',temp_t2_v_1, eris_oovv ,optimize = True) M_ij += lib.einsum('ifme,mjfe->ij',temp_t2_v_1, eris_oovv ,optimize = True) del temp_t2_v_1 temp_t2_v_2 = lib.einsum('lmde,ljdf->mejf',t2_1, t2_1,optimize=True) M_ij += 4 * lib.einsum('mejf,mefi->ij',temp_t2_v_2, eris_ovvo,optimize = True) M_ij += 4 * lib.einsum('meif,mefj->ij',temp_t2_v_2, eris_ovvo,optimize = True) M_ij -= 2 * lib.einsum('meif,mjfe->ij',temp_t2_v_2, eris_oovv,optimize = True) M_ij -= 2 * lib.einsum('mejf,mife->ij',temp_t2_v_2, eris_oovv,optimize = True) del temp_t2_v_2 temp_t2_v_3 = lib.einsum('mlde,jldf->mejf',t2_1, t2_1,optimize=True) M_ij += lib.einsum('mejf,mefi->ij',temp_t2_v_3, eris_ovvo,optimize = True) M_ij += lib.einsum('meif,mefj->ij',temp_t2_v_3, eris_ovvo,optimize = True) M_ij -= 2 *lib.einsum('meif,mjfe->ij',temp_t2_v_3, eris_oovv,optimize = True) M_ij -= 2 * lib.einsum('mejf,mife->ij',temp_t2_v_3, eris_oovv,optimize = True) del temp_t2_v_3 temp_t2_v_4 = lib.einsum('ilde,lmfe->idmf',t2_1, eris_oovv,optimize=True) M_ij -= 2 * lib.einsum('idmf,jmdf->ij',temp_t2_v_4, t2_1, optimize = True) M_ij += lib.einsum('idmf,mjdf->ij',temp_t2_v_4, t2_1, optimize = True) del temp_t2_v_4 temp_t2_v_5 = lib.einsum('lide,lmfe->idmf',t2_1, eris_oovv,optimize=True) M_ij += lib.einsum('idmf,jmdf->ij',temp_t2_v_5, t2_1, optimize = True) M_ij -= 2 * lib.einsum('idmf,mjdf->ij',temp_t2_v_5, t2_1, optimize = True) del temp_t2_v_5 temp_t2_v_6 = lib.einsum('ilde,lefm->idfm',t2_1, eris_ovvo,optimize=True) M_ij += 4 * lib.einsum('idfm,jmdf->ij',temp_t2_v_6, t2_1,optimize = True) M_ij -= 2 * lib.einsum('idfm,mjdf->ij',temp_t2_v_6, t2_1,optimize = True) del temp_t2_v_6 temp_t2_v_7 = lib.einsum('lide,lefm->idfm',t2_1, eris_ovvo,optimize=True) M_ij -= 2 * 
lib.einsum('idfm,jmdf->ij',temp_t2_v_7, t2_1,optimize = True) M_ij += lib.einsum('idfm,mjdf->ij',temp_t2_v_7, t2_1,optimize = True) del temp_t2_v_7 temp_t2_v_8 = lib.einsum('lmdf,lmde->fe',t2_1, t2_1,optimize=True) M_ij += 3 *lib.einsum('fe,jief->ij',temp_t2_v_8, eris_oovv, optimize = True) M_ij -= 1.5 *lib.einsum('fe,jfei->ij',temp_t2_v_8, eris_ovvo, optimize = True) M_ij += lib.einsum('ef,jief->ij',temp_t2_v_8, eris_oovv, optimize = True) M_ij -= 0.5 * lib.einsum('ef,jfei->ij',temp_t2_v_8, eris_ovvo, optimize = True) del temp_t2_v_8 temp_t2_v_9 = lib.einsum('lmdf,mlde->fe',t2_1, t2_1,optimize=True) M_ij -= 1.0 * lib.einsum('fe,jief->ij',temp_t2_v_9, eris_oovv, optimize = True) M_ij -= 1.0 * lib.einsum('ef,jief->ij',temp_t2_v_9, eris_oovv, optimize = True) M_ij += 0.5 * lib.einsum('fe,jfei->ij',temp_t2_v_9, eris_ovvo, optimize = True) M_ij += 0.5 * lib.einsum('ef,jfei->ij',temp_t2_v_9, eris_ovvo, optimize = True) del temp_t2_v_9 temp_t2_v_10 = lib.einsum('lnde,lmde->nm',t2_1, t2_1,optimize=True) M_ij -= 3.0 * lib.einsum('nm,jinm->ij',temp_t2_v_10, eris_oooo, optimize = True) M_ij -= 1.0 * lib.einsum('mn,jinm->ij',temp_t2_v_10, eris_oooo, optimize = True) M_ij += 1.5 * lib.einsum('nm,jmni->ij',temp_t2_v_10, eris_oooo, optimize = True) M_ij += 0.5 * lib.einsum('mn,jmni->ij',temp_t2_v_10, eris_oooo, optimize = True) del temp_t2_v_10 temp_t2_v_11 = lib.einsum('lnde,mlde->nm',t2_1, t2_1,optimize=True) M_ij += 1.0 * lib.einsum('nm,jinm->ij',temp_t2_v_11, eris_oooo, optimize = True) M_ij -= 0.5 * lib.einsum('nm,jmni->ij',temp_t2_v_11, eris_oooo, optimize = True) M_ij -= 0.5 * lib.einsum('mn,jmni->ij',temp_t2_v_11, eris_oooo, optimize = True) M_ij += 1.0 * lib.einsum('mn,jinm->ij',temp_t2_v_11, eris_oooo, optimize = True) del temp_t2_v_11 temp_t2_v_12 = lib.einsum('inde,lmde->inlm',t2_1, t2_1,optimize=True) M_ij += 1.25 * lib.einsum('inlm,jlnm->ij',temp_t2_v_12, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('lmin,jlnm->ij',temp_t2_v_12, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('inlm,jmnl->ij',temp_t2_v_12, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('lmin,jmnl->ij',temp_t2_v_12, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('inlm,jlnm->ji',temp_t2_v_12, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('inlm,lnmj->ji',temp_t2_v_12, eris_oooo, optimize = True) M_ij += 1.00 * lib.einsum('inlm,ljmn->ji',temp_t2_v_12, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('lmin,lnmj->ji',temp_t2_v_12, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('lmin,ljmn->ji',temp_t2_v_12, eris_oooo, optimize = True) del temp_t2_v_12 temp_t2_v_13 = lib.einsum('inde,mlde->inml',t2_1, t2_1,optimize=True) M_ij -= 0.25 * lib.einsum('inml,jlnm->ij',temp_t2_v_13, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('mlin,jlnm->ij',temp_t2_v_13, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('inml,jmnl->ij',temp_t2_v_13, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('mlin,jmnl->ij',temp_t2_v_13, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('inml,jlnm->ji',temp_t2_v_13, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('inml,lnmj->ji',temp_t2_v_13, eris_oooo, optimize = True) M_ij -= 0.25 * lib.einsum('inml,ljmn->ji',temp_t2_v_13, eris_oooo, optimize = True) M_ij += 0.25 * lib.einsum('inml,lnmj->ji',temp_t2_v_13, eris_oooo, optimize = True) del temp_t2_v_13 del t2_1 cput0 = log.timer_debug1("Completed M_ij ADC(n) calculation", *cput0) return M_ij def ea_adc_diag(adc,M_ab=None,eris=None): log = logger.Logger(adc.stdout, adc.verbose) if 
adc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(adc.method) method = adc.method if M_ab is None: M_ab = adc.get_imds() nocc = adc._nocc nvir = adc._nvir n_singles = nvir n_doubles = nocc * nvir * nvir dim = n_singles + n_doubles e_occ = adc.mo_energy[:nocc] e_vir = adc.mo_energy[nocc:] idn_occ = np.identity(nocc) idn_vir = np.identity(nvir) s1 = 0 f1 = n_singles s2 = f1 f2 = s2 + n_doubles d_ab = e_vir[:,None] + e_vir d_i = e_occ[:,None] D_n = -d_i + d_ab.reshape(-1) D_iab = D_n.reshape(-1) diag = np.zeros(dim) # Compute precond in p1-p1 block M_ab_diag = np.diagonal(M_ab) diag[s1:f1] = M_ab_diag.copy() # Compute precond in 2p1h-2p1h block diag[s2:f2] = D_iab del D_iab # ###### Additional terms for the preconditioner #### # # if (method == "adc(2)-x" or method == "adc(3)"): # # if eris is None: # eris = adc.transform_integrals() # # #TODO Implement this for out-of-core and density-fitted algorithms # if isinstance(eris.vvvv, np.ndarray): # # eris_oovv = eris.oovv # eris_ovvo = eris.ovvo # eris_vvvv = eris.vvvv # # temp = np.zeros((nocc, eris_vvvv.shape[0])) # temp[:] += np.diag(eris_vvvv) # diag[s2:f2] += temp.reshape(-1) # # eris_ovov_p = np.ascontiguousarray(eris_oovv[:].transpose(0,2,1,3)) # eris_ovov_p = eris_ovov_p.reshape(nocc*nvir, nocc*nvir) # # temp = np.zeros((nvir, nocc, nvir)) # temp[:] += np.diagonal(eris_ovov_p).reshape(nocc, nvir) # temp = np.ascontiguousarray(temp.transpose(1,0,2)) # diag[s2:f2] += -temp.reshape(-1) # # eris_ovov_p = np.ascontiguousarray(eris_oovv[:].transpose(0,2,1,3)) # eris_ovov_p = eris_ovov_p.reshape(nocc*nvir, nocc*nvir) # # temp = np.zeros((nvir, nocc, nvir)) # temp[:] += np.diagonal(eris_ovov_p).reshape(nocc, nvir) # temp = np.ascontiguousarray(temp.transpose(1,2,0)) # diag[s2:f2] += -temp.reshape(-1) # else : # raise Exception("Precond not available for out-of-core and density-fitted algo") log.timer_debug1("Completed ea_diag calculation") return diag def ip_adc_diag(adc,M_ij=None,eris=None): log = logger.Logger(adc.stdout, adc.verbose) if adc.method not in ("adc(2)", "adc(2)-x", "adc(3)"): raise NotImplementedError(adc.method) method = adc.method if M_ij is None: M_ij = adc.get_imds() nocc = adc._nocc nvir = adc._nvir n_singles = nocc n_doubles = nvir * nocc * nocc dim = n_singles + n_doubles e_occ = adc.mo_energy[:nocc] e_vir = adc.mo_energy[nocc:] idn_occ = np.identity(nocc) idn_vir = np.identity(nvir) s1 = 0 f1 = n_singles s2 = f1 f2 = s2 + n_doubles d_ij = e_occ[:,None] + e_occ d_a = e_vir[:,None] D_n = -d_a + d_ij.reshape(-1) D_aij = D_n.reshape(-1) diag = np.zeros(dim) # Compute precond in h1-h1 block M_ij_diag = np.diagonal(M_ij) diag[s1:f1] = M_ij_diag.copy() # Compute precond in 2p1h-2p1h block diag[s2:f2] = D_aij.copy() # ###### Additional terms for the preconditioner #### # if (method == "adc(2)-x" or method == "adc(3)"): # # if eris is None: # eris = adc.transform_integrals() # # if isinstance(eris.vvvv, np.ndarray): # # eris_oooo = eris.oooo # eris_oovv = eris.oovv # eris_ovvo = eris.ovvo # # eris_oooo_p = np.ascontiguousarray(eris_oooo.transpose(0,2,1,3)) # eris_oooo_p = eris_oooo_p.reshape(nocc*nocc, nocc*nocc) # # temp = np.zeros((nvir, eris_oooo_p.shape[0])) # temp[:] += np.diag(eris_oooo_p) # diag[s2:f2] += -temp.reshape(-1) # # eris_ovov_p = np.ascontiguousarray(eris_oovv.transpose(0,2,1,3)) # eris_ovov_p = eris_ovov_p.reshape(nocc*nvir, nocc*nvir) # # temp = np.zeros((nocc, nocc, nvir)) # temp[:] += np.diagonal(eris_ovov_p).reshape(nocc, nvir) # temp = np.ascontiguousarray(temp.transpose(2,1,0)) # 
diag[s2:f2] += temp.reshape(-1) # # eris_ovov_p = np.ascontiguousarray(eris_oovv.transpose(0,2,1,3)) # eris_ovov_p = eris_ovov_p.reshape(nocc*nvir, nocc*nvir) # # temp = np.zeros((nocc, nocc, nvir)) # temp[:] += np.diagonal(eris_ovov_p).reshape(nocc, nvir) # temp = np.ascontiguousarray(temp.transpose(2,0,1)) # diag[s2:f2] += temp.reshape(-1) # else : # raise Exception("Precond not available for out-of-core and density-fitted algo") diag = -diag log.timer_debug1("Completed ea_diag calculation") return diag def ea_contract_r_vvvv(myadc,r2,vvvv): nocc = myadc._nocc nvir = myadc._nvir r2_vvvv = np.zeros((nocc,nvir,nvir)) r2 = np.ascontiguousarray(r2.reshape(nocc,-1)) chnk_size = radc_ao2mo.calculate_chunk_size(myadc) a = 0 if isinstance(vvvv, list): for dataset in vvvv: k = dataset.shape[0] dataset = dataset[:].reshape(-1,nvir*nvir) r2_vvvv[:,a:a+k] = np.dot(r2,dataset.T).reshape(nocc,-1,nvir) del dataset a += k elif getattr(myadc, 'with_df', None): for p in range(0,nvir,chnk_size): vvvv_p = dfadc.get_vvvv_df(myadc, vvvv, p, chnk_size) k = vvvv_p.shape[0] vvvv_p = vvvv_p.reshape(-1,nvir*nvir) r2_vvvv[:,a:a+k] =
np.dot(r2,vvvv_p.T)
numpy.dot
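# Illustrative sketch of the blocked (vv|vv) contraction used by
# contract_ladder / ea_contract_r_vvvv above: the four-index integral block is
# applied to the amplitudes chunk by chunk along the first virtual index, so the
# full nvir**4 tensor never has to sit in memory at once. The shapes, chunk size
# and variable names below are assumptions chosen for illustration only; this is
# not pyscf's actual API.
import numpy as np

nocc, nvir, chunk = 4, 8, 3
t_amp = np.random.rand(nocc * nocc, nvir * nvir)   # amplitudes flattened to a matrix
vvvv = np.random.rand(nvir, nvir, nvir, nvir)      # toy-sized integral tensor

out = np.zeros((nvir, nvir, nocc * nocc))
for a in range(0, nvir, chunk):
    block = vvvv[a:a + chunk].reshape(-1, nvir * nvir)               # (k*nvir, nvir*nvir) slice
    out[a:a + chunk] = block.dot(t_amp.T).reshape(-1, nvir, nocc * nocc)

# one-shot reference contraction to check the blocked loop
ref = vvvv.reshape(nvir * nvir, nvir * nvir).dot(t_amp.T).reshape(nvir, nvir, -1)
assert np.allclose(out, ref)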
# Python libraries import cv2 import numpy as np import math # Global Variable COLORS = dict() def __add_color(label): if label not in COLORS.keys(): COLORS[label] = np.random.uniform(0, 255, size=(1, 3)).flatten() def draw_keypoints(image, kp): """ Function to draw keypoints on the image. INPUT image(numpy.ndarray): Numpy image. This is the complete image with object to be detected. kp : Keypoints to be plotted on the image RETURN Returns the image with the keypoints plotted on the image. """ # Draw Key points return cv2.drawKeypoints(image,kp ,image, flags=cv2.DRAW_MATCHES_FLAGS_DRAW_RICH_KEYPOINTS) def draw_detections(image, bbox, **kwargs): """ Function to draw the rectangle, label and mask on the image. INPUT image(numpy.ndarray): Image on which the info is drawn. bbox(numpy.array) : 4 corners of the bounding box on the image KWARGS Elements obj_label(str) : Label to put on the top of the image. confidence(float) : Detection confidence from the object detection model. mask(numpy.array) : Array of the mask directions(float) : heading direction of the object id(int): : object ID used for tracking color(tuple) : color of the box RETURN <numpy.ndarray> Image with the bbox and label on the image """ clone = image.copy() # Choose a random color if "obj_label" not in kwargs: obj_label= "_" else: obj_label = kwargs["obj_label"] __add_color(obj_label) # Detection parameters (startX, startY, endX, endY) = bbox # Put rectangle around the objects detected if "color" not in kwargs: cv2.rectangle(clone, (startX, startY), (endX, endY), COLORS[obj_label], 2) else: cv2.rectangle(clone, (startX, startY), (endX, endY), kwargs["color"], 2) # Put label and confidence y = startY - 10 if startY - 10 > 10 else startY + 10 label = "{}".format(obj_label) if "confidence" in kwargs: label += " {:.2f}%".format(kwargs["confidence"]) if "direction" in kwargs: label += " {}".format(kwargs["directions"]) + u"\N{DEGREE SIGN}" cv2.putText(clone, label, (startX, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 2) # If mask is there then put mask also in the image if "mask" in kwargs: mask = kwargs["mask"] # Put mask around the detection roi = clone[startY:endY, startX:endX] roi = roi[mask] # Color for the mask blended = ((0.4 * COLORS[obj_label]) + (0.6 * roi)).astype("uint8") clone[startY:endY, startX:endX][mask] = blended return clone def draw_compare_features(image1, image2, kp1, kp2, goodmatches): """ Function to draw matching keypoints on the two images. INPUT image1(numpy.ndarray): First Image on which the info is drawn. image2(numpy.ndarray): Second Image on which the info is drawn. kp1(list) : List of Keypoints for image 1 kp2(list) : List of Keypoints for image 2. goodmatches(list) : List of good matches of keypoints between the two images. RETURN <numpy.ndarray> 2 images, stiched together with mathces marked. """ # Drawing parameters draw_params = dict(matchColor = (0,255,0), singlePointColor = (255,0,0), flags = cv2.DrawMatchesFlags_DEFAULT) return cv2.drawMatchesKnn(image1,kp1,image2,kp2,goodmatches,None,**draw_params) def get_videotimestamp(cameraCapture, ret_type="str"): """ Function to get the timestamps of the video. INPUT cameraCapture(<class 'cv2.VideoCapture'>): Video capture object for the video currenty read. ret_type(str): Return Type. Defines the type of return ("str", "datetime") RETURN <str> or <datetime.datetime> Current timestamp of the video either in string or datetime format, based on the type. 
""" import datetime seconds = 0 minutes = 0 hours = 0 milliseconds = cameraCapture.get(cv2.CAP_PROP_POS_MSEC) seconds = milliseconds//1000 milliseconds = milliseconds%1000 if seconds >= 60: minutes = seconds//60 seconds = seconds % 60 if minutes >= 60: hours = minutes//60 minutes = minutes % 60 if milliseconds < 10: ts = "{}:{}:{}.00{}".format(int(hours), int(minutes), int(seconds), int(milliseconds)) elif milliseconds < 100: ts = "{}:{}:{}.0{}".format(int(hours), int(minutes), int(seconds), int(milliseconds)) else: ts = "{}:{}:{}.{}".format(int(hours), int(minutes), int(seconds), int(milliseconds)) if ret_type=="str": return ts elif ret_type=="datetime": return datetime.datetime.strptime(ts, "%H:%M:%S.%f") else: raise ValueError("Choose either str or datetime as ret_type") def pp_detectionlist(dectList): """ Function to Pretty Print (PP) detection from single image. INPUT dectList(list): Output the list of dictonary with {label, confidence, box} """ for detection in dectList: obj = detection["label"] confidence = detection["confidence"] print("[DETECTED] {}: {:.2f}".format(obj, confidence)) def draw_metadata(image, **kwargs): """ Function to put textual data on the bottom of the screen INPUT image(numpy.ndarray): Image on which the info is drawn. kwargs(dict): Data on the image """ (height, width, channel) = image.shape # loop over the info tuples and draw them on our frame itr = 0 for k, v in kwargs.items(): text = "{}: {}".format(k, v) cv2.putText(image, text, (10, height - ((itr * 30) + 20)), cv2.FONT_HERSHEY_SIMPLEX, 1, (0, 0, 255), 2) itr += 1 def draw_marker(image): """ Function to draw XY-axis in image plane on the image. INPUT image(numpy.ndarray): Image on which the info is drawn. """ height, width, channel = image.shape # Center of image cx = int(width/2) cy = int(height/2) # Draw Center of the Image cv2.circle(image, (cx, cy), 5, (0, 0, 255), -1) # Draw line cv2.line(image, (cx, 0), (cx, height), (255, 0, 0), 1) cv2.line(image, (0, cy), (width, cy), (255, 0, 0), 1) # Put Coordinates cv2.putText(image, "+X", (cx-50, 30), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1) cv2.putText(image, "+Y", (10, cy), cv2.FONT_HERSHEY_SIMPLEX, 1, (255, 0, 0), 1) def read_srt(filepath: str) -> list: """Function to read the meta data from OpenCamera .srt files with location and timestamp data INPUT: filepath(str): Path for file to read. RETURN: List of dictonary with keys start_time: Start time of the data acq for video end_time: Start time of the data acq for video date: Timestamp of video lat: Latitude in degree lng: Longitude in degree alt: Altitude above sea-level in meters heading: Heading angle from North in degree """ import datetime import re # Return list metaData = list() # Check if the file exits import os if os.path.isfile(filepath): pass else: raise Exception("File does not exits!") with open(filepath, "r") as f: data = f.readlines() counter = 1 for itr, line in enumerate(data): try: if int(line) == counter: counter += 1 loc = re.split('[°\'\"\,\ ]', data[itr+3]) s_time = data[itr+1].split("-->")[0].split(" ")[0] e_time = data[itr+1].split("-->")[1].split("\n")[0] metaData.append(dict(start_time= datetime.datetime.strptime(s_time, "%H:%M:%S,%f"), end_time = datetime.datetime.strptime(e_time, " %H:%M:%S,%f"), date = data[itr+2].split("\n")[0], lat = float(int(loc[:3][0]) + int(loc[:3][1])/60 + int(loc[:3][2])/3600), lng = float(int(loc[5:8][0]) + int(loc[5:8][1])/60 + int(loc[5:8][2])/3600), alt = loc[10], heading = loc[12] ) ) except ValueError: pass return metaData # Malisiewicz et al. 
def non_max_suppression_fast(detections, overlapThresh): boxes = list() for detect in detections: boxes.append(detect["bbox"]) boxes = np.array(boxes, dtype="float") # if there are no boxes, return an empty list if len(boxes) == 0: return [] # initialize the list of picked indexes pick = [] # grab the coordinates of the bounding boxes x1 = boxes[:,0] y1 = boxes[:,1] x2 = boxes[:,2] y2 = boxes[:,3] # compute the area of the bounding boxes and sort the bounding # boxes by the bottom-right y-coordinate of the bounding box area = (x2 - x1 + 1) * (y2 - y1 + 1) idxs = np.argsort(y2) # keep looping while some indexes still remain in the indexes # list while len(idxs) > 0: # grab the last index in the indexes list and add the # index value to the list of picked indexes last = len(idxs) - 1 i = idxs[last] pick.append(i) # find the largest (x, y) coordinates for the start of # the bounding box and the smallest (x, y) coordinates # for the end of the bounding box xx1 = np.maximum(x1[i], x1[idxs[:last]]) yy1 = np.maximum(y1[i], y1[idxs[:last]]) xx2 = np.minimum(x2[i], x2[idxs[:last]]) yy2 = np.minimum(y2[i], y2[idxs[:last]]) # compute the width and height of the bounding box w = np.maximum(0, xx2 - xx1 + 1) h = np.maximum(0, yy2 - yy1 + 1) # compute the ratio of overlap overlap = (w * h) / area[idxs[:last]] # delete all indexes from the index list that have idxs = np.delete(idxs, np.concatenate(([last], np.where(overlap > overlapThresh)[0]))) # return only the bounding boxes that were picked using the return [detections[i] for i in pick] def draw_polygon(frame, pts, color=None): # Polygon Box bbox = np.array(pts, np.int32) bbox = bbox.reshape((-1,1,2)) if color is None: cv2.polylines(frame, [bbox], True, (0, 0, 255), 1) else: cv2.polylines(frame, [bbox], True, color ,1) def draw_line(frame, pts): # Line point1 = pts[0] point2 = pts[1] cv2.line(frame, point1, point2, (0, 0, 255), 1) def get_fps(video): # Find OpenCV version (major_ver, minor_ver, subminor_ver) = (cv2.__version__).split('.') # With webcam get(CV_CAP_PROP_FPS) does not work. if int(major_ver) < 3 : return video.get(cv2.cv.CV_CAP_PROP_FPS) else : return video.get(cv2.CAP_PROP_FPS) def get_number_of_frames(video): video.set(cv2.CAP_PROP_POS_AVI_RATIO,1) num_of_frames = video.get(cv2.CAP_PROP_POS_FRAMES) video.set(cv2.CAP_PROP_POS_AVI_RATIO,0) assert video.get(cv2.CAP_PROP_POS_FRAMES) == float(0); "Some error in getting number of frames!" return num_of_frames def distance_from_camera(bbox, image_shape, real_life_size): """ Calculates the distance of the object from the camera. 
PARMS bbox: Bounding box [px] image_shape: Size of the image (width, height) [px] real_life_size: Height of the object in real world [cms] """ ## REFERENCE FOR GOPRO # Focal Length and Image Size # https://clicklikethis.com/gopro-sensor-size/ # https://gethypoxic.com/blogs/technical/gopro-hero9-teardown # https://www.sony-semicon.co.jp/products/common/pdf/IMX677-AAPH5_AAPJ_Flyer.pdf # http://photoseek.com/2013/compare-digital-camera-sensor-sizes-full-frame-35mm-aps-c-micro-four-thirds-1-inch-type/ # https://www.gophotonics.com/products/cmos-image-sensors/sony-corporation/21-209-imx677 # Camera Sensor: Sony IMX677 # Camera Sensor array pixel size: 1.12[um] X 1.12[um] # Camera Resolution: 5663(H) X 4223(V) # Camera Sensor dimensions: 6.343[mm/H] X 4.730[mm/V] # Camera Focal Length: 2.92 mm # 5633(px) # 4 ------------------- # 2 - - # 2 - - # 3 - - # (p - - # x) - - # ------------------- # REFERNCE FOR CALCULATION # https://www.scantips.com/lights/subjectdistance.html # GoPro Intrensic Camera Settings # ################################### focal_length_mm = 5.21 unit_pixel_length = 1.12 sen_res = (5663, 4223) sensor_height_mm = (unit_pixel_length*sen_res[1])/1000 sensor_width_mm = (unit_pixel_length*sen_res[0])/1000 ################################### # Calculation image_height_px = image_shape[0] image_width_px = image_shape[1] (startX, startY, endX, endY) = bbox height_of_object_px = endY - startY width_of_object_px = endX - startX obj_height_on_sensor_mm = (sensor_height_mm * height_of_object_px) / image_height_px return (real_life_size * focal_length_mm)/obj_height_on_sensor_mm def lat_lng_from_camera(origin, heading_angle, distance, EARTH_RADIUS_KMS = 6378.1): """ Calculates the latitude and longitude (in degree) of the object from the camera. PARMS origin: Origin point in (lat, lng) heading_angle: Heading angle of the object from the camera distance: Distance of the object from camera [kms] EARTH_RADIUS_KMS: Radius of earth in kms """ lat1 = math.radians(origin[0]) lng1 = math.radians(origin[1]) brng = math.radians(heading_angle) lat2 = math.asin(math.sin(lat1)*math.cos(distance/EARTH_RADIUS_KMS) + math.cos(lat1)*math.sin(distance/EARTH_RADIUS_KMS)*math.cos(brng)) lng2 = lng1 + math.atan2(math.sin(brng)*math.sin(distance/EARTH_RADIUS_KMS)*math.cos(lat1), math.cos(distance/EARTH_RADIUS_KMS)-math.sin(lat1)*math.sin(lat2)) lat2 = math.degrees(lat2) lng2 = math.degrees(lng2) return (lat2, lng2) def get_latlon(image, detection, origin, heading_angle, real_life_size=150): # Distance in cms distance = distance_from_camera(detection["bbox"], image.shape, real_life_size) # Latitude and logitude of object return lat_lng_from_camera(origin, heading_angle, distance/100000) def heading(start_point, end_point): # Reference: # https://www.igismap.com/formula-to-find-bearing-or-heading-angle-between-two-points-latitude-longitude/ # https://www.movable-type.co.uk/scripts/latlong.html # Convert all the points to start_point["lat"] = math.radians(start_point["lat"]) start_point["lon"] = math.radians(start_point["lon"]) end_point["lat"] = math.radians(end_point["lat"]) end_point["lon"] = math.radians(end_point["lon"]) dL = end_point["lon"]-start_point["lon"] Y = (math.cos(start_point["lat"])*math.sin(end_point["lat"])) - (math.sin(start_point["lat"])*math.cos(end_point["lat"])*math.cos(dL)) X = math.cos(end_point["lat"])*math.sin(dL) return math.degrees(math.atan2(X, Y)) # in degrees def heading_from_camera(bbox, image_shape): """ Calculates the heading angle (in degree) of the object from the camera. 
PARMS bbox: Bounding box [px] image_shape: Size of the image (width, height) [px] """ # GoPro Intrensic Camera Settings # ################################### focal_length_mm = 5.21 unit_pixel_length = 1.12 sen_res = (5663, 4223) sensor_height_mm = (unit_pixel_length*sen_res[1])/1000 sensor_width_mm = (unit_pixel_length*sen_res[0])/1000 ################################### # Image Center (cX, cY) = image_shape[1]/2, image_shape[0]/2 # Object Center (startX, startY, endX, endY) = bbox (centerX, centerY) = (startX+endX)/2, (startY+endY)/2 # Distance between the two points distance = math.sqrt((centerX - cX)**2 + (centerY - cY)**2) # Focal Length in px img_width_px = image_shape[1] f_px = (focal_length_mm * img_width_px)/ (sensor_width_mm) # Heading Angle angle = math.degrees(math.asin(distance/f_px)) if centerX > cX: return angle else: return -angle def distance_btw_lalon_points(lat1, lon1, lat2, lon2): # Distance between two lat/long points # Using Haversine formula # https://stackoverflow.com/questions/27928/calculate-distance-between-two-latitude-longitude-points-haversine-formula lat1, lon1 = np.radians(lat1), np.radians(lon1) lat2, lon2 = np.radians(lat2), np.radians(lon2) R = 6371 dLat = lat2-lat1 dLon = lon2-lon1 a = np.sin(dLat/2) *np.sin(dLat/2) + np.cos(lat1)*
np.cos(lat2)
numpy.cos
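# Illustrative usage sketch for the geolocation helpers defined above, in the
# spirit of get_latlon: estimate the range from the bounding-box height with the
# pinhole model, take a bearing from the horizontal offset of the box centre
# (in a real pipeline the camera's own compass heading would be added to this
# offset), then project a destination lat/lon from the camera position. Every
# number below (frame size, box, camera position, 150 cm object height) is a
# made-up assumption used only to show the call order.
example_bbox = (900, 400, 1020, 760)     # (startX, startY, endX, endY) in px
example_shape = (2160, 3840, 3)          # (height, width, channels) of the frame
camera_latlon = (52.5200, 13.4050)       # assumed camera position (lat, lon)

range_cm = distance_from_camera(example_bbox, example_shape, real_life_size=150)
bearing_deg = heading_from_camera(example_bbox, example_shape)
object_latlon = lat_lng_from_camera(camera_latlon, bearing_deg, range_cm / 100000.0)
print(range_cm / 100.0, "m at", bearing_deg, "deg ->", object_latlon)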
from keras.models import Sequential, Model
from keras.utils import plot_model
import numpy as np
import scipy
import sys
import argparse
from random import randint, uniform
import time
import matplotlib.pyplot as plt
import tensorflow as t
import random
import cv2
import dlib
from imutils.face_utils import rect_to_bb, shape_to_np
from keras_preprocessing.image import load_img
from PIL import ImageFilter
# -----------------------------------------------------------------------------------------------
# import the essential functions required for computation
from cnn_networks.SLNET_A_GAP_disp_dual_inp_A import cnn_hybrid_color_single
from ess_func import read_pairs, sample_people, prewhiten, store_loss, hog_to_tensor, custom_loss
import os

detector = dlib.get_frontal_face_detector()
face_cascade = cv2.CascadeClassifier('/home/yaurehman2/virtualenv-py2/opencv/opencv-3.3.0/data/haarcascades_cuda/haarcascade_frontalface_default.xml')

def format(value):
    return "%.3f" % value

def store_predictions(file_path, prediction, label):
    with open(file_path, 'a+') as f:
        f.write("%10.4e \t %10.4e \t %10.4e \t %10.4e" % (prediction[0][0], prediction[0][1], prediction[0][2], prediction[0][3]))
        f.write("\t")
        f.write(str(label))
        f.write("\n")

def store_predictions_binary(file_path, prediction, label):
    with open(file_path, 'a+') as f:
        f.write("%10.4e \t %10.4e" % (prediction[0][0], prediction[0][1]))
        f.write("\t")
        f.write(str(label))
        f.write("\n")

accuracy = []

def main(args):
    nrf_fp = 0
    img_rows = args.img_rows
    img_cols = args.img_cols
    img_dim_color = args.img_channels

    model_alex = cnn_hybrid_color_single(img_rows, img_cols, img_dim_color)
    model_final = model_alex
    # model_final = Model(model_alex.input, model_alex.output)
    model_final.load_weights(args.weights_path)

    test_pairs, test_data_r, test_labels_r = read_pairs(args.tst_img_lab_r)  # read the test data
    test_pairs, test_data_l, test_labels_l = read_pairs(args.tst_img_lab_l)  # read the test data
    assert len(test_data_r) == len(test_data_l)
    assert len(test_labels_r) == len(test_labels_l)

    # Generating the test samples
    images_sampled = []  # images to be sampled from the data-set
    labels_sampled = []  # sampled labels
    len_test = len(test_data_r)  # get the length of the test data
    indices_test =
np.arange(len_test)
numpy.arange
#!/usr/bin/python from __future__ import division from __future__ import print_function import sys import os import re import datetime import zipfile import tempfile import argparse import math import warnings import json import csv import numpy as np import scipy.stats as scp from lxml import etree as et def get_rdml_lib_version(): """Return the version string of the RDML library. Returns: The version string of the RDML library. """ return "1.0.0" class NpEncoder(json.JSONEncoder): def default(self, obj): if isinstance(obj, np.integer): return int(obj) elif isinstance(obj, np.floating): return float(obj) elif isinstance(obj, np.bool_): return bool(obj) elif isinstance(obj, np.ndarray): return obj.tolist() else: return super(NpEncoder, self).default(obj) class RdmlError(Exception): """Basic exception for errors raised by the RDML-Python library""" def __init__(self, message): Exception.__init__(self, message) pass class secondError(RdmlError): """Just to have, not used yet""" def __init__(self, message): RdmlError.__init__(self, message) pass def _get_first_child(base, tag): """Get a child element of the base node with a given tag. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) Returns: The first child lxml node element found or None. """ for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: return node return None def _get_first_child_text(base, tag): """Get a child element of the base node with a given tag. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) Returns: The text of first child node element found or an empty string. """ for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: return node.text return "" def _get_first_child_bool(base, tag, triple=True): """Get a child element of the base node with a given tag. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) triple: If True, None is returned if not found, if False, False Returns: The a bool value of tag or if triple is True None. """ for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: return _string_to_bool(node.text, triple) if triple is False: return False else: return None def _get_step_sort_nr(elem): """Get the number of the step eg. for sorting. Args: elem: The node element. (lxml node) Returns: The a int value of the step node nr. """ if elem is None: raise RdmlError('A step element must be provided for sorting.') ret = _get_first_child_text(elem, "nr") if ret == "": raise RdmlError('A step element must have a \"nr\" element for sorting.') return int(ret) def _sort_list_int(elem): """Get the first element of the array as int. for sorting. Args: elem: The 2d list Returns: The a int value of the first list element. """ return int(elem[0]) def _sort_list_float(elem): """Get the first element of the array as float. for sorting. Args: elem: The 2d list Returns: The a float value of the first list element. """ return float(elem[0]) def _sort_list_digital_PCR(elem): """Get the first column of the list as int. for sorting. Args: elem: The list Returns: The a int value of the first list element. """ arr = elem.split("\t") return int(arr[0]), arr[4] def _string_to_bool(value, triple=True): """Translates a string into bool value or None. Args: value: The string value to evaluate. 
(string) triple: If True, None is returned if not found, if False, False Returns: The a bool value of tag or if triple is True None. """ if value is None or value == "": if triple is True: return None else: return False if type(value) is bool: return value if type(value) is int: if value != 0: return True else: return False if type(value) is str: if value.lower() in ['false', '0', 'f', '-', 'n', 'no']: return False else: return True return def _value_to_booldic(value): """Translates a string, list or dic to a dictionary with true/false. Args: value: The string value to evaluate. (string) Returns: The a bool value of tag or if triple is True None. """ ret = {} if type(value) is str: ret[value] = True if type(value) is list: for ele in value: ret[ele] = True if type(value) is dict: for key, val in value.items(): ret[key] = _string_to_bool(val, triple=False) return ret def _get_first_child_by_pos_or_id(base, tag, by_id, by_pos): """Get a child element of the base node with a given tag and position or id. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) by_id: The unique id to search for. (string) by_pos: The position of the element in the list (int) Returns: The child node element found or raise error. """ if by_id is None and by_pos is None: raise RdmlError('Either an ' + tag + ' id or a position must be provided.') if by_id is not None and by_pos is not None: raise RdmlError('Only an ' + tag + ' id or a position can be provided.') allChildren = _get_all_children(base, tag) if by_id is not None: for node in allChildren: if node.get('id') == by_id: return node raise RdmlError('The ' + tag + ' id: ' + by_id + ' was not found in RDML file.') if by_pos is not None: if by_pos < 0 or by_pos > len(allChildren) - 1: raise RdmlError('The ' + tag + ' position ' + by_pos + ' is out of range.') return allChildren[by_pos] def _add_first_child_to_dic(base, dic, opt, tag): """Adds the first child element with a given tag to a dictionary. Args: base: The base node element. (lxml node) dic: The dictionary to add the element to (dictionary) opt: If false and id is not found in base, the element is added with an empty string (Bool) tag: Child elements group tag used to select the elements. (string) Returns: The dictionary with the added element. """ for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: dic[tag] = node.text return dic if not opt: dic[tag] = "" return dic def _get_all_children(base, tag): """Get a list of all child elements with a given tag. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) Returns: A list with all child node elements found or an empty list. """ ret = [] for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: ret.append(node) return ret def _get_all_children_id(base, tag): """Get a list of ids of all child elements with a given tag. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) Returns: A list with all child id strings found or an empty list. """ ret = [] for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: ret.append(node.get('id')) return ret def _get_number_of_children(base, tag): """Count all child elements with a given tag. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) Returns: A int number of the found child elements with the id. 
""" counter = 0 for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: counter += 1 return counter def _check_unique_id(base, tag, id): """Find all child elements with a given group and check if the id is already used. Args: base: The base node element. (lxml node) tag: Child elements group tag used to select the elements. (string) id: The unique id to search for. (string) Returns: False if the id is already used, True if not. """ for node in base: if node.tag.replace("{http://www.rdml.org}", "") == tag: if node.get('id') == id: return False return True def _create_new_element(base, tag, id): """Create a new element with a given tag and id. Args: base: The base node element. (lxml node) tag: Child elements group tag. (string) id: The unique id of the new element. (string) Returns: False if the id is already used, True if not. """ if id is None or id == "": raise RdmlError('An ' + tag + ' id must be provided.') if not _check_unique_id(base, tag, id): raise RdmlError('The ' + tag + ' id "' + id + '" must be unique.') return et.Element(tag, id=id) def _add_new_subelement(base, basetag, tag, text, opt): """Create a new element with a given tag and id. Args: base: The base node element. (lxml node) basetag: Child elements group tag. (string) tag: Child elements own tag, to be created. (string) text: The text content of the new element. (string) opt: If true, the element is optional (Bool) Returns: Nothing, the base lxml element is modified. """ if opt is False: if text is None or text == "": raise RdmlError('An ' + basetag + ' ' + tag + ' must be provided.') et.SubElement(base, tag).text = text else: if text is not None and text != "": et.SubElement(base, tag).text = text def _change_subelement(base, tag, xmlkeys, value, opt, vtype, id_as_element=False): """Change the value of the element with a given tag. Args: base: The base node element. (lxml node) tag: Child elements own tag, to be created. (string) xmlkeys: The list of possible keys in the right order for xml (list strings) value: The text content of the new element. opt: If true, the element is optional (Bool) vtype: If true, the element is optional ("string", "int", "float") id_as_element: If true, handle tag "id" as element, else as attribute Returns: Nothing, the base lxml element is modified. """ # Todo validate values with vtype goodVal = value if vtype == "bool": ev = _string_to_bool(value, triple=True) if ev is None or ev == "": goodVal = "" else: if ev: goodVal = "true" else: goodVal = "false" if opt is False: if goodVal is None or goodVal == "": raise RdmlError('A value for ' + tag + ' must be provided.') if tag == "id" and id_as_element is False: if base.get('id') != goodVal: par = base.getparent() groupTag = base.tag.replace("{http://www.rdml.org}", "") if not _check_unique_id(par, groupTag, goodVal): raise RdmlError('The ' + groupTag + ' id "' + goodVal + '" is not unique.') base.attrib['id'] = goodVal return # Check if the tag already excists elem = _get_first_child(base, tag) if elem is not None: if goodVal is None or goodVal == "": base.remove(elem) else: elem.text = goodVal else: if goodVal is not None and goodVal != "": new_node = et.Element(tag) new_node.text = goodVal place = _get_tag_pos(base, tag, xmlkeys, 0) base.insert(place, new_node) def _get_or_create_subelement(base, tag, xmlkeys): """Get element with a given tag, if not present, create it. Args: base: The base node element. (lxml node) tag: Child elements own tag, to be created. 
(string) xmlkeys: The list of possible keys in the right order for xml (list strings) Returns: The node element with the tag. """ # Check if the tag already excists if _get_first_child(base, tag) is None: new_node = et.Element(tag) place = _get_tag_pos(base, tag, xmlkeys, 0) base.insert(place, new_node) return _get_first_child(base, tag) def _remove_irrelevant_subelement(base, tag): """If element with a given tag has no children, remove it. Args: base: The base node element. (lxml node) tag: Child elements own tag, to be created. (string) Returns: The node element with the tag. """ # Check if the tag already excists elem = _get_first_child(base, tag) if elem is None: return if len(elem) == 0: base.remove(elem) def _move_subelement(base, tag, id, xmlkeys, position): """Change the value of the element with a given tag. Args: base: The base node element. (lxml node) tag: The id to search for. (string) id: The unique id of the new element. (string) xmlkeys: The list of possible keys in the right order for xml (list strings) position: the new position of the element (int) Returns: Nothing, the base lxml element is modified. """ pos = _get_tag_pos(base, tag, xmlkeys, position) ele = _get_first_child_by_pos_or_id(base, tag, id, None) base.insert(pos, ele) def _move_subelement_pos(base, tag, oldpos, xmlkeys, position): """Change the value of the element with a given tag. Args: base: The base node element. (lxml node) tag: The id to search for. (string) oldpos: The unique id of the new element. (string) xmlkeys: The list of possible keys in the right order for xml (list strings) position: the new position of the element (int) Returns: Nothing, the base lxml element is modified. """ pos = _get_tag_pos(base, tag, xmlkeys, position) ele = _get_first_child_by_pos_or_id(base, tag, None, oldpos) base.insert(pos, ele) def _get_tag_pos(base, tag, xmlkeys, pos): """Returns a position were to add a subelement with the given tag inc. pos offset. Args: base: The base node element. (lxml node) tag: The id to search for. (string) xmlkeys: The list of possible keys in the right order for xml (list strings) pos: The position relative to the tag elements (int) Returns: The int number of were to add the element with the tag. """ count = _get_number_of_children(base, tag) offset = pos if pos is None or pos < 0: offset = 0 pos = 0 if pos > count: offset = count return _get_first_tag_pos(base, tag, xmlkeys) + offset def _get_first_tag_pos(base, tag, xmlkeys): """Returns a position were to add a subelement with the given tag. Args: base: The base node element. (lxml node) tag: The id to search for. (string) xmlkeys: The list of possible keys in the right order for xml (list strings) Returns: The int number of were to add the element with the tag. """ listrest = xmlkeys[xmlkeys.index(tag):] counter = 0 for node in base: if node.tag.replace("{http://www.rdml.org}", "") in listrest: return counter counter += 1 return counter def _writeFileInRDML(rdmlName, fileName, data): """Writes a file in the RDML zip, even if it existed before. Args: rdmlName: The name of the RDML zip file fileName: The name of the file to write into the zip data: The data string of the file Returns: Nothing, modifies the RDML file. 
""" needRewrite = False if os.path.isfile(rdmlName): with zipfile.ZipFile(rdmlName, 'r') as RDMLin: for item in RDMLin.infolist(): if item.filename == fileName: needRewrite = True if needRewrite: tempFolder, tempName = tempfile.mkstemp(dir=os.path.dirname(rdmlName)) os.close(tempFolder) # copy everything except the filename with zipfile.ZipFile(rdmlName, 'r') as RDMLin: with zipfile.ZipFile(tempName, mode='w', compression=zipfile.ZIP_DEFLATED) as RDMLout: RDMLout.comment = RDMLin.comment for item in RDMLin.infolist(): if item.filename != fileName: RDMLout.writestr(item, RDMLin.read(item.filename)) if data != "": RDMLout.writestr(fileName, data) os.remove(rdmlName) os.rename(tempName, rdmlName) else: with zipfile.ZipFile(rdmlName, mode='a', compression=zipfile.ZIP_DEFLATED) as RDMLout: RDMLout.writestr(fileName, data) def _lrp_linReg(xIn, yUse): """A function which calculates the slope or the intercept by linear regression. Args: xIn: The numpy array of the cycles yUse: The numpy array that contains the fluorescence Returns: An array with the slope and intercept. """ counts = np.ones(yUse.shape) xUse = xIn.copy() xUse[np.isnan(yUse)] = 0 counts[np.isnan(yUse)] = 0 cycSqared = xUse * xUse cycFluor = xUse * yUse sumCyc = np.nansum(xUse, axis=1) sumFluor = np.nansum(yUse, axis=1) sumCycSquared = np.nansum(cycSqared, axis=1) sumCycFluor = np.nansum(cycFluor, axis=1) n = np.nansum(counts, axis=1) ssx = sumCycSquared - (sumCyc * sumCyc) / n sxy = sumCycFluor - (sumCyc * sumFluor) / n slope = sxy / ssx intercept = (sumFluor / n) - slope * (sumCyc / n) return [slope, intercept] def _lrp_findStopCyc(fluor, aRow): """Find the stop cycle of the log lin phase in fluor. Args: fluor: The array with the fluorescence values aRow: The row to work on Returns: An int with the stop cycle. 
""" # Take care of nan values validTwoLessCyc = 3 # Cycles so +1 to array while (validTwoLessCyc <= fluor.shape[1] and (np.isnan(fluor[aRow, validTwoLessCyc - 1]) or np.isnan(fluor[aRow, validTwoLessCyc - 2]) or np.isnan(fluor[aRow, validTwoLessCyc - 3]))): validTwoLessCyc += 1 # First and Second Derivative values calculation fluorShift = np.roll(fluor[aRow], 1, axis=0) # Shift to right - real position is -0.5 fluorShift[0] = np.nan firstDerivative = fluor[aRow] - fluorShift if np.isfinite(firstDerivative).any(): FDMaxCyc = np.nanargmax(firstDerivative, axis=0) + 1 # Cycles so +1 to array else: return fluor.shape[1] firstDerivativeShift = np.roll(firstDerivative, -1, axis=0) # Shift to left firstDerivativeShift[-1] = np.nan secondDerivative = firstDerivativeShift - firstDerivative if FDMaxCyc + 2 <= fluor.shape[1]: # Only add two cycles if there is an increase without nan if (not np.isnan(fluor[aRow, FDMaxCyc - 1]) and not np.isnan(fluor[aRow, FDMaxCyc]) and not np.isnan(fluor[aRow, FDMaxCyc + 1]) and fluor[aRow, FDMaxCyc + 1] > fluor[aRow, FDMaxCyc] > fluor[aRow, FDMaxCyc - 1]): FDMaxCyc += 2 else: FDMaxCyc = fluor.shape[1] maxMeanSD = 0.0 stopCyc = fluor.shape[1] for cycInRange in range(validTwoLessCyc, FDMaxCyc): with warnings.catch_warnings(): warnings.simplefilter("ignore", category=RuntimeWarning) tempMeanSD = np.mean(secondDerivative[cycInRange - 2: cycInRange + 1], axis=0) # The > 0.000000000001 is to avoid float differences to the pascal version if not np.isnan(tempMeanSD) and (tempMeanSD - maxMeanSD) > 0.000000000001: maxMeanSD = tempMeanSD stopCyc = cycInRange if stopCyc + 2 >= fluor.shape[1]: stopCyc = fluor.shape[1] return stopCyc def _lrp_findStartCyc(fluor, aRow, stopCyc): """A function which finds the start cycle of the log lin phase in fluor. Args: fluor: The array with the fluorescence values aRow: The row to work on stopCyc: The stop cycle Returns: An array [int, int] with the start cycle and the fixed start cycle. """ startCyc = stopCyc - 1 # startCyc might be NaN, so shift it to the first value firstNotNaN = 1 # Cycles so +1 to array while np.isnan(fluor[aRow, firstNotNaN - 1]) and firstNotNaN < startCyc: firstNotNaN += 1 while startCyc > firstNotNaN and np.isnan(fluor[aRow, startCyc - 1]): startCyc -= 1 # As long as there are no NaN and new values are increasing while (startCyc > firstNotNaN and not np.isnan(fluor[aRow, startCyc - 2]) and fluor[aRow, startCyc - 2] <= fluor[aRow, startCyc - 1]): startCyc -= 1 startCycFix = startCyc if (not np.isnan(fluor[aRow, startCyc]) and not np.isnan(fluor[aRow, startCyc - 1]) and not np.isnan(fluor[aRow, stopCyc - 1]) and not np.isnan(fluor[aRow, stopCyc - 2])): startStep = np.log10(fluor[aRow, startCyc]) - np.log10(fluor[aRow, startCyc - 1]) stopStep = np.log10(fluor[aRow, stopCyc - 1]) - np.log10(fluor[aRow, stopCyc - 2]) if startStep > 1.1 * stopStep: startCycFix += 1 return [startCyc, startCycFix] def _lrp_testSlopes(fluor, aRow, stopCyc, startCycFix): """Splits the values and calculates a slope for the upper and the lower half. Args: fluor: The array with the fluorescence values aRow: The row to work on stopCyc: The stop cycle startCycFix: The start cycle Returns: An array with [slopelow, slopehigh]. 
""" # Both start with full range loopStart = [startCycFix[aRow], stopCyc[aRow]] loopStop = [startCycFix[aRow], stopCyc[aRow]] # Now find the center ignoring nan while True: loopStart[1] -= 1 loopStop[0] += 1 while (loopStart[1] - loopStop[0]) > 1 and np.isnan(fluor[aRow, loopStart[1] - 1]): loopStart[1] -= 1 while (loopStart[1] - loopStop[0]) > 1 and
np.isnan(fluor[aRow, loopStop[1] - 1])
numpy.isnan
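The _lrp_linReg helper above evaluates the closed-form least-squares slope and intercept per row while masking NaN cycles. A simplified single-series sketch of the same expressions (illustrative data, no NaN handling):

import numpy as np

x = np.array([1.0, 2.0, 3.0, 4.0])
y = np.array([2.1, 4.0, 6.2, 7.9])
n = x.size
ssx = np.sum(x * x) - np.sum(x)**2 / n           # S_xx
sxy = np.sum(x * y) - np.sum(x) * np.sum(y) / n  # S_xy
slope = sxy / ssx
intercept = np.mean(y) - slope * np.mean(x)
print(slope, intercept)  # agrees with np.polyfit(x, y, 1)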
import numpy as np import matplotlib.pyplot as plt import time from SciDataTool.Functions.Plot.plot_3D import plot_3D t0 = time.time() # % SDM validation step 1.1 : # % Paper Lubin 2010b and Lubin_2011b # % Machine with no load rotor, rotor slots, current sheet on stator side # %% VALIDATION CASES # % 0 = Lubin_2010a fig.6 # % 1 = Lubin_2010a fig.11 # % 2 = Lubin_2010a fig.12 # % 3 = Lubin_2011b fig.5 validation_case = 3 # %% PROBLEM PARAMETERS mu_0 = np.pi * 4e-7 R_1 = 0.04 R_2 = 0.07 R_3 = 0.08 L = 0.1 theta_i0 = 135 * np.pi / 180 N = 50 K = 50 K_1 = 1e5 if validation_case == 0: Z_r = 1 p = 1 alpha = 0 beta = 0.78 m = 1 if validation_case == 1: Z_r = 4 p = 2 N = 50 K = 50 K_1 = 1e5 theta_i0 = 135 * np.pi / 180 alpha = np.pi / 4 beta = 0.78 m = 1 if validation_case == 2: Z_r = 4 p = 2 N = 50 K = 50 K_1 = 1e5 theta_i0 = 135 * np.pi / 180 alpha = 0 m = 1 if validation_case == 3: Z_r = 18 p = 2 N = 200 K = 20 K_1 = 8e4 theta_i0 = 140 * np.pi / 180 alpha = np.pi / 2 beta = 0.6 * 3.14 / Z_r m = 1 R_1 = 0.04 R_2 = 0.06 R_3 = 0.063 L = 0.5 # %% AIRGAP FLUX COMPUTATION i = np.arange(0, Z_r) + 1 k = np.arange(0, K) + 1 n = np.arange(0, N) + 1 kni, nik, ikn = np.meshgrid(k, n, i) kni = np.reshape(kni, (N, K * Z_r)) nik = np.reshape(nik, (N, K * Z_r)) ikn = np.reshape(ikn, (N, K * Z_r)) theta_rot = np.linspace(0, 2 * np.pi, 100) Csts = np.zeros((100, 4 * N + K * Z_r)) for ii, a0 in enumerate(theta_rot): theta_i = 2 * np.pi / Z_r * (i - 1) + theta_i0 + a0 theta_ikn = 2 * np.pi / Z_r * (ikn - 1) + theta_i0 * np.ones(ikn.shape) + a0 I_coskcosni = ( beta ** 2 * nik * ( (-1) ** kni * np.sin(nik * (beta + 2 * theta_ikn) / 2) + np.sin(nik * (beta - 2 * theta_ikn) / 2) ) / (beta ** 2 * nik ** 2 - np.pi ** 2 * kni ** 2) ) I_cosksinni = ( beta ** 2 * nik * ( -((-1) ** kni) * np.cos(nik * (beta + 2 * theta_ikn) / 2) + np.cos(nik * (beta - 2 * theta_ikn) / 2) ) / (beta ** 2 * nik ** 2 - np.pi ** 2 * kni ** 2) ) P_nik_R2_R3 = (R_3 / R_2) ** nik + (R_2 / R_3) ** nik E_nik_R2_R3 = -((R_3 / R_2) ** nik) + (R_2 / R_3) ** nik E_kni_R1_R2 = -((R_2 / R_1) ** (np.pi * kni / beta)) + (R_1 / R_2) ** ( np.pi * kni / beta ) P_kni_R1_R2 = (R_2 / R_1) ** (np.pi * kni / beta) + (R_1 / R_2) ** ( np.pi * kni / beta ) Jm_cos = np.zeros(N) Jm_cos[m * p - 1] = mu_0 * K_1 *
np.cos(m * p * (alpha + a0))
numpy.cos
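A small check of the meshgrid/reshape pattern used above: with NumPy's default 'xy' indexing, np.meshgrid(k, n, i) yields arrays of shape (len(n), len(k), len(i)), which the script then flattens to (N, K*Z_r) so each (k, i) pair becomes one column. The sizes below are illustrative and much smaller than the script's.

import numpy as np

k = np.arange(1, 4)   # K = 3
n = np.arange(1, 3)   # N = 2
i = np.arange(1, 5)   # Z_r = 4
kni, nik, ikn = np.meshgrid(k, n, i)
print(kni.shape)                          # (2, 3, 4) == (N, K, Z_r)
print(np.reshape(kni, (2, 3 * 4)).shape)  # (2, 12) == (N, K*Z_r)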
import numpy as np from numpy import cos,sin # script to check that the rotation is done correctly az_test = 45*np.pi/180 baz_test = 225*np.pi/180 npts=1 # rotation in seisnoise # convert [EE,EN,NN,NE] into [TT,RR,TR,RT] big_rot = np.asarray([[-cos(az_test)*cos(baz_test),cos(az_test)*sin(baz_test),-sin(az_test)*sin(baz_test),sin(az_test)*cos(baz_test)], [-sin(az_test)*sin(baz_test),-sin(az_test)*cos(baz_test),-cos(az_test)*cos(baz_test),-cos(az_test)*sin(baz_test)], [-cos(az_test)*sin(baz_test),-cos(az_test)*cos(baz_test),sin(az_test)*cos(baz_test),sin(az_test)*sin(baz_test)], [-sin(az_test)*cos(baz_test),
sin(az_test)
numpy.sin
import numpy as np import pandas as pd import seaborn as sns from mpl_toolkits import mplot3d from pathlib import Path ############################################################################### #Non-Standard Imports ############################################################################### import dunlin._utils_plot as upp ############################################################################### #Raw Data ############################################################################### class TimeResponseData: consolidated_colors = {} ########################################################################### #Instantiation ########################################################################### def __init__(self, data, base_colors=None, palette_type='light_palette', roll=2, thin=2, truncate=None, levels=None, drop_scenarios=None, consolidate_colors=True, ): def _2dict(df): return {i: g.droplevel(axis=1, level=0) for i, g in df.groupby(axis=1, level=0)} data = self.preprocess(data, roll, thin, truncate, levels, drop_scenarios) colors = self.make_colors(data, base_colors, palette_type) self.colors = colors self._data = data self._dct = _2dict(data) self._t = pd.DataFrame(dict.fromkeys(colors, data.index), index=data.index) ########################################################################### #Supporting Methods ########################################################################### @classmethod def make_colors(cls, df, base_colors=None, palette_type='light_palette'): levels = list(range(df.columns.nlevels))[1:] scenarios = sorted([i for i, g in df.groupby(axis=1, level=levels)]) if palette_type == 'light_palette': colors = upp.make_light_scenarios(scenarios, base_colors) elif palette_type == 'dark_palette': colors = upp.make_dark_scenarios(scenarios, base_colors) else: colors = upp.make_color_scenarios(scenarios, base_colors) return colors @staticmethod def preprocess(df, roll=2, thin=2, truncate=None, levels=None, drop_scenarios=None): if levels: to_drop = [lvl for lvl in df.columns.names if lvl not in levels] df = df.droplevel(to_drop, axis=1) df = df.reorder_levels(levels, axis=1) if truncate: lb, ub = truncate df = df.loc[lb:ub] if drop_scenarios: lvls = df.columns.names[1:] temp = [g for i, g in df.groupby(level=lvls, axis=1) if i not in drop_scenarios] df = pd.concat(temp, axis=1, sort=False) df = df.rolling(roll, min_periods=1).mean() df = df.iloc[::thin] return df ########################################################################### #Dict-like Behaviour ########################################################################### def __contains__(self, key): return key in self._dct def __getitem__(self, key): return self._dct[key] def __setitem__(self, key, value): if key in self: raise ValueError(f'Cannot add a state that already exists. Delete {key} first and add the new values.') elif type(key) != str: raise TypeError('Can only use strings as keys.') self._dct[key] = value def keys(self): return self._dct.keys() def values(self): return self._dct.values() def items(self): return self._dct.items() def __iter__(self): return iter(self._dct) def get_color(self, scenario): return self.colors[scenario] def getsd(self, key, ignore_none=False): if self._dct_sd is None: if ignore_none: return None else: raise AttributeError('No SD data.') return self._dct_sd[key] def setsd(self, key, value): if type(key) != str: raise TypeError('Can only use strings as keys.') if self._dct_sd: if key in self._dct_sd: raise ValueError(f'Cannot add a state that already exists. 
Delete {key} first and add the new values.') if self._dct_sd is None: self._dct_sd = {} self._dct_sd[key] = value def get_size(self): tspan = self._data.index return {'bounds': (tspan[0], tspan[-1]), 'tspan' : tspan } def _getvar(self, var): if type(var) == str: return self[var] else: return var ########################################################################### #Representation ########################################################################### def __str__(self): return f'{type(self).__name__}{tuple(self.keys())}' def __repr__(self): return self.__str__() ########################################################################### #Operators ########################################################################### def dup(self, x, name): self[name] = self[x].copy() def evaluate(self, name, expr): result = eval(expr, self._dct) if name: self[name] = result def diff(self, x, name=None): x = self._getvar(x) dt = np.diff(x.index, prepend=np.NAN) dxdt = x.diff().divide(dt, axis=0) if name: self[name] = dxdt return dxdt def spec_diff(self, x, name=None): x = self._getvar(x) dt = np.diff(x.index, prepend=np.NAN) dxdt = x.diff().divide(dt, axis=0) / x if name: self[name] = dxdt return dxdt def add(self, x, *x_, name=None): x = self._getvar(x) for i in x_: i = self._getvar(i) x = x+i if name: self[name] = x return x def sub(self, x, *x_, name=None): x = self._getvar(x) for i in x_: i = self._getvar(i) x = x-i if name: self[name] = x return x def mul(self, x, *x_, name=None): x = self._getvar(x).copy() for i in x_: i = self._getvar(i) x = x*i if name: self[name] = x return x def div(self, x, *x_, name=None): x = self._getvar(x) for i in x_: i = self._getvar(i) x = x/i if name: self[name] = x return x def apply(self, func, *x_, name=None): x_ = [self._getvar(x) for x in x_] result = func(*x_) if name: self[name] = result return result def first_order_gen(self, dx, x, decay, name=None): dx = self._getvar(dx) x = self._getvar(x) decay = self._getvar(decay) gen_x = dx + decay*x if name: self[name] = gen_x return gen_x ########################################################################### #Plotting ########################################################################### def _set_axis_lim(self, ax, xlim, ylim, zlim=None): def helper(func, lim): if lim is None: pass elif hasattr(lim, 'items'): func(**lim) else: func(*lim) if zlim is None: helper(ax.set_xlim, xlim) helper(ax.set_ylim, ylim) else: helper(ax.set_xlim, xlim) helper(ax.set_ylim, ylim) helper(ax.set_zlim, zlim) def _parse_color(self, args): color = args.get('color') if type(color) == str: args['color'] = upp.colors[color] return args else: return args def plot(self, AX, yvar, bounds=None, **kwargs): xvar = self._t return self.plot2(AX, xvar, yvar, bounds, **kwargs) def plot2(self, AX, xvar, yvar, bounds=None, xsd=None, ysd=None, skip=lambda scenario: False, title=None, xlim=None, ylim=None, halflife=None, thin=1, **line_args): x = self._getvar(xvar) y = self._getvar(yvar) if bounds: lb, ub = bounds x = x.loc[lb:ub] y = y.loc[lb:ub] lines = {} ax_with_title = set() for scenario, color in self.colors.items(): if skip(scenario): continue x_vals = x[scenario] y_vals = y[scenario] if halflife is None: x_vals = x_vals y_vals = y_vals else: x_vals = x_vals.ewm(halflife=halflife, ignore_na=True).mean() y_vals = y_vals.ewm(halflife=halflife, ignore_na=True).mean() if x_vals.index.nlevels > 1: raise NotImplementedError() else: x_vals = x_vals.values[::thin] xerr = None if y.index.nlevels > 1: raise NotImplementedError() else: 
y_vals = y_vals.values[::thin] yerr = None ax = AX[scenario] if hasattr(AX, 'items') else AX defaults = {'marker': 'o', 'linestyle': 'None', 'color' : color, 'label' : ', '.join([str(s) for s in scenario]) } line_args_ = self._parse_color({**defaults, **line_args}) lines[scenario] = ax.errorbar(x_vals, y_vals, yerr=yerr, xerr=xerr, **line_args_) if title is not None and ax not in ax_with_title: ax.set_title(title) ax_with_title.add(ax) self._set_axis_lim(ax, xlim, ylim) return lines def plot3(self, AX, xvar, yvar, zvar, bounds=None, skip=lambda scenario: False, title=None, xlim=None, ylim=None, zlim=None, halflife=None, thin=1, **line_args): x = self._getvar(xvar) y = self._getvar(yvar) z = self._getvar(zvar) if bounds: lb, ub = bounds x = x.loc[lb:ub] y = y.loc[lb:ub] z = z.loc[lb:ub] lines = {} ax_with_title = set() for scenario, color in self.colors.items(): if skip(scenario): continue x_vals = x[scenario] y_vals = y[scenario] z_vals = z[scenario] if halflife is not None: x_vals = x_vals.ewm(halflife=halflife, ignore_na=True).mean() y_vals = y_vals.ewm(halflife=halflife, ignore_na=True).mean() z_vals = z_vals.ewm(halflife=halflife, ignore_na=True).mean() if x_vals.index.nlevels > 1: raise NotImplementedError() else: x_vals = x_vals.values[::thin] xerr = None if y.index.nlevels > 1: raise NotImplementedError() else: y_vals = y_vals.values[::thin] yerr = None if z.index.nlevels > 1: raise NotImplementedError() else: z_vals = z_vals.values[::thin] zerr = None ax = AX[scenario] if hasattr(AX, 'items') else AX defaults = {'marker': 'o', 'linestyle': 'None', 'color' : color, 'label' : ', '.join([str(s) for s in scenario]) } line_args_ = self._parse_color({**defaults, **line_args}) lines[scenario] = ax.plot(x_vals, y_vals, z_vals, **line_args_) if title is not None and ax not in ax_with_title: ax.set_title(title) ax_with_title.add(ax) self._set_axis_lim(ax, xlim, ylim, zlim) return lines def plot_linear_average(self, ax, xvar, yvar, bounds=None, skip=lambda scenario: False, title=None, xlim=None, ylim=None, xspan=None, **line_args): if title is not None: ax.set_title(title) x = self._getvar(xvar) y = self._getvar(yvar) to_plot = [c for c in x.columns if not skip(c)] if not to_plot: return x = x.loc[:,to_plot] y = y.loc[:,to_plot] if bounds: lb, ub = bounds x = x.loc[lb:ub] y = y.loc[lb:ub] x = x.values.flatten() y = y.values.flatten() idx = np.isnan(x) | np.isnan(y) idx = ~idx x = x[idx] y = y[idx] A = np.vstack([x, np.ones(len(x))]).T m, c = np.linalg.lstsq(A, y, rcond=None)[0] xmax = max(x) if xspan is None else xspan[1] xmin = min(x) if xspan is None else xspan[0] x_ =
np.linspace(xmin, xmax)
numpy.linspace
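plot_linear_average above fits y = m*x + c with np.linalg.lstsq and then evaluates the fit on an evenly spaced grid. A standalone sketch of that pattern with illustrative data:

import numpy as np

x = np.array([0.0, 1.0, 2.0, 3.0, 4.0])
y = np.array([0.2, 1.1, 2.3, 2.9, 4.1])
A = np.vstack([x, np.ones(len(x))]).T
m, c = np.linalg.lstsq(A, y, rcond=None)[0]
x_ = np.linspace(x.min(), x.max())  # 50 points by default
y_ = m * x_ + c                     # fitted line to plot over the data
print(m, c)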
import numpy as np from scipy.special import ellipe, ellipk def curl(quantity, spacing=(1, 1, 1), mesh=None, vector_grad=None): r""" Return 3D curl. """ if not vector_grad: dx, dy, dz = spacing if mesh: dx = mesh[0][0, 1, 0] - mesh[0][0, 0, 0] dy = mesh[1][1, 0, 0] - mesh[1][0, 0, 0] dz = mesh[2][0, 0, 1] - mesh[2][0, 0, 0] dx_dy = np.gradient(quantity[0], axis=0)/dy dx_dz = np.gradient(quantity[0], axis=2)/dz dy_dx = np.gradient(quantity[1], axis=1)/dx dy_dz = np.gradient(quantity[1], axis=2)/dz dz_dx = np.gradient(quantity[2], axis=1)/dx dz_dy = np.gradient(quantity[2], axis=0)/dy else: dx_dy = vector_grad[0][1] dx_dz = vector_grad[0][2] dy_dx = vector_grad[1][0] dy_dz = vector_grad[1][2] dz_dx = vector_grad[2][0] dz_dy = vector_grad[2][1] curl_x = dz_dy - dy_dz curl_y = dx_dz - dz_dx curl_z = dy_dx - dx_dy return curl_x, curl_y, curl_z def dot_product(vector1, vector2): r""" Return dot product. """ assert vector1[0].shape == vector2[0].shape, 'Vector fields do not have the same dimensions.' assert 4 > len(vector1) > 1, 'Vectors should have at least 2 no more then 3 components.' if len(vector1) == 3: return vector1[0]*vector2[0] + vector1[1]*vector2[1] + vector1[2]*vector2[2] else: return vector1[0]*vector2[0] + vector1[1]*vector2[1] def gradient(scalar, dx=1, dy=1, dz=1, mesh=None): if mesh: dx = mesh[0][0, 1, 0] - mesh[0][0, 0, 0] dy = mesh[1][1, 0, 0] - mesh[1][0, 0, 0] dz = mesh[2][0, 0, 1] - mesh[2][0, 0, 0] grad = np.gradient(scalar, dy, dx, dz) return grad[1], grad[0], grad[2] def magnitude(vector): r""" Return magnitude of vector. """ assert 4 > len(vector) > 1, 'Vectors should have at least 2 no more then 3 components.' if len(vector) == 3: return np.sqrt(vector[0]**2 + vector[1]**2 + vector[2]**2) else: return np.sqrt(vector[0]**2 + vector[1]**2) def field_from_wire(limits=(-5, 5, -5, 5, -5, 5), points=(11, 11, 11), center=(0, 0), mu_0I=1, a=1, wire_along_axis='y'): r""" """ x, y, z = np.meshgrid(np.linspace(limits[0], limits[1], points[0]), np.linspace(limits[2], limits[3], points[1]), np.linspace(limits[4], limits[5], points[2])) mesh = [x, y, z] B_x = np.zeros(x.shape) B_y = np.zeros(x.shape) B_z = np.zeros(x.shape) A_x = np.zeros(x.shape) A_y = np.zeros(x.shape) A_z = np.zeros(x.shape) if wire_along_axis == 'y': theta = np.arctan2(z - center[1], x - center[0]) r = np.sqrt((x - center[0])**2. + (z - center[1])**2.) elif wire_along_axis == 'z': theta = np.arctan2(y - center[1], x - center[0]) r = np.sqrt((x - center[0])**2. + (y - center[1])**2.) 
inside = np.where(r <= a) outside = np.where(r > a) inside = (inside[0], inside[1], inside[2]) outside = (outside[0], outside[1], outside[2]) if wire_along_axis == 'y': B_x[inside] = -mu_0I*r[inside]/(2.*np.pi*a**2)*np.sin(theta[inside]) B_z[inside] = mu_0I*r[inside]/(2.*np.pi*a**2)*np.cos(theta[inside]) B_x[outside] = -mu_0I/(2.*np.pi*r[outside])*np.sin(theta[outside]) B_z[outside] = mu_0I/(2.*np.pi*r[outside])*np.cos(theta[outside]) A_y[inside] = -mu_0I/(4.*np.pi*a**2)*(r[inside]**2 - a**2) A_y[outside] = -mu_0I/(2.*np.pi)*np.log(r[outside]/a) elif wire_along_axis == 'z': B_x[inside] = -mu_0I*r[inside]/(2.*np.pi*a**2)*np.sin(theta[inside]) B_y[inside] = mu_0I*r[inside]/(2.*np.pi*a**2)*np.cos(theta[inside]) B_x[outside] = -mu_0I/(2.*np.pi*r[outside])*np.sin(theta[outside]) B_y[outside] = mu_0I/(2.*np.pi*r[outside])*np.cos(theta[outside]) A_z[inside] = -mu_0I/(4.*np.pi*a**2)*(r[inside]**2 - a**2) A_z[outside] = -mu_0I/(2.*np.pi)*np.log(r[outside]/a) return mesh, A_x, A_y, A_z, B_x, B_y, B_z def field_from_loop(limits=(-5, 5, -5, 5, -5, 5), points=(11, 11, 11), mu_0I=1., a=1.): r""" """ x, y, z = np.meshgrid(np.linspace(limits[0], limits[1], points[0]), np.linspace(limits[2], limits[3], points[1]), np.linspace(limits[4], limits[5], points[2])) mesh = [x, y, z] r = np.sqrt(x**2 + y**2) phi = np.arctan2(y, x) B_x = np.zeros(x.shape) B_y = np.zeros(x.shape) B_z = np.zeros(x.shape) A_x = np.zeros(x.shape) A_y =
np.zeros(x.shape)
numpy.zeros
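field_from_wire above uses the standard piecewise azimuthal field of a straight conductor of radius a: |B| = mu_0*I*r/(2*pi*a**2) inside and mu_0*I/(2*pi*r) outside. A vectorised sketch of just that magnitude, using np.where instead of the index arrays above; the values are illustrative.

import numpy as np

mu_0I = 1.0  # mu_0 * I, matching the function's mu_0I argument
a = 1.0      # wire radius
r = np.linspace(0.1, 5.0, 50)
B_phi = np.where(r <= a,
                 mu_0I * r / (2.0 * np.pi * a**2),
                 mu_0I / (2.0 * np.pi * r))
print(B_phi.max())  # peaks at the wire surface r = a, i.e. mu_0I/(2*pi*a)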
import anndata import multiprocessing as mp import numpy as np import os import pandas as pd import pytest import rpy2.robjects.packages import rpy2.robjects.pandas2ri import scipy.sparse as ss import scipy.stats as st import scmodes import scmodes.benchmark.gof from .fixtures import test_data ashr = rpy2.robjects.packages.importr('ashr') rpy2.robjects.pandas2ri.activate() def test__gof(): np.random.seed(0) mu = 10 px = st.poisson(mu=mu) x = px.rvs(size=100) d, p = scmodes.benchmark.gof._gof(x, cdf=px.cdf, pmf=px.pmf) assert d >= 0 assert 0 <= p <= 1 def test__rpp(): np.random.seed(0) mu = 10 px = st.poisson(mu=mu) x = px.rvs(size=100) F = px.cdf(x - 1) f = px.pmf(x) vals = scmodes.benchmark.gof._rpp(F, f) assert vals.shape == x.shape def test_gof_point(test_data): x = test_data res = scmodes.benchmark.gof_point(x) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() def test_gamma_cdf(): np.random.seed(0) x = st.nbinom(n=10, p=.1).rvs(size=100) Fx = scmodes.benchmark.gof._zig_cdf(x, size=1, log_mu=-5, log_phi=-1) assert Fx.shape == x.shape assert np.isfinite(Fx).all() assert (Fx >= 0).all() assert (Fx <= 1).all() def test_zig_cdf(): np.random.seed(0) x = st.nbinom(n=10, p=.1).rvs(size=100) Fx = scmodes.benchmark.gof._zig_cdf(x, size=1, log_mu=-5, log_phi=-1, logodds=-3) assert Fx.shape == x.shape assert (Fx >= 0).all() assert (Fx <= 1).all() def test_zig_pmf_cdf(): x = np.arange(50) import scmodes.benchmark.gof size = 1000 log_mu=-5 log_phi=-1 logodds=-1 Fx = scmodes.benchmark.gof._zig_cdf(x, size=size, log_mu=log_mu, log_phi=log_phi, logodds=logodds) Fx_1 = scmodes.benchmark.gof._zig_cdf(x - 1, size=size, log_mu=log_mu, log_phi=log_phi, logodds=logodds) fx = scmodes.benchmark.gof._zig_pmf(x, size=size, log_mu=log_mu, log_phi=log_phi, logodds=logodds) assert np.isclose(Fx - Fx_1, fx).all() def test_gof_gamma(test_data): x = test_data res = scmodes.benchmark.gof_gamma(x) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() def test_gof_gamma_size(test_data): x = test_data s = 1 + np.median(x, axis=1).reshape(-1, 1) res = scmodes.benchmark.gof_gamma(x, s=s, lr=1e-3) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() def test_gof_gamma_adata(test_data): x = test_data y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns)) res = scmodes.benchmark.gof_gamma(y, lr=1e-3) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() assert (res.index == x.columns).all() def test_gof_gamma_adata_key(test_data): x = test_data y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns)) res = scmodes.benchmark.gof_gamma(y, key=0, lr=1e-3) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() assert (res.index == x.columns).all() def test_gof_zig(test_data): x = test_data res = scmodes.benchmark.gof_zig(x) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() def test_gof_zig_size(test_data): x = test_data s = 1 + np.median(x, axis=1).reshape(-1, 1) res = scmodes.benchmark.gof_zig(x, s=s, lr=1e-3) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() def test_gof_zig_adata(test_data): x = test_data y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns)) 
res = scmodes.benchmark.gof_zig(y, lr=1e-3) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() assert (res.index == x.columns).all() def test_gof_zig_adata_key(test_data): x = test_data y = anndata.AnnData(x.values, obs=pd.DataFrame(x.index), var=pd.DataFrame(x.columns)) res = scmodes.benchmark.gof_zig(y, key=0, lr=1e-3) assert res.shape[0] == x.shape[1] assert np.isfinite(res['stat']).all() assert np.isfinite(res['p']).all() assert (res.index == x.columns).all() def test__ash_pmf(test_data): x = test_data gene = 'ENSG00000116251' xj = x[gene] size = x.sum(axis=1) lam = xj / size fit = ashr.ash_workhorse( # these are ignored by ash pd.Series(np.zeros(xj.shape)), 1, outputlevel=pd.Series(['fitted_g', 'data']), # numpy2ri doesn't DTRT, so we need to use pandas lik=ashr.lik_pois(y=xj, scale=size, link='identity'), mixsd=pd.Series(np.geomspace(lam.min() + 1e-8, lam.max(), 25)), mode=pd.Series([lam.min(), lam.max()])) res = scmodes.benchmark.gof._ash_pmf(xj, fit) assert res.shape == xj.shape assert np.isfinite(res).all() assert (res >= 0).all() assert (res <= 1).all() def test__ash_cdf(test_data): x = test_data gene = 'ENSG00000116251' xj = x[gene] size = x.sum(axis=1) lam = xj / size fit = ashr.ash_workhorse( # these are ignored by ash pd.Series(np.zeros(xj.shape)), 1, outputlevel=pd.Series(['fitted_g', 'data']), # numpy2ri doesn't DTRT, so we need to use pandas lik=ashr.lik_pois(y=xj, scale=size, link='identity'), mixsd=pd.Series(np.geomspace(lam.min() + 1e-8, lam.max(), 25)), mode=pd.Series([lam.min(), lam.max()])) res = scmodes.benchmark.gof._ash_cdf(xj, fit, s=size) assert np.isfinite(res).all() assert (res >= 0).all() assert (res <= 1).all() def test__ash_cdf_pmf(test_data): x = test_data gene = 'ENSG00000116251' xj = x[gene] size = x.sum(axis=1) lam = xj / size fit = ashr.ash_workhorse( # these are ignored by ash pd.Series(
np.zeros(xj.shape)
numpy.zeros
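The _gof/_rpp helpers exercised above appear to build randomized predictive p-values for a discrete model; the generic construction is F(x-1) + v*f(x) with v ~ U(0,1), which is uniform under the null and can be fed to a KS test. A textbook sketch of that idea, not necessarily the library's exact implementation:

import numpy as np
import scipy.stats as st

np.random.seed(0)
px = st.poisson(mu=10)
x = px.rvs(size=100)
u = px.cdf(x - 1) + np.random.uniform(size=x.shape) * px.pmf(x)
d, p = st.kstest(u, 'uniform')
print(d, p)  # p is typically large when the fitted model is adequate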
from numpy.testing import (assert_, assert_allclose, assert_raises, assert_equal) import numpy as np from scipy.optimize._lsq.common import ( step_size_to_bound, find_active_constraints, make_strictly_feasible, CL_scaling_vector, intersect_trust_region, build_quadratic_1d, minimize_quadratic_1d, evaluate_quadratic, reflective_transformation) class TestBounds(object): def test_step_size_to_bounds(self): lb = np.array([-1.0, 2.5, 10.0]) ub = np.array([1.0, 5.0, 100.0]) x = np.array([0.0, 2.5, 12.0]) s = np.array([0.1, 0.0, 0.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, 10) assert_equal(hits, [1, 0, 0]) s = np.array([0.01, 0.05, -1.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, 2) assert_equal(hits, [0, 0, -1]) s = np.array([10.0, -0.0001, 100.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, np.array(-0)) assert_equal(hits, [0, -1, 0]) s = np.array([1.0, 0.5, -2.0]) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, 1.0) assert_equal(hits, [1, 0, -1]) s = np.zeros(3) step, hits = step_size_to_bound(x, s, lb, ub) assert_equal(step, np.inf) assert_equal(hits, [0, 0, 0]) def test_find_active_constraints(self): lb = np.array([0.0, -10.0, 1.0]) ub = np.array([1.0, 0.0, 100.0]) x = np.array([0.5, -5.0, 2.0]) active = find_active_constraints(x, lb, ub) assert_equal(active, [0, 0, 0]) x = np.array([0.0, 0.0, 10.0]) active = find_active_constraints(x, lb, ub) assert_equal(active, [-1, 1, 0]) active = find_active_constraints(x, lb, ub, rtol=0) assert_equal(active, [-1, 1, 0]) x = np.array([1e-9, -1e-8, 100 - 1e-9]) active = find_active_constraints(x, lb, ub) assert_equal(active, [0, 0, 1]) active = find_active_constraints(x, lb, ub, rtol=1.5e-9) assert_equal(active, [-1, 0, 1]) lb = np.array([1.0, -np.inf, -np.inf]) ub = np.array([np.inf, 10.0, np.inf]) x = np.ones(3) active = find_active_constraints(x, lb, ub) assert_equal(active, [-1, 0, 0]) # Handles out-of-bound cases. 
x = np.array([0.0, 11.0, 0.0]) active = find_active_constraints(x, lb, ub) assert_equal(active, [-1, 1, 0]) active = find_active_constraints(x, lb, ub, rtol=0) assert_equal(active, [-1, 1, 0]) def test_make_strictly_feasible(self): lb = np.array([-0.5, -0.8, 2.0]) ub = np.array([0.8, 1.0, 3.0]) x = np.array([-0.5, 0.0, 2 + 1e-10]) x_new = make_strictly_feasible(x, lb, ub, rstep=0) assert_(x_new[0] > -0.5) assert_equal(x_new[1:], x[1:]) x_new = make_strictly_feasible(x, lb, ub, rstep=1e-4) assert_equal(x_new, [-0.5 + 1e-4, 0.0, 2 * (1 + 1e-4)]) x = np.array([-0.5, -1, 3.1]) x_new = make_strictly_feasible(x, lb, ub) assert_(np.all((x_new >= lb) & (x_new <= ub))) x_new = make_strictly_feasible(x, lb, ub, rstep=0) assert_(np.all((x_new >= lb) & (x_new <= ub))) lb = np.array([-1, 100.0]) ub = np.array([1, 100.0 + 1e-10]) x = np.array([0, 100.0]) x_new = make_strictly_feasible(x, lb, ub, rstep=1e-8) assert_equal(x_new, [0, 100.0 + 0.5e-10]) def test_scaling_vector(self): lb = np.array([-np.inf, -5.0, 1.0, -np.inf]) ub = np.array([1.0, np.inf, 10.0, np.inf]) x = np.array([0.5, 2.0, 5.0, 0.0]) g = np.array([1.0, 0.1, -10.0, 0.0]) v, dv = CL_scaling_vector(x, g, lb, ub) assert_equal(v, [1.0, 7.0, 5.0, 1.0]) assert_equal(dv, [0.0, 1.0, -1.0, 0.0]) class TestQuadraticFunction(object): def setup_method(self): self.J = np.array([ [0.1, 0.2], [-1.0, 1.0], [0.5, 0.2]]) self.g = np.array([0.8, -2.0]) self.diag = np.array([1.0, 2.0]) def test_build_quadratic_1d(self): s = np.zeros(2) a, b = build_quadratic_1d(self.J, self.g, s) assert_equal(a, 0) assert_equal(b, 0) a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) assert_equal(a, 0) assert_equal(b, 0) s = np.array([1.0, -1.0]) a, b = build_quadratic_1d(self.J, self.g, s) assert_equal(a, 2.05) assert_equal(b, 2.8) a, b = build_quadratic_1d(self.J, self.g, s, diag=self.diag) assert_equal(a, 3.55) assert_equal(b, 2.8) s0 = np.array([0.5, 0.5]) a, b, c = build_quadratic_1d(self.J, self.g, s, diag=self.diag, s0=s0) assert_equal(a, 3.55) assert_allclose(b, 2.39) assert_allclose(c, -0.1525) def test_minimize_quadratic_1d(self): a = 5 b = -1 t, y = minimize_quadratic_1d(a, b, 1, 2) assert_equal(t, 1) assert_equal(y, a * t**2 + b * t) t, y = minimize_quadratic_1d(a, b, -2, -1) assert_equal(t, -1) assert_equal(y, a * t**2 + b * t) t, y = minimize_quadratic_1d(a, b, -1, 1) assert_equal(t, 0.1) assert_equal(y, a * t**2 + b * t) c = 10 t, y = minimize_quadratic_1d(a, b, -1, 1, c=c) assert_equal(t, 0.1) assert_equal(y, a * t**2 + b * t + c) def test_evaluate_quadratic(self): s =
np.array([1.0, -1.0])
numpy.array
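test_minimize_quadratic_1d above expects the minimiser of a*t**2 + b*t on an interval: the unconstrained vertex t = -b/(2a) clipped to the bounds when the parabola is convex, otherwise one of the endpoints. A small closed-form sketch of that logic (the helper name and the final print are illustrative):

import numpy as np

def min_quad_1d(a, b, lb, ub):
    candidates = [lb, ub]
    if a > 0:
        candidates.append(float(np.clip(-b / (2.0 * a), lb, ub)))
    values = [a * t**2 + b * t for t in candidates]
    i = int(np.argmin(values))
    return candidates[i], values[i]

print(min_quad_1d(5, -1, -1, 1))  # (0.1, -0.05), the case asserted in the test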
""" Utility functions for EO charge transfer inefficiency tests """ import numpy as np __all__ = ["DetectorResponse"] def _fwcSolve(f1Pars, f2Pars, g=0.1): """ The solution (provided by e2v) for the flux corresponding to the full-well capacity described in LCA-10103-A. This is simply the flux at which the quadratic fit to the data in the vicinity of full well lies below the linear extrapolation of the detector response from lower flux levels by a fraction g. """ a, b, c = f2Pars d, f = f1Pars x = (-np.sqrt((b + d*g - d)**2 - 4.*a*(c + f*g - f)) - b - d*g + d)/2./a return x class DetectorResponse: """Class to extract and cache parameters from linearity analysis of flat-pair data""" def __init__(self, flux): """C'tor from set of flux values Parameters ---------- flux : `np.array` The flux values from the photodiode (a.u.) """ self._flux = flux self._index = np.argsort(self._flux) def fullWell(self, Ne, maxNonLinearity=0.02, fracOffset=0.1, fitRange=(1e2, 5e4)): """Compute and return estimate of the full well charge in e- Parameters ---------- Ne : `numpy.array` Mean signals in electrons maxNonLinearity : `float` Maximum deviation from linearity to use define start of turn off fracOffset : `float` Fractional deviation for points used to fit turn off curve fitRange : `tuple` [`float`] Range of values of Ne to use for fitting linearity Returns ------- fullWellEst : `float` Full well estimate f1 : `np.poly1d` Fitted linearity curve Notes ----- The solution (provided by e2v) for the flux corresponding to the full-well capacity described in LCA-10103-A. This is simply the flux at which the quadratic fit to the data in the vicinity of full well lies below the linear extrapolation of the detector response from lower flux levels by a fraction `fracOffset`. """ results = self.linearity(Ne, fitRange=fitRange) _, f1Pars, Ne, flux = results[:4] f1 =
np.poly1d(f1Pars)
numpy.poly1d
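_fwcSolve above returns the flux at which the quadratic fit a*x**2 + b*x + c falls a fraction g below the linear extrapolation d*x + f, i.e. the non-trivial root of a*x**2 + b*x + c = (1 - g)*(d*x + f). A quick numerical check of that formula with illustrative coefficients:

import numpy as np

a, b, c = -1e-6, 1.05, 0.0  # illustrative quadratic-fit coefficients (f2Pars)
d, f = 1.0, 0.0             # illustrative linear-fit coefficients (f1Pars)
g = 0.1

x = (-np.sqrt((b + d*g - d)**2 - 4.*a*(c + f*g - f)) - b - d*g + d)/2./a
print(x)                                                # ~1.5e5 for these values
print(np.isclose(a*x**2 + b*x + c, (1 - g)*(d*x + f)))  # True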
import numpy as np import gym from gym import spaces from numpy.random import default_rng import pickle import os import math import matplotlib.pyplot as plt from PIL import Image from gym_flp import rewards from IPython.display import display, clear_output import anytree from anytree import Node, RenderTree, PreOrderIter, LevelOrderIter, LevelOrderGroupIter ''' v0.0.3 Significant changes: 08.09.2020: - Dicrete option removed from spaces; only Box allowed - Classes for quadtratic set covering and mixed integer programming (-ish) added - Episodic tasks: no more terminal states (exception: max. no. of trials reached) 12.10.2020: - mip added - fbs added ''' class qapEnv(gym.Env): metadata = {'render.modes': ['rgb_array', 'human']} def __init__(self, mode=None, instance=None): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) self.DistanceMatrices, self.FlowMatrices = pickle.load(open(os.path.join(__location__,'discrete', 'qap_matrices.pkl'), 'rb')) self.transport_intensity = None self.instance = instance self.mode = mode while not (self.instance in self.DistanceMatrices.keys() or self.instance in self.FlowMatrices.keys() or self.instance in ['Neos-n6', 'Neos-n7', 'Brewery']): print('Available Problem Sets:', self.DistanceMatrices.keys()) self.instance = input('Pick a problem:').strip() self.D = self.DistanceMatrices[self.instance] self.F = self.FlowMatrices[self.instance] # Determine problem size relevant for much stuff in here: self.n = len(self.D[0]) # Action space has two option: # 1) Define as Box with shape (1, 2) and allow values to range from 1 through self.n # 2) Define as Discrete with x = 1+((n^2-n)/2) actions (one half of matrix + 1 value from diagonal) --> Omit "+1" to obtain range from 0 to x! # self.action_space = spaces.Box(low=-1, high=6, shape=(1,2), dtype=np.int) # Doubles complexity of the problem as it allows the identical action (1,2) and (2,1) self.action_space = spaces.Discrete(int((self.n**2-self.n)*0.5)+1) # If you are using images as input, the input values must be in [0, 255] as the observation is normalized (dividing by 255 to have values in [0, 1]) when using CNN policies. if self.mode == "rgb_array": self.observation_space = spaces.Box(low = 0, high = 255, shape=(1, self.n, 3), dtype = np.uint8) # Image representation elif self.mode == 'human': self.observation_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.float32) self.states = {} # Create an empty dictonary where states and their respective reward will be stored for future reference self.actions = self.pairwiseExchange(self.n) # Initialize Environment with empty state and action self.action = None self.state = None self.internal_state = None #Initialize moving target to incredibly high value. To be updated if reward obtained is smaller. 
self.movingTargetReward = np.inf self.MHC = rewards.mhc.MHC() # Create an instance of class MHC in module mhc.py from package rewards def reset(self): state = default_rng().choice(range(1,self.n+1), size=self.n, replace=False) #MHC, self.TM = self.MHC.compute(self.D, self.F, state) self.internal_state = state.copy() return state def step(self, action): # Create new State based on action fromState = self.internal_state.copy() swap = self.actions[action] fromState[swap[0]-1], fromState[swap[1]-1] = fromState[swap[1]-1], fromState[swap[0]-1] newState = fromState.copy() #MHC, self.TM = self.MHC.compute(self.D, self.F, current_permutation) MHC, self.TM = self.MHC.compute(self.D, self.F, newState) if self.mode == 'human': self.states[tuple(fromState)] = MHC if self.movingTargetReward == np.inf: self.movingTargetReward = MHC #reward = self.movingTargetReward - MHC reward = -1 if MHC > self.movingTargetReward else 10 self.movingTargetReward = MHC if MHC < self.movingTargetReward else self.movingTargetReward if self.mode == "rgb_array": rgb = np.zeros((1,self.n,3), dtype=np.uint8) sources = np.sum(self.TM, axis = 1) sinks = np.sum(self.TM, axis = 0) R = np.array((fromState-np.min(fromState))/(np.max(fromState)-np.min(fromState))*255).astype(int) G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int) B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int) for i, s in enumerate(fromState): rgb[0:1, i] = [R[s-1], G[s-1], B[s-1]] newState = np.array(rgb) self.state = newState.copy() self.internal_state = fromState.copy() return newState, reward, False, {} def render(self, mode=None): if self.mode == "human": SCALE = 1 # Scale size of pixels for displayability img_h, img_w = SCALE, (len(self.internal_state))*SCALE data = np.zeros((img_h, img_w, 3), dtype=np.uint8) sources = np.sum(self.TM, axis = 1) sinks = np.sum(self.TM, axis = 0) R = np.array((self.internal_state-np.min(self.internal_state))/(np.max(self.internal_state)-np.min(self.internal_state))*255).astype(int) G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int) B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int) for i, s in enumerate(self.internal_state): data[0*SCALE:1*SCALE, i*SCALE:(i+1)*SCALE] = [R[s-1], G[s-1], B[s-1]] img = Image.fromarray(data, 'RGB') if self.mode == 'rgb_array': img = Image.fromarray(self.state, 'RGB') plt.imshow(img) plt.axis('off') plt.show() return img def close(self): pass def pairwiseExchange(self, x): actions = [(i,j) for i in range(1,x) for j in range(i+1,x+1) if not i==j] actions.append((1,1)) return actions class fbsEnv(gym.Env): metadata = {'render.modes': ['rgb_array', 'human']} def __init__(self, mode=None, instance = None): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb')) self.mode = mode self.instance = instance while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']): print('Available Problem Sets:', self.FlowMatrices.keys()) self.instance = input('Pick a problem:').strip() self.F = self.FlowMatrices[self.instance] self.n = self.problems[self.instance] self.AreaData = self.sizes[self.instance] # Obtain size data: FBS needs a length and area self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available 
area data and compute missing values if needed ''' Nomenclature: W --> Width of Plant (y coordinate) L --> Length of Plant (x coordinate) w --> Width of facility/bay (x coordinate) l --> Length of facility/bay (y coordinate) A --> Area of Plant a --> Area of facility Point of origin analoguous to numpy indexing (top left corner of plant) beta --> aspect ratios (as alpha is reserved for learning rate) ''' #if self.l is None or self.w is None: # self.l = np.random.randint(max(self.min_side_length, np.min(self.a)/self.min_side_length), max(self.min_side_length, np.min(self.a)/self.min_side_length), size=(self.n,)) # self.l = np.sqrt(self.A/self.aspect_ratio) # self.w = np.round(self.a/self.l) # Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5) if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys(): self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image self.W = int(self.LayoutWidths[self.instance]) else: self.A = np.sum(self.a) # Design a squared plant layout self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image self.W = self.L # Design a layout with l = 1,5 * w #self.L = divisor(int(self.A)) #self.W = self.A/self.L # These values need to be set manually, e.g. acc. to data from literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility will occupy alone. self.aspect_ratio = int(max(self.beta)) if not self.beta is None else 1 self.min_length = np.min(self.a) / self.L self.min_width = np.min(self.a) / self.W # We define minimum side lengths to be 1 in order to be displayable in array self.min_length = 1 self.min_width = 1 self.action_space = spaces.Discrete(5) #Taken from doi:10.1016/j.engappai.2020.103697 self.actions = {0: 'Randomize', 1: 'Bit Swap', 2: 'Bay Exchange', 3: 'Inverse', 4: 'Idle'} #self.state_space = spaces.Box(low=1, high = self.n, shape=(self.n,), dtype=np.int) self.bay_space = spaces.Box(low=0, high = 1, shape=(self.n,), dtype=np.int) # binary vector indicating bay breaks (i = 1 means last facility in bay) self.state = None self.permutation = None # Permutation of all n facilities, read from top to bottom self.bay = None self.done = False self.MHC = rewards.mhc.MHC() if self.mode == "rgb_array": self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation elif self.mode == "human": observation_low = np.tile(np.array([0,0,self.min_length,self.min_width],dtype=int), self.n) observation_high = np.tile(np.array([self.W, self.L, self.W, self.L], dtype=int), self.n) self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = int) # Vector representation of coordinates else: print("Nothing correct selected") def reset(self): # 1. Get a random permutation and bays self.permutation, self.bay = self.sampler() # 2. Last position in bay break vector has to be 1 by default. 
self.bay[-1] = 1 self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates() self.D = getDistances(self.fac_x, self.fac_y) reward, self.TM = self.MHC.compute(self.D, self.F, self.permutation[:]) self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n) return self.state def constructState(self, x, y, l, w, n): # Construct state state_prelim = np.zeros((4*n,), dtype=float) state_prelim[0::4] = y state_prelim[1::4] = x state_prelim[2::4] = w state_prelim[3::4] = l if self.mode == "human": self.state = np.array(state_prelim) elif self.mode == "rgb_array": self.state = self.ConvertCoordinatesToState(state_prelim) return self.state[:] def ConvertCoordinatesToState(self, state_prelim): data = np.zeros((self.observation_space.shape)) if self.mode == 'rgb_array' else np.zeros((self.W, self.L, 3),dtype=np.uint8) sources = np.sum(self.TM, axis = 1) sinks = np.sum(self.TM, axis = 0) R = np.array((self.permutation-np.min(self.permutation))/(np.max(self.permutation)-np.min(self.permutation))*255).astype(int) G = np.array((sources-np.min(sources))/(np.max(sources)-np.min(sources))*255).astype(int) B = np.array((sinks-np.min(sinks))/(np.max(sinks)-np.min(sinks))*255).astype(int) for x, p in enumerate(self.permutation): x_from = state_prelim[4*x+1] -0.5 * state_prelim[4*x+3] y_from = state_prelim[4*x+0] -0.5 * state_prelim[4*x+2] x_to = state_prelim[4*x+1] + 0.5 * state_prelim[4*x+3] y_to = state_prelim[4*x+0] + 0.5 * state_prelim[4*x+2] data[int(y_from):int(y_to), int(x_from):int(x_to)] = [R[p-1], G[p-1], B[p-1]] return np.array(data, dtype=np.uint8) def sampler(self): return default_rng().choice(range(1,self.n+1), size=self.n, replace=False), self.bay_space.sample() def getCoordinates(self): facilities = np.where(self.bay==1)[0] #Read all positions with a bay break bays = np.split(self.permutation, facilities[:-1]+1) lengths = np.zeros((len(self.permutation,))) widths = np.zeros((len(self.permutation,))) fac_x = np.zeros((len(self.permutation,))) fac_y = np.zeros((len(self.permutation,))) x = 0 start = 0 for b in bays: #Get the facilities that are located in the bay areas = self.a[b-1] #Get the area associated with the facilities end = start + len(areas) lengths[start:end] = np.sum(areas)/self.W #Calculate all facility widhts in bay acc. to Eq. (1) in https://doi.org/10.1016/j.eswa.2011.11.046 widths[start:end] = areas/lengths[start:end] fac_x[start:end] = lengths[start:end] * 0.5 + x x += np.sum(areas)/self.W y = np.ones(len(b)) ll = 0 for idx, l in enumerate(widths[start:end]): y[idx] = ll + 0.5*l ll += l fac_y[start:end] = y start = end return fac_x, fac_y, lengths, widths def step(self, action): a = self.actions[action] #k = np.count_nonzero(self.bay) fromState = np.array(self.permutation) # Get lists with a bay positions and facilities in each bay facilities = np.where(self.bay==1)[0] bay_breaks = np.split(self.bay, facilities[:-1]+1) # Load indiv. facilities into bay acc. to breaks; omit break on last position to avoid empty array in list. bays = np.split(self.permutation, facilities[:-1]+1) if a == 'Randomize': # Two vector elements randomly chosen are exchanged. Bay vector remains untouched. 
k = default_rng().choice(range(len(self.permutation-1)), size=1, replace=False) l = default_rng().choice(range(len(self.permutation-1)), size=1, replace=False) fromState[k], fromState[l] = fromState[l], fromState[k] self.permutation = np.array(fromState) elif a == 'Bit Swap': #One element randomly selected flips its value (1 to 0 or 0 to 1) j = default_rng().choice(range(len(self.bay-1)), size=1, replace=False) temp_bay = np.array(self.bay) # Make a copy of bay temp_bay[j] = 1 if temp_bay[j] == 0 else 0 self.bay = np.array(temp_bay) elif a == 'Bay Exchange': #Two bays are randomly selected and exchange facilities contained in them o = int(default_rng().choice(range(len(bays)), size=1, replace=False)) p = int(default_rng().choice(range(len(bays)), size=1, replace=False)) while p==o: # Make sure bays are not the same p = int(default_rng().choice(range(len(bays)), size=1, replace=False)) # Swap bays and break points accordingly: bays[o], bays[p] = bays[p], bays[o] bay_breaks[o], bay_breaks[p] = bay_breaks[p], bay_breaks[o] new_bay = np.concatenate(bay_breaks) new_state = np.concatenate(bays) # Make sure state is saved as copy self.permutation = np.array(new_state) self.bay = np.array(new_bay) elif a == 'Inverse': #Facilities present in a certain bay randomly chosen are inverted. q = default_rng().choice(range(len(bays))) bays[q] = np.flip(bays[q]) new_bay = np.concatenate(bay_breaks) new_state = np.concatenate(bays) # Make sure state is saved as copy self.permutation = np.array(new_state) self.bay = np.array(new_bay) elif a == 'Idle': pass # Keep old state self.fac_x, self.fac_y, self.fac_b, self.fac_h = self.getCoordinates() self.D = getDistances(self.fac_x, self.fac_y) reward, self.TM = self.MHC.compute(self.D, self.F, fromState) self.state = self.constructState(self.fac_x, self.fac_y, self.fac_b, self.fac_h, self.n) self.done = False #Always false for continuous task return self.state[:], reward, self.done, {} def render(self, mode= None): if self.mode== "human": # Mode 'human' needs intermediate step to convert state vector into image array data = self.ConvertCoordinatesToState(self.state[:]) img = Image.fromarray(data, 'RGB') if self.mode == "rgb_array": data = self.state[:] img = Image.fromarray(self.state, 'RGB') plt.imshow(img) plt.axis('off') plt.show() #23.02.21: Switched to data instead of img for testing video return img def close(self): pass #self.close() class ofpEnv(gym.Env): metadata = {'render.modes': ['rgb_array', 'human']} def __init__(self, mode = None, instance = None, distance = None, aspect_ratio = None, step_size = None, greenfield = None): __location__ = os.path.realpath(os.path.join(os.getcwd(), os.path.dirname(__file__))) self.problems, self.FlowMatrices, self.sizes, self.LayoutWidths, self.LayoutLengths = pickle.load(open(os.path.join(__location__,'continual', 'cont_instances.pkl'), 'rb')) self.mode = mode self.aspect_ratio = 2 if aspect_ratio is None else aspect_ratio self.step_size = 2 if step_size is None else step_size self.greenfield = greenfield self.instance = instance while not (self.instance in self.FlowMatrices.keys() or self.instance in ['Brewery']): print('Available Problem Sets:', self.FlowMatrices.keys()) self.instance = input('Pick a problem:').strip() self.F = self.FlowMatrices[self.instance] self.n = self.problems[self.instance] self.AreaData = self.sizes[self.instance] self.counter = 0 self.done = False self.pseudo_stability = 0 #If the reward has not improved in the last 200 steps, terminate the episode self.best_reward = None # Obtain size data: 
FBS needs a length and area self.beta, self.l, self.w, self.a, self.min_side_length = getAreaData(self.AreaData) #Investigate available area data and compute missing values if needed ''' Nomenclature: W --> Width of Plant (x coordinate) L --> Length of Plant (y coordinate) w --> Width of facility/bay (x coordinate) l --> Length of facility/bay (y coordinate) A --> Area of Plant a --> Area of facility Point of origin analoguous to numpy indexing (top left corner of plant) beta --> aspect ratios (as alpha is reserved for learning rate) ''' #if self.l is None or self.w is None: # self.l = np.random.randint(max(self.min_side_length, np.min(self.a)/self.min_side_length), max(self.min_side_length, np.min(self.a)/self.min_side_length), size=(self.n,)) # self.l = np.sqrt(self.A/self.aspect_ratio) # self.w = np.round(self.a/self.l) # Check if there are Layout Dimensions available, if not provide enough (sqrt(a)*1.5) if self.instance in self.LayoutWidths.keys() and self.instance in self.LayoutLengths.keys(): self.L = int(self.LayoutLengths[self.instance]) # We need both values to be integers for converting into image self.W = int(self.LayoutWidths[self.instance]) else: self.A = np.sum(self.a) # Design a squared plant layout self.L = int(round(math.sqrt(self.A),0)) # We want the plant dimensions to be integers to fit them into an image self.W = self.L if self.greenfield: self.L = 2*self.L self.W = 2*self.W # Design a layout with l = 1,5 * w #self.L = divisor(int(self.A)) #self.W = self.A/self.L # These values need to be set manually, e.g. acc. to data from literature. Following Eq. 1 in Ulutas & Kulturel-Konak (2012), the minimum side length can be determined by assuming the smallest facility will occupy alone. self.aspect_ratio = int(max(self.beta)) if not self.beta is None else self.aspect_ratio self.min_length = 1 self.min_width = 1 # 3. Define the possible actions: 5 for each box [toDo: plus 2 to manipulate sizes] + 1 idle action for all self.actions = {} for i in range(self.n): self.actions[0+(i)*5] = "up" self.actions[1+(i)*5] = "down" self.actions[2+(i)*5] = "right" self.actions[3+(i)*5] = "left" self.actions[4+(i)*5] = "rotate" self.actions[len(self.actions)] = "keep" # 4. Define actions space as Discrete Space self.action_space = spaces.Discrete(1+5*self.n) #5 actions for each facility: left, up, down, right, rotate + idle action across all # 5. Set some starting points self.reward = 0 self.state = None self.internal_state = None #Placeholder for state variable for internal manipulation in rgb_array mode if self.w is None or self.l is None: self.l = np.random.randint(self.min_side_length*self.aspect_ratio, np.min(self.a), size=(self.n, )) self.w = np.round(self.a/self.l) # 6. 
Set upper and lower bound for observation space # min x position can be point of origin (0,0) [coordinates map to upper left corner] # min y position can be point of origin (0,0) [coordinates map to upper left corner] # min width can be smallest area divided by its length # min lenght can be smallest width (above) multiplied by aspect ratio # max x pos can be bottom right edge of grid # max y pos can be bottpm right edge of grid if self.mode == "rgb_array": self.observation_space = spaces.Box(low = 0, high = 255, shape= (self.W, self.L,3), dtype = np.uint8) # Image representation elif self.mode == "human": #observation_low = np.tile(np.array([0,0,self.min_side_length, self.min_side_length],dtype=float), self.n) #observation_high = np.tile(np.array([self.L, self.W, max(self.l), max(self.w)], dtype=float), self.n) observation_low = np.zeros(4* self.n) observation_high = np.zeros(4* self.n) observation_low[0::4] = max(self.w) observation_low[1::4] = max(self.l) observation_low[2::4] = max(self.w) observation_low[3::4] = max(self.l) observation_high[0::4] = self.W - max(self.w) observation_high[1::4] = self.L - max(self.l) observation_high[2::4] = self.W - max(self.w) observation_high[3::4] = self.L - max(self.l) self.observation_space = spaces.Box(low=observation_low, high=observation_high, dtype = np.uint8) # Vector representation of coordinates else: print("Nothing correct selected") self.MHC = rewards.mhc.MHC() # Set Boundaries self.upper_bound = self.L- max(self.l)/2 self.lower_bound = 0 + max(self.l)/2 self.left_bound = 0 + max(self.w)/2 self.right_bound = self.W- max(self.w)/2 def reset(self): # Start with random x and y positions if self.mode == 'human': state_prelim = self.observation_space.sample() # Override length (l) and width (w) or facilities with data from instances state_prelim[2::4] = self.w state_prelim[3::4] = self.l self.D = getDistances(state_prelim[0::4], state_prelim[1::4]) self.internal_state = np.array(state_prelim) self.state = np.array(state_prelim) reward, self.TM = self.MHC.compute(self.D, self.F, np.array(range(1,self.n+1))) self.counter = 0 self.best_reward = reward self.reward = 0 elif self.mode == 'rgb_array': state_prelim = np.zeros((self.W, self.L, 3),dtype=np.uint8) x =
np.random.uniform(0, self.L, size=(self.n,))
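The environments above delegate their reward to rewards.mhc.MHC, which is imported but never shown in this listing. The following is only a hedged sketch of a material-handling-cost computation matching the call signature compute(D, F, permutation) -> (MHC, TM) used by reset() and step(); the package's actual implementation and re-indexing convention may differ.

import numpy as np

def compute_mhc(D, F, permutation):
    # Hypothetical sketch: flows re-ordered by the 1-indexed facility
    # permutation, multiplied elementwise with the distance matrix.
    idx = np.asarray(permutation) - 1   # back to 0-based indices
    P = np.eye(len(idx))[idx]           # permutation matrix
    TM = (P @ F @ P.T) * D              # transport intensity matrix
    MHC = np.triu(TM, k=1).sum()        # cost summed over unique facility pairs
    return MHC, TM

This mirrors how the step() methods consume the result: the row and column sums of TM feed the green and blue channels of the RGB state, and MHC drives the reward.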
import pandas as pd import numpy as np from scipy import interpolate import os, sys def pseudo_wells_model(zmin, zmax, sr, no_wells, zones={}, zones_ss={}, depth='Depth', zone_idx='Zone_idx', zone_col='Zone'): depth_log = np.arange(zmin, zmax, sr) pseudo_wells = pd.DataFrame(np.zeros((len(depth_log), no_wells))) pseudo_wells[depth] = depth_log zones_df = pd.DataFrame() zones_df[depth] = [float(i) for i in zones.values()] zones_df[zone_col] = zones.keys() pseudo_wells = pd.merge_asof(pseudo_wells, zones_df, on=depth) zone_dict = dict(zip(pseudo_wells[zone_col].unique(), [int(i) for i in range(len(pseudo_wells[zone_col].unique()))])) pseudo_wells[zone_idx] = pseudo_wells[zone_col].map(zone_dict) for zone in zones_ss.keys(): if zones_ss[zone] != 0: for well in range(no_wells): ntg = 100* (well) / (no_wells - 1) zone_list = pseudo_wells[pseudo_wells[zone_col] == zone][well].values locs = [] for i in range(zones_ss[zone]): if zones_ss[zone] > 1: locs.append(int((len(zone_list)-1) * i/(zones_ss[zone]-1))) else: locs.append(0) ones = 1 while (sum(zone_list)/len(zone_list)) < ntg/100: zone_list = 0 * zone_list disp = np.ones(ones) if zones_ss[zone] == 1: zone_list[0:ones] = disp else: for i in range(len(locs)): if i == 0: zone_list[0:ones] = disp elif i == len(locs)-1: zone_list[-ones:] = disp break else: insert = int(locs[i]-(len(disp)/2)) zone_list[insert:insert+len(disp):1] = disp ones += 1 ind = 0 for idx, row in pseudo_wells[pseudo_wells[zone_col] == zone].iterrows(): pseudo_wells.loc[row.name, well] = zone_list[ind] ind += 1 return pseudo_wells def dict_mapper(row, sand, shale, no_wells, zone_col): for i in range(no_wells): if row[i] == 0: row[i] = sand[row[zone_col]] else: row[i] = shale[row[zone_col]] return row def property_mapper(pseudo_wells, sand_density, shale_density, sand_vp, shale_vp, sand_vs, shale_vs, zone_col='Zone'): no_wells = len(pseudo_wells.columns) - 3 density = pseudo_wells.apply(dict_mapper, args=(sand_density, shale_density, no_wells, zone_col), axis=1) vp = pseudo_wells.apply(dict_mapper, args=(sand_vp, shale_vp, no_wells, zone_col), axis=1) vs = pseudo_wells.apply(dict_mapper, args=(sand_vs, shale_vs, no_wells, zone_col), axis=1) return density, vp, vs def time_model(pseudo_wells, density, vp, vs, wcs_file, skip=1, zones={}, time='Time', depth='Depth', zone_idx='Zone_idx', zone='Zone'): wcs = np.loadtxt(wcs_file, skiprows=skip) idx1 = (np.abs(np.asarray(wcs[:,0]) - pseudo_wells[depth].min())).argmin() idx2 = (np.abs(np.asarray(wcs[:,0]) - pseudo_wells[depth].max())).argmin() time_frame = np.arange(np.around(wcs[idx1,1], decimals=0), np.around(wcs[idx2,1], decimals=0), 2) depth_frame = time_frame * 0 for i in range(len(depth_frame)): idx = (np.abs(np.asarray(wcs[:,1]) - time_frame[i])).argmin() depth_frame[i] = np.around(wcs[idx,0], decimals=0) df_sampled = pd.DataFrame() df_sampled[depth] = depth_frame df_sampled[time] = time_frame dens_twt = pd.DataFrame() vp_twt = pd.DataFrame() vs_twt = pd.DataFrame() dens_twt[[time,depth]] = df_sampled[[time,depth]] vp_twt[[time,depth]] = df_sampled[[time,depth]] vs_twt[[time,depth]] = df_sampled[[time,depth]] for i, row in dens_twt.iterrows(): if i > 0: dens_ = density[(density[depth] >= dens_twt.loc[i-1, depth]) & (density[depth] < dens_twt.loc[i, depth])] vp_ = vp[(vp[depth] >= vp_twt.loc[i-1, depth]) & (vp[depth] <= vp_twt.loc[i, depth])] vs_ = vs[(vs[depth] >= vs_twt.loc[i-1, depth]) & (vs[depth] <= vs_twt.loc[i, depth])] for j in range(len(pseudo_wells.columns)-3): dens_twt.at[i, j] = dens_.mean()[j] dens_twt.at[i, zone_idx] 
= dens_.min()[zone_idx] dens_twt.at[i, zone] = list(zones.keys())[int(dens_.min()[zone_idx])] vp_twt.loc[i, j] = vp_.mean()[j] vp_twt.loc[i, zone_idx] = vp_.min()[zone_idx] vp_twt.loc[i, zone] = list(zones.keys())[int(vp_.min()[zone_idx])] vs_twt.loc[i, j] = vs_.mean()[j] vs_twt.loc[i, zone_idx] = vs_.min()[zone_idx] vs_twt.at[i, zone] = list(zones.keys())[int(vs_.min()[zone_idx])] dens_twt.loc[0,:] = dens_twt.loc[1,:] vp_twt.loc[0,:] = vp_twt.loc[1,:] vs_twt.loc[0,:] = vs_twt.loc[1,:] return df_sampled, dens_twt, vp_twt, vs_twt def shuey(df_sampled, vp_twt, vs_twt, dens_twt, no_wells, angles, time='Time', depth='Depth'): r0_twt = pd.DataFrame() G_twt = pd.DataFrame() F_twt = pd.DataFrame() r0_twt[[time,depth]] = df_sampled[[time,depth]] G_twt[[time,depth]] = df_sampled[[time,depth]] F_twt[[time,depth]] = df_sampled[[time,depth]] for i, row in df_sampled.iterrows(): if i > 0: for j in range(no_wells): dens_ = (dens_twt.loc[i,j] + dens_twt.loc[i-1,j]) / 2 vp_ = (vp_twt.loc[i,j] + vp_twt.loc[i-1,j]) / 2 vs_ = (vs_twt.loc[i,j] + vs_twt.loc[i-1,j]) / 2 dens_term = (dens_twt.loc[i,j] - dens_twt.loc[i-1,j]) / dens_ vp_term = (vp_twt.loc[i,j] - vp_twt.loc[i-1,j]) / vp_ vs_term = (vs_twt.loc[i,j] - vs_twt.loc[i-1,j]) / vs_ r0_twt.loc[i, j] = 0.5 * (vp_term + dens_term) G_twt.loc[i,j] = 0.5 * vp_term - 2 * (vp_twt.loc[i,j]/vs_twt.loc[i,j])**2 * (dens_term + 2 * vs_term) F_twt.loc[i,j] = 0.5 * vp_term r0_twt.loc[0,:] = r0_twt.loc[1,:] G_twt.loc[0,:] = G_twt.loc[1,:] F_twt.loc[0,:] = F_twt.loc[1,:] reflectivity = np.zeros((len(r0_twt), len(angles), no_wells)) for i in range(1, len(r0_twt)-1): for j in range(len(angles)): for k in range(no_wells): reflectivity[i-1,j,k] = r0_twt.loc[i,k] + G_twt.loc[i,k] * np.sin(np.radians(angles[j]))**2 reflectivity[i-1,j,k] += F_twt.loc[i,k]*(np.tan(np.radians(angles[j]))**2 - np.sin(np.radians(angles[j]))**2) return r0_twt, G_twt, F_twt, reflectivity def ricker(f, length=128, dt=2): length = length / 1000 dt = dt / 1000 t = np.arange(-length/2, (length-dt)/2, dt) y = (1.0 - 2.0*(np.pi**2)*(f**2)*(t**2)) * np.exp(-(np.pi**2)*(f**2)*(t**2)) return 1000*t, y def seis_convolve(reflectivity, wavelet, model, angles=None, time='Time', depth='Depth', zone_idx='Zone_idx', zone='Zone'): amplitude = 0 * reflectivity for j in range(reflectivity.shape[1]): for k in range(reflectivity.shape[2]): amplitude[:, j, k] = np.convolve(reflectivity[:,j,k], wavelet, mode='same') stack = pd.DataFrame() stack[[depth, time, zone, zone_idx]] = model[[depth, time, zone, zone_idx]] nears = pd.DataFrame() nears[[depth, time, zone, zone_idx]] = model[[depth, time, zone, zone_idx]] fars = pd.DataFrame() fars[[depth, time, zone, zone_idx]] = model[[depth, time, zone, zone_idx]] for k in range(len(model.columns)-3): stack[k] = 0 nears[k] = 0 fars[k] = 0 if angles is not None: for k in range(reflectivity.shape[2]): for j in range(reflectivity.shape[1]): stack[k] = stack[k] + amplitude[:,j,k] if angles[j] <= 10: nears[k] = nears[k] + amplitude[:,j,k] elif angles[j] >= 20: fars[k] = fars[k] + amplitude[:,j,k] return amplitude, stack, nears, fars def create_wedge(tmin, tmax, sr, no_traces, top, pad, thick, wedge_dens, wedge_vp, wedge_vs, outer_dens, outer_vp, outer_vs, start, stop, wavelet): time_log = np.arange(tmin, tmax, sr) wedge = pd.DataFrame(np.zeros((len(time_log), no_traces))) wedge['TWT'] = time_log for idx, row in wedge.iterrows(): if row.TWT > top and row.TWT < top+thick: for well in range(no_traces): if well > pad*no_traces: if row.TWT <= top + thick * (well - pad*no_traces) / 
((1-pad)*no_traces): wedge.loc[idx, well] = 1 avg_dens = 1/2 * (wedge_dens + outer_dens) avg_vp = 1/2 * (wedge_vp + outer_vp) avg_vs = 1/2 * (wedge_vs + outer_vs) dens_top = (wedge_dens - outer_dens) / avg_dens vp_top = (wedge_vp - outer_vp) / avg_vp vs_top = (wedge_vs - outer_vs) / avg_vs r0_top = 0.5 * (vp_top + dens_top) g_top = 0.5 * vp_top - 2 * (wedge_vp/wedge_vs)**2 * (dens_top + 2 * vs_top) f_top = 0.5 * vp_top dens_base = -dens_top vp_base = -vp_top vs_base = -vs_top r0_base = 0.5 * (vp_base + dens_base) g_base = 0.5 * vp_base - 2 * (outer_vp/outer_vs)**2 * (dens_base + 2 * vs_base) f_base = 0.5 * vp_base angles = np.arange(start, stop) reflectivity = np.zeros((len(wedge), len(angles), no_traces)) amplitude = 0 * reflectivity wedge_stk = np.zeros((len(wedge), no_traces)) for k in range(no_traces): if k> pad*no_traces: top_idx = wedge.loc[wedge[k]==1,k].index[0] base_idx = wedge.loc[wedge[k]==1,k].index[-1] for j in range(len(angles)): theta =
np.radians(angles[j])
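The reflectivity loops in shuey and create_wedge both evaluate the three-term form R(theta) = R0 + G*sin^2(theta) + F*(tan^2(theta) - sin^2(theta)). A small, self-contained usage of the ricker helper defined above, with purely illustrative numbers:

import numpy as np

# 25 Hz Ricker wavelet sampled at 2 ms, as returned by the helper above
t, w = ricker(f=25, length=128, dt=2)

# Single interface at 250 ms two-way time in a 500 ms trace
rc = np.zeros(251)
rc[125] = 0.1
trace = np.convolve(rc, w, mode='same')

# Three-term angle response for illustrative R0, G and F values
theta = np.radians(np.arange(0, 31))
r_theta = 0.10 + (-0.15) * np.sin(theta)**2 + 0.05 * (np.tan(theta)**2 - np.sin(theta)**2)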
""" This saves figures showing the results of various clustering algorithms, including AutoGMM, agglomerative clustering, k-means, naive GM (using single or mutiple inits), on the double-cigar dataset. It outputs the two subplots of Figure 7 in the paper. """ #%% import numpy as np import matplotlib.pyplot as plt from matplotlib import colors from joblib import Parallel, delayed import pandas as pd import seaborn as sns from sklearn.metrics import adjusted_rand_score from sklearn.cluster import KMeans, AgglomerativeClustering from sklearn.mixture import GaussianMixture import sys sys.path.append("/code") from autogmm import AutoGMMCluster sns.set_context("talk") #%% def AGM(X, y, n_clusters_range, n_init): agmm = AutoGMMCluster( max_components=n_clusters_range[-1], min_components=n_clusters_range[0], kmeans_n_init=n_init, init_params="kmeans", ) pred = agmm.fit_predict(X, y) return agmm.model_, agmm.ari_, pred def KM(X, y, n_clusters_range, n_init): ari_km = -1 for n_clus in np.unique(n_clusters_range): kmeans = KMeans(n_clusters=n_clus, n_init=n_init) pred_km = kmeans.fit_predict(X) ari_ = adjusted_rand_score(pred_km, y) if ari_ > ari_km: ari_km = ari_ best_model = kmeans return best_model, ari_km, best_model.predict(X) def Agg(X, y, n_clusters_range): af = ["euclidean", "manhattan", "cosine"] li = ["ward", "complete", "single", "average"] ari_ag = -1 for af_ in af: for li_ in li: if li_ == "ward" and af_ != "euclidean": continue else: for n_clus in np.unique(n_clusters_range): agg = AgglomerativeClustering( n_clusters=n_clus, affinity=af_, linkage=li_ ) pred_ag = agg.fit_predict(X) ari_ = adjusted_rand_score(pred_ag, y) if ari_ > ari_ag: ari_ag = ari_ best_model = agg best_pred = pred_ag return best_model, ari_ag, best_pred def naive_GMM(X, y, n_components_range, n_init, init_params="kmeans"): lowest_bic = np.infty cv_types = ["spherical", "tied", "diag", "full"] for cv_type in cv_types: for n_components in np.unique(n_components_range): gmm = GaussianMixture( n_components=n_components, covariance_type=cv_type, n_init=n_init, init_params=init_params, ) gmm.fit(X) bic_ = gmm.bic(X) if bic_ < lowest_bic: lowest_bic = bic_ best_gmm = gmm pred = best_gmm.predict(X) return best_gmm, adjusted_rand_score(pred, y), pred def exp(X, y, n_clusters_range, n_init=10): agm, ari_agm, pred_agm = AGM(X, y, n_clusters_range, n_init) gm, ari_gm, pred_gm = naive_GMM(X, y, n_clusters_range, 1) gm_m, ari_gm_multi, pred_gm_multi = naive_GMM(X, y, n_clusters_range, n_init) km, ari_km, pred_km = KM(X, y, n_clusters_range, n_init) ag, ari_ag, pred_ag = Agg(X, y, n_clusters_range) aris = [ari_agm, ari_gm, ari_gm_multi, ari_km, ari_ag] preds = [pred_agm, pred_gm, pred_gm_multi, pred_km, pred_ag] models = [agm, gm, gm_m, km, ag] return aris, preds, models #%% # plot predictions on single cigar dataset by various clustering algorithms np.random.seed(32) cov_1 = 1 cov_2 = 200 n = 100 mu_ = 3 mu1 = [-mu_, 0] mu2 = [mu_, 0] cov1 = np.array([[cov_1, 0], [0, cov_2]]) cov2 = np.array([[cov_1, 0], [0, cov_2]]) X1 = np.random.multivariate_normal(mu1, cov1, n) X2 =
np.random.multivariate_normal(mu2, cov2, n)
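This excerpt stops after drawing the two Gaussian "cigar" components; the rest of the script presumably stacks them and runs the comparison. A minimal continuation under that assumption (the cluster range and number of initialisations are illustrative, not necessarily the paper's settings):

X = np.concatenate([X1, X2])
y = np.concatenate([np.zeros(n), np.ones(n)])

aris, preds, models = exp(X, y, n_clusters_range=range(2, 6), n_init=10)
for name, ari in zip(["AutoGMM", "GM (1 init)", "GM (multi-init)", "k-means", "agglomerative"], aris):
    print(f"{name}: ARI = {ari:.3f}")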
import pytest import numpy as np from qibo import K from numpy.random import random as rand METHODS = [ ("to_complex", [rand(3), rand(3)]), ("cast", [rand(4)]), ("diag", [rand(4)]), ("copy", [rand(5)]), ("zeros_like", [rand((4, 4))]), ("ones_like", [rand((4, 4))]), ("real", [rand(5)]), ("imag", [rand(5)]), ("conj", [rand(5)]), ("exp", [rand(5)]), ("sin", [rand(5)]), ("cos", [rand(5)]), ("square", [rand(5)]), ("sqrt", [rand(5)]), ("log", [rand(5)]), ("abs", [rand(5)]), ("trace", [rand((6, 6))]), ("sum", [rand((4, 4))]), ("matmul", [rand((4, 6)), rand((6, 5))]), ("outer", [rand((4,)), rand((3,))]), ("eigvalsh", [rand((4, 4))]), ("less", [rand(10), rand(10)]), ("array_equal", [rand(10), rand(10)]), ("eye", [5]), ("zeros", [(2, 3)]), ("ones", [(2, 3)]), ("einsum", ["xy,axby->ab", rand((2, 2)), rand(4 * (2,))]), ("tensordot", [rand((2, 2)), rand(4 * (2,)), [[0, 1], [1, 3]]]), ("transpose", [rand((3, 3, 3)), [0, 2, 1]]), ("gather_nd", [rand((5, 3)), [0, 1]]), ("expm", [rand((4, 4))]), ("mod", [np.random.randint(10), np.random.randint(2, 10)]), ("right_shift", [np.random.randint(10), np.random.randint(10)]), ("kron", [rand((4, 4)), rand((5, 5))]), ("inv", [rand((4, 4))]), ("initial_state", [5, True]), ("initial_state", [3, False]) ] @pytest.mark.parametrize("method,args", METHODS) def test_backend_methods_list(tested_backend, target_backend, method, args): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) tested_func = getattr(tested_backend, method) target_func = getattr(target_backend, method) target_result = target_func(*args) if tested_backend.name == "qibojit" and tested_backend.op.get_backend() == "cupy": # pragma: no cover # cupy is not tested by CI! args = [tested_backend.cast(v) if isinstance(v, np.ndarray) else v for v in args] try: tested_result = tested_func(*args) except NotImplementedError: with pytest.raises(NotImplementedError): tested_func(*args) return tested_backend.assert_allclose(tested_result, target_result) @pytest.mark.parametrize("method,kwargs", [ ("reshape", {"x": rand(4), "shape": (2, 2)}), ("expand_dims", {"x": rand((5, 5)), "axis": 1}), ("range", {"start": 0, "finish": 10, "step": 2}), ("range", {"start": 0, "finish": 10, "step": 2, "dtype": "DTYPE"}), ("pow", {"base": rand(5), "exponent": 4}), ("sum", {"x": rand((4, 4, 3)), "axis": 1}), ("squeeze", {"x": rand((5, 1, 2)), "axis": 1}), ]) def test_backend_methods_dict(tested_backend, target_backend, method, kwargs): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) tested_func = getattr(tested_backend, method) target_func = getattr(target_backend, method) target_result = target_func(**kwargs) kwargs = {k: tested_backend.cast(v) if isinstance(v, np.ndarray) else v for k, v in kwargs.items()} tested_result = tested_func(**kwargs) tested_backend.assert_allclose(tested_result, target_result) def test_backend_concatenate(tested_backend, target_backend): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) tensors = [rand((2, 3)), rand((2, 4))] target_result = target_backend.concatenate(tensors, axis=1) tensors = [tested_backend.cast(x) for x in tensors] tested_result = tested_backend.concatenate(tensors, axis=1) tested_backend.assert_allclose(tested_result, target_result) def test_backend_stack(tested_backend, target_backend): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) tensors = [rand(4), rand(4)] 
target_result = target_backend.stack(tensors) tensors = [tested_backend.cast(x) for x in tensors] tested_result = tested_backend.stack(tensors) tested_backend.assert_allclose(tested_result, target_result) def test_backend_eigh(tested_backend, target_backend): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) m = rand((5, 5)) eigvals2, eigvecs2 = target_backend.eigh(m) eigvals1, eigvecs1 = tested_backend.eigh(tested_backend.cast(m)) tested_backend.assert_allclose(eigvals1, eigvals2) tested_backend.assert_allclose(np.abs(eigvecs1), np.abs(eigvecs2)) def test_backend_compile(tested_backend, target_backend): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) func = lambda x: x + 1 x = rand(5) cfunc1 = tested_backend.compile(func) cfunc2 = target_backend.compile(func) tested_backend.assert_allclose(cfunc1(x), cfunc2(x)) def test_backend_gather(tested_backend, target_backend): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) x = rand(5) target_result = target_backend.gather(x, indices=[0, 1, 3]) test_result = tested_backend.gather(x, indices=[0, 1, 3]) tested_backend.assert_allclose(test_result, target_result) x = rand((5, 5)) target_result = target_backend.gather(x, indices=[0, 1, 3], axis=-1) test_result = tested_backend.gather(x, indices=[0, 1, 3], axis=-1) tested_backend.assert_allclose(test_result, target_result) x = rand(3) target_result = target_backend.gather(x, condition=[True, False, True]) test_result = tested_backend.gather(x, condition=[True, False, True]) tested_backend.assert_allclose(test_result, target_result) with pytest.raises(ValueError): result1 = target_backend.gather(x) with pytest.raises(ValueError): result2 = tested_backend.gather(x) @pytest.mark.parametrize("return_counts", [False, True]) def test_backend_unique(tested_backend, target_backend, return_counts): tested_backend = K.construct_backend(tested_backend) target_backend = K.construct_backend(target_backend) x = np.random.randint(10, size=(10,)) target_result = target_backend.unique(x, return_counts=return_counts) test_result = tested_backend.unique(x, return_counts=return_counts) if return_counts: idx = np.argsort(test_result[0]) tested_backend.assert_allclose(np.array(test_result[0])[idx], target_result[0]) tested_backend.assert_allclose(
np.array(test_result[1])[idx], target_result[1])
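These tests are driven by tested_backend and target_backend fixtures supplied elsewhere (e.g. a conftest) as backend-name strings. A minimal standalone illustration of the same comparison pattern; treating "numpy" as an available backend name is an assumption here, not something this file guarantees:

import numpy as np
from qibo import K

backend = K.construct_backend("numpy")   # assumed backend name
x = np.random.random((4, 4))

# Compare a backend method against the plain NumPy result, as the tests above do
backend.assert_allclose(backend.trace(backend.cast(x)), np.trace(x))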
""" Proto Contains the following library code useful for prototyping robotic algorithms: - YAML - TIME - PROFILING - MATHS - LINEAR ALGEBRA - GEOMETRY - LIE - TRANSFORM - MATPLOTLIB - CV - DATASET - FILTER - STATE ESTIMATION - CALIBRATION - SIMULATION - UNITTESTS """ import os import sys import glob import math import time import copy import random import pickle import json import signal from datetime import datetime from pathlib import Path from enum import Enum from dataclasses import dataclass from collections import namedtuple from types import FunctionType from typing import Optional import cv2 import yaml import numpy as np import scipy import scipy.sparse import scipy.sparse.linalg import pandas import cProfile from pstats import Stats ############################################################################### # YAML ############################################################################### def load_yaml(yaml_path): """ Load YAML and return a named tuple """ assert yaml_path is not None assert yaml_path != "" # Load yaml_file yaml_data = None with open(yaml_path, "r") as stream: yaml_data = yaml.safe_load(stream) # Convert dict to named tuple data = json.dumps(yaml_data) # Python dict to json data = json.loads( data, object_hook=lambda d: namedtuple('X', d.keys())(*d.values())) return data ############################################################################### # TIME ############################################################################### def sec2ts(time_s): """ Convert time in seconds to timestamp """ return int(time_s * 1e9) def ts2sec(ts): """ Convert timestamp to seconds """ return ts * 1e-9 ############################################################################### # PROFILING ############################################################################### def profile_start(): """ Start profile """ prof = cProfile.Profile() prof.enable() return prof def profile_stop(prof, **kwargs): """ Stop profile """ key = kwargs.get('key', 'cumtime') N = kwargs.get('N', 10) stats = Stats(prof) stats.strip_dirs() stats.sort_stats(key).print_stats(N) ############################################################################### # MATHS ############################################################################### from math import pi from math import isclose from math import sqrt # from math import floor from math import cos from math import sin from math import tan from math import acos from math import atan def rmse(errors): """ Root Mean Squared Error """ return np.sqrt(np.mean(errors**2)) ############################################################################### # LINEAR ALGEBRA ############################################################################### from numpy import rad2deg from numpy import deg2rad from numpy import sinc from numpy import zeros from numpy import ones from numpy import eye from numpy import trace from numpy import diagonal as diag from numpy import cross from numpy.linalg import norm from numpy.linalg import inv from numpy.linalg import pinv from numpy.linalg import matrix_rank as rank from numpy.linalg import eig from numpy.linalg import svd from numpy.linalg import cholesky as chol def normalize(v): """ Normalize vector v """ n = np.linalg.norm(v) if n == 0: return v return v / n def full_rank(A): """ Check if matrix A is full rank """ return rank(A) == A.shape[0] def skew(vec): """ Form skew-symmetric matrix from vector `vec` """ assert vec.shape == (3,) or vec.shape == (3, 1) x, y, z = vec return np.array([[0.0, -z, y], [z, 
0.0, -x], [-y, x, 0.0]]) def skew_inv(A): """ Form skew symmetric matrix vector """ assert A.shape == (3, 3) return np.array([A[2, 1], A[0, 2], A[1, 0]]) def fwdsubs(L, b): """ Solving a lower triangular system by forward-substitution Input matrix L is an n by n lower triangular matrix Input vector b is n by 1 Output vector x is the solution to the linear system L x = b """ assert L.shape[1] == b.shape[0] n = b.shape[0] x = zeros((n, 1)) for j in range(n): if L[j, j] == 0: raise RuntimeError('Matrix is singular!') x[j] = b[j] / L[j, j] b[j:n] = b[j:n] - L[j:n, j] * x[j] def bwdsubs(U, b): """ Solving an upper triangular system by back-substitution Input matrix U is an n by n upper triangular matrix Input vector b is n by 1 Output vector x is the solution to the linear system U x = b """ assert U.shape[1] == b.shape[0] n = b.shape[0] x = zeros((n, 1)) for j in range(n): if U[j, j] == 0: raise RuntimeError('Matrix is singular!') x[j] = b[j] / U(j, j) b[0:j] = b[0:j] - U[0:j, j] * x[j] def solve_svd(A, b): """ Solve Ax = b with SVD """ # compute svd of A U, s, Vh = svd(A) # U diag(s) Vh x = b <=> diag(s) Vh x = U.T b = c c = np.dot(U.T, b) # diag(s) Vh x = c <=> Vh x = diag(1/s) c = w (trivial inversion of a diagonal matrix) w = np.dot(np.diag(1 / s), c) # Vh x = w <=> x = Vh.H w (where .H stands for hermitian = conjugate transpose) x = np.dot(Vh.conj().T, w) return x def schurs_complement(H, g, m, r, precond=False): """ Shurs-complement """ assert H.shape[0] == (m + r) # H = [Hmm, Hmr # Hrm, Hrr]; Hmm = H[0:m, 0:m] Hmr = H[0:m, m:] Hrm = Hmr.T Hrr = H[m:, m:] # g = [gmm, grr] gmm = g[1:] grr = g[m:] # Precondition Hmm if precond: Hmm = 0.5 * (Hmm + Hmm.T) # Invert Hmm assert rank(Hmm) == Hmm.shape[0] (w, V) = eig(Hmm) W_inv = diag(1.0 / w) Hmm_inv = V * W_inv * V.T # Schurs complement H_marg = Hrr - Hrm * Hmm_inv * Hmr g_marg = grr - Hrm * Hmm_inv * gmm return (H_marg, g_marg) def is_pd(B): """Returns true when input is positive-definite, via Cholesky""" try: _ = chol(B) return True except np.linalg.LinAlgError: return False def nearest_pd(A): """Find the nearest positive-definite matrix to input A Python/Numpy port of <NAME>'s `nearestSPD` MATLAB code [1], which credits [2]. [1] https://www.mathworks.com/matlabcentral/fileexchange/42885-nearestspd [2] <NAME>, "Computing a nearest symmetric positive semidefinite matrix" (1988): https://doi.org/10.1016/0024-3795(88)90223-6 """ B = (A + A.T) / 2 _, s, V = svd(B) H = np.dot(V.T, np.dot(np.diag(s), V)) A2 = (B + H) / 2 A3 = (A2 + A2.T) / 2 if is_pd(A3): return A3 spacing = np.spacing(np.linalg.norm(A)) # The above is different from [1]. It appears that MATLAB's `chol` Cholesky # decomposition will accept matrixes with exactly 0-eigenvalue, whereas # Numpy's will not. So where [1] uses `eps(mineig)` (where `eps` is Matlab # for `np.spacing`), we use the above definition. CAVEAT: our `spacing` # will be much larger than [1]'s `eps(mineig)`, since `mineig` is usually on # the order of 1e-16, and `eps(1e-16)` is on the order of 1e-34, whereas # `spacing` will, for Gaussian random matrixes of small dimension, be on # othe order of 1e-16. In practice, both ways converge, as the unit test # below suggests. 
I = np.eye(A.shape[0]) k = 1 while not is_pd(A3): mineig = np.min(np.real(np.linalg.eigvals(A3))) A3 += I * (-mineig * k**2 + spacing) k += 1 return A3 def matrix_equal(A, B, tol=1e-8, verbose=False): """ Compare matrices `A` and `B` """ diff = A - B if len(diff.shape) == 1: for i in range(diff.shape[0]): if abs(diff[i]) > tol: if verbose: print("A - B:") print(diff) elif len(diff.shape) == 2: for i in range(diff.shape[0]): for j in range(diff.shape[1]): if abs(diff[i, j]) > tol: if verbose: print("A - B:") print(diff) return False return True def plot_compare_matrices(title_A, A, title_B, B): """ Plot compare matrices """ plt.matshow(A) plt.colorbar() plt.title(title_A) plt.matshow(B) plt.colorbar() plt.title(title_B) diff = A - B plt.matshow(diff) plt.colorbar() plt.title(f"{title_A} - {title_B}") print(f"max_coeff({title_A}): {np.max(np.max(A))}") print(f"max_coeff({title_B}): {np.max(np.max(B))}") print(f"min_coeff({title_A}): {np.min(np.min(A))}") print(f"min_coeff({title_B}): {np.min(np.min(B))}") print(f"max_diff: {np.max(np.max(np.abs(diff)))}") plt.show() def check_jacobian(jac_name, fdiff, jac, threshold, verbose=False): """ Check jacobians """ # Check if numerical diff is same as analytical jacobian if matrix_equal(fdiff, jac, threshold): if verbose: print(f"Check [{jac_name}] passed!") return True # Failed - print differences if verbose: fdiff_minus_jac = fdiff - jac print(f"Check [{jac_name}] failed!") print("-" * 60) print("J_fdiff - J:") print(np.round(fdiff_minus_jac, 4)) print() print("J_fdiff:") print(np.round(fdiff, 4)) print() print("J:") print(np.round(jac, 4)) print() print("-" * 60) return False ############################################################################### # GEOMETRY ############################################################################### def lerp(x0, x1, t): """ Linear interpolation """ return (1.0 - t) * x0 + t * x1 def lerp2d(p0, p1, t): """ Linear interpolation 2D """ assert len(p0) == 2 assert len(p1) == 2 assert t <= 1.0 and t >= 0.0 x = lerp(p0[0], p1[0], t) y = lerp(p0[1], p1[1], t) return np.array([x, y]) def lerp3d(p0, p1, t): """ Linear interpolation 3D """ assert len(p0) == 3 assert len(p1) == 3 assert t <= 1.0 and t >= 0.0 x = lerp(p0[0], p1[0], t) y = lerp(p0[1], p1[1], t) z = lerp(p0[2], p1[2], t) return np.array([x, y, z]) def circle(r, theta): """ Circle """ x = r * cos(theta) y = r * sin(theta) return np.array([x, y]) def sphere(rho, theta, phi): """ Sphere Args: rho (float): Sphere radius theta (float): longitude [rad] phi (float): Latitude [rad] Returns: Point on sphere """ x = rho * sin(theta) * cos(phi) y = rho * sin(theta) * sin(phi) z = rho * cos(theta) return np.array([x, y, z]) def circle_loss(c, x, y): """ Calculate the algebraic distance between the data points and the mean circle centered at c=(xc, yc) """ xc, yc = c # Euclidean dist from center (xc, yc) Ri = np.sqrt((x - xc)**2 + (y - yc)**2) return Ri - Ri.mean() def find_circle(x, y): """ Find the circle center and radius given (x, y) data points using least squares. 
Returns `(circle_center, circle_radius, residual)` """ x_m = np.mean(x) y_m = np.mean(y) center_init = x_m, y_m center, _ = scipy.optimize.leastsq(circle_loss, center_init, args=(x, y)) xc, yc = center radii = np.sqrt((x - xc)**2 + (y - yc)**2) radius = radii.mean() residual = np.sum((radii - radius)**2) return (center, radius, residual) def bresenham(p0, p1): """ Bresenham's line algorithm is a line drawing algorithm that determines the points of an n-dimensional raster that should be selected in order to form a close approximation to a straight line between two points. It is commonly used to draw line primitives in a bitmap image (e.g. on a computer screen), as it uses only integer addition, subtraction and bit shifting, all of which are very cheap operations in standard computer architectures. Args: p0 (np.array): Starting point (x, y) p1 (np.array): End point (x, y) Returns: A list of (x, y) intermediate points from p0 to p1. """ x0, y0 = p0 x1, y1 = p1 dx = abs(x1 - x0) dy = abs(y1 - y0) sx = 1.0 if x0 < x1 else -1.0 sy = 1.0 if y0 < y1 else -1.0 err = dx - dy line = [] while True: line.append([x0, y0]) if x0 == x1 and y0 == y1: return line e2 = 2 * err if e2 > -dy: # overshot in the y direction err = err - dy x0 = x0 + sx if e2 < dx: # overshot in the x direction err = err + dx y0 = y0 + sy ############################################################################### # LIE ############################################################################### def Exp(phi): """ Exponential Map """ assert phi.shape == (3,) or phi.shape == (3, 1) if norm(phi) < 1e-3: C = eye(3) + skew(phi) return C phi_norm = norm(phi) phi_skew = skew(phi) phi_skew_sq = phi_skew @ phi_skew C = eye(3) C += (sin(phi_norm) / phi_norm) * phi_skew C += ((1 - cos(phi_norm)) / phi_norm**2) * phi_skew_sq return C def Log(C): """ Logarithmic Map """ assert C.shape == (3, 3) # phi = acos((trace(C) - 1) / 2); # u = skew_inv(C - C') / (2 * sin(phi)); # rvec = phi * u; C00, C01, C02 = C[0, :] C10, C11, C12 = C[1, :] C20, C21, C22 = C[2, :] tr = np.trace(C) rvec = None if tr + 1.0 < 1e-10: if abs(C22 + 1.0) > 1.0e-5: x = np.array([C02, C12, 1.0 + C22]) rvec = (pi / np.sqrt(2.0 + 2.0 * C22)) @ x elif abs(C11 + 1.0) > 1.0e-5: x = np.array([C01, 1.0 + C11, C21]) rvec = (pi / np.sqrt(2.0 + 2.0 * C11)) @ x else: x = np.array([1.0 + C00, C10, C20]) rvec = (pi / np.sqrt(2.0 + 2.0 * C00)) @ x else: tr_3 = tr - 3.0 # always negative if tr_3 < -1e-7: theta = acos((tr - 1.0) / 2.0) magnitude = theta / (2.0 * sin(theta)) else: # when theta near 0, +-2pi, +-4pi, etc. (trace near 3.0) # use Taylor expansion: theta \approx 1/2-(t-3)/12 + O((t-3)^2) # see https://github.com/borglab/gtsam/issues/746 for details magnitude = 0.5 - tr_3 / 12.0 rvec = magnitude @ np.array([C21 - C12, C02 - C20, C10 - C01]) return rvec def Jr(theta): """ Right jacobian Forster, Christian, et al. "IMU preintegration on manifold for efficient visual-inertial maximum-a-posteriori estimation." Georgia Institute of Technology, 2015. 
[Page 2, Equation (8)] """ theta_norm = norm(theta) theta_norm_sq = theta_norm * theta_norm theta_norm_cube = theta_norm_sq * theta_norm theta_skew = skew(theta) theta_skew_sq = theta_skew @ theta_skew J = eye(3) J -= ((1 - cos(theta_norm)) / theta_norm_sq) * theta_skew J += (theta_norm - sin(theta_norm)) / (theta_norm_cube) * theta_skew_sq return J def Jr_inv(theta): """ Inverse right jacobian """ theta_norm = norm(theta) theta_norm_sq = theta_norm * theta_norm theta_skew = skew(theta) theta_skew_sq = theta_skew @ theta_skew A = 1.0 / theta_norm_sq B = (1 + cos(theta_norm)) / (2 * theta_norm * sin(theta_norm)) J = eye(3) J += 0.5 * theta_skew J += (A - B) * theta_skew_sq return J def boxplus(C, alpha): """ Box plus """ # C_updated = C [+] alpha C_updated = C * Exp(alpha) return C_updated def boxminus(C_a, C_b): """ Box minus """ # alpha = C_a [-] C_b alpha = Log(inv(C_b) * C_a) return alpha ############################################################################### # TRANSFORM ############################################################################### def homogeneous(p): """ Turn point `p` into its homogeneous form """ return np.array([*p, 1.0]) def dehomogeneous(hp): """ De-homogenize point `hp` into `p` """ return hp[0:3] def rotx(theta): """ Form rotation matrix around x axis """ row0 = [1.0, 0.0, 0.0] row1 = [0.0, cos(theta), -sin(theta)] row2 = [0.0, sin(theta), cos(theta)] return np.array([row0, row1, row2]) def roty(theta): """ Form rotation matrix around y axis """ row0 = [cos(theta), 0.0, sin(theta)] row1 = [0.0, 1.0, 0.0] row2 = [-sin(theta), 0.0, cos(theta)] return np.array([row0, row1, row2]) def rotz(theta): """ Form rotation matrix around z axis """ row0 = [cos(theta), -sin(theta), 0.0] row1 = [sin(theta), cos(theta), 0.0] row2 = [0.0, 0.0, 1.0] return np.array([row0, row1, row2]) def aa2quat(angle, axis): """ Convert angle-axis to quaternion Source: <NAME>. "Quaternion kinematics for the error-state Kalman filter." arXiv preprint arXiv:1711.02508 (2017). [Page 22, eq (101), "Quaternion and rotation vector"] """ ax, ay, az = axis qw = cos(angle / 2.0) qx = ax * sin(angle / 2.0) qy = ay * sin(angle / 2.0) qz = az * sin(angle / 2.0) return np.array([qw, qx, qy, qz]) def rvec2rot(rvec): """ Rotation vector to rotation matrix """ # If small rotation theta = sqrt(rvec @ rvec) # = norm(rvec), but faster eps = 1e-8 if theta < eps: return skew(rvec) # Convert rvec to rotation matrix rvec = rvec / theta x, y, z = rvec c = cos(theta) s = sin(theta) C = 1 - c xs = x * s ys = y * s zs = z * s xC = x * C yC = y * C zC = z * C xyC = x * yC yzC = y * zC zxC = z * xC row0 = [x * xC + c, xyC - zs, zxC + ys] row1 = [xyC + zs, y * yC + c, yzC - xs] row2 = [zxC - ys, yzC + xs, z * zC + c] return np.array([row0, row1, row2]) def vecs2axisangle(u, v): """ From 2 vectors form an axis-angle vector """ angle = math.acos(u.T * v) ax = normalize(np.cross(u, v)) return ax * angle def euler321(yaw, pitch, roll): """ Convert yaw, pitch, roll in radians to a 3x3 rotation matrix. Source: Kuipers, <NAME>. Quaternions and Rotation Sequences: A Primer with Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J: Princeton University Press, 1999. Print. 
[Page 85-86, "The Aerospace Sequence"] """ psi = yaw theta = pitch phi = roll cpsi = cos(psi) spsi = sin(psi) ctheta = cos(theta) stheta = sin(theta) cphi = cos(phi) sphi = sin(phi) C11 = cpsi * ctheta C21 = spsi * ctheta C31 = -stheta C12 = cpsi * stheta * sphi - spsi * cphi C22 = spsi * stheta * sphi + cpsi * cphi C32 = ctheta * sphi C13 = cpsi * stheta * cphi + spsi * sphi C23 = spsi * stheta * cphi - cpsi * sphi C33 = ctheta * cphi return np.array([[C11, C12, C13], [C21, C22, C23], [C31, C32, C33]]) def euler2quat(yaw, pitch, roll): """ Convert yaw, pitch, roll in radians to a quaternion. Source: Kuipers, <NAME>. Quaternions and Rotation Sequences: A Primer with Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J: Princeton University Press, 1999. Print. [Page 166-167, "Euler Angles to Quaternion"] """ psi = yaw # Yaw theta = pitch # Pitch phi = roll # Roll c_phi = cos(phi / 2.0) c_theta = cos(theta / 2.0) c_psi = cos(psi / 2.0) s_phi = sin(phi / 2.0) s_theta = sin(theta / 2.0) s_psi = sin(psi / 2.0) qw = c_psi * c_theta * c_phi + s_psi * s_theta * s_phi qx = c_psi * c_theta * s_phi - s_psi * s_theta * c_phi qy = c_psi * s_theta * c_phi + s_psi * c_theta * s_phi qz = s_psi * c_theta * c_phi - c_psi * s_theta * s_phi mag = sqrt(qw**2 + qx**2 + qy**2 + qz**2) return np.array([qw / mag, qx / mag, qy / mag, qz / mag]) def quat2euler(q): """ Convert quaternion to euler angles (yaw, pitch, roll). Source: Kuipers, <NAME>. Quaternions and Rotation Sequences: A Primer with Applications to Orbits, Aerospace, and Virtual Reality. Princeton, N.J: Princeton University Press, 1999. Print. [Page 168, "Quaternion to Euler Angles"] """ qw, qx, qy, qz = q m11 = (2 * qw**2) + (2 * qx**2) - 1 m12 = 2 * (qx * qy + qw * qz) m13 = 2 * qx * qz - 2 * qw * qy m23 = 2 * qy * qz + 2 * qw * qx m33 = (2 * qw**2) + (2 * qz**2) - 1 psi = math.atan2(m12, m11) theta = math.asin(-m13) phi = math.atan2(m23, m33) ypr = np.array([psi, theta, phi]) return ypr def quat2rot(q): """ Convert quaternion to 3x3 rotation matrix. Source: <NAME>. "A tutorial on se (3) transformation parameterizations and on-manifold optimization." University of Malaga, Tech. Rep 3 (2010): 6. [Page 18, Equation (2.20)] """ assert len(q) == 4 qw, qx, qy, qz = q qx2 = qx**2 qy2 = qy**2 qz2 = qz**2 qw2 = qw**2 # Homogeneous form C11 = qw2 + qx2 - qy2 - qz2 C12 = 2.0 * (qx * qy - qw * qz) C13 = 2.0 * (qx * qz + qw * qy) C21 = 2.0 * (qx * qy + qw * qz) C22 = qw2 - qx2 + qy2 - qz2 C23 = 2.0 * (qy * qz - qw * qx) C31 = 2.0 * (qx * qz - qw * qy) C32 = 2.0 * (qy * qz + qw * qx) C33 = qw2 - qx2 - qy2 + qz2 return np.array([[C11, C12, C13], [C21, C22, C23], [C31, C32, C33]]) def rot2euler(C): """ Convert 3x3 rotation matrix to euler angles (yaw, pitch, roll). """ assert C.shape == (3, 3) q = rot2quat(C) return quat2euler(q) def rot2quat(C): """ Convert 3x3 rotation matrix to quaternion. 
""" assert C.shape == (3, 3) m00 = C[0, 0] m01 = C[0, 1] m02 = C[0, 2] m10 = C[1, 0] m11 = C[1, 1] m12 = C[1, 2] m20 = C[2, 0] m21 = C[2, 1] m22 = C[2, 2] tr = m00 + m11 + m22 if tr > 0: S = sqrt(tr + 1.0) * 2.0 # S=4*qw qw = 0.25 * S qx = (m21 - m12) / S qy = (m02 - m20) / S qz = (m10 - m01) / S elif ((m00 > m11) and (m00 > m22)): S = sqrt(1.0 + m00 - m11 - m22) * 2.0 # S=4*qx qw = (m21 - m12) / S qx = 0.25 * S qy = (m01 + m10) / S qz = (m02 + m20) / S elif m11 > m22: S = sqrt(1.0 + m11 - m00 - m22) * 2.0 # S=4*qy qw = (m02 - m20) / S qx = (m01 + m10) / S qy = 0.25 * S qz = (m12 + m21) / S else: S = sqrt(1.0 + m22 - m00 - m11) * 2.0 # S=4*qz qw = (m10 - m01) / S qx = (m02 + m20) / S qy = (m12 + m21) / S qz = 0.25 * S return quat_normalize(np.array([qw, qx, qy, qz])) # QUATERNION ################################################################## def quat_norm(q): """ Returns norm of a quaternion """ qw, qx, qy, qz = q return sqrt(qw**2 + qx**2 + qy**2 + qz**2) def quat_normalize(q): """ Normalize quaternion """ n = quat_norm(q) qw, qx, qy, qz = q return np.array([qw / n, qx / n, qy / n, qz / n]) def quat_conj(q): """ Return conjugate quaternion """ qw, qx, qy, qz = q q_conj = np.array([qw, -qx, -qy, -qz]) return q_conj def quat_inv(q): """ Invert quaternion """ return quat_conj(q) def quat_left(q): """ Quaternion left product matrix """ qw, qx, qy, qz = q row0 = [qw, -qx, -qy, -qz] row1 = [qx, qw, -qz, qy] row2 = [qy, qz, qw, -qx] row3 = [qz, -qy, qx, qw] return np.array([row0, row1, row2, row3]) def quat_right(q): """ Quaternion right product matrix """ qw, qx, qy, qz = q row0 = [qw, -qx, -qy, -qz] row1 = [qx, qw, qz, -qy] row2 = [qy, -qz, qw, qx] row3 = [qz, qy, -qx, qw] return np.array([row0, row1, row2, row3]) def quat_lmul(p, q): """ Quaternion left multiply """ assert len(p) == 4 assert len(q) == 4 lprod = quat_left(p) return lprod @ q def quat_rmul(p, q): """ Quaternion right multiply """ assert len(p) == 4 assert len(q) == 4 rprod = quat_right(q) return rprod @ p def quat_mul(p, q): """ Quaternion multiply p * q """ return quat_lmul(p, q) def quat_omega(w): """ Quaternion omega matrix """ return np.block([[-1.0 * skew(w), w], [w.T, 0.0]]) def quat_delta(dalpha): """ Form quaternion from small angle rotation vector dalpha """ half_norm = 0.5 * norm(dalpha) scalar = cos(half_norm) vector = sinc(half_norm) * 0.5 * dalpha dqw = scalar dqx, dqy, dqz = vector dq = np.array([dqw, dqx, dqy, dqz]) return dq def quat_integrate(q_k, w, dt): """ <NAME>. "Quaternion kinematics for the error-state Kalman filter." arXiv preprint arXiv:1711.02508 (2017). [Section 4.6.1 Zeroth-order integration, p.47] """ w_norm = norm(w) q_scalar = 0.0 q_vec = np.array([0.0, 0.0, 0.0]) if w_norm > 1e-5: q_scalar = cos(w_norm * dt * 0.5) q_vec = w / w_norm * sin(w_norm * dt * 0.5) else: q_scalar = 1.0 q_vec = [0.0, 0.0, 0.0] q_kp1 = quat_mul(q_k, np.array([q_scalar, q_vec])) return q_kp1 def quat_slerp(q_i, q_j, t): """ Quaternion Slerp `q_i` and `q_j` with parameter `t` """ assert len(q_i) == 4 assert len(q_j) == 4 assert t >= 0.0 and t <= 1.0 # Compute the cosine of the angle between the two vectors. dot_result = q_i @ q_j # If the dot product is negative, slerp won't take # the shorter path. Note that q_j and -q_j are equivalent when # the negation is applied to all four components. Fix by # reversing one quaternion. 
if dot_result < 0.0: q_j = -q_j dot_result = -dot_result DOT_THRESHOLD = 0.9995 if dot_result > DOT_THRESHOLD: # If the inputs are too close for comfort, linearly interpolate # and normalize the result. return q_i + t * (q_j - q_i) # Since dot is in range [0, DOT_THRESHOLD], acos is safe theta_0 = acos(dot_result) # theta_0 = angle between input vectors theta = theta_0 * t # theta = angle between q_i and result sin_theta = sin(theta) # compute this value only once sin_theta_0 = sin(theta_0) # compute this value only once # == sin(theta_0 - theta) / sin(theta_0) s0 = cos(theta) - dot_result * sin_theta / sin_theta_0 s1 = sin_theta / sin_theta_0 return (s0 * q_i) + (s1 * q_j) # TF ########################################################################## def tf(rot, trans): """ Form 4x4 homogeneous transformation matrix from rotation `rot` and translation `trans`. Where the rotation component `rot` can be a rotation matrix or a quaternion. """ C = None if rot.shape == (4,) or rot.shape == (4, 1): C = quat2rot(rot) elif rot.shape == (3, 3): C = rot else: raise RuntimeError("Invalid rotation!") T = np.eye(4, 4) T[0:3, 0:3] = C T[0:3, 3] = trans return T def tf_rot(T): """ Return rotation matrix from 4x4 homogeneous transform """ assert T.shape == (4, 4) return T[0:3, 0:3] def tf_quat(T): """ Return quaternion from 4x4 homogeneous transform """ assert T.shape == (4, 4) return rot2quat(tf_rot(T)) def tf_trans(T): """ Return translation vector from 4x4 homogeneous transform """ assert T.shape == (4, 4) return T[0:3, 3] def tf_inv(T): """ Invert 4x4 homogeneous transform """ assert T.shape == (4, 4) return np.linalg.inv(T) def tf_point(T, p): """ Transform 3d point """ assert T.shape == (4, 4) assert p.shape == (3,) or p.shape == (3, 1) hpoint = np.array([p[0], p[1], p[2], 1.0]) return (T @ hpoint)[0:3] def tf_hpoint(T, hp): """ Transform 3d point """ assert T.shape == (4, 4) assert hp.shape == (4,) or hp.shape == (4, 1) return (T @ hp)[0:3] def tf_decompose(T): """ Decompose into rotation matrix and translation vector""" assert T.shape == (4, 4) C = tf_rot(T) r = tf_trans(T) return (C, r) def tf_lerp(pose_i, pose_j, t): """ Interpolate pose `pose_i` and `pose_j` with parameter `t` """ assert pose_i.shape == (4, 4) assert pose_j.shape == (4, 4) assert t >= 0.0 and t <= 1.0 # Decompose start pose r_i = tf_trans(pose_i) q_i = tf_quat(pose_i) # Decompose end pose r_j = tf_trans(pose_j) q_j = tf_quat(pose_j) # Interpolate translation and rotation r_lerp = lerp(r_i, r_j, t) q_lerp = quat_slerp(q_i, q_j, t) return tf(q_lerp, r_lerp) def tf_perturb(T, i, step_size): """ Perturb transformation matrix """ assert T.shape == (4, 4) assert i >= 0 and i <= 5 # Setup C = tf_rot(T) r = tf_trans(T) if i >= 0 and i <= 2: # Perturb translation r[i] += step_size elif i >= 3 and i <= 5: # Perturb rotation rvec = np.array([0.0, 0.0, 0.0]) rvec[i - 3] = step_size q = rot2quat(C) dq = quat_delta(rvec) q_diff = quat_mul(q, dq) q_diff = quat_normalize(q_diff) C = quat2rot(q_diff) return tf(C, r) def tf_update(T, dx): """ Update transformation matrix """ assert T.shape == (4, 4) q = tf_quat(T) r = tf_trans(T) dr = dx[0:3] dalpha = dx[3:6] dq = quat_delta(dalpha) return tf(quat_mul(q, dq), r + dr) ############################################################################### # MATPLOTLIB ############################################################################### import matplotlib.pylab as plt def plot_set_axes_equal(ax): """ Make axes of 3D plot have equal scale so that spheres appear as spheres, cubes as cubes, etc.. 
This is one possible solution to Matplotlib's ax.set_aspect('equal') and ax.axis('equal') not working for 3D. Input ax: a matplotlib axis, e.g., as output from plt.gca(). """ x_limits = ax.get_xlim3d() y_limits = ax.get_ylim3d() z_limits = ax.get_zlim3d() x_range = abs(x_limits[1] - x_limits[0]) x_middle = np.mean(x_limits) y_range = abs(y_limits[1] - y_limits[0]) y_middle = np.mean(y_limits) z_range = abs(z_limits[1] - z_limits[0]) z_middle = np.mean(z_limits) # The plot bounding box is a sphere in the sense of the infinity # norm, hence I call half the max range the plot radius. plot_radius = 0.5 * max([x_range, y_range, z_range]) ax.set_xlim3d([x_middle - plot_radius, x_middle + plot_radius]) ax.set_ylim3d([y_middle - plot_radius, y_middle + plot_radius]) ax.set_zlim3d([z_middle - plot_radius, z_middle + plot_radius]) def plot_tf(ax, T, **kwargs): """ Plot 4x4 Homogeneous Transform Args: ax (matplotlib.axes.Axes): Plot axes object T (np.array): 4x4 homogeneous transform (i.e. Pose in the world frame) Keyword args: size (float): Size of the coordinate-axes linewidth (float): Thickness of the coordinate-axes name (str): Frame name name_offset (np.array or list): Position offset for displaying the frame's name fontsize (float): Frame font size fontweight (float): Frame font weight """ assert T.shape == (4, 4) size = kwargs.get('size', 1) # linewidth = kwargs.get('linewidth', 3) name = kwargs.get('name', None) name_offset = kwargs.get('name_offset', [0, 0, -0.01]) fontsize = kwargs.get('fontsize', 10) fontweight = kwargs.get('fontweight', 'bold') colors = kwargs.get('colors', ['r-', 'g-', 'b-']) origin = tf_trans(T) lx = tf_point(T, np.array([size, 0.0, 0.0])) ly = tf_point(T, np.array([0.0, size, 0.0])) lz = tf_point(T, np.array([0.0, 0.0, size])) # Draw x-axis px = [origin[0], lx[0]] py = [origin[1], lx[1]] pz = [origin[2], lx[2]] ax.plot(px, py, pz, colors[0]) # Draw y-axis px = [origin[0], ly[0]] py = [origin[1], ly[1]] pz = [origin[2], ly[2]] ax.plot(px, py, pz, colors[1]) # Draw z-axis px = [origin[0], lz[0]] py = [origin[1], lz[1]] pz = [origin[2], lz[2]] ax.plot(px, py, pz, colors[2]) # Draw label if name is not None: x = origin[0] + name_offset[0] y = origin[1] + name_offset[1] z = origin[2] + name_offset[2] ax.text(x, y, z, name, fontsize=fontsize, fontweight=fontweight) def plot_xyz(title, data, key_time, key_x, key_y, key_z, ylabel): """ Plot XYZ plot Args: title (str): Plot title data (Dict[str, pandas.DataFrame]): Plot data key_time (str): Dictionary key for timestamps key_x (str): Dictionary key x-axis key_y (str): Dictionary key y-axis key_z (str): Dictionary key z-axis ylabel (str): Y-axis label """ axis = ['x', 'y', 'z'] colors = ["r", "g", "b"] keys = [key_x, key_y, key_z] line_styles = ["--", "-", "x"] # Time time_data = {} for label, series_data in data.items(): ts0 = series_data[key_time][0] time_data[label] = ts2sec(series_data[key_time].to_numpy() - ts0) # Plot subplots plt.figure() for i in range(3): plt.subplot(3, 1, i + 1) for (label, series_data), line in zip(data.items(), line_styles): line_style = colors[i] + line x_data = time_data[label] y_data = series_data[keys[i]].to_numpy() plt.plot(x_data, y_data, line_style, label=label) plt.xlabel("Time [s]") plt.ylabel(ylabel) plt.legend(loc=0) plt.title(f"{title} in {axis[i]}-axis") plt.subplots_adjust(hspace=0.65) ############################################################################### # CV ############################################################################### # UTILS 
####################################################################### def lookat(cam_pos, target_pos, **kwargs): """ Form look at matrix """ up_axis = kwargs.get('up_axis', np.array([0.0, -1.0, 0.0])) assert len(cam_pos) == 3 assert len(target_pos) == 3 assert len(up_axis) == 3 # Note: If we were using OpenGL the cam_dir would be the opposite direction, # since in OpenGL the camera forward is -z. In robotics however our camera is # +z forward. cam_z = normalize(target_pos - cam_pos) cam_x = normalize(cross(up_axis, cam_z)) cam_y = cross(cam_z, cam_x) T_WC = zeros((4, 4)) T_WC[0:3, 0] = cam_x.T T_WC[0:3, 1] = cam_y.T T_WC[0:3, 2] = cam_z.T T_WC[0:3, 3] = cam_pos T_WC[3, 3] = 1.0 return T_WC # GEOMETRY #################################################################### def linear_triangulation(P_i, P_j, z_i, z_j): """ Linear triangulation This function is used to triangulate a single 3D point observed by two camera frames (be it in time with the same camera, or two different cameras with known extrinsics). Args: P_i (np.array): First camera 3x4 projection matrix P_j (np.array): Second camera 3x4 projection matrix z_i (np.array): First keypoint measurement z_j (np.array): Second keypoint measurement Returns: p_Ci (np.array): 3D point w.r.t first camera """ # First three rows of P_i and P_j P1T_i = P_i[0, :] P2T_i = P_i[1, :] P3T_i = P_i[2, :] P1T_j = P_j[0, :] P2T_j = P_j[1, :] P3T_j = P_j[2, :] # Image point from the first and second frame x_i, y_i = z_i x_j, y_j = z_j # Form the A matrix of AX = 0 A = zeros((4, 4)) A[0, :] = x_i * P3T_i - P1T_i A[1, :] = y_i * P3T_i - P2T_i A[2, :] = x_j * P3T_j - P1T_j A[3, :] = y_j * P3T_j - P2T_j # Use SVD to solve AX = 0 (_, _, Vh) = svd(A.T @ A) hp = Vh.T[:, -1] # Get the best result from SVD (last column of V) hp = hp / hp[-1] # Normalize the homogeneous 3D point p = hp[0:3] # Return only the first three components (x, y, z) return p # PINHOLE ##################################################################### def focal_length(image_width, fov_deg): """ Estimated focal length based on `image_width` and field of fiew `fov_deg` in degrees. 
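
  For example, a hypothetical 640 pixel wide image with a 90 degree horizontal
  field of view gives a focal length of roughly 320 pixels:

    fx = focal_length(640, 90.0)  # (640 / 2) / tan(45 deg) = 320.0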
""" return (image_width / 2.0) / tan(deg2rad(fov_deg / 2.0)) def pinhole_K(params): """ Form camera matrix K """ fx, fy, cx, cy = params return np.array([[fx, 0.0, cx], [0.0, fy, cy], [0.0, 0.0, 1.0]]) def pinhole_P(params, T_WC): """ Form 3x4 projection matrix P """ K = pinhole_K(params) T_CW = inv(T_WC) C = tf_rot(T_CW) r = tf_trans(T_CW) P = zeros((3, 4)) P[0:3, 0:3] = C P[0:3, 3] = r P = K @ P return P def pinhole_project(proj_params, p_C): """ Project 3D point onto image plane using pinhole camera model """ assert len(proj_params) == 4 assert len(p_C) == 3 # Project x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Scale and center fx, fy, cx, cy = proj_params z = np.array([fx * x[0] + cx, fy * x[1] + cy]) return z def pinhole_params_jacobian(x): """ Form pinhole parameter jacobian """ return np.array([[x[0], 0.0, 1.0, 0.0], [0.0, x[1], 0.0, 1.0]]) def pinhole_point_jacobian(proj_params): """ Form pinhole point jacobian """ fx, fy, _, _ = proj_params return np.array([[fx, 0.0], [0.0, fy]]) # RADTAN4 ##################################################################### def radtan4_distort(dist_params, p): """ Distort point with Radial-Tangential distortion """ assert len(dist_params) == 4 assert len(p) == 2 # Distortion parameters k1, k2, p1, p2 = dist_params # Point x, y = p # Apply radial distortion x2 = x * x y2 = y * y r2 = x2 + y2 r4 = r2 * r2 radial_factor = 1.0 + (k1 * r2) + (k2 * r4) x_dash = x * radial_factor y_dash = y * radial_factor # Apply tangential distortion xy = x * y x_ddash = x_dash + (2.0 * p1 * xy + p2 * (r2 + 2.0 * x2)) y_ddash = y_dash + (p1 * (r2 + 2.0 * y2) + 2.0 * p2 * xy) return np.array([x_ddash, y_ddash]) def radtan4_point_jacobian(dist_params, p): """ Radial-tangential point jacobian """ assert len(dist_params) == 4 assert len(p) == 2 # Distortion parameters k1, k2, p1, p2 = dist_params # Point x, y = p # Apply radial distortion x2 = x * x y2 = y * y r2 = x2 + y2 r4 = r2 * r2 # Point Jacobian # Let u = [x; y] normalized point # Let u' be the distorted u # The jacobian of u' w.r.t. 
u (or du'/du) is: J_point = zeros((2, 2)) J_point[0, 0] = k1 * r2 + k2 * r4 + 2.0 * p1 * y + 6.0 * p2 * x J_point[0, 0] += x * (2.0 * k1 * x + 4.0 * k2 * x * r2) + 1.0 J_point[1, 0] = 2.0 * p1 * x + 2.0 * p2 * y J_point[1, 0] += y * (2.0 * k1 * x + 4.0 * k2 * x * r2) J_point[0, 1] = J_point[1, 0] J_point[1, 1] = k1 * r2 + k2 * r4 + 6.0 * p1 * y + 2.0 * p2 * x J_point[1, 1] += y * (2.0 * k1 * y + 4.0 * k2 * y * r2) + 1.0 # Above is generated using sympy return J_point def radtan4_undistort(dist_params, p0): """ Un-distort point with Radial-Tangential distortion """ assert len(dist_params) == 4 assert len(p0) == 2 # Undistort p = p0 max_iter = 5 for _ in range(max_iter): # Error p_distorted = radtan4_distort(dist_params, p) J = radtan4_point_jacobian(dist_params, p) err = (p0 - p_distorted) # Update # dp = inv(J' * J) * J' * err dp = pinv(J) @ err p = p + dp # Check threshold if (err.T @ err) < 1e-15: break return p def radtan4_params_jacobian(dist_params, p): """ Radial-Tangential distortion parameter jacobian """ assert len(dist_params) == 4 assert len(p) == 2 # Point x, y = p # Setup x2 = x * x y2 = y * y xy = x * y r2 = x2 + y2 r4 = r2 * r2 # Params Jacobian J_params = zeros((2, 4)) J_params[0, 0] = x * r2 J_params[0, 1] = x * r4 J_params[0, 2] = 2.0 * xy J_params[0, 3] = 3.0 * x2 + y2 J_params[1, 0] = y * r2 J_params[1, 1] = y * r4 J_params[1, 2] = x2 + 3.0 * y2 J_params[1, 3] = 2.0 * xy return J_params # EQUI4 ####################################################################### def equi4_distort(dist_params, p): """ Distort point with Equi-distant distortion """ assert len(dist_params) == 4 assert len(p) == 2 # Distortion parameters k1, k2, k3, k4 = dist_params # Distort x, y = p r = sqrt(x * x + y * y) th = math.atan(r) th2 = th * th th4 = th2 * th2 th6 = th4 * th2 th8 = th4 * th4 thd = th * (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8) s = thd / r x_dash = s * x y_dash = s * y return np.array([x_dash, y_dash]) def equi4_undistort(dist_params, p): """ Undistort point using Equi-distant distortion """ thd = sqrt(p(0) * p(0) + p[0] * p[0]) # Distortion parameters k1, k2, k3, k4 = dist_params th = thd # Initial guess for _ in range(20): th2 = th * th th4 = th2 * th2 th6 = th4 * th2 th8 = th4 * th4 th = thd / (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8) scaling = tan(th) / thd return np.array([p[0] * scaling, p[1] * scaling]) def equi4_params_jacobian(dist_params, p): """ Equi-distant distortion params jacobian """ assert len(dist_params) == 4 assert len(p) == 2 # Jacobian x, y = p r = sqrt(x**2 + y**2) th = atan(r) J_params = zeros((2, 4)) J_params[0, 0] = x * th**3 / r J_params[0, 1] = x * th**5 / r J_params[0, 2] = x * th**7 / r J_params[0, 3] = x * th**9 / r J_params[1, 0] = y * th**3 / r J_params[1, 1] = y * th**5 / r J_params[1, 2] = y * th**7 / r J_params[1, 3] = y * th**9 / r return J_params def equi4_point_jacobian(dist_params, p): """ Equi-distant distortion point jacobian """ assert len(dist_params) == 4 assert len(p) == 2 # Distortion parameters k1, k2, k3, k4 = dist_params # Jacobian x, y = p r = sqrt(x**2 + y**2) th = math.atan(r) th2 = th**2 th4 = th**4 th6 = th**6 th8 = th**8 thd = th * (1.0 + k1 * th2 + k2 * th4 + k3 * th6 + k4 * th8) th_r = 1.0 / (r * r + 1.0) thd_th = 1.0 + 3.0 * k1 * th2 thd_th += 5.0 * k2 * th4 thd_th += 7.0 * k3 * th6 thd_th += 9.0 * k4 * th8 s = thd / r s_r = thd_th * th_r / r - thd / (r * r) r_x = 1.0 / r * x r_y = 1.0 / r * y J_point = zeros((2, 2)) J_point[0, 0] = s + x * s_r * r_x J_point[0, 1] = x * s_r * r_y J_point[1, 0] = y * s_r 
* r_x J_point[1, 1] = s + y * s_r * r_y return J_point # PINHOLE RADTAN4 ############################################################# def pinhole_radtan4_project(proj_params, dist_params, p_C): """ Pinhole + Radial-Tangential project """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(p_C) == 3 # Project x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Distort x_dist = radtan4_distort(dist_params, x) # Scale and center to image plane fx, fy, cx, cy = proj_params z = np.array([fx * x_dist[0] + cx, fy * x_dist[1] + cy]) return z def pinhole_radtan4_backproject(proj_params, dist_params, z): """ Pinhole + Radial-Tangential back-project """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(z) == 2 # Convert image pixel coordinates to normalized retinal coordintes fx, fy, cx, cy = proj_params x = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy, 1.0]) # Undistort x = radtan4_undistort(dist_params, x) # 3D ray p = np.array([x[0], x[1], 1.0]) return p def pinhole_radtan4_undistort(proj_params, dist_params, z): """ Pinhole + Radial-Tangential undistort """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(z) == 2 # Back project and undistort fx, fy, cx, cy = proj_params p = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy]) p_undist = radtan4_undistort(dist_params, p) # Project undistorted point to image plane return np.array([p_undist[0] * fx + cx, p_undist[1] * fy + cy]) def pinhole_radtan4_project_jacobian(proj_params, dist_params, p_C): """ Pinhole + Radial-Tangential project jacobian """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(p_C) == 3 # Project 3D point x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Jacobian J_proj = zeros((2, 3)) J_proj[0, :] = [1 / p_C[2], 0, -p_C[0] / p_C[2]**2] J_proj[1, :] = [0, 1 / p_C[2], -p_C[1] / p_C[2]**2] J_dist_point = radtan4_point_jacobian(dist_params, x) J_proj_point = pinhole_point_jacobian(proj_params) return J_proj_point @ J_dist_point @ J_proj def pinhole_radtan4_params_jacobian(proj_params, dist_params, p_C): """ Pinhole + Radial-Tangential params jacobian """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(p_C) == 3 x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Project 3D point x_dist = radtan4_distort(dist_params, x) # Distort point J_proj_point = pinhole_point_jacobian(proj_params) J_dist_params = radtan4_params_jacobian(dist_params, x) J = zeros((2, 8)) J[0:2, 0:4] = pinhole_params_jacobian(x_dist) J[0:2, 4:8] = J_proj_point @ J_dist_params return J # PINHOLE EQUI4 ############################################################### def pinhole_equi4_project(proj_params, dist_params, p_C): """ Pinhole + Equi-distant project """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(p_C) == 3 # Project x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Distort x_dist = equi4_distort(dist_params, x) # Scale and center to image plane fx, fy, cx, cy = proj_params z = np.array([fx * x_dist[0] + cx, fy * x_dist[1] + cy]) return z def pinhole_equi4_backproject(proj_params, dist_params, z): """ Pinhole + Equi-distant back-project """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(z) == 2 # Convert image pixel coordinates to normalized retinal coordintes fx, fy, cx, cy = proj_params x = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy, 1.0]) # Undistort x = equi4_undistort(dist_params, x) # 3D ray p = np.array([x[0], x[1], 1.0]) return p def pinhole_equi4_undistort(proj_params, dist_params, z): """ Pinhole + Equi-distant 
undistort """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(z) == 2 # Back project and undistort fx, fy, cx, cy = proj_params p = np.array([(z[0] - cx) / fx, (z[1] - cy) / fy]) p_undist = equi4_undistort(dist_params, p) # Project undistorted point to image plane return np.array([p_undist[0] * fx + cx, p_undist[1] * fy + cy]) def pinhole_equi4_project_jacobian(proj_params, dist_params, p_C): """ Pinhole + Equi-distant project jacobian """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(p_C) == 3 # Project 3D point x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Jacobian J_proj = zeros((2, 3)) J_proj[0, :] = [1 / p_C[2], 0, -p_C[0] / p_C[2]**2] J_proj[1, :] = [0, 1 / p_C[2], -p_C[1] / p_C[2]**2] J_dist_point = equi4_point_jacobian(dist_params, x) J_proj_point = pinhole_point_jacobian(proj_params) return J_proj_point @ J_dist_point @ J_proj def pinhole_equi4_params_jacobian(proj_params, dist_params, p_C): """ Pinhole + Equi-distant params jacobian """ assert len(proj_params) == 4 assert len(dist_params) == 4 assert len(p_C) == 3 x = np.array([p_C[0] / p_C[2], p_C[1] / p_C[2]]) # Project 3D point x_dist = equi4_distort(dist_params, x) # Distort point J_proj_point = pinhole_point_jacobian(proj_params) J_dist_params = equi4_params_jacobian(dist_params, x) J = zeros((2, 8)) J[0:2, 0:4] = pinhole_params_jacobian(x_dist) J[0:2, 4:8] = J_proj_point @ J_dist_params return J # CAMERA GEOMETRY ############################################################# @dataclass class CameraGeometry: """ Camera Geometry """ cam_idx: int resolution: tuple proj_model: str dist_model: str proj_params_size: int dist_params_size: int project_fn: FunctionType backproject_fn: FunctionType undistort_fn: FunctionType J_proj_fn: FunctionType J_params_fn: FunctionType def get_proj_params_size(self): """ Return projection parameter size """ return self.proj_params_size def get_dist_params_size(self): """ Return distortion parameter size """ return self.dist_params_size def get_params_size(self): """ Return parameter size """ return self.get_proj_params_size() + self.get_dist_params_size() def proj_params(self, params): """ Extract projection parameters """ return params[:self.proj_params_size] def dist_params(self, params): """ Extract distortion parameters """ return params[-self.dist_params_size:] def project(self, params, p_C): """ Project point `p_C` with camera parameters `params` """ # Project proj_params = params[:self.proj_params_size] dist_params = params[-self.dist_params_size:] z = self.project_fn(proj_params, dist_params, p_C) # Make sure point is infront of camera if p_C[2] < 0.0: return False, z # Make sure image point is within image bounds x_ok = z[0] >= 0.0 and z[0] <= self.resolution[0] y_ok = z[1] >= 0.0 and z[1] <= self.resolution[1] if x_ok and y_ok: return True, z return False, z def backproject(self, params, z): """ Back-project image point `z` with camera parameters `params` """ proj_params = params[:self.proj_params_size] dist_params = params[-self.dist_params_size:] return self.project_fn(proj_params, dist_params, z) def undistort(self, params, z): """ Undistort image point `z` with camera parameters `params` """ proj_params = params[:self.proj_params_size] dist_params = params[-self.dist_params_size:] return self.undistort_fn(proj_params, dist_params, z) def J_proj(self, params, p_C): """ Form Jacobian w.r.t. 
p_C """ proj_params = params[:self.proj_params_size] dist_params = params[-self.dist_params_size:] return self.J_proj_fn(proj_params, dist_params, p_C) def J_params(self, params, p_C): """ Form Jacobian w.r.t. camera parameters """ proj_params = params[:self.proj_params_size] dist_params = params[-self.dist_params_size:] return self.J_params_fn(proj_params, dist_params, p_C) def pinhole_radtan4_setup(cam_idx, cam_res): """ Setup Pinhole + Radtan4 camera geometry """ return CameraGeometry( cam_idx, cam_res, "pinhole", "radtan4", 4, 4, pinhole_radtan4_project, pinhole_radtan4_backproject, pinhole_radtan4_undistort, pinhole_radtan4_project_jacobian, pinhole_radtan4_params_jacobian) def pinhole_equi4_setup(cam_idx, cam_res): """ Setup Pinhole + Equi camera geometry """ return CameraGeometry(cam_idx, cam_res, "pinhole", "equi4", 4, 4, pinhole_equi4_project, pinhole_equi4_backproject, pinhole_equi4_undistort, pinhole_equi4_project_jacobian, pinhole_equi4_params_jacobian) def camera_geometry_setup(cam_idx, cam_res, proj_model, dist_model): """ Setup camera geometry """ if proj_model == "pinhole" and dist_model == "radtan4": return pinhole_radtan4_setup(cam_idx, cam_res) elif proj_model == "pinhole" and dist_model == "equi4": return pinhole_equi4_setup(cam_idx, cam_res) else: raise RuntimeError(f"Unrecognized [{proj_model}]-[{dist_model}] combo!") ################################################################################ # DATASET ################################################################################ # TIMELINE###################################################################### @dataclass class CameraEvent: """ Camera Event """ ts: int cam_idx: int image: np.array @dataclass class ImuEvent: """ IMU Event """ ts: int imu_idx: int acc: np.array gyr: np.array @dataclass class Timeline: """ Timeline """ def __init__(self): self.data = {} def num_timestamps(self): """ Return number of timestamps """ return len(self.data) def num_events(self): """ Return number of events """ nb_events = 0 for _, events in self.data: nb_events += len(events) return nb_events def get_timestamps(self): """ Get timestamps """ return sorted(list(self.data.keys())) def add_event(self, ts, event): """ Add event """ if ts not in self.data: self.data[ts] = [event] else: self.data[ts].append(event) def get_events(self, ts): """ Get events """ return self.data[ts] # EUROC ######################################################################## class EurocSensor: """ Euroc Sensor """ def __init__(self, yaml_path): # Load yaml file config = load_yaml(yaml_path) # General sensor definitions. self.sensor_type = config.sensor_type self.comment = config.comment # Sensor extrinsics wrt. the body-frame. self.T_BS = np.array(config.T_BS.data).reshape((4, 4)) # Camera specific definitions. 
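    # (note: this same class is also used to parse the IMU's sensor.yaml,
    # hence the sensor_type branch below)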
if config.sensor_type == "camera": self.rate_hz = config.rate_hz self.resolution = config.resolution self.camera_model = config.camera_model self.intrinsics = config.intrinsics self.distortion_model = config.distortion_model self.distortion_coefficients = config.distortion_coefficients elif config.sensor_type == "imu": self.rate_hz = config.rate_hz self.gyro_noise_density = config.gyroscope_noise_density self.gyro_random_walk = config.gyroscope_random_walk self.accel_noise_density = config.accelerometer_noise_density self.accel_random_walk = config.accelerometer_random_walk class EurocImuData: """ Euroc Imu data """ def __init__(self, data_dir): self.imu_dir = Path(data_dir, 'mav0', 'imu0') self.config = EurocSensor(Path(self.imu_dir, 'sensor.yaml')) self.timestamps = [] self.acc = {} self.gyr = {} # Load data df = pandas.read_csv(Path(self.imu_dir, 'data.csv')) df = df.rename(columns=lambda x: x.strip()) # -- Timestamp timestamps = df['#timestamp [ns]'].to_numpy() # -- Accelerometer measurement acc_x = df['a_RS_S_x [m s^-2]'].to_numpy() acc_y = df['a_RS_S_y [m s^-2]'].to_numpy() acc_z = df['a_RS_S_z [m s^-2]'].to_numpy() # -- Gyroscope measurement gyr_x = df['w_RS_S_x [rad s^-1]'].to_numpy() gyr_y = df['w_RS_S_y [rad s^-1]'].to_numpy() gyr_z = df['w_RS_S_z [rad s^-1]'].to_numpy() # -- Load for i, ts in enumerate(timestamps): self.timestamps.append(ts) self.acc[ts] = np.array([acc_x[i], acc_y[i], acc_z[i]]) self.gyr[ts] = np.array([gyr_x[i], gyr_y[i], gyr_z[i]]) class EurocCameraData: """ Euroc Camera data """ def __init__(self, data_dir, cam_idx): self.cam_idx = cam_idx self.cam_dir = Path(data_dir, 'mav0', 'cam' + str(cam_idx)) self.config = EurocSensor(Path(self.cam_dir, 'sensor.yaml')) self.timestamps = [] self.image_paths = {} # Load image paths cam_data_dir = str(Path(self.cam_dir, 'data', '*.png')) for img_file in sorted(glob.glob(cam_data_dir)): ts_str, _ = os.path.basename(img_file).split('.') ts = int(ts_str) self.timestamps.append(ts) self.image_paths[ts] = img_file def get_image_path_list(self): """ Return list of image paths """ return [img_path for _, img_path in self.image_paths] class EurocGroundTruth: """ Euroc ground truth """ def __init__(self, data_dir): self.timestamps = [] self.T_WB = {} self.v_WB = {} self.w_WB = {} self.a_WB = {} # Load data dir_name = 'state_groundtruth_estimate0' data_csv = Path(data_dir, 'mav0', dir_name, 'data.csv') df = pandas.read_csv(data_csv) df = df.rename(columns=lambda x: x.strip()) # -- Timestamp timestamps = df['#timestamp'].to_numpy() # -- Body pose in world frame rx_list = df['p_RS_R_x [m]'].to_numpy() ry_list = df['p_RS_R_y [m]'].to_numpy() rz_list = df['p_RS_R_z [m]'].to_numpy() qw_list = df['q_RS_w []'].to_numpy() qx_list = df['q_RS_x []'].to_numpy() qy_list = df['q_RS_y []'].to_numpy() qz_list = df['q_RS_z []'].to_numpy() # -- Body velocity in world frame vx_list = df['v_RS_R_x [m s^-1]'].to_numpy() vy_list = df['v_RS_R_y [m s^-1]'].to_numpy() vz_list = df['v_RS_R_z [m s^-1]'].to_numpy() # -- Add to class for i, ts in enumerate(timestamps): r_WB = np.array([rx_list[i], ry_list[i], rz_list[i]]) q_WB = np.array([qw_list[i], qx_list[i], qy_list[i], qz_list[i]]) v_WB = np.array([vx_list[i], vy_list[i], vz_list[i]]) self.timestamps.append(ts) self.T_WB[ts] = tf(q_WB, r_WB) self.v_WB[ts] = v_WB class EurocDataset: """ Euroc Dataset """ def __init__(self, data_path): # Data path self.data_path = data_path if os.path.isdir(data_path) is False: raise RuntimeError(f"Path {data_path} does not exist!") # Data self.imu0_data = 
EurocImuData(self.data_path) self.cam0_data = EurocCameraData(self.data_path, 0) self.cam1_data = EurocCameraData(self.data_path, 1) self.ground_truth = EurocGroundTruth(self.data_path) self.timeline = self._form_timeline() def _form_timeline(self): timeline = Timeline() # Form timeline # -- Add imu0 events for ts in self.imu0_data.timestamps: acc = self.imu0_data.acc[ts] gyr = self.imu0_data.gyr[ts] timeline.add_event(ts, ImuEvent(ts, 0, acc, gyr)) # -- Add cam0 events for ts, img_path in self.cam0_data.image_paths.items(): timeline.add_event(ts, CameraEvent(ts, 0, img_path)) # -- Add cam1 events for ts, img_path in self.cam1_data.image_paths.items(): timeline.add_event(ts, CameraEvent(ts, 1, img_path)) return timeline def get_camera_image(self, cam_idx, ts): """ Get camera image """ img_path = None if cam_idx == 0: img_path = self.cam0_data.image_paths[ts] elif cam_idx == 1: img_path = self.cam1_data.image_paths[ts] else: raise RuntimeError("cam_idx has to be 0 or 1") return cv2.imread(img_path, cv2.IMREAD_GRAYSCALE) def get_ground_truth_pose(self, ts): """ Get ground truth pose T_WB at timestamp `ts` """ # Pre-check if ts <= self.ground_truth.timestamps[0]: return None elif ts >= self.ground_truth.timestamps[-1]: return None # Loop throught timestamps for k, ground_truth_ts in enumerate(self.ground_truth.timestamps): if ts == ground_truth_ts: return self.ground_truth.T_WB[ts] elif self.ground_truth.timestamps[k] > ts: ts_i = self.ground_truth.timestamps[k - 1] ts_j = self.ground_truth.timestamps[k] alpha = float(ts_j - ts) / float(ts_j - ts_i) pose_i = self.ground_truth.T_WB[ts_i] pose_j = self.ground_truth.T_WB[ts_j] return tf_lerp(pose_i, pose_j, alpha) return None # KITTI ####################################################################### class KittiCameraData: """ KittiCameraDataset """ def __init__(self, cam_idx, seq_path): self.cam_idx = cam_idx self.seq_path = seq_path self.cam_path = Path(self.seq_path, "image_" + str(self.cam_idx).zfill(2)) self.img_dir = Path(self.cam_path, "data") self.img_paths = sorted(glob.glob(str(Path(self.img_dir, "*.png")))) class KittiRawDataset: """ KittiRawDataset """ def __init__(self, data_dir, date, seq, is_sync): # Paths self.data_dir = data_dir self.date = date self.seq = seq.zfill(4) self.sync = "sync" if is_sync else "extract" self.seq_name = "_".join([self.date, "drive", self.seq, self.sync]) self.seq_path = Path(self.data_dir, self.date, self.seq_name) # Camera data self.cam0_data = KittiCameraData(0, self.seq_path) self.cam1_data = KittiCameraData(1, self.seq_path) self.cam2_data = KittiCameraData(2, self.seq_path) self.cam3_data = KittiCameraData(3, self.seq_path) # Calibration calib_cam_to_cam_filepath = Path(self.data_dir, "calib_cam_to_cam.txt") calib_imu_to_velo_filepath = Path(self.data_dir, "calib_imu_to_velo.txt") calib_velo_to_cam_filepath = Path(self.data_dir, "calib_velo_to_cam.txt") self.calib_cam_to_cam = self._read_calib_file(calib_cam_to_cam_filepath) self.calib_imu_to_velo = self._read_calib_file(calib_imu_to_velo_filepath) self.calib_velo_to_cam = self._read_calib_file(calib_velo_to_cam_filepath) @classmethod def _read_calib_file(cls, fp): data = {} with open(fp, 'r') as f: for line in f.readlines(): key, value = line.split(':', 1) # The only non-float values in these files are dates, which # we don't care about anyway try: data[key] = np.array([float(x) for x in value.split()]) except ValueError: pass return data def nb_camera_images(self, cam_idx=0): """ Return number of camera images """ assert cam_idx >= 0 and 
cam_idx <= 3 if cam_idx == 0: return len(self.cam0_data.img_paths) elif cam_idx == 1: return len(self.cam1_data.img_paths) elif cam_idx == 2: return len(self.cam2_data.img_paths) elif cam_idx == 3: return len(self.cam3_data.img_paths) return None def get_velodyne_extrinsics(self): """ Get velodyne extrinsics """ # Form imu-velo extrinsics T_BV C_VB = self.calib_imu_to_velo['R'].reshape((3, 3)) r_VB = self.calib_imu_to_velo['T'] T_VB = tf(C_VB, r_VB) T_BV = inv(T_VB) return T_BV def get_camera_extrinsics(self, cam_idx): """ Get camera extrinsics T_BCi """ # Form imu-velo extrinsics T_VB C_VB = self.calib_imu_to_velo['R'].reshape((3, 3)) r_VB = self.calib_imu_to_velo['T'] T_VB = tf(C_VB, r_VB) # Form velo-cam extrinsics T_C0V C_C0V = self.calib_velo_to_cam['R'].reshape((3, 3)) r_C0V = self.calib_velo_to_cam['T'] T_C0V = tf(C_C0V, r_C0V) # Form cam-cam extrinsics T_CiC0 cam_str = str(cam_idx) C_CiC0 = self.calib_cam_to_cam['R_' + cam_str.zfill(2)].reshape((3, 3)) r_CiC0 = self.calib_cam_to_cam['T_' + cam_str.zfill(2)] T_CiC0 = tf(C_CiC0, r_CiC0) # Form camera extrinsics T_BC0 T_CiB = T_CiC0 @ T_C0V @ T_VB T_BCi = inv(T_CiB) return T_BCi def get_camera_image(self, cam_idx, **kwargs): """ Get camera image """ assert cam_idx >= 0 and cam_idx <= 3 imread_flag = kwargs.get('imread_flag', cv2.IMREAD_GRAYSCALE) img_idx = kwargs['index'] if cam_idx == 0: return cv2.imread(self.cam0_data.img_paths[img_idx], imread_flag) elif cam_idx == 1: return cv2.imread(self.cam1_data.img_paths[img_idx], imread_flag) elif cam_idx == 2: return cv2.imread(self.cam2_data.img_paths[img_idx], imread_flag) elif cam_idx == 3: return cv2.imread(self.cam3_data.img_paths[img_idx], imread_flag) return None def plot_frames(self): """ Plot Frames """ T_BV = self.get_velodyne_extrinsics() T_BC0 = self.get_camera_extrinsics(0) T_BC1 = self.get_camera_extrinsics(1) T_BC2 = self.get_camera_extrinsics(2) T_BC3 = self.get_camera_extrinsics(3) plt.figure() ax = plt.axes(projection='3d') plot_tf(ax, eye(4), size=0.1, name="imu") plot_tf(ax, T_BV, size=0.1, name="velo") plot_tf(ax, T_BC0, size=0.1, name="cam0") plot_tf(ax, T_BC1, size=0.1, name="cam1") plot_tf(ax, T_BC2, size=0.1, name="cam2") plot_tf(ax, T_BC3, size=0.1, name="cam3") ax.set_xlabel("x [m]") ax.set_ylabel("y [m]") ax.set_zlabel("z [m]") plot_set_axes_equal(ax) plt.show() ############################################################################### # FILTER ############################################################################### def compl_filter(gyro, accel, dt, roll, pitch): """ A simple complementary filter that uses `gyro` and `accel` measurements to estimate the attitude in `roll` and `pitch`. Where `dt` is the update rate of the `gyro` measurements in seconds. 
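
  A minimal usage sketch (assuming a hypothetical 100Hz IMU, i.e. dt = 0.01s,
  and iterables `gyro_data` and `accel_data` of 3-vector measurements):

    roll, pitch = 0.0, 0.0
    for gyro, accel in zip(gyro_data, accel_data):
      (roll, pitch) = compl_filter(gyro, accel, 0.01, roll, pitch)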
""" # Calculate pitch and roll using gyroscope wx, wy, _ = gyro gyro_roll = (wx * dt) + roll gyro_pitch = (wy * dt) + pitch # Calculate pitch and roll using accelerometer ax, ay, az = accel accel_roll = (atan(ay / sqrt(ax * ay + az * az))) * 180.0 / pi accel_pitch = (atan(ax / sqrt(ay * ay + az * az))) * 180.0 / pi # Complimentary filter pitch = (0.98 * gyro_pitch) + (0.02 * accel_pitch) roll = (0.98 * gyro_roll) + (0.02 * accel_roll) return (roll, pitch) ############################################################################### # STATE ESTIMATION ############################################################################### # STATE VARIABLES ############################################################# @dataclass class StateVariable: """ State variable """ ts: int var_type: str param: np.array parameterization: str min_dims: int fix: bool data: Optional[dict] = None param_id: int = None def set_param_id(self, pid): """ Set parameter id """ self.param_id = pid class StateVariableType(Enum): """ State Variable Type """ POSE = 1 EXTRINSICS = 2 FEATURE = 3 CAMERA = 4 SPEED_AND_BIASES = 5 class FeatureMeasurements: """ Feature measurements """ def __init__(self): self._init = False self._data = {} def initialized(self): """ Check if feature is initialized """ return self._init def has_overlap(self, ts): """ Check if feature has overlap at timestamp `ts` """ return len(self._data[ts]) > 1 def set_initialized(self): """ Set feature as initialized """ self._init = True def update(self, ts, cam_idx, z): """ Add feature measurement """ assert len(z) == 2 if ts not in self._data: self._data[ts] = {} self._data[ts][cam_idx] = z def get(self, ts, cam_idx): """ Get feature measurement """ return self._data[ts][cam_idx] def get_overlaps(self, ts): """ Get feature overlaps """ overlaps = [] for cam_idx, z in self._data[ts].items(): overlaps.append((cam_idx, z)) return overlaps def tf2pose(T): """ Form pose vector """ rx, ry, rz = tf_trans(T) qw, qx, qy, qz = tf_quat(T) return np.array([rx, ry, rz, qx, qy, qz, qw]) def pose2tf(pose_vec): """ Convert pose vector to transformation matrix """ rx, ry, rz = pose_vec[0:3] qx, qy, qz, qw = pose_vec[3:7] return tf(np.array([qw, qx, qy, qz]), np.array([rx, ry, rz])) def pose_setup(ts, param, **kwargs): """ Form pose state-variable """ fix = kwargs.get('fix', False) param = tf2pose(param) if param.shape == (4, 4) else param return StateVariable(ts, "pose", param, None, 6, fix) def extrinsics_setup(param, **kwargs): """ Form extrinsics state-variable """ fix = kwargs.get('fix', False) param = tf2pose(param) if param.shape == (4, 4) else param return StateVariable(None, "extrinsics", param, None, 6, fix) def camera_params_setup(cam_idx, res, proj_model, dist_model, param, **kwargs): """ Form camera parameters state-variable """ fix = kwargs.get('fix', False) data = camera_geometry_setup(cam_idx, res, proj_model, dist_model) return StateVariable(None, "camera", param, None, len(param), fix, data) def feature_setup(param, **kwargs): """ Form feature state-variable """ fix = kwargs.get('fix', False) data = FeatureMeasurements() return StateVariable(None, "feature", param, None, len(param), fix, data) def speed_biases_setup(ts, vel, ba, bg, **kwargs): """ Form speed and biases state-variable """ fix = kwargs.get('fix', False) param = np.block([vel, ba, bg]) return StateVariable(ts, "speed_and_biases", param, None, len(param), fix) def perturb_state_variable(sv, i, step_size): """ Perturb state variable """ if sv.var_type == "pose" or sv.var_type == "extrinsics": T 
= pose2tf(sv.param) T_dash = tf_perturb(T, i, step_size) sv.param = tf2pose(T_dash) else: sv.param[i] += step_size return sv def update_state_variable(sv, dx): """ Update state variable """ if sv.var_type == "pose" or sv.var_type == "extrinsics": T = pose2tf(sv.param) T_prime = tf_update(T, dx) sv.param = tf2pose(T_prime) else: sv.param += dx # FACTORS ###################################################################### class Factor: """ Factor """ def __init__(self, ftype, pids, z, covar): self.factor_id = None self.factor_type = ftype self.param_ids = pids self.measurement = z self.covar = covar self.sqrt_info = chol(inv(self.covar)).T def set_factor_id(self, fid): """ Set factor id """ self.factor_id = fid class PoseFactor(Factor): """ Pose Factor """ def __init__(self, pids, z, covar): assert len(pids) == 1 assert z.shape == (4, 4) assert covar.shape == (6, 6) Factor.__init__(self, "PoseFactor", pids, z, covar) def eval(self, params, **kwargs): """ Evaluate """ assert len(params) == 1 assert len(params[0]) == 7 # Measured pose T_meas = self.measurement q_meas = tf_quat(T_meas) r_meas = tf_trans(T_meas) # Estimated pose T_est = pose2tf(params[0]) q_est = tf_quat(T_est) r_est = tf_trans(T_est) # Form residuals (pose - pose_est) dr = r_meas - r_est dq = quat_mul(quat_inv(q_meas), q_est) dtheta = 2 * dq[1:4] r = self.sqrt_info @ np.block([dr, dtheta]) if kwargs.get('only_residuals', False): return r # Form jacobians J = zeros((6, 6)) J[0:3, 0:3] = -eye(3) J[3:6, 3:6] = quat_left(dq)[1:4, 1:4] J = self.sqrt_info @ J return (r, [J]) class MultiCameraBuffer: """ Multi-camera buffer """ def __init__(self, nb_cams=0): self.nb_cams = nb_cams self._ts = [] self._data = {} def reset(self): """ Reset buffer """ self._ts = [] self._data = {} def add(self, ts, cam_idx, data): """ Add camera event """ if self.nb_cams == 0: raise RuntimeError("MulitCameraBuffer not initialized yet!") self._ts.append(ts) self._data[cam_idx] = data def ready(self): """ Check whether buffer has all the camera frames ready """ if self.nb_cams == 0: raise RuntimeError("MulitCameraBuffer not initialized yet!") check_ts_same = (len(set(self._ts)) == 1) check_ts_len = (len(self._ts) == self.nb_cams) check_data = (len(self._data) == self.nb_cams) check_cam_indices = (len(set(self._data.keys())) == self.nb_cams) return check_ts_same and check_ts_len and check_data and check_cam_indices def get_camera_indices(self): """ Get camera indices """ return self._data.keys() def get_data(self): """ Get camera data """ if self.nb_cams is None: raise RuntimeError("MulitCameraBuffer not initialized yet!") return self._data class BAFactor(Factor): """ BA Factor """ def __init__(self, cam_geom, pids, z, covar=eye(2)): assert len(pids) == 3 assert len(z) == 2 assert covar.shape == (2, 2) Factor.__init__(self, "BAFactor", pids, z, covar) self.cam_geom = cam_geom def get_reproj_error(self, cam_pose, feature, cam_params): """ Get reprojection error """ T_WC = pose2tf(cam_pose) p_W = feature p_C = tf_point(inv(T_WC), p_W) status, z_hat = self.cam_geom.project(cam_params, p_C) if status is False: return None z = self.measurement reproj_error = norm(z - z_hat) return reproj_error def eval(self, params, **kwargs): """ Evaluate """ assert len(params) == 3 assert len(params[0]) == 7 assert len(params[1]) == 3 assert len(params[2]) == self.cam_geom.get_params_size() # Setup r = np.array([0.0, 0.0]) J0 = zeros((2, 6)) J1 = zeros((2, 3)) J2 = zeros((2, self.cam_geom.get_params_size())) # Map params cam_pose, feature, cam_params = params # Project point 
in world frame to image plane T_WC = pose2tf(cam_pose) z_hat = zeros((2, 1)) p_W = zeros((3, 1)) p_W = feature p_C = tf_point(inv(T_WC), p_W) status, z_hat = self.cam_geom.project(cam_params, p_C) # Calculate residual sqrt_info = self.sqrt_info z = self.measurement r = sqrt_info @ (z - z_hat) if kwargs.get('only_residuals', False): return r # Calculate Jacobians if status is False: return (r, [J0, J1, J2]) # -- Measurement model jacobian neg_sqrt_info = -1.0 * sqrt_info Jh = self.cam_geom.J_proj(cam_params, p_C) Jh_weighted = neg_sqrt_info @ Jh # -- Jacobian w.r.t. camera pose T_WC C_WC = tf_rot(T_WC) C_CW = C_WC.T r_WC = tf_trans(T_WC) J0 = zeros((2, 6)) # w.r.t Camera pose T_WC J0[0:2, 0:3] = Jh_weighted @ -C_CW J0[0:2, 3:6] = Jh_weighted @ -C_CW @ skew(p_W - r_WC) @ -C_WC # -- Jacobian w.r.t. feature J1 = zeros((2, 3)) J1 = Jh_weighted @ C_CW # -- Jacobian w.r.t. camera parameters J_cam_params = self.cam_geom.J_params(cam_params, p_C) J2 = zeros((2, self.cam_geom.get_params_size())) J2 = neg_sqrt_info @ J_cam_params return (r, [J0, J1, J2]) class VisionFactor(Factor): """ Vision Factor """ def __init__(self, cam_geom, pids, z, covar=eye(2)): assert len(pids) == 4 assert len(z) == 2 assert covar.shape == (2, 2) Factor.__init__(self, "VisionFactor", pids, z, covar) self.cam_geom = cam_geom def get_reproj_error(self, pose, cam_exts, feature, cam_params): """ Get reprojection error """ T_WB = pose2tf(pose) T_BCi = pose2tf(cam_exts) p_W = feature p_C = tf_point(inv(T_WB @ T_BCi), p_W) status, z_hat = self.cam_geom.project(cam_params, p_C) if status is False: return None z = self.measurement reproj_error = norm(z - z_hat) return reproj_error def eval(self, params, **kwargs): """ Evaluate """ assert len(params) == 4 assert len(params[0]) == 7 assert len(params[1]) == 7 assert len(params[2]) == 3 assert len(params[3]) == self.cam_geom.get_params_size() # Setup r = np.array([0.0, 0.0]) J0 = zeros((2, 6)) J1 = zeros((2, 6)) J2 = zeros((2, 3)) J3 = zeros((2, self.cam_geom.get_params_size())) # Project point in world frame to image plane pose, cam_exts, feature, cam_params = params T_WB = pose2tf(pose) T_BCi = pose2tf(cam_exts) p_W = feature p_C = tf_point(inv(T_WB @ T_BCi), p_W) status, z_hat = self.cam_geom.project(cam_params, p_C) # Calculate residual sqrt_info = self.sqrt_info z = self.measurement r = sqrt_info @ (z - z_hat) if kwargs.get('only_residuals', False): return r # Calculate Jacobians if status is False: return (r, [J0, J1, J2, J3]) C_BCi = tf_rot(T_BCi) C_WB = tf_rot(T_WB) C_CB = C_BCi.T C_BW = C_WB.T C_CW = C_CB @ C_WB.T r_WB = tf_trans(T_WB) neg_sqrt_info = -1.0 * sqrt_info # -- Measurement model jacobian Jh = self.cam_geom.J_proj(cam_params, p_C) Jh_weighted = neg_sqrt_info @ Jh # -- Jacobian w.r.t. pose T_WB J0 = zeros((2, 6)) J0[0:2, 0:3] = Jh_weighted @ C_CB @ -C_BW J0[0:2, 3:6] = Jh_weighted @ C_CB @ -C_BW @ skew(p_W - r_WB) @ -C_WB # -- Jacobian w.r.t. camera extrinsics T_BCi J1 = zeros((2, 6)) J1[0:2, 0:3] = Jh_weighted @ -C_CB J1[0:2, 3:6] = Jh_weighted @ -C_CB @ skew(C_BCi @ p_C) @ -C_BCi # -- Jacobian w.r.t. feature J2 = zeros((2, 3)) J2 = Jh_weighted @ C_CW # -- Jacobian w.r.t. 
camera parameters J_cam_params = self.cam_geom.J_params(cam_params, p_C) J3 = zeros((2, 8)) J3 = neg_sqrt_info @ J_cam_params return (r, [J0, J1, J2, J3]) class CalibVisionFactor(Factor): """ Calibration Vision Factor """ def __init__(self, cam_geom, pids, grid_data, covar=eye(2)): assert len(pids) == 3 assert len(grid_data) == 4 assert covar.shape == (2, 2) tag_id, corner_idx, r_FFi, z = grid_data Factor.__init__(self, "CalibVisionFactor", pids, z, covar) self.cam_geom = cam_geom self.tag_id = tag_id self.corner_idx = corner_idx self.r_FFi = r_FFi def get_residual(self, pose, cam_exts, cam_params): """ Get residual """ T_BF = pose2tf(pose) T_BCi = pose2tf(cam_exts) T_CiB = inv(T_BCi) r_CiFi = tf_point(T_CiB @ T_BF, self.r_FFi) status, z_hat = self.cam_geom.project(cam_params, r_CiFi) if status is False: return None r = self.measurement - z_hat return r def get_reproj_error(self, pose, cam_exts, cam_params): """ Get reprojection error """ r = self.get_residual(pose, cam_exts, cam_params) if r is None: return None return norm(r) def eval(self, params, **kwargs): """ Evaluate """ assert len(params) == 3 assert len(params[0]) == 7 assert len(params[1]) == 7 assert len(params[2]) == self.cam_geom.get_params_size() # Setup r = np.array([0.0, 0.0]) J0 = zeros((2, 6)) J1 = zeros((2, 6)) J2 = zeros((2, self.cam_geom.get_params_size())) # Map parameters out pose, cam_exts, cam_params = params T_BF = pose2tf(pose) T_BCi = pose2tf(cam_exts) # Transform and project point to image plane T_CiB = inv(T_BCi) r_CiFi = tf_point(T_CiB @ T_BF, self.r_FFi) status, z_hat = self.cam_geom.project(cam_params, r_CiFi) # Calculate residual sqrt_info = self.sqrt_info z = self.measurement r = sqrt_info @ (z - z_hat) if kwargs.get('only_residuals', False): return r # Calculate Jacobians if status is False: return (r, [J0, J1, J2]) neg_sqrt_info = -1.0 * sqrt_info Jh = self.cam_geom.J_proj(cam_params, r_CiFi) Jh_weighted = neg_sqrt_info @ Jh # -- Jacobians w.r.t relative camera pose T_BF C_CiB = tf_rot(T_CiB) C_BF = tf_rot(T_BF) J0 = zeros((2, 6)) J0[0:2, 0:3] = Jh_weighted @ C_CiB J0[0:2, 3:6] = Jh_weighted @ C_CiB @ -C_BF @ skew(self.r_FFi) # -- Jacobians w.r.t T_BCi r_BFi = tf_point(T_BF, self.r_FFi) r_BCi = tf_trans(T_BCi) C_BCi = tf_rot(T_BCi) J1 = zeros((2, 6)) J1[0:2, 0:3] = Jh_weighted @ -C_CiB J1[0:2, 3:6] = Jh_weighted @ -C_CiB @ skew(r_BFi - r_BCi) @ -C_BCi # -- Jacobians w.r.t cam params J_cam_params = self.cam_geom.J_params(cam_params, r_CiFi) J2 = neg_sqrt_info @ J_cam_params return (r, [J0, J1, J2]) class ImuBuffer: """ IMU buffer """ def __init__(self, ts=None, acc=None, gyr=None): self.ts = ts if ts is not None else [] self.acc = acc if acc is not None else [] self.gyr = gyr if gyr is not None else [] def add(self, ts, acc, gyr): """ Add imu measurement """ self.ts.append(ts) self.acc.append(acc) self.gyr.append(gyr) def add_event(self, imu_event): """ Add imu event """ self.ts.append(imu_event.ts) self.acc.append(imu_event.acc) self.gyr.append(imu_event.gyr) def length(self): """ Return length of imu buffer """ return len(self.ts) @dataclass class ImuParams: """ IMU parameters """ noise_acc: np.array noise_gyr: np.array noise_ba: np.array noise_bg: np.array g: np.array = np.array([0.0, 0.0, 9.81]) @dataclass class ImuFactorData: """ IMU Factor data """ state_F: np.array state_P: np.array dr: np.array dv: np.array dC: np.array ba: np.array bg: np.array g: np.array Dt: float class ImuFactor(Factor): """ Imu Factor """ def __init__(self, pids, imu_params, imu_buf, sb_i): assert len(pids) == 4 
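    # Note: the four parameter ids are assumed to refer to, in order, the pose
    # and speed/biases at time i followed by the pose and speed/biases at
    # time j (see eval() below). The measurements in `imu_buf` are
    # pre-integrated once here and the result reused at every evaluation.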
self.imu_params = imu_params self.imu_buf = imu_buf data = self.propagate(imu_buf, imu_params, sb_i) Factor.__init__(self, "ImuFactor", pids, None, data.state_P) self.state_F = data.state_F self.state_P = data.state_P self.dr = data.dr self.dv = data.dv self.dC = data.dC self.ba = data.ba self.bg = data.bg self.g = data.g self.Dt = data.Dt @staticmethod def propagate(imu_buf, imu_params, sb_i): """ Propagate imu measurements """ # Setup Dt = 0.0 g = imu_params.g state_F = eye(15) # State jacobian state_P = zeros((15, 15)) # State covariance # Noise matrix Q Q = zeros((12, 12)) Q[0:3, 0:3] = imu_params.noise_acc**2 * eye(3) Q[3:6, 3:6] = imu_params.noise_gyr**2 * eye(3) Q[6:9, 6:9] = imu_params.noise_ba**2 * eye(3) Q[9:12, 9:12] = imu_params.noise_bg**2 * eye(3) # Pre-integrate relative position, velocity, rotation and biases dr = np.array([0.0, 0.0, 0.0]) # Relative position dv = np.array([0.0, 0.0, 0.0]) # Relative velocity dC = eye(3) # Relative rotation ba_i = sb_i.param[3:6] # Accel biase at i bg_i = sb_i.param[6:9] # Gyro biase at i # Pre-integrate imu measuremenets for k in range(len(imu_buf.ts) - 1): # Timestep ts_i = imu_buf.ts[k] ts_j = imu_buf.ts[k + 1] dt = ts2sec(ts_j - ts_i) dt_sq = dt * dt # Accelerometer and gyroscope measurements acc_i = imu_buf.acc[k] gyr_i = imu_buf.gyr[k] # Propagate IMU state using Euler method dr = dr + (dv * dt) + (0.5 * dC @ (acc_i - ba_i) * dt_sq) dv = dv + dC @ (acc_i - ba_i) * dt dC = dC @ Exp((gyr_i - bg_i) * dt) ba = ba_i bg = bg_i # Make sure determinant of rotation is 1 by normalizing the quaternion dq = quat_normalize(rot2quat(dC)) dC = quat2rot(dq) # Continuous time transition matrix F F = zeros((15, 15)) F[0:3, 3:6] = eye(3) F[3:6, 6:9] = -1.0 * dC @ skew(acc_i - ba_i) F[3:6, 9:12] = -1.0 * dC F[6:9, 6:9] = -1.0 * skew(gyr_i - bg_i) F[6:9, 12:15] = -eye(3) # Continuous time input jacobian G G = zeros((15, 12)) G[3:6, 0:3] = -1.0 * dC G[6:9, 3:6] = -eye(3) G[9:12, 6:9] = eye(3) G[12:15, 9:12] = eye(3) # Update G_dt = G * dt I_F_dt = eye(15) + F * dt state_F = I_F_dt @ state_F state_P = I_F_dt @ state_P @ I_F_dt.T + G_dt @ Q @ G_dt.T Dt += dt state_P = (state_P + state_P.T) / 2.0 return ImuFactorData(state_F, state_P, dr, dv, dC, ba, bg, g, Dt) def eval(self, params, **kwargs): """ Evaluate IMU factor """ assert len(params) == 4 assert len(params[0]) == 7 assert len(params[1]) == 9 assert len(params[2]) == 7 assert len(params[3]) == 9 # Map params pose_i, sb_i, pose_j, sb_j = params # Timestep i T_i = pose2tf(pose_i) r_i = tf_trans(T_i) C_i = tf_rot(T_i) q_i = tf_quat(T_i) v_i = sb_i[0:3] ba_i = sb_i[3:6] bg_i = sb_i[6:9] # Timestep j T_j = pose2tf(pose_j) r_j = tf_trans(T_j) C_j = tf_rot(T_j) q_j = tf_quat(T_j) v_j = sb_j[0:3] # Correct the relative position, velocity and orientation # -- Extract jacobians from error-state jacobian dr_dba = self.state_F[0:3, 9:12] dr_dbg = self.state_F[0:3, 12:15] dv_dba = self.state_F[3:6, 9:12] dv_dbg = self.state_F[3:6, 12:15] dq_dbg = self.state_F[6:9, 12:15] dba = ba_i - self.ba dbg = bg_i - self.bg # -- Correct the relative position, velocity and rotation dr = self.dr + dr_dba @ dba + dr_dbg @ dbg dv = self.dv + dv_dba @ dba + dv_dbg @ dbg dC = self.dC @ Exp(dq_dbg @ dbg) dq = quat_normalize(rot2quat(dC)) # Form residuals sqrt_info = self.sqrt_info g = self.g Dt = self.Dt Dt_sq = Dt * Dt dr_meas = (C_i.T @ ((r_j - r_i) - (v_i * Dt) + (0.5 * g * Dt_sq))) dv_meas = (C_i.T @ ((v_j - v_i) + (g * Dt))) err_pos = dr_meas - dr err_vel = dv_meas - dv err_rot = (2.0 * quat_mul(quat_inv(dq), 
quat_mul(quat_inv(q_i), q_j)))[1:4] err_ba = np.array([0.0, 0.0, 0.0]) err_bg = np.array([0.0, 0.0, 0.0]) r = sqrt_info @ np.block([err_pos, err_vel, err_rot, err_ba, err_bg]) if kwargs.get('only_residuals', False): return r # Form jacobians J0 = zeros((15, 6)) # residuals w.r.t pose i J1 = zeros((15, 9)) # residuals w.r.t speed and biase i J2 = zeros((15, 6)) # residuals w.r.t pose j J3 = zeros((15, 9)) # residuals w.r.t speed and biase j # -- Jacobian w.r.t. pose i # yapf: disable J0[0:3, 0:3] = -C_i.T # dr w.r.t r_i J0[0:3, 3:6] = skew(dr_meas) # dr w.r.t C_i J0[3:6, 3:6] = skew(dv_meas) # dv w.r.t C_i J0[6:9, 3:6] = -(quat_left(rot2quat(C_j.T @ C_i)) @ quat_right(dq))[1:4, 1:4] # dtheta w.r.t C_i J0 = sqrt_info @ J0 # yapf: enable # -- Jacobian w.r.t. speed and biases i # yapf: disable J1[0:3, 0:3] = -C_i.T * Dt # dr w.r.t v_i J1[0:3, 3:6] = -dr_dba # dr w.r.t ba J1[0:3, 6:9] = -dr_dbg # dr w.r.t bg J1[3:6, 0:3] = -C_i.T # dv w.r.t v_i J1[3:6, 3:6] = -dv_dba # dv w.r.t ba J1[3:6, 6:9] = -dv_dbg # dv w.r.t bg J1[6:9, 6:9] = -quat_left(rot2quat(C_j.T @ C_i @ self.dC))[1:4, 1:4] @ dq_dbg # dtheta w.r.t C_i J1 = sqrt_info @ J1 # yapf: enable # -- Jacobian w.r.t. pose j # yapf: disable J2[0:3, 0:3] = C_i.T # dr w.r.t r_j J2[6:9, 3:6] = quat_left(rot2quat(dC.T @ C_i.T @ C_j))[1:4, 1:4] # dtheta w.r.t C_j J2 = sqrt_info @ J2 # yapf: enable # -- Jacobian w.r.t. sb j J3[3:6, 0:3] = C_i.T # dv w.r.t v_j J3 = sqrt_info @ J3 return (r, [J0, J1, J2, J3]) def check_factor_jacobian(factor, fvars, var_idx, jac_name, **kwargs): """ Check factor jacobian """ # Step size and threshold h = kwargs.get('step_size', 1e-8) threshold = kwargs.get('threshold', 1e-4) verbose = kwargs.get('verbose', False) # Calculate baseline params = [sv.param for sv in fvars] r, jacs = factor.eval(params) # Numerical diff J_fdiff = zeros((len(r), fvars[var_idx].min_dims)) for i in range(fvars[var_idx].min_dims): # Forward difference and evaluate vars_fwd = copy.deepcopy(fvars) vars_fwd[var_idx] = perturb_state_variable(vars_fwd[var_idx], i, 0.5 * h) r_fwd, _ = factor.eval([sv.param for sv in vars_fwd]) # Backward difference and evaluate vars_bwd = copy.deepcopy(fvars) vars_bwd[var_idx] = perturb_state_variable(vars_bwd[var_idx], i, -0.5 * h) r_bwd, _ = factor.eval([sv.param for sv in vars_bwd]) # Central finite difference J_fdiff[:, i] = (r_fwd - r_bwd) / h J = jacs[var_idx] return check_jacobian(jac_name, J_fdiff, J, threshold, verbose) # FACTOR GRAPH ################################################################ class FactorGraph: """ Factor Graph """ def __init__(self): # Parameters and factors self._next_param_id = 0 self._next_factor_id = 0 self.params = {} self.factors = {} # Solver self.solver_max_iter = 5 self.solver_lambda = 1e-4 def add_param(self, param): """ Add param """ param_id = self._next_param_id self.params[param_id] = param self.params[param_id].set_param_id(param_id) self._next_param_id += 1 return param_id def add_factor(self, factor): """ Add factor """ # Double check if params exists for param_id in factor.param_ids: if param_id not in self.params: raise RuntimeError(f"Parameter [{param_id}] does not exist!") # Add factor factor_id = self._next_factor_id self.factors[factor_id] = factor self.factors[factor_id].set_factor_id(factor_id) self._next_factor_id += 1 return factor_id def remove_param(self, param): """ Remove param """ assert param.param_id in self.params del self.params[param.param_id] def remove_factor(self, factor): """ Remove factor """ assert factor.factor_id in self.factors del 
self.factors[factor.factor_id] def get_reproj_errors(self): """ Get reprojection errors """ target_factors = ["BAFactor", "VisionFactor", "CalibVisionFactor"] reproj_errors = [] for _, factor in self.factors.items(): if factor.factor_type in target_factors: factor_params = [self.params[pid].param for pid in factor.param_ids] retval = factor.get_reproj_error(*factor_params) if retval is not None: reproj_errors.append(retval) return np.array(reproj_errors).flatten() @staticmethod def _print_to_console(iter_k, lambda_k, cost_kp1, cost_k): """ Print to console """ print(f"iter[{iter_k}]:", end=" ") print(f"lambda: {lambda_k:.2e}", end=", ") print(f"cost: {cost_kp1:.2e}", end=", ") print(f"dcost: {cost_kp1 - cost_k:.2e}", end=" ") print() # rmse_vision = rmse(self._get_reproj_errors()) # print(f"rms_reproj_error: {rmse_vision:.2f} px") sys.stdout.flush() def _form_param_indices(self): """ Form parameter indices """ # Parameter ids pose_param_ids = set() sb_param_ids = set() camera_param_ids = set() exts_param_ids = set() feature_param_ids = set() # Track parameters nb_params = 0 for _, factor in self.factors.items(): for _, param_id in enumerate(factor.param_ids): param = self.params[param_id] if param.fix: continue elif param.var_type == "pose": pose_param_ids.add(param_id) elif param.var_type == "speed_and_biases": sb_param_ids.add(param_id) elif param.var_type == "extrinsics": exts_param_ids.add(param_id) elif param.var_type == "feature": feature_param_ids.add(param_id) elif param.var_type == "camera": camera_param_ids.add(param_id) nb_params += 1 # Assign global parameter order param_ids_list = [] param_ids_list.append(pose_param_ids) param_ids_list.append(sb_param_ids) param_ids_list.append(exts_param_ids) param_ids_list.append(feature_param_ids) param_ids_list.append(camera_param_ids) param_idxs = {} param_size = 0 for param_ids in param_ids_list: for param_id in param_ids: param_idxs[param_id] = param_size param_size += self.params[param_id].min_dims return (param_idxs, param_size) def _linearize(self, params, param_idxs, param_size): """ Linearize non-linear problem """ H = zeros((param_size, param_size)) g = zeros(param_size) # Form Hessian and R.H.S of Gauss newton for _, factor in self.factors.items(): factor_params = [params[pid].param for pid in factor.param_ids] r, jacobians = factor.eval(factor_params) # Form Hessian nb_params = len(factor_params) for i in range(nb_params): param_i = params[factor.param_ids[i]] if param_i.fix: continue idx_i = param_idxs[factor.param_ids[i]] size_i = param_i.min_dims J_i = jacobians[i] for j in range(i, nb_params): param_j = params[factor.param_ids[j]] if param_j.fix: continue idx_j = param_idxs[factor.param_ids[j]] size_j = param_j.min_dims J_j = jacobians[j] rs = idx_i re = idx_i + size_i cs = idx_j ce = idx_j + size_j if i == j: # Diagonal H[rs:re, cs:ce] += J_i.T @ J_j else: # Off-Diagonal H[rs:re, cs:ce] += J_i.T @ J_j H[cs:ce, rs:re] += H[rs:re, cs:ce].T # Form R.H.S. 
Gauss Newton g rs = idx_i re = idx_i + size_i g[rs:re] += (-J_i.T @ r) return (H, g) def _evaluate(self, params): """ Evaluate """ (param_idxs, param_size) = self._form_param_indices() (H, g) = self._linearize(params, param_idxs, param_size) return ((H, g), param_idxs) def _calculate_residuals(self, params): """ Calculate Residuals """ residuals = [] for _, factor in self.factors.items(): factor_params = [params[pid].param for pid in factor.param_ids] r = factor.eval(factor_params, only_residuals=True) residuals.append(r) return np.array(residuals).flatten() def _calculate_cost(self, params): """ Calculate Cost """ r = self._calculate_residuals(params) return 0.5 * (r.T @ r) @staticmethod def _update(params_k, param_idxs, dx): """ Update """ params_kp1 = copy.deepcopy(params_k) for param_id, param in params_kp1.items(): # Check if param even exists if param_id not in param_idxs: continue # Update parameter start = param_idxs[param_id] end = start + param.min_dims param_dx = dx[start:end] update_state_variable(param, param_dx) return params_kp1 @staticmethod def _solve_for_dx(lambda_k, H, g): """ Solve for dx """ # Damp Hessian H = H + lambda_k * eye(H.shape[0]) # H = H + lambda_k * np.diag(H.diagonal()) # # Pseudo inverse # dx = pinv(H) @ g # # Linear solver # dx = np.linalg.solve(H, g) # # Cholesky decomposition c, low = scipy.linalg.cho_factor(H) dx = scipy.linalg.cho_solve((c, low), g) # SVD # dx = solve_svd(H, g) # # Sparse cholesky decomposition # sH = scipy.sparse.csc_matrix(H) # dx = scipy.sparse.linalg.spsolve(sH, g) return dx def solve(self, verbose=False): """ Solve """ lambda_k = self.solver_lambda params_k = copy.deepcopy(self.params) cost_k = self._calculate_cost(params_k) # First evaluation if verbose: print(f"nb_factors: {len(self.factors)}") print(f"nb_params: {len(self.params)}") self._print_to_console(0, lambda_k, cost_k, cost_k) # Iterate for i in range(1, self.solver_max_iter): # Update and calculate cost ((H, g), param_idxs) = self._evaluate(params_k) dx = self._solve_for_dx(lambda_k, H, g) params_kp1 = self._update(params_k, param_idxs, dx) cost_kp1 = self._calculate_cost(params_kp1) # Verbose if verbose: self._print_to_console(i, lambda_k, cost_kp1, cost_k) # Accept or reject update if cost_kp1 < cost_k: # Accept update cost_k = cost_kp1 params_k = params_kp1 lambda_k /= 10.0 else: # Reject update params_k = params_k lambda_k *= 10.0 # Finish - set the original params the optimized values # Note: The reason we don't just do `self.params = params_k` is because # that would destroy the references to outside `FactorGraph()`. 
    for param_id, param in params_k.items():
      self.params[param_id].param = param.param


# FEATURE TRACKING #############################################################


def draw_matches(img_i, img_j, kps_i, kps_j, **kwargs):
  """ Draw keypoint matches between images `img_i` and `img_j` with keypoints
  `kps_i` and `kps_j` """
  assert len(kps_i) == len(kps_j)

  nb_kps = len(kps_i)
  viz = cv2.hconcat([img_i, img_j])
  viz = cv2.cvtColor(viz, cv2.COLOR_GRAY2RGB)

  color = (0, 255, 0)
  radius = 3
  thickness = kwargs.get('thickness', cv2.FILLED)
  linetype = kwargs.get('linetype', cv2.LINE_AA)

  for n in range(nb_kps):
    pt_i = None
    pt_j = None
    if hasattr(kps_i[n], 'pt'):
      pt_i = (int(kps_i[n].pt[0]), int(kps_i[n].pt[1]))
      pt_j = (int(kps_j[n].pt[0] + img_i.shape[1]), int(kps_j[n].pt[1]))
    else:
      pt_i = (int(kps_i[n][0]), int(kps_i[n][1]))
      pt_j = (int(kps_j[n][0] + img_i.shape[1]), int(kps_j[n][1]))

    cv2.circle(viz, pt_i, radius, color, thickness, lineType=linetype)
    cv2.circle(viz, pt_j, radius, color, thickness, lineType=linetype)
    cv2.line(viz, pt_i, pt_j, color, 1, linetype)

  return viz


def draw_keypoints(img, kps, inliers=None, **kwargs):
  """ Draw points `kps` on image `img`. The `inliers` boolean list is optional
  and is expected to be the same size as `kps` denoting whether the point
  should be drawn or not. """
  inliers = [1 for i in range(len(kps))] if inliers is None else inliers
  radius = kwargs.get('radius', 2)
  color = kwargs.get('color', (0, 255, 0))
  thickness = kwargs.get('thickness', cv2.FILLED)
  linetype = kwargs.get('linetype', cv2.LINE_AA)

  viz = img
  if len(img.shape) == 2:
    viz = cv2.cvtColor(img, cv2.COLOR_GRAY2RGB)

  for n, kp in enumerate(kps):
    if inliers[n]:
      p = None
      if hasattr(kp, 'pt'):
        p = (int(kp.pt[0]), int(kp.pt[1]))
      else:
        p = (int(kp[0]), int(kp[1]))
      cv2.circle(viz, p, radius, color, thickness, lineType=linetype)

  return viz


def sort_keypoints(kps):
  """ Sort a list of cv2.KeyPoint based on their response """
  responses = [kp.response for kp in kps]
  indices = range(len(responses))
  indices = sorted(indices, key=lambda i: responses[i], reverse=True)
  return [kps[i] for i in indices]


def spread_keypoints(img, kps, min_dist, **kwargs):
  """ Given a set of keypoints `kps` make sure they are at least `min_dist`
  pixels away from each other, if they are not remove them.
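
  A usage sketch (assuming `detector` is an OpenCV feature detector, e.g. the
  FAST detector used elsewhere in this file, and `img` a grayscale image):

    kps = detector.detect(img)
    kps = spread_keypoints(img, kps, 20)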
""" # Pre-check if not kps: return kps # Setup debug = kwargs.get('debug', False) prev_kps = kwargs.get('prev_kps', []) min_dist = int(min_dist) img_h, img_w = img.shape A = np.zeros(img.shape) # Allowable areas are marked 0 else not allowed # Loop through previous keypoints for kp in prev_kps: # Convert from keypoint to tuple p = (int(kp.pt[0]), int(kp.pt[1])) # Fill the area of the matrix where the next keypoint cannot be around rs = int(max(p[1] - min_dist, 0.0)) re = int(min(p[1] + min_dist + 1, img_h)) cs = int(max(p[0] - min_dist, 0.0)) ce = int(min(p[0] + min_dist + 1, img_w)) A[rs:re, cs:ce] = np.ones((re - rs, ce - cs)) # Loop through keypoints kps_results = [] for kp in sort_keypoints(kps): # Convert from keypoint to tuple p = (int(kp.pt[0]), int(kp.pt[1])) # Check if point is ok to be added to results if A[p[1], p[0]] > 0.0: continue # Fill the area of the matrix where the next keypoint cannot be around rs = int(max(p[1] - min_dist, 0.0)) re = int(min(p[1] + min_dist + 1, img_h)) cs = int(max(p[0] - min_dist, 0.0)) ce = int(min(p[0] + min_dist + 1, img_w)) A[rs:re, cs:ce] = np.ones((re - rs, ce - cs)) A[p[1], p[0]] = 2 # Add to results kps_results.append(kp) # Debug if debug: img = draw_keypoints(img, kps_results, radius=3) plt.figure() ax = plt.subplot(121) ax.imshow(A) ax.set_xlabel('pixel') ax.set_ylabel('pixel') ax.xaxis.tick_top() ax.xaxis.set_label_position('top') ax = plt.subplot(122) ax.imshow(img) ax.set_xlabel('pixel') ax.set_ylabel('pixel') ax.xaxis.tick_top() ax.xaxis.set_label_position('top') plt.show() return kps_results class FeatureGrid: """ FeatureGrid The idea is to take all the feature positions and put them into grid cells across the full image space. This is so that one could keep track of how many feautures are being tracked in each individual grid cell and act accordingly. o-----> x | --------------------- | | 0 | 1 | 2 | 3 | V --------------------- y | 4 | 5 | 6 | 7 | --------------------- | 8 | 9 | 10 | 11 | --------------------- | 12 | 13 | 14 | 15 | --------------------- grid_x = ceil((max(1, pixel_x) / img_w) * grid_cols) - 1.0 grid_y = ceil((max(1, pixel_y) / img_h) * grid_rows) - 1.0 cell_id = int(grid_x + (grid_y * grid_cols)) """ def __init__(self, grid_rows, grid_cols, image_shape, keypoints): assert len(image_shape) == 2 self.grid_rows = grid_rows self.grid_cols = grid_cols self.image_shape = image_shape self.keypoints = keypoints self.cell = [0 for i in range(self.grid_rows * self.grid_cols)] for kp in keypoints: if hasattr(kp, 'pt'): # cv2.KeyPoint assert (kp.pt[0] >= 0 and kp.pt[0] <= image_shape[1]) assert (kp.pt[1] >= 0 and kp.pt[1] <= image_shape[0]) self.cell[self.cell_index(kp.pt)] += 1 else: # Tuple assert (kp[0] >= 0 and kp[0] <= image_shape[1]) assert (kp[1] >= 0 and kp[1] <= image_shape[0]) self.cell[self.cell_index(kp)] += 1 def cell_index(self, pt): """ Return cell index based on point `pt` """ pixel_x, pixel_y = pt img_h, img_w = self.image_shape grid_x = math.ceil((max(1, pixel_x) / img_w) * self.grid_cols) - 1.0 grid_y = math.ceil((max(1, pixel_y) / img_h) * self.grid_rows) - 1.0 cell_id = int(grid_x + (grid_y * self.grid_cols)) return cell_id def count(self, cell_idx): """ Return cell count """ return self.cell[cell_idx] def grid_detect(detector, image, **kwargs): """ Detect features uniformly using a grid system. 
""" optflow_mode = kwargs.get('optflow_mode', False) max_keypoints = kwargs.get('max_keypoints', 240) grid_rows = kwargs.get('grid_rows', 3) grid_cols = kwargs.get('grid_cols', 4) prev_kps = kwargs.get('prev_kps', []) if prev_kps is None: prev_kps = [] # Calculate number of grid cells and max corners per cell image_height, image_width = image.shape dx = int(math.ceil(float(image_width) / float(grid_cols))) dy = int(math.ceil(float(image_height) / float(grid_rows))) nb_cells = grid_rows * grid_cols max_per_cell = math.floor(max_keypoints / nb_cells) # Detect corners in each grid cell feature_grid = FeatureGrid(grid_rows, grid_cols, image.shape, prev_kps) des_all = [] kps_all = [] cell_idx = 0 for y in range(0, image_height, dy): for x in range(0, image_width, dx): # Make sure roi width and height are not out of bounds w = image_width - x if (x + dx > image_width) else dx h = image_height - y if (y + dy > image_height) else dy # Detect corners in grid cell cs, ce, rs, re = (x, x + w, y, y + h) roi_image = image[rs:re, cs:ce] kps = None des = None if optflow_mode: detector.setNonmaxSuppression(1) kps = detector.detect(roi_image) kps = sort_keypoints(kps) else: kps = detector.detect(roi_image, None) kps, des = detector.compute(roi_image, kps) # Offset keypoints cell_vacancy = max_per_cell - feature_grid.count(cell_idx) if cell_vacancy <= 0: continue limit = min(len(kps), cell_vacancy) for i in range(limit): kp = kps[i] kp.pt = (kp.pt[0] + x, kp.pt[1] + y) kps_all.append(kp) des_all.append(des[i, :] if optflow_mode is False else None) # Update cell_idx cell_idx += 1 # Space out the keypoints if optflow_mode: kps_all = spread_keypoints(image, kps_all, 20, prev_kps=prev_kps) # Debug if kwargs.get('debug', False): # Setup viz = cv2.cvtColor(image, cv2.COLOR_GRAY2RGB) kps_grid = FeatureGrid(grid_rows, grid_cols, image.shape, kps_all) # Visualization properties red = (0, 0, 255) yellow = (0, 255, 255) linetype = cv2.LINE_AA font = cv2.FONT_HERSHEY_SIMPLEX # -- Draw horizontal lines for x in range(0, image_width, dx): cv2.line(viz, (x, 0), (x, image_height), red, 1, linetype) # -- Draw vertical lines for y in range(0, image_height, dy): cv2.line(viz, (0, y), (image_width, y), red, 1, linetype) # -- Draw bin numbers cell_idx = 0 for y in range(0, image_height, dy): for x in range(0, image_width, dx): text = str(kps_grid.count(cell_idx)) origin = (x + 10, y + 20) viz = cv2.putText(viz, text, origin, font, 0.5, red, 1, linetype) # text = str(feature_grid.count(cell_idx)) # origin = (x + 10, y + 20) # viz = cv2.putText(viz, text, origin, font, 0.5, yellow, 1, linetype) cell_idx += 1 # -- Draw keypoints viz = draw_keypoints(viz, kps_all, color=red) viz = draw_keypoints(viz, prev_kps, color=yellow) cv2.imshow("viz", viz) cv2.waitKey(0) # Return if optflow_mode: return kps_all return kps_all, np.array(des_all) def optflow_track(img_i, img_j, pts_i, **kwargs): """ Track keypoints `pts_i` from image `img_i` to image `img_j` using optical flow. Returns a tuple of `(pts_i, pts_j, inliers)` points in image i, j and a vector of inliers. 
""" # Setup patch_size = kwargs.get('patch_size', 50) max_iter = kwargs.get('max_iter', 100) epsilon = kwargs.get('epsilon', 0.001) crit = (cv2.TermCriteria_COUNT | cv2.TermCriteria_EPS, max_iter, epsilon) # Optical flow settings config = {} config['winSize'] = (patch_size, patch_size) config['maxLevel'] = 3 config['criteria'] = crit config['flags'] = cv2.OPTFLOW_USE_INITIAL_FLOW # Track using optical flow pts_j = np.array(pts_i) track_results = cv2.calcOpticalFlowPyrLK(img_i, img_j, pts_i, pts_j, **config) (pts_j, optflow_inliers, _) = track_results # Make sure keypoints are within image dimensions bound_inliers = [] img_h, img_w = img_j.shape for p in pts_j: x_ok = p[0] >= 0 and p[0] <= img_w y_ok = p[1] >= 0 and p[1] <= img_h if x_ok and y_ok: bound_inliers.append(True) else: bound_inliers.append(False) # Update or mark feature as lost assert len(pts_i) == optflow_inliers.shape[0] assert len(pts_i) == len(bound_inliers) inliers = [] for i in range(len(pts_i)): if optflow_inliers[i, 0] and bound_inliers[i]: inliers.append(True) else: inliers.append(False) if kwargs.get('debug', False): viz_i = draw_keypoints(img_i, pts_i, inliers) viz_j = draw_keypoints(img_j, pts_j, inliers) viz = cv2.hconcat([viz_i, viz_j]) cv2.imshow('viz', viz) cv2.waitKey(0) return (pts_i, pts_j, inliers) def filter_outliers(pts_i, pts_j, inliers): """ Filter outliers """ pts_out_i = [] pts_out_j = [] for n, status in enumerate(inliers): if status: pts_out_i.append(pts_i[n]) pts_out_j.append(pts_j[n]) return (pts_out_i, pts_out_j) def ransac(pts_i, pts_j, cam_i, cam_j): """ RANSAC """ # Setup cam_geom_i = cam_i.data cam_geom_j = cam_j.data # Undistort points pts_i_ud = np.array([cam_geom_i.undistort(cam_i.param, p) for p in pts_i]) pts_j_ud = np.array([cam_geom_j.undistort(cam_j.param, p) for p in pts_j]) # Ransac via OpenCV's find fundamental matrix method = cv2.FM_RANSAC reproj_thresh = 0.75 confidence = 0.99 args = [pts_i_ud, pts_j_ud, method, reproj_thresh, confidence] _, inliers = cv2.findFundamentalMat(*args) return inliers.flatten() class FeatureTrackerData: """ Feature tracking data *per camera* This data structure keeps track of: - Image - Keypoints - Descriptors - Feature ids (optional) """ def __init__(self, cam_idx, image, keypoints, feature_ids=None): self.cam_idx = cam_idx self.image = image self.keypoints = list(keypoints) self.feature_ids = list(feature_ids) def add(self, fid, kp): """ Add measurement """ assert isinstance(fid, int) assert hasattr(kp, 'pt') self.keypoints.append(kp) self.feature_ids.append(fid) assert len(self.keypoints) == len(self.feature_ids) def update(self, image, fids, kps): """ Extend measurements """ assert len(kps) == len(fids) self.image = np.array(image) if kps: assert hasattr(kps[0], 'pt') self.feature_ids.extend(fids) self.keypoints.extend(kps) assert len(self.keypoints) == len(self.feature_ids) class FeatureTracker: """ Feature tracker """ def __init__(self): # Settings self.mode = "TRACK_DEFAULT" # self.mode = "TRACK_OVERLAPS" # self.mode = "TRACK_INDEPENDENT" # Settings self.reproj_threshold = 5.0 # Data self.prev_ts = None self.frame_idx = 0 self.detector = cv2.FastFeatureDetector_create(threshold=50) self.features_detected = 0 self.features_tracking = 0 self.feature_overlaps = {} self.prev_mcam_imgs = None self.kp_size = 0 self.cam_idxs = [] self.cam_params = {} self.cam_exts = {} self.cam_overlaps = {} self.cam_data = {} def add_camera(self, cam_idx, cam_params, cam_exts): """ Add camera """ self.cam_idxs.append(cam_idx) self.cam_data[cam_idx] = None 
self.cam_params[cam_idx] = cam_params self.cam_exts[cam_idx] = cam_exts def add_overlap(self, cam_i_idx, cam_j_idx): """ Add overlap """ if cam_i_idx not in self.cam_overlaps: self.cam_overlaps[cam_i_idx] = [] self.cam_overlaps[cam_i_idx].append(cam_j_idx) def num_tracking(self): """ Return number of features tracking """ feature_ids = [] for _, cam_data in self.cam_data.items(): if cam_data is not None: feature_ids.extend(cam_data.feature_ids) return len(set(feature_ids)) def _get_camera_indices(self): """ Get camera indices """ return [cam_idx for cam_idx, _ in self.cam_params] def _get_keypoints(self, cam_idx): """ Get keypoints observed by camera `cam_idx` """ keypoints = None if self.cam_data[cam_idx] is not None: keypoints = self.cam_data[cam_idx].keypoints return keypoints def _get_feature_ids(self, cam_idx): """ Get feature ids observed by camera `cam_idx` """ feature_ids = None if self.cam_data[cam_idx] is not None: feature_ids = self.cam_data[cam_idx].feature_ids return feature_ids def _form_feature_ids(self, nb_kps): """ Form list of feature ids for new features to be added """ self.features_detected += nb_kps start_idx = self.features_detected - nb_kps end_idx = start_idx + nb_kps return list(range(start_idx, end_idx)) def _triangulate(self, idx_i, idx_j, z_i, z_j): """ Triangulate feature """ # Setup cam_i = self.cam_params[idx_i] cam_j = self.cam_params[idx_j] cam_geom_i = cam_i.data cam_geom_j = cam_j.data cam_exts_i = self.cam_exts[idx_i] cam_exts_j = self.cam_exts[idx_j] # Form projection matrices P_i and P_j T_BCi = pose2tf(cam_exts_i.param) T_BCj = pose2tf(cam_exts_j.param) T_CiCj = inv(T_BCi) @ T_BCj P_i = pinhole_P(cam_geom_i.proj_params(cam_i.param),
eye(4)
numpy.eye
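For context, a minimal self-contained sketch of what the completion above is used for: numpy.eye(4) acts as the identity body-to-camera transform when a pinhole projection matrix is assembled. The intrinsics values and the pinhole_projection helper below are illustrative placeholders, not taken from the source prompt.

import numpy as np

def pinhole_projection(K, T_WC):
    """Form a 3x4 projection matrix P = K [R | t] from a 4x4 camera pose."""
    return K @ T_WC[:3, :]  # drop the homogeneous row

K = np.array([[640.0, 0.0, 320.0],
              [0.0, 640.0, 240.0],
              [0.0, 0.0, 1.0]])
P_identity = pinhole_projection(K, np.eye(4))  # camera at the origin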
# -*- coding: utf-8 -*- """ Created on Fri Nov 30 16:41:27 2018 @author: eee """ from os.path import dirname, join import matplotlib.pyplot as plt import numpy as np import pickle from pyomo.environ import * from matplotlib import gridspec plt.rc('text', usetex=True) plt.rc('font', size=8,family='Times New Roman') plt.rcParams.update({'figure.max_open_warning': 0}) plt.rcParams['xtick.direction'] = 'out' plt.rcParams['ytick.direction'] = 'out' plt.rcParams['ytick.major.width'] = 0.4 plt.rcParams['xtick.major.width'] = 0.4 def get_result(casename, mode, opt_num): ''' load the opt results from saved file ''' path_result = join(dirname(__file__), 'result//' + 'result-' + casename + '-mode_' + str(mode) + '-opt_num_' + str(opt_num) + '.p') with open(path_result, 'rb') as fp: opt_result = pickle.load(fp) return opt_result def get_curve_data(opt_result): x_w = dict() x_theta = dict() x_w.update(opt_result.opt_result.w.get_values()) x_theta.update(opt_result.opt_result.theta.get_values()) n_sample = 50 # number of samples in each time element d_type = opt_result.casedata['disturbance'].keys() - [9] curve_w = dict() for i_bus in opt_result.i_gen: for k0 in d_type: for k1 in opt_result.casedata['disturbance'][k0][:,0]: k = (k0, k1) t_i = np.array([]) x_w_t = np.array([]) for i in range(1, len(opt_result.param_dd[k[0]].keys()) + 1): t_i = np.r_[t_i, np.linspace(opt_result.param_dd[k[0]][i][0], opt_result.param_dd[k[0]][i][1], n_sample)] tau_i = (np.linspace(opt_result.param_dd[k[0]][i][0], opt_result.param_dd[k[0]][i][1], n_sample) - opt_result.param_dd[k[0]][i][0])/(opt_result.param_dd[k[0]][i][1] - opt_result.param_dd[k[0]][i][0]) nc_l = opt_result.param_dd[k[0]][i][2] s_tau_l = opt_result.param_dd[k[0]][i][3] NC = list(range(0, nc_l + 1)) x_w_t = np.r_[x_w_t, sum( x_w[k[0], k[1], i, j, i_bus] * prod( (tau_i - s_tau_l[r])/(s_tau_l[j] - s_tau_l[r]) for r in set(NC)-set([j]) ) for j in NC)] curve_w[k, i_bus] = [t_i, x_w_t] curve_theta = dict() for i_bus in opt_result.i_gen + opt_result.i_load + opt_result.i_non: for k0 in d_type: for k1 in opt_result.casedata['disturbance'][k0][:,0]: k = (k0, k1) t_i = np.array([]) x_theta_t = np.array([]) for i in range(1, len(opt_result.param_dd[k[0]].keys()) + 1): t_i = np.r_[t_i,
np.linspace(opt_result.param_dd[k[0]][i][0], opt_result.param_dd[k[0]][i][1], n_sample)
numpy.linspace
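A small runnable sketch of the pattern the completion above fills in: numpy.linspace samples one finite element of the collocation grid, the samples are mapped to a local tau in [0, 1], and a Lagrange basis polynomial is evaluated on it. The element bounds and node values are invented for illustration.

import numpy as np

# Sample one time element [t0, t1] and map to local tau in [0, 1]
t0, t1, n_sample = 0.0, 0.5, 50
t = np.linspace(t0, t1, n_sample)
tau = (t - t0) / (t1 - t0)

# Lagrange basis value at collocation node j for the given nodes s_tau
s_tau = np.array([0.0, 0.5, 1.0])
j = 1
basis_j = np.prod([(tau - s_tau[r]) / (s_tau[j] - s_tau[r])
                   for r in range(len(s_tau)) if r != j], axis=0)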
""" making grid for plot depending on manifold and latent distribution """ import numpy as np from scipy.special import i0 def true_density(data, manifold, latent_distribution): if manifold == 'sphere': theta = data[1] phi = data[0] if latent_distribution == 'mixture': kappa = 6.0 mu11 = 1*np.pi/4 mu12 = np.pi/2 #northpole mu21 = 3*np.pi/4 mu22 = 4*np.pi/3 #southpole mu31 = 3*np.pi/4 mu32 = np.pi/2 mu41 = np.pi/4 mu42 = 4*np.pi/3 probs = 1/4* (2*np.exp(kappa*np.cos(2* (theta-mu31))) * np.exp(kappa*np.cos(phi-mu32)) *(1/(2*np.pi*i0(kappa))**2) +2*np.exp(kappa*np.cos(2* (theta-mu11))) * np.exp(kappa*np.cos(phi-mu12)) *(1/(2*np.pi*i0(kappa))**2) +2*np.exp(kappa*np.cos(2* (theta-mu21))) * np.exp(kappa*np.cos(phi-mu22)) *(1/(2*np.pi*i0(kappa))**2) +2*np.exp(kappa*np.cos(2* (theta-mu41))) * np.exp(kappa*np.cos(phi-mu42)) *(1/(2*np.pi*i0(kappa))**2) ) return probs elif latent_distribution == 'correlated': kappa = 6.0 mu11, kappa11 = 0, kappa mu12, kappa12 = np.pi/2 , kappa #northpole mu21, kappa21 = np.pi , kappa mu22, kappa22 = 3*np.pi/2 , kappa #southpole mu31, kappa31 = np.pi/2 , kappa mu32, kappa32 = 0 , kappa mu41, kappa41 = np.pi/2, 50 mu42, kappa42 = np.pi , kappa prob = (1/(2*np.pi*i0(kappa41))) * 2*np.exp(kappa41*np.cos(2*(theta-mu41))) / (2*np.pi) probs = 1/3 * (prob +2*np.exp(kappa11*np.cos(2* (theta-mu11))) * np.exp(kappa12*np.cos(phi-mu12)) *(1/(2*np.pi*i0(kappa))**2) +2*np.exp(kappa21*np.cos(2* (theta-mu21))) * np.exp(kappa22*np.cos(phi-mu22)) *(1/(2*np.pi*i0(kappa))**2) ) return probs elif latent_distribution == 'unimodal': kappa, mu1, mu2 = 6, np.pi/2, np.pi probs = 2*np.exp(kappa*np.cos(2* (theta-mu1))) * np.exp(kappa*np.cos(phi-mu2)) *(1/(2*np.pi*i0(kappa))**2) return probs elif manifold =='torus': theta = data[1] phi = data[0] if latent_distribution == 'mixture': kappa, mu11, mu12, mu21, mu22, mu31, mu32 = 2, 0.21, 2.85, 1.89, 6.18, 3.77, 1.56 probs = 0.3* ( np.exp(kappa*np.cos(theta-mu11)+kappa*np.cos(phi-mu12)) + np.exp(kappa*np.cos(theta-mu21)+kappa*np.cos(phi-mu22)) + np.exp(kappa*np.cos(theta-mu31)+kappa*np.cos(phi-mu32)) )*(1/(2*np.pi*i0(kappa))**2) return probs elif latent_distribution == 'correlated': kappa, mu = 2, 1.94 probs = 1/(2*np.pi) * np.exp(kappa*np.cos(phi+theta-mu)) *(1/(2*np.pi*i0(kappa))**1) return probs elif latent_distribution == 'unimodal': mu1, mu2, kappa = 4.18-np.pi, 5.96-np.pi, 2 probs = np.exp(kappa*np.cos(phi-mu1) + kappa*np.cos(theta-mu2))*(1/(2*np.pi*i0(kappa))**2) return probs elif manifold =='swiss_roll': u_ = data[0] v_ = data[1] if latent_distribution == 'mixture': kappa, mu11, mu12, mu21, mu22, mu31, mu32 = 2, 0.1, 0.1, 0.5, 0.8, 0.8, 0.8 probs = 0.3*(np.exp(kappa*np.cos((u_-mu11)*(2*np.pi))+kappa*np.cos((v_-mu12)*(2*np.pi))) + np.exp(kappa*np.cos((u_-mu21)*(2*np.pi))+kappa*np.cos((v_-mu22)*(2*np.pi))) + np.exp(kappa*np.cos((u_-mu31)*(2*np.pi))+kappa*np.cos((v_-mu32)*(2*np.pi))))*(1/(2*np.pi*i0(kappa))**2) elif latent_distribution == 'correlated': def _translate(x): #from [0,1] to [-pi,pi] return 2*np.pi*x-np.pi def _translate_inverse(u): #from [-pi,pi] to [0,1] return (u+np.pi)/(2*np.pi) kappa, mu = 5, 0.0 probs = 2*np.pi * np.exp((kappa)*np.cos(_translate(v_) - _translate(u_))) *(1/(2*np.pi*i0(kappa))**1) elif latent_distribution == 'unimodal': mu1, mu2, kappa = 0.5, 0.9, 2 probs = np.exp(kappa*np.cos((u_-mu1)*(2*np.pi)) + kappa*np.cos((v_-mu2)*(2*np.pi))) return probs elif manifold == 'hyperboloid': v = data[0] theta = data[1] if latent_distribution == 'mixture': kappa, mu11, mu12 = 6, np.pi/2, 3*np.pi/2 probs_theta = 0.5*( 
np.exp(kappa*np.cos((theta-mu11))) + np.exp(kappa*np.cos(theta-mu12)) ) *(1/(2*np.pi*i0(kappa))) probs_v = np.zeros(*v.shape) for i in range(len(v)): if 1.0<=v[i] <= 1.5: probs_v[i]=1/2 probs = probs_v * probs_theta elif latent_distribution == 'correlated': kappa = 6 probs_theta = 1*( np.exp(kappa*np.cos((theta-v+np.pi)))) *(1/(2*np.pi*i0(kappa))) #1*( np.exp(kappa*np.cos((theta-v+np.pi)))) *(1/(2*np.pi*i0(kappa))) probs_v = np.ones(*v.shape)/2 probs = probs_v * probs_theta elif latent_distribution == 'unimodal': sig2 = 1 probs_theta = 1/(2*np.pi) probs_v = 2*np.exp(-v/0.5) #np.sqrt(2*np.pi*sig2)*np.exp(-v**2/(2*sig2)) probs = probs_v * probs_theta return probs elif manifold =='thin_spiral': z = data[0] from scipy.stats import expon probs= expon.pdf(z,scale=0.3) return probs elif manifold == 'spheroid': z1, z2 = data[0], data[1] idx_hyp = np.where(z1 < 0 )[0] idx_sph = np.where(z1 >=0 )[0] probs = np.zeros(len(z1)) if latent_distribution == 'mixture': kappa, mu11, mu12, mu21, mu22, mu31, mu32 =6.0, 0, np.pi, -0.5, np.pi/2, 0.5, np.pi/2 probs = 1/3* (np.exp(kappa*np.cos((z1-mu11))) * np.exp(kappa*np.cos(z2-mu12)) *(1/(2*np.pi*i0(kappa))**2) +np.exp(kappa*np.cos((z1-mu21))) * np.exp(kappa*np.cos(z2-mu22)) *(1/(2*np.pi*i0(kappa))**2) +np.exp(kappa*np.cos((z1-mu31))) * np.exp(kappa*np.cos(z2-mu32)) *(1/(2*np.pi*i0(kappa))**2) ) elif latent_distribution == 'correlated': from scipy.stats import expon _scale, _kappa = 0.4, 10 theta, phi = z1[idx_sph], z2[idx_sph] probs_phi = np.exp((_kappa)*np.cos(phi- np.pi + theta)) *(1/(2*np.pi*i0(_kappa))) #10/(2*np.pi)#n probs_theta = expon.pdf(theta,scale=_scale) probs[idx_sph] = 0.5 * probs_theta * probs_phi v, psi = z1[idx_hyp], z2[idx_hyp] probs_theta = 1*( np.exp(_kappa*np.cos(psi-np.pi-np.abs(v)))) *(1/(2*np.pi*i0(_kappa))) probs_v = expon.pdf(np.abs(v),scale=_scale) probs[idx_hyp] = 0.5 * probs_v * probs_theta return probs elif manifold == 'stiefel': theta_ = data[0] kappa, mu1, mu2, mu3, mu4 = 6.0, 0, -np.pi/2, np.pi/2, np.pi probs = 1/4* (np.exp(kappa*np.cos(theta_-mu1)) + np.exp(kappa*np.cos(theta_-mu2)) +np.exp(kappa*np.cos(theta_-mu3)) + np.exp(kappa*np.cos(theta_-mu4)) ) * (1/(2*np.pi*i0(kappa))) return probs def _transform_u_to_hyperboloid(v,psi,sign=1): a,b,c = 1, 1, 1 x = -a*(np.cosh(np.abs(v)))*np.cos(psi) y = -b*(np.cosh(np.abs(v)))*np.sin(psi) z = sign * c*np.sinh(np.abs(v)) samples = np.stack([x,y,z], axis=1) return samples def _transform_u_to_sphere(theta,phi): c, a = 0, 1 x = (c + a*np.cos(theta+np.pi)) * np.cos(phi) y = (c + a*np.cos(theta+np.pi)) * np.sin(phi) z = a * np.sin(theta+np.pi) x = np.stack([x,y,z], axis=1) return x def make_grid(n_pts,manifold='sphere',latent_distr = 'mixture'): # print('manifold ',manifold == 'torus') if manifold == 'sphere': theta = np.linspace(0, np.pi, n_pts+1)[1:] dx = theta[1]-theta[0] phi =
np.linspace(0, 2*np.pi, n_pts)
numpy.linspace
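A compact sketch of how numpy.linspace builds the equiangular (theta, phi) grid that make_grid returns, paired with a unimodal von Mises-style density like the ones defined above; the parameter values are placeholders.

import numpy as np
from scipy.special import i0

n_pts = 100
theta = np.linspace(0, np.pi, n_pts + 1)[1:]
phi = np.linspace(0, 2 * np.pi, n_pts)
theta_g, phi_g = np.meshgrid(theta, phi, indexing='ij')

kappa, mu1, mu2 = 6.0, np.pi / 2, np.pi
probs = (2 * np.exp(kappa * np.cos(2 * (theta_g - mu1)))
         * np.exp(kappa * np.cos(phi_g - mu2))
         / (2 * np.pi * i0(kappa)) ** 2)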
import numpy as np from .Gaussianformula.baseFunc import * from .Gaussianformula.ordering import * import matplotlib.pyplot as plt class Gaussian(): def __init__(self, N): self.N = N self.V = (np.eye(2 * N)) * 0.5 self.mu = np.zeros(2 * N) def mean(self, idx): res = np.copy(self.mu[2 * idx:2 * idx + 2]) return res def cov(self, idx): res = np.copy(self.V[(2 * idx):(2 * idx + 2), (2 * idx):(2 * idx + 2)]) return res def S(self, idx, r): self.Xsqueeze(idx, r) def Xsqueeze(self, idx, r): idx = 2 * idx S = np.eye(2 * self.N) S[idx:idx+2, idx:idx+2] = np.array([[np.exp(-r), 0], [0, np.exp(r)]]) self.V = np.dot(S, np.dot(self.V, S.T)) self.mu =
np.dot(S, self.mu)
numpy.dot
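A short sketch of the squeezing update the completion above finishes: the symplectic matrix S acts on the covariance by congruence and on the mean via numpy.dot. The mode count and squeezing parameter are arbitrary.

import numpy as np

N, r = 1, 0.5
V = np.eye(2 * N) * 0.5          # vacuum covariance
mu = np.zeros(2 * N)             # zero mean
S = np.array([[np.exp(-r), 0.0],
              [0.0, np.exp(r)]])
V = np.dot(S, np.dot(V, S.T))    # V <- S V S^T
mu = np.dot(S, mu)               # mu <- S mu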
# --- # jupyter: # jupytext: # text_representation: # extension: .py # format_name: light # format_version: '1.4' # jupytext_version: 1.1.5 # kernelspec: # display_name: Python 3 # language: python # name: python3 # --- # # s_checklist_scenariobased_step01 [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_checklist_scenariobased_step01&codeLang=Python) # For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ex-vue-1). # + import numpy as np import pandas as pd from scipy import interpolate import matplotlib.pyplot as plt from pandas.plotting import register_matplotlib_converters register_matplotlib_converters() from arpym.pricing import bsm_function, bootstrap_nelson_siegel, \ implvol_delta2m_moneyness from arpym.tools import aggregate_rating_migrations, add_logo # - # ## [Input parameters](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-parameters) # + # set current time t_now t_now = np.datetime64('2012-08-31') # set start date for data selection t_first = np.datetime64('2009-11-02') # set initial portfolio construction date t_init t_init = np.datetime64('2012-08-30') # stocks - must include GE and JPM stock_names = ['GE', 'JPM', 'A', 'AA', 'AAPL'] # stocks considered # make sure stock names includes GE and JPM stock_names = ['GE', 'JPM'] + [stock for stock in stock_names if stock not in ['GE', 'JPM']] print('Stocks considered:', stock_names) # options on S&P 500 k_strk = 1407 # strike value of options on S&P 500 (US dollars) tend_option = np.datetime64('2013-08-26') # options expiry date y = 0.01 # level for yield curve (assumed flat and constant) l_ = 9 # number of points on the m-moneyness grid # corporate bonds # expiry date of the GE coupon bond to extract tend_ge = np.datetime64('2013-09-16') # expiry date of the JPM coupon bond to extract tend_jpm = np.datetime64('2014-01-15') # starting ratings following the table: # "AAA" (0), "AA" (1), "A" (2), "BBB" (3), "BB" (4), "B" (5), # "CCC" (6), "D" (7) ratings_tnow = np.array([5, # initial credit rating for GE (corresponding to B) 3]) # initial credit rating for JPM (corresponding to BBB) # start of period for aggregate credit risk drivers tfirst_credit = np.datetime64('1995-01-01') # end of period for aggregate credit risk drivers tlast_credit = np.datetime64('2004-12-31') # index of risk driver to plot d_plot = 1 # - # ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step00): Import data # + # upload data # stocks stocks_path = '../../../databases/global-databases/equities/db_stocks_SP500/' db_stocks = pd.read_csv(stocks_path + 'db_stocks_sp.csv', skiprows=[0], index_col=0) db_stocks.index = pd.to_datetime(db_stocks.index) # implied volatility of option on S&P 500 index path = '../../../databases/global-databases/derivatives/db_implvol_optionSPX/' db_impliedvol = pd.read_csv(path + 'data.csv', index_col=['date'], parse_dates=['date']) implvol_param = pd.read_csv(path + 'params.csv', index_col=False) # corporate bonds: GE and JPM jpm_path = \ '../../../databases/global-databases/fixed-income/db_corporatebonds/JPM/' db_jpm = pd.read_csv(jpm_path + 'data.csv', index_col=['date'], parse_dates=['date']) jpm_param = pd.read_csv(jpm_path + 'params.csv', index_col=['expiry_date'], parse_dates=['expiry_date']) jpm_param['link'] = ['dprice_'+str(i) for i in range(1, jpm_param.shape[0]+1)] ge_path = 
'../../../databases/global-databases/fixed-income/db_corporatebonds/GE/' db_ge = pd.read_csv(ge_path + 'data.csv', index_col=['date'], parse_dates=['date']) ge_param = pd.read_csv(ge_path + 'params.csv', index_col=['expiry_date'], parse_dates=['expiry_date']) ge_param['link'] = ['dprice_'+str(i) for i in range(1, ge_param.shape[0]+1)] # ratings rating_path = '../../../databases/global-databases/credit/db_ratings/' db_ratings = pd.read_csv(rating_path+'data.csv', parse_dates=['date']) # ratings_param represents all possible ratings i.e. AAA, AA, etc. ratings_param = pd.read_csv(rating_path+'params.csv', index_col=0) ratings_param = np.array(ratings_param.index) c_ = len(ratings_param)-1 # define the date range of interest dates = db_stocks.index[(db_stocks.index >= t_first) & (db_stocks.index <= t_now)] dates = np.intersect1d(dates, db_impliedvol.index) dates = dates.astype('datetime64[D]') # the corporate bonds time series is shorter; select the bonds dates ind_dates_bonds = np.where((db_ge.index >= dates[0]) & (db_ge.index <= t_now)) dates_bonds = np.intersect1d(db_ge.index[ind_dates_bonds], db_jpm.index) dates_bonds = dates_bonds.astype('datetime64[D]') # length of the time series t_ = len(dates) t_bonds = len(dates_bonds) # initialize temporary databases db_risk_drivers = {} v_tnow = {} v_tinit = {} risk_drivers_names = {} v_tnow_names = {} # implied volatility parametrized by time to expiry and delta-moneyness tau_implvol = np.array(implvol_param.time2expiry) tau_implvol = tau_implvol[~np.isnan(tau_implvol)] delta_moneyness = np.array(implvol_param.delta) implvol_delta_moneyness_2d = \ db_impliedvol.loc[(db_impliedvol.index.isin(dates)), (db_impliedvol.columns != 'underlying')] k_ = len(tau_implvol) # unpack flattened database (from 2d to 3d) implvol_delta_moneyness_3d = np.zeros((t_, k_, len(delta_moneyness))) for k in range(k_): implvol_delta_moneyness_3d[:, k, :] = \ np.r_[np.array(implvol_delta_moneyness_2d.iloc[:, k::k_])] # - # ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step01): Stocks # + n_stocks = len(stock_names) # number of stocks d_stocks = n_stocks # one risk driver for each stock for d in range(d_stocks): # calculate time series of stock risk drivers db_risk_drivers[d] = np.log(np.array(db_stocks.loc[dates, stock_names[d]])) risk_drivers_names[d] = 'stock '+stock_names[d]+'_log_value' # stock value v_tnow[d] = db_stocks.loc[t_now, stock_names[d]] v_tinit[d] = db_stocks.loc[t_init, stock_names[d]] v_tnow_names[d] = 'stock '+stock_names[d] # number of risk drivers, to be updated at every insertion d_ = d_stocks # - # ## [Step 2](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step02): S&P 500 Index # + # calculate risk driver of the S&P 500 index db_risk_drivers[d_] = \ np.log(np.array(db_impliedvol.loc[(db_impliedvol.index.isin(dates)), 'underlying'])) risk_drivers_names[d_] = 'sp_index_log_value' # value of the S&P 500 index v_tnow[d_] = db_impliedvol.loc[t_now, 'underlying'] v_tinit[d_] = db_impliedvol.loc[t_init, 'underlying'] v_tnow_names[d_] = 'sp_index' # update counter d_ = d_+1 # - # ## [Step 3](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step03): Call and put options on the S&P 500 Index # + # from delta-moneyness to m-moneyness parametrization implvol_m_moneyness_3d, m_moneyness = \ implvol_delta2m_moneyness(implvol_delta_moneyness_3d, tau_implvol, delta_moneyness, y*np.ones((t_, k_)), tau_implvol, l_) # 
calculate log implied volatility log_implvol_m_moneyness_2d = \ np.log(np.reshape(implvol_m_moneyness_3d, (t_, k_*(l_)), 'F')) # value of the underlying s_tnow = v_tnow[d_stocks] s_tinit = v_tinit[d_stocks] # time to expiry (in years) tau_option_tnow = np.busday_count(t_now, tend_option)/252 tau_option_tinit = np.busday_count(t_init, tend_option)/252 # moneyness moneyness_tnow = np.log(s_tnow/k_strk)/np.sqrt(tau_option_tnow) moneyness_tinit = np.log(s_tnow/k_strk)/np.sqrt(tau_option_tnow) # grid points points = list(zip(*[grid.flatten() for grid in np.meshgrid(*[tau_implvol, m_moneyness])])) # known values values = implvol_m_moneyness_3d[-1, :, :].flatten('F') # implied volatility (interpolated) impl_vol_tnow = \ interpolate.LinearNDInterpolator(points, values)(*np.r_[tau_option_tnow, moneyness_tnow]) impl_vol_tinit = \ interpolate.LinearNDInterpolator(points, values)(*np.r_[tau_option_tinit, moneyness_tinit]) # compute call option value by means of Black-Scholes-Merton formula v_call_tnow = bsm_function(s_tnow, y, impl_vol_tnow, moneyness_tnow, tau_option_tnow) v_call_tinit = bsm_function(s_tinit, y, impl_vol_tinit, moneyness_tinit, tau_option_tinit) # compute put option value by means of the put-call parity v_zcb_tnow = np.exp(-y*tau_option_tnow) v_put_tnow = v_call_tnow - s_tnow + k_strk*v_zcb_tnow v_zcb_tinit = np.exp(-y*tau_option_tinit) v_put_tinit = v_call_tinit - s_tinit + k_strk*v_zcb_tinit # store data d_implvol = log_implvol_m_moneyness_2d.shape[1] for d in np.arange(d_implvol): db_risk_drivers[d_+d] = log_implvol_m_moneyness_2d[:, d] risk_drivers_names[d_+d] = 'option_spx_logimplvol_mtau_' + str(d+1) v_tnow[d_] = v_call_tnow v_tinit[d_] = v_call_tinit v_tnow_names[d_] = 'option_spx_call' v_tnow[d_+1] = v_put_tnow v_tinit[d_+1] = v_put_tinit v_tnow_names[d_+1] = 'option_spx_put' # update counter d_ = len(db_risk_drivers) n_ = len(v_tnow) # - # ## [Step 4](https://www.arpm.co/lab/redirect.php?permalink=s_checklist_scenariobased_step01-implementation-step04): Corporate bonds # + n_bonds = 2 # GE bond # extract coupon coupon_ge = ge_param.loc[tend_ge, 'coupons']/100 # rescaled dirty prices of GE bond v_bond_ge = db_ge.loc[db_ge.index.isin(dates_bonds)]/100 # computation of Nelson-Siegel parameters for GE bond theta_ge = np.zeros((t_bonds, 4)) theta_ge = bootstrap_nelson_siegel(v_bond_ge.values, dates_bonds, np.array(ge_param.coupons/100), ge_param.index.values.astype('datetime64[D]')) # risk drivers for bonds are Nelson-Siegel parameters for d in np.arange(4): if d == 3: db_risk_drivers[d_+d] = np.sqrt(theta_ge[:, d]) else: db_risk_drivers[d_+d] = theta_ge[:, d] risk_drivers_names[d_+d] = 'ge_bond_nel_sieg_theta_' + str(d+1) # store dirty price of GE bond # get column variable name in v_bond_ge that selects bond with correct expiry ge_link = ge_param.loc[tend_ge, 'link'] v_tnow[n_] = v_bond_ge.loc[t_now, ge_link] v_tinit[n_] = v_bond_ge.loc[t_init, ge_link] v_tnow_names[n_] = 'ge_bond' # update counter d_ = len(db_risk_drivers) n_ = len(v_tnow_names) # JPM bond # extract coupon coupon_jpm = jpm_param.loc[tend_jpm, 'coupons']/100 # rescaled dirty prices of JPM bond v_bond_jpm = db_jpm.loc[db_ge.index.isin(dates_bonds)]/100 # computation of Nelson-Siegel parameters for JPM bond theta_jpm = np.zeros((t_bonds, 4)) theta_jpm = bootstrap_nelson_siegel(v_bond_jpm.values, dates_bonds, np.array(jpm_param.coupons/100), jpm_param.index.values.astype('datetime64[D]')) # risk drivers for bonds are Nelson-Siegel parameters for d in np.arange(4): if d == 3: db_risk_drivers[d_+d] =
np.sqrt(theta_jpm[:, d])
numpy.sqrt
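A tiny sketch of the completion above: the fourth Nelson-Siegel parameter is mapped to a risk driver by an elementwise numpy.sqrt. The theta values below are made up.

import numpy as np

theta = np.array([[0.020, -0.010, 0.005, 1.7],
                  [0.021, -0.012, 0.004, 1.9]])
risk_driver_4 = np.sqrt(theta[:, 3])   # sqrt of the decay parameter column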
#! /usr/bin/python3 from .Camoco import Camoco from .RefGen import RefGen from .Tools import memoize from .Locus import Locus from .Exceptions import ( CamocoGeneNameError, CamocoAccessionNameError, CamocoGeneAbsentError, ) from scipy.spatial.distance import pdist, squareform, euclidean from scipy.stats import hypergeom, pearsonr from scipy.stats.mstats import rankdata as mrankdata from scipy.cluster.hierarchy import linkage, dendrogram from collections import defaultdict, Counter import matplotlib import pandas as pd import numpy as np import matplotlib.pyplot as plt import io import re import string pd.set_option("display.width", 100) class Expr(Camoco): """ A gene expression dataset. Build, normalize, filter and easily access different parts of the gene expression matrix. """ def __init__(self, name): # Create a camoco object super().__init__(name=name, type="Expr") # Part I: Load the Expression dataset self.log("Loading Expr table") self._expr = self._bcolz("expr") self._gene_qc_status = self._bcolz("gene_qc_status") if (self._expr is None) or (self._gene_qc_status is None): self._expr = pd.DataFrame() self.log("Building Expr Index") self._expr_index = defaultdict( lambda: None, {gene: index for index, gene in enumerate(self._expr.index)} ) # Part II: Load the Reference Genome try: self.log("Loading RefGen") self.refgen = RefGen(self.refgen) except TypeError as e: self.log("RefGen for {} not set!", self.name) except NameError as e: self.log.warn("Refgen for {} not available, must be reset!", self.name) def __contains__(self, obj): if obj in self._expr.index: return True if obj in self._expr.columns: return True try: if obj.id in self._expr.index: return True except AttributeError as e: pass return False def __repr__(self): return "" def __str__(self): pass def num_genes(self, raw=False): return len(self.expr(raw=raw)) def num_accessions(self, raw=False): return len(self.expr(raw=raw).columns) def shape(self): return self._expr.shape def zscore(self): pass def accessions(self): return self._expr.columns def genes(self, raw=False): # Returns a list of distinct genes if raw is False: return self.refgen.from_ids(self._expr.index) else: return self.refgen.from_ids(self._bcolz("raw_expr").index) def expr_profile(self, gene): """ return the expression profile for a gene """ # try to use as gene object try: return self._expr.loc[gene.id] except AttributeError: pass # try to get gene object from refgen gene = self.refgen[gene] return self._expr.loc[gene.id] def is_normalized(self, max_val=None, raw=False): if max_val is not None: max_val = max_val # Use the user defined max val elif self.rawtype.upper() == "RNASEQ": max_val = 1100 elif self.rawtype.upper() == "MICROARRAY": max_val = 100 else: max_val = 0 return self._expr.apply(lambda col: np.nanmax(col.values) < max_val, axis=0) def max_values(self, axis=0): return np.nanmax(self._expr, axis=axis) def anynancol(self): """ A gut check method to make sure none of the expression columns got turned into all nans. Because apparently that is a problem. """ return any(self._expr.apply(lambda col: all(np.isnan(col)), axis=0)) def expr(self, genes=None, accessions=None, raw=False, gene_normalize=False): """ Access raw and QC'd expression data. 
Parameters ---------- genes : iterable of camoco.Locus objects (default: None) If not None, this will retrieve the expression values for the loci specified within the iterable, otherwise it will include ALL loci in the expr dataset accessions : iterable of str (default: None) If not None, will retrieve expression values for the accessions (experiments) specified, otherwise will retrieve ALL accessions. raw : bool (default: False) Flag to indicate on using the raw table versus the current expr table. See the transformation_log for more details on the difference. gene_normalize : bool (default: False) Perform standard normalization on gene-wise data zscore : bool (default: False) """ if raw is True: self.log("Extracting raw expression values") df = self._bcolz("raw_expr") else: df = self._expr if genes is not None: df = df.loc[[x.id for x in genes], :] if accessions is not None: df = df[accessions] if gene_normalize: df = df.apply( # Axis: 1 applies to ROWS! lambda row: (row - row.mean()) / row.std(), axis=1, ) return df def plot_accession_histograms(self, bins=50, figsize=(16, 8)): """ Plot histogram of accession expression values. """ raw = self._bcolz("raw_expr") qcd = self._expr for name, values in qcd.iteritems(): raw_values = raw[name] # Shorten name if len(name) > 20: name = name[0:20] + "..." + name[-11:-1] self.log("Plotting values for {}", name) # Extract out the raw values raw_valid = np.ma.masked_invalid(raw_values) # Extract out the normalized values valid = np.ma.masked_invalid(values) # Plot histograms f = plt.figure(figsize=figsize) plt.subplot(121) plt.hist(raw_valid[~raw_valid.mask], bins=bins) plt.xlim(-15, 15) plt.title("{}:{}".format(self.name, name)) plt.ylabel("Frequency") plt.subplot(122) plt.hist(valid[~valid.mask], bins=bins) plt.xlabel("Expression") plt.xlim(-15, 15) plt.savefig("ACC_HIST_{}:{}.png".format(self.name, name)) plt.close(f) """ Internal Methods ------------------------------------------------------ """ def _update_values(self, df, transform_name, raw=False): """ updates the 'expression' table values with values from df. Requires a transformation name for the log. Option to overwrite raw table or working table. Parameters ---------- df : DataFrame Updates the internal values for the Expr object with values in the data frame. transform_name : str A short justification for what was done to the updated values. raw : bool (default: False) A flag to update the raw values. This also resets the current values to what is in df. Returns ------- self : Expr Object Raises: ------ CamocoGeneNamesError CamocoAccessNamesError """ # update the transformation log if len(set(df.columns)) != len(df.columns): raise CamocoAccessionNameError("Accession names must be unique") if len(set(df.index)) != len(df.index): raise CamocoGeneNameError("Gene names must be unique.") self._transformation_log(transform_name) if raw == True: table = "raw_expr" # If we are updating the raw table, remove the # normal table since it assumes it came from # the raw table. self._reset(raw=False) else: table = "expr" # Keep full names in raw, but compress the # names in the normed network def shorten(x): if len(x) > 100: return x[0:89] + "..." 
+ x[-10:-1] else: return x df.columns = [shorten(x) for x in df.columns] # Sort the table by genes df = df.sort_index() # ensure that column names are alphanumeric colP = re.compile("[^A-Za-z0-9_]") begP = re.compile("^\d") df.columns = [colP.sub("_", x).strip("_") for x in df.columns.values] df.columns = [ x if not begP.match(x[0]) else "Exp_" + x for x in df.columns.values ] # Also, make sure gene names are uppercase idxP = re.compile("[^A-Za-z0-9_, ;:().]") df.index = [idxP.sub("", str(x)).upper() for x in df.index.values] try: self._bcolz(table, df=df) self._expr = df except Exception as e: self.log("Unable to update expression table values: {}", e) raise e # Set the index self._expr_index = defaultdict( lambda: None, {gene: index for index, gene in enumerate(self._expr.index)} ) return self def _get_gene_index(self, gene): """ Retrieve the row index for a gene. Parameters ---------- gene : co.Locus object The gene object the get the index for Returns ------- an integer containing the expr dataframe index Raises ------ CamocoGeneAbsentError If the gene requested is not in the Expr dataframe """ if isinstance(gene, Locus): id = gene.id else: id = gene index = self._expr_index[id] if index == None: raise CamocoGeneAbsentError("{} not in {}".format(id, self.name)) return index def _transformation_log(self, transform=None): if transform is None: return self._global("transformation_log") elif transform == "reset" or self._global("transformation_log") is None: self._global("transformation_log", "raw") else: self._global( "transformation_log", self._global("transformation_log") + "->" + str(transform), ) self.log("Trans. Log: {}", self._global("transformation_log")) def _reset(self, raw=False): """ resets the expression values to their raw state undoing any normalizations """ if raw: # kill the raw table too self.log("Resetting raw expression data") self._bcolz("raw_expr", df=pd.DataFrame()) self.log("Resetting expression data") self._expr = self.expr(raw=True) self._bcolz("expr", df=self._expr) self._transformation_log("reset") def _normalize(self, norm_method=None, max_val=None, **kwargs): """ Evaluates QC expression data and re-enters normalized data into database Parameters ---------- norm_method : The normalization method to use. This can be inferred from the raw data type. By default RNASeq uses np.arcsinh and microarray data uses np.log2. A different normalization function can be passed directly in. Default: None (inferred from Expr.rawtype) max_val : This value is used to determine if any columns of the dataset have already been normalized. If any 'normailzed' values in an Accession column is larger than max_val, an exception is thown. max_val is determined by Expr.raw_type (default 100 for MicroArray and 1100 for RNASeq) but a max_val can be passed in to override these defaults. """ self.log("------------ Normalizing") if all(self.is_normalized(max_val=max_val)): self.log("Dataset already normalized") self._transformation_log("DetectedPreNormalized") elif any(self.is_normalized(max_val=max_val)): raise TypeError( ( "Attempting normalization on already normalized" " dataset. See the --max-val option to over ride." ).format(min(self.max_values())) ) else: df = self._expr if norm_method is not None: method = norm_method elif self.rawtype.upper() == "RNASEQ": method = np.arcsinh elif self.rawtype.upper() == "MICROARRAY": method = np.log2 else: raise ValueError( ( "Could not guess correct normalization for {}" " pass in function through method argument." 
).format(self.rawtype) ) # apply the normalization to each column (accession) df = df.apply(lambda col: method(col), axis=0) # update values self._update_values(df, method.__name__) def _quality_control( self, min_expr=0.01, max_gene_missing_data=0.2, min_single_sample_expr=5, max_accession_missing_data=0.3, membership=None, dry_run=False, presence_absence=False, **kwargs, ): """ Perform Quality Control on raw expression data. This method filters genes based on membership to some RefGen instance, filters based on a minimum FPKM or equivalent expression value, filters out genes and accessions with too much missing data, filters out genes which are lowly expressed (do not have at least one accession that meets an FPKM threshold, i.e. likely presence absense). See parameters for more details. Parameters ---------- min_expr : int (default: 0.01) FPKM (or equivalent) values under this threshold will be set to NaN and not used during correlation calculations. max_gene_missing_data : float (default: 0.2) Maximum percentage missing data a gene can have. Genes under this are removed from dataset. min_single_sample_expr : int (default: 5) Genes that do not have a single accession having an expression value above this threshold are removed from analysis. These are likely presence/absence and will not have a strong coexpression pattern. max_accession_missing_data : float (default: 0.5) maximum percentage missing data an accession (experiment) can have before it is removed. membership : RefGen Genes which are not contained within this RefGen will be removed. Note: this could also be another object that will implement an interface that will check to see if gene ids are contained within it i.e. a set of gene ids. dry_run : bool (default: False) Used in testing to speed up calculations. Limits the QC dataframe to only have 100 genes. presence_absence : bool (default: False) Used to convert 0's within the data to a 0.001 after min expression values are filtered out to allow for presence absence variation """ self.log("------------Quality Control") df = self.expr() # remember how we set the flags self._global("qc_min_expr", min_expr) self._global("qc_max_gene_missing_data", max_gene_missing_data) self._global("qc_min_single_sample_expr", min_single_sample_expr) self._global("qc_max_accession_missing_data", max_accession_missing_data) # Retrieve raw data as a data frame self.log( "Raw Starting set: {} genes {} accessions".format( len(df.index), len(df.columns) ) ) # Remember why we remove certain genes # If TRUE it passes, if FALSE it fails!!! 
qc_gene = pd.DataFrame({"has_id": True}, index=df.index) qc_accession = pd.DataFrame({"has_id": True}, index=df.columns) # ----------------------------------------- # Gene Membership test if not membership: membership = self.refgen self._global("qc_membership", str(membership)) qc_gene["pass_membership"] = [x in membership for x in df.index] self.log( "Found out {} genes not in {}", sum(qc_gene["pass_membership"] == False), membership, ) # ----------------------------------------- # Set minimum FPKM threshold self.log("Filtering expression values lower than {}", min_expr) df_flt = df.copy() # Presence absence variable et if presence_absence == True: self.log("Allowing for presence absence variation") # find out which values equal 0 zero_index = df_flt == 0 # Filter the min expression genes df_flt[df < min_expr] = np.nan if presence_absence == True: # change out original 0's index to a small value df_flt[zero_index] = 0.001 df = df_flt # ----------------------------------------- # Gene Missing Data Test qc_gene["pass_missing_data"] = df.apply( lambda x: ((sum(np.isnan(x))) < len(x) * max_gene_missing_data), axis=1 ) self.log( "Found {} genes with > {} missing data", sum(qc_gene["pass_missing_data"] == False), max_gene_missing_data, ) # ----------------------------------------- # Gene Min Expression Test # filter out genes which do not meet a minimum expr # threshold in at least one sample qc_gene["pass_min_expression"] = df.apply( lambda x: any(x >= min_single_sample_expr), axis=1 # 1 is column ) self.log( ("Found {} genes which " "do not have one sample above {}"), sum(qc_gene["pass_min_expression"] == False), min_single_sample_expr, ) qc_gene["PASS_ALL"] = qc_gene.apply(lambda row: np.all(row), axis=1) df = df.loc[qc_gene["PASS_ALL"], :] # ----------------------------------------- # Filter out ACCESSIONS with too much missing data qc_accession["pass_missing_data"] = df.apply( lambda col: ( ((sum(np.isnan(col)) / len(col)) <= max_accession_missing_data) ), axis=0, # 0 is columns ) self.log( "Found {} accessions with > {} missing data", sum(qc_accession["pass_missing_data"] == False), max_accession_missing_data, ) # Update the total QC passing column qc_accession["PASS_ALL"] = qc_accession.apply(lambda row: np.all(row), axis=1) df = df.loc[:, qc_accession["PASS_ALL"]] # Update the database self._bcolz("qc_accession", df=qc_accession) self._bcolz("qc_gene", df=qc_gene) # Report your findings self.log("Genes passing QC:\n{}", str(qc_gene.apply(sum, axis=0))) self.log("Accessions passing QC:\n{}", str(qc_accession.apply(sum, axis=0))) # Also report a breakdown by chromosome qc_gene = qc_gene[qc_gene["pass_membership"]] qc_gene["chrom"] = [self.refgen[x].chrom for x in qc_gene.index] self.log( "Genes passing QC by chromosome:\n{}", str(qc_gene.groupby("chrom").aggregate(sum, axis=0)), ) # update the df to reflect only genes/accession passing QC self.log("Kept: {} genes {} accessions".format(len(df.index), len(df.columns))) if dry_run: # If dry run, take first 100 rows of QC self.log.warn("Dry Run") df = df.iloc[0:100, :] self._update_values(df, "quality_control") @staticmethod def inplace_nansort(col): # mask invalid data masked_col = np.ma.masked_invalid(col) masked_sorted = np.sort(col[~masked_col.mask].data) # get ranked values col_sorted = np.copy(col) non_nan = 0 for i, x in enumerate(~masked_col.mask): if x == True: col_sorted[i] = masked_sorted[non_nan] non_nan += 1 else: col_sorted[i] = np.nan return col_sorted def _quantile(self): """ Perform quantile normalization across each 
accession. Each accessions gene expression values are replaced with ranked gene averages. """ self.log("------------ Quantile ") if "quantile" in self._transformation_log(): raise ValueError("Quantile already performed on {}", self.name) # Retrieve current expression DataFrame expr = self.expr() self.log("Ranking data") for accession_name, values in expr.iteritems(): rank_ties = max(Counter(values).values()) if rank_ties > len(values) * 0.20: raise ValueError( f"{self.name}:{accession_name} has {rank_ties} " f"({rank_ties/len(values)}%) rank ties" ) # assign ranks by accession (column) expr_ranks = expr.rank(axis=0, method="first", na_option="keep") assert np.all(np.isnan(expr) == np.isnan(expr_ranks)) # normalize rank to be percentage expr_ranks = expr_ranks.apply(lambda col: col / np.nanmax(col.values), axis=0) # we need to know the number of non-nans so we can correct for their ranks later self.log("Sorting ranked data") # Sort values by accession/column, lowest to highest expr_sort = expr.apply(lambda col: self.inplace_nansort(col), axis=0) # make sure the nans weren't included in the sort or the rank assert np.all(np.isnan(expr) == np.isnan(expr_ranks)) assert np.all(np.isnan(expr) == np.isnan(expr_sort)) # calculate ranked averages self.log("Calculating averages") rank_average = expr_sort.apply(np.nanmean, axis=1) # we need to apply the percentages to the lenght of the rankmax = len(rank_average) self.log( "Range of normalized values:{}..{} (n = {})".format( min(rank_average), max(rank_average), len(rank_average) ) ) self.log("Asserting that no Genes are nan...") assert sum(np.isnan(rank_average)) == 0 self.log("Applying non-floating normalization") quan_expr = expr_ranks.applymap( lambda x: rank_average[int(x * rankmax) - 1] if not
np.isnan(x)
numpy.isnan
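A minimal sketch of the NaN handling that the completion above supplies inside the quantile-normalization step: a normalized rank is mapped back to a ranked average only where numpy.isnan is False. The rank_average values are illustrative.

import numpy as np

rank_average = np.array([0.5, 1.2, 2.0, 3.3])
rankmax = len(rank_average)

def rank_to_value(x):
    """Map a normalized rank in (0, 1] to its ranked average, keeping NaNs."""
    return rank_average[int(x * rankmax) - 1] if not np.isnan(x) else np.nan

print(rank_to_value(0.75), rank_to_value(np.nan))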
""" Created on 30.09.2020 @author: <NAME> @director: <NAME> @co-director: <NAME> """ import os import numpy as np from scipy.io import wavfile class DataLoader: def __init__(self, context_manager, normalize, batch_size=0): self.context_manager = context_manager self.normalize = normalize self.batch_size = batch_size @property def context_manager(self): return self._context_manager @context_manager.setter def context_manager(self, value): self._context_manager = value def __load_heidelburg(self, path=None, language="english", shuffle=True): """To use this data loader you need to convert flac files to wav files Args: path (str, optional): The path of the dataset. Defaults to None. language (str, optional): The desired language to load. Defaults to "english". shuffle (bool, optional): If True, the loaded dataset will be shuffled. Defaults to True. Returns: ndarray, ndarray: train set, test set """ np.random.seed(0xbadc0de) with open(os.path.join(path, 'train_filenames.txt')) as f: train_paths = f.readlines() with open(os.path.join(path, 'test_filenames.txt')) as f: test_paths = f.readlines() max_len = 55718 # is the longest audio signal max_len += self.context_manager.stride - ( max_len - self.context_manager.ker_len) % self.context_manager.stride train_set =
np.zeros((4011, max_len))
numpy.zeros
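A short sketch of the preallocation pattern the completion above continues: a zero matrix sized to the longest (padded) signal, filled clip by clip. The clip lengths are invented.

import numpy as np

clips = [np.random.randn(n) for n in (100, 250, 180)]
max_len = max(len(c) for c in clips)
padded = np.zeros((len(clips), max_len))   # zero-padded container
for i, c in enumerate(clips):
    padded[i, :len(c)] = c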
# Copyright 2019 Graphcore Ltd. import inspect import numpy as np import os import popart import sys from collections import namedtuple def graph_builder(opts): if opts.mode == 'infer': builder_fn = infer_builder elif opts.mode == 'eval': builder_fn = eval_builder elif opts.mode == 'train': builder_fn = train_builder else: raise ValueError("Unknown mode '{}'".format(opts.mode)) defn = builder_fn(opts) defn[0] = defn[0].getModelProto() return defn def infer_builder(opts): builder = popart.Builder() timesteps = opts.timesteps batch_size = opts.batch_size hidden_size = opts.hidden_size input_size = hidden_size dType_str = "FLOAT16" dType = np.float16 input_shape = [timesteps, batch_size, input_size] d1 = popart.TensorInfo(dType_str, input_shape) if opts.use_zero_values: d2 = np.zeros([1, 4 * hidden_size, input_size], dType) d3 = np.zeros([1, 4 * hidden_size, hidden_size], dType) input = np.zeros(input_shape, dType) else: d2 = np.random.normal(0, 1, [1, 4 * hidden_size, input_size]).astype(dType) d3 = np.random.normal(0, 1, [1, 4 * hidden_size, hidden_size]).astype(dType) input =
np.random.uniform(-1, 1, input_shape)
numpy.random.uniform
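A small sketch of the tensor initialisation the completion above performs: numpy.random.uniform for the input and numpy.random.normal for the weights, cast to float16 as in the builder. Shapes are reduced for illustration.

import numpy as np

timesteps, batch_size, hidden_size = 5, 2, 4
input_size = hidden_size
dType = np.float16

x = np.random.uniform(-1, 1, [timesteps, batch_size, input_size]).astype(dType)
W = np.random.normal(0, 1, [1, 4 * hidden_size, input_size]).astype(dType)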
#!/usr/bin/env python # # Author: <NAME> <<EMAIL>> # import sys import time import numpy import scipy.linalg from pyscf import lib from pyscf.lib import logger def expmat(a): return scipy.linalg.expm(a) class CIAHOptimizer(lib.StreamObject): def __init__(self): self.conv_tol_grad = 1e-4 self.max_stepsize = .05 self.max_iters = 10 self.kf_interval = 5 self.kf_trust_region = 5 self.ah_start_tol = 5. self.ah_start_cycle = 1 self.ah_level_shift = 0#1e-4 self.ah_conv_tol = 1e-12 self.ah_lindep = 1e-14 self.ah_max_cycle = 30 self.ah_trust_region = 3. def gen_g_hop(self, u): pass def pack_uniq_var(self, mat): nmo = mat.shape[0] idx =
numpy.tril_indices(nmo, -1)
numpy.tril_indices
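A runnable sketch of what pack_uniq_var is heading towards: numpy.tril_indices selects the strictly lower triangle of an antisymmetric rotation generator, which can be packed into a vector of unique variables and unpacked again. The matrix here is random.

import numpy as np

nmo = 4
a = np.random.rand(nmo, nmo)
a = a - a.T                      # antisymmetric generator
idx = np.tril_indices(nmo, -1)   # strictly lower triangle

packed = a[idx]                  # unique variables
unpacked = np.zeros((nmo, nmo))
unpacked[idx] = packed
unpacked = unpacked - unpacked.T
assert np.allclose(unpacked, a)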
#!/usr/bin/env python3 # -*- coding: utf-8 -*- __author__ = 'david.tang' import cv2 import numpy as np from numpy.linalg import norm SZ = 20 # 训练图片长宽 MAX_WIDTH = 1000 # 原始图片最大宽度 Min_Area = 2000 # 车牌区域允许最大面积 PROVINCE_START = 1000 # 来自opencv的sample,用于svm训练 def deskew(img): m = cv2.moments(img) if abs(m['mu02']) < 1e-2: return img.copy() skew = m['mu11'] / m['mu02'] M =
np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
numpy.float32
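A brief sketch of why the completion above wraps the shear-correction matrix in numpy.float32: OpenCV's warpAffine expects a 2x3 float32 affine matrix. The image is a synthetic placeholder.

import cv2
import numpy as np

SZ = 20
img = np.zeros((SZ, SZ), dtype=np.uint8)   # stand-in training glyph
skew = 0.3
M = np.float32([[1, skew, -0.5 * SZ * skew], [0, 1, 0]])
deskewed = cv2.warpAffine(img, M, (SZ, SZ),
                          flags=cv2.WARP_INVERSE_MAP | cv2.INTER_LINEAR)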
#!/bin/python # -*- coding: UTF-8 -*- from __future__ import print_function import os, sys import cPickle as pickle import numpy as np import pandas as pd from sklearn.model_selection import train_test_split from sklearn.utils import shuffle cfg_folder = "../BigCloneBench/dcsim" \ "/cfgs_antlr" labels_folder = "../BigCloneBench/dcsim" \ "/labels_antlr" seed = 233 def read_sample_sparse(filepath): sparse_arr = [] row = 0 for line in open(filepath, 'r'): xs = line.split('\t')[:-1] # remove '\n' if (len(xs) != 128): continue col = 0 for x in xs: if x != '{}': x = x[1:-1] if len(x) > 0: indices = x.split(',') for index in indices: sparse_arr.append((row, col, int(index))) col += 1 row += 1 return sparse_arr def read_data_info(filepath): """ Read information about each node in the graph for each method. :param filepath: The file path of the *.info file :return: """ fin = open(filepath, 'r') line = fin.readline() var_count = int(line.split(' ')[0]) block_count = int(line.split(' ')[1]) types = [] for _ in xrange(var_count): line = fin.readline() types.append(int(line)) for _ in xrange(block_count): line = fin.readline() types.append(int(line)) return types, var_count, block_count def flatten_clones_set(clones_set): X = [] y = [] file_dest = [] infos = [] for (l, clones) in clones_set.items(): for m, file_path, info in zip(clones['data'], clones['file_dest'], clones['infos']): X.append(m) y.append(l) file_dest.append(file_path) infos.append(info) # X = np.array(X, dtype=np.float) y =
np.array(y, dtype=np.int)
numpy.array
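A small sketch of the flattening step the completion above finishes: grouped samples become parallel X / y lists and the labels are converted with numpy.array. Note that np.int is deprecated in recent NumPy releases, so plain int (or np.int64) is the safer dtype if this pattern is reused; the toy clones_set is invented.

import numpy as np

clones_set = {0: {'data': [[1, 2], [3, 4]]}, 1: {'data': [[5, 6]]}}
X, y = [], []
for label, clones in clones_set.items():
    for m in clones['data']:
        X.append(m)
        y.append(label)
y = np.array(y, dtype=int)   # int instead of the deprecated np.int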
import numpy as np import sunpy.map from sunpy.coordinates import frames from astropy.coordinates import SkyCoord import astropy.units as u from scipy import ndimage from coord_transform import hmi_disambig #plane2sphere.pro def plane2sphere(x,y,latc,lonc): """Convert (x,y) of a CEA map to Stonyhurst/Carrington coordinates (lat,lon) Params: x,y: coordinate of CEA map pixel relative to map reference point (usually center) in radians. latc,lonc: Stonyhurst latitude and longitude of map reference point (usually center) in radians. Returns: Stonyhurst latitude and longitude of the map pixel in radians. """ if np.abs(y) > 1: lat = np.nan lon = np.nan return lat,lon else: pass coslatc = np.cos(latc) sinlatc = np.sin(latc) cosphi = np.sqrt(1.0 - y**2) lat = np.arcsin((y*coslatc) + (cosphi*np.cos(x)*sinlatc)) if np.cos(lat) == 0: test = 0.0 else: test = cosphi*np.sin(x)/np.cos(lat) lon = np.arcsin(test) + lonc x0 = x if np.abs(x0) > np.pi/2.0: while x0 > np.pi/2.0: lon = np.pi - lon x0 = x0 - np.pi while x0 < -np.pi/2.0: lon = -np.pi - lon x0 = x0 + np.pi return lat,lon #sphere2img.pro def sphere2img(lat,lon,latc,lonc,xcen,ycen,rSun,peff,hemi_out=False): """Convert Stonyhurst lat,lon to xi,eta in heliocentric-cartesian coordinates. Params: lat,lon: latitude and longitude of desired pixel in radians. latc,lonc: latitude and longitude of disk center in radians. xcen,ycen: disk center in pixels. rSun: radius of Sun in arbitraty units. peff: p-angle hemi_out: whether or not to output hemisphere of farside. Returns: Coordinate on image (xi,eta) in units of rSun and hemisphere of farside (optional output). """ # Correction of finite distance (1AU) sin_asd = 0.004660 cos_asd = 0.99998914 last_latc = 0.0 cos_latc = 1.0 sin_latc = 0.0 if latc != last_latc: sin_latc = np.sin(latc) cos_latc = np.cos(latc) last_latc = latc sin_lat = np.sin(lat) cos_lat = np.cos(lat) cos_lat_lon = cos_lat*np.cos(lon-lonc) cos_cang = sin_lat*sin_latc + cos_latc*cos_lat_lon if cos_cang < 0.0: hemisphere = 1 else: hemisphere = 0 r = rSun*cos_asd/(1.0 - cos_cang*sin_asd) xr = r*cos_lat*np.sin(lon - lonc) yr = r*(sin_lat*cos_latc - sin_latc*cos_lat_lon) cospa = np.cos(peff) sinpa = np.sin(peff) xi = xr*cospa - yr*sinpa eta = xr*sinpa + yr*cospa xi = xi + xcen eta = eta + ycen if hemi_out == True: return xi,eta,hemisphere else: return xi,eta #find_cea_coord.pro def find_cea_coord(header,phi_c,lambda_c,nx,ny,dx,dy): """Convert the index to CCD coordinate (xi,eta)""" nx = int(nx) ny = int(ny) # Array of CEA coords x = [] y = [] for j in range(ny): col = [] row = [] for i in range(nx): col.append(np.radians((i-(nx-1)/2)*dx)) row.append(np.radians((j-(ny-1)/2)*dy)) x.append(col) y.append(row) x = np.array(x) y = np.array(y) # Relevant header values rSun = header['rsun_obs']/header['cdelt1'] #solar radius in pixels disk_latc = np.radians(header['CRLT_OBS']) disk_lonc = np.radians(header['CRLN_OBS']) disk_xc = header['CRPIX1'] - 1 #disk center wrt lower left of patch disk_yc = header['CRPIX2'] - 1 pa = np.radians(header['CROTA2']*-1) latc = np.radians(lambda_c) lonc = np.radians(phi_c) - disk_lonc # Convert coordinates lat = [] lon = [] xi = [] eta = [] for j in range(ny): lat_col = [] lon_col = [] xi_col = [] eta_col = [] for i in range(nx): lat0,lon0 = plane2sphere(x[j,i],y[j,i],latc,lonc) lat_col.append(lat0) lon_col.append(lon0) xi0,eta0 = sphere2img(lat0,lon0,disk_latc,0.0,disk_xc,disk_yc,rSun,pa) xi_col.append(xi0) eta_col.append(eta0) lat.append(lat_col) lon.append(lon_col) xi.append(xi_col) eta.append(eta_col) lat = np.array(lat) 
lon = np.array(lon) xi = np.array(xi) eta = np.array(eta) return xi,eta,lat,lon #img2heliovec.pro def img2heliovec(bxImg,byImg,bzImg,lon,lat,lonc,latc,pAng): """Convert from image coordinates to Heliocentric spherical coordinates.""" a11 = -np.sin(latc)*np.sin(pAng)*np.sin(lon - lonc) + np.cos(pAng)*np.cos(lon - lonc) a12 = np.sin(latc)*np.cos(pAng)*np.sin(lon - lonc) + np.sin(pAng)*np.cos(lon - lonc) a13 = -np.cos(latc)*np.sin(lon - lonc) a21 = -np.sin(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.cos(lat)*np.cos(latc)*np.sin(pAng) a22 = np.sin(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*np.sin(lon - lonc)) + np.cos(lat)*np.cos(latc)*np.cos(pAng) a23 = -np.cos(latc)*np.sin(lat)*np.cos(lon - lonc) + np.sin(latc)*np.cos(lat) a31 = np.cos(lat)*(np.sin(latc)*np.sin(pAng)*np.cos(lon - lonc) + np.cos(pAng)*np.sin(lon - lonc)) - np.sin(lat)*np.cos(latc)*np.sin(pAng) a32 = -np.cos(lat)*(np.sin(latc)*np.cos(pAng)*np.cos(lon - lonc) - np.sin(pAng)*
np.sin(lon - lonc)
numpy.sin
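A one-point sketch of the p-angle rotation that numpy.sin and numpy.cos implement throughout the coordinate transforms above: rotate an image-plane offset (xr, yr) by peff before shifting to the disk centre. The numbers are arbitrary.

import numpy as np

xr, yr, peff = 0.3, -0.1, np.radians(15.0)
xi = xr * np.cos(peff) - yr * np.sin(peff)
eta = xr * np.sin(peff) + yr * np.cos(peff)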
#Kernal Regression from Steimetz et al. (2019) # #Feb 6th 2022 #<NAME> """ frequency_array still needs testing. Ignore the unexpected indent in spyder, it just doesnt like stein.calldata Description of Kernel Regression Implementation: We need to first reun CCA to generate B then we want to find the matrix a (also denoted as a matrix W with vectors w_n for each neruon n). CCA is first run from the toeplitz matrix of diagonalized kernel functions this will reduce the dimensionality of the entire time course, we then optimize the weights of the components of this reduced representation. Minimizations of square error is done by elastic net regularizatuion applied on a neuron by neuron basis. Currently has matlab code sprinkled in comments to guide development. Eventually the goal is to turn this into a .ipyn file, the all caps comments are notes which denote sections, multi line commebts are quotes from the paper which will be included or written up for description of the workflow. """ ##INTRODUCTION ####START WITH AN IMAGE OF THE WORKFLOW AND A BRIEF EXPLANATION OF THE MODEL import os import numpy as np import pandas as pd from math import ceil from math import floor import scipy.ndimage import timeit #for testing and tracking run times import scipy.stats import getSteinmetz2019data as stein import warnings import piso # From the local path on Angus's PC, toeplitz and freq_array, # use this as the default consider changing DEFAULT_FILEPATH = os.fspath(r'C:\Users\angus\Desktop\SteinmetzLab\9598406\spikeAndBehavioralData\allData') """ start = timeit.timeit() end = timeit.timeit() print(end - start)" """ #for ubuntu.... #cd mnt/c/Users/angus/Desktop/SteinmetzLab/Analysis ############ FILTERING ##Going from neurons across all regions and mice # Which neurons to include """ clusters._phy_annotation.npy [enumerated type] (nClusters) 0 = noise (these are already excluded and don't appear in this dataset at all); 1 = MUA (i.e. presumed to contain spikes from multiple neurons; these are not analyzed in any analyses in the paper); 2 = Good (manually labeled); 3 = Unsorted. In this dataset 'Good' was applied in a few but not all datasets to included neurons, so in general the neurons with _phy_annotation>=2 are the ones that should be included. """ #So we should apply the criteria we want and search the data that way. #when querrying the clusters data we can apply the quality score criteria # first we want trial times since we are initially only going to look at # data withing the trial times, may as well collect the data we need from them # for feeding into toeplitz matrix later """ A NOTE ON THE TIMESTAMP FILES AND LFP DATA So each session contains a few files named like this: 'Forssmann_2017-11-01_K1_g0_t0.imec.lf.timestamps.npy' These are the time base offsets for the probes internal clocks. In order to align the time base here for the events occuring in the trials to the LFP you will need to account for these. They bear no relevance for the spikes, stimuli, movement etc etc these are set to the same time base which starts prior to the begining of the trials. """ #For smoothing we make halfguassian_kernel1d and halfgaussian_filter1d def halfgaussian_kernel1d(sigma, radius): """ Computes a 1-D Half-Gaussian convolution kernel. 
""" sigma2 = sigma * sigma x = np.arange(0, radius+1) phi_x = np.exp(-0.5 / sigma2 * x ** 2) phi_x = phi_x / phi_x.sum() return phi_x def halfgaussian_filter1d(input, sigma, axis=-1, output=None, mode="constant", cval=0.0, truncate=4.0): """ Convolves a 1-D Half-Gaussian convolution kernel. """ sd = float(sigma) # make the radius of the filter equal to truncate standard deviations lw = int(truncate * sd + 0.5) weights = halfgaussian_kernel1d(sigma, lw) origin = -lw // 2 return scipy.ndimage.convolve1d(input, weights, axis, output, mode, cval, origin) #now we can make the function that will generate our Y matrix, the firing rates to predict #based on our kernels def frequency_array(session, bin_size, only_use_these_clusters=[], quality_annotation_filter = True, select_trials = [], filter_by_engagement = True, FILEPATH = DEFAULT_FILEPATH): """ Input: session: the name of the desired session, we take it and generate.... Takes Alyx format .npy files and load them into a numpy array, can either give you spikeclusterIDs: from the 'spikes.clusters.npy' file spikestimes: from the 'spikes.times.npy' start_times: times to start collecting from should have corrresponding equal length vector of end_times end_times: time to stop collecting spikes bin_size: the length in seconds of the bins we calculate frqncy over only_use_these_clusters: a list or array of clusters to filter, should be supplied as an actual list of indices a boolean will not works quality_annotation_filter: default to true overwritten byonly_use_these_clusters, removes clusters below quality annotation of 2 (out of 3) select_trials: may be boolean or an array of ints, limits trials to particular set, should match that of the X you are pulling from filter_by_engagement: by default set to true removes trials based on engagement index Returns: A numpy array of spike frequencies for each neuron, if return_meta_data also supplies a dataframe of the cluster ID and corresponding Allen onotlogy data as well as session label """ def get_and_filter_spikes(): """ calls the spikes datat from the session we are interested in, removes the low quality scores, I.e. 
those listed as 1 steinmetz annotated the kilosorts clusters as 1, 2, or 3 recomended using nothing below a 2 -returns 2 numpy arrays one for the clusters THIS SECTION MAY BE UNNESCESSARY """ #We call the relvant objects for clusters (neurons) identity of a firing #the time at which a firing occured and the quality of the recording spikes = stein.calldata(session, ['spikes.clusters.npy', 'spikes.times.npy', 'clusters._phy_annotation.npy'], steinmetzpath=FILEPATH) spikesclusters = spikes['spikesclusters'] #the idneity in sequence of #each cluster, match it with spikestimes to get timing and identity info spikestimes = spikes['spikestimes'] #times corresponding to clusters firing # by default remove clusters wiht a rating of 1 if len(only_use_these_clusters)!=0: #finds the clusters in the time series with bad quality (q<2) and removes them #from the series holding when a spike occured and what it's identity was clusters_mask = np.isin(spikesclusters, only_use_these_clusters) #boolean mask spikestimes = spikestimes[clusters_mask] spikesclusters = spikesclusters[clusters_mask] clusters_idx = np.unique(spikesclusters) elif quality_annotation_filter: clusterquality = spikes['clusters_phy_annotation'] #quality rating of clsuters clusters_idx = np.arange(0, len(clusterquality)).reshape(clusterquality.shape) clusters_mask = clusterquality >=2 #boolean mask clusters_idx = clusters_idx[clusters_mask] #filter out low quality clusters #remove those clusters from the time series, here we do it with np.isin spikestimes = spikestimes[np.isin(spikesclusters, clusters_idx)] spikesclusters = spikesclusters[np.isin(spikesclusters, clusters_idx)] clusters_idx = np.unique(spikesclusters) # if provided clusters to use instead.... return(spikesclusters, spikestimes, clusters_idx ) # run above function and get the spikes serieses for this session clusters, times, filteredclusters_idx = get_and_filter_spikes() #getting thetrials objects we need trials = stein.calldata(session, ['trials.intervals.npy', 'trials.included.npy'], steinmetzpath=FILEPATH) # filter by the engagfement index filter provided is set tp ture by default # alternately a list of trials to include may be supplied # Supplying this filter overwrites the engagement-index if len(select_trials)!=0: trialsincluded = select_trials elif filter_by_engagement: trialsincluded = trials['trialsincluded'] trialsincluded = [ i for i in range(0,len(trialsincluded)) if trialsincluded[i]] trialsincluded = np.array(trialsincluded) # filter trialsintervals by trialsincluded trialsintervals = trials['trialsintervals'] trialsintervals = trialsintervals[trialsincluded,:] #this will be our output session_arr = np.zeros([len(np.unique(clusters)),2], dtype=float) #trials starts are trialsintervals[, 0] #trial ends are trialsintervals[, 0] for trial in range(0,trialsintervals.shape[0]): #find out number of step in the trial n_steps = ceil((trialsintervals[trial,1]-trialsintervals[trial,0])/bin_size) t_i = trialsintervals[trial,0] t_plus_dt = t_i + bin_size trial_arr = np.zeros([len(np.unique(clusters)),2], dtype=float) # will be concatenated for i in range(0,n_steps): #bin_arr will be the frequency for this trial, will be added to trail_arr each step and the reset bin_arr = np.zeros(len(np.unique(clusters)), dtype=float) #this bin will filter our timing and clusters so we can # just work on the slice of spikeclusters corresponding to #each bin step this_bin = np.logical_and(times>=t_i, times<=t_plus_dt) #we find the index of the clusters and convert spike counts to hertz 
(unique, counts) = np.unique(clusters[this_bin], return_counts=True) frequencies = np.asarray((unique, counts/bin_size)) #This runs if there are no spikes, i.e. frequency array has 2nd dim = 0 if frequencies.shape[1]==0: bin_arr = np.zeros([trial_arr.shape[0],1]) trial_arr = np.column_stack([trial_arr, bin_arr]) j = 0 #initializing and index to move down frequncy 2d frequency values array with for neuron in frequencies[0,]: ### !!!! ####!!!! there is an error in this loop ## !!!!! #make cluster identiy in frequencies into int so it can be found in clusters_idx #for adding firirng rate to bin_arr match_idx = int(neuron)==filteredclusters_idx #this evaluats to True, bin_arr[match_idx] = frequencies[1,j] #add the freq in Hz to the vector #bin_arr is now ready to be concatenated to trial_arr j = j + 1 trial_arr = np.column_stack([trial_arr, bin_arr]) #end of neuron for-loop #end of i for-loop #trimming array, then smoothing our firing rates trial_arr = trial_arr[:,2:] trial_arr = halfgaussian_filter1d(input = trial_arr, sigma = 0.25) #clipping intialization array session_arr = np.column_stack([session_arr, trial_arr]) #end of trial for-loop session_arr = session_arr[:,2:] # cuts off initialization array from session_arr return (session_arr, filteredclusters_idx) def make_toeplitz_matrix(session, bin_size, kernels, filter_by_engagement = True, select_trials = [], FILEPATH = DEFAULT_FILEPATH): """ Makes the matrix X aka P in Steinmetz et al., (2019), the Toeplitz matrix of dimension. THe kernel is either 0 or 1 or -1 Input: session: session name see stein.recording_key() bin_size: needs to matech taht used for frequency array kernels: which kernels to inlcude should be a three entry boolean list Please Note this function assumes all times tested will be within trial intervals will need some reworking if we want to use non-trial events as well """ #Run this before trial_section() fetched_objects = stein.calldata(session, ['trials.intervals.npy', 'trials.included.npy', 'trials.response_choice.npy', 'trials.response_times.npy', 'trials.visualStim_contrastLeft.npy', 'trials.visualStim_contrastRight.npy', 'trials.visualStim_times.npy'], steinmetzpath = FILEPATH) # filter by the engagfement index filter provided is set tp ture by default # alternately a filter may be supplied if filter_by_engagement: include = fetched_objects['trialsincluded'] trialsintervals = fetched_objects['trialsintervals'] trialsintervals = trialsintervals[include.reshape(trialsintervals.shape[0]),:] # Supplying this filter overwrites the engagement-index if len(select_trials)!=0: include = select_trials trialsintervals = fetched_objects['trialsintervals'] trialsintervals = trialsintervals[include] responsechoice = fetched_objects['trialsresponse_choice'][include] responsetimes = fetched_objects['trialsresponse_times'][include] Leftcontrasts = fetched_objects['trialsvisualStim_contrastLeft'][include] Rightcontrasts = fetched_objects['trialsvisualStim_contrastRight'][include] stim_times = fetched_objects['trialsvisualStim_times'][include] # the vision kenels, L_c, are supported for -0.05 to 0.4 post stimulus onset # the L_c kernels are therefore 90 high # the L_d kernels, for actions and choice are 55 high while L_c are 90 # the action kernels are supported over -025 to def trial_section(trial): """ Requires a fetched_objects = stein.calldata(session, ['trails.intervals.npy', 'trials.included.npy', 'trials.response_choice.npy', 'trials.visualStim_contrastLeft.npy', 'trials.visualStim_contrastRight.npy']) to be run before hand. 
Input: trial, specifies which trial interval this is running on, be sure to filter trialsintervals and the behavioural measures as well with trialsincluded to drop the trials with low engagement kernel: a three item boolean list specifcying which kernels to include in this run kernel = [vision, action, choice], should be specified beforehand if this is run in make_toeplitz_matrix() """ def make_kernel(trialkernel, T_start, T_stop, L_start, L_stop, coef = 1): """ Creates an np.diag array and replaces the provided the specified indices of trialkernel with this array, coef is by default 1 but will be changed for right choice kernels to -1 """ #these four lines scale the starting and stopping based on bin_size #prevents making non-mathcing trialkernels and kernels L_start = (bin_size/0.005)*L_start L_start = floor(L_start) L_stop = (bin_size/0.005)*L_stop L_stop = ceil(L_stop) kernel_length = L_stop-L_start kernel = np.diag(np.ones(kernel_length))*coef trialkernel[T_start:T_stop, L_start:L_stop] = kernel return trialkernel #here the timesteps are length and each kernel is hieght # T_trial is calculated same as s_steps in frequency_array() trial_start = trialsintervals[trial,0] trial_end = trialsintervals[trial,1] T_trial = ceil((trial_end - trial_start)/bin_size) #same thing is assumed in frequency_array and they need to match lengths #the 6 vision kernels (left low, left med, left high, right low, etc..) """ The Vision kernels Kc,n(t) are supported over the window −0.05 to 0.4 s relative to stimulus onset, """ if kernels[0] == True: # instatiating zeros to fill in with diagonal 1's visionkernel = np.zeros(( T_trial, 6*90+90), dtype = int) # indices for looping over #in bin count from start of trial when the kernel begins stim_start = stim_times[trial] - trial_start - 0.05 stim_start = floor(stim_start/bin_size) # stim_end at +.45s/binsize because vision kernel k_c covers... # -0.05s >= stimulation start time =< 0.4s therefore... stim_end = int( stim_start + (0.45/bin_size) ) # Left Low Contrast if Leftcontrasts[trial] == 0.25: visionkernel = make_kernel(visionkernel, stim_start, stim_end, L_start =0, L_stop = 90, coef = 1) # Left Medium Contrast if Leftcontrasts[trial] == 0.5: visionkernel = make_kernel(visionkernel, stim_start, stim_end, L_start =90, L_stop = 180, coef = 1) #Left High Contrast if Leftcontrasts[trial] == 1.0: visionkernel = make_kernel(visionkernel, stim_start, stim_end, L_start =180, L_stop = 270, coef = 1) # Right Low Contrat if Rightcontrasts[trial] == 0.25: visionkernel = make_kernel(visionkernel, stim_start, stim_end, L_start =270, L_stop = 360, coef = 1) # Right Medium Contrast if Rightcontrasts[trial] == 0.5: visionkernel = make_kernel(visionkernel, stim_start, stim_end, L_start =450, L_stop = 540, coef = 1) # Right High Contrast if Rightcontrasts[trial] == 1.0: visionkernel = make_kernel(visionkernel, stim_start, stim_end, L_start =540, L_stop = 630, coef = 1) ##### Movement Kernel """ the Action and Choice kernels are supported over the window −0.25 to 0.025 s relative to movement onset. """ if kernels[1]==True: # instantiate matrix actionkernel = np.zeros((T_trial, 55), dtype = int) #when movementstarts move_start = responsetimes[trial] - trial_start - 0.25 move_start = floor(move_start/bin_size) # move_end at +.45s/binsize because movement kernel k_d covers... # -0.25s >= movement start time =< 0.025s therefore... 
move_end = int( move_start + (0.275/bin_size) ) if responsechoice[trial]!=0: #add contrast to our matrix if there is no movement actionkernel = make_kernel(actionkernel, move_start, move_end, L_start = 0, L_stop = 55, coef =1) #Choice Kernel """ the Action and Choice kernels are supported over the window −0.25 to 0.025 s relative to movement onset. """ if kernels[2]==True: # instantiate matrix choicekernel = np.zeros((T_trial, 55), dtype = int) #when movementstarts move_start = responsetimes[trial] - trial_start - 0.25 move_start = floor(move_start/bin_size) # move_end at +.45s/binsize because movement kernel k_d covers... # -0.25s >= movement start time =< 0.025s therefore... move_end = ceil( move_start + (0.275/bin_size) ) ##!!!! this is causing an error needs testing #add contrast to our matrix #Left Choice Kernel contrast = 1 along diagonal aligned to movement start if responsechoice[trial]==1: #Left choice choicekernel = make_kernel(choicekernel, move_start, move_end, L_start = 0, L_stop = 55, coef = 1) if responsechoice[trial]==-1: #Right choice Kernel contrast = 1 along diagonal aligned to movement start # so here we set coef to -1 choicekernel = make_kernel(choicekernel, move_start, move_end, L_start = 0, L_stop = 55, coef = -1) # Stitiching kernels together and warning about how kernel should be given def kernel_improperly_specified(): warnings.warn( "kernel must be input including vision kernel, also you cannot\ include choice kernel without action kernel." ) if kernels[0] & kernels[1] & kernels[2]: X_trial_i = np.column_stack([visionkernel , actionkernel, choicekernel]) elif kernels[0] & kernels[1]: X_trial_i = np.column_stack([visionkernel , actionkernel]) elif kernels[0]: X_trial_i = visionkernel else: kernel_improperly_specified() return(X_trial_i) #instantiate the array to stack based on kernels included #this will need to be changed if you change the kernels included if kernels[0] & kernels[1] & kernels[2]: X = np.zeros((2, 740)) elif kernels[0] & kernels[1]: X = np.zeros((2, 685)) elif kernels[0]: X = np.zeros((2, 630)) else: kernel_improperly_specified() # loop to rowstack all these things for i in range(0, trialsintervals.shape[0]): X_i = trial_section(i) X = np.row_stack([X, X_i]) #end of this for loop #clip instatiation array X = X[2:,:] return X def generate_event_interval(events, offset): """testetest makes a Alyx format .npy intervals array 0 index for interval beginings and 1 index for intervals end Args: events (numpy 1d, or list of int or floats): list of events in seconds from trial start offset(a tuple or 2 item list): time from event to make the interval extend from and to, """ # lists to be later converted to numpy arrays and stacked starts = [] ends = [] #extends lsits with values from offset for occurence in range(0, len(events)): starts.append(events[occurence] + offset[0]) ends.append(events[occurence] + offset[1]) # turn them into arrays make sure they are shaped right, as numpy is weird like that starts = np.asarray(starts) starts = np.reshape(starts, (len(starts), 1) ) ends = np.asarray(ends) ends = ends.reshape(starts.shape) out_arr =
np.column_stack([starts, ends])
numpy.column_stack
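# --- Illustrative sketch (assumed example; not part of the original analysis) ---
# Minimal, self-contained demonstration of the one-sided smoothing defined
# above: a synthetic firing-rate matrix (neurons x time bins) is smoothed
# along the time axis with halfgaussian_filter1d, mirroring the call made on
# trial_arr inside frequency_array(). Only numpy, scipy.ndimage and the
# functions above are needed.
_rng = np.random.default_rng(0)
_demo_rates = _rng.poisson(lam=5, size=(3, 200)).astype(float)  # 3 neurons, 200 time bins
_demo_smoothed = halfgaussian_filter1d(_demo_rates, sigma=4)    # smooths along the last axis
assert _demo_smoothed.shape == _demo_rates.shape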
import numpy as np import matplotlib.pyplot as plt #from .invlap import * import inspect # Used for storing the input import sys from .aquifer_parameters import param_3d, param_maq from .aquifer import Aquifer #from .bessel import * from .invlapnumba import compute_laplace_parameters_numba, invlap, invlapcomp from .util import PlotTtim class TimModel(PlotTtim): def __init__(self, kaq=[1, 1], z=[3, 2, 1], Haq=[1, 1], Hll=[0], c=[1e100, 100], Saq=[1e-4, 1e-4], Sll=[0], poraq=0.3, porll=0.3, ltype=['a', 'a'], topboundary='conf', phreatictop=False, tmin=1, tmax=10, tstart=0, M=10, kzoverkh=None, model3d=False, timmlmodel=None): self.elementlist = [] self.elementdict = {} self.vbclist = [] # variable boundary condition 'v' elements self.zbclist = [] # zero and constant boundary condition 'z' elements self.gbclist = [] # given boundary condition 'g' elements # note: given bc elements don't have any unknowns self.tmin = tmin self.tmax = tmax self.tstart = tstart self.M = M self.aq = Aquifer(self, kaq, z, Haq, Hll, c, Saq, Sll, poraq, porll, ltype, topboundary, phreatictop, kzoverkh, model3d) self.compute_laplace_parameters() self.name = 'TimModel' self.modelname = 'ml' # Used for writing out input self.timmlmodel = timmlmodel def __repr__(self): return 'Model' def initialize(self): self.gvbclist = self.gbclist + self.vbclist self.vzbclist = self.vbclist + self.zbclist # Given elements are first in list self.elementlist = self.gbclist + self.vbclist + self.zbclist self.ngbc = len(self.gbclist) self.nvbc = len(self.vbclist) self.nzbc = len(self.zbclist) self.ngvbc = self.ngbc + self.nvbc self.aq.initialize() for e in self.elementlist: e.initialize() # lists used for inverse transform enumber = [] etstart = [] ebc = [] for k in range(self.ngvbc): e = self.gvbclist[k] enumber.extend(len(e.tstart) * [k]) etstart.extend(list(e.tstart)) ebc.extend(list(e.bc)) self.enumber = np.array(enumber) self.etstart =
np.array(etstart)
numpy.array
# from numba import njit from enum import IntEnum import numpy as np # import numba # from numba.experimental import jitclass from numba import int32, int64, float64, complex128, typed from numba.core import types kv_ty = (types.unicode_type, types.int64) class ModelType(IntEnum): """ Identification of different Model types. """ GEN_ORD_6 = 0 # 6th order model VSC_1 = 1 DC_LINE = 2 VS = 3 SAVR = 4 GEN_2_2 = 5 # model 2.2 class CtrlMode(IntEnum): """ Identification of converter control modes. """ P_VAC = 0 P_Q = 1 VDC_Q = 2 # @njit def d_vsc_dt(xm, um, model): """ Voltage Source Converter differential equations Parameters ---------- xm : ndarray State vector. um : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. """ i_d = xm[model.x_idx['Id']] i_q = xm[model.x_idx['Iq']] # i_dc = x[model.x_idx['Idc']] Md = xm[model.x_idx['Md']] Mq = xm[model.x_idx['Mq']] Madd = xm[model.x_idx['Madd']] Madq = xm[model.x_idx['Madq']] Theta_pll = xm[model.x_idx['Theta']] Xpll = xm[model.x_idx['Xpll']] Xf = xm[model.x_idx['Xf']] Xp = xm[model.x_idx['Xp']] Xq = xm[model.x_idx['Xq']] Pm = xm[model.x_idx['Pm']] Qm = xm[model.x_idx['Qm']] Vm = xm[model.x_idx['Vm']] vx = um[0] vy = um[1] Vdc = um[2] Pref = um[3] Qref = um[4] Vref = um[5] vd = (vx * np.cos(Theta_pll) + vy * np.sin(Theta_pll)) vq = (-vx * np.sin(Theta_pll) + vy * np.cos(Theta_pll)) wpll = model.Kp_pll * vq + model.Ki_pll * Xpll # wpll = np.clip(wpll, 0.8, 1.2) # TODO check the limits and make them part of the model Pac = vd * i_d + vq * i_q Qac = (vq * i_d - vd * i_q) Vac = np.sqrt(vd ** 2 + vq ** 2) if model.Tpm == 0: Pm = Pac Qm = Qac if model.Tvm == 0: Vm = Vac if model.ctrl == CtrlMode.VDC_Q: # TODO seperate the control modes to avoid mixup (Vref is used for both ac and dc) dP = Vdc / Vref - 1 else: dP = Pref - Pm + model.Kpf * (1 - wpll) + model.Kif * Xf id_ref = model.Kpp * dP + Xp * model.Kip dQ = (model.Kq * (Qm - Qref) + model.Kv * (Vm - Vref)) iq_ref = dQ * model.Kpq + Xq * model.Kiq # id_max = 1 # id_ref = np.clip(id_ref, -id_max, id_max) # iq_max = np.sqrt(max(0,1-id_ref**2)) # iq_ref = np.clip(iq_ref, -iq_max, iq_max) vmd = (Madd - wpll * model.Lt * i_q + model.Kpc * (id_ref - i_d) + model.Kic * Md) / Vdc vmq = (Madq + wpll * model.Lt * i_d + model.Kpc * (iq_ref - i_q) + model.Kic * Mq) / Vdc dx = np.zeros(len(xm)) dx[model.x_idx['Id']] = model.wn / model.Lt * (vmd - vd - model.Rt * i_d + wpll * model.Lt * i_q) # di_d dx[model.x_idx['Iq']] = model.wn / model.Lt * (vmq - vq - model.Rt * i_q - wpll * model.Lt * i_d) # di_q # dx[model.x_idx['Idc']]= (model.wn/(model.Ldc)*(Pac/Vdc-i_dc)) # TODO find a propper equation assuming power # balance between AC and DC sides dx[model.x_idx['Md']] = (id_ref - i_d) # dMd dx[model.x_idx['Mq']] = (iq_ref - i_q) # dMq dx[model.x_idx['Madd']] = (-Madd + vd) / model.Tad # dMadd dx[model.x_idx['Madq']] = (-Madq + vq) / model.Tad # dMadq dx[model.x_idx['Theta']] = (wpll - 1) * model.wn # dTheta_pll dx[model.x_idx['Xpll']] = vq # dXpll dx[model.x_idx['Xf']] = (1 - wpll) # dMf dx[model.x_idx['Xp']] = dP # dMp dx[model.x_idx['Xq']] = dQ # dMq if model.Tpm > 0: dx[model.x_idx['Pm']] = (Pac - Pm) / model.Tpm dx[model.x_idx['Qm']] = (Qac - Qm) / model.Tpm if model.Tvm > 0: dx[model.x_idx['Vm']] = (Vac - Vm) / model.Tvm return dx # @njit def d_dcline_dt(xm, um, model): """ DC line differential equations Parameters ---------- xm : ndarray State vector. u : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. 
""" Il = xm[model.x_idx['Il']] Vf = xm[model.x_idx['Vf']] Vt = xm[model.x_idx['Vt']] If = um[0] It = um[1] dx = np.zeros(len(xm)) dx[model.x_idx['Il']] = model.wn * 1 / (model.L + 1e-6) * (Vf - Vt - model.R * Il) dx[model.x_idx['Vf']] = model.wn * 2 / model.C * (If - Il - model.G / 2 * Vf) dx[model.x_idx['Vt']] = model.wn * 2 / model.C * (Il - It - model.G / 2 * Vt) return dx # @njit def d_vs_dt(xm, um, model): """ Voltage source differential equations Parameters ---------- xm : ndarray State vector. um : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. """ phi = xm[model.x_idx['phi']] Ix = xm[model.x_idx['Ix']] Iy = xm[model.x_idx['Iy']] Vx = um[0] Vy = um[1] # f = um[2] fpu = 1 # TODO this should be the measured grid frequency dphi = 2 * np.pi * 50 * (fpu - 1) ux_setp = model.V0 * np.cos(phi + dphi) uy_setp = model.V0 * np.sin(phi + dphi) dIx = model.wn / model.L * (ux_setp - Vx - model.R * Ix + model.L * Iy) dIy = model.wn / model.L * (uy_setp - Vy - model.R * Iy - model.L * Ix) dx = np.zeros(len(xm)) dx[model.x_idx['phi']] = dphi dx[model.x_idx['Ix']] = dIx dx[model.x_idx['Iy']] = dIy return dx # @njit def d_gen_ord_6_rms_dt(xm, um, model): """ Sixth order generator differential equations. Generator current is not a state variable and is calculated from the terminal and subtransient voltage. Parameters ---------- xm : ndarray State vector. um : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. """ d = xm[model.x_idx['d']] w = xm[model.x_idx['w']] Eqp = xm[model.x_idx['Eqp']] Eqpp = xm[model.x_idx['Eqpp']] Edp = xm[model.x_idx['Edp']] Edpp = xm[model.x_idx['Edpp']] Efq = xm[model.x_idx['Efq']] # TODO seperate the avr from the generator to simplify using different avr models Vf = xm[model.x_idx['Vf']] X_avr = xm[model.x_idx['Xavr']] Efq = max(Efq, 0) # TODO add limits vx = um[0] vy = um[1] Vref = um[2] Vac = np.sqrt(vx ** 2 + vy ** 2) Vd = (vx * np.cos(d) + vy * np.sin(d)) Vq = (-vx * np.sin(d) + vy * np.cos(d)) Id = -(-model.ra * (Vd - Edpp) - model.xqpp * (Vq - Eqpp)) / (model.ra ** 2 + model.xqpp * model.xdpp) Iq = -(model.xdpp * (Vd - Edpp) - model.ra * (Vq - Eqpp)) / (model.ra ** 2 + model.xqpp * model.xdpp) Pe = -(Vd * Id + Vq * Iq) + (Id ** 2 + Iq ** 2) * model.ra # Pe = (Edpp*Id+Eqpp*Iq)+(model.xdpp-model.xqpp)*Id*Iq delta_w = model.wn * (w - 1) dx = np.zeros(len(xm)) dx[model.x_idx['d']] = delta_w dx[model.x_idx['w']] = 1 / (model.Tj) * (model.Pm - Pe - model.D * w) # dw dx[model.x_idx['Eqp']] = 1 / model.Tdp * (Efq - Eqp + Id * (model.xd - model.xdp)) dx[model.x_idx['Eqpp']] = 1 / model.Tdpp * (Eqp - Eqpp + Id * (model.xdp - model.xdpp)) dx[model.x_idx['Edp']] = 1 / model.Tqp * (-Edp - Iq * (model.xq - model.xqp)) dx[model.x_idx['Edpp']] = 1 / model.Tqpp * (Edp - Edpp - Iq * (model.xqp - model.xqpp)) dEfq = 1 / model.Te * (-Efq + model.Kc * (Vref - Vf) + model.Kc / model.Tc * X_avr) dx[model.x_idx['Efq']] = dEfq dx[model.x_idx['Vf']] = 1 / model.Tm * (-Vf + Vac) dx[model.x_idx['Xavr']] = (Vref - Vf) return dx # @njit def d_gen_ord_6_emt_dt(xm, um, model): """ Sixth order generator differential equations. Generator current is included as a state variable. Parameters ---------- xm : ndarray State vector. um : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. 
""" Id = xm[model.x_idx['Id']] Iq = xm[model.x_idx['Iq']] d = xm[model.x_idx['d']] w = xm[model.x_idx['w']] Eqp = xm[model.x_idx['Eqp']] Eqpp = xm[model.x_idx['Eqpp']] Edp = xm[model.x_idx['Edp']] Edpp = xm[model.x_idx['Edpp']] Efq = xm[model.x_idx['Efq']] # TODO seperate the avr from the generator to simplify using different avr models Vf = xm[model.x_idx['Vf']] X_avr = xm[model.x_idx['Xavr']] # Efq = np.clip(Efq, 0.0, 5.0) vx = um[0] vy = um[1] Vref = um[2] Vac = np.sqrt(vx ** 2 + vy ** 2) Vd = vx * np.cos(d) + vy * np.sin(d) Vq = -vx * np.sin(d) + vy * np.cos(d) Pe = (Edpp * Id + Eqpp * Iq) + (model.xdpp - model.xqpp) * Id * Iq delta_w = model.wn * (w - 1) dx = np.zeros(len(xm)) dx[model.x_idx['d']] = delta_w dx[model.x_idx['w']] = (1 / model.Tj) * (model.Pm - Pe - model.D * w) dx[model.x_idx['Eqp']] = (1 / model.Tdp) * (Efq - Eqp - Id * (model.xd - model.xdp)) dx[model.x_idx['Eqpp']] = (1 / model.Tdpp) * (Eqp - Eqpp - Id * (model.xdp - model.xdpp)) dx[model.x_idx['Edp']] = (1 / model.Tqp) * (-Edp + Iq * (model.xq - model.xqp)) dx[model.x_idx['Edpp']] = (1 / model.Tqpp) * (Edp - Edpp + Iq * (model.xqp - model.xqpp)) dEfq = 1 / model.Te * (-Efq + model.Kc * (Vref - Vf) + model.Kc / model.Tc * X_avr) dx[model.x_idx['Efq']] = dEfq dx[model.x_idx['Vf']] = 1 / model.Tm * (-Vf + Vac) dx[model.x_idx['Xavr']] = (Vref - Vf) # TODO check the equations for w*E'' dx[model.x_idx['Id']] = model.wn / model.xdpp * (w * Edpp - Vd - model.ra * Id + w * model.xqpp * Iq) dx[model.x_idx['Iq']] = model.wn / model.xqpp * (w * Eqpp - Vq - model.ra * Iq - w * model.xdpp * Id) return dx # @njit def d_gen_model_2_2_dt(xm, um, model): """ Generator model 2.2 differential equations. Generator current is included as a state variable. Parameters ---------- xm : ndarray State vector. um : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. 
""" Id = xm[model.x_idx['Id']] Iq = xm[model.x_idx['Iq']] d = xm[model.x_idx['d']] w = xm[model.x_idx['w']] psi_d = xm[model.x_idx['psi_d']] psi_q = xm[model.x_idx['psi_q']] psi_fd = xm[model.x_idx['psi_fd']] psi_1d = xm[model.x_idx['psi_1d']] psi_1q = xm[model.x_idx['psi_1q']] psi_2q = xm[model.x_idx['psi_2q']] Efd = xm[model.x_idx['Efd']] # TODO seperate the avr from the generator to simplify using different avr models Vf = xm[model.x_idx['Vf']] X_avr = xm[model.x_idx['Xavr']] # Efd = np.clip(Efd, 0.0, 5.0) vx = um[0] vy = um[1] Vref = um[2] # Efd = um[3] Vac = np.sqrt(vx ** 2 + vy ** 2) Vd = (vx * np.cos(d) + vy * np.sin(d)) Vq = (-vx * np.sin(d) + vy * np.cos(d)) vfd = model.rfd / model.xadu * Efd te = (Iq * psi_d - Id * psi_q) / model.cosn tm = 0 # TODO include torque input tdkd = model.dkd * (w - 1) tdpe = model.dpe / w * (w - 1) ifd = model.kfd * Id + (model.x1d_loop * psi_fd - (model.xad + model.xrld) * psi_1d) / model.xdet_d i1d = model.k1d * Id + (model.xfd_loop * psi_1d - (model.xad + model.xrld) * psi_fd) / model.xdet_d i1q = model.k1q * Iq + (model.x2q_loop * psi_1q - (model.xaq + model.xrlq) * psi_2q) / model.xdet_q i2q = model.k2q * Iq + (model.x1q_loop * psi_2q - (model.xaq + model.xrlq) * psi_1q) / model.xdet_q dpsi_fd = model.wn * (vfd - model.rfd * ifd) dpsi_1d = model.wn * (-model.r1d * i1d) dpsi_1q = model.wn * (-model.r1q * i1q) dpsi_2q = model.wn * (-model.r2q * i2q) Edpp = -w * (model.k1q * psi_1q + model.k2q * psi_2q) + ( model.kfd / model.wn * dpsi_fd + model.k1d / model.wn * dpsi_1d) Eqpp = w * (model.kfd * psi_fd + model.k1d * psi_1d) + ( model.k1q / model.wn * dpsi_1q + model.k2q / model.wn * dpsi_2q) delta_w = model.wn * (w - 1) dx = np.zeros(len(xm)) dx[model.x_idx['d']] = delta_w dx[model.x_idx['w']] = (1 / model.Tj) * (tm - te - tdkd - tdpe) dx[model.x_idx['psi_d']] = model.wn * (Vd + model.ra * Id + w * psi_q) dx[model.x_idx['psi_q']] = model.wn * (Vq + model.ra * Iq - w * psi_d) dx[model.x_idx['psi_fd']] = dpsi_fd dx[model.x_idx['psi_1d']] = dpsi_1d dx[model.x_idx['psi_1q']] = dpsi_1q dx[model.x_idx['psi_2q']] = dpsi_2q dx[model.x_idx['Id']] = (model.wn / model.xdpp * (Edpp - Vd - model.ra * Id + w * model.xqpp * Iq)) dx[model.x_idx['Iq']] = (model.wn / model.xqpp * (Eqpp - Vq - model.ra * Iq - w * model.xdpp * Id)) dEfd = 1 / model.Te * (-Efd + model.Kc * (Vref - Vf) + model.Kc / model.Tc * X_avr) dx[model.x_idx['Efd']] = dEfd dx[model.x_idx['Vf']] = 1 / model.Tm * (-Vf + Vac) dx[model.x_idx['Xavr']] = (Vref - Vf) return dx # @njit def d_avr_dt(xm, um, model): """ Simple AVR differential equations. Parameters ---------- xm : ndarray State vector. um : ndarray Input vector. model : object Model parameters. Returns ------- dx : ndarray State derivatives. """ Efd = xm[model.x_idx['Efd']] Vf = xm[model.x_idx['Vf']] X_avr = xm[model.x_idx['Xavr']] Vpu = um[0] Vref = um[2] dx = np.zeros(len(xm)) dx[model.x_idx['Efd']] = 1 / model.Te * (-Efd + model.Kc * (Vref - Vf) + model.Kc / model.Tc * X_avr) dx[model.x_idx['Vf']] = 1 / model.Tm * (-Vf + Vpu) dx[model.x_idx['Xavr']] = (Vref - Vf) return dx # @njit def d_network_dt(xn, un, npr): """ Differential equations of the network. Assuming non-zero capacitance at every bus. Zero capacitance buses are not yet fully tested. Parameters ---------- xn : ndarray State vector. un : ndarray Input vector. npr : object Network parameters. Returns ------- dx : ndarray State derivatives. 
""" n_bus = npr.n_bus ishx = np.zeros(n_bus) ishy = np.zeros(n_bus) for k in range(n_bus): ishx[k] += un[k * 2] ishy[k] += un[k * 2 + 1] for k, (f, t) in enumerate(zip(npr.f, npr.t)): ishx[f] -= xn[n_bus * 2 + k * 2] ishy[f] -= xn[n_bus * 2 + k * 2 + 1] ishx[t] += xn[n_bus * 2 + k * 2] ishy[t] += xn[n_bus * 2 + k * 2 + 1] dx = np.zeros(len(xn)) for i in range(n_bus): Csh = np.imag(npr.Ybus[i].sum()) vx = xn[i * 2] vy = xn[i * 2 + 1] if Csh > 1e-9: dx[i * 2] = npr.wn / Csh * (ishx[i] + Csh * vy) dx[i * 2 + 1] = npr.wn / Csh * (ishy[i] - Csh * vx) else: # TODO if Csh == 0 dx[i * 2] = npr.wn * ishx[i] dx[i * 2 + 1] = npr.wn * ishy[i] for i in range(npr.n_br): f = npr.f[i] t = npr.t[i] vfx = xn[f * 2] vfy = xn[f * 2 + 1] vtx = xn[t * 2] vty = xn[t * 2 + 1] ix = xn[n_bus * 2 + i * 2] iy = xn[n_bus * 2 + i * 2 + 1] R = np.real(-1 / npr.Ybus[f, t]) L = np.imag(-1 / npr.Ybus[f, t]) dx[n_bus * 2 + i * 2] = (npr.wn / L * (vfx - vtx - R * ix + L * iy)) dx[n_bus * 2 + i * 2 + 1] = (npr.wn / L * (vfy - vty - R * iy - L * ix)) return dx # # @njit def d_sys_dt(x, u, npr, models): # TODO # @njit can't be used with the models in a list. # I should find a way around that because it's much slower dx = np.zeros(len(x)) unw = np.zeros(npr.n_bus * 2) for model in models: vx = x[npr.x_ind + model.bus_ind * 2] vy = x[npr.x_ind + model.bus_ind * 2 + 1] if model.type == ModelType.GEN_ORD_6: xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([vx, vy, model.Vref]) dx[model.x_ind:model.x_ind + model.nx] = d_gen_ord_6_emt_dt(xm, um, model) d = x[model.x_ind + model.x_idx['d']] Id = x[model.x_ind + model.x_idx['Id']] Iq = x[model.x_ind + model.x_idx['Iq']] ix = (Id * np.cos(d) - Iq * np.sin(d)) * model.Sn / npr.Sb iy = (Id * np.sin(d) + Iq * np.cos(d)) * model.Sn / npr.Sb unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy if model.type == ModelType.GEN_2_2: xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([vx, vy, model.Vref]) dx[model.x_ind:model.x_ind + model.nx] = d_gen_model_2_2_dt(xm, um, model) d = x[model.x_ind + model.x_idx['d']] # Vd = vx*np.cos(d)+vy*np.sin(d) # Vq = -vx*np.sin(d)+vy*np.cos(d) # Eqpp = x[model.x_ind+model.x_idx['Eqpp']] # Edpp = x[model.x_ind+model.x_idx['Edpp']] # IdIq = model.Zg_inv.copy()@np.array([[Vd-Edpp],[Vq-Eqpp]]) # Id = IdIq[0,0] # Iq = IdIq[1,0] Id = x[model.x_ind + model.x_idx['Id']] Iq = x[model.x_ind + model.x_idx['Iq']] ix = (Id * np.cos(d) - Iq * np.sin(d)) * model.Sn / npr.Sb iy = (Id * np.sin(d) + Iq * np.cos(d)) * model.Sn / npr.Sb # unw = np.hstack((unw,ix,iy)) unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy elif model.type == ModelType.VSC_1: ix = (x[model.x_ind + model.x_idx['Id']] * np.cos(x[model.x_ind + model.x_idx['Theta']]) - x[ model.x_ind + model.x_idx['Iq']] * np.sin(x[model.x_ind + model.x_idx['Theta']])) * model.Sn / npr.Sb iy = (x[model.x_ind + model.x_idx['Id']] * np.sin(x[model.x_ind + model.x_idx['Theta']]) + x[ model.x_ind + model.x_idx['Iq']] * np.cos(x[model.x_ind + model.x_idx['Theta']])) * model.Sn / npr.Sb xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([vx, vy, 1, model.Pref, model.Qref, model.Vref]) dx[model.x_ind:model.x_ind + model.nx] = d_vsc_dt(xm, um, model) # unw = np.hstack((unw,ix,iy)) unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy xnw = x[npr.x_ind:] dx[npr.x_ind:] = d_network_dt(xnw, unw, npr) return dx # @njit def calc_gen_dx(x, npr, model): vx = x[npr.x_ind + model.bus_ind * 2] vy = x[npr.x_ind + model.bus_ind * 2 + 1] xm = x[model.x_ind:model.x_ind + model.nx] um = 
np.array([vx, vy, model.Vref]) dx = model.dx_dt(xm, um) d = x[model.x_ind + model.x_idx['d']] Id = x[model.x_ind + model.x_idx['Id']] Iq = x[model.x_ind + model.x_idx['Iq']] ix = (Id * np.cos(d) - Iq * np.sin(d)) * model.Sn / npr.Sb iy = (Id * np.sin(d) + Iq * np.cos(d)) * model.Sn / npr.Sb return dx, ix, iy # @njit def calc_vsc_dx(x, npr, model): if not model.x_dc == -1: vdc = x[model.x_dc] else: vdc = 1 vx = x[npr.x_ind + model.bus_ind * 2] vy = x[npr.x_ind + model.bus_ind * 2 + 1] ix = (x[model.x_ind + model.x_idx['Id']] * np.cos(x[model.x_ind + model.x_idx['Theta']]) - x[ model.x_ind + model.x_idx['Iq']] * np.sin(x[model.x_ind + model.x_idx['Theta']])) * model.Sn / npr.Sb iy = (x[model.x_ind + model.x_idx['Id']] * np.sin(x[model.x_ind + model.x_idx['Theta']]) + x[ model.x_ind + model.x_idx['Iq']] * np.cos(x[model.x_ind + model.x_idx['Theta']])) * model.Sn / npr.Sb xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([vx, vy, vdc, model.Pref, model.Qref, model.Vref]) dx = model.dx_dt(xm, um) return dx, ix, iy # @njit def calc_dc_cable_dx(x, npr, model, vsc_f, vsc_t): If = -x[vsc_f.x_ind + vsc_f.x_idx['Idc']] * vsc_f.Sn / npr.Sb It = x[vsc_t.x_ind + vsc_t.x_idx['Idc']] * vsc_t.Sn / npr.Sb xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([If, It]) dx = model.dx_dt(xm, um) return dx # @njit def calc_dc_cable2_dx(x, npr, model): If = -x[model.x_If] * model.Sf / npr.Sb It = x[model.x_It] * model.St / npr.Sb xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([If, It]) dx = model.dx_dt(xm, um) return dx # @njit def calc_vs_dx(x, npr, model): vx = x[npr.x_ind + model.bus_ind * 2] vy = x[npr.x_ind + model.bus_ind * 2 + 1] xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([vx, vy]) dx = model.dx_dt(xm, um) ix = x[model.x_ind + model.x_idx['Ix']] iy = x[model.x_ind + model.x_idx['Iy']] return dx, ix, iy def d_sys_2_dt(x, npr, models): # TODO @njit can't be used with the models in a list. 
# I should find a way around that because it's much slower dx = np.zeros(len(x)) unw = np.zeros(npr.n_bus * 2) for model in models: if model.type in [ModelType.GEN_ORD_6, ModelType.GEN_2_2]: dx[model.x_ind:model.x_ind + model.nx], ix, iy = calc_gen_dx(x, npr, model) unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy elif model.type == ModelType.VSC_1: dx[model.x_ind:model.x_ind + model.nx], ix, iy = calc_vsc_dx(x, npr, model) unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy elif model.type == ModelType.DC_LINE: dx[model.x_ind:model.x_ind + model.nx] = calc_dc_cable_dx(x, npr, model, models[model.f], models[model.t]) elif model.type == ModelType.VS: dx[model.x_ind:model.x_ind + model.nx], ix, iy = calc_vs_dx(x, npr, model) unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy xnw = x[npr.x_ind:] dx[npr.x_ind:] = d_network_dt(xnw, unw, npr) return dx # @njit def d_sys_nswph_dt(x, u, npr, OFF, WF, SC): dx = np.zeros(len(x)) unw = np.zeros(npr.n_bus * 2) for model in OFF.models: vx = x[npr.x_ind + model.bus_ind * 2] vy = x[npr.x_ind + model.bus_ind * 2 + 1] if not model.x_dc == -1: vdc = x[model.x_dc] else: vdc = 1 ix = (x[model.x_ind + model.x_idx['Id']] * np.cos(x[model.x_ind + model.x_idx['Theta']]) - x[ model.x_ind + model.x_idx['Iq']] * np.sin(x[model.x_ind + model.x_idx['Theta']])) * model.Sn / npr.Sb iy = (x[model.x_ind + model.x_idx['Id']] * np.sin(x[model.x_ind + model.x_idx['Theta']]) + x[ model.x_ind + model.x_idx['Iq']] * np.cos(x[model.x_ind + model.x_idx['Theta']])) * model.Sn / npr.Sb xm = x[model.x_ind:model.x_ind + model.nx] um = np.array([vx, vy, vdc, model.Pref, model.Qref, model.Vref]) dx[model.x_ind:model.x_ind + model.nx] = model.dx_dt(xm, um) unw[model.bus_ind * 2] += ix unw[model.bus_ind * 2 + 1] += iy for model in WF.models: vx = x[npr.x_ind + model.bus_ind * 2] vy = x[npr.x_ind + model.bus_ind * 2 + 1] ix = (x[model.x_ind + model.x_idx['Id']] * np.cos(x[model.x_ind + model.x_idx['Theta']]) - x[ model.x_ind + model.x_idx['Iq']] *
np.sin(x[model.x_ind + model.x_idx['Theta']])
numpy.sin
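# --- Illustrative sketch (assumed usage; not part of the original module) ---
# The functions above only return state derivatives; a time-domain simulation
# still needs an integrator around them. A minimal fixed-step RK4 loop of the
# kind that would drive d_sys_2_dt(x, npr, models) is sketched here, with a
# toy damped oscillator standing in for the real derivative function.
def rk4_step(f, x, dt):
    """One classical Runge-Kutta step for dx/dt = f(x)."""
    k1 = f(x)
    k2 = f(x + 0.5 * dt * k1)
    k3 = f(x + 0.5 * dt * k2)
    k4 = f(x + dt * k3)
    return x + dt / 6.0 * (k1 + 2.0 * k2 + 2.0 * k3 + k4)

def _toy_dx_dt(x):
    # damped oscillator: state is [position, velocity]
    return np.array([x[1], -x[0] - 0.1 * x[1]])

_x = np.array([1.0, 0.0])
for _ in range(1000):
    _x = rk4_step(_toy_dx_dt, _x, dt=0.01)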
import datetime from datetime import date from datetime import datetime import requests import pymysql import json import math import skfuzzy as fuzz from skfuzzy import control as ctrl import numpy as np import matplotlib from apscheduler.schedulers.blocking import BlockingScheduler import config ''' Developed by <NAME> This script is used inside the irrigationRecommendation container and does the following: 1. Get weather parameters and sent it to the referenceEvapotranspiration Entity 2. Calculate Reference evaotranspiration for the city of São Bernardo do Campo, São Paulo, Brazil 3. Calculate the irrigation recommendation based on FAO's Crop Evapotranspiration 4. Calculate the irrigation recommendation based on a Fuzzy Inference System developed by <NAME> ''' def get_daily_info(): """ Colled daily weather data and calculate max, min, avg values for temperature, relative humidity and wind speed """ connection = pymysql.connect(host='db-mysql', user='root', password='<PASSWORD>', db='lab') # Select Maximum daily temperature cursor = connection.cursor() sql = 'SELECT MAX(CASE attrName WHEN "temperature" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) tmax = cursor.fetchone() # Collect minimum daily temperature sql = 'SELECT MIN(CASE attrName WHEN "temperature" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) tmin = cursor.fetchone() # Collect minimum daily temperature sql = 'SELECT AVG(CASE attrName WHEN "temperature" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) tmed = cursor.fetchone() # Collect maximum daily humidity sql = 'SELECT MAX(CASE attrName WHEN "humidity" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) rhmax = cursor.fetchone() # Collect minimum daily humidity sql = 'SELECT MIN(CASE attrName WHEN "humidity" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) rhmin = cursor.fetchone() # Collect average daily humidity sql = 'SELECT AVG(CASE attrName WHEN "humidity" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) rhmed = cursor.fetchone() # Collect average daily wind speed sql = 'SELECT MAX(CASE attrName WHEN "windSpeed" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) vmax = cursor.fetchone() # Collect average daily wind speed sql = 'SELECT MIN(CASE attrName WHEN "windSpeed" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) vmin = cursor.fetchone() # Collect average daily wind speed sql = 'SELECT AVG(CASE attrName WHEN "windSpeed" THEN attrValue END) FROM `WeatherCurrent_WeatherCurrent` WHERE date(recvTime) = curdate()' cursor.execute(sql) vmed = cursor.fetchone() payload = json.dumps({ "dateObserved": { "value": datetime.now().isoformat() }, "dailyTmax": { "value": float(tmax[0]) }, "dailyTmin": { "value": float(tmin[0]) }, "dailyTmed": { "value": float(tmed[0]) }, "dailyRhmax": { "value": float(rhmax[0]) }, "dailyRhmin": { "value": float(rhmin[0]) }, "dailyRhmed": { "value": float(rhmed[0]) }, "dailyVmax": { "value": float(vmax[0]) }, "dailyVmin": { "value": float(vmin[0]) }, "dailyVmed": { "value": float(vmed[0]) } }) print(f'Sending payload to Orion: {payload}') url = 
"http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs" headers = { 'Content-Type': 'application/json', 'fiware-service': 'lab', 'fiware-servicepath': '/' } try: response = requests.request("PATCH", url, headers=headers, data = payload) print(response.text.encode('utf8')) except requests.exceptions.RequestException as e: # This is the correct syntax print(e) def evapotranspiration(): ''' Calculates the reference evapotranspiration based on referenceEvapotranspiration the entity and Sunrise and sunset hours 1. Gets data from the reference Evapotranspiration entity. 2. Gets Sunrise and Sunsset hours from the OpenWeather API 3. Calculates the reference evapotranspiration. 4. Send the daily evapotranspiration to the reference Evapotranspiration Entity. ''' url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } response = requests.request("GET", url, headers=headers, data = payload).json() day_corr = date.today().toordinal() - date(date.today().year,1,1).toordinal() tmax = response['dailyTmax'] tmin = response['dailyTmin'] tmed = response['dailyTmed'] rhmax = response['dailyRhmax'] v_med = response['dailyVmed'] alt_v = 2 rhmed =response['dailyRhmed'] # Collect sunrise and sunset hours from Ope Weather API and calculate difference url = "http://api.openweathermap.org/data/2.5/weather?id=3449344&appid=1b43995d45e76484eac79c54b28ad885&units=metric" payload = {} headers= {} response = requests.request("GET", url, headers=headers, data = payload) r = response.json() n = datetime.fromtimestamp(r['sys']['sunset']) - datetime.fromtimestamp(r['sys']['sunrise']) n = n.total_seconds()/3600 # tmax is maximum temperature for the day # tmin is the minimum temperature for the day # rhmax is the maximum relative humidity for the day # n is the actual duration of sunshine [hour] # v_med is the average wind velocity Km/h # alt_v is the altitute from the ground that the wind speed is collected. As for a weather station for example. # rhmed # day_corr is the current date in the range 1 to 365 elev = 801 # Elevation from sea level. Used the city of São Bernardo- São Paulo - Brazil. Change if needed. p = 92.183188 # Atmospheric Pressure use eq 101.3*math.pow((293-0.0065*elev)/293,5.26). Used the city of São Bernardo- São Paulo - Brazil. Change if needed. phi = -0.414081215084 # latitude in radians. Used the city of São Bernardo- São Paulo - Brazil. Change if needed. 
y = 0.665*math.pow(10,-3)*p # y is the psycometric constant dr = 1 + 0.033*math.cos((2*math.pi*day_corr)/365) # Dr is Relative Distance Earth-Sun delt = 0.409 * math.sin(((2*math.pi*day_corr)/365)-1.39) # Delt is solar declination e0_tmax = 0.6108*math.pow(math.e,((17.27*tmax)/(tmax+237.3))) # eo_tmax is saturation vapor pressure for the max air temperature e0_tmin = 0.6108*math.pow(math.e,((17.27*tmin)/(tmin+237.3))) # eo_tmin is saturation vapor pressure for the min air temperature es = (e0_tmax + e0_tmin)/2 # es is the mean saturation vapor pressure D = (4098*(0.6108*math.pow(math.e,((17.27*tmed)/(tmed+237.3)))))/math.pow((tmed+237.3),2) # D is Slope Vapor Pressure Curve ea = es*rhmed/100 # ea us actual vapor pressure considering an average relative humidity ws = math.acos(-math.tan(phi)*math.tan(delt)) #Ws is sunset hour angle ra = 37.5860314*dr*((ws*math.sin(phi)*math.sin(delt)) + (math.cos(phi)*math.cos(delt)*math.sin(ws))) # Ra is Extraterrestrial Radiation rs = (0.25 + (0.5 * (n/(7.6394 * ws))))*ra #*0.408 Rs is solar radiation rns = 0.77*rs #rns ius Net Shortwave Radiation rso = (0.75*ra) # Rso is Clear Sky Solar Radiation f_rs_rso = rs/rso if f_rs_rso > 1: f_rs_rso = 1 rnl = (4.903*math.pow(10,-9)) * ((math.pow((tmax+273.16),4) + math.pow((tmin+273.16),4))/2) * (0.34+(-0.14*math.sqrt(ea))) * ((1.35*(f_rs_rso))-0.35) # Rnl is Net Long Wave Radiation r_n = rns - rnl # Rn is Neet Radiation g = 0 uz = v_med*1000/3600 # uz is Wind Speed measured at Z height in m/s u2 = uz*(4.87/(math.log(67.8*alt_v - 5.42))) #u2 is is wind speed at 2m above ground et_o = ((0.408*D*(r_n-g)+y*(900/(tmed+273))*u2*(es-ea))/(D+y*(1+0.34*u2)))/0.85 # Calculate daily evapotranspiration based on the values before payload = json.dumps({ "evapotranspiration": { "value": et_o } }) print(f'Sending payload to Orion: {payload}') url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs" headers = { 'Content-Type': 'application/json', 'fiware-service': 'lab', 'fiware-servicepath': '/' } try: response = requests.request("PATCH", url, headers=headers, data = payload) print(response.text.encode('utf8')) except requests.exceptions.RequestException as e: # This is the correct syntax print(e) def fao_recommendation(): ''' Calculate Fao's irrigation recommendation for a crop of Pepper and send to management zone 1 1. Get daily reference evapotranspiration (Eto) from referenceEvappotranspiration entity 2. Get days after seeding (Das) from cropInstance evapotranspiration 3. Get crop Koeficiente (kc) from Crop type entity 4. Send irrigation recommendation based on Eto * Kc ''' # 1. Get daily reference evapotranspiration (Eto) from referenceEvappotranspiration entity url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } response = requests.request("GET", url, headers=headers, data = payload).json() eto = response['evapotranspiration'] # 2. Get days after seeding (Das) from cropInstance evapotranspiration url = "http://orion:1026/v2/entities/urn:ngsi-ld:CropInstance:Pepper/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } response = requests.request("GET", url, headers=headers, data = payload).json() das = date.today().toordinal() - datetime.strptime(response['SeedingDay'], '%Y-%m-%dT%H:%M:%S.%fZ').toordinal() # 3. 
Get crop Koeficiente (kc) from Crop type entity url = "http://orion:1026/v2/entities/urn:ngsi-ld:CropType:Pepper/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } r = requests.request("GET", url, headers=headers, data = payload).json() kc = 0 IniDays = float(r['stageIniDays']) DevDays = float(r['stageDevDays']) MidDays = float(r['stageMidDays']) LateDays = float(r['stageLateDays']) IniKc = float(r['stageIniKc']) MidKc = float(r['stageMidKc']) LateKc = float(r['stageLateKc']) if das <= IniDays: kc = IniKc elif das > IniDays and das <= IniDays + DevDays: kc = IniKc + ((MidKc-IniKc) * (das - IniDays)) / (DevDays) elif das > IniDays + DevDays and das <= IniDays + DevDays + MidDays: kc = MidKc elif das > IniDays + DevDays + MidDays and das <= IniDays + DevDays + MidDays + LateDays: kc = LateKc + ((MidKc - LateKc) * (IniDays + DevDays + MidDays + LateDays - das)) / (LateDays) else: print('error') # 4. Send irrigation recommendation based on Eto * Kc payload = json.dumps({ "irrigationInMilimiters": { "value": eto*kc }, "timeCalculated" : { "value": datetime.now().isoformat() } }) print(f'Sending payload to Orion: {payload}') url = "http://orion:1026/v2/entities/urn:ngsi-ld:IrrigationRecommendation:1/attrs" headers = { 'Content-Type': 'application/json', 'fiware-service': 'lab', 'fiware-servicepath': '/' } try: response = requests.request("PATCH", url, headers=headers, data = payload) print(response.text.encode('utf8')) except requests.exceptions.RequestException as e: # This is the correct syntax print(e) def fuzzy_recommendation(): ''' Calculate Fuzzy Irrigation recommendation for a crop of Pepper and send to management zone 2 1. Get daily reference evapotranspiration (Eto) from referenceEvappotranspiration entity 2. Get days after seeding (Das) from cropInstance evapotranspiration 3. Get crop Coeficiente (kc) from Crop type entity 4. Get rain prediction and rain probability for the next day from Weather Forecast Entity. 5. Get Daily Average Reading for the Soil Probe 5. Send irrigation recommendation based on Eto * Kc ''' # 1. Get daily reference evapotranspiration (Eto) from referenceEvappotranspiration 2 entity url = "http://orion:1026/v2/entities/urn:ngsi-ld:referenceEvapotranspiration:1/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } response = requests.request("GET", url, headers=headers, data = payload).json() eto = float(response['evapotranspiration']) # 2. Get days after seeding (Das) from cropInstance evapotranspiration url = "http://orion:1026/v2/entities/urn:ngsi-ld:CropInstance:Pepper/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } response = requests.request("GET", url, headers=headers, data = payload).json() das_in = date.today().toordinal() - datetime.strptime(response['SeedingDay'], '%Y-%m-%dT%H:%M:%S.%fZ').toordinal() # 3. 
Get crop Koeficiente (kc) from Crop type entity url = "http://orion:1026/v2/entities/urn:ngsi-ld:CropType:Pepper/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } r = requests.request("GET", url, headers=headers, data = payload).json() kc = 0 IniDays = float(r['stageIniDays']) DevDays = float(r['stageDevDays']) MidDays = float(r['stageMidDays']) LateDays = float(r['stageLateDays']) IniKc = float(r['stageIniKc']) MidKc = float(r['stageMidKc']) LateKc = float(r['stageLateKc']) if das_in <= IniDays: kc = IniKc elif das_in > IniDays and das_in <= IniDays + DevDays: kc = IniKc + ((MidKc-IniKc) * (das_in - IniDays)) / (DevDays) elif das_in > IniDays + DevDays and das_in <= IniDays + DevDays + MidDays: kc = MidKc elif das_in > IniDays + DevDays + MidDays and das_in <= IniDays + DevDays + MidDays + LateDays: kc = LateKc + ((MidKc - LateKc) * (IniDays + DevDays + MidDays + LateDays - das_in)) / (LateDays) else: print('error') # 4. Get rain prediction and rain probability for the next day from Weather Forecast Entity. url = f"https://api.openweathermap.org/data/2.5/onecall?lat=-23.73&lon=-46.58&exclude=minutely,hourly&appid={config.api_key}&units=metric" payload = {} headers= {} r = requests.request("GET", url, headers=headers, data = payload).json() rain_prob_in = r["daily"][1]["pop"] rain_pred = 0 if "rain" in r["daily"][1].keys(): rain_pred_in = r["daily"][1]["rain"] else: rain_pred_in= 0 # 5. Get Average sensor reading for both soil moisture sensors connection = pymysql.connect(host='db-mysql', user='root', password='<PASSWORD>', db='lab') cursor = connection.cursor() sql = 'SELECT AVG(CASE attrName WHEN "soilMoistureCalibratedDepth1" THEN attrValue END) FROM `urn_ngsi-ld_SoilProbe_1_SoilProbe` WHERE date(recvTime) = curdate()' cursor.execute(sql) sm1 = cursor.fetchone() avg_soil_moisture_1 = float(sm1[0]) sql = 'SELECT AVG(CASE attrName WHEN "soilMoistureCalibratedDepth2" THEN attrValue END) FROM `urn_ngsi-ld_SoilProbe_1_SoilProbe` WHERE date(recvTime) = curdate()' cursor.execute(sql) sm2 = cursor.fetchone() avg_soil_moisture_2 = float(sm1[0]) # 6. Get daily arverage rain for the current day/month url = "http://orion:1026/v2/entities/urn:ngsi-ld:rainAvg:SBC/attrs?options=keyValues" payload = {} headers = { 'fiware-service': 'lab', 'fiware-servicepath': '/' } r = requests.request("GET", url, headers=headers, data = payload).json() month = datetime.now().strftime('%h') daily_rain_avg = float(r[month]) # Dynamic rain forecast levels based on daily average rain for the current month for fuzzifier input. ini = round(daily_rain_avg * 0.25,0) med = round(daily_rain_avg * 1.0,0) fin = round(daily_rain_avg * 1.75,0) # Define Variables das = ctrl.Antecedent(np.arange(0, 110, 0.25), 'das') # Das = Days after seeding rain_pred = ctrl.Antecedent(np.arange(0, 5, 0.25), 'rain_pred') # rain_pred = Rain prediction in mm rain_prob = ctrl.Antecedent(np.arange(0, 100, 0.25), 'rain_prob') # rain_prob = rain_ probability in % depl = ctrl.Antecedent(
np.arange(18, 36, 0.25)
numpy.arange
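# --- Illustrative sketch (assumed helper; not part of the original script) ---
# The crop-coefficient staging used in fao_recommendation() and
# fuzzy_recommendation(), pulled out as a standalone function: Kc is constant
# in the initial stage, ramps linearly through development, plateaus at
# mid-season and then ramps linearly toward the late-season value. The stage
# lengths and Kc values in the example call are placeholders, not the ones
# stored in the CropType entity.
def crop_coefficient(das, ini_days, dev_days, mid_days, late_days, ini_kc, mid_kc, late_kc):
    if das <= ini_days:
        return ini_kc
    if das <= ini_days + dev_days:
        return ini_kc + (mid_kc - ini_kc) * (das - ini_days) / dev_days
    if das <= ini_days + dev_days + mid_days:
        return mid_kc
    if das <= ini_days + dev_days + mid_days + late_days:
        return late_kc + (mid_kc - late_kc) * (ini_days + dev_days + mid_days + late_days - das) / late_days
    raise ValueError('days after seeding exceeds the crop cycle length')

kc_example = crop_coefficient(das=45, ini_days=30, dev_days=35, mid_days=40, late_days=20,
                              ini_kc=0.6, mid_kc=1.05, late_kc=0.9)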
import numpy as np import matplotlib.pyplot as plt import networkx as nx from scipy import fftpack from scipy import signal data = np.loadtxt('Ach_spike.txt') W = np.loadtxt('strangr_1_2.txt') data_array = np.loadtxt('Ach_array.txt') plt.figure() plt.scatter(data[:,0],data[:,1],cmap='viridis',linewidth=0.5,color="k",marker='.',s=9,alpha=0.5) spike_onetime = [] spike_totall = [] #print(len(np.nonzero(data_array[int(50/0.0125 +123),:])[0])) # print(data_array[int(50/0.0125 +123):int(50/0.0125 +128),:]) # print(len(np.nonzero(data_array[int(50/0.0125 +123):int(50/0.0125 +123)+ 1600 ,:])[0]) ) index = np.where(data_array.any(axis=1))[0] difference = index[1:]- index[0:-1] difference =
np.array(difference)
numpy.array
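# --- Illustrative note (assumed equivalence; not part of the original script) ---
# The subtraction above computes the spacing between consecutive rows of
# data_array that contain at least one spike; np.diff(index) returns exactly
# the same array in a single call.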
import numpy as np import pandas as pd import xarray as xr import os import Grid import pf_dynamic_sph as pfs import pf_static_sph if __name__ == "__main__": # # Initialization # matplotlib.rcParams.update({'font.size': 12, 'text.usetex': True}) # gParams (Lx, Ly, Lz) = (20, 20, 20) (dx, dy, dz) = (0.2, 0.2, 0.2) NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz) xgrid = Grid.Grid('CARTESIAN_3D') xgrid.initArray('x', -Lx, Lx, dx); xgrid.initArray('y', -Ly, Ly, dy); xgrid.initArray('z', -Lz, Lz, dz) NGridPoints_cart = (1 + 2 * Lx / dx) * (1 + 2 * Ly / dy) * (1 + 2 * Lz / dz) NGridPoints_desired = (1 + 2 * Lx / dx) * (1 + 2 * Lz / dz) Ntheta = 50 Nk = np.ceil(NGridPoints_desired / Ntheta) theta_max = np.pi thetaArray, dtheta = np.linspace(0, theta_max, Ntheta, retstep=True) # k_max = np.sqrt((np.pi / dx)**2 + (np.pi / dy)**2 + (np.pi / dz)**2) k_max = ((2 * np.pi / dx)**3 / (4 * np.pi / 3))**(1 / 3) k_min = 1e-5 kArray, dk = np.linspace(k_min, k_max, Nk, retstep=True) if dk < k_min: print('k ARRAY GENERATION ERROR') kgrid = Grid.Grid("SPHERICAL_2D") kgrid.initArray_premade('k', kArray) kgrid.initArray_premade('th', thetaArray) dVk = kgrid.dV() # Basic parameters mI = 1.7 mB = 1 n0 = 1 aBB = 0.075 gBB = (4 * np.pi / mB) * aBB names = list(kgrid.arrays.keys()) # ***need to have arrays added as k, th when kgrid is created if names[0] != 'k': print('CREATED kgrid IN WRONG ORDER') functions_wk = [lambda k: pfs.omegak(k, mB, n0, gBB), lambda th: 0 * th + 1] wk = kgrid.function_prod(names, functions_wk) # datapath = '/home/kis/Dropbox/VariationalResearch/HarvardOdyssey/ZwierleinExp_data/NGridPoints_{:.2E}'.format(NGridPoints_cart) # datapath = '/media/kis/Storage/Dropbox/VariationalResearch/HarvardOdyssey/ZwierleinExp_data/NGridPoints_{:.2E}'.format(NGridPoints_cart) datapath = '/n/regal/demler_lab/kis/ZwierleinExp_data/NGridPoints_{:.2E}'.format(NGridPoints_cart) # innerdatapath = datapath + '/imdyn_spherical' innerdatapath = datapath + '/redyn_spherical_nonint' outputdatapath = datapath + '/dirRF' # # Individual Datasets Nsteps = 1e2 pf_static_sph.createSpline_grid(Nsteps, kgrid, mI, mB, n0, gBB) aSi_tck =
np.load('aSi_spline_sph.npy')
numpy.load
'''This module contains all scientific equations and operations necessary for calculating monthly mean energy fluxes from the IMB data.'''
import scientific_constants as sc
import parameters as param
import numpy as np
import difference_functions as df

def region_code(ncid_in, index, aux_vars_non_salin):
    '''Determines whether a buoy was contained entirely within the North Pole or Beaufort Sea regions within a particular month of operation'''
    import sys
    sys.path.insert(0, '/home/h01/hadax/Python/PhD/Buoys/hadax-IceMassBalanceBuoys/')
    import spatial
    region_codes = [spatial.region_key([ncid_in.variables['longitude_rt'][:][index][item], ncid_in.variables['latitude_rt'][:][index][item]]) for item in range(index[0].shape[0])]
    if np.unique(
np.array(region_codes)
numpy.array
# Licensed to the Apache Software Foundation (ASF) under one # or more contributor license agreements. See the NOTICE file # distributed with this work for additional information # regarding copyright ownership. The ASF licenses this file # to you under the Apache License, Version 2.0 (the # "License"); you may not use this file except in compliance # with the License. You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, # software distributed under the License is distributed on an # "AS IS" BASIS, WITHOUT WARRANTIES OR CONDITIONS OF ANY # KIND, either express or implied. See the License for the # specific language governing permissions and limitations # under the License. """Unit tests for graph partitioning.""" import os import sys import json import math import numpy as np import tvm from tvm import te import tvm.relay.testing import tvm.relay.transform from tvm import relay from tvm.relay import transform from tvm import runtime from tvm.contrib import utils import vta import vta.testing def check_result(mod, map_inputs, out_shape, result, tol=1e-5, target="llvm", ctx=tvm.cpu(), use_graph_rt=True): if sys.platform == "win32": print("Skip test on Windows for now") return def update_lib(lib): vta_hw_path = os.environ['VTA_HW_PATH'] tvm_home = os.environ['TVM_HOME'] test_dir = os.path.dirname(os.path.realpath(os.path.expanduser(__file__))) source_dir = os.path.join(test_dir, "..", "..", "..") vta_config = json.load(open('/' + os.path.join(*(vta_hw_path.split(os.path.sep) + ['config', 'vta_config.json'])))) vta_config['LOG_BLOCK_IN'] = vta_config['LOG_BLOCK'] vta_config['LOG_BLOCK_OUT'] = vta_config['LOG_BLOCK'] vta_config['LOG_OUT_WIDTH'] = vta_config['LOG_INP_WIDTH'] vta_config['LOG_OUT_BUFF_SIZE'] = vta_config['LOG_ACC_BUFF_SIZE'] + vta_config['LOG_OUT_WIDTH'] - vta_config['LOG_ACC_WIDTH'] kwargs = {} kwargs["options"] = ["-O2", "-std=c++14", f"-L{tvm_home}/build", "-lvta_fsim", f'-I{tvm_home}/src/runtime/contrib', f"-I{tvm_home}/3rdparty/vta-hw/include"] \ + [f'-D{"VTA_" + x}={y}' for (x, y) in filter(lambda pi: 'LOG' in pi[0], vta_config.items())] kwargs["options"].append(f'-DVTA_LOG_BLOCK_IN={vta_config["LOG_BLOCK"]}') kwargs["options"].append(f'-DVTA_LOG_BLOCK_OUT={vta_config["LOG_BLOCK"]}') tmp_path = utils.tempdir() lib_name = "lib.so" lib_path = tmp_path.relpath(lib_name) lib.export_library(lib_path, fcompile=False, **kwargs) lib = tvm.runtime.load_module(lib_path) return lib def check_vm_result(): with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): exe = relay.vm.compile(mod, target=target) code, lib = exe.save() lib = update_lib(lib) exe = runtime.vm.Executable.load_exec(code, lib) vm = runtime.vm.VirtualMachine(exe, ctx) out = vm.run(**map_inputs) tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) def check_graph_runtime_result(): with tvm.transform.PassContext(opt_level=3, disabled_pass=["AlterOpLayout"]): json, lib, _ = relay.build(mod, target=target) lib = update_lib(lib) rt_mod = tvm.contrib.graph_runtime.create(json, lib, ctx) for name, data in map_inputs.items(): rt_mod.set_input(name, data) rt_mod.run() out = tvm.nd.empty(out_shape, ctx=ctx) out = rt_mod.get_output(0, out) tvm.testing.assert_allclose(out.asnumpy(), result, rtol=tol, atol=tol) check_vm_result() if use_graph_rt: check_graph_runtime_result() def set_external_func_attr(func, compiler, ext_symbol): func = func.with_attr("Primitive", tvm.tir.IntImm("int32", 1)) func = 
func.with_attr("Compiler", compiler) func = func.with_attr("global_symbol", ext_symbol) return func def test_multi_node_subgraph(): x = relay.var("x", shape=(10, 10)) w0 = relay.var("w0", shape=(10, 10)) w1 = relay.var("w1", shape=(10, 10)) w2 = relay.var("w2", shape=(10, 10)) w3 = relay.var("w3", shape=(10, 10)) w4 = relay.var("w4", shape=(10, 10)) w5 = relay.var("w5", shape=(10, 10)) w6 = relay.var("w6", shape=(10, 10)) w7 = relay.var("w7", shape=(10, 10)) # subgraph0 x0 = relay.var("x0", shape=(10, 10)) w00 = relay.var("w00", shape=(10, 10)) w01 = relay.var("w01", shape=(10, 10)) w02 = relay.var("w02", shape=(10, 10)) z00 = relay.add(x0, w00) p00 = relay.subtract(z00, w01) q00 = relay.multiply(p00, w02) subgraph0 = relay.Function([x0, w00, w01, w02], q00) subgraph0 = set_external_func_attr(subgraph0, "ccompiler", "ccompiler_0") call0 = relay.Call(subgraph0, [x, w0, w1, w2]) # subgraph1 x1 = relay.var("x1", shape=(10, 10)) w10 = relay.var("w10", shape=(10, 10)) w11 = relay.var("w11", shape=(10, 10)) w12 = relay.var("w12", shape=(10, 10)) z10 = relay.add(x1, w10) p10 = relay.subtract(z10, w11) q10 = relay.multiply(p10, w12) subgraph1 = relay.Function([x1, w10, w11, w12], q10) subgraph1 = set_external_func_attr(subgraph1, "ccompiler", "ccompiler_1") call1 = relay.Call(subgraph1, [x, w3, w4, w5]) # Other parts on TVM z2 = relay.add(x, w6) q2 = relay.subtract(z2, w7) r = relay.concatenate((call0, call1, q2), axis=0) f = relay.Function([x, w0, w1, w2, w3, w4, w5, w6, w7], r) mod = tvm.IRModule() mod["main"] = f mod = relay.transform.InferType()(mod) x_data = np.random.rand(10, 10).astype("float32") w_data = [] for _ in range(8): w_data.append(np.random.rand(10, 10).astype("float32")) map_inputs = {"w{}".format(i): w_data[i] for i in range(8)} map_inputs["x"] = x_data check_result( mod, map_inputs, (30, 10), np.concatenate( ( ((x_data + w_data[0]) - w_data[1]) * w_data[2], ((x_data + w_data[3]) - w_data[4]) * w_data[5], x_data + w_data[6] - w_data[7], ), axis=0, ), ) def test_extern_gcc_single_op(): x = relay.var("x", shape=(8, 8)) y = relay.var("y", shape=(8, 8)) x0 = relay.var("x0", shape=(8, 8)) y0 = relay.var("y0", shape=(8, 8)) z = x0 + y0 f = relay.Function([x0, y0], z) f = set_external_func_attr(f, "ccompiler", "ccompiler_0") call = relay.Call(f, [x, y]) mod = tvm.IRModule.from_expr(call) x_data =
np.random.rand(8, 8)
numpy.random.rand
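The row above exercises TVM's external-codegen ("BYOC") path: a Relay function is routed to the "ccompiler" backend purely through the Primitive/Compiler/global_symbol attributes that set_external_func_attr attaches. A minimal sketch of that tagging, using only calls that already appear in the test above (a TVM build with the ccompiler C-source codegen is assumed):

import tvm
from tvm import relay

a = relay.var("a", shape=(8, 8))
b = relay.var("b", shape=(8, 8))
ext_fn = relay.Function([a, b], relay.add(a, b))
# Same three attributes that set_external_func_attr sets in the test.
ext_fn = ext_fn.with_attr("Primitive", tvm.tir.IntImm("int32", 1))
ext_fn = ext_fn.with_attr("Compiler", "ccompiler")
ext_fn = ext_fn.with_attr("global_symbol", "ccompiler_0")

x = relay.var("x", shape=(8, 8))
y = relay.var("y", shape=(8, 8))
mod = tvm.IRModule.from_expr(relay.Call(ext_fn, [x, y]))
mod = relay.transform.InferType()(mod)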
import numpy as np
from numpy.testing import assert_allclose, run_module_suite

from pyins import sim
from pyins import earth
from pyins import dcm


def test_from_position():
    dt = 1e-1
    n_points = 1000
    lat = np.full(n_points, 50.0)
    lon = np.full(n_points, 45.0)
    alt = np.zeros(n_points)
    h = np.zeros(n_points)
    p = np.zeros(n_points)
    r = np.zeros(n_points)
    VE = np.zeros(n_points)
    VN = np.zeros(n_points)
    VU = np.zeros(n_points)

    slat = np.sin(np.deg2rad(50))
    clat = (1 - slat**2) ** 0.5
    gyro = earth.RATE * np.array([0, clat, slat]) * dt
    accel = np.array([0, 0, earth.gravity(50)]) * dt

    traj, gyro_g, accel_g = sim.from_position(dt, lat, lon, alt, h, p, r)

    assert_allclose(traj.lat, 50, rtol=1e-12)
assert_allclose(traj.lon, 45, rtol=1e-12)
numpy.testing.assert_allclose
import unittest

import numpy as np
import tomophantom
import tomophantom.phantom3d
from ccpi.reconstruction.conebeam import alg as ccpi_reconstruction


class TestConebeamReconstruction(unittest.TestCase):
    """Test the cone beam reconstruction."""

    def setUp(self):
        pass

    def tearDown(self):
        pass

    def test_create_phantom(self):
        data = ccpi_reconstruction.create_phantom()
        self.assertEqual(data[0].shape[0], 250)
        self.assertEqual(data[0].shape[1], 512)
        self.assertEqual(data[0].shape[2], 512)
        self.assertEqual(data[1].shape[0], 250)
        self.assertEqual(data[2], 250.0)
        self.assertEqual(data[3], 987.0)
        self.assertEqual(data[4], 0.390625)
        self.assertEqual(data[5], 0.390625)
        self.assertEqual(data[6], 25.151547886198443)

    @unittest.skip("takes a long time to process")
    def test_cgls_test(self):
        data = ccpi_reconstruction.create_phantom()
        projection = data[0]    # projections
        angles = data[1]        # angles
        h_offsets = np.zeros(1, dtype='float32')
        v_offsets = np.zeros(1, dtype='float32')
        source_x = data[2]      # source_x
        detector_x = data[3]    # detector_x
        h_pixel_size = data[4]  # h pixel size
        v_pixel_size = data[5]  # v pixel size
        mask_radius = data[6]   # mask radius
        full_vox_origin = np.zeros(3, dtype='float32')
        voxel_size = np.ones(3, dtype='float32')
        niterations = 10
        nthreads = 16
        recon = ccpi_reconstruction.cgls(projection, angles, h_offsets, v_offsets,
                                         1, source_x, detector_x, h_pixel_size,
                                         v_pixel_size, mask_radius, False,
                                         full_vox_origin, voxel_size,
                                         niterations, nthreads, True)
        self.assertNotEqual(recon[256, 256, 256], 0)

    def prepare_phantom(self):
        num_angles = 250
        num_h_pixels = 512
        num_v_pixels = 512
        source_x = -250.0
        source_y = 0
        source_z = 0
        detector_x = 987.0 - 250.0
        xpixel_size = 0.390625
        ypixel_size = 0.390625
        h_pixels = np.arange(num_h_pixels, dtype='float32')
        v_pixels = np.arange(num_v_pixels, dtype='float32')
        pixel_base = -((num_h_pixels - 1) * xpixel_size / 2.0)
        h_pixels = h_pixels * xpixel_size + pixel_base
        pixel_base = -((num_v_pixels - 1) * ypixel_size / 2.0)
        v_pixels = v_pixels * ypixel_size + pixel_base
        mask_radius = -source_x * np.sin(np.arctan(h_pixels[num_h_pixels - 1] /
                                                   (detector_x - source_x)))
        grid_offset = np.zeros(3, dtype='float32')
        voxel_size =
np.ones(3, dtype='float32')
numpy.ones
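The mask radius asserted in test_create_phantom (25.151547886198443) is the source-to-axis distance times the sine of the half fan angle subtended by the outermost detector pixel, exactly the expression used in prepare_phantom. A worked check, with only numpy assumed:

import numpy as np

source_x = -250.0            # source-to-rotation-axis distance (along -x)
detector_x = 987.0 - 250.0   # axis-to-detector distance
pixel_size = 0.390625
num_h_pixels = 512

h_max = (num_h_pixels - 1) * pixel_size / 2.0        # outermost pixel centre
fan_half_angle = np.arctan(h_max / (detector_x - source_x))
mask_radius = -source_x * np.sin(fan_half_angle)
print(mask_radius)           # ~25.1515, matching the asserted value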
""" Utility functions that wrap the hyperopt library. """ import json import pickle import time from os.path import join, exists import numpy as np from hyperopt import fmin, tpe, hp, STATUS_OK, Trials from rlkit.pythonplusplus import ( dot_map_dict_to_nested_dict, merge_recursive_dicts, dict_to_safe_json, ) def optimize_and_save( base_log_dir, function, search_space, num_rounds=100, num_evals_per_round=1, load_trials=False, trials_filename="trials.pkl", search_space_filename="search_space.pkl", non_hp_results_filename="non_hp_results.json", verbose=False, **kwargs ): assert num_rounds > 0 assert num_evals_per_round > 0 trials = None trials_path = join(base_log_dir, trials_filename) if load_trials and exists(trials_path): with open(trials_path, 'rb') as handle: trials = pickle.load(handle) start_time = time.time() for round in range(num_rounds): num_evals = (round+1) * num_evals_per_round best_params, min_value, trials, best_variant = optimize( function=function, search_space=search_space, num_evals=num_evals, trials=trials, **kwargs ) search_space_path = join(base_log_dir, search_space_filename) non_hp_results_path = join(base_log_dir, non_hp_results_filename) if verbose: print("# evaluations so far:", num_evals) print("best_params:", best_params) print("min_value:", min_value) print("best_variant:", best_variant) print("Total time elapsed = {}".format(time.time() - start_time)) print("Saving all results {0}".format(base_log_dir)) with open(trials_path, 'wb') as handle: pickle.dump(trials, handle, protocol=pickle.HIGHEST_PROTOCOL) with open(search_space_path, 'wb') as handle: pickle.dump(search_space, handle, protocol=pickle.HIGHEST_PROTOCOL) with open(non_hp_results_path, "w") as f: results = { 'best_params': dict_to_safe_json(best_params), 'min_value': min_value, 'best_variant': dict_to_safe_json(best_variant), } json.dump(results, f, indent=2, sort_keys=True) def optimize( function, search_space, extra_function_kwargs=None, trials=None, num_evals=10, maximize=False, flatten_choice_dictionary=True, dotmap_to_nested_dictionary=True, max_magnitude=1e10, ): """ Optimize (by default minimize) a function over a search space. :param function: Function to optimize over. :param search_space: a hyperopt.pyll.base.Apply instance, or a dictionary mapping from keyword to Apply node, as in ``` space = { 'b': hp.uniform('b', -1, 1), 'c': hp.uniform('c', 10, 11), ... } ``` A bit hacky, but the `b` in the dictionary key should match the `b` in the argument to the `hp.uniform` call. It doesn't *really* need to match, as the `b` in the argument gets ignored, but it's good practice. See https://github.com/hyperopt/hyperopt/wiki/FMin#2-defining-a-search-space for more details on defining spaces. :param extra_function_kwargs: Extra kwargs to pass to function. :param trials: hyperopt.base.Trials. Its important members are: - `trials.trials` - a list of dictionaries representing everything about the search - `trials.results` - a list of dictionaries returned by 'objective' during the search - `trials.losses()` - a list of losses (float for each 'ok' trial) - `trials.statuses()` - a list of status strings :param num_evals: Maximum number of queries to function. :param maximize: Default behavior is the minimize the functions. If True, maximize the . :param dotmap_to_nested_dictionary: If True, convert keys like `a.b` into nested dictionaries before passing onto the main function. 
:param flatten_choice_dictionary: If True, flatten the nested dictionary caused by creating a choice variable before passing it to the functions. :param max_magnitude: Clip the returned value's magnitude to this value. A choice variable defined as ``` z=hp.choice('z', [ ('z_one', {'a': 5}), ('z_two', {'b': 4), ]) ``` will results in a dictionary like ``` { `z`: `z_one`, `a`: 5, } ``` or ``` { `z`: `z_two`, `a`: 4, } ``` that is passed to the optimization function. If `flatten_choice_dictionary` is False, you get ``` { `z`: ( 'z_one', { `a`: 4, } ) } ``` which I find a bit messier. :return: tuple - best_params: Best dictionary passed to function. Does not include the `extra_function_kwargs`. - minimum: value at the best_variant. - trials: updated hyperopt.base.Trials instance. - best_variant: Best dictionary over the search space. Similar to `best_params`, but this has a type that hyperopt uses. """ if extra_function_kwargs is None: extra_function_kwargs = {} def wrapped_function(params): start_time = time.time() if flatten_choice_dictionary: params = flatten_hyperopt_choice_dict(params) if dotmap_to_nested_dictionary: params = dot_map_dict_to_nested_dict(params) loss = function(merge_recursive_dicts( params, extra_function_kwargs, ignore_duplicate_keys_in_second_dict=True, )) if maximize: loss = - loss if
np.isnan(loss)
numpy.isnan
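The optimize() docstring above spells out the expected search-space format and the four-element return value. A hedged usage sketch (the objective and its variable names are illustrative, not from the source; only hp.uniform spaces from the docstring are used):

from hyperopt import hp

def objective(variant):
    # variant is the (possibly nested) dict built from the search space
    return (variant['b'] - 0.3) ** 2 + (variant['c'] - 10.5) ** 2

search_space = {
    'b': hp.uniform('b', -1, 1),
    'c': hp.uniform('c', 10, 11),
}

best_params, min_value, trials, best_variant = optimize(
    objective,
    search_space,
    num_evals=50,
)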
import numpy as np import logging import copy import numbers from scipy.special import logsumexp from sklearn.base import BaseEstimator from sklearn.utils.validation import check_is_fitted, NotFittedError from .preprocessing import DataWindow from . import core from .plotting import _plot_ratemap from .auxiliary import TuningCurve1D, TuningCurve2D from .utils_.decorators import keyword_deprecation """ FiringRateEstimator(BaseEstimator) DRAFT SPECIFICATION X : BST / spike counts (or actual spikes?) y : firing rate (not used) z : position (response variable) mode = ['hist', 'glm-poisson', 'glm-binomial', 'glm', 'gvm', 'bars', 'gp'] fit(X, y, z) : estimate model parameters, or collect histogram evidence y = predict(X, z) : this predicts the firing rate estimate for data score (X, y, z) RateMap(BaseEstimator): X : position (state: discrete) y : firing rate(s) mode = ['continuous', 'discrete', 'circular'] fit(X, y) : assign rates to X bins y = predict(X) : predicts, and then smooths, firing rates bst = synthesize(X) : predicts and smooths firing rate, and then synthesize spikes _bins _ratemap _mode BayesianDecoder(BaseEstimator): X : BST / spike counts y : position fit(X, y) : fits the RateMap, and occupancy (and other params, like movement) y = predict(X) : predicts position from spike counts (also called decode) """ class KeywordError(Exception): def __init__(self, message): self.message = message class UnitSlicer(object): def __init__(self, obj): self.obj = obj def __getitem__(self, *args): """units ids""" # by default, keep all units unitslice = slice(None, None, None) if isinstance(*args, int): unitslice = args[0] else: slices = np.s_[args]; slices = slices[0] unitslice = slices if isinstance(unitslice, slice): start = unitslice.start stop = unitslice.stop istep = unitslice.step try: if start is None: istart = 0 else: istart = list(self.obj.unit_ids).index(start) except ValueError: raise KeyError('unit_id {} could not be found in RateMap!'.format(start)) try: if stop is None: istop = self.obj.n_units else: istop = list(self.obj.unit_ids).index(stop) + 1 except ValueError: raise KeyError('unit_id {} could not be found in RateMap!'.format(stop)) if istep is None: istep = 1 if istep < 0: istop -=1 istart -=1 istart, istop = istop, istart unit_idx_list = list(range(istart, istop, istep)) else: unit_idx_list = [] unitslice = np.atleast_1d(unitslice) for unit in unitslice: try: uidx = list(self.obj.unit_ids).index(unit) except ValueError: raise KeyError("unit_id {} could not be found in RateMap!".format(unit)) else: unit_idx_list.append(uidx) return unit_idx_list class ItemGetter_loc(object): """.loc is primarily label based (that is, unit_id based) .loc will raise KeyError when the items are not found. Allowed inputs are: - A single label, e.g. 5 or 'a', (note that 5 is interpreted as a label of the index. This use is not an integer position along the index) - A list or array of labels ['a', 'b', 'c'] - A slice object with labels 'a':'f', (note that contrary to usual python slices, both the start and the stop are included!) """ def __init__(self, obj): self.obj = obj def __getitem__(self, idx): """unit_ids""" unit_idx_list = self.obj._slicer[idx] return self.obj[unit_idx_list] class ItemGetter_iloc(object): """.iloc is primarily integer position based (from 0 to length-1 of the axis). .iloc will raise IndexError if a requested indexer is out-of-bounds, except slice indexers which allow out-of-bounds indexing. (this conforms with python/numpy slice semantics). 
Allowed inputs are: - An integer e.g. 5 - A list or array of integers [4, 3, 0] - A slice object with ints 1:7 """ def __init__(self, obj): self.obj = obj def __getitem__(self, idx): """intervals, series""" unit_idx_list = idx if isinstance(idx, int): unit_idx_list = [idx] return self.obj[unit_idx_list] class RateMap(BaseEstimator): """ RateMap with persistent unit_ids and firing rates in Hz. NOTE: RateMap assumes a [uniform] isometric spacing in all dimensions of the rate map. This is only relevant when smoothing is applied. mode = ['continuous', 'discrete', 'circular'] fit(X, y) estimates ratemap [discrete, continuous, circular] predict(X) predicts firing rate synthesize(X) generates spikes based on input (inhomogenous Poisson?) Parameters ---------- connectivity : string ['continuous', 'discrete', 'circular'], optional Defines how smoothing is applied. If 'discrete', then no smoothing is applied. Default is 'continuous'. """ def __init__(self, connectivity='continuous'): self.connectivity = connectivity self._slicer = UnitSlicer(self) self.loc = ItemGetter_loc(self) self.iloc = ItemGetter_iloc(self) def __repr__(self): r = super().__repr__() if self._is_fitted(): if self.is_1d: r += ' with shape (n_units={}, n_bins_x={})'.format(*self.shape) else: r += ' with shape (n_units={}, n_bins_x={}, n_bins_y={})'.format(*self.shape) return r def fit(self, X, y, dt=1, unit_ids=None): """Fit firing rates Parameters ---------- X : array-like, shape (n_bins,), or (n_bins_x, n_bins_y) Bin locations (centers) where ratemap is defined. y : array-like, shape (n_units, n_bins) or (n_units, n_bins_x, n_bins_y) Expected number of spikes in a temporal bin of width dt, for each of the predictor bins specified in X. dt : float, optional (default=1) Temporal bin size with which firing rate y is defined. For example, if dt==1, then the firing rate is in Hz. If dt==0.001, then the firing rate is in kHz, and so on. unit_ids : array-like, shape (n_units,), optional (default=None) Persistent unit IDs that are used to associate units after permutation. Unit IDs are inherited from nelpy.core.BinnedEventArray objects, or initialized to np.arange(n_units). """ n_units, n_bins_x, n_bins_y = self._check_X_y(X, y) if n_bins_y > 0: # self.ratemap_ = np.zeros((n_units, n_bins_x, n_bins_y)) #FIXME self.ratemap_ = y/dt bin_centers_x = np.squeeze(X[:,0]) bin_centers_y = np.squeeze(X[:,1]) bin_dx = np.median(np.diff(bin_centers_x)) bin_dy = np.median(np.diff(bin_centers_y)) bins_x = np.insert(bin_centers_x[:-1] + np.diff(bin_centers_x)/2, 0, bin_centers_x[0] - bin_dx/2) bins_x = np.append(bins_x, bins_x[-1] + bin_dx) bins_y = np.insert(bin_centers_y[:-1] + np.diff(bin_centers_y)/2, 0, bin_centers_y[0] - bin_dy/2) bins_y = np.append(bins_y, bins_y[-1] + bin_dy) self._bins_x = bins_x self._bins_y = bins_y self._bin_centers_x = bin_centers_x self._bin_centers_y = X[:,1] else: # self.ratemap_ = np.zeros((n_units, n_bins_x)) #FIXME self.ratemap_ = y/dt bin_centers_x = np.squeeze(X) bin_dx = np.median(np.diff(bin_centers_x)) bins_x = np.insert(bin_centers_x[:-1] + np.diff(bin_centers_x)/2, 0, bin_centers_x[0] - bin_dx/2) bins_x = np.append(bins_x, bins_x[-1] + bin_dx) self._bins_x = bins_x self._bin_centers_x = bin_centers_x if unit_ids is not None: if len(unit_ids) != n_units: raise ValueError("'unit_ids' must have same number of elements as 'n_units'. 
{} != {}".format(len(unit_ids), n_units)) self._unit_ids = unit_ids else: self._unit_ids = np.arange(n_units) def predict(self, X): check_is_fitted(self, 'ratemap_') raise NotImplementedError def synthesize(self, X): check_is_fitted(self, 'ratemap_') raise NotImplementedError def __len__(self): return self.n_units def __iter__(self): """TuningCurve1D iterator initialization""" # initialize the internal index to zero when used as iterator self._index = 0 return self def __next__(self): """TuningCurve1D iterator advancer.""" index = self._index if index > self.n_units - 1: raise StopIteration out = copy.copy(self) out.ratemap_ = self.ratemap_[tuple([index])] out._unit_ids = self._unit_ids[index] self._index += 1 return out def __getitem__(self, *idx): """RateMap unit index access. NOTE: this is index-based, not label-based. For label-based, use loc[...] Accepts integers, slices, and lists""" idx = [ii for ii in idx] if len(idx) == 1 and not isinstance(idx[0], int): idx = idx[0] if isinstance(idx, tuple): idx = [ii for ii in idx] try: out = copy.copy(self) out.ratemap_ = self.ratemap_[tuple([idx])] out._unit_ids = list(np.array(out._unit_ids)[tuple([idx])]) out._slicer = UnitSlicer(out) out.loc = ItemGetter_loc(out) out.iloc = ItemGetter_iloc(out) return out except Exception: raise TypeError( 'unsupported subsctipting type {}'.format(type(idx))) def get_peak_firing_order_ids(self): """Get the unit_ids in order of peak firing location for 1D RateMaps. Returns ------- unit_ids : array-like The permutaiton of unit_ids such that after reordering, the peak firing locations are ordered along the RateMap. """ check_is_fitted(self, 'ratemap_') if self.is_2d: raise NotImplementedError("get_peak_firing_order_ids() only implemented for 1D RateMaps.") peakorder = np.argmax(self.ratemap_, axis=1).argsort() return np.array(self.unit_ids)[peakorder] def reorder_units_by_ids(self, unit_ids, inplace=False): """Permute the unit ordering. #TODO If no order is specified, and an ordering exists from fit(), then the data in X will automatically be permuted to match that registered during fit(). Parameters ---------- unit_ids : array-like, shape (n_units,) Returns ------- out : reordered RateMap """ def swap_units(arr, frm, to): """swap 'units' of a 3D np.array""" arr[(frm, to),:] = arr[(to, frm),:] self._validate_unit_ids(unit_ids) if len(unit_ids) != len(self._unit_ids): raise ValueError('unit_ids must be a permutation of self.unit_ids, not a subset thereof.') if inplace: out = self else: out = copy.deepcopy(self) neworder = [list(self.unit_ids).index(x) for x in unit_ids] oldorder = list(range(len(neworder))) for oi, ni in enumerate(neworder): frm = oldorder.index(ni) to = oi swap_units(out.ratemap_, frm, to) out._unit_ids[frm], out._unit_ids[to] = out._unit_ids[to], out._unit_ids[frm] oldorder[frm], oldorder[to] = oldorder[to], oldorder[frm] return out def _check_X_y(self, X, y): X = np.atleast_1d(X) y = np.atleast_2d(y) n_units = y.shape[0] n_bins_xy = y.shape[1] try: n_bins_yy = y.shape[2] except IndexError: n_bins_yy = 0 n_bins_xx = X.shape[0] try: n_bins_yx = X.shape[1] except IndexError: n_bins_yx = 0 assert n_units > 0, "n_units must be a positive integer!" 
assert n_bins_xx == n_bins_xy, "X and y must have the same n_bins_x" assert n_bins_yx == n_bins_yy, "X and y must have the same n_bins_y" n_bins_x = n_bins_xx n_bins_y = n_bins_yy return n_units, n_bins_x, n_bins_y def _validate_unit_ids(self, unit_ids): self._check_unit_ids_in_ratemap(unit_ids) if len(set(unit_ids)) != len(unit_ids): raise ValueError("Duplicate unit_ids are not allowed.") def _check_unit_ids_in_ratemap(self, unit_ids): for unit_id in unit_ids: # NOTE: the check below allows for predict() to pass on only # a subset of the units that were used during fit! So we # could fit on 100 units, and then predict on only 10 of # them, if we wanted. if unit_id not in self.unit_ids: raise ValueError('unit_id {} was not present during fit(); aborting...'.format(unit_id)) def _is_fitted(self): try: check_is_fitted(self, 'ratemap_') except Exception: # should really be except NotFitterError return False return True @property def connectivity(self): return self._connectivity @connectivity.setter def connectivity(self, val): self._connectivity = self._validate_connectivity(val) @staticmethod def _validate_connectivity(connectivity): connectivity = str(connectivity).strip().lower() options = ['continuous', 'discrete', 'circular'] if connectivity in options: return connectivity raise NotImplementedError("connectivity '{}' is not supported yet!".format(str(connectivity))) @staticmethod def _units_from_X(X): """ Get unit_ids from bst X, or generate them from ndarray X. Returns ------- n_units : unit_ids : """ raise NotImplementedError @property def T(self): """transpose the ratemap. Here we transpose the x and y dims, and return a new RateMap object. """ if self.is_1d: return self out = copy.copy(self) out.ratemap_ = np.transpose(out.ratemap_, axes=(0,2,1)) return out @property def shape(self): """ RateMap.shape = (n_units, n_features_x, n_features_y) OR RateMap.shape = (n_units, n_features) """ check_is_fitted(self, 'ratemap_') return self.ratemap_.shape @property def is_1d(self): check_is_fitted(self, 'ratemap_') if len(self.ratemap_.shape) == 2: return True return False @property def is_2d(self): check_is_fitted(self, 'ratemap_') if len(self.ratemap_.shape) == 3: return True return False @property def n_units(self): check_is_fitted(self, 'ratemap_') return self.ratemap_.shape[0] @property def unit_ids(self): check_is_fitted(self, 'ratemap_') return self._unit_ids @property def n_bins(self): """(int) Number of external correlates (bins).""" check_is_fitted(self, 'ratemap_') if self.is_2d: return self.n_bins_x*self.n_bins_y return self.n_bins_x @property def n_bins_x(self): """(int) Number of external correlates (bins).""" check_is_fitted(self, 'ratemap_') return self.ratemap_.shape[1] @property def n_bins_y(self): """(int) Number of external correlates (bins).""" check_is_fitted(self, 'ratemap_') if self.is_1d: raise ValueError('RateMap is 1D; no y bins are defined.') return self.ratemap_.shape[2] def max(self, axis=None, out=None): """ maximum firing rate for each unit: RateMap.max() maximum firing rate across units: RateMap.max(axis=0) """ check_is_fitted(self, 'ratemap_') if axis == None: if self.is_2d: return self.ratemap_.max(axis=1, out=out).max(axis=1, out=out) else: return self.ratemap_.max(axis=1, out=out) return self.ratemap_.max(axis=axis, out=out) def min(self, axis=None, out=None): check_is_fitted(self, 'ratemap_') if axis == None: if self.is_2d: return self.ratemap_.min(axis=1, out=out).min(axis=1, out=out) else: return self.ratemap_.min(axis=1, out=out) return 
self.ratemap_.min(axis=axis, out=out) def mean(self, axis=None, dtype=None, out=None, keepdims=False): check_is_fitted(self, 'ratemap_') kwargs = {'dtype':dtype, 'out':out, 'keepdims':keepdims} if axis == None: if self.is_2d: return self.ratemap_.mean(axis=1, **kwargs).mean(axis=1, **kwargs) else: return self.ratemap_.mean(axis=1, **kwargs) return self.ratemap_.mean(axis=axis, **kwargs) @property def bins(self): if self.is_1d: return self._bins_x return np.vstack((self._bins_x, self._bins_y)) @property def bins_x(self): return self._bins_x @property def bins_y(self): if self.is_2d: return self._bins_y else: raise ValueError('only valid for 2D RateMap() objects.') @property def bin_centers(self): if self.is_1d: return self._bin_centers_x return np.vstack((self._bin_centers_x, self._bin_centers_y)) @property def bin_centers_x(self): return self._bin_centers_x @property def bin_centers_y(self): if self.is_2d: return self._bin_centers_y else: raise ValueError('only valid for 2D RateMap() objects.') @property def mask(self): return self._mask @mask.setter def mask(self, val): #TODO: mask validation raise NotImplementedError self._mask = val def plot(self, **kwargs): check_is_fitted(self, 'ratemap_') if self.is_2d: raise NotImplementedError("plot() not yet implemented for 2D RateMaps.") pad = kwargs.pop('pad', None) _plot_ratemap(self, pad=pad, **kwargs) @keyword_deprecation(replace_x_with_y={'bw':'truncate'}) def smooth(self, *, sigma=None, truncate=None, inplace=False, mode=None, cval=None): """Smooths the tuning curve with a Gaussian kernel. mode : {‘reflect’, ‘constant’, ‘nearest’, ‘mirror’, ‘wrap’}, optional The mode parameter determines how the array borders are handled, where cval is the value when mode is equal to ‘constant’. Default is ‘reflect’ truncate : float Truncate the filter at this many standard deviations. Default is 4.0. truncate : float, deprecated Truncate the filter at this many standard deviations. Default is 4.0. cval : scalar, optional Value to fill past edges of input if mode is ‘constant’. Default is 0.0 """ if sigma is None: sigma = 0.1 # in units of extern if truncate is None: truncate = 4 if mode is None: mode = 'reflect' if cval is None: cval = 0.0 raise NotImplementedError class BayesianDecoderTemp(BaseEstimator): """ Bayesian decoder wrapper class. mode = ['hist', 'glm-poisson', 'glm-binomial', 'glm', 'gvm', 'bars', 'gp'] (gvm = generalized von mises; see http://kordinglab.com/spykes/getting-started.html) QQQ. Do we always bin first? does GLM and BARS use spike times, or binned spike counts? I think GLM uses binned spike counts with Poisson regression; not sure about BARS. QQQ. What other methods should be supported? BAKS? What is state of the art? QQQ. What if we want to know the fring rate over time? What does the input y look like then? How about trial averaged? How about a tuning curve? AAA. At the end of the day, this class should estimate a ratemap, and we need some way to set the domain of that ratemap, if desired, but it should not have to assume anything else. Values in y might be repeated, but if not, then we estimate the (single-trial) firing rate over time or whatever the associated y represents. See https://arxiv.org/pdf/1602.07389.pdf for more GLM intuition? 
and http://www.stat.columbia.edu/~liam/teaching/neurostat-fall18/glm-notes.pdf [2] https://www.biorxiv.org/content/biorxiv/early/2017/02/24/111450.full.pdf?%3Fcollection= http://kordinglab.com/spykes/getting-started.html https://xcorr.net/2011/10/03/using-the-binomial-glm-instead-of-the-poisson-for-spike-data/ [1] http://www.stat.cmu.edu/~kass/papers/bars.pdf https://gist.github.com/AustinRochford/d640a240af12f6869a7b9b592485ca15 https://discourse.pymc.io/t/bayesian-adaptive-regression-splines-and-mcmc-some-questions/756/5 """ def __init__(self, rate_estimator=None, w=None, ratemap=None): self._rate_estimator = self._validate_rate_estimator(rate_estimator) self._ratemap = self._validate_ratemap(ratemap) self._w = self._validate_window(w) @property def rate_estimator(self): return self._rate_estimator @property def ratemap(self): return self._ratemap @property def w(self): return self._w @staticmethod def _validate_rate_estimator(rate_estimator): if rate_estimator is None: rate_estimator = FiringRateEstimator() elif not isinstance(rate_estimator, FiringRateEstimator): raise TypeError("'rate_estimator' must be a nelpy FiringRateEstimator() type!") return rate_estimator @staticmethod def _validate_ratemap(ratemap): if ratemap is None: ratemap = NDRateMap() elif not isinstance(ratemap, NDRateMap): raise TypeError("'ratemap' must be a nelpy RateMap() type!") return ratemap @staticmethod def _validate_window(w): if w is None: w = DataWindow(sum=True, bin_width=1) elif not isinstance(w, DataWindow): raise TypeError('w must be a nelpy DataWindow() type!') else: w = copy.copy(w) if w._sum is False: logging.warning('BayesianDecoder requires DataWindow (w) to have sum=True; changing to True') w._sum = True if w.bin_width is None: w.bin_width = 1 return w def _check_X_dt(self, X, *, lengths=None, dt=None): if isinstance(X, core.BinnedEventArray): if dt is not None: logging.warning("A {} was passed in, so 'dt' will be ignored...".format(X.type_name)) dt = X.ds if self._w.bin_width != dt: raise ValueError('BayesianDecoder was fit with a bin_width of {}, but is being used to predict data with a bin_width of {}'.format(self.w.bin_width, dt)) X, T = self.w.transform(X, lengths=lengths, sum=True) else: if dt is not None: if self._w.bin_width != dt: raise ValueError('BayesianDecoder was fit with a bin_width of {}, but is being used to predict data with a bin_width of {}'.format(self.w.bin_width, dt)) else: dt = self._w.bin_width return X, dt def _check_X_y(self, X, y, *, method='score', lengths=None): if isinstance(X, core.BinnedEventArray): if method == 'fit': self._w.bin_width = X.ds logging.info('Updating DataWindow.bin_width from training data.') else: if self._w.bin_width != X.ds: raise ValueError('BayesianDecoder was fit with a bin_width of {}, but is being used to predict data with a bin_width of {}'.format(self.w.bin_width, X.ds)) X, T = self.w.transform(X, lengths=lengths, sum=True) if isinstance(y, core.RegularlySampledAnalogSignalArray): y = y(T).T if isinstance(y, core.RegularlySampledAnalogSignalArray): raise TypeError('y can only be a RegularlySampledAnalogSignalArray if X is a BinnedEventArray.') assert len(X) == len(y), "X and y must have the same number of samples!" return X, y def _ratemap_permute_unit_order(self, unit_ids, inplace=False): """Permute the unit ordering. If no order is specified, and an ordering exists from fit(), then the data in X will automatically be permuted to match that registered during fit(). 
Parameters ---------- unit_ids : array-like, shape (n_units,) """ unit_ids = self._check_unit_ids(unit_ids=unit_ids) if len(unit_ids) != len(self.unit_ids): raise ValueError("To re-order (permute) units, 'unit_ids' must have the same length as self._unit_ids.") self._ratemap.reorder_units_by_ids(unit_ids, inplace=inplace) def _check_unit_ids(self,*, X=None, unit_ids=None, fit=False): """Check that unit_ids are valid (if provided), and return unit_ids. if calling from fit(), pass in fit=True, which will skip checks against self.ratemap, which doesn't exist before fitting... """ def a_contains_b(a, b): """Returns True iff 'b' is a subset of 'a'.""" for bb in b: if bb not in a: logging.warning("{} was not found in set".format(bb)) return False return True if isinstance(X, core.BinnedEventArray): if unit_ids is not None: # unit_ids were passed in, even though it's also contained in X.unit_ids # 1. check that unit_ids are contained in the data: if not a_contains_b(X.series_ids, unit_ids): raise ValueError('Some unit_ids were not contained in X!') # 2. check that unit_ids are contained in self (decoder ratemap) if not fit: if not a_contains_b(self.unit_ids, unit_ids): raise ValueError('Some unit_ids were not contained in ratemap!') else: # infer unit_ids from X unit_ids = X.series_ids # check that unit_ids are contained in self (decoder ratemap) if not fit: if not a_contains_b(self.unit_ids, unit_ids): raise ValueError('Some unit_ids from X were not contained in ratemap!') else: # a non-nelpy X was passed, possibly X=None if unit_ids is not None: # 1. check that unit_ids are contained in self (decoder ratemap) if not fit: if not a_contains_b(self.unit_ids, unit_ids): raise ValueError('Some unit_ids were not contained in ratemap!') else: # no unit_ids were passed, only a non-nelpy X if X is not None: n_samples, n_units = X.shape if not fit: if n_units > self.n_units: raise ValueError("X contains more units than decoder! {} > {}".format(n_units, self.n_units)) unit_ids = self.unit_ids[:n_units] else: unit_ids = np.arange(n_units) else: raise NotImplementedError ("unexpected branch reached...") return unit_ids def _get_transformed_ratemap(self, unit_ids): # first, trim ratemap to subset of units ratemap = self.ratemap.loc[unit_ids] # then, permute the ratemap ratemap = ratemap.reorder_units_by_ids(unit_ids) # maybe unneccessary, since .loc already permutes return ratemap def fit(self, X, y, *, lengths=None, dt=None, unit_ids=None, n_bins=None, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y Parameters ---------- X : array-like, shape (n_samples, n_features) Training vectors, where n_samples is the number of samples and n_features is the number of features. OR nelpy.core.BinnedEventArray / BinnedSpikeTrainArray The number of spikes in each time bin for each neuron/unit. y : array-like, shape (n_samples, n_output_dims) Target values. OR nelpy.core.RegularlySampledAnalogSignalArray containing the target values corresponding to X. NOTE: If X is an array-like, then y must be an array-like. lengths : array-like, shape (n_epochs,), optional (default=None) Lengths (in samples) of contiguous segments in (X, y). .. versionadded:: x.xx BayesianDecoder does not yet support *lengths*. unit_ids : array-like, shape (n_units,), optional (default=None) Persistent unit IDs that are used to associate units after permutation. Unit IDs are inherited from nelpy.core.BinnedEventArray objects, or initialized to np.arange(n_units). 
sample_weight : array-like, shape (n_samples,), optional (default=None) Weights applied to individual samples (1. for unweighted). .. versionadded:: x.xx BayesianDecoder does not yet support fitting with *sample_weight*. Returns ------- self : object """ #TODO dt should probably come from datawindow specification, but may be overridden here! unit_ids = self._check_unit_ids(X=X, unit_ids=unit_ids, fit=True) # estimate the firing rate(s): self.rate_estimator.fit(X=X, y=y, dt=dt, n_bins=n_bins) # store the estimated firing rates as a rate map: bin_centers = self.rate_estimator.tc_.bin_centers #temp code FIXME bins = self.rate_estimator.tc_.bins #temp code FIXME rates = self.rate_estimator.tc_.ratemap #temp code FIXME # unit_ids = np.array(self.rate_estimator.tc_.unit_ids) #temp code FIXME self.ratemap.fit(X=bin_centers, y=rates, unit_ids=unit_ids) #temp code FIXME X, y = self._check_X_y(X, y, method='fit', lengths=lengths) # can I remove this? no; it sets the bin width... but maybe we should refactor... self.ratemap_ = self.ratemap.ratemap_ def predict(self, X, *, output=None, mode='mean', lengths=None, unit_ids=None, dt=None): # if output is 'asa', then return an ASA check_is_fitted(self, 'ratemap_') unit_ids = self._check_unit_ids(X=X, unit_ids=unit_ids) ratemap = self._get_transformed_ratemap(unit_ids) X, dt = self._check_X_dt(X=X, lengths=lengths, dt=dt) posterior, mean_pth = decode_bayesian_memoryless_nd(X=X, ratemap=ratemap.ratemap_, dt=dt, bin_centers=ratemap.bin_centers) if output is not None: raise NotImplementedError("output mode not implemented yet") return posterior, mean_pth def predict_proba(self, X, *, lengths=None, unit_ids=None, dt=None): check_is_fitted(self, 'ratemap_') raise NotImplementedError ratemap = self._get_transformed_ratemap(unit_ids) return self._predict_proba_from_ratemap(X, ratemap) def score(self, X, y, *, lengths=None, unit_ids=None, dt=None): # check that unit_ids are valid # THEN, transform X, y into standardized form (including trimming and permutation) and continue with scoring check_is_fitted(self, 'ratemap_') unit_ids = self._check_unit_ids(X=X, unit_ids=unit_ids) ratemap = self._get_transformed_ratemap(unit_ids) # X = self._permute_unit_order(X) # X, y = self._check_X_y(X, y, method='score', unit_ids=unit_ids) raise NotImplementedError ratemap = self._get_transformed_ratemap(unit_ids) return self._score_from_ratemap(X, ratemap) def score_samples(self, X, y, *, lengths=None, unit_ids=None, dt=None): # X = self._permute_unit_order(X) check_is_fitted(self, 'ratemap_') raise NotImplementedError @property def unit_ids(self): check_is_fitted(self, 'ratemap_') return self.ratemap.unit_ids @property def n_units(self): check_is_fitted(self, 'ratemap_') return len(self.unit_ids) class FiringRateEstimator(BaseEstimator): """ FiringRateEstimator Estimate the firing rate of a spike train. mode = ['hist', 'glm-poisson', 'glm-binomial', 'glm', 'gvm', 'bars', 'gp'] (gvm = generalized von mises; see http://kordinglab.com/spykes/getting-started.html) QQQ. Do we always bin first? does GLM and BARS use spike times, or binned spike counts? I think GLM uses binned spike counts with Poisson regression; not sure about BARS. QQQ. What other methods should be supported? BAKS? What is state of the art? QQQ. What if we want to know the fring rate over time? What does the input y look like then? How about trial averaged? How about a tuning curve? AAA. 
At the end of the day, this class should estimate a ratemap, and we need some way to set the domain of that ratemap, if desired, but it should not have to assume anything else. Values in y might be repeated, but if not, then we estimate the (single-trial) firing rate over time or whatever the associated y represents. See https://arxiv.org/pdf/1602.07389.pdf for more GLM intuition? and http://www.stat.columbia.edu/~liam/teaching/neurostat-fall18/glm-notes.pdf [2] https://www.biorxiv.org/content/biorxiv/early/2017/02/24/111450.full.pdf?%3Fcollection= http://kordinglab.com/spykes/getting-started.html https://xcorr.net/2011/10/03/using-the-binomial-glm-instead-of-the-poisson-for-spike-data/ [1] http://www.stat.cmu.edu/~kass/papers/bars.pdf https://gist.github.com/AustinRochford/d640a240af12f6869a7b9b592485ca15 https://discourse.pymc.io/t/bayesian-adaptive-regression-splines-and-mcmc-some-questions/756/5 """ def __init__(self, mode='hist'): if mode not in ['hist']: raise NotImplementedError("mode '{}' not supported / implemented yet!".format(mode)) self._mode = mode def _check_X_y_dt(self, X, y, lengths=None, dt=None, timestamps=None, n_bins=None): if isinstance(X, core.BinnedEventArray): T = X.bin_centers if lengths is not None: logging.warning("'lengths' was passed in, but will be" \ " overwritten by 'X's 'lengths' attribute") if timestamps is not None: logging.warning("'timestamps' was passed in, but will be" \ " overwritten by 'X's 'bin_centers' attribute") if dt is not None: logging.warning("'dt' was passed in, but will be overwritten" \ " by 'X's 'ds' attribute") if isinstance(y, core.RegularlySampledAnalogSignalArray): y = y(T).T dt = X.ds lengths = X.lengths X = X.data.T elif isinstance(X, np.ndarray): if dt is None: raise ValueError("'dt' is a required argument when 'X' is passed in as a numpy array!") if isinstance(y, core.RegularlySampledAnalogSignalArray): if timestamps is not None: y = y(timestamps).T else: raise ValueError("'timestamps' required when passing in 'X' as a numpy array and 'y' as a nelpy RegularlySampledAnalogSignalArray!") else: raise TypeError("'X' should be either a nelpy BinnedEventArray, or a numpy array!") n_samples, n_units = X.shape _, n_dims = y.shape print('{}-dimensional y passed in'.format(n_dims)) assert n_samples == len(y), "'X' and 'y' must have the same number" \ " of samples! len(X)=={} but len(y)=={}".format(n_samples, len(y)) if n_bins is not None: n_bins = np.atleast_1d(n_bins) assert len(n_bins) == n_dims, "'n_bins' must have one entry for each dimension in 'y'!" return X, y, dt, n_bins def fit(self, X, y, lengths=None, dt=None, timestamps=None, unit_ids=None, n_bins=None, sample_weight=None): """Fit Gaussian Naive Bayes according to X, y Parameters ---------- X : array-like, shape (n_samples, n_X_features) Training vectors, where n_samples is the number of samples and n_X_features is the number of features, typically n_units. y : array-like, shape (n_samples, n_y_features) Target values. sample_weight : array-like, shape (n_samples,), optional (default=None) Weights applied to individual samples (1. for unweighted). .. versionadded:: 0.17 Gaussian Naive Bayes supports fitting with *sample_weight*. Returns ------- self : object """ X, y, dt, n_bins = self._check_X_y_dt(X=X, y=y, lengths=lengths, dt=dt, timestamps=timestamps, n_bins=n_bins) # 1. estimate mask # 2. estimate occupancy # 3. compute spikes histogram # 4. normalize spike histogram by occupancy # 5. 
apply mask # if y.n_signals == 1: # self.tc_ = TuningCurve1D(bst=X, extern=y, n_extern=100, extmin=y.min(), extmax=y.max(), sigma=2.5, min_duration=0) # if y.n_signals == 2: # xmin, ymin = y.min() # xmax, ymax = y.max() # self.tc_ = TuningCurve2D(bst=X, extern=y, ext_nx=50, ext_ny=50, ext_xmin=xmin, ext_xmax=xmax, ext_ymin=ymin, ext_ymax=ymax, sigma=2.5, min_duration=0) @property def mode(self): return self._mode def predict(self, X, lengths=None): raise NotImplementedError def predict_proba(self, X, lengths=None): raise NotImplementedError def score(self, X, y, lengths=None): raise NotImplementedError def score_samples(self, X, y, lengths=None): raise NotImplementedError # def decode_bayes_from_ratemap_1d(X, ratemap, dt, xmin, xmax, bin_centers): # """ # X has been standardized to (n_samples, n_units), where each sample is a singleton window # """ # n_samples, n_features = X.shape # n_units, n_xbins = ratemap.shape # assert n_features == n_units, "X has {} units, whereas ratemap has {}".format(n_features, n_units) # lfx = np.log(ratemap) # eterm = -ratemap.sum(axis=0)*dt # posterior = np.empty((n_xbins, n_samples)) # posterior[:] = np.nan # # decode each sample / bin separately # for tt in range(n_samples): # obs = X[tt] # if obs.sum() > 0: # posterior[:,tt] = (np.tile(np.array(obs, ndmin=2).T, n_xbins) * lfx).sum(axis=0) + eterm # # normalize posterior: # posterior = np.exp(posterior - logsumexp(posterior, axis=0)) # mode_pth = np.argmax(posterior, axis=0)*xmax/n_xbins # mode_pth = np.where(np.isnan(posterior.sum(axis=0)), np.nan, mode_pth) # mean_pth = (bin_centers * posterior.T).sum(axis=1) # return posterior, mode_pth, mean_pth def decode_bayesian_memoryless_nd(X, *, ratemap, bin_centers, dt=1): """Memoryless Bayesian decoding (supports multidimensional decoding). Decode binned spike counts (e.g. from a BinnedSpikeTrainArray) to an external correlate (e.g. position), using a memoryless Bayesian decoder and a previously estimated ratemap. Parameters ---------- X : numpy array with shape (n_samples, n_features), where the features are generally putative units / cells, and where each sample represents spike counts in a singleton data window. ratemap : array-like of shape (n_units, n_bins_d1, ..., n_bins_dN) Expected number of spikes for each unit, within each bin, along each dimension. bin_centers : array-like with shape (n_dims, ), where each element is also an array-like with shape (n_bins_dn, ) containing the bin centers for the particular dimension. dt : float, optional (default=1) Temporal bin width corresponding to X, in seconds. NOTE: generally it is assumed that ratemap will be given in Hz (that is, it has dt=1). If ratemap has a different unit, then dt might have to be adjusted to compensate for this. This can get tricky / confusing, so the recommended approach is always to construct ratemap with dt=1, and then to use the data-specific dt here when decoding. Returns ------- posterior : numpy array of shape (n_samples, n_bins_d1, ..., n_bins_dN) Posterior probabilities for each voxel. expected_pth : numpy array of shape (n_samples, n_dims) Expected (posterior-averaged) decoded trajectory. 
""" def tile_obs(obs, *n_bins): n_units = len(obs) out = np.zeros((n_units, *n_bins)) for unit in range(n_units): out[unit,:] = obs[unit] return out n_samples, n_features = X.shape n_units = ratemap.shape[0] n_bins = np.atleast_1d(ratemap.shape[1:]) n_dims = len(n_bins) assert n_features == n_units, "X has {} units, whereas ratemap has {}".format(n_features, n_units) lfx = np.log(ratemap) eterm = -ratemap.sum(axis=0)*dt posterior = np.empty((n_samples, *n_bins)) posterior[:] = np.nan # decode each sample / bin separately for tt in range(n_samples): obs = X[tt] if obs.sum() > 0: posterior[tt] = (tile_obs(obs, *n_bins) * lfx).sum(axis=0) + eterm # normalize posterior: posterior = np.exp(posterior - logsumexp(posterior, axis=tuple(np.arange(1, n_dims+1)), keepdims=True)) if n_dims > 1: expected = [] for dd in range(1, n_dims+1): axes = tuple(set(np.arange(1, n_dims+1)) - set([dd])) expected.append((bin_centers[dd-1] * posterior.sum(axis=axes)).sum(axis=1)) expected_pth = np.vstack(expected).T else: expected_pth = (bin_centers * posterior).sum(axis=1) return posterior, expected_pth class NDRateMap(BaseEstimator): """ RateMap with persistent unit_ids and firing rates in Hz. NOTE: RateMap assumes a [uniform] isometric spacing in all dimensions of the rate map. This is only relevant when smoothing is applied. mode = ['continuous', 'discrete', 'circular'] fit(X, y) estimates ratemap [discrete, continuous, circular] predict(X) predicts firing rate synthesize(X) generates spikes based on input (inhomogenous Poisson?) Parameters ---------- connectivity : string ['continuous', 'discrete', 'circular'], optional Defines how smoothing is applied. If 'discrete', then no smoothing is applied. Default is 'continuous'. """ def __init__(self, connectivity='continuous'): self.connectivity = connectivity self._slicer = UnitSlicer(self) self.loc = ItemGetter_loc(self) self.iloc = ItemGetter_iloc(self) def __repr__(self): r = super().__repr__() if self._is_fitted(): dimstr = '' for dd in range(self.n_dims): dimstr += ", n_bins_d{}={}".format(dd+1, self.shape[dd+1]) r += ' with shape (n_units={}{})'.format(self.n_units, dimstr) return r def fit(self, X, y, dt=1, unit_ids=None): """Fit firing rates Parameters ---------- X : array-like, with shape (n_dims, ), each element of which has shape (n_bins_dn, ) for n=1, ..., N; N=n_dims. Bin locations (centers) where ratemap is defined. y : array-like, shape (n_units, n_bins_d1, ..., n_bins_dN) Expected number of spikes in a temporal bin of width dt, for each of the predictor bins specified in X. dt : float, optional (default=1) Temporal bin size with which firing rate y is defined. For example, if dt==1, then the firing rate is in Hz. If dt==0.001, then the firing rate is in kHz, and so on. unit_ids : array-like, shape (n_units,), optional (default=None) Persistent unit IDs that are used to associate units after permutation. Unit IDs are inherited from nelpy.core.BinnedEventArray objects, or initialized to np.arange(n_units). 
""" n_units, n_bins, n_dims = self._check_X_y(X, y) self.ratemap_ = y/dt self._bin_centers = X self._bins = np.array(n_dims*[None]) if n_dims > 1: for dd in range(n_dims): bin_centers = np.squeeze(X[dd]) dx = np.median(np.diff(bin_centers)) bins = np.insert(bin_centers[-1] + np.diff(bin_centers)/2, 0, bin_centers[0] - dx/2) bins = np.append(bins, bins[-1] + dx) self._bins[dd] = bins else: bin_centers = np.squeeze(X) dx = np.median(np.diff(bin_centers)) bins = np.insert(bin_centers[-1] + np.diff(bin_centers)/2, 0, bin_centers[0] - dx/2) bins = np.append(bins, bins[-1] + dx) self._bins = bins if unit_ids is not None: if len(unit_ids) != n_units: raise ValueError("'unit_ids' must have same number of elements as 'n_units'. {} != {}".format(len(unit_ids), n_units)) self._unit_ids = unit_ids else: self._unit_ids =
np.arange(n_units)
numpy.arange
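decode_bayesian_memoryless_nd above is the standard memoryless Poisson decoder: lfx holds the log rate maps and eterm the no-spike term, so for each time bin the unnormalized log posterior over position bins x is

    log P(x | n_1, ..., n_U) = const + \sum_u n_u \log \lambda_u(x) - \Delta t \sum_u \lambda_u(x),

with n_u the spike count of unit u in the bin and \lambda_u(x) the rate-map value. The posterior is then normalized per sample via logsumexp, and expected_pth is the posterior-weighted average of the bin centers along each dimension.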
#!/usr/bin/env python
"""A simple viewer for Jones patterns for dual-polarized representations."""
import argparse
import numpy
import matplotlib.pyplot as plt
from antpat.reps.sphgridfun.pntsonsphere import ZenHemisphGrid
from antpat.dualpolelem import DualPolElem, jones2gIXR, IXRJ2IXRM
from antpat.reps.hamaker import convLOFARcc2DPE
import antpat.io.filetypes as antfiles


def plotJonesCanonical(theta, phi, jones, dpelemname):
    normalize = True
    dbscale = True
    polarplt = True
    IXRTYPE = 'IXR_J'  # Can be IXR_J or IXR_M

    g, IXRJ = jones2gIXR(jones)
    IXRM = IXRJ2IXRM(IXRJ)
    if IXRTYPE == 'IXR_J':
        IXR = IXRJ
    elif IXRTYPE == 'IXR_M':
        IXR = IXRM
    else:
        raise RuntimeError("""Error: IXR type {} unknown.
                           Known types are IXR_J, IXR_M.""".format(IXRTYPE))

    fig = plt.figure()
    fig.suptitle(dpelemname)
    plt.subplot(121, polar=polarplt)
    if normalize:
        g_max = numpy.max(g)
        g = g/g_max
    if dbscale:
        g = 20*
numpy.log10(g)
numpy.log10
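plotJonesCanonical converts the normalized gain to decibels with 20*log10 because g is an amplitude (field) quantity; a power quantity would use 10*log10 instead. A tiny illustration (only numpy assumed, matching the module's plain `numpy` import):

import numpy

g = numpy.array([1.0, 0.5, 0.1])   # normalized field amplitudes
g_db = 20 * numpy.log10(g)         # -> [0., ~-6.02, -20.]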
# -*- coding: utf-8 -*-
"""
"""
import pickle

import numpy as np
import pytest
from cslug import ptr

from rockhopper import RaggedArray
from rockhopper._ragged_array import _2_power, _big_endian

pytestmark = pytest.mark.order(3)


def test_2_power():
    assert _2_power(np.int8) == 0
    assert _2_power(np.int16) == 1
    assert _2_power(np.int32) == 2
    assert _2_power(np.int64) == 3
    assert _2_power(np.uint64) == 3

    with pytest.raises(TypeError):
        _2_power(1)

    for dtype in [np.uint8, np.int8, np.intc, np.int16, np.uint32, np.int64]:
        assert 1 << _2_power(dtype) == dtype().itemsize


@pytest.mark.parametrize("dtype", [np.uint8, np.uint16, np.uint32])
@pytest.mark.parametrize("byteorder", "<>=|")
def test_dump_load(dtype, byteorder):
    dtype = np.dtype(dtype).newbyteorder(byteorder)
    flat = np.arange(5, dtype=np.int8)
    self = RaggedArray.from_lengths(flat, [2, 3, 0])

    _byteorder = "big" if _big_endian(dtype) else "little"
    _bin_int = lambda x: int.to_bytes(x, dtype.itemsize, _byteorder)

    bin = self.dumps(ldtype=dtype)
    target = (_bin_int(2), flat[0:2].tobytes(),
              _bin_int(3), flat[2:5].tobytes(),
              _bin_int(0), b"")  # yapf: disable

    # Convert to lists only to make the pytest traceback more readable.
    assert list(bin) == list(b"".join(target))

    from rockhopper._ragged_array import slug
    assert slug.dll.count_rows(ptr(bin), len(bin), _2_power(dtype),
                               _big_endian(dtype), flat.itemsize) == len(self)

    with pytest.raises(ValueError):
        RaggedArray.loads(bin.tobytes() + b"\x01", dtype=self.dtype,
                          ldtype=dtype)

    parsed, consumed = RaggedArray.loads(bin, dtype=self.dtype, ldtype=dtype)
    assert np.array_equal(self.starts, parsed.starts)
    assert
np.array_equal(self.ends, parsed.ends)
numpy.array_equal
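test_dump_load pins down the serialized layout: each row is written as a length prefix in ldtype (respecting its byte order) followed by the row's raw bytes. A hedged pure-numpy sketch of walking that layout, independent of the rockhopper C helper (the function name is illustrative):

import numpy as np

def iter_rows(buf, dtype, ldtype):
    # Yield each row of a <length prefix><row bytes> buffer as an array.
    dtype, ldtype = np.dtype(dtype), np.dtype(ldtype)
    offset = 0
    while offset < len(buf):
        n = int(np.frombuffer(buf, ldtype, count=1, offset=offset)[0])
        offset += ldtype.itemsize
        yield np.frombuffer(buf, dtype, count=n, offset=offset)
        offset += n * dtype.itemsize

For the array built above this yields rows of lengths 2, 3 and 0, mirroring what RaggedArray.loads reconstructs.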
""" Functions are useful statistical untilities for data processing in the ANN Notes ----- Author : <NAME> Date : 15 July 2020 Usage ----- [1] rmse(a,b) [2] pickSmileModels(data,modelGCMs,pickSMILE) [3] remove_annual_mean(data,data_obs,lats,lons,lats_obs,lons_obs) [4] remove_merid_mean(data,data_obs) [5] remove_observations_mean(data,data_obs,lats,lons) [6] calculate_anomalies(data,data_obs,lats,lons,baseline,yearsall) [7] remove_ensemble_mean(data,ravel_modelens,ravelmodeltime,rm_standard_dev,numOfEns) [8] remove_ocean(data,data_obs) [9] remove_land(data,data_obs) [10] standardize_data(Xtrain,Xtest) [11] standardize_dataSEPARATE(Xtrain,Xtest): [12] rm_standard_dev(var,window,ravelmodeltime,numOfEns) [13] rm_variance_dev(var,window) [14] addNoiseTwinSingle(data,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds) [15] smoothedEnsembles(data,lat_bounds,lon_bounds) """ def rmse(a,b): """ Calculates the root mean squared error takes two variables, a and b, and returns value """ ### Import modules import numpy as np ### Calculate RMSE rmse_stat = np.sqrt(np.mean((a - b)**2)) return rmse_stat ############################################################################### def pickSmileModels(data,modelGCMs,pickSMILE): """ Select models to analyze if using a subset """ ### Pick return indices of models lenOfPicks = len(pickSMILE) indModels = [i for i, item in enumerate(modelGCMs) if item in pickSMILE] ### Slice data if data.shape[0] == len(modelGCMs): if len(indModels) == lenOfPicks: modelSelected = data[indModels] else: print(ValueError('Something is wrong with the indexing of the models!')) else: print(ValueError('Something is wrong with the order of the data!')) return modelSelected ############################################################################### def remove_annual_mean(data,data_obs,lats,lons,lats_obs,lons_obs): """ Removes annual mean from data set """ ### Import modulates import numpy as np import calc_Utilities as UT ### Create 2d grid lons2,lats2 = np.meshgrid(lons,lats) lons2_obs,lats2_obs = np.meshgrid(lons_obs,lats_obs) ### Calculate weighted average and remove mean data = data - UT.calc_weightedAve(data,lats2)[:,:,:,np.newaxis,np.newaxis] data_obs = data_obs - UT.calc_weightedAve(data_obs,lats2_obs)[:,np.newaxis,np.newaxis] return data,data_obs ############################################################################### def remove_merid_mean(data,data_obs,lats,lons,lats_obs,lons_obs): """ Removes meridional mean from data set """ ### Import modules import numpy as np ### Remove mean of latitude data = data - np.nanmean(data,axis=3)[:,:,:,np.newaxis,:] data_obs = data_obs - np.nanmean(data_obs,axis=1)[:,np.newaxis,:] return data,data_obs ############################################################################### def remove_observations_mean(data,data_obs,lats,lons): """ Removes observations to calculate model biases """ ### Import modules import numpy as np ### Remove observational data databias = data - data_obs[np.newaxis,np.newaxis,:,:,:] return databias ############################################################################### def calculate_anomalies(data,data_obs,lats,lons,baseline,yearsall): """ Calculates anomalies for each model and observational data set. 
Note that it assumes the years at the moment """ ### Import modules import numpy as np ### Select years to slice minyr = baseline.min() maxyr = baseline.max() yearq = np.where((yearsall >= minyr) & (yearsall <= maxyr))[0] if data.ndim == 5: ### Slice years modelnew = data[:,:,yearq,:,:] obsnew = data_obs[yearq,:,:] ### Average climatology meanmodel = np.nanmean(modelnew[:,:,:,:,:],axis=2) meanobs = np.nanmean(obsnew,axis=0) ### Calculate anomalies modelanom = data[:,:,:,:,:] - meanmodel[:,:,np.newaxis,:,:] obsanom = data_obs[:,:,:] - meanobs[:,:] else: obsnew = data_obs[yearq,:,:] ### Average climatology meanobs = np.nanmean(obsnew,axis=0) ### Calculate anomalies obsanom = data_obs[:,:,:] - meanobs[:,:] modelanom = np.nan print('NO MODEL ANOMALIES DUE TO SHAPE SIZE!!!') return modelanom,obsanom ############################################################################### def remove_ensemble_mean(data,ravel_modelens,ravelmodeltime,rm_standard_dev,numOfEns): """ Removes ensemble mean """ ### Import modulates import numpy as np ### Remove ensemble mean if data.ndim == 4: datameangoneq = data - np.nanmean(data,axis=0) elif data.ndim == 5: ensmeanmodel = np.nanmean(data,axis=1) datameangoneq = np.empty((data.shape)) for i in range(data.shape[0]): datameangoneq[i,:,:,:,:] = data[i,:,:,:,:] - ensmeanmodel[i,:,:,:] print('Completed: Ensemble mean removed for model %s!' % (i+1)) if ravel_modelens == True: datameangone = np.reshape(datameangoneq,(datameangoneq.shape[0]*datameangoneq.shape[1], datameangoneq.shape[2], datameangoneq.shape[3], datameangoneq.shape[4])) else: datameangone = datameangoneq if rm_standard_dev == False: if ravelmodeltime == True: datameangone = np.reshape(datameangoneq,(datameangoneq.shape[0]*datameangoneq.shape[1]*datameangoneq.shape[2], datameangoneq.shape[3], datameangoneq.shape[4])) else: datameangone = datameangoneq return datameangone ############################################################################### def remove_ocean(data,data_obs,lat_bounds,lon_bounds): """ Masks out the ocean for land_only == True """ ### Import modules import numpy as np from netCDF4 import Dataset import calc_dataFunctions as df ### Read in land mask directorydata = '/Users/zlabe/Data/masks/' filename = 'lsmask_19x25.nc' datafile = Dataset(directorydata + filename) maskq = datafile.variables['nmask'][:] lats = datafile.variables['latitude'][:] lons = datafile.variables['longitude'][:] datafile.close() mask,lats,lons = df.getRegion(maskq,lats,lons,lat_bounds,lon_bounds) ### Mask out model and observations datamask = data * mask data_obsmask = data_obs * mask ### Check for floats datamask[np.where(datamask==0.)] = 0 data_obsmask[np.where(data_obsmask==0.)] = 0 return datamask, data_obsmask ############################################################################### def remove_land(data,data_obs,lat_bounds,lon_bounds): """ Masks out the ocean for ocean_only == True """ ### Import modules import numpy as np from netCDF4 import Dataset import calc_dataFunctions as df ### Read in ocean mask directorydata = '/Users/zlabe/Data/masks/' filename = 'ocmask_19x25.nc' datafile = Dataset(directorydata + filename) maskq = datafile.variables['nmask'][:] lats = datafile.variables['latitude'][:] lons = datafile.variables['longitude'][:] datafile.close() mask,lats,lons = df.getRegion(maskq,lats,lons,lat_bounds,lon_bounds) ### Mask out model and observations datamask = data * mask data_obsmask = data_obs * mask ### Check for floats datamask[np.where(datamask==0.)] = 0 
data_obsmask[np.where(data_obsmask==0.)] = 0 return datamask, data_obsmask ############################################################################### def standardize_data(Xtrain,Xtest): """ Standardizes training and testing data """ ### Import modulates import numpy as np Xmean = np.mean(Xtrain,axis=0) Xstd = np.std(Xtrain,axis=0) Xtest = (Xtest - Xmean)/Xstd Xtrain = (Xtrain - Xmean)/Xstd stdVals = (Xmean,Xstd) stdVals = stdVals[:] ### If there is a nan (like for land/ocean masks) if np.isnan(np.min(Xtrain)) == True: Xtrain[np.isnan(Xtrain)] = 0 Xtest[np.isnan(Xtest)] = 0 print('--THERE WAS A NAN IN THE STANDARDIZED DATA!--') return Xtrain,Xtest,stdVals ############################################################################### def standardize_dataSEPARATE(Xtrain,Xtest): """ Standardizes training and testing data for each climate model separately """ ### Import modulates import numpy as np Xtrainq = Xtrain.reshape(8,12*70,Xtrain.shape[1]//144,144) Xtrainallq = Xtrain.reshape(8,12,70,Xtrain.shape[1]//144,144) Xtestallq = Xtest.reshape(8,4,70,Xtest.shape[1]//144,144) Xmean = np.mean(Xtrainq,axis=1)[:,np.newaxis,np.newaxis,:,:] Xstd = np.std(Xtrainq,axis=1)[:,np.newaxis,np.newaxis,:,:] Xtest = ((Xtestallq - Xmean)/Xstd).reshape(Xtestallq.shape[0]*Xtestallq.shape[1]*Xtestallq.shape[2],Xtestallq.shape[3]*Xtestallq.shape[4]) Xtrain = ((Xtrainallq - Xmean)/Xstd).reshape(Xtrainallq.shape[0]*Xtrainallq.shape[1]*Xtrainallq.shape[2],Xtrainallq.shape[3]*Xtrainallq.shape[4]) stdVals = (Xmean,Xstd) stdVals = stdVals[:] ### If there is a nan (like for land/ocean masks) if np.isnan(np.min(Xtrain)) == True: Xtrain[np.isnan(Xtrain)] = 0 Xtest[np.isnan(Xtest)] = 0 print('--THERE WAS A NAN IN THE STANDARDIZED DATA!--') return Xtrain,Xtest,stdVals ############################################################################### def rm_standard_dev(var,window,ravelmodeltime,numOfEns): """ Smoothed standard deviation """ import pandas as pd import numpy as np print('\n\n-----------STARTED: Rolling std!\n\n') if var.ndim == 3: rollingstd = np.empty((var.shape)) for i in range(var.shape[1]): for j in range(var.shape[2]): series = pd.Series(var[:,i,j]) rollingstd[:,i,j] = series.rolling(window).std().to_numpy() elif var.ndim == 4: rollingstd = np.empty((var.shape)) for ens in range(var.shape[0]): for i in range(var.shape[2]): for j in range(var.shape[3]): series = pd.Series(var[ens,:,i,j]) rollingstd[ens,:,i,j] = series.rolling(window).std().to_numpy() elif var.ndim == 5: varn = np.reshape(var,(var.shape[0]*var.shape[1],var.shape[2],var.shape[3],var.shape[4])) rollingstd = np.empty((varn.shape)) for ens in range(varn.shape[0]): for i in range(varn.shape[2]): for j in range(varn.shape[3]): series = pd.Series(varn[ens,:,i,j]) rollingstd[ens,:,i,j] = series.rolling(window).std().to_numpy() newdataq = rollingstd[:,window:,:,:] if ravelmodeltime == True: newdata = np.reshape(newdataq,(newdataq.shape[0]*newdataq.shape[1], newdataq.shape[2],newdataq.shape[3])) else: newdata = np.reshape(newdataq,(newdataq.shape[0]//numOfEns,numOfEns,newdataq.shape[1], newdataq.shape[2],newdataq.shape[3])) print('-----------COMPLETED: Rolling std!\n\n') return newdata ############################################################################### def rm_variance_dev(var,window,ravelmodeltime): """ Smoothed variance """ import pandas as pd import numpy as np print('\n\n-----------STARTED: Rolling vari!\n\n') rollingvar = np.empty((var.shape)) for ens in range(var.shape[0]): for i in range(var.shape[2]): for j in range(var.shape[3]): 
series = pd.Series(var[ens,:,i,j]) rollingvar[ens,:,i,j] = series.rolling(window).var().to_numpy() newdataq = rollingvar[:,window:,:,:] if ravelmodeltime == True: newdata = np.reshape(newdataq,(newdataq.shape[0]*newdataq.shape[1], newdataq.shape[2],newdataq.shape[3])) else: newdata = newdataq print('-----------COMPLETED: Rolling vari!\n\n') return newdata ############################################################################### def addNoiseTwinSingle(data,data_obs,integer,sizeOfTwin,random_segment_seed,maskNoiseClass,lat_bounds,lon_bounds): """ Calculate an additional class of noise added to the original data """ import numpy as np import sys print('\n----------- USING EXPERIMENT CLASS #%s -----------' % sizeOfTwin) if sizeOfTwin == 1: """ Adds random noise to each grid point """ newmodels = data.copy() if newmodels.shape[0] > 7: newmodels = newmodels[:7,:,:,:,:] dataRandNoise = np.random.randint(low=-integer,high=integer+1,size=newmodels.shape) # dataRandNoise = np.random.uniform(low=-integer,high=integer,size=newmodels.shape) randomNoiseTwinq = newmodels + dataRandNoise randomNoiseTwin = randomNoiseTwinq.reshape(randomNoiseTwinq.shape[0]*randomNoiseTwinq.shape[1], randomNoiseTwinq.shape[2],randomNoiseTwinq.shape[3], randomNoiseTwinq.shape[4]) print('--Size of noise twin --->',randomNoiseTwin.shape) print('<<Added noise of +-%s at every grid point for twin!>>' % integer) ### Calculating random subsample if random_segment_seed == None: random_segment_seed = int(int(
np.random.randint(1, 100000)
numpy.random.randint
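A minimal, self-contained sketch of the seed-selection pattern the row above is building towards: draw a segment seed with numpy.random.randint only when none was supplied, then fix the RNG with it so the random ensemble segment is reproducible. The function name and the print are illustrative, not part of the dataset row.

import numpy as np

def pick_segment_seed(random_segment_seed=None, low=1, high=100000):
    # Draw a fresh seed only when none was supplied, then fix NumPy's RNG
    # with it so the same random segment can be reproduced later.
    if random_segment_seed is None:
        random_segment_seed = int(np.random.randint(low, high))
    np.random.seed(random_segment_seed)
    return random_segment_seed

seed = pick_segment_seed()
print('Using segment seed:', seed)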
# Imports import numpy as np import pandas as pd import os import pickle from os.path import join from time import time import pwlf from sklearn.model_selection import KFold from sklearn.preprocessing import PolynomialFeatures from sklearn.pipeline import make_pipeline from sklearn.linear_model import LinearRegression from sklearn.metrics import log_loss def node_scores_pwlf_with_crossvalidation(p_hat, y, n_splits = 5, add_error = False, seed = 0, max_nodes = 15, degree = 1): node_scores = [0]*(max_nodes+1) node_ECEs_abs = [0]*(max_nodes+1) node_ECEs_square = [0]*(max_nodes+1) node_loss = [0]*(max_nodes+1) node_scores[0] = np.inf node_ECEs_abs[0] = np.inf node_ECEs_square[0] = np.inf node_loss[0] = np.inf all_weights = [] all_cv_scores = [] for n_nodes in range(1, max_nodes+1, 1): weights = [] cv_scores = [] cv_ECE_square = [] cv_ECE_abs = [] cv_loss = [] print("Nodes:", n_nodes) start_cv = time() kf = KFold(n_splits=n_splits, shuffle=True, random_state=seed) ij = 0 for train_index, test_index in kf.split(p_hat): try: p_hat_train, p_hat_test = p_hat[train_index], p_hat[test_index] y_train, y_test = y[train_index], y[test_index] if n_nodes == 1: model= make_pipeline(PolynomialFeatures(degree),LinearRegression()) model.fit(p_hat_train.reshape(-1,1), y_train) c_hat_test = model.predict(p_hat_test.reshape(-1,1)) c_hat_train = model.predict(p_hat_train.reshape(-1,1)) else: model = pwlf.PiecewiseLinFit(p_hat_train, y_train, degree = degree) h = model.fit(n_nodes) c_hat_test = model.predict(p_hat_test) c_hat_train = model.predict(p_hat_train) cv_scores.append(np.mean((c_hat_test - y_test)**2)) # Squared error cv_ECE_square.append(np.mean((c_hat_test - p_hat_test)**2)) cv_ECE_abs.append(np.mean(np.abs(c_hat_test - p_hat_test))) cv_loss.append(np.mean(np.square(c_hat_train - y_train))) # Train loss print("Split:", ij) ij += 1 except: print("error for " + str(n_nodes) + " with method pwlf") node_scores[n_nodes] += 9999 node_scores[n_nodes] +=
np.mean(cv_scores)
numpy.mean
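A hedged sketch of how the per-fold scores collected above are typically reduced: average the cross-validation errors for each candidate node count with numpy.mean and keep the smallest. The dictionary of fold errors below is made up for illustration and stands in for the cv_scores / node_scores variables in the snippet.

import numpy as np

# Illustrative per-fold squared errors for two candidate node counts
cv_scores_per_nodes = {1: [0.12, 0.10, 0.11], 2: [0.09, 0.08, 0.10]}

node_scores = {}
for n_nodes, fold_errors in cv_scores_per_nodes.items():
    node_scores[n_nodes] = np.mean(fold_errors)  # average over the k folds

best_nodes = min(node_scores, key=node_scores.get)
print('best node count:', best_nodes, 'mean CV error:', node_scores[best_nodes])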
import numpy as np

def mean_squared_error(predicted_output, target_matrix):
    # Sum the squared differences over the output units (axis 0),
    # then average across the samples in the batch.
    delta = predicted_output - target_matrix
    squared_errors = np.sum(np.square(delta), axis=0)
    return np.mean(squared_errors)

def cross_entropy(predicted_output, target_matrix):
    # Negative log-likelihood of the predicted class probabilities.
    predicted_output_log = -
np.log(predicted_output)
numpy.log
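The cross_entropy row above is truncated at the np.log call; the sketch below is one plausible, numerically safer way such a function is usually completed. The clipping epsilon and the averaging over samples are assumptions, not taken from the source.

import numpy as np

def cross_entropy_safe(predicted_output, target_matrix, eps=1e-12):
    # Clip probabilities away from 0 so the logarithm stays finite,
    # sum the per-class terms (axis 0) and average over the samples.
    p = np.clip(predicted_output, eps, 1.0)
    return np.mean(np.sum(-target_matrix * np.log(p), axis=0))

targets = np.array([[1.0, 0.0], [0.0, 1.0]])
preds = np.array([[0.9, 0.2], [0.1, 0.8]])
print(cross_entropy_safe(preds, targets))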
#!/usr/bin/python # # NAME: Plot_ColorMap.py # # PURPOSE: # This routine will be used for plotting the colormap from the input # data consisting of 2D images of the vector field. The output image # will be stored as a tiff color image. There are options to save it # using custom RGB colorwheel, or standard HSV colorwheel. # # CALLING SEQUENCE: # result = Plot_ColorMap(Bx = Bx, By = By, hsvwheel = True, # filename = filename) # # PARAMETERS: # Bx : 2D Array consisting of the x-component of the vector field # By : 2D Array consisting of the y-component of the vector field # hsvwheel : If True then colorimage using the standard HSV scheme or using the custom RGB scheme. # filename : The output filename to be used for saving the color image. If not provided, default # Vector_ColorMap.jpeg will be used. # # RETURNS: # result : A (M x N x 3) array containing the color-image. # # AUTHOR: # <NAME>, ANL, 07.Aug.2017. #---------------------------------------------------------------------------------------------------- #import necessary modules import numpy as np from skimage import io as skimage_io from skimage import color as skimage_color from matplotlib import colors as mt_cols def Plot_ColorMap(Bx = np.random.rand(256,256), By = np.random.rand(256,256), \ hsvwheel = False, filename = 'Vector_ColorMap.jpeg'): # first get the size of the input data [dimx,dimy] = Bx.shape #inset colorwheel size - 100 px csize = 100 #co-ordinate arrays for colorwheel. line = np.arange(csize) - float(csize/2) [X,Y] = np.meshgrid(line,line,indexing = 'xy') th = np.arctan2(Y,X) h_col = (th + np.pi)/2/np.pi rr = np.sqrt(X**2 + Y**2) msk = np.zeros(rr.shape) msk[np.where(rr <= csize/2)] = 1.0 rr *= msk rr /= np.amax(rr) val_col = np.ones(rr.shape) * msk #Compute the maximum in magnitude BB = sqrt(Bx^2 + By^2) mmax = np.amax(np.sqrt(Bx**2 + By**2)) # Normalize with respect to max. Bx /= float(mmax) By /= float(mmax) #Compute the magnitude and scale between 0 and 1 Bmag = np.sqrt(Bx**2 + By**2) if hsvwheel: # Here we will proceed with using the standard HSV colorwheel routine. # Get the Hue (angle) as By/Bx and scale between [0,1] hue = (np.arctan2(By,Bx) + np.pi)/2/np.pi # Array to hold the colorimage. color_im = np.zeros([dimx+csize, dimy, 3]) #First the Hue. color_im[0:dimx,0:dimy,0] = hue # Then the Sat. color_im[0:dimx,0:dimy,1] = Bmag # Then the Val. color_im[0:dimx,0:dimy,2] = np.ones([dimx,dimy]) # Store the colorwheel in the image color_im[dimx:,dimy/2-csize/2:dimy/2+csize/2,0] = h_col color_im[dimx:,dimy/2-csize/2:dimy/2+csize/2,1] = rr color_im[dimx:,dimy/2-csize/2:dimy/2+csize/2,2] = val_col # Convert to RGB image. rgb_image = mt_cols.hsv_to_rgb(color_im) else: #Here we proceed with custom RGB colorwheel. #Arrays for each RGB channel red = np.zeros([dimx,dimy]) gr = np.zeros([dimx,dimy]) blue = np.zeros([dimx,dimy]) #Scale the magnitude between 0 and 255 cmag = Bmag #* 255.0 #Compute the cosine of the angle cang = Bx / cmag #Compute the sine of the angle sang = np.sqrt(1.0 - cang**2) #first the green component qq =
np.where((Bx < 0.0) & (By >= 0.0))
numpy.where
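A compact illustration of the quadrant-masking step the custom RGB colorwheel code is setting up: numpy.where on sign conditions returns the indices of pixels whose field vector falls in a given quadrant, which can then be written into one colour channel. Array sizes and the random field are arbitrary.

import numpy as np

rng = np.random.default_rng(0)
Bx = rng.normal(size=(4, 4))
By = rng.normal(size=(4, 4))

# Indices of pixels whose vector points into the second quadrant
qq = np.where((Bx < 0.0) & (By >= 0.0))

green = np.zeros_like(Bx)
green[qq] = np.sqrt(Bx[qq] ** 2 + By[qq] ** 2)  # colour only those pixels
print(len(qq[0]), 'pixels fall in the second quadrant')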
#!/usr/bin/python
# Filename: PIV.py
################### IMPORTS #######################
import pandas as pd
from pandas import DataFrame
import numpy as np
###################################################
############# READ IN PIV DATA #####################
#Function
#this function will read in PIV data output from LaVision software
#The form of the input data is .txt files with 4 columns
#column 1 = X location (mm)
#column 2 = Y Location (mm)
#column 3 = U velocity (m/sec)
#column 4 = V velocity (m/sec)
#UPDATED: 1-20-2016
#INPUTS, base_name_input = Name of Folder with PIV data inside it
#        data_sets = number of data sets (txt files) in folder
#        size = pixel size (32 x 32)
#OUTPUTS,
#        col 1 = time
#        col 2 = test data
#        col 3 = delta t
def piv_readin(base_name_input, data_sets, size):
    #initialize data
    temp_u = np.ndarray([data_sets-1, size, size])
    temp_v = np.ndarray([data_sets-1, size, size])
    count = 0
    x_range = np.arange(1, data_sets)
    for i in x_range:
        #create file name for each txt file
        loc = base_name_input + '/B' + str('{0:05}'.format(i)) + '.txt'
        #read in txt file but skip first row
        temp = pd.read_csv(loc, sep='\t', skiprows=1, header=None)
        #rename columns to the designated DaVis output
        temp.columns = ['Xlocation (mm)', 'Ylocation (mm)', 'U (m/sec)', 'V (m/sec)']
        #reorganize into separate arrays
        temp_x = np.array(
np.reshape(temp['Xlocation (mm)'], (size, -1))
numpy.reshape
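A minimal sketch of the reshape idiom the PIV reader relies on: each flat column of the vector-field table is folded back into a size-by-size grid with numpy.reshape. The synthetic DataFrame below stands in for the LaVision text output; only its column names are taken from the row above.

import numpy as np
import pandas as pd

size = 4
flat = pd.DataFrame({
    'Xlocation (mm)': np.arange(size * size, dtype=float),
    'U (m/sec)': np.random.rand(size * size),
})

# Fold each flat column back into a (size, size) grid
x_grid = np.array(np.reshape(flat['Xlocation (mm)'], (size, -1)))
u_grid = np.array(np.reshape(flat['U (m/sec)'], (size, -1)))
print(x_grid.shape, u_grid.shape)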
from statsmodels.compat.numpy import lstsq from statsmodels.compat.pandas import assert_index_equal from statsmodels.compat.platform import PLATFORM_WIN from statsmodels.compat.python import lrange import os import warnings import numpy as np from numpy.testing import ( assert_, assert_allclose, assert_almost_equal, assert_equal, assert_raises, ) import pandas as pd from pandas import DataFrame, Series, date_range import pytest from scipy.interpolate import interp1d from statsmodels.datasets import macrodata, modechoice, nile, randhie, sunspots from statsmodels.tools.sm_exceptions import ( CollinearityWarning, InfeasibleTestError, InterpolationWarning, MissingDataError, ) # Remove imports when range unit root test gets an R implementation from statsmodels.tools.validation import array_like, bool_like from statsmodels.tsa.arima_process import arma_acovf from statsmodels.tsa.statespace.sarimax import SARIMAX from statsmodels.tsa.stattools import ( acf, acovf, adfuller, arma_order_select_ic, breakvar_heteroskedasticity_test, ccovf, coint, grangercausalitytests, innovations_algo, innovations_filter, kpss, levinson_durbin, levinson_durbin_pacf, pacf, pacf_burg, pacf_ols, pacf_yw, range_unit_root_test, zivot_andrews, ) DECIMAL_8 = 8 DECIMAL_6 = 6 DECIMAL_5 = 5 DECIMAL_4 = 4 DECIMAL_3 = 3 DECIMAL_2 = 2 DECIMAL_1 = 1 CURR_DIR = os.path.dirname(os.path.abspath(__file__)) @pytest.fixture(scope="module") def acovf_data(): rnd = np.random.RandomState(12345) return rnd.randn(250) class CheckADF(object): """ Test Augmented Dickey-Fuller Test values taken from Stata. """ levels = ["1%", "5%", "10%"] data = macrodata.load_pandas() x = data.data["realgdp"].values y = data.data["infl"].values def test_teststat(self): assert_almost_equal(self.res1[0], self.teststat, DECIMAL_5) def test_pvalue(self): assert_almost_equal(self.res1[1], self.pvalue, DECIMAL_5) def test_critvalues(self): critvalues = [self.res1[4][lev] for lev in self.levels] assert_almost_equal(critvalues, self.critvalues, DECIMAL_2) class TestADFConstant(CheckADF): """ Dickey-Fuller test for unit root """ @classmethod def setup_class(cls): cls.res1 = adfuller(cls.x, regression="c", autolag=None, maxlag=4) cls.teststat = 0.97505319 cls.pvalue = 0.99399563 cls.critvalues = [-3.476, -2.883, -2.573] class TestADFConstantTrend(CheckADF): """""" @classmethod def setup_class(cls): cls.res1 = adfuller(cls.x, regression="ct", autolag=None, maxlag=4) cls.teststat = -1.8566374 cls.pvalue = 0.67682968 cls.critvalues = [-4.007, -3.437, -3.137] # FIXME: do not leave commented-out # class TestADFConstantTrendSquared(CheckADF): # """ # """ # pass # TODO: get test values from R? class TestADFNoConstant(CheckADF): """""" @classmethod def setup_class(cls): with pytest.warns(FutureWarning): adfuller(cls.x, regression="nc", autolag=None, maxlag=4) cls.res1 = adfuller(cls.x, regression="n", autolag=None, maxlag=4) cls.teststat = 3.5227498 cls.pvalue = 0.99999 # Stata does not return a p-value for noconstant. 
# Tau^max in MacKinnon (1994) is missing, so it is # assumed that its right-tail is well-behaved cls.critvalues = [-2.587, -1.950, -1.617] # No Unit Root class TestADFConstant2(CheckADF): @classmethod def setup_class(cls): cls.res1 = adfuller(cls.y, regression="c", autolag=None, maxlag=1) cls.teststat = -4.3346988 cls.pvalue = 0.00038661 cls.critvalues = [-3.476, -2.883, -2.573] class TestADFConstantTrend2(CheckADF): @classmethod def setup_class(cls): cls.res1 = adfuller(cls.y, regression="ct", autolag=None, maxlag=1) cls.teststat = -4.425093 cls.pvalue = 0.00199633 cls.critvalues = [-4.006, -3.437, -3.137] class TestADFNoConstant2(CheckADF): @classmethod def setup_class(cls): cls.res1 = adfuller(cls.y, regression="n", autolag=None, maxlag=1) cls.teststat = -2.4511596 cls.pvalue = 0.013747 # Stata does not return a p-value for noconstant # this value is just taken from our results cls.critvalues = [-2.587, -1.950, -1.617] _, _1, _2, cls.store = adfuller( cls.y, regression="n", autolag=None, maxlag=1, store=True ) def test_store_str(self): assert_equal( self.store.__str__(), "Augmented Dickey-Fuller Test Results" ) class CheckCorrGram(object): """ Set up for ACF, PACF tests. """ data = macrodata.load_pandas() x = data.data["realgdp"] filename = os.path.join(CURR_DIR, "results", "results_corrgram.csv") results = pd.read_csv(filename, delimiter=",") class TestACF(CheckCorrGram): """ Test Autocorrelation Function """ @classmethod def setup_class(cls): cls.acf = cls.results["acvar"] # cls.acf = np.concatenate(([1.], cls.acf)) cls.qstat = cls.results["Q1"] cls.res1 = acf(cls.x, nlags=40, qstat=True, alpha=0.05, fft=False) cls.confint_res = cls.results[["acvar_lb", "acvar_ub"]].values def test_acf(self): assert_almost_equal(self.res1[0][1:41], self.acf, DECIMAL_8) def test_confint(self): centered = self.res1[1] - self.res1[1].mean(1)[:, None] assert_almost_equal(centered[1:41], self.confint_res, DECIMAL_8) def test_qstat(self): assert_almost_equal(self.res1[2][:40], self.qstat, DECIMAL_3) # 3 decimal places because of stata rounding # FIXME: enable/xfail/skip or delete # def pvalue(self): # pass # NOTE: should not need testing if Q stat is correct class TestACF_FFT(CheckCorrGram): # Test Autocorrelation Function using FFT @classmethod def setup_class(cls): cls.acf = cls.results["acvarfft"] cls.qstat = cls.results["Q1"] cls.res1 = acf(cls.x, nlags=40, qstat=True, fft=True) def test_acf(self): assert_almost_equal(self.res1[0][1:], self.acf, DECIMAL_8) def test_qstat(self): # todo why is res1/qstat 1 short assert_almost_equal(self.res1[1], self.qstat, DECIMAL_3) class TestACFMissing(CheckCorrGram): # Test Autocorrelation Function using Missing @classmethod def setup_class(cls): cls.x = np.concatenate((np.array([np.nan]), cls.x)) cls.acf = cls.results["acvar"] # drop and conservative cls.qstat = cls.results["Q1"] cls.res_drop = acf( cls.x, nlags=40, qstat=True, alpha=0.05, missing="drop", fft=False ) cls.res_conservative = acf( cls.x, nlags=40, qstat=True, alpha=0.05, fft=False, missing="conservative", ) cls.acf_none = np.empty(40) * np.nan # lags 1 to 40 inclusive cls.qstat_none = np.empty(40) * np.nan cls.res_none = acf( cls.x, nlags=40, qstat=True, alpha=0.05, missing="none", fft=False ) def test_raise(self): with pytest.raises(MissingDataError): acf( self.x, nlags=40, qstat=True, fft=False, alpha=0.05, missing="raise", ) def test_acf_none(self): assert_almost_equal(self.res_none[0][1:41], self.acf_none, DECIMAL_8) def test_acf_drop(self): assert_almost_equal(self.res_drop[0][1:41], self.acf, 
DECIMAL_8) def test_acf_conservative(self): assert_almost_equal( self.res_conservative[0][1:41], self.acf, DECIMAL_8 ) def test_qstat_none(self): # todo why is res1/qstat 1 short assert_almost_equal(self.res_none[2], self.qstat_none, DECIMAL_3) # FIXME: enable/xfail/skip or delete # how to do this test? the correct q_stat depends on whether nobs=len(x) is # used when x contains NaNs or whether nobs<len(x) when x contains NaNs # def test_qstat_drop(self): # assert_almost_equal(self.res_drop[2][:40], self.qstat, DECIMAL_3) class TestPACF(CheckCorrGram): @classmethod def setup_class(cls): cls.pacfols = cls.results["PACOLS"] cls.pacfyw = cls.results["PACYW"] def test_ols(self): pacfols, confint = pacf(self.x, nlags=40, alpha=0.05, method="ols") assert_almost_equal(pacfols[1:], self.pacfols, DECIMAL_6) centered = confint - confint.mean(1)[:, None] # from edited Stata ado file res = [[-0.1375625, 0.1375625]] * 40 assert_almost_equal(centered[1:41], res, DECIMAL_6) # check lag 0 assert_equal(centered[0], [0.0, 0.0]) assert_equal(confint[0], [1, 1]) assert_equal(pacfols[0], 1) def test_ols_inefficient(self): lag_len = 5 pacfols = pacf_ols(self.x, nlags=lag_len, efficient=False) x = self.x.copy() x -= x.mean() n = x.shape[0] lags = np.zeros((n - 5, 5)) lead = x[5:] direct = np.empty(lag_len + 1) direct[0] = 1.0 for i in range(lag_len): lags[:, i] = x[5 - (i + 1) : -(i + 1)] direct[i + 1] = lstsq(lags[:, : (i + 1)], lead, rcond=None)[0][-1] assert_allclose(pacfols, direct, atol=1e-8) def test_yw(self): pacfyw = pacf_yw(self.x, nlags=40, method="mle")
assert_almost_equal(pacfyw[1:], self.pacfyw, DECIMAL_8)
numpy.testing.assert_almost_equal
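A tiny example of the assertion style these statsmodels tests use throughout: numpy.testing.assert_almost_equal compares arrays to a fixed number of decimal places and raises AssertionError if they differ. The arrays below are made up for illustration.

import numpy as np
from numpy.testing import assert_almost_equal

computed = np.array([0.12345678, -0.98765432])
expected = np.array([0.12345680, -0.98765430])

# Passes: the two arrays agree to 6 decimal places
assert_almost_equal(computed, expected, 6)
print('arrays agree to 6 decimals')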
#--------------------------------------------------- # File Name: aerofoil_NACA_plotter.py # Date: 22 Jan 2019 # Author: <NAME> #--------------------------------------------------- # Importing Required Modules import numpy as np import matplotlib.pyplot as plt import sys import os # Passing the coordinate file name into our program as an argument program_name = sys.argv[0] argument = sys.argv[1] arg_len = len(argument) file_format = argument[arg_len-4:] coordinate_file = str(argument) if(not os.path.exists(coordinate_file)): print("Such a coordinate file does not exist \nExiting the program") sys.exit(1) # Ensuring that only arguments passed are python script file and coordinate file if(len(sys.argv) != 2): print("Usage: python aerofoil_NACA_plotter.py coordinatefile.csv \nFor more clarification, refer the readme file\nExiting the program") sys.exit(1) elif(file_format == '.csv'): pass elif(file_format == '.txt'): pass else: print("Coordinate File must be in '.csv' or '.txt' format only\nExiting the program") sys.exit(1) # Reading Aerofoil coordinates from the csv file file_data = np.loadtxt(coordinate_file, dtype = float, delimiter = ',') # Assigning Values to x and y coordinate arrays x = file_data[:,][:,0].reshape(len(file_data),1) y = file_data[:,][:,1].reshape(len(file_data),1) # Appending initial x & y values to the arrays to complete the contour x = np.append(x, x[0][0]) y = np.append(y, y[0][0]) # Defining min and max limits for 'matplotlib.pyplot.axis' function x_c_max = np.amax(x) + 0.05 x_c_min =
np.amin(x)
numpy.amin
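A short sketch of the axis-limit pattern the aerofoil plotter is setting up: take the data extrema with numpy.amin / numpy.amax, pad them, and pass them to matplotlib's axis call. The 0.05 padding matches the snippet above; the coordinates and output filename are synthetic.

import numpy as np
import matplotlib
matplotlib.use('Agg')  # headless backend so the script runs without a display
import matplotlib.pyplot as plt

t = np.linspace(0, 2 * np.pi, 50)
x, y = np.cos(t), 0.1 * np.sin(t)

pad = 0.05
plt.plot(x, y)
plt.axis([np.amin(x) - pad, np.amax(x) + pad,
          np.amin(y) - pad, np.amax(y) + pad])
plt.savefig('contour_demo.png')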
import numpy as np import matplotlib.pyplot as plt #%%# WAVES #################################################################### def complex_to_polar(x): return np.angle(x), np.abs(x) def sines(freqs, plot=False, N=120): freqs = freqs if isinstance(freqs, (list, np.ndarray, tuple)) else [freqs] theta = np.linspace(0, 2 * np.pi, N, endpoint=False) S = [
np.cos(f * theta)
numpy.cos
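A self-contained version of the wave-building idea in the sines row above: sample one period of theta and stack one cosine per requested frequency. Everything after the truncation point of the original function is an assumption.

import numpy as np

def cosines(freqs, N=120):
    freqs = np.atleast_1d(freqs)
    theta = np.linspace(0, 2 * np.pi, N, endpoint=False)
    # One row per frequency: f full cycles over the sampled interval
    return np.array([np.cos(f * theta) for f in freqs])

S = cosines([1, 3, 5])
print(S.shape)  # (3, 120)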
from matplotlib import pyplot as plt # Pyplot for nice graphs import numpy as np # NumPy from numpy import linalg as LA from Functions import ImportSystem from progress.bar import Bar # Retrieve unit cell xyz, shiftx, shifty, filename = ImportSystem(1) repx = int(input('Repetition in x? ')) repy = int(input('Repetition in y? ')) xyztemp = xyz for i in range(repx): shiftarr = xyz + np.array([shiftx*(i+1), 0, 0]) xyztemp = np.append(xyz, shiftarr, axis=0) print(xyz.shape) xyz = xyztemp xyztemp = xyz for i in range(repy): shiftarr = xyz + np.array([0, shifty*(i+1), 0]) xyztemp = np.append(xyz, shiftarr, axis=0) print(xyz.shape) xyz = xyztemp xlin = np.array([[0, 0]]) ylin =
np.array([[0, 0]])
numpy.array
import collections
from itertools import chain
import urllib.request as request
import pickle
import numpy as np
import scipy.signal as signal
import scipy.special as special
import scipy.optimize as optimize
import matplotlib.pyplot as plt
import skimage
from skimage import io,transform
import cv2
import libsvm
from libsvm import svmutil

#Natural Scene Statistics
def normalized(kernel):
    return kernel / np.sum(kernel)

#compute the locally normalized luminances via local mean subtraction, divided by the local deviation
def gaussian_kernel2d(n, SIGMA):
    Y, X = np.indices((n, n)) - int(n/2)
    gaussian_kernel = 1 / (2 * np.pi * SIGMA ** 2) * np.exp(-(X ** 2 + Y ** 2) / (2 * SIGMA ** 2))
    return normalized(gaussian_kernel)

#finding the local mean by applying a Gaussian filter to the image
"""
The Local Mean Field (\mu) is the Gaussian blur of the original image,
while the Local Variance Field (\sigma) is the Gaussian blur of the squared difference between the original image and \mu
"""
def local_mean(image, kernel):
    return signal.convolve2d(image, kernel, 'same')

#calculating the local deviation as the square root of the (absolute) difference between the squared local mean and the smoothed squared image
def local_deviation(image, local_mean, kernel):
    sigma = image ** 2
    sigma = signal.convolve2d(sigma, kernel, 'same')
    return np.sqrt(np.abs(local_mean ** 2 - sigma))

#MSCN coefficients are distributed as a Generalized Gaussian Distribution (GGD) for a broad spectrum of distorted images
"""
There are a few different ways to normalize an image. One such normalization is called Mean Subtracted Contrast Normalization (MSCN).
"""
def MSCN_coefficients(image, kernel_size=6, SIGMA=7/6):
    C = 1/255
    kernel = gaussian_kernel2d(kernel_size, SIGMA=SIGMA)
    local_mean = signal.convolve2d(image, kernel, 'same')
    local_var = local_deviation(image, local_mean, kernel)
    return (image - local_mean) / (local_var + C)

def generalized_gaussian_dist(x, alpha, sigma):
    beta = sigma * np.sqrt(special.gamma(1 / alpha) / special.gamma(3 / alpha))
    coefficient = alpha / (2 * beta * special.gamma(1 / alpha))
    return coefficient * np.exp(-(np.abs(x) / beta) ** alpha)

"""
adjacent coefficients also exhibit a regular structure, which gets disturbed in the presence of distortion.
To avoid this pairwise products of neighboring MSCN coefficients along four directions (1) horizontal H, (2) vertical V, (3) main-diagonal D1 and (4) secondary-diagonal D2 are used """ def calculate_pair_product_coefficients(mscn_coefficients): return collections.OrderedDict({ 'mscn': mscn_coefficients, 'horizontal': mscn_coefficients[:, :-1] * mscn_coefficients[:, 1:], 'vertical': mscn_coefficients[:-1, :] * mscn_coefficients[1:, :], 'main_diagonal': mscn_coefficients[:-1, :-1] * mscn_coefficients[1:, 1:], 'secondary_diagonal': mscn_coefficients[1:, :-1] * mscn_coefficients[:-1, 1:] }) """ we get the dimensions of the image and derive the center (x, y) coordinates """ def detect_blur_fft(image, size=60, thresh=20, vis=False): (h, w) = image.shape (cX, cY) = (int(w / 2.0), int(h / 2.0)) fft = np.fft.fft2(image) # we compute the FFT to find the frequency transform fftShift = np.fft.fftshift(fft) # shiting the zero frequency component to the center if vis: plt.imshow(image) plt.show() fftShift[cY - size:cY + size, cX - size:cX + size] = 0 # zero-out the center of the FFT shift fftShift = np.fft.ifftshift(fftShift) # apply the inverse shift so that component once again becomes the top-left recon = np.fft.ifft2(fftShift) # get the inverse fourier feature transform magnitude = 20 * np.log(np.abs(recon)) mean = np.mean(magnitude) return (mean, mean <= thresh) #mean value < thresh to signify if image is blurry or not #The methodology to fit an Asymmetric Generalized Gaussian Distribution """ an Asymmetric Generalized Gaussian Distribution (AGGD) is fit to each of the four pairwise product images. AGGD is an asymmetric form of Generalized Gaussian Fitting (GGD). It has four parameters — shape, mean, left variance and right variance """ def asymmetric_generalized_gaussian_fit(x): #Calculate γ where Nₗ is the number of negative samples and Nᵣ is the number of positive samples. def estimate_phi(ALP): numerator = special.gamma(2 / ALP) ** 2 denominator = special.gamma(1 / ALP) * special.gamma(3 / ALP) return numerator / denominator def estimate_r_hat(x): size = np.prod(x.shape) return (np.sum(np.abs(x)) / size) ** 2 / (np.sum(x ** 2) / size) #Calculate R hat using γ and r hat estimations. def estimate_R_hat(r_hat, gamma): num = (gamma ** 3 + 1) * (gamma + 1) den = (gamma ** 2 + 1) ** 2 return r_hat * num / den #calculatin mean squares using filtered values def mean_squares_sum(x, filter = lambda z: z == z): filtered_values = x[filter(x)] squares_sum =
np.sum(filtered_values ** 2)
numpy.sum
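A hedged sketch of the filtered mean-of-squares helper being defined above, as used when fitting an asymmetric GGD: the filter keeps either the negative or the positive MSCN coefficients, and their mean square gives the left/right variance estimates. The function name, the normalisation by the number of kept samples, and the synthetic coefficients are illustrative assumptions.

import numpy as np

def mean_of_squares(x, keep=lambda z: z == z):
    # Average of the squared values that pass the filter; with z < 0 or
    # z >= 0 as the filter this yields the left/right variance estimates.
    kept = x[keep(x)]
    return np.sum(kept ** 2) / kept.size

rng = np.random.default_rng(1)
mscn = rng.normal(scale=0.5, size=10000)

left_var = mean_of_squares(mscn, lambda z: z < 0)
right_var = mean_of_squares(mscn, lambda z: z >= 0)
print(left_var, right_var)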
import numpy as np from scipy.optimize import curve_fit from scipy.optimize import fsolve, brentq from scipy.interpolate import interp1d import scipy.integrate import sys import os import velociraptor_python_tools as vpt from scipy.spatial import cKDTree import h5py import re from constants import * from snapshot import * import copy import itertools class Unbuffered(object): def __init__(self, stream): self.stream = stream def write(self, data): self.stream.write(data) self.stream.flush() def writelines(self, datas): self.stream.writelines(datas) self.stream.flush() def __getattr__(self, attr): return getattr(self.stream, attr) sys.stdout = Unbuffered(sys.stdout) def getHaloCoord(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h coords = np.zeros(3) if (('Xcminpot' not in catalog.keys())):# or # (np.abs(catalog['Xcminpot'][halo])>0.1) or # (np.abs(catalog['Ycminpot'][halo])>0.1) or # (np.abs(catalog['Zcminpot'][halo])>0.1)): return getHaloCoordCOM(catalog, halo, z=z, snapshottype=snapshottype, physical=physical) if physical: coords[0] = (catalog['Xcminpot'][halo]) coords[1] = (catalog['Ycminpot'][halo]) coords[2] = (catalog['Zcminpot'][halo]) elif snapshottype in ['GADGET', 'Gadget', 'gadget']: coords[0] = (catalog['Xcminpot'][halo])*h*(1+z) coords[1] = (catalog['Ycminpot'][halo])*h*(1+z) coords[2] = (catalog['Zcminpot'][halo])*h*(1+z) elif snapshottype in ['SWIFT', 'Swift', 'swift']: coords[0] = (catalog['Xcminpot'][halo])*(1+z) coords[1] = (catalog['Ycminpot'][halo])*(1+z) coords[2] = (catalog['Zcminpot'][halo])*(1+z) else: print('Snapshottype not set') return coords def getHaloRadius(catalog, halo, z=0, rtype='R_200crit', snapshottype='GADGET', physical=False): #Mpc/h if physical: return catalog[rtype][halo] elif snapshottype in ['GADGET', 'Gadget', 'gadget']: return catalog[rtype][halo]*h*(1+z) elif snapshottype in ['SWIFT', 'Swift', 'swift']: return catalog[rtype][halo]*(1+z) def getHaloCoordCOM(catalog, halo, z=0, snapshottype='GADGET', physical=False): #Mpc/h coords = np.zeros(3) if physical: coords[0] = catalog['Xc'][halo] coords[1] = catalog['Yc'][halo] coords[2] = catalog['Zc'][halo] elif snapshottype in ['GADGET', 'Gadget', 'gadget']: coords[0] = catalog['Xc'][halo]*h*(1+z) coords[1] = catalog['Yc'][halo]*h*(1+z) coords[2] = catalog['Zc'][halo]*h*(1+z) elif snapshottype in ['SWIFT', 'Swift', 'swift']: coords[0] = catalog['Xc'][halo]*(1+z) coords[1] = catalog['Yc'][halo]*(1+z) coords[2] = catalog['Zc'][halo]*(1+z) return coords def readHaloFile(halofile): atime,tree,numhalos,halodata,cosmodata,unitdata = vpt.ReadUnifiedTreeandHaloCatalog(halofile, desiredfields=[], icombinedfile=1,iverbose=0) return atime,tree,numhalos,halodata,cosmodata,unitdata def findSurroundingHaloProperties(hp, halolist, d_snap, boxsize=32.): coords = hp['Coord'] halotree = cKDTree(coords, boxsize=boxsize) for k in halolist: if hp['R200'][k] == -1: continue halostring = hp['HaloIndex'][k] length_of_neighbours = len(np.array(halotree.query_ball_point([hp['Coord'][k]], r=hp['R200'][k]*5)[0])) distance, indices = halotree.query([hp['Coord'][k]], k=length_of_neighbours) indices = np.array(indices[0])[1:] distance = np.array(distance[0])[1:] hp['Neighbours'][halostring] = hp['HaloIndex'][indices] hp['Neighbour_distance'][halostring] = distance hp['Neighbour_Velrad'][halostring] = np.zeros(len(distance)) j=0 for i in indices: partindices = hp['Partindices'][hp['HaloIndex'][i]] hp['Neighbour_Velrad'][halostring][j] = np.sum(d_snap['File'].get_radialvelocity(hp['Coord'][k], 
indices=partindices))/len(partindices) j+=1 def fixSatelliteProblems(hp, TEMPORALHALOIDVAL=1000000000000, boxsize=32): welke = np.where(hp['Coord'][:, 0] >= 0)[0] halotree = cKDTree(hp['Coord'][welke], boxsize=boxsize) toolarge = welke[np.where(hp['R200'][welke] > hp['R200'][np.argmax(hp['n_part'])]*1.2)[0]] #print(i, toolarge) if len(toolarge) != 0: for tl in toolarge: hp['M200'][tl] = -1 hp['R200'][tl] = -1 hp['hostHaloIndex'][hp['HaloIndex'][tl]==hp['hostHaloIndex']] = -2 for halo in welke:#range(len(hp['M200'])): if hp['M200'][halo] == -1: continue buren = np.array(halotree.query_ball_point(hp['Coord'][halo], r = 2*hp['R200'][halo])) if len(buren) <= 1: continue buren = buren[hp['R200'][buren] != -1] if len(buren) == 0: continue i_largest = np.argmax(hp['n_part'][buren]) index_largest = buren[i_largest] buren = np.delete(buren,i_largest) coords = hp['Coord'][buren] - hp['Coord'][index_largest] coords = np.where(np.abs(coords) > 0.5*boxsize, coords - coords/np.abs(coords)*boxsize, coords) rad = np.sqrt(np.sum(coords*coords, axis=1)) burentemp = np.where(hp['R200'][buren]-rad+hp['R200'][index_largest] > 0)[0] if len(burentemp) == 0: continue buren = buren[burentemp] hp['hostHaloIndex'][buren] = index_largest hp['M200'][buren] = -1 hp['R200'][buren] = -1 def findSubHaloFraction(hp, catalog): if len(hp['hostHaloIndex']) < 10: hp['Msub'] = np.zeros(len(hp['M200'])) return 0 i_hostH = np.where(hp['hostHaloIndex'] > -1)[0] hp['Msub'] = np.zeros(len(hp['M200'])) for i in i_hostH: isattemp = np.where(hp['HaloID'][i] == catalog['ID'])[0] hp['Msub'][hp['hostHaloIndex'][i]] += catalog['Mass_FOF'][isattemp] def buildHaloDictionary(Hydro=None, partType=None, multiple=None): if ('DM' in partType) or ('H' in partType) or ('S' in partType): return buildHaloDictionary_nieuw(partType=partType, multiple=multiple) haloproperties = {} if partType is None: if Hydro is None: sys.exit("buildHaloDictionary should have an entry for either Hydro or partType") if partType is not None: if partType in [0, 2, 3, 4, 5]: sys.exit("Bestaat nog niet voor partType = %i" %partType) elif partType == 7: Hydro = True elif partType == 8: Hydro = True haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vmax', 'Rmax', 'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex', 'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable']) if Hydro: haloarray.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH', 'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH']) if partType == 8: haloarray.extend(['lambdaS', 'DensityS', 'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile', 'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']) for key in haloarray: if (multiple is not None) and (key=='Partindices'): haloproperties[key] = {} else: haloproperties[key] = np.zeros(0) return haloproperties def allocateSizes(key, lengte): if key in ['R200', 'M200', 'redshift', 'lambda', 'Vmax', 'Rmax', 'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp', 'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'lambdaDM', 'lambdaH', 'DMFraction', 'HFraction', 'lambdaS', 'SFraction']: return np.ones(lengte[0])*-1 if key in ['HaloIndex', 'HaloID', 'snapshot', 'Npart', 'NpartDM', 'NpartH','NpartS', 'n_part', 'MaxRadIndex', 'hostHaloIndex', 'Tail', 'Head', 
'RootHead', 'RootTail']: return np.ones(lengte[0]).astype(int)*-1 elif key in ['Coord', 'Vel']: return np.ones((lengte[0], 3))*-1 elif key in ['Density', 'AngularMomentum', 'Velrad', 'Mass_profile', 'DensityDM', 'DensityH', 'DMFraction_profile', 'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH', 'lambdaS', 'DensityS', 'SFraction_profile', 'MassS_profile','VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']: return np.zeros((lengte[0], lengte[1])) elif key in ['Npart_profile', 'NpartDM_profile', 'NpartH_profile', 'NpartS_profile']: return np.zeros((lengte[0], lengte[1])).astype(int) def buildHaloDictionary_nieuw(partType=None, multiple=None): haloproperties = {} if partType is None: sys.exit("buildHaloDictionary should have an entry for partType") haloarray = (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Density', 'Npart', 'Vmax', 'Rmax', 'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'Vel', 'Mass_profile', 'Partindices', 'n_part', 'MaxRadIndex', 'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'Tail', 'Head', 'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp', 'RootHead', 'RootTail']) if 'H' in partType: haloarray.extend(['lambdaDM', 'lambdaH', 'DensityDM', 'DensityH', 'NpartDM_profile','NpartH', 'NpartDM', 'NpartH_profile', 'DMFraction', 'DMFraction_profile', 'HFraction', 'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH']) if 'S' in partType: haloarray.extend(['lambdaS', 'DensityS', 'NpartS', 'NpartS_profile', 'SFraction', 'SFraction_profile', 'MassS_profile', 'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']) for key in haloarray: if (multiple is not None) and (key=='Partindices'): haloproperties[key] = {} elif multiple is not None: haloproperties[key] = allocateSizes(key, multiple) else: haloproperties[key] = None return haloproperties def quantity_keys(): return (['HaloIndex', 'HaloID', 'Coord', 'R200', 'M200', 'redshift', 'snapshot', 'lambda', 'Npart', 'NpartDM', 'NpartH', 'NpartS', 'Vel', 'n_part', 'Tail', 'Head', 'RootHead', 'RootTail', 'Virial_ratio', 'COM_offset', 'Msub', 'CrossTime', 'hostHaloIndex', 'MassTable', 'lambdaDM', 'lambdaH', 'lambdaS', 'DMFraction', 'HFraction', 'SFraction', 'Vmax_part', 'Rmax_part', 'Vmax_interp', 'Rmax_interp']) def profile_keys(): return (['HaloIndex', 'HaloID', 'AngularMomentum', 'Npart_profile', 'Radius', 'Velrad', 'MassTable', 'Mass_profile', 'MaxRadIndex', 'Density', 'DensityDM', 'DensityH', 'NpartH_profile', 'DMFraction_profile', 'HFraction_profile', 'MassH_profile', 'MassDM_profile', 'VelradDM', 'VelradH', 'Temperature', 'AngularMomentumDM', 'AngularMomentumH', 'NpartS_profile', 'SFraction_profile', 'MassS_profile', 'VelradB', 'VelradS', 'AgeS', 'AngularMomentumS']) def convertVel_keys(): return (['HaloIndex', 'HaloID', 'Npart', 'NpartDM', 'NpartH', 'NpartS', 'n_part', 'Vel', 'Coord', 'R200', 'M200', 'Tail', 'Head', 'RootHead', 'RootTail', 'redshift', 'snapshot', 'hostHaloIndex']) def findHaloPropertiesInSnap_nieuw(catalog, d_snap, Nhalo=100, halolist=None, startHalo=0, d_radius=None, d_partType = None, d_runparams=None, partdata = None, TEMPORALHALOIDVAL=1000000000000, boxsize=None, debug=False): #Keeping all VELOCIraptor haloes, but saving 'wrong' haloes as HaloIndex = -1 if d_runparams['VELconvert'] == False: boxsize = d_snap['File'].boxsize partType = d_partType['particle_type'] print("Computing 
properties for %i haloes in snapshot %i" %(Nhalo, d_snap['snapshot'])) if 'profile' in d_radius.keys(): ylen = len(d_radius['profile']) else: ylen = 0 haloproperties = buildHaloDictionary(partType=partType, multiple=[Nhalo, ylen]) if len(catalog['Mass_200crit']) == 0: return haloproperties # if (d_runparams['VELconvert'] == False): # sortorder = np.argsort(catalog['Mass_tot'][:])[::-1] # sortorderinvert = np.argsort(sortorder) # for key in catalog.keys(): # catalog[key][:] = catalog[key][sortorder] # else: #sortorder = np.arange(len(catalog['Mass_tot'])).astype(int) # if partdata is not None: # for key in partdata.keys(): # partdata[key][:] = partdata[key][sortorder] if halolist is None: haloindices = np.arange(startHalo, startHalo+Nhalo).astype(int) use_existing_r200 = False else: haloindices = (halolist%TEMPORALHALOIDVAL - 1).astype(int) use_existing_r200 = False halo_i = -1 for halo in haloindices: halo_i += 1 #if halolist is not None: # print('Computing properties for halo %i'%halo) if halo%10000==0: print('Computing properties for halo %i-%i' %(halo, halo+10000)) if halo > len(catalog['Xc'])-1: print("Nhalo > N(velociraptor haloes)") break halopropertiestemp = {} coords = getHaloCoord(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_runparams['SnapshotType'], physical=d_runparams['Physical']) coords = coords%boxsize radhier = getHaloRadius(catalog, halo_i, z=d_snap['redshift'], rtype = d_radius['Rchoice'], snapshottype=d_runparams['SnapshotType'], physical=d_runparams['Physical']) satellite = False #Trusting VELOCIraptor not to falsely identify haloes as satellites if (halolist is None) and (catalog['hostHaloID'][halo_i] != -1): satellite = True hostHaloIDtemp = np.where(catalog['hostHaloID'][halo_i]==catalog['ID'])[0] if len(hostHaloIDtemp) == 0: hostHaloIDtemp = -2 else: hostHaloIDtemp = hostHaloIDtemp[0] else: hostHaloIDtemp = -1 #All happens here if debug: start_time = time.time() print('M200: ', catalog['Mass_200crit'][halo_i]) print('R200: ', catalog['R_200crit'][halo_i]) print('ID: ', catalog['ID'][halo_i]) if d_runparams['VELconvert']: if d_runparams['ParticleDataType'] != 'None': halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'], partType=partType, particledata=partdata['Particle_Types'], d_partType=d_partType) else: halopropertiestemp = copyVELOCIraptor(catalog, halo_i, coords, redshift = d_snap['redshift'], partType=partType) halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp elif d_runparams['ParticleDataType'] == 'None': #print("Halo", halo) halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius, partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200, profiles=d_runparams['Profiles'], quantities=d_runparams['Quantities'], debug=debug) else: #print("Halo", halo,len(partdata['Particle_IDs'][sortorder[halo]])) halopropertiestemp = findHaloProperties(d_snap, halo_i, coords, d_radius, partType=partType, satellite=satellite, rad = radhier, partlim=0, use_existing_r200=use_existing_r200, profiles=d_runparams['Profiles'], quantities=d_runparams['Quantities'], debug=debug, particledata=partdata['Particle_IDs'][halo_i]) if halopropertiestemp is None: if debug: print("De halo is leeg???") continue if debug: print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed') start_time = time.time() if d_runparams['TreeData']: halopropertiestemp['Tail'] = catalog['Tail'][halo_i]-1 halopropertiestemp['Head'] = catalog['Head'][halo_i]-1 
halopropertiestemp['RootTail'] = catalog['RootTail'][halo_i]-1 halopropertiestemp['RootHead'] = catalog['RootHead'][halo_i]-1 if d_runparams['VELconvert'] == False: if halopropertiestemp is None: halopropertiestemp = buildHaloDictionary(partType=partType) halopropertiestemp['HaloID'] = catalog['ID'][halo_i] halopropertiestemp['HaloIndex'] = -1 halopropertiestemp['COM_offset'] = -1 halopropertiestemp['CrossTime'] = -1 halopropertiestemp['Coord'] = coords else: if satellite: halopropertiestemp['Npart'] = catalog['npart'][halo_i] halopropertiestemp['n_part'] = catalog['npart'][halo_i] halopropertiestemp['HaloID'] = catalog['ID'][halo_i] halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp if not satellite: afstandtemp = coords - getHaloCoordCOM(catalog, halo_i, z=d_snap['redshift'], snapshottype=d_runparams['SnapshotType'], physical=d_runparams['Physical']) rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp) halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200'] halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km / np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/ halopropertiestemp['R200']))*s_to_yr/1.e6 else: halopropertiestemp['COM_offset'] = -1 halopropertiestemp['CrossTime'] = -1 for key in haloproperties.keys(): doorgaan = False if (d_runparams['Profiles'] == True) and (key in profile_keys()): doorgaan = True if (d_runparams['Quantities'] == True) and (key in quantity_keys()): doorgaan = True if (d_runparams['VELconvert'] == True) and (key in convertVel_keys()): doorgaan = True if doorgaan == False: continue if key in ['Radius', 'MassTable', 'snapshot', 'redshift']: continue elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad': continue if (halopropertiestemp['HaloIndex'] == -1) and (key != 'HaloID'): continue if halopropertiestemp[key] is None: continue elif key=='Partindices': haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:] else: haloproperties[key][halo] = halopropertiestemp[key] if debug: print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated') if 'profile' in d_radius.keys(): haloproperties['Radius'] = d_radius['profile'] haloproperties['redshift'] = np.array([d_snap['redshift']]) haloproperties['snapshot'] = np.array([d_snap['snapshot']]) j = 0 if d_runparams['VELconvert'] == False: haloproperties['MassTable'] = d_snap['File'].mass for i in d_snap['File'].readParticles: if haloproperties['MassTable'][i] == 0 and d_snap['File'].npart[i] != 0: waar = np.where(d_snap['File'].partTypeArray == i)[0][0] haloproperties['MassTable'][i] = d_snap['File'].masses[waar] j += 1 if d_runparams['TreeData']: haloproperties['Tail'] = haloproperties['Tail'].astype(int) haloproperties['Head'] = haloproperties['Head'].astype(int) haloproperties['RootTail'] = haloproperties['RootTail'].astype(int) haloproperties['RootHead'] = haloproperties['RootHead'].astype(int) if (len(haloproperties['Coord']) > 0) and (halolist is None): if d_runparams['Quantities'] or d_runparams['VELconvert']: print("Reassigning satellite haloes") fixSatelliteProblems(haloproperties, boxsize=boxsize) return haloproperties def findHaloPropertiesInSnap(catalog, snappath, snapshot, partType=8, Nhalo=100, startHalo=0, softeningLength=0.002, Radius=1., partlim=200, sortorder=None, boxsize=32, TEMPORALHALOIDVAL=1000000000000, particledata=None, mass=False): print("Computing properties for %i haloes in snapshot %i" %(Nhalo, snapshot)) 
haloproperties = buildHaloDictionary(partType=partType, multiple=True) if len(catalog['Mass_tot']) == 0: return haloproperties if sortorder is None: sortorder = np.argsort(catalog['Mass_tot'][:])[::-1] sortorderinvert = np.argsort(sortorder) else: sortorderinvert = np.argsort(sortorder) d_snap = {} d_snap['snapshot'] = snapshot limiet = 0 d_snap['File'] = Snapshot(snappath, snapshot, useIDs=False, partType=partType, softeningLength=softeningLength) d_snap['File'].makeCoordTree() for key in catalog.keys(): catalog[key][:] = catalog[key][sortorder] for halo in range(startHalo, startHalo+Nhalo): #start_time = time.time() #print(halo) #print(catalog['npart'][halo]) if halo%1000==0: print('Computing properties for halo %i-%i' %(halo, halo+1000)) if halo > len(catalog['Xc'])-1: print("Halo limit reached: nhalo = %i, hlim = %i" %(halo, limiet)) print("Coordinates: ", coords) break if limiet > 500: #Only computing sats if catalog['hostHaloID'][halo] == -1: continue halopropertiestemp = {} coords = getHaloCoord(catalog, halo, z=d_snap['File'].redshift) coords = coords%boxsize radhier = getHaloRadius(catalog, halo, z=d_snap['File'].redshift) satellite = False if (catalog['npart'][halo] < 20) or (catalog['Mass_200crit'][halo]*h == 0): startHalo += 1 # haloproperties['TreeBool'][halo] = 0 continue #Checking for dissapeared host haloes if (catalog['hostHaloID'][halo] != -1) and len(haloproperties['HaloID'])>1: haloindextemp = np.where((haloproperties['HaloID']%TEMPORALHALOIDVAL)==catalog['hostHaloID'][halo]%TEMPORALHALOIDVAL)[0] if len(haloindextemp) == 0: hostHaloIDtemp = -1 if catalog['npart'][halo] < partlim/2.: hostHaloIDtemp = -2 satellite = True else: afstandtemp = (haloproperties['Coord'][haloindextemp[0]]-coords) afstandtemp = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp) afstandtemp = (np.sum(afstandtemp*afstandtemp))**0.5 if afstandtemp < haloproperties['R200'][haloindextemp[0]]: # and catalog['npart'][halo] > 50: #print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords) hostHaloIDtemp = haloindextemp[0] satellite = True else: #print(afstandtemp, haloproperties['R200'][haloindextemp[0]], haloproperties['Coord'][haloindextemp[0]], coords) hostHaloIDtemp = -1 else: hostHaloIDtemp = -1 #All happens here halopropertiestemp = findHaloProperties(d_snap, halo, coords, Radius, partType=partType, rad=radhier, mass=mass, satellite=satellite, partlim=partlim) #print("--- %s seconds ---" % (time.time() - start_time), 'halopropertiestemp computed') if halopropertiestemp is None: startHalo += 1 limiet += 1 # haloproperties['TreeBool'][halo] = 0 continue if satellite == False and halopropertiestemp['Npart'] < partlim: startHalo += 1 limiet += 1 # haloproperties['TreeBool'][halo] = 0 continue limiet = 0 if satellite: halopropertiestemp['Npart'] = catalog['npart'][halo] #start_time = time.time() halopropertiestemp['n_part'] = catalog['npart'][halo] halopropertiestemp['HaloID'] = catalog['ID'][halo] halopropertiestemp['hostHaloIndex'] = hostHaloIDtemp if not satellite: afstandtemp = coords - getHaloCoord(catalog, halo, z=d_snap['File'].redshift) rhier = np.where(np.abs(afstandtemp)>0.5*boxsize, np.abs(afstandtemp) - boxsize, afstandtemp) halopropertiestemp['COM_offset'] = np.sqrt(np.sum(rhier**2))/halopropertiestemp['R200'] halopropertiestemp['CrossTime'] = (2.*halopropertiestemp['R200']*Mpc_to_km / np.sqrt(G_Mpc_km2_Msi_si2*halopropertiestemp['M200']*1e10/halopropertiestemp['R200']))*s_to_yr/1.e6 else: 
halopropertiestemp['COM_offset'] = -1 halopropertiestemp['CrossTime'] = -1 for key in haloproperties.keys(): if key in ['TreeBool', 'Tail', 'Head', 'Radius', 'MassTable', 'snapshot', 'redshift']: continue elif key == 'Neighbours' or key == 'Neighbour_distance' or key == 'Neighbour_Velrad': continue elif key=='Partindices': haloproperties[key][halopropertiestemp['HaloIndex']] = halopropertiestemp[key][:] elif halo == startHalo: haloproperties[key] = [halopropertiestemp[key]] else: haloproperties[key] = np.concatenate((haloproperties[key], [halopropertiestemp[key]])) #print("--- %s seconds ---" % (time.time() - start_time), 'haloproperties updated') haloproperties['Radius'] = Radius haloproperties['redshift'] = np.array([d_snap['File'].redshift]) haloproperties['snapshot'] = np.array([d_snap['snapshot']]) haloproperties['MassTable'] = d_snap['File'].mass j = 0 for i in d_snap['File'].readParticles: if haloproperties['MassTable'][i] == 0 and d_snap['File'].npart[i] != 0: waar = np.where(d_snap['File'].partTypeArray == i)[0][0] haloproperties['MassTable'][i] = d_snap['File'].masses[waar] j += 1 findSubHaloFraction(haloproperties, catalog) print("Reassigning satellite haloes") if len(haloproperties['Coord']) > 0: if 'DMFraction' in haloproperties.keys(): Hydro = True else: Hydro = False fixSatelliteProblems(haloproperties, Hydro = Hydro) #print("Computing subhalo fraction") print(haloproperties.keys()) return haloproperties def findHaloProperties(d_snap, halo, Coord, fixedRadius, r200fac = 8, partType=None, rad=None, satellite=False, partlim=200, profiles=False, quantities=True, particledata=None, debug=False, use_existing_r200=False): haloproperties = buildHaloDictionary(partType=partType) if isinstance(fixedRadius, dict): if 'profile' in fixedRadius.keys(): radprofile = fixedRadius['profile'] radfrac = fixedRadius['Rfrac'] else: radfrac = fixedRadius['Rfrac'] else: radprofile = fixedRadius radfrac = r200fac snap = d_snap['File'] haloproperties['HaloIndex'] = halo haloproperties['HaloID'] = halo#catalog['ID'][halo] snap.debug = debug coord = Coord if debug: start_time = time.time() if rad is None: rad = fixedRadius[-1] snap.get_temphalo(coord, rad, r200fac=radfrac, fixedRadius=radprofile, satellite=satellite, particledata=particledata, partlim=partlim, initialise_profiles=profiles, use_existing_r200=use_existing_r200) if len(snap.temphalo['indices']) < partlim or len(snap.temphalo['indices'])<=1: if debug: print('Halo has %i particles, and is thus too small' %len(snap.temphalo['indices'])) return None if debug: print("--- %s seconds ---" % (time.time() - start_time), 'halo initiated', snap.temphalo['R200']) if profiles: if debug: start_time = time.time() snap.get_temphalo_profiles() snap.get_specific_angular_momentum_radius(coord, radius=snap.temphalo['Radius']) haloproperties['AngularMomentum'] = snap.temphalo['AngularMomentum'] haloproperties['Density'] = snap.temphalo['profile_density'] haloproperties['Velrad'] = snap.temphalo['profile_vrad'] haloproperties['Npart_profile'] = snap.temphalo['profile_npart'] haloproperties['Mass_profile'] = snap.temphalo['profile_mass'] haloproperties['MaxRadIndex'] = snap.temphalo['MaxRadIndex'] if debug: print("--- %s seconds ---" % (time.time() - start_time), 'halo profiles calculated') haloproperties['Coord'] = snap.temphalo['Coord'] #Virial radius and mass R200 = snap.temphalo['R200'] haloproperties['M200']= snap.temphalo['M200'] haloproperties['R200'] = R200 #Assigning halo properties if quantities: if debug: start_time = time.time() if (satellite == 
False) or (particledata is not None): snap.get_spin_parameter() haloproperties['lambda'] = snap.temphalo['lambda'] haloproperties['lambda'] = snap.temphalo['lambda'] snap.get_Vmax_Rmax() haloproperties['Vmax_part'] = snap.temphalo['Vmax_part'] haloproperties['Rmax_part'] = snap.temphalo['Rmax_part'] haloproperties['Vmax_interp'] = snap.temphalo['Vmax_interp'] haloproperties['Rmax_interp'] = snap.temphalo['Rmax_interp'] if debug: print("--- %s seconds ---" % (time.time() - start_time), 'lambda calculated') haloproperties['Vel'] = snap.temphalo['Vel'] haloproperties['Partindices'] = snap.temphalo['indices'] haloproperties['Npart'] = len(haloproperties['Partindices']) # if satellite == False: # haloproperties['Virial_ratio'] = snap.get_virial_ratio(1000) # else: # haloproperties['Virial_ratio'] = -1 if debug: start_time = time.time() if len(snap.readParticles) > 1: nietnulhier=np.where(haloproperties['Mass_profile']!=0) for i_pT in range(len(snap.readParticles)): if quantities: if (satellite == False) or (particledata is not None): haloproperties['lambda'+snap.namePrefix[i_pT]] = snap.temphalo['lambda'+snap.namePrefix[i_pT]] else: haloproperties['lambda'+snap.namePrefix[i_pT]] = -1 haloproperties['Npart'+snap.namePrefix[i_pT]] = snap.temphalo['Npart'+snap.namePrefix[i_pT]] haloproperties[snap.namePrefix[i_pT]+'Fraction'] = snap.temphalo[snap.namePrefix[i_pT]+'Fraction'] if profiles: haloproperties['AngularMomentum'+snap.namePrefix[i_pT]] = snap.temphalo['AngularMomentum'+snap.namePrefix[i_pT]] haloproperties['Density'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'density'] haloproperties['Npart'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'npart'] haloproperties['Velrad'+snap.namePrefix[i_pT]] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'vrad'] haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'] = snap.temphalo['profile_'+snap.namePrefix[i_pT]+'mass'] if snap.readParticles[i_pT] == 0: haloproperties['Temperature'] = snap.temphalo['profile_temperature'] elif snap.readParticles[i_pT] == 5: haloproperties['AgeS'] = snap.temphalo['profile_Sage'] haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'] = np.zeros_like(haloproperties['Mass_profile']) haloproperties[snap.namePrefix[i_pT]+'Fraction_profile'][nietnulhier] = haloproperties['Mass'+snap.namePrefix[i_pT]+'_profile'][nietnulhier]/haloproperties['Mass_profile'][nietnulhier] if debug: print("--- %s seconds ---" % (time.time() - start_time), 'particle types done') if particledata is not None: if debug: start_time = time.time() snap.delete_used_indices(snap.temphalo['indices']) if debug: print("--- %s seconds ---" % (time.time() - start_time), 'Deleted particles') return haloproperties def copyVELOCIraptor(catalog, halo, Coord, redshift, d_partType=None, partType=None, particledata=None): c = constant(redshift=redshift) c.change_constants(redshift) comoving_rhocrit200 = deltaVir*c.rhocrit_Ms_Mpci3*h/(h*(1+redshift))**3 haloproperties = buildHaloDictionary(partType=partType) haloproperties['HaloIndex'] = halo haloproperties['HaloID'] = catalog['ID'][halo] haloproperties['n_part'] = catalog['npart'][halo] haloproperties['Coord'] = Coord #Virial radius and mass haloproperties['M200'] = catalog['Mass_200crit'][halo]*h haloproperties['R200'] = (haloproperties['M200']*1.e10/(comoving_rhocrit200 * 4./3. * np.pi))**(1./3.) 
#Assigning halo properties haloproperties['Vel'] = np.array([catalog['VXc'][halo], catalog['VYc'][halo], catalog['VZc'][halo]])*(1+redshift) haloproperties['Npart'] = catalog['npart'][halo] if (particledata is not None) and (len(d_partType['particle_type']) > 1): allpart = len(particledata[halo]) for i_pT in range(len(d_partType['particle_type'])): if allpart == 0: haloproperties['Npart'+d_partType['particle_type'][i_pT]] = 0 else: haloproperties['Npart'+d_partType['particle_type'][i_pT]] = len(np.where(particledata[halo] == d_partType['particle_number'][i_pT])[0]) #print(d_partType['particle_type'][i_pT], d_partType['particle_number'][i_pT], haloproperties['Npart'+d_partType['particle_type'][i_pT]]) return haloproperties def everythingOutside(haloproperties, d_snap): allpin = np.zeros(0) iets=0 allpinBool = np.array([True]*np.sum(d_snap['File'].npart)) for i in haloproperties['HaloIndex']: allpinBool[haloproperties['Partindices'][i]] = False outsideIndices = np.where(allpinBool)[0] insideIndices = np.where(allpinBool==False)[0] outsideIndicesDM = outsideIndices[np.where(outsideIndices < d_snap['File'].npart[0])[0]] outsideIndicesH = outsideIndices[np.where(outsideIndices >= d_snap['File'].npart[0])[0]] insideIndicesDM = insideIndices[np.where(insideIndices < d_snap['File'].npart[0])[0]] insideIndicesH = insideIndices[np.where(insideIndices >= d_snap['File'].npart[0])[0]] dmmass = d_snap['File'].get_masses()[0] hmass = d_snap['File'].get_masses()[-1] haloproperties['Outside_fdm_temp_DMpart_Hpart_dmmass_hmass'] = np.array([len(outsideIndicesDM)*dmmass/(len(outsideIndicesDM)*dmmass+len(outsideIndicesH)*hmass), np.sum(d_snap['File'].get_temperature()[outsideIndicesH])/len(outsideIndicesH), len(outsideIndicesDM), len(outsideIndicesH), dmmass, hmass]) haloproperties['Inside_fdm_temp_DMpart_Hpart_dmmass_hmass'] = np.array([len(insideIndicesDM)*dmmass/(len(insideIndicesDM)*dmmass+len(insideIndicesH)*hmass), np.sum(d_snap['File'].get_temperature()[insideIndicesH])/len(insideIndicesH), len(insideIndicesDM), len(insideIndicesH), dmmass, hmass]) def writeDataToHDF5quantities(path, name, haloproperties, overwrite=False, savePartData=False, convertVel=False, copyVel=False): existing = False if overwrite==False and os.path.isfile(path + name): haloprop = h5py.File(path + name, 'r+') existing = True HaloIndices = haloprop['HaloIndex'][:] overlap = np.where(np.in1d(haloproperties['HaloIndex'], HaloIndices))[0] nonoverlap = np.delete(haloproperties['HaloIndex'][:], overlap) nonoverlapindex = np.delete(np.arange(0, len(haloproperties['HaloIndex']), 1).astype(int), overlap) nonoverlaplist = ['haloIndex_%05d' %i for i in nonoverlap] else: haloprop = h5py.File(path+name, 'w') for key in haloproperties.keys(): if (copyVel==False) and (convertVel==False) and (key not in quantity_keys()): continue if (copyVel==False) and convertVel and (key not in convertVel_keys()): continue if isinstance(haloproperties[key], dict): if not savePartData: if key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices': continue if existing: temp = haloprop[key] else: temp = haloprop.create_group(key) for key2 in haloproperties[key].keys(): if haloproperties[key][key2] is None: print(key) continue key2string = 'haloIndex_%05d' %key2 if existing: if len(np.where(np.in1d(key2string, nonoverlaplist))[0]) > 0: temp.create_dataset(key2string, data = np.array(haloproperties[key][key2])) else: temp.create_dataset(key2string, data = np.array(haloproperties[key][key2])) else: if haloproperties[key] is None: print(key) continue if existing: 
if key == 'Radius' or key == 'MassTable' or key == 'snapshot' or key == 'redshift': continue data = haloprop[key][:] for i in nonoverlapindex: data = np.concatenate((data, [haloproperties[key][i]])) del haloprop[key] haloprop.create_dataset(key, data = data) else: haloprop.create_dataset(key, data = np.array(haloproperties[key])) haloprop.close() def writeDataToHDF5profiles(path, name, haloproperties, overwrite=False, savePartData=False): existing = False if overwrite==False and os.path.isfile(path + name): haloprop = h5py.File(path + name, 'r+') existing = True HaloIndices = haloprop['HaloIndex'][:] overlap = np.where(np.in1d(haloproperties['HaloIndex'], HaloIndices))[0] nonoverlap = np.delete(haloproperties['HaloIndex'][:], overlap) nonoverlapindex = np.delete(np.arange(0, len(haloproperties['HaloIndex']), 1).astype(int), overlap) nonoverlaplist = ['haloIndex_%05d' %i for i in nonoverlap] else: haloprop = h5py.File(path+name, 'w') for key in haloproperties.keys(): if key not in profile_keys(): continue if isinstance(haloproperties[key], dict): if not savePartData: if key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices': continue if existing: temp = haloprop[key] else: temp = haloprop.create_group(key) for key2 in haloproperties[key].keys(): if haloproperties[key][key2] is None: print(key) continue key2string = 'haloIndex_%05d' %key2 if existing: if len(np.where(np.in1d(key2string, nonoverlaplist))[0]) > 0: temp.create_dataset(key2string, data = np.array(haloproperties[key][key2])) else: temp.create_dataset(key2string, data = np.array(haloproperties[key][key2])) else: if haloproperties[key] is None: print(key) continue if existing: if key == 'Radius' or key == 'MassTable' or key == 'snapshot' or key == 'redshift': continue data = haloprop[key][:] for i in nonoverlapindex: data = np.concatenate((data, [haloproperties[key][i]])) del haloprop[key] haloprop.create_dataset(key, data = data) else: haloprop.create_dataset(key, data = np.array(haloproperties[key])) haloprop.close() def writeDataToHDF5(path, name, haloproperties, overwrite=False, savePartData=False): existing = False if overwrite==False and os.path.isfile(path + name): haloprop = h5py.File(path +name, 'r+') existing = True HaloIndices = haloprop['HaloIndex'][:] overlap = np.where(np.in1d(haloproperties['HaloIndex'], HaloIndices))[0] nonoverlap = np.delete(haloproperties['HaloIndex'][:], overlap) nonoverlapindex = np.delete(np.arange(0, len(haloproperties['HaloIndex']), 1).astype(int), overlap) nonoverlaplist = ['haloIndex_%05d' %i for i in nonoverlap] else: haloprop = h5py.File(path+name, 'w') for key in haloproperties.keys(): if isinstance(haloproperties[key], dict): if not savePartData: if key == 'DMpartIDs' or key == 'HpartIDs' or key=='Partindices': continue if existing: temp = haloprop[key] else: temp = haloprop.create_group(key) for key2 in haloproperties[key].keys(): if haloproperties[key][key2] is None: print(key) continue key2string = 'haloIndex_%05d' %key2 if existing: if len(np.where(np.in1d(key2string, nonoverlaplist))[0]) > 0: temp.create_dataset(key2string, data = np.array(haloproperties[key][key2])) else: temp.create_dataset(key2string, data = np.array(haloproperties[key][key2])) else: if haloproperties[key] is None: print(key) continue if existing: if key == 'Radius' or key == 'MassTable' or key == 'snapshot' or key == 'redshift': continue data = haloprop[key][:] for i in nonoverlapindex: data = np.concatenate((data, [haloproperties[key][i]])) del haloprop[key] haloprop.create_dataset(key, data = data) 
else: haloprop.create_dataset(key, data = np.array(haloproperties[key])) haloprop.close() def readHDF5Data(path, name, Hydro=True): existing = False if os.path.isfile(path + name): haloprop = h5py.File(path +name, 'r') else: sys.exit('Error: file '+path+name+' not found.') haloproperties = buildHaloDictionary(Hydro=Hydro, multiple=True) for key in haloprop.id: if isinstance(haloproperties[key.decode('utf-8')], dict): if isinstance(haloprop[key].id, h5py.h5d.DatasetID): continue temp = haloprop[key] for key2 in haloprop[key].id: haloindex = [int(s) for s in re.findall(r'\d+', key2.decode('utf-8'))][0] haloproperties[key.decode('utf-8')][haloindex] = temp[key2][:] else: haloproperties[key.decode('utf-8')] = haloprop[key][:] haloprop.close() return haloproperties def readHDF5DataSets(path, name, datasets, Hydro=True): existing = False if os.path.isfile(path + name): haloprop = h5py.File(path +name, 'r') else: sys.exit('Error: file '+path+name+' not found.') haloproperties = buildHaloDictionary(Hydro=Hydro, multiple=True) for key in haloprop.id: if key.decode('utf-8') in datasets: if isinstance(haloproperties[key.decode('utf-8')], dict): if isinstance(haloprop[key].id, h5py.h5d.DatasetID): continue temp = haloprop[key] for key2 in haloprop[key].id: haloindex = [int(s) for s in re.findall(r'\d+', key2.decode('utf-8'))][0] haloproperties[key.decode('utf-8')][haloindex] = temp[key2][:] else: haloproperties[key.decode('utf-8')] = haloprop[key][:] haloprop.close() return haloproperties def getRidOfBadHaloes(hp): c = constant() c.change_constants(hp['redshift']) wrong = np.where(4./3*np.pi*hp['R200']**3*200*c.rhocrit_Ms_Mpci3/(h**2*(1+hp['redshift'])**3) > 1.2*hp['M200']*1e10)[0] wrong = np.append(wrong, np.where(4./3*np.pi*hp['R200']**3*200*c.rhocrit_Ms_Mpci3/(h**2*(1+hp['redshift'])**3) < 0.8*hp['M200']*1e10)[0]) wronghi = hp['HaloIndex'][wrong] print(len(wronghi)) for i in hp.keys(): if i == 'Inside_fdm_temp_DMpart_Hpart_dmmass_hmass' or i == 'Outside_fdm_temp_DMpart_Hpart_dmmass_hmass': continue if isinstance(hp[i], dict): for j in wronghi: hp[i].pop(j, None) else: hp[i] = np.delete(hp[i], wrong) def rewriteHeadTails(halodata, snapmin=0, snapmax=200, TEMPORALHALOIDVAL=1000000000000): sortorder = {} sortorderinvert = {} newtail = {} newhead = {} for snap in range(snapmin, snapmax+1): sortorder[snap] =
np.argsort(halodata[snap]['Mass_tot'][:])
numpy.argsort
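A minimal standalone sketch of the numpy.argsort call used in this row's completion; the halo-mass values below are hypothetical toy data, not taken from the catalogue code.

import numpy as np

masses = np.array([3.0, 1.0, 2.0])           # hypothetical halo masses
order = np.argsort(masses)                    # indices that sort ascending: [1, 2, 0]
sorted_masses = masses[order]                 # array([1.0, 2.0, 3.0])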
""" Tests internal math routines """ import random import itertools import numpy as np import pytest import moldesign from moldesign import units as u from moldesign.mathutils import spherical_harmonics as harmonics from . import helpers from .molecule_fixtures import * registered_types = {} __PYTEST_MARK__ = ['math', 'gaussians'] def typedfixture(*types, **kwargs): """This is a decorator that lets us associate fixtures with one or more arbitrary types. We'll later use this type to determine what tests to run on the result""" def fixture_wrapper(func): for t in types: registered_types.setdefault(t, []).append(func.__name__) return pytest.fixture(**kwargs)(func) return fixture_wrapper @pytest.fixture def std_1d_gaussian(): g = moldesign.orbitals.gaussians.Gaussian([0.0]*u.angstrom, 1.0/u.angstrom ** 2) return g @typedfixture('basis_fn') def std_3d_gaussian(): g = moldesign.orbitals.gaussians.Gaussian([0.0, 0.0, 0.0]*u.angstrom, 1.0/u.angstrom ** 2) return g @typedfixture('basis_fn') def cartesian_3d_gaussian(): g = moldesign.orbitals.CartesianGaussian( center=[random.random() for i in range(3)]*u.angstrom, powers=[1, 3, 0], alpha=1.1/u.angstrom ** 2, coeff=1.0) return g @typedfixture('basis_fn') def spherical_3d_gaussian(): g = moldesign.orbitals.SphericalGaussian( center=[random.random() for i in range(3)]*u.angstrom, l=3, m=-2, alpha=1.1/u.angstrom ** 2, coeff=1.0) return g @pytest.mark.parametrize('objkey', ['std_1d_gaussian','std_3d_gaussian']) @pytest.mark.screening def test_gaussian_integral_and_dimensionality(objkey, request): g = request.getfixturevalue(objkey) assert g.ndim == len(g.center) intval = g.integral expectval = g.coeff*(np.pi/g.alpha) ** (g.ndim/2.0) _assert_almost_equal(intval, expectval, decimal=10) @pytest.fixture def linear_combination(): return _make_rando_linear_combination(True) def _make_rando_gaussian(withunits=True): if withunits: length = u.angstrom else: length = 1.0 return moldesign.orbitals.Gaussian((np.random.rand(3)-0.5)*1.0 * length, (random.random()*5)/(length ** 2), coeff=random.random()) def _make_rando_cartesian_gaussian(powers, withunits=True): if withunits: length = u.angstrom else: length = 1.0 return moldesign.orbitals.CartesianGaussian((np.random.rand(3)-0.5)*1.0 * length, (random.random()*5)/(length ** 2), powers=powers, coeff=random.random()) def _make_rando_spherical_gaussian(l,m, withunits=True): if withunits: length = u.angstrom else: length = 1.0 return moldesign.orbitals.SphericalGaussian((np.random.rand(3)-0.5)*1.0 * length, (random.random()*5)/(length ** 2), l,m, coeff=random.random()) def _make_rando_linear_combination(withunits=True): gaussians = [] if withunits: length = u.angstrom else: length = 1.0 center = (np.random.rand(3)-0.5)*1.0 * length for pwr in [(0,0,0), (1,1,1), (3,2,1)]: gaussians.append( moldesign.orbitals.CartesianGaussian( center=center, powers=pwr, alpha=(10.0 * (random.random()+3))/(length**2), coeff=1/(np.sqrt(3.0)))) lc = moldesign.orbitals.PrimitiveSum(gaussians) lc.ndims = 3 # so it works with the test suite return lc @pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number']) def test_numerical_vs_analytical_overlap_gauss(withunits): p1 = _make_rando_gaussian(withunits) p2 = _make_rando_gaussian(withunits) _assert_numerical_analytical_overlaps_match(p1, p2) @pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number']) def test_numerical_vs_analytical_overlap_cartesian(withunits): p1 = _make_rando_cartesian_gaussian((1,2,3), withunits) p2 = 
_make_rando_cartesian_gaussian((1,0,1), withunits) _assert_numerical_analytical_overlaps_match(p1, p2) @pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number']) def test_numerical_vs_analytical_overlap_spherical(withunits): p1 = _make_rando_spherical_gaussian(1,-1, withunits) p2 = _make_rando_spherical_gaussian(2,0, withunits) _assert_numerical_analytical_overlaps_match(p1, p2) @pytest.mark.parametrize('withunits', [True, False], ids=['quantity','number']) def test_numerical_vs_analytical_overlap_linear_combination(withunits): p1 = _make_rando_linear_combination(withunits) p2 = _make_rando_linear_combination(withunits) _assert_numerical_analytical_overlaps_match(p1, p2) def _assert_numerical_analytical_overlaps_match(g1, g2): olap = g1.overlap(g2) try: prod = g1*g2 except NotImplementedError: assert isinstance(g1, moldesign.orbitals.SphericalGaussian) assert isinstance(g2, moldesign.orbitals.SphericalGaussian) else: helpers.assert_almost_equal(prod.integral, olap) def assert_with_resolution(npoints): allpoints, grid = helpers.generate_grid(g1, g2, npoints) with np.errstate(under='ignore'): prodvals = g1(allpoints) * g2(allpoints) numsum = prodvals.sum() * grid.dx * grid.dy * grid.dz helpers.assert_almost_equal(numsum, olap, decimal=4) # If numerical isn't equal to analytical, try again with higher resolution # to make sure the failure isn't due to a sparse grid: try: assert_with_resolution(64) except AssertionError: pass else: return try: assert_with_resolution(128) except AssertionError: pass else: return assert_with_resolution(256) @pytest.mark.parametrize('withunits', [False, True]) def test_gaussian_multiplication_amplitudes(withunits): g1 = _make_rando_gaussian(withunits) g2 = _make_rando_gaussian(withunits) _assert_same_function_values(g1, g2, withunits) # parameterizations across a sample of cartesian gaussians test_powers = ((0,0,0), (1,0,0), (0,1,0), (0,0,1), (2,0,0), (1,1,1), (2,0,2), (4,1,1)) cartesian_test_suite = list(itertools.product(test_powers, test_powers, [True, False])) cartesian_test_ids = ['[%d%d%d]*[%d%d%d]/%s' % (p[0] + p[1] + ('units' if p[2] else 'c-num',)) for p in cartesian_test_suite] @pytest.mark.parametrize('p1,p2,withunits', cartesian_test_suite, ids=cartesian_test_ids) def test_cartesian_gaussian_multiplication_amplitudes(p1, p2, withunits): """ Tests that ``g1(x) * g2(x) == (g1 * g2)(x)`` """ g1 = _make_rando_cartesian_gaussian(p1, withunits) g2 = _make_rando_cartesian_gaussian(p2, withunits) _assert_same_function_values(g1, g2, withunits) def _assert_same_function_values(g1, g2, withunits): testcoords = 6.0*(np.random.rand(50, 3)-0.5) if withunits: testcoords = testcoords*u.angstrom g1g2 = g1*g2 gvals = g1g2(testcoords) g1vals = g1(testcoords) g2vals = g2(testcoords) prodvals = g1vals*g2vals helpers.assert_almost_equal(prodvals, gvals) def test_initial_gaussian_normalization_gaussian(): center = np.random.rand(3) * u.angstrom exp = 5.12 / u.angstrom**2 g2 = moldesign.orbitals.Gaussian(center, exp, normalized=True) helpers.assert_almost_equal(1.0, _numerical_norm(g2), decimal=3) helpers.assert_almost_equal(1.0, g2.norm) def test_initial_gaussian_normalization_with_prefactor(): center = np.random.rand(3) * u.angstrom exp = 5.12 / u.angstrom**2 g1 = moldesign.orbitals.Gaussian(center, exp, coeff=3.0*u.angstrom, normalized=True) helpers.assert_almost_equal(3.0*u.angstrom, _numerical_norm(g1), decimal=3) helpers.assert_almost_equal(3.0*u.angstrom, g1.norm) def test_initial_normalization_cartesian(): center = np.random.rand(3) * u.angstrom 
exp = 5.12 / u.angstrom**2 for powers in itertools.product(range(4), range(4), range(4)): g2 = moldesign.orbitals.CartesianGaussian(center, exp, powers, normalized=True) helpers.assert_almost_equal(1.0, _numerical_norm(g2), decimal=3) helpers.assert_almost_equal(1.0, g2.norm) def test_initial_normalization_cartesian_with_prefactor(): center = np.random.rand(3) * u.angstrom exp = 5.12 / u.angstrom**2 for powers in itertools.product(range(4), range(4), range(4)): g1 = moldesign.orbitals.CartesianGaussian(center, exp, powers, coeff=3.0, normalized=True) helpers.assert_almost_equal(3.0, _numerical_norm(g1), decimal=3) helpers.assert_almost_equal(3.0, g1.norm) def test_initial_normalization_spherical(): center = np.random.rand(3) * u.angstrom exp = 5.12 / u.angstrom**2 for l in range(5): for m in range(-l, l+1): g2 = moldesign.orbitals.SphericalGaussian(center, exp, l, m, normalized=True) helpers.assert_almost_equal(1.0, _numerical_norm(g2), decimal=3) helpers.assert_almost_equal(1.0, g2.norm) def test_initial_normalization_spherical_with_prefactor(): center = np.random.rand(3) * u.angstrom exp = 5.12 / u.angstrom**2 for l in range(5): for m in range(-l, l+1): g1 = moldesign.orbitals.SphericalGaussian(center, exp, l, m, coeff=3.0 * u.angstrom, normalized=True) helpers.assert_almost_equal(3.0 * u.angstrom, _numerical_norm(g1), decimal=3) helpers.assert_almost_equal(3.0 * u.angstrom, g1.norm) def _numerical_norm(g): allpoints, grid = helpers.generate_grid(g) with np.errstate(under='ignore'): vals = g(allpoints) numnorm = np.sqrt(grid.dx * grid.dy * grid.dz * (vals**2).sum()) return numnorm @pytest.mark.parametrize('objkey', registered_types['basis_fn']) def test_gaussian_function_values(objkey, request): g = request.getfixturevalue(objkey) for idim in range(g.ndims): coord = g.center.copy() randoffset = 4.0 * (random.random() - 0.5) * g.alpha**-0.5 coord[idim] += randoffset funcval = _gfuncval(g, coord) retval = g(coord) _assert_almost_equal(funcval, retval) @pytest.mark.parametrize('objkey', registered_types['basis_fn']) def test_vectorized_gaussian_function_evaluations(objkey, request): g = request.getfixturevalue(objkey) coords = np.zeros((5, g.ndims)) * g.center.units for i in range(5): coords[i] = g.center randoffset = 4.0 * (random.random() - 0.5) * g.alpha**-0.5 idim = random.randrange(g.ndims) coords[i, idim] += randoffset vector_results = g(coords) expected = u.array([g(c) for c in coords]) if vector_results.dimensionless: vector_results = vector_results._magnitude _assert_almost_equal(vector_results, expected, decimal=8) @pytest.mark.parametrize('objkey', registered_types['basis_fn'] + ['linear_combination']) def test_gaussian_str_and_repr_works(objkey, request): g1 = request.getfixturevalue(objkey) str(g1) repr(g1) @pytest.mark.parametrize('objkey', registered_types['basis_fn']) def test_normalized_gaussian_self_overlap_is_unity(objkey, request): g1 = request.getfixturevalue(objkey) g2 = g1.copy() g1.coeff = -10.0 g2.coeff = 12341.1832 olap = g1.overlap(g2, normalized=True) assert abs(-1.0 - olap) < 1e-12 g1.coeff = 10.0 olap = g1.overlap(g2, normalized=True) assert abs(1.0 - olap) < 1e-12 @pytest.mark.parametrize('objkey', registered_types['basis_fn']) def test_normalization(objkey, request): g1 = request.getfixturevalue(objkey) oldnorm = g1.norm g1.coeff = (random.random() - 0.5) * 428.23 try: assert g1.norm != oldnorm except u.DimensionalityError: pass # this is a reasonable thing to happen too g1.normalize() assert abs(g1.norm - 1.0) < 1e-12 def 
test_linear_combination_normalization(linear_combination): g1 = linear_combination oldnorm = g1.norm prefactor = (random.random() - 0.5) * 428.23 for prim in g1: prim.coeff *= prefactor try: assert g1.norm != oldnorm except u.DimensionalityError: pass # this is a reasonable thing to happen too g1.normalize() assert abs(g1.norm - 1.0) < 1e-12 def _gfuncval(g, coord): r = g.center - coord if len(coord.shape) > 1: r2 = np.sum(r**2, axis=1) else: r2 =
np.sum(r**2)
numpy.sum
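For reference, a tiny sketch of the numpy.sum usage in this row, showing the scalar form from the completion next to the axis=1 form used earlier in the same helper; the sample vectors are made up.

import numpy as np

r = np.array([[1.0, 2.0, 3.0],
              [4.0, 5.0, 6.0]])               # made-up displacement vectors
r2_total = np.sum(r**2)                       # 91.0, single-coordinate case
r2_rows = np.sum(r**2, axis=1)                # array([14., 77.]), batched case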
from .base_analyzer import BaseAnalyzer import time import math import numpy as np, scipy, matplotlib.pyplot as plt import librosa, librosa.display #print ("pitch_analyzer __name__ :", __name__) # acf and deal with voice def deal1_pa(x, sr): # x, sr = librosa.load(path) # read the voice # sr = self.sr f_hi = 1100 # the highest voice of women f_lo = 82 # the lowest voice of man t_lo = sr / f_hi # the possible shortest time of the highest peak appear t_hi = sr / f_lo # the possible longest time of the highest peak appear max0 = int((len(x) - 440) / 220) - 1 # duration=max*20ms h0 = np.empty([max0, ], dtype=float) # calculate the frequency in each 20ms for i in range(0, max0): r = librosa.autocorrelate(x[(i * 220):(440 + i * 220)]) r[:int(t_lo)] = 0 r[int(t_hi):] = 0 t_max = r.argmax() if (t_max > t_lo and t_max <= t_hi): h0[i] = float(sr) / t_max else: h0[i] = 0 # data processing # minus the empty time in the front of frequency array and at last of array for i in range(0, max0): if h0[i] != 0: h0 = h0[i:] max0 = max0 - i break for i in range(0, max0): if h0[max0 - i - 1] != 0: h0 = h0[:(max0 - i)] max0 = max0 - i break # step 1:Smooth processing temp = np.empty([5, ], dtype=int) for i in range(0, max0 - 6): for j in range(0, 5): temp[j] = h0[i + j] for j in range(0, 5): for k in range(j + 1, 5): if temp[j] < temp[k]: t = temp[k] temp[k] = temp[j] temp[j] = t for j in range(0, 5): if h0[i + j] == temp[2]: t = h0[i + j] h0[i + j] = h0[i + 2] h0[i + 2] = t # step2:calculate mean value for i in range(0, int(max0 / 5 - 1)): flag = 1 average_all = 0 for j in range(0, 5): average_all += h0[i * 5 + j] if h0[i * 5 + j] == 0: flag = 0 if flag == 1: average = average_all / 5 for j in range(0, 5): h0[i * 5 + j] = average # step3:resolve the value of zero for i in range(0, max0): if h0[i] < 80: h0[i] = 0 # step4: remove the motation value for i in range(1, max0 - 1): if h0[i - 1] == 0 and h0[i + 1] == 0: h0[i] = 0 # translate pitch into midi note n0 = np.empty([max0, ], dtype=float) for i in range(0, max0): if h0[i] > 80: n0[i] = float(librosa.hz_to_midi(h0[i])) else: n0[i] = -1 # minus the zero in the front fo and the the last of the array for i in range(0, len(n0)): if n0[i] != 0: n0 = n0[i:] break for i in range(0, len(n0)): if n0[len(n0) - 1 - i] != 0: n0 = n0[:(len(n0) - i)] break return (n0) def deal1_da(data): sr = 22050 f_hi = 1100 # the highest voice of women f_lo = 82 # the lowest voice of man t_lo = sr / f_hi # the possible shortest time of the highest peak appear t_hi = sr / f_lo # the possible longest time of the highest peak appear max0 = int((len(data) - 440) / 220) - 1 # duration=max*20ms h0 = np.empty([max0, ], dtype=float) # calculate the frequency in each 20ms for i in range(0, max0): r = librosa.autocorrelate(data[(i * 220):(440 + i * 220)]) r[:int(t_lo)] = 0 r[int(t_hi):] = 0 t_max = r.argmax() if (t_max > t_lo and t_max <= t_hi): h0[i] = float(sr) / t_max else: h0[i] = 0 # data processing # minus the empty time in the front of frequency array and at last of array for i in range(0, max0): if h0[i] != 0: h0 = h0[i:] max0 = max0 - i break for i in range(0, max0): if h0[max0 - i - 1] != 0: h0 = h0[:(max0 - i)] max0 = max0 - i break # step 1:Smooth processing temp = np.empty([5, ], dtype=int) for i in range(0, max0 - 6): for j in range(0, 5): temp[j] = h0[i + j] for j in range(0, 5): for k in range(j + 1, 5): if temp[j] < temp[k]: t = temp[k] temp[k] = temp[j] temp[j] = t for j in range(0, 5): if h0[i + j] == temp[2]: t = h0[i + j] h0[i + j] = h0[i + 2] h0[i + 2] = t # step2:calculate 
mean value for i in range(0, int(max0 / 5 - 1)): flag = 1 average_all = 0 for j in range(0, 5): average_all += h0[i * 5 + j] if h0[i * 5 + j] == 0: flag = 0 if flag == 1: average = average_all / 5 for j in range(0, 5): h0[i * 5 + j] = average # step3:resolve the value of zero for i in range(0, max0): if h0[i] < 80: h0[i] = 0 # step4: remove the motation value for i in range(1, max0 - 1): if h0[i - 1] == 0 and h0[i + 1] == 0: h0[i] = 0 # translate pitch into midi note n0 = np.empty([max0, ], dtype=float) for i in range(0, max0): if h0[i] > 80: n0[i] = float(librosa.hz_to_midi(h0[i])) else: n0[i] = -1 # minus the zero in the front fo and the the last of the array for i in range(0, len(n0)): if n0[i] != 0: n0 = n0[i:] break for i in range(0, len(n0)): if n0[len(n0) - 1 - i] != 0: n0 = n0[:(len(n0) - i)] break return (n0) def Z_ScoreNormalization(x,mu,sigma): for i in range(0,12): x[i] = (x[i] - mu) / sigma return x def deal2_pa(path): x, sr = librosa.load(path) # read the voice chroma = librosa.feature.chroma_stft(y=x, sr=sr) chroma = np.transpose(chroma) for i in range(0,len(chroma)): chroma[i] = Z_ScoreNormalization(chroma[i], np.average(chroma[0]), np.std(chroma[0])) return chroma srcscore = deal2_pa("mp3/source_2.mp3") #print ("print (srcscore)") #print (srcscore) def deal2_da(data): chroma = librosa.feature.chroma_stft(y=data, sr=22050) chroma = np.transpose(chroma) for i in range(0,len(chroma)): chroma[i] = Z_ScoreNormalization(chroma[i],
np.average(chroma[0])
numpy.average
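A self-contained illustration of numpy.average as it appears in the completion; the chroma vector below is an invented placeholder.

import numpy as np

chroma_frame = np.array([0.2, 0.8, 0.5])      # invented chroma energies
mu = np.average(chroma_frame)                 # 0.5, used as the normalization mean
sigma = np.std(chroma_frame)                  # paired with np.std in the original call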
import warnings import numpy as np from copy import deepcopy from sklearn.cluster import KMeans from numpy.linalg import inv, eigvals from scipy.sparse.linalg import eigs from scipy.linalg import sqrtm from mudcod.nw import Loss, Similarity from mudcod.utils.sutils import timeit, log from mudcod.spectral import SpectralClustering warnings.filterwarnings(action="ignore", category=np.ComplexWarning) _eps = 10 ** (-10) CONVERGENCE_CRITERIA = 10 ** (-5) class PisCES(SpectralClustering): def __init__(self, verbose=False): super().__init__("pisces", verbose=verbose) self.convergence_monitor = [] def fit( self, adj, alpha=None, k_max=None, k_opt="empirical", n_iter=30, degree_correction=True, monitor_convergence=False, ): """ Parameters ---------- adj adjacency matrices with dimention (th,n,n), n is the number of nodes and th is the number of time time steps. alpha smoothing tuning parameter, along time axis, default is 0.05J(th,2). k_max maximum number of communities, default is n/10. n_iter number of iteration of pisces, default is 30. degree_correction Laplacianized, default is 'True'. monitor_convergence if True, method reports convergence based on ||U_{t} - U_{t-1}|| and |obj_t - obj_{t-1}> at each iteration. Returns ------- embeddings: computed spectral embeddings, with shape (th, n, k) """ assert type(adj) in [np.ndarray, np.memmap] and adj.ndim == 3 self.adj = adj.astype(float) self.adj_shape = self.adj.shape self.n = self.adj_shape[1] # or adj_shape[2] self.time_horizon = self.adj_shape[0] self.degree_correction = degree_correction self.degree = deepcopy(adj) th = self.time_horizon n = self.n adj = self.adj degree = self.degree if alpha is None: alpha = 0.05 *
np.ones((th, 2))
numpy.ones
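The default smoothing parameter built with numpy.ones can be reproduced in isolation; th below is an assumed small time horizon.

import numpy as np

th = 5                                        # assumed number of time steps
alpha = 0.05 * np.ones((th, 2))               # shape (5, 2), every entry 0.05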
'''functions to work with contrasts for multiple tests contrast matrices for comparing all pairs, all levels to reference level, ... extension to 2-way groups in progress TwoWay: class for bringing two-way analysis together and try out various helper functions Idea for second part - get all transformation matrices to move in between different full rank parameterizations - standardize to one parameterization to get all interesting effects. - multivariate normal distribution - exploit or expand what we have in LikelihoodResults, cov_params, f_test, t_test, example: resols_dropf_full.cov_params(C2) - connect to new multiple comparison for contrast matrices, based on multivariate normal or t distribution (Hothorn, Bretz, Westfall) ''' from numpy.testing import assert_equal import numpy as np #next 3 functions copied from multicomp.py def contrast_allpairs(nm): '''contrast or restriction matrix for all pairs of nm variables Parameters ---------- nm : int Returns ------- contr : ndarray, 2d, (nm*(nm-1)/2, nm) contrast matrix for all pairwise comparisons ''' contr = [] for i in range(nm): for j in range(i+1, nm): contr_row = np.zeros(nm) contr_row[i] = 1 contr_row[j] = -1 contr.append(contr_row) return np.array(contr) def contrast_all_one(nm): '''contrast or restriction matrix for all against first comparison Parameters ---------- nm : int Returns ------- contr : ndarray, 2d, (nm-1, nm) contrast matrix for all against first comparisons ''' contr = np.column_stack((np.ones(nm-1), -np.eye(nm-1))) return contr def contrast_diff_mean(nm): '''contrast or restriction matrix for all against mean comparison Parameters ---------- nm : int Returns ------- contr : ndarray, 2d, (nm-1, nm) contrast matrix for all against mean comparisons ''' return np.eye(nm) - np.ones((nm,nm))/nm def signstr(x, noplus=False): if x in [-1,0,1]: if not noplus: return '+' if np.sign(x)>=0 else '-' else: return '' if np.sign(x)>=0 else '-' else: return str(x) def contrast_labels(contrasts, names, reverse=False): if reverse: sl = slice(None, None, -1) else: sl = slice(None) labels = [''.join(['%s%s' % (signstr(c, noplus=True),v) for c,v in zip(row, names)[sl] if c != 0]) for row in contrasts] return labels def contrast_product(names1, names2, intgroup1=None, intgroup2=None, pairs=False): '''build contrast matrices for products of two categorical variables this is an experimental script and should be converted to a class Parameters ---------- names1, names2 : lists of strings contains the list of level labels for each categorical variable intgroup1, intgroup2 : ndarrays TODO: this part not tested, finished yet categorical variable Notes ----- This creates a full rank matrix. It does not do all pairwise comparisons, parameterization is using contrast_all_one to get differences with first level. ? does contrast_all_pairs work as a plugin to get all pairs ? ''' n1 = len(names1) n2 = len(names2) names_prod = ['%s_%s' % (i,j) for i in names1 for j in names2] ee1 = np.zeros((1,n1)) ee1[0,0] = 1 if not pairs: dd = np.r_[ee1, -contrast_all_one(n1)] else: dd = np.r_[ee1, -contrast_allpairs(n1)] contrast_prod = np.kron(dd[1:],
np.eye(n2)
numpy.eye
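A short sketch of the numpy.eye identity block used inside the Kronecker product in this row; n2 and the contrast row are placeholder values.

import numpy as np

n2 = 3
identity_block = np.eye(n2)                   # 3 x 3 identity
dd_row = np.array([[1.0, -1.0]])              # placeholder contrast row
contrast_prod = np.kron(dd_row, identity_block)   # result has shape (3, 6)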
#!/usr/bin/env python3 # -*- coding: utf-8 -*- """ Importance sampling with the snowball proposition distribution in O(k_max x T). Author: <NAME> <<EMAIL>> """ import networkx as nx import numpy as np # Global status dict status = {"unexplored": 0, "boundary": 1, "explored": 2} class History(object): """Edge history vectors in MultiGraphs.""" def __init__(self, X=None): """Constructor with automagic.""" if X is not None: self.X = X for e in self.X: self.X = self.order_edge(e) else: self.X = [] self.idx = 0 def __str__(self): return self.X.__str__() def __repr__(self): return self.X.__repr__() def __iter__(self): return self def __next__(self): self.idx += 1 try: return self.X[self.idx - 1] except IndexError: self.idx = 0 raise StopIteration # Done iterating. next = __next__ # python2.x compatibility. def order_edge(self, edge): """Canonical ordering of an edge's representation.""" if edge[0] > edge[1]: return (edge[1], edge[0], edge[2]) else: return edge # Public methods def load_history(self, filepath, is_simple=False): """Load history from file. Note ---- The file must contain one edge per line, represented by a pair of integers, with an additionnal tag to count multiedges if is a multigraphs. The ordering is assumed to represent the history's order. Parameters ---------- filepath : str Path to history file. is_simple : bool Assume that the graph is simple and ignore edge tags. """ with open(filepath, 'r') as f: self.X = [] for line in f: e = tuple(int(v) for v in line.strip().split()) if is_simple: self.X.append(self.order_edge((e[0], e[1], 0))) else: self.X.append(self.order_edge(e)) return self.X def save_history(self, filepath): """Save history to file. Note ---- The file must contain one edge per line, represented by a pair of integers, with an additionnal tag to count multiedges if is a multigraphs. The ordering is assumed to represent the history's order. Parameters ---------- filepath : str Path to history file. is_simple : bool Assume that the graph is simple and ignore edge tags. """ with open(filepath, 'r') as f: for e in self.X: print(list(e).join(" "), file=f) def set_size(self, T): """Set total size of history when used in `set` mode.""" self.X = [(0, 0, 0)] * T def set(self, t, e): """Set edge at time t.""" self.X[t] = self.order_edge(e) def clear(self): """Clear container for reuse.""" self.X.clear() def tau(self, e): """Postition of edge 'e' in the history.""" if len(e) == 2: e = (e[0], e[1], 0) return self.X.index(self.order_edge(e)) class Sampler(object): """Baes Sampler class.""" def __init__(self, g): """Construct sampler for a graph.""" self.N = g.number_of_nodes() self.g = g self.reset() # super(Sampler, self).__init__() def reset(self): """Reset all sample dependents values.""" self.deg = [0] * self.N self.Z = 0 def loglikelihood_update(self, edge, gamma, b): """Compute the change in log-likelihood due to adding `edge`.""" v0 = edge[0] v1 = edge[1] if self.deg[v0] == 0 and self.deg[v1] == 0: return 0 # raise Exception("Impossible event!") if self.deg[v0] == 0: # new node is v0 delta =
np.log(b * (self.deg[v1] ** gamma / self.Z))
numpy.log
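numpy.log here is the natural logarithm; a toy evaluation of the same expression, with all quantities invented, looks like this.

import numpy as np

b, gamma = 0.7, 1.0                           # invented model parameters
deg_v1, Z = 4, 20.0                           # invented degree and normalization
delta = np.log(b * (deg_v1 ** gamma / Z))     # log-likelihood change for the new node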
import numpy as np import scipy.ndimage as ndi def remove_small_region(input, threshold): labels, nb_labels = ndi.label(input) label_areas = np.bincount(labels.ravel()) too_small_labels = label_areas < threshold too_small_mask = too_small_labels[labels] input[too_small_mask] = 0 return input class RemoveSmallRegion(object): def __init__(self, threshold): self.threshold = threshold def __call__(self, case): case['label'] = remove_small_region(case['label'], self.threshold) return case def split_dim(input, axis=-1): sub_arr = np.split(input, input.shape[axis], axis=axis) return [np.squeeze(arr, axis=axis) for arr in sub_arr] def slice_dim(input, slice, axis=-1): return split_dim(input, axis=axis)[slice] def rescale(input, scale, order=1, mode='reflect', cval=0, is_label=False, multi_class=False): ''' A wrap of scipy.ndimage.zoom for label encoding data support. Args: See scipy.ndimage.zoom doc rescale for more detail. is_label: If true, split label before rescale. ''' dtype = input.dtype if is_label: num_classes = np.unique(input).max() + 1 if order == 0 or not is_label or num_classes < 3: if multi_class: classes = to_tensor(input) rescaled_classes = np.array([ndi.zoom(c.astype(np.float32), scale, order=order, mode=mode, cval=cval) for c in classes]) return to_numpy(rescaled_classes).astype(dtype) else: return ndi.zoom(input.astype(np.float32), scale, order=order, mode=mode, cval=cval).astype(dtype) else: onehot = to_one_hot(input, num_classes, to_tensor=True) rescaled_onehot = np.array([ndi.zoom(c.astype(np.float32), scale, order=order, mode=mode, cval=cval) for c in onehot]) return np.argmax(rescaled_onehot, axis=0).astype(dtype) def resize(input, shape, order=1, mode='reflect', cval=0, is_label=False): ''' Resize ndarray. (wrap of rescale) Args: See scipy.ndimage.zoom doc rescale for more detail. is_label: If true, split label before rescale. ''' orig_shape = input.shape multi_class = len(shape) == len(orig_shape)-1 orig_shape = orig_shape[:len(shape)] scale = np.array(shape)/np.array(orig_shape) return rescale(input, scale, order=order, mode=mode, cval=cval, is_label=is_label, multi_class=multi_class) class Resize(object): ''' Resize image and label. Args: scale (sequence or int): range of factor. If it is a int number, the range will be [1-int, 1+int] ''' def __init__(self, shape): self.shape = shape def __call__(self, case): case['image'] = resize(case['image'], self.shape) case['label'] = resize(case['label'], self.shape, is_label=True) return case class RandomRescale(object): ''' Randomly rescale image and label by range of scale factor. Args: scale (sequence or int): range of factor. 
If it is a int number, the range will be [1-int, 1+int] ''' def __init__(self, scale): if isinstance(scale, float): assert 0 <= scale <= 1, "If range is a single number, it must be non negative" self.scale = [1-scale, 1+scale] else: self.scale = scale def __call__(self, case): scale = np.random.uniform(self.scale[0], self.scale[1]) case['image'] = rescale(case['image'], scale) case['label'] = rescale(case['label'], scale, is_label=True) return case def to_tensor(input): dims_indices = np.arange(len(input.shape)) dims_indices = np.concatenate((dims_indices[-1:], dims_indices[:-1])) return input.transpose(dims_indices) def to_numpy(input): dims_indices = np.arange(len(input.shape)) dims_indices = np.concatenate((dims_indices[1:], dims_indices[:1])) return input.transpose(dims_indices) class ToTensor(object): ''' (d1,d2,...,dn,class) => (class,d1,d2,...,dn) ''' def __call__(self, case): case['image'] = to_tensor(case['image']) return case class ToNumpy(object): ''' (class,d1,d2,...,dn) => (d1,d2,...,dn,class) ''' def __call__(self, case): case['image'] = to_numpy(case['image']) return case def adjust_contrast(input, factor): dtype = input.dtype mean = input.mean() return ((input - mean) * factor + mean).astype(dtype) def adjust_brightness(input, factor): dtype = input.dtype minimum = input.min() return ((input - minimum) * factor + minimum).astype(dtype) def adjust_gamma(input, gamma, epsilon=1e-7): dtype = input.dtype minimum = input.min() maximum = input.max() arange = maximum - minimum + epsilon return (np.power(((input - minimum) / arange), gamma) * arange + minimum).astype(dtype) class RandomContrast(object): ''' Adjust contrast with random factor value in range. Args: factor (sequence or int): range of factor. If it is a int number, the range will be [1-int, 1+int] ''' def __init__(self, factor_range): if isinstance(factor_range, float): assert 0 <= factor_range <= 1, "If range is a single number, it must be non negative" self.factor_range = [1-factor_range, 1+factor_range] else: self.factor_range = factor_range def __call__(self, case): factor = np.random.uniform(self.factor_range[0], self.factor_range[1]) case['image'] = adjust_contrast(case['image'], factor) return case class RandomBrightness(object): ''' Adjust brightness with random factor value in range. Args: factor_range (sequence or int): range of factor. If it is a int number, the range will be [1-int, 1+int] ''' def __init__(self, factor_range): if isinstance(factor_range, float): assert 0 <= factor_range <= 1, "If range is a single number, it must be non negative" self.factor_range = [1-factor_range, 1+factor_range] else: self.factor_range = factor_range def __call__(self, case): factor = np.random.uniform(self.factor_range[0], self.factor_range[1]) case['image'] = adjust_brightness(case['image'], factor) return case class RandomGamma(object): ''' Adjust gamma with random gamma value in range. Args: gamma_range (sequence or int): range of gamma. If it is a int number, the range will be [1-int, 1+int] ''' def __init__(self, gamma_range): if isinstance(gamma_range, float): assert 0 <= gamma_range <= 1, "If range is a single number, it must be non negative" self.gamma_range = [1-gamma_range, 1+gamma_range] else: self.gamma_range = gamma_range def __call__(self, case): gamma = np.random.uniform(self.gamma_range[0], self.gamma_range[1]) case['image'] = adjust_gamma(case['image'], gamma) return case def to_one_hot(input, num_classes, to_tensor=False): ''' Label to one-hot. 
Label shape changes: (d1,d2,...,dn) => (d1,d2,...,dn,class) or (d1,d2,...,dn) => (class,d1,d2,...,dn) (pytorch tensor like) Args: num_classes (int): Total num of label classes. ''' dtype = input.dtype onehot = np.eye(num_classes)[input] dims_indices = np.arange(len(input.shape)+1) if to_tensor: dims_indices = np.concatenate((dims_indices[-1:], dims_indices[:-1])) return onehot.transpose(dims_indices).astype(dtype) class RandomMirror(object): ''' Mirroring image and label randomly (per_axis). Args: p_per_axis (sequence or int): axis u wanted to mirror. ''' def __init__(self, p_per_axis): self.p_per_axis = p_per_axis def __call__(self, case): dim = len(case['image'].shape)-1 if not isinstance(self.p_per_axis, (np.ndarray, tuple, list)): self.p_per_axis = [self.p_per_axis] * dim for i, p in enumerate(self.p_per_axis): if np.random.uniform() < p: # negative strides numpy array is not support for pytorch yet. case['image'] = np.flip(case['image'], i).copy() case['label'] = np.flip(case['label'], i).copy() return case class ToOnehot(object): ''' Label to one-hot. Label shape changes: (d1,d2,...,dn) => (d1,d2,...,dn,class) or (d1,d2,...,dn) => (class,d1,d2,...,dn) (with transpose) Args: num_classes (int): Total num of label classes. ''' def __init__(self, num_classes, to_tensor=False): self.num_classes = num_classes self.to_tensor = to_tensor def __call__(self, case): case['label'] = to_one_hot(case['label'], self.num_classes, self.to_tensor) return case def combination_labels(input, combinations, num_classes): ''' Combines some label indices as one. Args: combinations (ndarray, list, tuple): Combines of label indices ndarray, e.g.[[0,1],[2]] list, e.g.[0,1] tuple, e.g.(0,1) num_classes (int): Total num of label classes. ''' dtype = input.dtype # add other single class combinations in the combinations setting if len(np.array(combinations).shape) == 1: combinations = [combinations] full_combinations = [] used_combination_indices = [] classes_range = range(num_classes) for c in classes_range: c_pos = np.where(np.array(combinations) == c) related_combination_indices = c_pos[0] if len(related_combination_indices) > 0: for i in related_combination_indices: if i not in used_combination_indices: full_combinations.append(combinations[i]) used_combination_indices.append(i) else: full_combinations.append([c]) onehot = to_one_hot(input, num_classes, True) # combination the classes into new onehot combination_logics = [] for combination in full_combinations: combination_logic = np.zeros_like(onehot[0]) for c in combination: combination_logic =
np.logical_or(onehot[c], combination_logic)
numpy.logical_or
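The one-hot class merging relies on numpy.logical_or accumulating boolean masks; a minimal example with fabricated masks:

import numpy as np

mask_a = np.array([True, False, False])       # fabricated class mask
mask_b = np.array([False, False, True])
combined = np.logical_or(mask_a, mask_b)      # array([ True, False,  True])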
#!/usr/bin/python3 #--- coding:utf-8 import time, sys, os from IPython import embed import numpy as np import matplotlib matplotlib.use('Agg') from matplotlib import pyplot as plt from mpl_toolkits.mplot3d import Axes3D class Kmeans(): def __init__(self, n_clusters, max_iter = 1000, tol = 0.00001): self.n_clusters = n_clusters self.max_iter = max_iter self.tol = tol def fit(self, data): shape,_ = data.shape index = np.random.randint(0,shape,size=self.n_clusters) k_points = data[index] k_points_last = None for a in range(self.max_iter): label = [] k_points_last = k_points.copy() for i in range(shape): dis = [] for j in range(self.n_clusters): dis.append(np.linalg.norm(data[i,:]-k_points[j,:])) label.append(dis.index(min(dis))) for i in range(self.n_clusters): index = np.argwhere(np.array(label)==i) if len(index) != 0: k_points[i,:] = data[index, :].mean(axis=0) if np.linalg.norm(k_points-k_points_last) < self.tol: break return np.array(label) class SOM(): def __init__(self, n_clusters, in_layer, s, knn=10, out_size=(3,3), m_iter=1000): self.n_clusters = n_clusters self.in_layer = in_layer.copy() self.m_iter = m_iter self.knn = knn a,b = np.min(self.in_layer), np.max(self.in_layer) self.w = (b-a)*np.random.rand(out_size[0], out_size[1], self.in_layer.shape[1])+a self.color = ['y', 'r', 'g', 'b', 'c', 'm', 'k', 'pink', 'dark', 'orange', 'tan', 'gold'] self.label = np.zeros(len(in_layer)) self.res =None self.neuron = {} self.l = 1.0 self.s = s self.som_r = int(self.w.shape[0]/2.5) self.som_r_square = self.som_r**2 self.D_list = [] def Init_W(self): for i in range(self.w.shape[0]): for j in range(self.w.shape[1]): k = np.random.randint(self.in_layer.shape[0]) self.w[i][j]=self.in_layer[k] def Normalize_Input(self, X): ''' for i in range(X.shape[0]): t = np.linalg.norm(X[i]) X[i] /= t #''' return X def Normalize_W(self, w): ''' for i in range(w.shape[0]): for j in range(w.shape[1]): t = np.linalg.norm(w[i,j]) w[i,j] /= t #''' return w def Get_Win_Neuron(self, x): max_dis=float('inf') min_dis=-float('inf') for i in range(self.w.shape[0]): for j in range(self.w.shape[1]): ''' dis = x.dot(self.w[i,j]) #余弦距离 if dis > min_dis: min_dis = dis win_index = (i,j,dis) ''' dis =
np.linalg.norm(x-self.w[i,j])
numpy.linalg.norm
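The winning-neuron search measures Euclidean distance with numpy.linalg.norm; a two-dimensional toy case with assumed values:

import numpy as np

x = np.array([1.0, 2.0])                      # assumed input sample
w_ij = np.array([4.0, 6.0])                   # assumed neuron weight vector
dist = np.linalg.norm(x - w_ij)               # Euclidean distance, 5.0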
""" Script to evaluate the robust and adaptive Kalman estimator. Primarily used for to make conclusions and plots for the paper written for Stochastic System Theory (MSc) course at University of Belgrade, School of Electrical Engineering. Author: <NAME> (github: milsto) """ import numpy as np import matplotlib.pyplot as plt import matplotlib import sys sys.path.insert(0, '..') from robust_kalman import RobustKalman from robust_kalman.utils import HuberScore, VariablesHistory, WindowStatisticsEstimator np.random.seed(256) params = { 'axes.labelsize': 10, 'font.size': 10, 'legend.fontsize': 10, 'xtick.labelsize': 10, 'ytick.labelsize': 10, 'text.usetex': True, 'font.family': 'Times' } matplotlib.rcParams.update(params) # Plot robust score function # t = np.linspace(-50, 50, 1000) # h = HuberScore() # hv = np.vectorize(h.evaluate) # plt.plot(t, hv(t)) # plt.show() # Example 1 # dt = 0.01 # end_time = 1.0 # F = np.array([[1, dt, dt**2 / 2], [0, 1, dt], [0, 0, 1]], np.float32) # G = np.array([[0, 0, 1]], np.float32).T # H = np.array([[1, 0, 0]], np.float32) # x0 = np.array([[0.1, 0.1, 0.1]], np.float32).T # P0 = np.eye(3, dtype=np.float32) * 0.01 # sigma_process = 10.0 # sigma_measure = 1.0 # x0_kalman = np.array([[0, 0, 0]], np.float32).T # Example 2 dt = 0.01 end_time = 1.0 F = np.array([[1, dt], [0, 1]], np.float32) G = np.array([[0.5 * dt**2, dt]], np.float32).T H = np.array([[1, 0]], np.float32) x0 = np.array([[0.01, 0.01]], np.float32).T P0 = np.ones((2, 2), np.float32) * 0.001 sigma_process = 10.0 sigma_measure = 0.1 x0_kalman = np.array([[0, 0]], np.float32).T IS_SPIKE_EXPERIMENT = True PLOT_ADAPTIVE_CEE = True Q0 = np.matmul(G, G.T) * sigma_process**2 R0 = np.eye(1, dtype=np.float32) * sigma_measure**2 kalman_linear = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=False, use_adaptive_statistics=False) kalman_robust = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=True, use_adaptive_statistics=False) kalman_robust_stat = RobustKalman(F, None, H, x0_kalman, P0, Q0, R0, use_robust_estimation=True, use_adaptive_statistics=True) wstat_q = WindowStatisticsEstimator(win_size=25) wstat_r = WindowStatisticsEstimator(win_size=25) x = x0 z = np.matmul(H, x0) cee_x = 0.0 cee_xres = 0.0 cee_xres_stat = 0.0 step = 2 t_axis = np.arange(0, end_time, dt) history = VariablesHistory() for t in t_axis: history.update('x', x) history.update('z', z) history.update('x_kalman', kalman_linear.current_estimate) history.update('x_kalman_robust', kalman_robust.current_estimate) history.update('x_kalman_robust_stat', kalman_robust_stat.current_estimate) cee_x += (np.linalg.norm(kalman_linear.current_estimate - x) / (np.linalg.norm(x) + 0.0001)) / step cee_xres += (np.linalg.norm(kalman_robust.current_estimate - x) / (np.linalg.norm(x) + 0.0001)) / step cee_xres_stat += (np.linalg.norm(kalman_robust_stat.current_estimate - x) / (np.linalg.norm(x) + 0.0001)) / step history.update('cee_x_history', cee_x) history.update('cee_xres_history', cee_xres) history.update('cee_xres_stat_history', cee_xres_stat) history.update('r_mean_est', kalman_robust_stat.r_mean_est) history.update('r_var_est', kalman_robust_stat.r_var_est) q = np.random.normal(0.0, sigma_process, size=(1, 1)) if not IS_SPIKE_EXPERIMENT: r = 0.85 * np.random.normal(0.0, sigma_measure, size=(1, 1)) + 0.15 *
np.random.normal(0.0, 5.0, size=(1, 1))
numpy.random.normal
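The spike experiment draws measurement noise from a two-component Gaussian mixture via numpy.random.normal; a reproducible sketch with the same shapes (the seed and the 0.1 standard deviation are chosen here for illustration only):

import numpy as np

np.random.seed(0)                             # arbitrary seed for reproducibility
r = 0.85 * np.random.normal(0.0, 0.1, size=(1, 1)) \
    + 0.15 * np.random.normal(0.0, 5.0, size=(1, 1))   # occasional heavy-tailed spikes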
import torch import torch.nn as nn import torch.nn.functional as F from torch.autograd import Variable import numpy as np from model.utils.config import cfg from model.rpn.bbox_transform import bbox_transform_inv, clip_boxes from model.fcgn.bbox_transform_grasp import labels2points, grasp_decode from model.roi_layers import nms import networkx as nx def save_net(fname, net): import h5py h5f = h5py.File(fname, mode='w') for k, v in net.state_dict().items(): h5f.create_dataset(k, data=v.cpu().numpy()) def load_net(fname, net): import h5py h5f = h5py.File(fname, mode='r') for k, v in net.state_dict().items(): param = torch.from_numpy(np.asarray(h5f[k])) v.copy_(param) def weights_normal_init(module, dev=0.01, bias = 0): if isinstance(module, list): for m in module: weights_normal_init(m, dev) else: for m in module.modules(): if hasattr(m, 'weight'): nn.init.normal_(m.weight, 0.0, dev) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, bias) def weights_xavier_init(module, gain=1, bias=0, distribution='normal'): if isinstance(module, list): for m in module: weights_xavier_init(m) else: assert distribution in ['uniform', 'normal'] for m in module.modules(): if hasattr(m, 'weight'): if distribution == 'uniform': nn.init.xavier_uniform_(m.weight, gain=gain) else: nn.init.xavier_normal_(m.weight, gain=gain) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, bias) def weights_uniform_init(module, a=0, b=1, bias=0): if isinstance(module, list): for m in module: weights_uniform_init(m, a, b) else: for m in module.modules(): if hasattr(m, 'weight'): nn.init.uniform_(m.weight, a, b) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, bias) def weight_kaiming_init(module, mode='fan_out', nonlinearity='relu', bias=0, distribution='normal'): if isinstance(module, list): for m in module: weight_kaiming_init(m, mode, nonlinearity, bias, distribution) else: assert distribution in ['uniform', 'normal'] for m in module.modules(): if hasattr(m, 'weight'): if distribution == 'uniform': nn.init.kaiming_uniform_( m.weight, mode=mode, nonlinearity=nonlinearity) else: nn.init.kaiming_normal_( m.weight, mode=mode, nonlinearity=nonlinearity) if hasattr(m, 'bias') and m.bias is not None: nn.init.constant_(m.bias, bias) def bias_init_with_prob(prior_prob): """ initialize conv/fc bias value according to giving probablity""" bias_init = float(-np.log((1 - prior_prob) / prior_prob)) return bias_init def set_bn_fix(m): classname = m.__class__.__name__ if classname.find('BatchNorm') != -1: for p in m.parameters(): p.requires_grad=False def set_bn_eval(m): classname = m.__class__.__name__ if classname.find('BatchNorm') != -1: m.eval() def clip_gradient(model, clip_norm): """Computes a gradient clipping coefficient based on gradient norm.""" totalnorm = 0 for p in model.parameters(): if p.requires_grad and p.grad is not None: modulenorm = p.grad.data.norm() totalnorm += modulenorm ** 2 totalnorm = np.sqrt(totalnorm.item()) norm = clip_norm / max(totalnorm, clip_norm) for p in model.parameters(): if p.requires_grad and p.grad is not None: p.grad.mul_(norm) def adjust_learning_rate(optimizer, decay=0.1): """Sets the learning rate to the initial LR decayed by 0.5 every 20 epochs""" for param_group in optimizer.param_groups: param_group['lr'] = decay * param_group['lr'] def save_checkpoint(state, filename): torch.save(state, filename) def _smooth_l1_loss(bbox_pred, bbox_targets, bbox_inside_weights, bbox_outside_weights, sigma=1.0, dim=[1]): sigma_2 = sigma ** 2 
box_diff = bbox_pred - bbox_targets in_box_diff = bbox_inside_weights * box_diff abs_in_box_diff = torch.abs(in_box_diff) smoothL1_sign = (abs_in_box_diff < 1. / sigma_2).detach().float() in_loss_box = torch.pow(in_box_diff, 2) * (sigma_2 / 2.) * smoothL1_sign \ + (abs_in_box_diff - (0.5 / sigma_2)) * (1. - smoothL1_sign) out_loss_box = bbox_outside_weights * in_loss_box loss_box = out_loss_box for i in sorted(dim, reverse=True): loss_box = loss_box.sum(i) loss_box = loss_box.mean() return loss_box def _focal_loss(cls_prob, labels, alpha = 0.25, gamma = 2): labels = labels.view(-1) final_prob = torch.gather(cls_prob.view(-1, cls_prob.size(-1)), 1, labels.unsqueeze(1)) loss_cls = - torch.log(final_prob) # setting focal weights focal_weights = torch.pow((1. - final_prob), gamma) # setting the coefficient to balance pos and neg samples. alphas = torch.Tensor(focal_weights.shape).zero_().type_as(focal_weights) alphas[labels == 0] = 1. - alpha alphas[labels > 0] = alpha loss_cls = (loss_cls * focal_weights * alphas).sum() / torch.clamp(torch.sum(labels > 0).float(), min = 1.0) # loss_cls = (loss_cls * focal_weights * alphas).mean() return loss_cls def _crop_pool_layer(bottom, rois, max_pool=True): # code modified from # https://github.com/ruotianluo/pytorch-faster-rcnn # implement it using stn # box to affine # input (x1,y1,x2,y2) """ [ x2-x1 x1 + x2 - W + 1 ] [ ----- 0 --------------- ] [ W - 1 W - 1 ] [ ] [ y2-y1 y1 + y2 - H + 1 ] [ 0 ----- --------------- ] [ H - 1 H - 1 ] """ rois = rois.detach() batch_size = bottom.size(0) D = bottom.size(1) H = bottom.size(2) W = bottom.size(3) roi_per_batch = rois.size(0) / batch_size x1 = rois[:, 1::4] / 16.0 y1 = rois[:, 2::4] / 16.0 x2 = rois[:, 3::4] / 16.0 y2 = rois[:, 4::4] / 16.0 height = bottom.size(2) width = bottom.size(3) # affine theta zero = Variable(rois.data.new(rois.size(0), 1).zero_()) theta = torch.cat([\ (x2 - x1) / (width - 1), zero, (x1 + x2 - width + 1) / (width - 1), zero, (y2 - y1) / (height - 1), (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3) if max_pool: pre_pool_size = cfg.RCNN_COMMON.POOLING_SIZE * 2 grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, pre_pool_size, pre_pool_size))) bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W)\ .contiguous().view(-1, D, H, W) crops = F.grid_sample(bottom, grid) crops = F.max_pool2d(crops, 2, 2) else: grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, cfg.RCNN_COMMON.POOLING_SIZE, cfg.RCNN_COMMON.POOLING_SIZE))) bottom = bottom.view(1, batch_size, D, H, W).contiguous().expand(roi_per_batch, batch_size, D, H, W)\ .contiguous().view(-1, D, H, W) crops = F.grid_sample(bottom, grid) return crops, grid def _affine_grid_gen(rois, input_size, grid_size): rois = rois.detach() x1 = rois[:, 1::4] / 16.0 y1 = rois[:, 2::4] / 16.0 x2 = rois[:, 3::4] / 16.0 y2 = rois[:, 4::4] / 16.0 height = input_size[0] width = input_size[1] zero = Variable(rois.data.new(rois.size(0), 1).zero_()) theta = torch.cat([\ (x2 - x1) / (width - 1), zero, (x1 + x2 - width + 1) / (width - 1), zero, (y2 - y1) / (height - 1), (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3) grid = F.affine_grid(theta, torch.Size((rois.size(0), 1, grid_size, grid_size))) return grid def _affine_theta(rois, input_size): rois = rois.detach() x1 = rois[:, 1::4] / 16.0 y1 = rois[:, 2::4] / 16.0 x2 = rois[:, 3::4] / 16.0 y2 = rois[:, 4::4] / 16.0 height = input_size[0] width = input_size[1] zero = Variable(rois.data.new(rois.size(0), 1).zero_()) # theta = torch.cat([\ # 
(x2 - x1) / (width - 1), # zero, # (x1 + x2 - width + 1) / (width - 1), # zero, # (y2 - y1) / (height - 1), # (y1 + y2 - height + 1) / (height - 1)], 1).view(-1, 2, 3) theta = torch.cat([\ (y2 - y1) / (height - 1), zero, (y1 + y2 - height + 1) / (height - 1), zero, (x2 - x1) / (width - 1), (x1 + x2 - width + 1) / (width - 1)], 1).view(-1, 2, 3) return theta def box_unnorm_torch(box, normalizer, d_box = 4, class_agnostic=True, n_cls = None): mean = normalizer['mean'] std = normalizer['std'] assert len(mean) == len(std) == d_box if box.dim() == 2: if class_agnostic: box = box * torch.FloatTensor(std).type_as(box) + torch.FloatTensor(mean).type_as(box) else: box = box.view(-1, d_box) * torch.FloatTensor(std).type_as(box) + torch.FloatTensor(mean).type_as(box) box = box.view(-1, d_box * n_cls) elif box.dim() == 3: batch_size = box.size(0) if class_agnostic: box = box.view(-1, d_box) * torch.FloatTensor(std).type_as(box) + torch.FloatTensor(mean).type_as(box) box = box.view(batch_size, -1, d_box) else: box = box.view(-1, d_box) * torch.FloatTensor(std).type_as(box) + torch.FloatTensor(mean).type_as(box) box = box.view(batch_size, -1, d_box * n_cls) return box def box_recover_scale_torch(box, x_scaler, y_scaler): if box.dim() == 2: box[:, 0::2] /= x_scaler box[:, 1::2] /= y_scaler elif box.dim() == 3: box[:, :, 0::2] /= x_scaler box[:, :, 1::2] /= y_scaler elif box.dim() == 4: box[:, :, :, 0::2] /= x_scaler box[:, :, :, 1::2] /= y_scaler return box def box_filter(box, box_scores, thresh, use_nms = True): """ :param box: N x d_box :param box_scores: N scores :param thresh: :param use_nms: :return: """ d_box = box.size(-1) inds = torch.nonzero(box_scores > thresh).view(-1) if inds.numel() > 0: cls_scores = box_scores[inds] _, order = torch.sort(cls_scores, 0, True) cls_boxes = box[inds, :] if use_nms: cls_dets = torch.cat((cls_boxes, cls_scores.unsqueeze(1)), 1) cls_dets = cls_dets[order] keep = nms(cls_dets[:, :4], cls_dets[:, 4], cfg.TEST.COMMON.NMS) cls_scores = cls_dets[keep.view(-1).long()][:, -1] cls_dets = cls_dets[keep.view(-1).long()][:, :-1] order = order[keep.view(-1).long()] else: cls_scores = cls_scores[order] cls_dets = cls_boxes[order] cls_dets = cls_dets.cpu().numpy() cls_scores = cls_scores.cpu().numpy() order = order.cpu().numpy() else: cls_scores = np.zeros(shape=(0,), dtype=np.float32) cls_dets = np.zeros(shape=(0, d_box), dtype=np.float32) order =
np.array([], dtype=np.int32)
numpy.array
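When no boxes pass the threshold, the branch falls back to empty arrays; the numpy.array call from the completion shown standalone, with d_box assumed to be 4:

import numpy as np

order = np.array([], dtype=np.int32)                  # empty index array
cls_dets = np.zeros(shape=(0, 4), dtype=np.float32)   # matching empty detections (d_box = 4 assumed)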
import os import time import pickle import math import numpy as np import linecache import matplotlib.pyplot as plt # from matplotlib.pyplot import MultipleLocator import grid path = 'E:\dataset\didi' orders_file_name = 'gps_20161101_only_reserve_appearance_sampled' # '.pkl' will be added for binary file orders_processed_file_name = 'driver_20161101_processed' # '.pkl' will be added for binary file earliest_time = '2016-11-01 00:00:00' # include latest_time = '2016-11-02 00:00:00' # not include random_seed = 4321 size_hexagon_to_edge = 0.0048 hexagon_size_factor_for_plot = 1 range_map_longitude = [103.96, 104.18] range_map_latitude = [30.59, 30.77] time_interval_min = 10 # min NOT_IN_MAP_GRID_ID = -100 save_data = True size_hexagon = size_hexagon_to_edge * 2 / math.sqrt(3) # length to the point time_interval_sec = time_interval_min * 60 with open(os.path.join(path, orders_file_name+'.pkl'), 'rb') as f: orders = pickle.load(f) n_orders = len(orders) orders_ts = [] orders_lo = [] orders_processed = [] grid_to_order_start = {} grid_to_order_end = {} order_start_time_stat = [0] * (math.ceil(1440/time_interval_min)) order_end_time_stat = [0] * (math.ceil(1440/time_interval_min)) order_duration_stat = [0] * (math.ceil(1440/time_interval_min)) # duration org_order_start_time_stat = [] org_order_end_time_stat = [] org_order_duration_stat = [] # duration def convert_time_stamp(ts): return time.strftime('%Y-%m-%d %H:%M:%S', time.localtime(ts)) def make_time_stamp(t): return int(time.mktime(time.strptime(t, "%Y-%m-%d %H:%M:%S"))) earliest_time_stamp = make_time_stamp(earliest_time) latest_time_stamp = make_time_stamp(latest_time) org_order_start_time_stat = [0] * (latest_time_stamp-earliest_time_stamp) org_order_end_time_stat = [0] * (latest_time_stamp-earliest_time_stamp) org_order_duration_stat = [0] * (latest_time_stamp-earliest_time_stamp) # duration print('earliest_time_stamp is', earliest_time_stamp) print('latest_time_stamp is', latest_time_stamp) print('time_interval_sec is', time_interval_sec) print() def time_is_valid(t): return earliest_time_stamp <= t and t < latest_time_stamp def make_time_unit(ts): return ts // time_interval_sec def cal_min_dist(p, mat): # print(mat.shape) x_dist_mat = mat[:, :, 0] - p[0] y_dist_mat = mat[:, :, 1] - p[1] dist_mat = (x_dist_mat**2 + y_dist_mat**2) ind_x, ind_y = np.unravel_index(np.argmin(dist_mat, axis=None), dist_mat.shape) return ind_x, ind_y, dist_mat[ind_x, ind_y]**0.5 def reduce_tail_zero(l): i = len(l) - 1 while l[i] == 0: i -= 1 return l[:i+1] for oo in orders: orders_ts.append(oo[1:3]) orders_lo.append(oo[3:]) assert len(orders_ts) == n_orders assert len(orders_lo) == n_orders orders_ts = np.array(orders_ts) orders_lo = np.array(orders_lo) print('orders_ts.shape is', orders_ts.shape) print('orders_lo.shape is', orders_lo.shape) print('examples: ') print('time stamp') print(orders_ts[0]) print(orders_ts[5]) print('location') print(orders_lo[0]) print(orders_lo[5]) print() range_start_time = [convert_time_stamp(np.min(orders_ts[:, 0])), convert_time_stamp(np.max(orders_ts[:, 0]))] range_end_time = [convert_time_stamp(np.min(orders_ts[:, 1])), convert_time_stamp(np.max(orders_ts[:, 1]))] range_start_longitude = [np.min(orders_lo[:, 0]),
np.max(orders_lo[:, 0])
numpy.max
# -*- coding: utf-8 -*- '''Chemical Engineering Design Library (ChEDL). Utilities for process modeling. Copyright (C) 2016, 2017 2018 <NAME> <<EMAIL>> Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions: The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software. THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.''' from __future__ import division from math import log10 from scipy.optimize import newton import numpy as np from fluids import * from numpy.testing import assert_allclose import pytest def test_friction(): assert_allclose(Moody(1E5, 1E-4), 0.01809185666808665) assert_allclose(Alshul_1952(1E5, 1E-4), 0.018382997825686878) assert_allclose(Wood_1966(1E5, 1E-4), 0.021587570560090762) assert_allclose(Churchill_1973(1E5, 1E-4), 0.01846708694482294) assert_allclose(Eck_1973(1E5, 1E-4), 0.01775666973488564) assert_allclose(Jain_1976(1E5, 1E-4), 0.018436560312693327) assert_allclose(Swamee_Jain_1976(1E5, 1E-4), 0.018452424431901808) assert_allclose(Churchill_1977(1E5, 1E-4), 0.018462624566280075) assert_allclose(Chen_1979(1E5, 1E-4), 0.018552817507472126) assert_allclose(Round_1980(1E5, 1E-4), 0.01831475391244354) assert_allclose(Shacham_1980(1E5, 1E-4), 0.01860641215097828) assert_allclose(Barr_1981(1E5, 1E-4), 0.01849836032779929) assert_allclose(Zigrang_Sylvester_1(1E5, 1E-4), 0.018646892425980794) assert_allclose(Zigrang_Sylvester_2(1E5, 1E-4), 0.01850021312358548) assert_allclose(Haaland(1E5, 1E-4), 0.018265053014793857) assert_allclose(Serghides_1(1E5, 1E-4), 0.01851358983180063) assert_allclose(Serghides_2(1E5, 1E-4), 0.018486377560664482) assert_allclose(Tsal_1989(1E5, 1E-4), 0.018382997825686878) assert_allclose(Tsal_1989(1E8, 1E-4), 0.012165854627780102) assert_allclose(Manadilli_1997(1E5, 1E-4), 0.01856964649724108) assert_allclose(Romeo_2002(1E5, 1E-4), 0.018530291219676177) assert_allclose(Sonnad_Goudar_2006(1E5, 1E-4), 0.0185971269898162) assert_allclose(Rao_Kumar_2007(1E5, 1E-4), 0.01197759334600925) assert_allclose(Buzzelli_2008(1E5, 1E-4), 0.018513948401365277) assert_allclose(Avci_Karagoz_2009(1E5, 1E-4), 0.01857058061066499) assert_allclose(Papaevangelo_2010(1E5, 1E-4), 0.015685600818488177) assert_allclose(Brkic_2011_1(1E5, 1E-4), 0.01812455874141297) assert_allclose(Brkic_2011_2(1E5, 1E-4), 0.018619745410688716) assert_allclose(Fang_2011(1E5, 1E-4), 0.018481390682985432) assert_allclose(Clamond(1E5, 1E-4), 0.01851386607747165) assert_allclose(Clamond(1E5, 1E-4, fast=True), 0.01851486771096876) assert_allclose(Colebrook(1E5, 1E-4), 0.018513866077471648) # Test the colebrook is the clamond when tol=-1 assert Colebrook(1E5, 1E-4, -1) == Clamond(1E5, 1E-4) # Test the colebrook is the analytical solution when Re < 10 # even when 
the clamond solution is specified assert Colebrook(1, 1E-4, -1) == Colebrook(1, 1e-4) assert_allclose(friction_laminar(128), 0.5) assert_allclose(Blasius(10000), 0.03164) assert_allclose(sum(_roughness.values()), 0.01504508) assert_allclose(friction_factor(Re=1E5, eD=1E-4), 0.01851386607747165) methods_1 = friction_factor(Re=1E5, eD=1E-4, AvailableMethods=True) methods_1.sort() methods_2 = ['Clamond', 'Colebrook', 'Manadilli_1997', 'Haaland', 'Alshul_1952', 'Avci_Karagoz_2009', 'Rao_Kumar_2007', 'Zigrang_Sylvester_2', 'Eck_1973', 'Buzzelli_2008', 'Tsal_1989', 'Papaevangelo_2010', 'Barr_1981', 'Jain_1976', 'Moody', 'Brkic_2011_2', 'Brkic_2011_1', 'Swamee_Jain_1976', 'Wood_1966', 'Shacham_1980', 'Romeo_2002', 'Chen_1979', 'Fang_2011', 'Round_1980', 'Sonnad_Goudar_2006', 'Churchill_1973', 'Churchill_1977', 'Serghides_2', 'Serghides_1', 'Zigrang_Sylvester_1'] methods_2.sort() assert methods_1 == methods_2 assert_allclose(friction_factor(Re=1E5, eD=1E-4, Darcy=False), 0.01851386607747165/4) assert_allclose(friction_factor(Re=128), 0.5) assert_allclose(friction_factor(Re=1E5, eD=0, Method=None), 0.01798977308427384) fd = ft_Crane(.1) assert_allclose(fd, 0.01628845962146481) Di = 0.1 fd_act = Colebrook(7.5E6*Di, eD=roughness_Farshad(ID='Carbon steel, bare', D=Di)/Di) assert_allclose(fd, fd_act, rtol=5e-6) @pytest.mark.slow @pytest.mark.mpmath def test_Colebrook_numerical_mpmath(): # tested at n=500 for both Re and eD Res = np.logspace(np.log10(1e-12), np.log10(1E12), 30) # 1E12 is too large for sympy - it slows down too much eDs = np.logspace(np.log10(1e-20), np.log10(.1), 21) # 1-1e-9 for Re in Res: for eD in eDs: fd_exact = Colebrook(Re, eD, tol=0) fd_numerical = Colebrook(Re, eD, tol=1e-12) assert_allclose(fd_exact, fd_numerical, rtol=1e-5) @pytest.mark.slow @pytest.mark.mpmath def test_Colebrook_scipy_mpmath(): # Faily grueling test - check the lambertw implementations are matching mostly # NOTE the test is to Re = 1E7; at higher Res the numerical solver is almost # always used Res = np.logspace(np.log10(1e-12), np.log10(1e7), 20) # 1E12 is too large for sympy eDs = np.logspace(np.log10(1e-20), np.log10(.1), 19) # 1-1e-9 for Re in Res: for eD in eDs: Re = float(Re) eD = float(eD) fd_exact = Colebrook(Re, eD, tol=0) fd_scipy = Colebrook(Re, eD) assert_allclose(fd_exact, fd_scipy, rtol=1e-9) @pytest.mark.slow def test_Colebrook_vs_Clamond(): Res = np.logspace(np.log10(10), np.log10(1E50), 40).tolist() eDs = np.logspace(np.log10(1e-20), np.log10(1), 40).tolist() for Re in Res: for eD in eDs: fd_exact = Colebrook(Re, eD) fd_clamond = Clamond(Re, eD) # Interestingly, matches to rtol=1e-9 vs. 
numerical solver # But does not have such accuracy compared to mpmath if np.isnan(fd_exact) or np.isnan(fd_clamond): continue # older scipy on 3.4 returns a nan sometimes assert_allclose(fd_exact, fd_clamond, rtol=1e-9) # If rtol is moved to 1E-7, eD can be increased to 1 @pytest.mark.mpmath def test_Colebrook_hard_regimes(): fd_inf_regime = Colebrook(104800000000, 2.55e-08) assert_allclose(fd_inf_regime, 0.0037751087365339906, rtol=1e-10) def test_one_phase_dP(): dP = one_phase_dP(10.0, 1000, 1E-5, .1, L=1) assert_allclose(dP, 63.43447321097365) def test_one_phase_dP_gravitational(): dP = one_phase_dP_gravitational(angle=90, rho=2.6) assert_allclose(dP, 25.49729) dP = one_phase_dP_gravitational(angle=90, rho=2.6, L=2) assert_allclose(dP, 25.49729*2) def test_one_phase_dP_dz_acceleration(): dP = one_phase_dP_dz_acceleration(m=1, D=0.1, rho=827.1, dv_dP=-1.1E-5, dP_dL=5E5, dA_dL=0.0001) assert_allclose(dP, 89162.89116373913) @pytest.mark.slow @pytest.mark.thermo @pytest.mark.skip def test_one_phase_dP_dz_acceleration_example(): # This requires thermo! from thermo import Stream, Vm_to_rho from fluids import one_phase_dP, one_phase_dP_acceleration import numpy as np from scipy.integrate import odeint from numpy.testing import assert_allclose P0 = 1E5 s = Stream(['nitrogen', 'methane'], T=300, P=P0, zs=[0.5, 0.5], m=1) rho0 = s.rho D = 0.1 def dP_dz(P, L, acc=False): s.flash(P=float(P), Hm=s.Hm) dPf = one_phase_dP(m=s.m, rho=s.rhog, mu=s.rhog, D=D, roughness=0, L=1) if acc: G = 4.0*s.m/(np.pi*D*D) der = s.VolumeGasMixture.property_derivative_P(P=s.P, T=s.T, zs=s.zs, ws=s.ws) der = 1/Vm_to_rho(der, s.MW) factor = G*G*der dP = dPf/(1.0 + factor) return -dP return -dPf ls = np.linspace(0, .01) dP_noacc = odeint(dP_dz, s.P, ls, args=(False,))[-1] s.flash(P=float(P0), Hm=s.Hm) # Reset the stream object profile = odeint(dP_dz, s.P, ls, args=(True,)) dP_acc = profile[-1] s.flash(P=dP_acc, Hm=s.Hm) rho1 = s.rho dP_acc_numerical = dP_noacc - dP_acc dP_acc_basic = one_phase_dP_acceleration(m=s.m, D=D, rho_o=rho1, rho_i=rho0) assert_allclose(dP_acc_basic, dP_acc_numerical, rtol=1E-4) def test_transmission_factor(): assert_allclose(transmission_factor(fd=0.0185), 14.704292441876154) assert_allclose(transmission_factor(F=14.704292441876154), 0.0185) assert_allclose(transmission_factor(0.0185), 14.704292441876154) # Example in [1]_, lists answer as 12.65 assert_allclose(transmission_factor(fd=0.025), 12.649110640673516) with pytest.raises(Exception): transmission_factor() def test_roughness_Farshad(): e = roughness_Farshad('Cr13, bare', 0.05) assert_allclose(e, 5.3141677781137006e-05) e = roughness_Farshad('Cr13, bare') assert_allclose(e, 5.5e-05) e = roughness_Farshad(coeffs=(0.0021, -1.0055), D=0.05) assert_allclose(e, 5.3141677781137006e-05) tot = sum([abs(j) for i in _Farshad_roughness.values() for j in i]) assert_allclose(tot, 7.0729095) with pytest.raises(Exception): roughness_Farshad('BADID', 0.05) def test_nearest_material_roughness(): hit1 = nearest_material_roughness('condensate pipes', clean=False) assert hit1 == 'Seamless steel tubes, Condensate pipes in open systems or periodically operated steam pipelines' hit2 = nearest_material_roughness('Plastic', clean=True) assert hit2 == 'Plastic coated' def test_material_roughness(): e1 = material_roughness('Plastic coated') assert_allclose(e1, 5e-06) e2 = material_roughness('Plastic coated', D=1E-3) assert_allclose(e2, 5.243618447826409e-06) e3 = material_roughness('Brass') assert_allclose(e3, 1.52e-06) e4 = material_roughness('condensate pipes')
assert_allclose(e4, 0.0005)
numpy.testing.assert_allclose
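Most of the correlations exercised above are explicit approximations to the implicit Colebrook equation for the Darcy friction factor. For reference, a minimal sketch of solving that relation directly with Newton/secant iteration is shown below; it is not the fluids implementation, just the textbook form 1/sqrt(fd) = -2*log10(eD/3.7 + 2.51/(Re*sqrt(fd))).

from math import log10, sqrt
from scipy.optimize import newton

def colebrook_sketch(Re, eD, f0=0.02):
    # residual of the implicit Colebrook relation; its root is the friction factor
    resid = lambda fd: 1.0 / sqrt(fd) + 2.0 * log10(eD / 3.7 + 2.51 / (Re * sqrt(fd)))
    return newton(resid, f0)

# colebrook_sketch(1e5, 1e-4) lands near the 0.018513866... value tested above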
import pandapower as pp from pandapower.grid_equivalents.auxiliary import calc_zpbn_parameters, \ check_validity_of_Ybus_eq, drop_internal_branch_elements, \ build_ppc_and_Ybus, drop_measurements_and_controller, \ drop_and_edit_cost_functions from copy import deepcopy import pandas as pd import numpy as np import operator import time import uuid from functools import reduce try: import pandaplan.core.pplog as logging except ImportError: import logging logger = logging.getLogger(__name__) def _runpp_except_voltage_angles(net, **kwargs): if "calculate_voltage_angles" not in kwargs or not kwargs["calculate_voltage_angles"]: pp.runpp(net, **kwargs) else: try: pp.runpp(net, **kwargs) except pp.LoadflowNotConverged: kwargs1 = deepcopy(kwargs) kwargs1["calculate_voltage_angles"] = False pp.runpp(net, **kwargs1) logger.warning("In REI generation, the power flow did converge only without " "calculate_voltage_angles.") def _calculate_equivalent_Ybus(net_zpbn, bus_lookups, eq_type, show_computing_time=False, check_validity=False): """ The function orders the admittance matrix of the original network into new format firstly, which is convenient for rei equivalent calculation.d Then it calculates the equivalent admittance matrix of the given network i: internal b: boundary e: external g: ground t: total Ymat_trans = [ Ybus_ii, Ybus_ib, 0 ] [Ybus_ii, Ybus_ib, 0 , 0 , 0 ] [ Ybus_bi, Ybus_bb, Ybus_be ] = _________________ _________________ [ 0 , Ybus_eb, Ybus_ee ] [Ybus_bi, |Ybus_bb , 0 |, | 0 , Ybus_be| ] | | | | [ 0 , | 0 ,Ybus_tt|, |Ybus_tg, 0 | ] |________________| |________________| _________________ _________________ [ 0 , | 0 ,Ybus_gt|, |Ybus_gg, Ybus_ge| ] [ 0 , |Ybus_eb , 0 |, |Ybus_eg, Ybus_ee| ] |________________| |________________| INPUT: **net_zpbn** - zero power balance network (pandapower network) **bus_lookups** (dict) - bus lookups **eq_type** (str) - the equavalten type OPTIONAL: **check_validity** (bool, False) - XXXX OUTPUT: **Ybus** - equivalent admittance matrix of the external network """ t_start = time.perf_counter() # --- initialization Ybus_origin = net_zpbn._ppc["internal"]["Ybus"].todense() Ybus_sorted = net_zpbn._ppc["internal"]["Ybus"].todense() bus_lookup_ppc = bus_lookups["bus_lookup_ppc"] nb_dict = {} for key in bus_lookup_ppc.keys(): if key != "b_area_buses_no_switch": nb_dict["nb_"+key.split("_")[0]] = len(bus_lookup_ppc[key]) Ybus_buses = list(bus_lookup_ppc.values()) Ybus_new_sequence = reduce(operator.concat, Ybus_buses) # --- transform Ybus_origin to Ybus_new according to the Ybus_new_sequence for i in range(len(Ybus_new_sequence)): for j in range(len(Ybus_new_sequence)): # --- if xward, put very large admittance at the diagonals (PV-bus) of Ybus if eq_type == "xward" and i >= nb_dict["nb_i"]+nb_dict["nb_b"] and \ i == j and Ybus_new_sequence[i] in net_zpbn._ppc["gen"][:, 0]: Ybus_sorted[i, j] = 1e8 else: Ybus_sorted[i, j] = Ybus_origin[Ybus_new_sequence[i], Ybus_new_sequence[j]] # --- calculate calculate equivalent Ybus and equivalent Ybus without_internals Ybus_bb = Ybus_sorted[nb_dict["nb_i"]:(nb_dict["nb_i"] + nb_dict["nb_b"] + nb_dict["nb_t"]), nb_dict["nb_i"]:(nb_dict["nb_i"] + nb_dict["nb_b"] + nb_dict["nb_t"])] Ybus_ee = Ybus_sorted[-(nb_dict["nb_e"] + nb_dict["nb_g"]):, -(nb_dict["nb_e"] + nb_dict["nb_g"]):] Ybus_eb = Ybus_sorted[-(nb_dict["nb_e"] + nb_dict["nb_g"]):, nb_dict["nb_i"]:(nb_dict["nb_i"] + nb_dict["nb_b"] + nb_dict["nb_t"])] Ybus_be = Ybus_eb.T try: inverse_Ybus_ee = np.linalg.inv(Ybus_ee) except np.linalg.LinAlgError as err: if 'Singular 
matrix' in str(err): logger.debug("Ymat_ee is a singular martix, now try to compute the \ pseudo-inverse of the matrix.") inverse_Ybus_ee = np.linalg.pinv(Ybus_ee) Ybus_eq_boundary = Ybus_bb - (Ybus_be * inverse_Ybus_ee * Ybus_eb) Ybus_eq = np.copy(Ybus_sorted[0: nb_dict["nb_i"] + nb_dict["nb_b"] + nb_dict["nb_t"], 0: nb_dict["nb_i"] + nb_dict["nb_b"] + nb_dict["nb_t"]]) Ybus_eq[-(nb_dict["nb_b"] + nb_dict["nb_t"]):, -(nb_dict["nb_b"] + nb_dict["nb_t"]):] = Ybus_eq_boundary # --- the validity of the equivalent Ybus will be checked if check_validity: power_check_df = check_validity_of_Ybus_eq(net_zpbn, Ybus_eq, bus_lookups) logger.debug(power_check_df) act_p = net_zpbn.res_ext_grid.p_mw[power_check_df.ext_grid_index].values act_q = net_zpbn.res_ext_grid.q_mvar[power_check_df.ext_grid_index].values real_p = power_check_df.power.values.real real_q = power_check_df.power.values.imag assert abs(max(act_p - real_p)) < 1e-3 assert abs(max(act_q - real_q)) < 1e-3 t_end = time.perf_counter() if show_computing_time: logger.info("\"calculate_equivalent_Ybus\" finished in %s seconds:" % round(( t_end-t_start), 2)) return Ybus_eq def adapt_impedance_params(Z, sign=1, adaption=1e-15): """ In some extreme cases, the created admittance matrix of the zpbn network is singular. The routine is unsolvalbe with it. In response, an impedance adaption is created and added. """ rft_pu = Z.real + sign*adaption xft_pu = Z.imag + sign*adaption return rft_pu, xft_pu def _create_net_zpbn(net, boundary_buses, all_internal_buses, all_external_buses, load_separate=False, sgen_separate=True, gen_separate=True, show_computing_time=False, calc_volt_angles=True, **kwargs): """ The function builds the zero power balance network with calculated impedance and voltage INPUT: **net** - pandapower network **boundary_buses** (list) - boundary buses **all_internal_buses** - all the internal buses **all_external_buses** - all the external buses OPTIONAL: **load_separate** (bool, False) - flag if all the loads are reserved integrally **sgen_separate** (bool, True) - flag if all the DER are reserved separately **gen_separate** (bool, True) - flag if all the gens are reserved separately **tolerance_mva** (float, 1e-3) - loadflow termination condition referring to P / Q mismatch of node power in MVA. The loalflow hier is to get the admittance matrix of the zpbn network OUTPUT: **net_zpbn** - zero power balance networks """ net_internal, net_external = _get_internal_and_external_nets( net, boundary_buses, all_internal_buses, all_external_buses, show_computing_time, calc_volt_angles=calc_volt_angles) net_zpbn = net_external Z, S, v, limits = calc_zpbn_parameters(net_zpbn, boundary_buses, all_external_buses) # --- remove the original load, sgen and gen in exteranl area, # and creat new buses and impedance t_buses, g_buses = [], [] sn_mva = net_zpbn.sn_mva for elm, separate in [("load", load_separate), ("sgen", sgen_separate), ("gen", gen_separate), ("ext_grid", False)]: # in Z columns only gen, load and sgens are considered, so we can leave out ext_grid net_zpbn[elm].drop(net_zpbn[elm].index[net_zpbn[elm].bus.isin(all_external_buses)], inplace=True) if elm == "ext_grid": continue if not
np.isnan(Z[elm+"_ground"].values)
numpy.isnan
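The boundary-equivalent step above (Ybus_bb minus Ybus_be * inv(Ybus_ee) * Ybus_eb) is a Kron reduction, i.e. a Schur complement that eliminates the external and ground buses. A standalone sketch of the same operation on a dense admittance matrix is given below with hypothetical index arrays; it uses a linear solve rather than an explicit inverse.

import numpy as np

def kron_reduce(Y, keep_idx, elim_idx):
    # Eliminate the buses in elim_idx, returning an equivalent admittance
    # matrix that acts only on the retained (boundary/internal) buses.
    Ykk = Y[np.ix_(keep_idx, keep_idx)]
    Yke = Y[np.ix_(keep_idx, elim_idx)]
    Yek = Y[np.ix_(elim_idx, keep_idx)]
    Yee = Y[np.ix_(elim_idx, elim_idx)]
    return Ykk - Yke @ np.linalg.solve(Yee, Yek)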
from wordcloud import WordCloud, get_single_color_func, ImageColorGenerator import numpy as np import pytest from random import Random from numpy.testing import assert_array_equal from PIL import Image import xml.etree.ElementTree as ET import matplotlib matplotlib.use('Agg') THIS = """The Zen of Python, by <NAME> Beautiful is better than ugly. Explicit is better than implicit. Simple is better than complex. Complex is better than complicated. Flat is better than nested. Sparse is better than dense. Readability counts. Special cases aren't special enough to break the rules. Although practicality beats purity. Errors should never pass silently. Unless explicitly silenced. In the face of ambiguity, refuse the temptation to guess. There should be one-- and preferably only one --obvious way to do it. Although that way may not be obvious at first unless you're Dutch. Now is better than never. Although never is often better than *right* now. If the implementation is hard to explain, it's a bad idea. If the implementation is easy to explain, it may be a good idea. Namespaces are one honking great idea -- let's do more of those! 3 . 14 15 92 65 35 89 79 32 38 46 26 433 83 27 95 02 88 41 97 16 93 99 37 510 58 20 97 49 44 59 23 07 81 64 06 286 20 89 98 62 80 34 82 53 42 11 70 679 82 14 80 86 51 32 82 30 66 47 09 384 46 09 55 05 82 23 17 25 35 94 08 128 """ STOPWORDED_COLLOCATIONS = """ thank you very much thank you very much thank you very much thanks """ SMALL_CANVAS = """ better late than never someone will say """ def test_collocations(): wc = WordCloud(collocations=False, stopwords=set()) wc.generate(THIS) wc2 = WordCloud(collocations=True, stopwords=set()) wc2.generate(THIS) assert "is better" in wc2.words_ assert "is better" not in wc.words_ assert "way may" not in wc2.words_ def test_collocation_stopwords(): wc = WordCloud(collocations=True, stopwords={"you", "very"}, collocation_threshold=9) wc.generate(STOPWORDED_COLLOCATIONS) assert "thank you" in wc.words_ assert "very much" in wc.words_ # "thank" will have been removed in favor of the bigrams including "thank" assert "thank" not in wc.words_ # a bigram of all stopwords will be removed assert "you very" not in wc.words_ def test_plurals_numbers(): text = THIS + "\n" + "1 idea 2 ideas three ideas although many Ideas" wc = WordCloud(stopwords=[]).generate(text) # not capitalized usually assert "Ideas" not in wc.words_ # plural removed assert "ideas" not in wc.words_ # usually capitalized assert "although" not in wc.words_ assert "idea" in wc.words_ assert "Although" in wc.words_ assert "better than" in wc.words_ def test_multiple_s(): text = 'flo flos floss flosss' wc = WordCloud(stopwords=[]).generate(text) assert "flo" in wc.words_ assert "flos" not in wc.words_ assert "floss" in wc.words_ assert "flosss" in wc.words_ # not normalizing means that the one with just one s is kept wc = WordCloud(stopwords=[], normalize_plurals=False).generate(text) assert "flo" in wc.words_ assert "flos" in wc.words_ assert "floss" in wc.words_ assert "flosss" in wc.words_ def test_empty_text(): # test originally empty text raises an exception wc = WordCloud(stopwords=[]) with pytest.raises(ValueError): wc.generate('') # test empty-after-filtering text raises an exception wc = WordCloud(stopwords=['a', 'b']) with pytest.raises(ValueError): wc.generate('a b a') def test_default(): # test that default word cloud creation and conversions work wc = WordCloud(max_words=50) wc.generate(THIS) # check for proper word extraction assert len(wc.words_) == wc.max_words 
# check that we got enough words assert len(wc.layout_) == wc.max_words # check image export wc_image = wc.to_image() assert wc_image.size == (wc.width, wc.height) # check that numpy conversion works wc_array = np.array(wc) assert_array_equal(wc_array, wc.to_array()) # check size assert wc_array.shape == (wc.height, wc.width, 3) def test_stopwords_lowercasing(): # test that capitalized stopwords work. wc = WordCloud(stopwords=["Beautiful"]) processed = wc.process_text(THIS) words = [count[0] for count in processed] assert "Beautiful" not in words def test_writing_to_file(tmpdir): wc = WordCloud() wc.generate(THIS) # check writing to file filename = str(tmpdir.join("word_cloud.png")) wc.to_file(filename) loaded_image = Image.open(filename) assert loaded_image.size == (wc.width, wc.height) def test_check_errors(): wc = WordCloud() with pytest.raises(NotImplementedError): wc.to_html() try: np.array(wc) raise AssertionError("np.array(wc) didn't raise") except ValueError as e: assert "call generate" in str(e) try: wc.recolor() raise AssertionError("wc.recolor didn't raise") except ValueError as e: assert "call generate" in str(e) def test_svg_syntax(): wc = WordCloud() wc.generate(THIS) svg = wc.to_svg() ET.fromstring(svg) def test_recolor(): wc = WordCloud(max_words=50, colormap="jet") wc.generate(THIS) array_before = wc.to_array() wc.recolor() array_after = wc.to_array() # check that the same places are filled assert_array_equal(array_before.sum(axis=-1) != 0, array_after.sum(axis=-1) != 0) # check that they are not the same assert np.abs(array_before - array_after).sum() > 10000 # check that recoloring is deterministic wc.recolor(random_state=10) wc_again = wc.to_array() assert_array_equal(wc_again, wc.recolor(random_state=10)) def test_random_state(): # check that random state makes everything deterministic wc = WordCloud(random_state=0) wc2 = WordCloud(random_state=0) wc.generate(THIS) wc2.generate(THIS) assert_array_equal(wc, wc2) def test_mask(): # test masks # check that using an empty mask is equivalent to not using a mask wc = WordCloud(random_state=42) wc.generate(THIS) mask = np.zeros(np.array(wc).shape[:2], dtype=np.int) wc_mask = WordCloud(mask=mask, random_state=42) wc_mask.generate(THIS) assert_array_equal(wc, wc_mask) # use actual nonzero mask mask = np.zeros((234, 456), dtype=np.int) mask[100:150, 300:400] = 255 wc = WordCloud(mask=mask) wc.generate(THIS) wc_array =
np.array(wc)
numpy.array
import numpy as np
import pytest

from landlab import FieldError, RasterModelGrid
from landlab.components import FlowAccumulator, FlowDirectorSteepest
from landlab.utils.distance_to_divide import calculate_distance_to_divide


def test_no_flow_receivers():
    """Test that the correct error is raised when no flow receivers are on the grid."""
    mg = RasterModelGrid((30, 70))
    with pytest.raises(FieldError):
        calculate_distance_to_divide(mg)


def test_no_upstream_array():
    """Test that the correct error is raised when no flow__upstream_node_order."""
    mg = RasterModelGrid((30, 70))
    mg.add_ones("topographic__elevation", at="node")
    mg.add_ones("drainage_area", at="node")
    fd = FlowDirectorSteepest(mg)
    fd.run_one_step()
    with pytest.raises(FieldError):
        calculate_distance_to_divide(mg)


def test_drainage_area():
    """Test that the correct error is raised when no drainage_area."""
    mg = RasterModelGrid((30, 70))
    mg.add_ones("topographic__elevation", at="node")
    mg.add_ones("flow__upstream_node_order", at="node")
    fd = FlowDirectorSteepest(mg)
    fd.run_one_step()
    with pytest.raises(FieldError):
        calculate_distance_to_divide(mg)


@pytest.mark.parametrize("flow_dir", ["D8", "D4", "MFD"])
def test_simple_case_same(flow_dir):
    mg = RasterModelGrid((20, 5))
    z = mg.add_zeros("topographic__elevation", at="node")
    z += mg.y_of_node
    mg.set_closed_boundaries_at_grid_edges(
        bottom_is_closed=False,
        left_is_closed=True,
        right_is_closed=True,
        top_is_closed=True,
    )
    fa = FlowAccumulator(mg, flow_director=flow_dir)
    fa.run_one_step()
    dist1 = calculate_distance_to_divide(mg, longest_path=True)
    dist2 = calculate_distance_to_divide(mg, longest_path=False)
np.testing.assert_array_equal(dist1, dist2)
numpy.testing.assert_array_equal
#!/usr/bin/env python # coding: utf-8 from typing import Tuple import numpy as np import PathReducer.calculate_rmsd as rmsd import pandas as pd import math import glob import os import sys import ntpath import MDAnalysis as mda import PathReducer.plotting_functions as plotting_functions from periodictable import * from sklearn import * from sympy import solve, Symbol def path_leaf(path): head, tail = ntpath.split(path) return tail or ntpath.basename(head) def read_traj_file(*args, **kwargs) -> Tuple[str, np.ndarray, np.ndarray]: """ Reads in a trajectory using MDAnalysis' Universe class, documentation and information on parameters found here: (https://www.mdanalysis.org/docs/documentation_pages/core/universe.html#MDAnalysis.core.universe.Universe). A topology file is always required, however there are multiple ways of setting up a universe for a trajectory. Examples include: u = Universe(topology, trajectory) # read system from file(s) u = Universe(pdbfile) # read atoms and coordinates from PDB or GRO u = Universe(topology, [traj1, traj2, ...]) # read from a list of trajectories u = Universe(topology, traj1, traj2, ...) # read from multiple trajectories The trajectory being read in should be already pruned (of explicit solvent, backbone residues, and anything that you don't want PCA to capture. The function then returns a numpy array of all of the atom types of the system, and a numpy array of the Cartesian coordinates of each atom for every frame. :param topology: str (.pdb, .top, .gro etc) :param coordinates: str (.dcd, .nc, .xyz etc) :return extensionless_system_name atom_list cartesians """ u = mda.Universe(*args, **kwargs) system_name = path_leaf(u.filename) extensionless_system_name = os.path.splitext(system_name)[0] n_frames = len(u.trajectory) n_atoms = len(u.atoms) cartesians = np.ndarray((n_frames, n_atoms, 3)) try: atom_list = u.atoms.elements except AttributeError: atom_list = u.atoms.types for frame_index, ts in enumerate(u.trajectory): cartesians[frame_index] = ts.positions return extensionless_system_name, atom_list, cartesians def read_xyz_file(path): """ Reads in an xyz file from path as a DataFrame. This DataFrame is then turned into a 3D array such that the dimensions are (number of points) X (number of atoms) X 3 (Cartesian coordinates). The system name (based on the filename), list of atoms in the system, and Cartesian coordinates are output. 
:param path: path to xyz file to be read :return extensionless_system_name: str atom_list: numpy array cartesians: numpy array """ system_name = path_leaf(path) print("File being read is: %s" % system_name) extensionless_system_name = os.path.splitext(system_name)[0] data = pd.read_csv(path, header=None, delim_whitespace=True, names=['atom', 'X', 'Y', 'Z']) n_atoms = int(data.loc[0][0]) n_lines_per_frame = int(n_atoms + 2) data_array = np.array(data) data_reshape = np.reshape(data_array, (int(data_array.shape[0]/n_lines_per_frame), n_lines_per_frame, data_array.shape[1])) cartesians = data_reshape[:, 2::, 1::].astype(np.float) atom_list = data_reshape[0, 2::, 0] return extensionless_system_name, atom_list, cartesians def remove_atoms_by_type(atom_types_to_remove, atom_list, cartesians): """ Removes specific atoms if they are not wanted for PCA :param atom_list: list of atoms in the structure :param cartesians: cartesian coordinates of each frame :return: cartesian coordinates of each frame with specific atom types removed """ matches_indexes = [i for i, x in enumerate(atom_list) if x in atom_types_to_remove] cartesians_sans_atoms = np.delete(cartesians, list(matches_indexes), axis=1) atom_list_sans_atoms = np.delete(atom_list, list(matches_indexes), axis=0) return atom_list_sans_atoms, cartesians_sans_atoms def calculate_velocities(cartesians, timestep=1): """ Calculate velocities at each timestep given Cartesian coordinates. Velocities at the first and last point are extrapolated. :param cartesians: Cartesian coordinates along trajectory :param timestep: time step between frames in units of fs, default=1 :return: velocities """ velocities = [] for i in range(0, len(cartesians)): if i == 0: velocity = (cartesians[i + 1] - cartesians[i]) / timestep elif i == len(cartesians) - 1: velocity = (cartesians[i] - cartesians[i - 1]) / timestep else: velocity = (cartesians[i + 1] - cartesians[i - 1]) / 2 * timestep velocities.append(velocity) return velocities def calculate_momenta(velocities, atoms): """ :param cartesians: Cartesian coordinates along trajectory :param timestep: time step between frames in units of fs, default=1 :return: velocities """ velocities = np.array(velocities) atoms = np.array(atoms) atom_masses = np.array([formula(atom).mass for atom in atoms]) momenta = velocities * atom_masses[np.newaxis, :, np.newaxis] return momenta def set_atom_one_to_origin(coordinates): coordinates_shifted = coordinates - coordinates[:, np.newaxis, 0] return coordinates_shifted def mass_weighting(atoms, cartesians): cartesians = np.array(cartesians) atoms = np.array(atoms) atom_masses = [formula(atom).mass for atom in atoms] weighting = np.sqrt(atom_masses) mass_weighted_cartesians = cartesians * weighting[np.newaxis, :, np.newaxis] return mass_weighted_cartesians def remove_mass_weighting(atoms, coordinates): coordinates = np.array(coordinates) atoms = np.array(atoms) atom_masses = [formula(atom).mass for atom in atoms] weighting = np.sqrt(atom_masses) unmass_weighted_coords = coordinates / weighting[np.newaxis, :, np.newaxis] return unmass_weighted_coords def generate_distance_matrices(coordinates): """ Generates distance matrices for each structure. """ coordinates = np.array(coordinates) d2 = np.sum((coordinates[:, :, None] - coordinates[:, None, :]) ** 2, axis=3) return d2 def generate_dihedral_matrices(coordinates): return coordinates def generate_and_reshape_ds_big_structures(coordinates): """ Generates matrix of pairwise distances, which includes pairwise distances for each structure. 
:param coordinates: """ coordinates = np.array(coordinates) atoms = int(coordinates.shape[1]) d_re = np.zeros((coordinates.shape[0], int(atoms * (atoms - 1) / 2))) for i in range(coordinates.shape[0]): d2 = np.square(metrics.pairwise.euclidean_distances(coordinates[i])) x = d2[0].shape[0] dint_re = d2[np.triu_indices(x, k=1)] d_re[i] = dint_re return d_re def reshape_ds(d): """ Takes only the upper triangle of the distance matrices and reshapes them into 1D arrays. """ d_re = [] x = d[0][0].shape[0] for dint in d: dint_re = dint[np.triu_indices(x, k=1)] d_re.append(dint_re) d_re = np.asarray(d_re) return d_re def vector_to_matrix(v): """ Converts a representation from 1D vector to 2D square matrix. Slightly altered from rmsd package to disregard zeroes along diagonal of matrix. :param v: 1D input representation. :type v: numpy array :return: Square matrix representation. :rtype: numpy array """ if not (np.sqrt(8 * v.shape[0] + 1) == int(np.sqrt(8 * v.shape[0] + 1))): print("ERROR: Can not make a square matrix.") exit(1) n = v.shape[0] w = ((-1 + int(np.sqrt(8 * n + 1))) // 2) + 1 m = np.zeros((w, w)) index = 0 for i in range(w): for j in range(w): if i > j - 1: continue m[i, j] = v[index] m[j, i] = m[i, j] index += 1 return m def distance_matrix_to_coords(v): """ Converts a (2D square) distance matrix representation of a structure to Cartesian coordinates (first 3 columns correspond to 3D xyz coordinates) via a Gram matrix. :param v: 1D vector, numpy array :return: 3D Cartesian coordinates, numpy array """ d = vector_to_matrix(v) d_one = np.reshape(d[:, 0], (d.shape[0], 1)) m = (-0.5) * (d - np.matmul(np.ones((d.shape[0], 1)), np.transpose(d_one)) - np.matmul(d_one, np.ones((1, d.shape[0])))) values, vectors = np.linalg.eig(m) idx = values.argsort()[::-1] values = values[idx] vectors = vectors[:, idx] assert np.allclose(np.dot(m, vectors), values * vectors) coords = np.dot(vectors, np.diag(
np.sqrt(values)
numpy.sqrt
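The distance_matrix_to_coords routine above recovers coordinates from a distance matrix through a Gram matrix built from its first row. The more common double-centering variant (classical multidimensional scaling) does the same job and may be easier to follow; the sketch below is that standard formulation, not the PathReducer code.

import numpy as np

def classical_mds(D2, ndim=3):
    # D2: (n, n) matrix of squared pairwise distances
    n = D2.shape[0]
    J = np.eye(n) - np.ones((n, n)) / n          # centering matrix
    G = -0.5 * J @ D2 @ J                        # Gram matrix
    vals, vecs = np.linalg.eigh(G)               # eigenvalues in ascending order
    order = np.argsort(vals)[::-1][:ndim]
    coords = vecs[:, order] * np.sqrt(np.clip(vals[order], 0.0, None))
    return coords                                # (n, ndim), defined up to rotation/reflection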
import numpy as np def get_augmentations_from_list(str_list, upright_axis=2): ''' :param str_list: List of string indicating the augmentation type :param upright_axis: Set to 1 for modelnet (i.e. y-axis is vertical axis), but 2 otherwise (i.e. z-axis) :return: ''' if str_list is None: return [] augmentations = [] if 'Rotate1D' in str_list: if upright_axis == 1: augmentations.append(RotateY()) elif upright_axis == 2: augmentations.append(RotateZ()) if 'Jitter' in str_list: augmentations.append(Jitter()) if 'Scale' in str_list: augmentations.append(Scale()) if 'RotateSmall' in str_list: augmentations.append(RotateSmall()) if 'Shift' in str_list: augmentations.append(Shift()) return augmentations class Augmentation(object): def apply(self, data): raise NotImplementedError class Jitter(Augmentation): ''' Applies a small jitter to the position of each point ''' def __init__(self, sigma=0.01, clip=0.05): self.sigma = sigma self.clip = clip def apply(self, data): assert (self.clip > 0) jittered_data = np.clip(self.sigma * np.random.randn(*data.shape), -1 * self.clip, self.clip) jittered_data += data return jittered_data class Shift(Augmentation): def __init__(self, shift_range=0.1): self.shift_range = shift_range def apply(self, data): shift = np.random.uniform(-self.shift_range, self.shift_range, 3) data += shift return data class RotateZ(Augmentation): ''' Rotation perturbation around Z-axis. ''' def apply(self, data): rotation_angle = np.random.uniform() * 2 * np.pi cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, sinval, 0], [-sinval, cosval, 0], [0, 0, 1]]) rotated_data = np.dot(data, rotation_matrix) return rotated_data class RotateY(Augmentation): ''' Rotation perturbation around Y-axis. ''' def apply(self, data): rotation_angle = np.random.uniform() * 2 * np.pi cosval = np.cos(rotation_angle) sinval = np.sin(rotation_angle) rotation_matrix = np.array([[cosval, 0, sinval], [0, 1, 0], [-sinval, 0, cosval]]) rotated_data = np.dot(data, rotation_matrix) return rotated_data class RotateSmall(Augmentation): ''' Applies a small rotation perturbation around all axes ''' def __init__(self, angle_sigma=0.06, angle_clip=0.18): self.angle_sigma = angle_sigma self.angle_clip = angle_clip def apply(self, data): angles = np.clip(self.angle_sigma * np.random.randn(3), -self.angle_clip, self.angle_clip) Rx = np.array([[1, 0, 0], [0, np.cos(angles[0]), -np.sin(angles[0])], [0, np.sin(angles[0]), np.cos(angles[0])]]) Ry = np.array([[np.cos(angles[1]), 0, np.sin(angles[1])], [0, 1, 0], [-np.sin(angles[1]), 0, np.cos(angles[1])]]) Rz = np.array([[np.cos(angles[2]), -np.sin(angles[2]), 0], [np.sin(angles[2]),
np.cos(angles[2])
numpy.cos
import os, sys from astropy.io import fits import astropy.io.ascii as at import numpy as np from scipy.interpolate import interp1d import matplotlib matplotlib.use("AGG") import matplotlib.pyplot as plt # blaze functions are different on each chip # 1-120 on one, 121-240 on the other (except 0 indexed instead) def read_blaze(filename): with fits.open(filename) as hdu: wave = hdu[1].data["WAVE"]*10 blaze = hdu[1].data["BLAZE"] return wave, blaze def make_blaze(filename): ww, bb = read_blaze(filename) blaze = interp1d(ww,bb,kind="linear",bounds_error=False,fill_value=np.nan, assume_sorted=False) print(min(ww),max(ww)) return blaze blaze1 = make_blaze("/n/home13/stdouglas/data/Hectochelle/master_blaze_det1_smooth.fits") blaze2 = make_blaze("/n/home13/stdouglas/data/Hectochelle/master_blaze_det2_smooth.fits") def read_chelle(filename,wave_ext=0,flux_ext=1,err_ext=2,idl=True): if os.path.exists(filename): wave = np.array(fits.getdata(filename,0)) flux_raw = np.array(fits.getdata(filename,1)) inv_err = np.array(fits.getdata(filename,2)) err_raw = 1/np.sqrt(inv_err) mask = np.array(fits.getdata(filename,3)) obj_data = fits.getdata(filename,5) else: print(filename,"NOT FOUND") return None, None, None # remove masked pixels (AND mask) flux_raw[mask] = np.nan # sigma clip outliers # TODO: vectorize! std = np.std(flux_raw,axis=1) fmed_raw = np.nanmedian(flux_raw, axis=1) outlier_mask = np.zeros_like(flux_raw,dtype=bool) for i,fs in enumerate(flux_raw): outlier_mask[i] = abs(fs-fmed_raw[i])>(std[i]*5) #print(len(np.where(outlier_mask[i])[0])) flux_raw[outlier_mask] = np.nan # TODO: divide by nanmedian before dividing by blaze fmed = np.nanmedian(flux_raw,axis=1) #print(fmed) #print(fmed.reshape(240,-1)) flux0 = np.divide(flux_raw,fmed.reshape(240,-1)) err0 = np.divide(err_raw,fmed.reshape(240,-1)) obj_id = obj_data["OBJTYPE"] realspec = np.ones(240) realspec[obj_data["OBJTYPE"]=="UNUSED"] = False realspec[obj_data["OBJTYPE"]=="SKY"] = False realspec[np.all(np.isnan(flux_raw),axis=1)] = False for i in np.where(realspec)[0]: if "REJECT" in obj_data["OBJTYPE"][i]: realspec[i] = False fblaze1 = blaze1(wave[:120]) fblaze2 = blaze2(wave[120:]) flux =
np.ones_like(flux0)
numpy.ones_like
""" original code from https://github.com/dhanajitb/GAIN-Pytorch original paper Pytorch implementation of the paper GAIN: Missing Data Imputation using Generative Adversarial Nets by <NAME>, <NAME>, <NAME> modified by K.H. Implemented as an imputer class """ # %% Packages import torch import numpy as np import torch.nn.functional as F from tqdm.notebook import tqdm_notebook as tqdm import torch.nn as nn class GAIN: def __init__(self, use_gpu=True, mb_size=16, train_rate=0.95, iteration=5000, p_hint=0.9, alpha=10, batch_norm=False ): self.train_rate = train_rate self.iteration = iteration self.p_hint = p_hint self.use_gpu = use_gpu self.alpha = alpha self.mb_size = mb_size # batch normalization: useless for most regression tasks self.batch_norm = batch_norm def fit_transform(self, dat_list): self._set_masking_array(dat_list) self._split() if self.batch_norm: self.bn = nn.BatchNorm1d(self.Dim) if self.use_gpu: self.bn = self.bn.cuda().double() self._init_theta() self.train() reconst_array = self._transform() final_reconst_array = (self.Data*self.Missing) + \ (reconst_array*(1-self.Missing))[0] return final_reconst_array # prepare masking array def _set_masking_array(self, dat_list): # prepare masking array inp = np.array(dat_list) missing = np.array(inp) missing[np.where(missing == missing)] = 1 missing[np.where(missing != missing)] = 0 inp[np.where(inp != inp)] = 0 self.Data = inp self.Missing = missing # split database def _split(self): self.No = len(self.Data) self.Dim = len(self.Data[0, :]) self.H_Dim1 = self.Dim self.H_Dim2 = self.Dim self.idx = np.random.permutation(self.No) self.Train_No = int(self.No * self.train_rate) self.Test_No = self.No - self.Train_No # Train / Test Features self.trainX = self.Data[self.idx[:self.Train_No], :] self.testX = self.Data[self.idx[self.Train_No:], :] # Train / Test Missing Indicators self.trainM = self.Missing[self.idx[:self.Train_No], :] self.testM = self.Missing[self.idx[self.Train_No:], :] def _init_theta(self): Dim = self.Dim # %% 1. Discriminator if self.use_gpu is True: self.D_W1 = torch.tensor(xavier_init( [Dim*2, self.H_Dim1]), requires_grad=True, device="cuda") # Data + Hint as inputs self.D_b1 = torch.tensor( np.zeros(shape=[self.H_Dim1]), requires_grad=True, device="cuda") self.D_W2 = torch.tensor(xavier_init( [self.H_Dim1, self.H_Dim2]), requires_grad=True, device="cuda") self.D_b2 = torch.tensor( np.zeros(shape=[self.H_Dim2]), requires_grad=True, device="cuda") self.D_W3 = torch.tensor(xavier_init( [self.H_Dim2, Dim]), requires_grad=True, device="cuda") # Output is multi-variate self.D_b3 = torch.tensor( np.zeros(shape=[Dim]), requires_grad=True, device="cuda") else: # Data + Hint as inputs self.D_W1 = torch.tensor(xavier_init( [Dim*2, self.H_Dim1]), requires_grad=True) self.D_b1 = torch.tensor(
np.zeros(shape=[self.H_Dim1])
numpy.zeros
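xavier_init is called above but is not shown in this excerpt. A common definition used in GAIN implementations draws weights from a normal distribution scaled by the fan-in; the sketch below is an assumption about that helper, not code taken from this repository.

import numpy as np

def xavier_init(size):
    # size: [fan_in, fan_out]; the scale keeps early-layer activations well-behaved
    in_dim = size[0]
    xavier_stddev = 1.0 / np.sqrt(in_dim / 2.0)
    return np.random.normal(size=size, scale=xavier_stddev)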
import cv2 import glob import os import numpy as np import veloster_config as cnf from veloster_data_utils import get_filtered_lidar from veloster_bev_utils import makeBEVMap, makeBEVMap_binary prev_boundary = { "minX": 0., "maxX": 40., "minY": -20., "maxY": 20., "minZ": -0.50, "maxZ": 3.50 } prev_bound_size_x = prev_boundary['maxX'] - prev_boundary['minX'] prev_bound_size_y = prev_boundary['maxY'] - prev_boundary['minY'] prev_bound_size_z = prev_boundary['maxZ'] - prev_boundary['minZ'] prev_boundary_back = { "minX": -40., "maxX": 0., "minY": -20., "maxY": 20., "minZ": -0.50, "maxZ": 3.50 } prev_BEV_WIDTH = 416 # across y axis -25m ~ 25m prev_BEV_HEIGHT = 416 # across x axis 0m ~ 50m prev_DISCRETIZATION = (prev_boundary["maxX"] - prev_boundary["minX"]) / float(prev_BEV_HEIGHT) # input: [cls_id, cx, cy, w, h, angle] # output: [cls_id, x, y, z, height, width, length, ry] def get_3dlabel_from_2dlabel(labels): object_labels = [] for label in labels: [cls_id, cx, cy, w, h, angle] = label x = (prev_BEV_HEIGHT-cy)*prev_DISCRETIZATION y = -(cx - float(prev_BEV_HEIGHT)/2.)*prev_DISCRETIZATION if cls_id == 0 or cls_id == 2: # Pedestrian, Cyclist height = 1.73 else: # Car height = 1.56 z = height / 2. width = w*prev_DISCRETIZATION length = h*prev_DISCRETIZATION ry = -angle object_label = [cls_id, x, y, z, height, width, length, ry] object_labels.append(object_label) return object_labels # input: [cls_id, x, y, z, height, width, length, ry] # output: [cls_id, cx, cy, w, h, angle] def get_2dlabel_from_3dlabel(labels): img_labels = [] for label in labels: [cls_id, x, y, z, h, w, l, yaw] = label pixel_x = -y/cnf.DISCRETIZATION + cnf.BEV_WIDTH/2. pixel_y = cnf.BEV_HEIGHT - x/cnf.DISCRETIZATION pixel_w = w*cnf.BEV_WIDTH/cnf.bound_size_y pixel_h = l*cnf.BEV_HEIGHT/cnf.bound_size_x pixel_yaw = -yaw img_label = [cls_id, pixel_x, pixel_y, pixel_w, pixel_h, pixel_yaw] img_labels.append(img_label) return img_labels def get_bev_img(lidar_file): gen_numpy =
np.load(lidar_file)
numpy.load
import numpy as np from baselines.deepq.experiments.atari.knn_cuda_fixmem import knn as knn_cuda_fixmem import copy import logging # each action -> a lru_knn buffer # alpha is for updating the internal reward i.e. count based reward class LRU_KNN_GPU_PS_DENSITY(object): def __init__(self, capacity, z_dim, env_name, action, num_actions=6, knn=4, debug=True, gamma=0.99, alpha=0.1, beta=0.01): self.action = action self.alpha = alpha self.beta = beta self.env_name = env_name self.capacity = capacity self.num_actions = num_actions self.rmax = 100000 self.states = np.empty((capacity, z_dim), dtype=np.float32) # self.hash_table = np.empty((capacity, z_dim), dtype=np.float32) # self.hashes = {} self.knn_mean_dist = np.full((capacity,), 0) self.external_value = np.full((capacity, num_actions), np.nan) self.state_value_v = np.full((capacity,), np.nan) self.state_value_u = np.full((capacity,), np.nan) self.reward = np.zeros((capacity, num_actions)) self.done = np.zeros((capacity, num_actions), dtype=np.bool) self.newly_added = np.ones((capacity,), dtype=np.bool) self.internal_value = self.rmax * np.ones((capacity, num_actions)) self.prev_id = [[] for _ in range(capacity)] self.next_id = [[{} for __ in range(num_actions)] for _ in range(capacity)] self.pseudo_count = [[{} for __ in range(num_actions)] for _ in range(capacity)] self.pseudo_reward = np.zeros((capacity, num_actions)) self.pseudo_prev = [{} for _ in range(capacity)] self.debug = debug self.count = np.zeros((capacity, num_actions)) self.lru = np.zeros(capacity) self.density = self.rmax *
np.ones(capacity)
numpy.ones
import numpy as np from mpi4py import MPI from scipy.stats import norm def corr_matrix(X,theta,p): ''' Builds the correlation matrix R of the gaussian process model for certain parameters Parameters ---------- X : n*dim float The n samples the Gaussian process model is build upon. theta : dim float Mutliplier in the Gaussian process model. p : dim float Exponents in the Gaussian process model. Returns ------- R : n*n float Correlation matrix in the Gaussian process model. ''' theta=theta[np.newaxis,np.newaxis,:] p=p[np.newaxis,np.newaxis,:] R=np.exp(-np.sum(theta*np.abs(X[:,np.newaxis,:,np.newaxis]-X[np.newaxis,:,:,np.newaxis])**p,2)) return R def likely(X,C,para): para=np.transpose(para) if len(C.shape)==1: C=C[:,np.newaxis] num_samples=X.shape[0] dim=X.shape[1] theta=para[:dim,:] p=para[dim:,:]+0.5 K=corr_matrix(X,theta,p) Kinv=np.zeros(K.shape) for i in range(K.shape[2]): inverse_failed=True while inverse_failed: try: Kinv[:,:,i]=np.linalg.inv(K[:,:,i]) inverse_failed=False except np.linalg.LinAlgError: K[:,:,i]=K[:,:,i]-np.identity(num_samples)*1e-4 one=np.ones((num_samples,1)) mu=np.dot(C.transpose(),np.dot(np.transpose(one),Kinv)[0])/np.dot(one.transpose(),np.dot(np.transpose(one),Kinv)[0]) sigmasq=np.zeros((1,para.shape[1])) for i in range(para.shape[1]): hi=C-one*mu[0,i] sigmasq[0,i]=np.dot(hi.transpose(),np.dot(Kinv[:,:,i],hi)) L=num_samples*np.log(np.abs(sigmasq[0,:])+1e-8)+np.log(np.abs(np.linalg.det(K.transpose(2,0,1)))+1e-8) return L def kriging(X,C,comm,rank,size,para_old,k_iter): ''' Determines the parameters of a Gaussian process model. This is done using differential evolution on multiple cores to optimize the likelyhood of the model belonging to the parameters. Parameters ---------- X : n*dim float The n samples the Gaussian process model is build upon. C : n*1 float The cost function at the n samples. comm : MPI_WORLD Envrionment to communicate with different processors. rank : int The processor used here. size : int Total number of processors. para_old : 2*dim float or [] If existig, the optimal parameters from the previous iteration. k_iter : int Number of generations when using differential evolution. Returns ------- theta : dim float Mutliplier in the Gaussian process model. p : dim float Exponents in the Gaussian process model. 
''' dim=X.shape[1] prob_change=0.9 multiplyer=0.6 imax=k_iter train_samples=int(dim) train_samples_perrank=int(np.ceil(train_samples/size)) train_samples=train_samples_perrank*size Para_rank=np.random.rand(train_samples_perrank,2*dim)*4.9+0.1 if len(para_old)>0 and rank==0: Para_rank[0,:]=para_old L_rank=likely(X,C,Para_rank) if rank==0: Para_rec=np.empty((size,train_samples_perrank,2*dim)) L_rec=np.empty((size,train_samples_perrank)) else: Para_rec=None L_rec=None comm.Barrier() comm.Gather(Para_rank,Para_rec,root=0) comm.Gather(L_rank,L_rec,root=0) if rank==0: Para=Para_rec.reshape((train_samples,2*dim)) L=L_rec.reshape(train_samples) else: Para=None L=None Para=comm.bcast(Para,root=0) L=comm.bcast(L,root=0) loop=0 while loop<imax: Para_rank=Para[rank*train_samples_perrank:(rank+1)*train_samples_perrank,:] L_rank=L[rank*train_samples_perrank:(rank+1)*train_samples_perrank] test_case=np.floor(np.random.rand(train_samples_perrank,3)*(train_samples-1e-7)).astype('int') Paraa_rank=np.copy(Para[test_case[:,0],:]) Parab_rank=np.copy(Para[test_case[:,1],:]) Parac_rank=np.copy(Para[test_case[:,2],:]) Paracom_rank=Paraa_rank+multiplyer*(Parab_rank-Parac_rank) prob=np.random.rand(train_samples_perrank,2*dim) Paracom_rank[prob>prob_change]=np.copy(Para_rank[prob>prob_change]) Paracom_rank[Paracom_rank<0.1]=0.1 Paracom_rank[Paracom_rank>5]=5 L_compare=likely(X,C,Paracom_rank) L_rank=np.minimum(L_rank,L_compare) Para_rank[L_compare<=L_rank,:]=Paracom_rank[L_compare<=L_rank,:] if rank==0: Para_rec=np.empty((size,train_samples_perrank,2*dim)) L_rec=np.empty((size,train_samples_perrank)) else: Para_rec=None L_rec=None comm.Barrier() comm.Gather(Para_rank,Para_rec,root=0) comm.Gather(L_rank,L_rec,root=0) if rank==0: Para=Para_rec.reshape((train_samples,2*dim)) L=L_rec.reshape(train_samples) else: Para=None L=None Para=comm.bcast(Para,root=0) L=comm.bcast(L,root=0) loop=loop+1 if rank==0: jmin=np.argmin(L) theta=Para[jmin,:dim] p=Para[jmin,dim:] else: theta=None p=None comm.Barrier() theta=comm.bcast(theta,root=0) p=comm.bcast(p,root=0) return theta,p+0.5 def ExImp(Xpos,theta,p,X,C,Rinv,weight_explore): ''' Build an aqusition function, the expected improvement function, for bayesian optimization based on a Gaussian process model [1]. [1]: <NAME>, <NAME>, and <NAME>. Efficient global optimization of expensive black-box functions. Journal of Global optimization, 13(4):455–492, 1998. Parameters ---------- Xpos : m*dim The m samples at which the expected improvement function is to be evaluated. theta : dim float Mutliplier in the Gaussian process model. p : dim float Exponents in the Gaussian process model. X : n*dim float The n samples the Gaussian process model is build upon. C : n*1 float The cost function at the n samples. Rinv : n*n float The inverse of the correlation matrix in the Gaussian process model. weight_explore : float A factor used to allow to shift focus between exploration (heigh value) and exploitation (low value). Returns ------- m float The negative value of the expected improvement function (EI has to maximized, and therefore, -EI is minimized). 
''' num_samples=len(X) one=np.ones((num_samples,1)) mu=np.dot(np.dot(np.transpose(one),Rinv),C)/(np.dot(np.dot(np.transpose(one),Rinv),one)+1e-8) sigmasq=np.dot(np.dot(np.transpose(C-mu*one),Rinv),(C-mu*one))/num_samples theta=theta[np.newaxis,np.newaxis,:] p=p[np.newaxis,np.newaxis,:] k=np.exp(-np.sum(theta*np.abs(Xpos[np.newaxis,:,:]-X[:,np.newaxis,:])**p,2)) pred=mu+np.dot(np.dot(np.transpose(k),Rinv),(C-one*mu)) #check how the matrix dimensions work out here conf=np.sqrt(np.abs(sigmasq*(1-np.sum(k*np.dot(Rinv,k),0)+(1-np.dot(
np.transpose(one)
numpy.transpose
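For reference, the expected-improvement quantity cited in the ExImp docstring (Jones et al., 1998) has a closed form in terms of the Gaussian process mean and standard deviation. The sketch below states that formula for a minimization problem; the argument names mirror pred and conf from the code above, but the function itself is illustrative rather than the original implementation.

from scipy.stats import norm

def expected_improvement_sketch(pred, conf, c_best):
    # pred: GP mean at the candidate points, conf: GP standard deviation,
    # c_best: best (lowest) cost observed so far
    z = (c_best - pred) / (conf + 1e-12)
    return (c_best - pred) * norm.cdf(z) + conf * norm.pdf(z)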