# -*- coding: UTF-8 -*-
"""Definitions for `Sampler` class."""
import numpy as np
import time
class Sampler(object):
"""Sample the posterior distribution of a model against an observation."""
_MIN_WEIGHT = 1e-4
def __init__(self, fitter, num_walkers=None, **kwargs):
"""Initialize `Sampler` class."""
self._printer = kwargs.get('printer')
self._fitter = fitter
self._pool = self._fitter._pool
self._printer = self._fitter._printer
self._num_walkers = num_walkers
def get_samples(self):
"""Return samples from ensembler."""
samples = np.array([a for b in self._pout for a in b])
if self._lnprobout is None:
return samples, None, np.array([
1.0 / len(samples) for x in samples])
probs = np.array([a for b in self._lnprobout for a in b])
weights = np.array([a for b in self._weights for a in b])
min_weight = self._MIN_WEIGHT / len(samples)
sel = weights > min_weight
samples = samples[sel]
probs = probs[sel]
weights = weights[sel]
wsis = np.argsort(weights)
samples = samples[wsis]
probs = probs[wsis]
weights = weights[wsis]
return samples, probs, weights
def run(self):
"""Run the sampler."""
pass
def psrf(self, chain):
"""Calculate PSRF for a chain."""
m = len(chain)
n = len(chain[0])
mom = np.mean(np.mean(chain, axis=1))
b = n / float(m - 1) * np.sum(
    (np.mean(chain, axis=1) - mom) ** 2)
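The `psrf` method above is cut off inside the between-chain variance term. For reference, a minimal standalone sketch of the Gelman-Rubin potential scale reduction factor, assuming `chain` is an array of shape (n_walkers, n_steps), could look like the following (illustrative only, not the original class method):

import numpy as np

def gelman_rubin_psrf(chain):
    """Gelman-Rubin PSRF for an (m, n) array of samples from m chains."""
    chain = np.asarray(chain, dtype=float)
    m, n = chain.shape
    chain_means = np.mean(chain, axis=1)
    grand_mean = np.mean(chain_means)
    b = n / (m - 1.0) * np.sum((chain_means - grand_mean) ** 2)  # between-chain variance
    w = np.mean(np.var(chain, axis=1, ddof=1))                   # within-chain variance
    var_plus = (n - 1.0) / n * w + b / n                         # pooled variance estimate
    return np.sqrt(var_plus / w)

# Well-mixed chains give a PSRF close to 1.
rng = np.random.default_rng(0)
print(gelman_rubin_psrf(rng.normal(size=(4, 1000))))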
import typing as T
import tensorflow as tf
from functools import reduce
from collections import namedtuple
import numpy as np
class Tree:
"""Concrete tree instances"""
def __init__(self, node_type_id: str, children: T.List["Tree"] = None, value=None, meta=None):
"""
:param node_type_id:
:param children:
:param value:
"""
self.node_type_id = node_type_id
self.children = children if children is not None else []
self.value = value
self.meta = meta if meta is not None else {} # just a place to store additional info
def __str__(self):
s = self.node_type_id
if self.value is not None:
s += ' ' + str(self.value.abstract_value)
if len(self.children) > 0:
s += " : [" + ", ".join(map(str, self.children)) + "]"
return s
def leaves(self):
if len(self.children) == 0:
return [self.value]
else:
return reduce(lambda x,y: x + y, map(Tree.leaves, self.children))
def calculate_max_depth(self):
return 1 + max(map(Tree.calculate_max_depth, self.children), default=0)
def calculate_node_count(self):
return 1 + sum(map(Tree.calculate_node_count, self.children))
def calculate_max_arity(self):
return max(len(self.children), max(map(Tree.calculate_max_arity, self.children), default=0))
def calculate_mean_arity(self):
def _visit(t):
if t.children:
return [len(t.children)] + list(reduce(lambda x,y: x+y, map(_visit, t.children), []))
else:
return []
arities = _visit(self)
return sum(arities)/len(arities) if arities else None
def compute_overlaps(self, t2, also_values=False, skip_leaves_value=False):
t1_nodes, t2_nodes = 0, 0
bcpt_nodes = 0
all_value_count = 0
matched_value_count = 0
def visit(t1, t2):
nonlocal t1_nodes, t2_nodes, bcpt_nodes, all_value_count, matched_value_count
t1_nodes += len(t1.children)
t2_nodes += len(t2.children)
for c1, c2 in zip(t1.children, t2.children):
if c1.node_type_id == c2.node_type_id:
bcpt_nodes += 1
visit(c1, c2)
if also_values and c1.value is not None and not (len(c1.children) == 0 and skip_leaves_value):
all_value_count += 1
if c1.value.abstract_value == c2.value.abstract_value:
matched_value_count += 1
if self.node_type_id == t2.node_type_id:
bcpt_nodes = 1
t1_nodes = 1
t2_nodes = 1
if also_values and self.value is not None:
all_value_count += 1
if self.value.abstract_value == t2.value.abstract_value:
matched_value_count += 1
visit(self, t2)
s_acc = 2 * bcpt_nodes / float(t1_nodes + t2_nodes)
if also_values:
v_acc = s_acc * matched_value_count / float(all_value_count) if all_value_count > 0 else 1.0 * s_acc
else:
v_acc = None
return s_acc, v_acc
else:
return 0, 0
def clone(self, clone_value=False):
return Tree(node_type_id=self.node_type_id,
children=[c.clone(clone_value=clone_value) for c in self.children],
meta=self.meta.copy(),
value=type(self.value)(abstract_value=self.value.abstract_value) if clone_value else self.value)
TreeComparisonInfo = namedtuple('TreeComparisonInfo', [
"matching_struct",
"matching_value",
"struct_overlap_by_depth",
"struct_overlap_by_node",
"value_overlap"])
@staticmethod
def compare_trees(ts1: T.List["Tree"], ts2: T.List["Tree"], also_values=True, skip_leaves_values=False,):
overlaps_s, overlaps_v = zip(*[ts1[i].compute_overlaps(ts2[i],
also_values=also_values,
skip_leaves_value=skip_leaves_values) for i in range(len(ts1))])
overlaps_s_avg = float(np.mean(overlaps_s))
overlaps_v_avg = float(np.mean(overlaps_v))
overlaps_s_acc = np.sum(np.equal(overlaps_s, 1.0))
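The `compare_trees` helper above is truncated. For context, a minimal usage sketch of `Tree.compute_overlaps` is shown below; the `Val` namedtuple is a hypothetical stand-in for the value type (not defined in this snippet), which only needs an `abstract_value` attribute:

from collections import namedtuple

Val = namedtuple("Val", ["abstract_value"])  # hypothetical value holder

t1 = Tree("add", children=[Tree("num", value=Val(1)), Tree("num", value=Val(2))])
t2 = Tree("add", children=[Tree("num", value=Val(1)), Tree("num", value=Val(3))])

s_acc, v_acc = t1.compute_overlaps(t2, also_values=True)
print(s_acc, v_acc)  # structures match fully (s_acc == 1.0); one of two leaf values matches (v_acc == 0.5)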
import json
import numpy as np
import logging
from sm.engine.ms_txt_converter import MsTxtConverter
from sm.engine.util import SMConfig, read_json
from sm.engine.db import DB
from sm.engine.es_export import ESExporter
logger = logging.getLogger('engine')
class DatasetReader(object):
""" Class for reading dataset coordinates and spectra
Args
----------
input_path : str
Input path with mass spec files
sc : pyspark.SparkContext
Spark context object
"""
def __init__(self, input_path, sc, wd_manager):
self.input_path = input_path
self._wd_manager = wd_manager
self._sc = sc
self.coord_pairs = None
@staticmethod
def _parse_coord_row(s):
res = []
row = s.strip('\n')
if len(row) > 0:
vals = row.split(',')
if len(vals) > 0:
res = [int(v) for v in vals[1:]]
return res
@staticmethod
def _is_valid_coord_row(fields):
return len(fields) == 2
def _determine_pixel_order(self):
coord_path = self._wd_manager.coord_path
self.coord_pairs = (self._sc.textFile(coord_path)
.map(self._parse_coord_row)
.filter(self._is_valid_coord_row).collect())
self.min_x, self.min_y = np.amin(np.asarray(self.coord_pairs), axis=0)
self.max_x, self.max_y = np.amax(np.asarray(self.coord_pairs), axis=0)
_coord = np.array(self.coord_pairs)
_coord = np.around(_coord, 5)
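`_determine_pixel_order` is cut off after rounding the coordinates. A standalone sketch of one way to turn (x, y) coordinate pairs into a row-major pixel order is given below; this is an illustration only, not necessarily what sm.engine does:

import numpy as np

def pixel_order(coord_pairs):
    coords = np.asarray(coord_pairs)
    min_x, min_y = coords.min(axis=0)
    # lexicographic sort: primary key y, secondary key x
    order = np.lexsort((coords[:, 0], coords[:, 1]))
    norm_coords = coords[order] - [min_x, min_y]
    return order, norm_coords

order, norm_coords = pixel_order([[0, 1], [1, 0], [0, 0], [1, 1]])
print(order)        # [2 1 0 3]
print(norm_coords)  # coordinates shifted so the smallest x and y map to 0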
import os
import pickle
import random
import networkx as nx
import numpy as np
from sklearn.manifold import TSNE
import torch
import torch.nn as nn
import torch.multiprocessing as mp
import torch.nn.functional as F
import torch.optim as optim
from torch_geometric.data import DataLoader
from torch_geometric.datasets import TUDataset, PPI, QM9
import torch_geometric.utils as pyg_utils
import torch_geometric.nn as pyg_nn
from tqdm import tqdm
import queue
from deepsnap.dataset import GraphDataset
from deepsnap.batch import Batch
from deepsnap.graph import Graph as DSGraph
#import orca
from torch_scatter import scatter_add
from common import utils
AUGMENT_METHOD = "concat"
#FEATURE_AUGMENT, FEATURE_AUGMENT_DIMS = [], []
FEATURE_AUGMENT, FEATURE_AUGMENT_DIMS = ['dataset_attrs'], [1]
#FEATURE_AUGMENT, FEATURE_AUGMENT_DIMS = ['dataset_attrs', "node_degree"], [1,1]
#FEATURE_AUGMENT, FEATURE_AUGMENT_DIMS = ["identity"], [4]
#FEATURE_AUGMENT = ["motif_counts"]
#FEATURE_AUGMENT_DIMS = [73]
#FEATURE_AUGMENT_DIMS = [15]
def norm(edge_index, num_nodes, edge_weight=None, improved=False,
dtype=None):
if edge_weight is None:
edge_weight = torch.ones((edge_index.size(1),), dtype=dtype,
device=edge_index.device)
fill_value = 1 if not improved else 2
edge_index, edge_weight = pyg_utils.add_remaining_self_loops(
edge_index, edge_weight, fill_value, num_nodes)
row, col = edge_index
deg = scatter_add(edge_weight, row, dim=0, dim_size=num_nodes)
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0
return edge_index, deg_inv_sqrt[row] * edge_weight * deg_inv_sqrt[col]
def compute_identity(edge_index, n, k):
edge_weight = torch.ones((edge_index.size(1),), dtype=torch.float,
device=edge_index.device)
edge_index, edge_weight = pyg_utils.add_remaining_self_loops(
edge_index, edge_weight, 1, n)
adj_sparse = torch.sparse.FloatTensor(edge_index, edge_weight,
torch.Size([n, n]))
adj = adj_sparse.to_dense()
deg = torch.diag(torch.sum(adj, -1))
deg_inv_sqrt = deg.pow(-0.5)
deg_inv_sqrt[deg_inv_sqrt == float('inf')] = 0  # mask infs from zero off-diagonal entries, mirroring norm() above
adj = deg_inv_sqrt @ adj @ deg_inv_sqrt
diag_all = [torch.diag(adj)]
adj_power = adj
for i in range(1, k):
adj_power = adj_power @ adj
diag_all.append(torch.diag(adj_power))
diag_all = torch.stack(diag_all, dim=1)
return diag_all
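# A quick sanity check of compute_identity (illustrative sketch, not part of the original
# module): a 3-node triangle gives one row per node, holding the diagonal entries of the
# normalized adjacency raised to powers 1..k.
_example_edge_index = torch.tensor([[0, 1, 1, 2, 2, 0],
                                    [1, 0, 2, 1, 0, 2]], dtype=torch.long)
print(compute_identity(_example_edge_index, 3, 3).shape)  # torch.Size([3, 3])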
class FeatureAugment(nn.Module):
def __init__(self):
super(FeatureAugment, self).__init__()
def degree_fun(graph, feature_dim):
graph.node_degree = self._one_hot_tensor(
[d for _, d in graph.G.degree()],
one_hot_dim=feature_dim)
return graph
def centrality_fun(graph, feature_dim):
nodes = list(graph.G.nodes)
centrality = nx.betweenness_centrality(graph.G)
graph.betweenness_centrality = torch.tensor(
[centrality[x] for x in
nodes]).unsqueeze(1)
return graph
def path_len_fun(graph, feature_dim):
nodes = list(graph.G.nodes)
graph.path_len = self._one_hot_tensor(
[np.mean(list(nx.shortest_path_length(graph.G,
source=x).values())) for x in nodes],
one_hot_dim=feature_dim)
return graph
def pagerank_fun(graph, feature_dim):
nodes = list(graph.G.nodes)
pagerank = nx.pagerank(graph.G)
graph.pagerank = torch.tensor([pagerank[x] for x in
nodes]).unsqueeze(1)
return graph
def identity_fun(graph, feature_dim):
graph.identity = compute_identity(
graph.edge_index, graph.num_nodes, feature_dim)
return graph
def clustering_coefficient_fun(graph, feature_dim):
node_cc = list(nx.clustering(graph.G).values())
if feature_dim == 1:
graph.node_clustering_coefficient = torch.tensor(
node_cc, dtype=torch.float).unsqueeze(1)
else:
graph.node_clustering_coefficient = FeatureAugment._bin_features(
node_cc, feature_dim=feature_dim)
return graph
def motif_counts_fun(graph, feature_dim):
assert feature_dim % 73 == 0
counts = orca.orbit_counts("node", 5, graph.G)
counts = [[np.log(c) if c > 0 else -1.0 for c in l]
for l in counts]
counts = torch.tensor(counts).type(torch.float)
# counts = FeatureAugment._wave_features(counts,
# feature_dim=feature_dim // 73)
graph.motif_counts = counts
return graph
def node_features_base_fun(graph, feature_dim):
for v in graph.G.nodes:
if "node_feature" not in graph.G.nodes[v]:
graph.G.nodes[v]["node_feature"] = torch.ones(feature_dim)
return graph
def dataset_attrs_fun(graph, feature_dim):
graph.dataset_attrs = torch.zeros(
(len(graph.G.nodes), feature_dim))
for i, v in enumerate(graph.G.nodes):
feat = (graph.G.nodes[v]["label"] if "label" in
graph.G.nodes[v] else (graph.G.nodes[v]["feat"] if "feat"
in graph.G.nodes[v] else np.zeros(1)))
graph.dataset_attrs[i, :len([feat])] = torch.tensor(
[feat], dtype=torch.float)
return graph
self.node_features_base_fun = node_features_base_fun
self.node_feature_funs = {"node_degree": degree_fun,
"betweenness_centrality": centrality_fun,
"path_len": path_len_fun,
"pagerank": pagerank_fun,
'node_clustering_coefficient': clustering_coefficient_fun,
"motif_counts": motif_counts_fun,
"identity": identity_fun,
"dataset_attrs": dataset_attrs_fun}
def register_feature_fun(self, name, feature_fun):
self.node_feature_funs[name] = feature_fun
@staticmethod
def _wave_features(list_scalars, feature_dim=4, scale=10000):
pos = np.array(list_scalars)
if len(pos.shape) == 1:
pos = pos[:, np.newaxis]
batch_size, n_feats = pos.shape
pos = pos.reshape(-1)
rng = np.arange(0, feature_dim // 2).astype(
float) / (feature_dim // 2)
sins = np.sin(pos[:, np.newaxis] / scale**rng[np.newaxis, :])
coss = np.cos(pos[:, np.newaxis] / scale**rng[np.newaxis, :])
m = np.concatenate((coss, sins), axis=-1)
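`_wave_features` is truncated right after the concatenation. A self-contained sketch of the same sinusoidal encoding of scalars, with an assumed final reshape back to (batch, n_feats * feature_dim), might look like:

import numpy as np

def wave_encode(list_scalars, feature_dim=4, scale=10000):
    pos = np.array(list_scalars, dtype=float)
    if pos.ndim == 1:
        pos = pos[:, np.newaxis]
    batch_size, n_feats = pos.shape
    pos = pos.reshape(-1)
    rng = np.arange(0, feature_dim // 2).astype(float) / (feature_dim // 2)
    sins = np.sin(pos[:, np.newaxis] / scale ** rng[np.newaxis, :])
    coss = np.cos(pos[:, np.newaxis] / scale ** rng[np.newaxis, :])
    m = np.concatenate((coss, sins), axis=-1)
    return m.reshape(batch_size, n_feats * feature_dim)  # assumed final step

print(wave_encode([1, 2, 3], feature_dim=4).shape)  # (3, 4)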
import matplotlib as mpl
#mpl.use('Agg')
import matplotlib.pyplot as plt
import random
import gym
import torch
from torch.optim import Adam
import numpy as np
import os
import math
import pickle
import torch
import torch.nn as nn
import torch.optim as optim
import torch.nn.functional as F
import torchvision.transforms as T
device = torch.device("cuda:0")
print(torch.cuda.is_available())
class Pclass(torch.nn.Module):
def __init__(self,layer_size,time,state):
super(Pclass, self).__init__()
self.size = time*state
self.actor = torch.nn.Sequential(
)
self.actor.add_module('l1',torch.nn.Linear(time*state, layer_size))
#self.actor.add_module('b1', torch.nn.BatchNorm1d(num_features=layer_size))
self.actor.add_module('d1', torch.nn.Dropout(p=0.3))
self.actor.add_module('a1', torch.nn.LeakyReLU())
self.actor.add_module('l4',torch.nn.Linear(layer_size, layer_size))
#self.actor.add_module('b2', torch.nn.BatchNorm1d(num_features=layer_size))
self.actor.add_module('d2', torch.nn.Dropout(p=0.3))
self.actor.add_module('a4',torch.nn.LeakyReLU())
#self.actor.add_module('b3', torch.nn.BatchNorm1d(num_features=layer_size))
self.actor.add_module('d2b', torch.nn.Dropout(p=0.3))  # unique name; re-using 'd2' would overwrite the earlier dropout
self.actor.add_module('l5',torch.nn.Linear(layer_size, layer_size))
self.actor.add_module('a5',torch.nn.LeakyReLU())
self.actor.add_module('d3', torch.nn.Dropout(p=0.3))
self.actor.add_module('l6',torch.nn.Linear(layer_size, layer_size))
self.actor.add_module('a6',torch.nn.LeakyReLU())
self.actor.add_module('d4', torch.nn.Dropout(p=0.3))
self.actor.add_module('l7',torch.nn.Linear(layer_size, layer_size))
self.actor.add_module('a7',torch.nn.LeakyReLU())
self.actor.add_module('d5', torch.nn.Dropout(p=0.3))
self.actor.add_module('l8',torch.nn.Linear(layer_size,2))
#self.actor.add_module('a8',torch.nn.ReLU())
#self.actor.add_module('out', torch.nn.Softmax(dim=1))
self.actor.to(device)
self.epochs = 2000
self.MseLoss = torch.nn.CrossEntropyLoss()
self.optimizer = torch.optim.Adam(self.actor.parameters(), lr=0.001, betas=(0.9,0.999))
def forward(self):
raise NotImplementedError
def update(self, train,test, name):
samples = torch.tensor(test[:,0,:]).float().to(device)
p_results = torch.squeeze(self.actor(samples))
results = test[:,1,0]
p_results = (torch.argmax(p_results, dim=1).detach().cpu().numpy())
print("test acc")
print(np.sum(np.ravel(results == np.ravel(p_results))/len(np.ravel(p_results))))
samples = torch.tensor(train[:,0,:]).float().to(device)
pp_results = torch.squeeze(self.actor(samples))
results = train[:,1,0]
p_results = (torch.argmax(pp_results, dim=1).detach().cpu().numpy())
print("train acc")
print(np.sum(np.ravel(results == np.ravel(p_results))/len(np.ravel(p_results))))
losses = []
for _ in range(self.epochs):
# np.random.shuffle(train)
samples = torch.tensor(train[:,0,:]).float().to(device)
#results = np.reshape(results,(len(results),1))
results = torch.tensor(train[:,1,0]).long().to(device)
self.optimizer.zero_grad()
p_results = torch.squeeze(self.actor(samples))
loss = self.MseLoss(p_results, results)
#loss = Variable(loss, requires_grad = True)
#print(loss)
loss_result = loss.item()
#print(loss_result)
losses.append(loss_result)
loss.backward()
self.optimizer.step()
samples = torch.tensor(test[:,0,:]).float().to(device)
p_results = torch.squeeze(self.actor(samples))
results = test[:,1,0]
p_results = (torch.argmax(p_results, dim = 1).detach().cpu().numpy())
print("test acc")
print(np.sum(np.ravel(results == np.ravel(p_results))/len(np.ravel(p_results))))
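The `update` method above is truncated. A minimal usage sketch for `Pclass` follows; the (N, 2, time*state) packing of inputs and labels is an assumption inferred from how `update` slices its arguments, and a CUDA device is required because the model is moved to "cuda:0" above:

import numpy as np

time_steps, state_dim, N = 4, 3, 64
data = np.zeros((N, 2, time_steps * state_dim), dtype=np.float32)
data[:, 0, :] = np.random.randn(N, time_steps * state_dim)  # flattened inputs
data[:, 1, 0] = np.random.randint(0, 2, size=N)             # binary labels

model = Pclass(layer_size=32, time=time_steps, state=state_dim)
model.epochs = 5  # keep the sketch cheap
model.update(data[:48], data[48:], name="demo")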
##############################################################################
# PyLipID: A python module for analysing protein-lipid interactions
#
# Author: <NAME>
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in all
# copies or substantial portions of the Software.
##############################################################################
from collections import defaultdict
from itertools import product
from functools import partial
import pickle
import os
import re
import warnings
import mdtraj as md
import numpy as np
np.seterr(all='ignore')
from scipy.sparse import coo_matrix
import pandas as pd
from tqdm import trange, tqdm
from p_tqdm import p_map
from ..func import cal_contact_residues
from ..func import Duration
from ..func import cal_lipidcount, cal_occupancy
from ..func import get_node_list
from ..func import collect_bound_poses
from ..func import analyze_pose_wrapper, calculate_koff_wrapper, calculate_surface_area_wrapper
from ..plot import plot_surface_area, plot_binding_site_data
from ..plot import plot_residue_data, plot_corrcoef, plot_residue_data_logo
from ..util import check_dir, write_PDB, write_pymol_script, sparse_corrcoef, get_traj_info
class LipidInteraction:
def __init__(self, trajfile_list, cutoffs=[0.475, 0.7], lipid="CHOL", topfile_list=None, lipid_atoms=None,
nprot=1, resi_offset=0, save_dir=None, timeunit="us", stride=1, dt_traj=None):
"""The main class that handles calculation and controls workflow.
``LipidInteraction`` reads trajectory information via `mdtraj.load()`, so it supports most of the trajectory
formats. ``LipidInteraction`` calculates lipid interactions with both protein residues and the calculated
binding sites, and provides a couple of assisting functions to plot data and present data in various forms.
The methods of ``LipidInteraction`` can be divided into three groups based on their roles: one for calculation
of interaction with protein residues, one for binding site and the last that contains assisting functions for
plotting and generating data. Each of the first two groups has a core function to collect/calculate the required
data for the rest of the functions in that group, i.e. ``collect_residue_contacts`` that builds lipid index for
residues as a function of time for residue analysis; and ``compute_binding_sites`` that calculates the binding
sites using the interaction network of the residues. The rest of the methods in each group are independent of
each other.
``LipidInteraction`` also has an attribute, named ``dataset``, which stores the calculated interaction data in
a `pandas.DataFrame <https://pandas.pydata.org/pandas-docs/stable/reference/api/pandas.DataFrame.html>`_ object
and updates automatically after calculation. It records interaction data for protein residues by
rows, including, for each residue, the interaction residence times, averaged durations, occupancy and lipid
count etc., and binding site IDs and the various interaction data of the belonging binding site.
For the computationally demanding functions, i.e. ``compute_residue_koff``, ``compute_site_koff``,
``analyze_bound_poses``, and ``compute_surface_area``, PyLipID uses the python multiprocessing library
to speed up the calculation. Users can specify the number of CPUs these functions can use, otherwise all the
CPUs in the system will be used by default.
Parameters
----------
trajfile_list : str or a list of str
Trajectory filename(s). Read by mdtraj.load() to obtain trajectory information.
cutoffs : list of two scalar or a scalar, default=[0.475, 0.7]
Cutoff value(s) for defining contacts. When a list of two scalar are provided, the dual-cutoff scheme
will be used. A contact in the dual-cutoff scheme starts when a lipid gets closer than the lower cutoff,
and ends when this lipid moves farther than the upper cutoff. The duration between the two time points is
the duration of this contact.
lipid : str, default="CHOL"
Lipid name in topology.
topfile_list : str or a list of str, default=None
Topology filename(s). Most trajectory formats do not contain topology information. Provide either
the path to a RCSB PDB file, a trajectory, or a topology for each trajectory in `trajfile_list`
for the topology information. See `mdtraj.load() <https://mdtraj.org>`_. for more information.
lipid_atoms : list of str, default=None
Lipid atom names. Only interactions of the provided atoms will be considered for the calculation of contacts.
If None, all atoms of the lipid molecule will be used.
nprot : int, default=1
Number of protein copies in the system. If the system has N copies of the protein, 'nprot=N' will report
averaged values from the N copies, but 'nprot=1' will report interaction values for each copy.
resi_offset : int, default=0
Shift residue index in the reported results from what is shown in the topology. Can be useful for
MARTINI force field.
save_dir : str, default=None
The root directory to store the generated data. By default, a directory Interaction_{lipid} will be created
in the current working directory, under which all the generated data are stored.
timeunit : {"us", "ns"}, default="us"
The time unit used for reporting results. "us" is micro-second and "ns" is nanosecond.
stride : int, default=1
Only read every stride-th frame. The same stride in mdtraj.load().
dt_traj : float, default=None
Timestep of trajectories. It is required when trajectories do not have timestep information. Not needed for
trajectory formats of e.g. xtc, trr etc. If None, timestep information will take from trajectories.
"""
self._trajfile_list = np.atleast_1d(trajfile_list)
if len(np.atleast_1d(topfile_list)) == len(self._trajfile_list):
self._topfile_list = np.atleast_1d(topfile_list)
elif len(self._trajfile_list) > 1 and len(np.atleast_1d(topfile_list)) == 1:
self._topfile_list = [topfile_list for dummy in self._trajfile_list]
else:
raise ValueError(
"topfile_list should either have the same length as trajfile_list or have one valid file name.")
if len(np.atleast_1d(cutoffs)) == 1:
self._cutoffs = np.array([np.atleast_1d(cutoffs)[0] for dummy in range(2)])
elif len(np.atleast_1d(cutoffs)) == 2:
self._cutoffs = np.sort(np.array(cutoffs, dtype=float))
else:
raise ValueError("cutoffs should be either a scalar or a list of two scalars.")
self._dt_traj = dt_traj
self._lipid = lipid
self._lipid_atoms = lipid_atoms
self._nprot = int(nprot)
self._timeunit = timeunit
self._stride = int(stride)
self._resi_offset = resi_offset
self.dataset = pd.DataFrame()
self._save_dir = check_dir(os.getcwd(), "Interaction_{}".format(self._lipid)) if save_dir is None \
else check_dir(save_dir, "Interaction_{}".format(self._lipid))
return
#############################################
# attributes
#############################################
def dataset(self):
"""Summary of lipid interaction stored in a pandas.DataFrame() object."""
return self.dataset
@property
def residue_list(self):
"""A list of Residue names."""
return self._residue_list
@property
def node_list(self):
"""A list of binding site residue indices. """
return self._node_list
@property
def lipid(self):
"""Lipid residue name."""
return self._lipid
@property
def lipid_atoms(self):
"""Lipid atom names"""
return self._lipid_atoms
@property
def cutoffs(self):
"""Cutoffs used for calculating contacts. """
return self._cutoffs
@property
def nprot(self):
"""Number of protein copies in system. """
return self._nprot
@property
def stride(self):
"""Stride"""
return self._stride
@property
def trajfile_list(self):
"""Trajectory filenames """
return self._trajfile_list
@property
def topfile_list(self):
"""Topology filenames"""
return self._topfile_list
@property
def dt_traj(self):
"""Trajectory timestep"""
return self._dt_traj
@property
def resi_offset(self):
"""Residue index offset"""
return self._resi_offset
@property
def save_dir(self):
"""Root directory for the generated data."""
return self._save_dir
@property
def timeunit(self):
"""Time unit used for reporting results. """
return self._timeunit
def koff(self, residue_id=None, residue_name=None):
"""Residue koff"""
if residue_id is not None and residue_name is not None:
assert self.dataset[self.dataset["Residue ID"] == residue_id]["Residue"] == residue_name, \
"residue_id and residue_name are pointing to different residues!"
return self._koff[residue_id]
elif residue_id is not None:
return self._koff[residue_id]
elif residue_name is not None:
return self._koff[self._residue_map[residue_name]]
def res_time(self, residue_id=None, residue_name=None):
"""Residue residence time"""
if residue_id is not None and residue_name is not None:
assert self.dataset[self.dataset["Residue ID"] == residue_id]["Residue"] == residue_name, \
"residue_id and residue_name are pointing to different residues!"
return self._res_time[residue_id]
elif residue_id is not None:
return self._res_time[residue_id]
elif residue_name is not None:
return self._res_time[self._residue_map[residue_name]]
def koff_bs(self, bs_id):
"""Binding site koff"""
return self._koff_BS[bs_id]
def res_time_bs(self, bs_id):
"""Binding site residence time"""
return self._res_time_BS[bs_id]
def residue(self, residue_id=None, residue_name=None, print_data=True):
"""Obtain the lipid interaction information for a residue
Use either residue_id or residue_name to indicate the residue identity.
Return the interaction information in a pandas.DataFrame object.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID that is used by PyLipID for identifying residues. The ID starts from 0, i.e. the ID
of N-th residue is (N-1). If None, all residues are selected.
residue_name : str or list of str, default=None
The residue name as stored in PyLipID dataset. The residue name is in the format of resi+resn
Returns
-------
df : pandas.DataFrame
A pandas.DataFrame of interaction information of the residue.
"""
if residue_id is not None and residue_name is not None:
assert self.dataset[self.dataset["Residue ID"] == residue_id]["Residue"] == residue_name, \
"residue_id and residue_name are pointing to different residues!"
df = self.dataset[self.dataset["Residue ID"] == residue_id]
elif residue_id is not None:
df = self.dataset[self.dataset["Residue ID"] == residue_id]
elif residue_name is not None:
df = self.dataset[self.dataset["Residue"] == residue_name]
if print_data:
print(df)
return df
def binding_site(self, binding_site_id, print_data=True, sort_residue="Residence Time"):
"""Obtain the lipid interaction information for a binding site.
Use binding site ID to access the information. Return the lipid interaction information of the
binding site in a pandas.DataFrame object. If print_data is True, the binding site info will be
formatted and print out.
"""
df = self.dataset[self.dataset["Binding Site ID"] == binding_site_id].sort_values(by="Residence Time")
if print_data:
text = self._format_BS_print_info(binding_site_id, self._node_list[binding_site_id], sort_residue)
print(text)
return df
########################################
# interaction calculation
########################################
def collect_residue_contacts(self):
r"""Create contacting lipid index for residues.
This function creates contacting lipid index for residues that are used for the rest of calculation in PyLipID.
The design of contacting lipid index is to assist the calculation of contacts using a dual-cutoff scheme, which
considers a lipid as being in contact when the lipid moves closer than the lower cutoff and as being dissociated
when the lipid moves farther than the upper cutoff.
The lipid indices created by this method are stored in the private class variables of
``_contact_residue_high`` and ``_contact_residue_low`` for each of the cutoffs. These indices are python
dictionary objects with residue indices as their keys. For each residue, the lipid index stores the residue index
of contacting lipid molecules from each trajectory frame in a list.
The lipid index of the lower cutoff, i.e. ``_contact_residue_low`` is used to calculate lipid occupancy and lipid
count.
The Pearson correlation matrix of lipid interactions for protein residues is also calculated in this function and
stored in the class variable of ``interaction_corrcoef``.
The class attribute :meth:`~LipidInteraction.dataset` which stores the summary of lipid interaction as a
pandas.DataFrame object, is initialized in this method.
"""
self._protein_ref = None
self._lipid_ref = None
self._T_total = []
self._timesteps = []
self._protein_residue_id = []
# initialise data for interaction matrix
col = []
row = []
data = []
ncol_start = 0
# calculate interactions from trajectories
for traj_idx in trange(len(self._trajfile_list), desc="COLLECT INTERACTIONS FROM TRAJECTORIES",
total=len(self._trajfile_list)):
traj = md.load(self._trajfile_list[traj_idx], top=self._topfile_list[traj_idx], stride=self._stride)
traj_info, self._protein_ref, self._lipid_ref = get_traj_info(traj, lipid=self._lipid,
lipid_atoms=self._lipid_atoms,
resi_offset=self._resi_offset,
nprot=self._nprot,
protein_ref=self._protein_ref,
lipid_ref=self._lipid_ref)
if self._dt_traj is None:
timestep = traj.timestep / 1000000.0 if self._timeunit == "us" else traj.timestep / 1000.0
else:
timestep = float(self._dt_traj * self._stride)
self._T_total.append((traj.n_frames - 1) * timestep)
self._timesteps.append(timestep)
if len(self._protein_residue_id) == 0:
self._protein_residue_id = traj_info["protein_residue_id"]
self._residue_list = traj_info["residue_list"]
self._nresi_per_protein = len(self._residue_list)
self._duration = dict()
self._occupancy = dict()
self._lipid_count = dict()
self._contact_residues_high = {residue_id: [] for residue_id in self._protein_residue_id}
self._contact_residues_low = {residue_id: [] for residue_id in self._protein_residue_id}
self._koff = np.zeros(self._nresi_per_protein)
self._koff_boot = np.zeros(self._nresi_per_protein)
self._r_squared = np.zeros(self._nresi_per_protein)
self._r_squared_boot = np.zeros(self._nresi_per_protein)
self._res_time = np.zeros(self._nresi_per_protein)
self._residue_map = {residue_name: residue_id
for residue_id, residue_name in zip(self._protein_residue_id, self._residue_list)}
else:
assert len(self._protein_residue_id) == len(traj_info["protein_residue_id"]), \
"Trajectory {} contains {} residues whereas trajectory {} contains {} residues".format(
traj_idx, len(traj_info["protein_residue_id"]), traj_idx - 1, len(self._protein_residue_id))
ncol_per_protein = len(traj_info["lipid_residue_atomid_list"]) * traj.n_frames
for protein_idx in np.arange(self._nprot, dtype=int):
for residue_id, residue_atom_indices in enumerate(
traj_info["protein_residue_atomid_list"][protein_idx]):
# calculate interaction per residue
dist_matrix = np.array([np.min(
md.compute_distances(traj, np.array(list(product(residue_atom_indices, lipid_atom_indices))),
periodic=True, opt=True),
axis=1) for lipid_atom_indices in traj_info["lipid_residue_atomid_list"]])
contact_low, frame_id_set_low, lipid_id_set_low = cal_contact_residues(dist_matrix, self._cutoffs[0])
contact_high, _, _ = cal_contact_residues(dist_matrix, self._cutoffs[1])
self._contact_residues_high[residue_id].append(contact_high)
self._contact_residues_low[residue_id].append(contact_low)
# update coordinates for coo_matrix
col.append([ncol_start + ncol_per_protein * protein_idx + lipid_id * traj.n_frames +
frame_id for frame_id, lipid_id in zip(frame_id_set_low, lipid_id_set_low)])
row.append([residue_id for dummy in np.arange(len(frame_id_set_low), dtype=int)])
data.append(dist_matrix[lipid_id_set_low, frame_id_set_low])
ncol_start += ncol_per_protein * self._nprot
# calculate correlation coefficient matrix
row = np.concatenate(row)
col = np.concatenate(col)
data = np.concatenate(data)
contact_info = coo_matrix((data, (row, col)), shape=(self._nresi_per_protein, ncol_start))
self.interaction_corrcoef = sparse_corrcoef(contact_info)
self.dataset = pd.DataFrame({"Residue": [residue for residue in self._residue_list],
"Residue ID": self._protein_residue_id})
return
def compute_residue_duration(self, residue_id=None):
r"""Calculate lipid contact durations for residues
PyLipID calculates lipid contacts using a dual-cutoff scheme. In this scheme, a continuous contact starts when
a molecule moves closer than the lower distance cutoff and ends when the molecule moves out of the upper cutoff.
The duration between these two time points is the duration of the contact.
PyLipID implements this dual-cutoff tactic by creating a lipid index for the lower and upper
cutoff respectively, which records the lipid molecules within that distance cutoff at each trajectory frame
for residues. Such lipid indices are created by the method :meth:`~LipidInteraction.collect_residue_contacts`,
and are stored in the private class variables of ``_contact_residue_high`` and ``_contact_residue_low`` for
each of the cutoffs.
For calculation of contact durations, a lipid molecule that appears in the lipid index of the lower cutoff is
searched in the subsequent frames of the upper lipid index for that residue and the search then stops if this
molecule disappears from the upper cutoff index. This lipid molecule is labeled as 'checked' in the searched
frames in both lipid indices, and the duration of this contact is calculated from the number of frames in which
this lipid molecule appears in the lipid indices. This calculation iterates until all lipid molecules in the
lower lipid index are labeled as 'checked'.
This function returns a list of contact durations or lists of contact durations if multiple residue IDs are
provided.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
Returns
-------
durations : list
A list of contact durations or lists of contact durations if multiple residue IDs are provided.
See Also
--------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_duration
Calculate durations of contacts with binding sites.
pylipid.func.Duration
Calculate contact durations from lipid index.
"""
self._check_calculation("Residue", self.collect_residue_contacts)
if residue_id is None:
selected_residue_id = self._protein_residue_id
else:
selected_residue_id = np.atleast_1d(residue_id)
for residue_id in tqdm(selected_residue_id, desc="CALCULATE DURATION PER RESIDUE"):
self._duration[residue_id] = [
Duration(self._contact_residues_low[residue_id][(traj_idx*self._nprot)+protein_idx],
self._contact_residues_high[residue_id][(traj_idx*self._nprot)+protein_idx],
self._timesteps[traj_idx]).cal_durations()
for traj_idx in np.arange(len(self.trajfile_list))
for protein_idx in np.arange(self._nprot, dtype=int)]
self.dataset["Duration"] = [np.mean(np.concatenate(self._duration[residue_id]))
if len(self._duration[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
self.dataset["Duration std"] = [np.std(np.concatenate(self._duration[residue_id]))
if len(self._duration[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
if len(selected_residue_id) == 1:
return self._duration[residue_id]
else:
return [self._duration[residue_id] for residue_id in selected_residue_id]
def compute_residue_occupancy(self, residue_id=None):
"""Calculate the percentage of frames in which the specified residue formed lipid contacts for residues.
The lipid occupancy is calculated using the lower cutoff, and calculated as the percentage of frames in which
the specified lipid species formed contact with residues within the lower distance cutoff.
The returned occupancy list contains data from all protein copies and all trajectories.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
Returns
-------
occupancies : list
A list of lipid occupancies, of length of n_trajs x n_proteins, or lists of lipid occupancies if multiple
residue IDs are provided.
See Also
--------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_occupancy
Calculate binding site occupancy
pylipid.func.cal_occupancy
Calculate the percentage of frames in which a contact is formed.
"""
self._check_calculation("Residue", self.collect_residue_contacts)
if residue_id is None:
selected_residue_id = self._protein_residue_id
else:
selected_residue_id = np.atleast_1d(residue_id)
for residue_id in tqdm(selected_residue_id, desc="CALCULATE OCCUPANCY"):
self._occupancy[residue_id] = [cal_occupancy(self._contact_residues_low[residue_id][(traj_idx*self._nprot)+protein_idx])
for traj_idx in np.arange(len(self.trajfile_list))
for protein_idx in np.arange(self._nprot, dtype=int)]
self.dataset["Occupancy"] = [np.mean(self._occupancy[residue_id])
if len(self._occupancy[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
self.dataset["Occupancy std"] = [np.std(self._occupancy[residue_id])
if len(self._occupancy[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
if len(selected_residue_id) == 1:
return self._occupancy[residue_id]
else:
return [self._occupancy[residue_id] for residue_id in selected_residue_id]
def compute_residue_lipidcount(self, residue_id=None):
"""Calculate the average number of contacting lipids for residues.
This method calculates the number of the specified lipid species within the lower distance cutoff of a residue. The
reported value is averaged from the trajectory frames in which interaction between the specified lipid and the
residue is formed. Thus the returned values report the average number of surrounding lipid molecules when
the lipids are bound.
The returned lipid count list contains data from each of the protein copies and each of the trajectories.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
Returns
-------
lipidcounts : list
A list of lipid counts, of length of n_trajs x n_proteins, or lists of lipid counts if multiple
residue IDs are provided.
See Also
--------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_lipidcount
Calculate binding site lipid count.
pylipid.func.cal_lipidcount
Calculate the average number of contacting molecules.
"""
self._check_calculation("Residue", self.collect_residue_contacts)
if residue_id is None:
selected_residue_id = self._protein_residue_id
else:
selected_residue_id = np.atleast_1d(residue_id)
for residue_id in tqdm(selected_residue_id, desc="CALCULATE RESIDUE LIPIDCOUNT"):
self._lipid_count[residue_id] = [cal_lipidcount(self._contact_residues_low[residue_id][(traj_idx*self._nprot)+protein_idx])
for traj_idx in np.arange(len(self.trajfile_list))
for protein_idx in np.arange(self._nprot, dtype=int)]
self.dataset["Lipid Count"] = [np.mean(self._lipid_count[residue_id])
if len(self._lipid_count[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
self.dataset["Lipid Count std"] = [np.std(self._lipid_count[residue_id])
if len(self._lipid_count[residue_id]) > 0 else 0
for residue_id in self._protein_residue_id]
if len(selected_residue_id) == 1:
return self._lipid_count[residue_id]
else:
return [self._lipid_count[residue_id] for residue_id in selected_residue_id]
def compute_residue_koff(self, residue_id=None, nbootstrap=10, initial_guess=[1., 1., 1., 1.],
save_dir=None, plot_data=True, fig_close=True, fig_format="pdf", num_cpus=None):
r"""Calculate interaction koff and residence time for residues.
The koff is calculated from a survival time correlation function which describes the relaxation of the bound
lipids [1]_. Often the interactions between lipid and protein surface can be divided into prolonged interactions and
quick diffusive contacts. Thus PyLipID fits the normalised survival function to a bi-exponential curve which
describes the long and short decay periods.
The survival time correlation function σ(t) is calculated as follow
.. math::
\sigma(t) = \frac{1}{N_{j}} \frac{1}{T-t} \sum_{j=1}^{N_{j}} \sum_{v=0}^{T-t}\tilde{n}_{j}(v, v+t)
where T is the length of the simulation trajectory, :math:`N_{j}` is the total number of lipid contacts and
:math:`\sum_{v=0}^{T-t} \tilde{n}_{j}(v, v+t)` is a binary function that takes the value 1 if the contact of
lipid j lasts from time ν to time v+t and 0 otherwise. The values of :math:`\sigma(t)` are calculated for every
value of t from 0 to T ns, for each time step of the trajectories, and normalized by dividing by :math:`\sigma(0)`,
so that the survival time-correlation function has value 1 at t = 0.
The normalized survival function is then fitted to a biexponential to model the long and short decays of
lipid relaxation:
.. math::
\sigma(t) \sim A e^{-k_{1} t}+B e^{-k_{2} t}\left(k_{1} \leq k_{2}\right)
PyLipID takes :math:`k_{1}` as the the dissociation :math:`k_{off}`, and calculates the residence time as
:math:`\tau=1 / k_{off}`. PyLipID raises a warning for the impact on the accuracy of :math:`k_{off}`
calculation if trajectories are of different lengths when multiple trajectories are provided. PyLipID measures
the :math:`r^{2}` of the biexponential fitting to the survival function to show the quality of the
:math:`k_{off}` estimation. In addition, PyLipID bootstraps the contact durations and measures the
:math:`k_{off}` of the bootstrapped data, to report how well lipid contacts are sampled from simulations. The
lipid contact sampling, the curve-fitting and the bootstrap results can be conveniently checked via the
:math:`k_{off}` plot.
The calculation of koff for residues can be time-consuming, thus PyLipID uses python multiprocessing to
parallelize the calculation. The number of CPUs used for multiprocessing can be specified, otherwise all the
available CPUs will be used by default.
Parameters
----------
residue_id : int or list of int, default=None
The residue ID, or residue index, that is used by PyLipID for identifying residues. The ID starts from 0,
i.e. the ID of N-th residue is (N-1). If None, all residues are selected.
nbootstrap : int, default=10
Number of bootstrap on the interaction durations. For each bootstrap, samples of the size of the original
dataset are drawn from the collected durations with replacement. :math:`k_{off}` and :math:`r^{2}` are
calculated for each bootstrap.
initial_guess : array_like, default=[1., 1., 1., 1.]
The initial guess for the curve-fitting of the biexponential curve. Used by scipy.optimize.curve_fit.
save_dir : str, default=None
The directory for saving the koff figures of residues if plot_data is True. By default, the koff figures
are saved in the directory Residue_koffs_{lipid} under the root directory defined when ``LipidInteraction``
was initiated.
plot_data : bool, default=True
If True, plot the koff figures for residues.
fig_close : bool, default=True
Use matplotlib.pyplot.close() to close the koff figures. Can save memory if many figures are open and plotted.
fig_format : str, default="pdf"
The format of koff figures. Support formats that are supported by matplotlib.pyplot.savefig().
num_cpus : int or None, default=None
Number of CPUs used for multiprocessing. If None, all the available CPUs will be used.
Returns
---------
koff : scalar or list of scalar
The calculated koffs for selected residues.
restime : scalar or list of scalar
The calculated residence times for selected residues.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_site_koff
Calculate binding site koffs and residence times.
pylipid.func.cal_koff
Calculate residence time and koff.
pylipid.func.cal_survival_func
Compute the normalised survival function.
References
-----------
.. [1] García, <NAME>, Lewis. Computation of the mean residence time of water in the hydration shells
of biomolecules. 1993. Journal of Computational Chemistry.
"""
self._check_calculation("Duration", self.compute_residue_duration)
if plot_data:
koff_dir = check_dir(save_dir, "Residue_koffs_{}".format(self._lipid)) if save_dir is not None \
else check_dir(self._save_dir, "Residue_koffs_{}".format(self._lipid))
if len(set(self._residue_list)) != len(self._residue_list):
residue_name_set = ["{}_ResidueID{}".format(residue, residue_id) for residue, residue_id in
zip(self._residue_list, self._protein_residue_id)]
else:
residue_name_set = self._residue_list
if residue_id is not None:
selected_residue_id = np.atleast_1d(residue_id)
else:
selected_residue_id = self._protein_residue_id
residues_missing_durations = [residue_id for residue_id in selected_residue_id
if len(self._duration[residue_id]) == 0]
if len(residues_missing_durations) > 0:
self.compute_residue_duration(residue_id=residues_missing_durations)
t_total = np.max(self._T_total)
same_length = np.all(np.array(self._T_total) == t_total)
if not same_length:
warnings.warn(
"Trajectories have different lengths. This will impair the accuracy of koff calculation!")
timestep = np.min(self._timesteps)
same_timestep = np.all(np.array(self._timesteps) == timestep)
if not same_timestep:
warnings.warn(
"Trajectories have different timesteps. This will impair the accuracy of koff calculation!")
if plot_data:
fn_set = [os.path.join(koff_dir, "{}.{}".format(residue_name_set[residue_id], fig_format))
for residue_id in selected_residue_id]
else:
fn_set = [False for dummy in selected_residue_id]
returned_values = p_map(partial(calculate_koff_wrapper, t_total=t_total, timestep=timestep, nbootstrap=nbootstrap,
initial_guess=initial_guess, plot_data=plot_data, timeunit=self._timeunit,
fig_close=fig_close),
[np.concatenate(self._duration[residue_id]) for residue_id in selected_residue_id],
[residue_name_set[residue_id] for residue_id in selected_residue_id],
fn_set, num_cpus=num_cpus, desc="CALCULATE KOFF FOR RESIDUES")
for residue_id, returned_value in zip(selected_residue_id, returned_values):
self._koff[residue_id] = returned_value[0]
self._res_time[residue_id] = returned_value[1]
self._r_squared[residue_id] = returned_value[2]
self._koff_boot[residue_id] = returned_value[3]
self._r_squared_boot[residue_id] = returned_value[4]
# update dataset
self.dataset["Koff"] = self._koff
self.dataset["Residence Time"] = self._res_time
self.dataset["R Squared"] = self._r_squared
self.dataset["Koff Bootstrap avg"] = self._koff_boot
self.dataset["R Squared Bootstrap avg"] = self._r_squared_boot
if len(selected_residue_id) == 1:
return self._koff[selected_residue_id[0]], self._res_time[selected_residue_id[0]]
else:
return [self._koff[residue_id] for residue_id in selected_residue_id], \
[self._res_time[residue_id] for residue_id in selected_residue_id]
def compute_binding_nodes(self, threshold=4, print_data=True):
r"""Calculate binding sites.
Binding sites are defined based on a community analysis of protein residue-interaction networks that are created
from the lipid interaction correlation matrix. Given the definition of a lipid binding site, namely a
cluster of residues that bind to the same lipid molecule at the same time, PyLipID creates a distance vector
for each residue that records the distances to all lipid molecules as a function of time, and calculates the
Pearson correlation matrix of protein residues for binding the same lipid molecules. This correlation matrix is
calculated by :meth:`~LipidInteraction.collect_residue_contacts()` and stored in the class variable
``interaction_corrcoef``.
The protein residue interaction network is constructed based on the Pearson correlation matrix.
In this network, the nodes are the protein residues and the weights are the Pearson correlation
coefficients of pairs of residues. The interaction network is then decomposed into sub-units or communities,
which are groups of nodes that are more densely connected internally than with the rest of the network.
For the calculation of communities, the Louvain algorithm [1]_ is used to find high modularity network partitions.
Modularity, which measures the quality of network partitions, is defined as [2]_
.. math::
Q=\frac{1}{2 m} \sum_{i, j}\left[A_{i j}-\frac{k_{i} k_{j}}{2 m}\right] \delta\left(c_{i}, c_{j}\right)
where :math:`A_{i j}` is the weight of the edge between node i and node j; :math:`k_{i}` is the sum of weights
of the nodes attached to the node i, i.e. the degree of the node; :math:`c_{i}` is the community to which node i
assigned; :math:`\delta\left(c_{i}, c_{j}\right)` is 1 if i=j and 0 otherwise; and
:math:`m=\frac{1}{2} \sum_{i j} A_{i j}` is the number of edges. In the modularity optimization, the Louvain
algorithm orders the nodes in the network, and then, one by one, removes and inserts each node in a different
community :math:`c_{i}` until no significant increase in modularity is observed. After modularity optimization, all the nodes that
belong to the same community are merged into a single node, of which the edge weights are the sum of the weights
of the comprising nodes. This optimization-aggregation loop is iterated until all nodes are collapsed into one.
By default, this method returns binding sites of at least 4 residues. This filtering step is particularly helpful
for analysis on a smaller number of trajectory frames, in which false correlations are more likely to happen among
2 or 3 residues.
Parameters
----------
threshold : int, default=4
The minimum size of binding sites. Only binding sites with more residues than the threshold will be returned.
print_data : bool, default=True
If True, print a summary of binding site information.
Returns
-------
node_list: list
Binding site node list, i.e. a list of binding sites which contains sets of binding site residue indices
modularity : float or None
The modularity of the network partition. It measures the quality of the partition; the value lies between -1 and
1, and the larger the modularity, the better the partition.
See Also
--------
pylipid.func.get_node_list
Calculates community structures in interaction network.
References
----------
.. [1] <NAME>.; <NAME>.; <NAME>.; <NAME>., Fast unfolding of communities in large
networks. Journal of Statistical Mechanics: Theory and Experiment 2008, 2008 (10), P10008
.. [2] <NAME>., Analysis of weighted networks. Physical Review E 2004, 70 (5), 056131.
"""
self._check_calculation("Residue", self.compute_residue_koff)
corrcoef_raw = np.nan_to_num(self.interaction_corrcoef)
corrcoef = np.copy(corrcoef_raw)
node_list, modularity = get_node_list(corrcoef, threshold=threshold)
self._node_list = node_list
self._network_modularity = modularity
if len(self._node_list) == 0:
print("*"*30)
print(" No binding site detected!!")
print("*"*30)
else:
residue_BS_identifiers = np.ones(self._nresi_per_protein, dtype=int) * -1
for bs_id, nodes in enumerate(self._node_list):
residue_BS_identifiers[nodes] = int(bs_id)
# update dataset
self.dataset["Binding Site ID"] = residue_BS_identifiers
# initialise variable for binding site interactions
self._duration_BS = dict()
self._occupancy_BS = dict()
self._lipid_count_BS = dict()
self._koff_BS = np.zeros(len(self._node_list))
self._koff_BS_boot = np.zeros(len(self._node_list))
self._res_time_BS = np.zeros(len(self._node_list))
self._r_squared_BS = np.zeros(len(self._node_list))
self._r_squared_BS_boot = np.zeros(len(self._node_list))
if print_data:
print(f"Network modularity: {modularity:.3f}")
for bs_id, nodes in enumerate(self._node_list):
print("#" * 25)
print(f"Binding Site ID: {bs_id}")
print("{:>10s} -- {:<12s}".format("Residue", "Residue ID"))
for node in nodes:
print("{:>10s} -- {:<12d}".format(self._residue_list[node], self._protein_residue_id[node]))
print("#" * 25)
return node_list, modularity
def compute_site_duration(self, binding_site_id=None):
"""Calculate interaction durations for binding sites.
PyLipID calculates lipid contacts using a dual-cutoff scheme. In this scheme, a continuous contact starts when
a molecule moves closer than the lower distance cutoff and ends when the molecule moves out of the upper cutoff.
The duration between these two time points is the duration of the contact.
PyLipID implements this dual-cutoff tactic by creating a lipid index for the lower and upper
cutoff respectively, which records the lipid molecules within that distance cutoff at each trajectory frame
for residues. Such lipid indices are created by the method :meth:`~LipidInteraction.collect_residue_contacts`,
and are stored in the private class variables of ``_contact_residue_high`` and ``_contact_residue_low`` for
each of the cutoffs.
For calculating binding site contacts, the lipid molecules interacting with the binding site residues are
merged, with duplicates removed, to form the lipid indices for the upper cutoff and lower cutoff respectively.
Similar to the calculation for residues, the contact duration of a binding site is calculated as the duration
between the time point at which a lipid molecule appears in the lipid index of the lower cutoff and the time point
at which this molecule disappears from the upper cutoff index.
This function returns a list of contact durations or lists of contact durations if multiple binding site IDs are
provided.
Parameters
----------
binding_site_id : int or list of int, default=None
The binding site ID used in PyLipID. This ID is the index in the binding site node list that is
calculated by the method ``compute_binding_nodes``. The ID of the N-th binding site is (N-1). If None,
the contact duration of all binding sites are calculated.
Returns
-------
durations_BS : list
A list of contact durations or lists of contact durations if multiple binding site IDs are provided.
See Also
---------
pylipid.api.LipidInteraction.collect_residue_contacts
Create the lipid index.
pylipid.api.LipidInteraction.compute_residue_duration
Calculate residue contact durations.
pylipid.func.Duration
Calculate contact durations from lipid index.
"""
self._check_calculation("Binding Site ID", self.compute_binding_nodes, print_data=False)
selected_bs_id = np.atleast_1d(binding_site_id) if binding_site_id is not None \
else np.arange(len(self._node_list), dtype=int)
for bs_id in tqdm(selected_bs_id, desc="CALCULATE DURATION PER BINDING SITE"):
nodes = self._node_list[bs_id]
durations_BS = []
for traj_idx in np.arange(len(self._trajfile_list), dtype=int):
for protein_idx in np.arange(self._nprot, dtype=int):
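The snippet ends inside `compute_site_duration`. For orientation, a minimal sketch of the workflow described in the docstrings above is shown here; the trajectory and topology file names are placeholders:

li = LipidInteraction(["md_traj.xtc"], topfile_list=["md_top.gro"],
                      cutoffs=[0.475, 0.7], lipid="CHOL", nprot=1)
li.collect_residue_contacts()             # build the dual-cutoff lipid index
li.compute_residue_duration()             # contact durations per residue
li.compute_residue_occupancy()
li.compute_residue_koff(plot_data=False)  # koff / residence time from the survival-function fit
nodes, modularity = li.compute_binding_nodes(print_data=False)
li.compute_site_duration()
print(li.dataset.head())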
import h5py
import os
import glob
import re
import numpy as np
from . import peano
import warnings
from scipy.integrate import quad
base_path = os.environ['EAGLE_BASE_PATH']
release = os.environ['EAGLE_ACCESS_TYPE']
class Snapshot:
""" Basic SnapShot superclass which finds the relevant files and gets relevant information
regarding the snapshot specified.
arguments:
run - the run (e.g. L0012N0188)
model - an EAGLE model (e.g. Ref)
tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
history:
written - Mackereth (UoB) - 22/11/2019
"""
def __init__(self, run, model, tag, load_particles=False):
#store the snapshot identity info
self.run = run
self.model = model
self.tag = tag
if release == 'public':
self.simlabel = self.model+self.run
self.snaplabel = 'snapshot_'+self.tag
self.base_subfile = 'snap_'+self.tag
self.path = os.path.join(base_path, self.simlabel, self.snaplabel)
elif release == 'ARI':
self.snaplabel = 'snapshot_'+self.tag
self.base_subfile = 'snap_'+self.tag
self.path = os.path.join(base_path, self.run, self.model, 'data', self.snaplabel)
else:
raise Exception('private/custom data access is not yet implemented!')
if not os.path.exists(os.path.join(self.path, self.base_subfile+'.0.hdf5')):
raise Exception('could not see snapshot data in directory: '+self.path)
#get the files related to this snapshot and load some of their metadata
self.files = natural_sort(glob.glob(os.path.join(self.path, self.base_subfile+'*.hdf5')))
self.nfiles = len(self.files)
self.header_dict = dict(h5py.File(self.files[0], 'r')['/Header'].attrs.items())
self.abundance_dict = dict(h5py.File(self.files[0], 'r')['/Parameters/ChemicalElements'].attrs.items())
self.elements = ['Hydrogen', 'Helium', 'Carbon', 'Nitrogen', 'Oxygen', 'Silicon', 'Sulphur', 'Magnesium', 'Iron']
self.solar_abundances = dict([(self.elements[i],self.abundance_dict['SolarAbundance_%s' % self.elements[i]]) for i in range(len(self.elements))])
self.BoxSize = self.header_dict['BoxSize']
self.HubbleParam = self.header_dict['HubbleParam']
self.Omega0, self.OmegaLambda, self.OmegaBaryon, self.a0 = self.header_dict['Omega0'], self.header_dict['OmegaLambda'], self.header_dict['OmegaBaryon'], self.header_dict['ExpansionFactor']
self.NumPartTotal = self.header_dict['NumPart_Total']
self.ParticleTypes = np.array([0,1,2,3,4,5])
self.ParticleTypePresent = self.NumPartTotal > 0
self.ParticleTypePresent_file = np.zeros((len(self.files),len(self.NumPartTotal)), dtype=bool)
for ii, file in enumerate(self.files):
head = dict(h5py.File(file, 'r')['/Header'].attrs.items())
self.ParticleTypePresent_file[ii, head['NumPart_ThisFile'] > 0] = True
self._ptypeind = {self.ParticleTypes[self.ParticleTypePresent][i]:i for i in range(len(self.ParticleTypes[self.ParticleTypePresent]))}
#get the Hash Table info for P-H key sorting
self.HashBits = dict(h5py.File(self.files[0], 'r')['/HashTable'].attrs.items())['HashBits']
self.HashGridSideLength = 2**self.HashBits
self.HashGridCellSize = self.BoxSize/self.HashGridSideLength
self.firstkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))
self.lastkeys = np.zeros((len(self.ParticleTypes[self.ParticleTypePresent]),self.nfiles))
self.datasets = {}
for ii,parttype in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
self.firstkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/FirstKeyInFile'])
self.lastkeys[ii] = np.array(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'])
#be sure we get a file with this parttype (only really an issue for when low N stars!!)
ind = np.nonzero(h5py.File(self.files[0], 'r')['/HashTable/PartType'+str(parttype)+'/LastKeyInFile'][:])[0][0]
self.datasets['PartType'+str(parttype)] = list(h5py.File(self.files[ind], 'r')['/PartType'+str(parttype)].keys())
if load_particles:
self._get_coordinates()
def _get_coordinates(self):
""" Load all the coordinates of the available particles
"""
#load coordinates and velocities
coordinates = []
velocities = []
for ii,type in enumerate(self.ParticleTypes[self.ParticleTypePresent]):
#now load the coordinates in these files and save the indices for each particle type
thistypecoord, thistypevels = self._get_parttype_indices(type, self.files)
coordinates.append(thistypecoord)
velocities.append(thistypevels)
self.velocities = velocities
self.coordinates = coordinates
def _get_parttype_indices(self, parttype, files):
"""get the coordinates and indices for a given particle type in a given region"""
coords, velocities, indices = [], [], []
for ii,file in enumerate(files):
#check this particle type is present here
if not _particle_type_present(parttype, file):
return None, None
# load the file
thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
#store the coordinates and the indices of these particles in the file
coords.append(thisfilecoords)
velocities.append(thisfilevels)
return np.concatenate(coords), np.concatenate(velocities)
def _get_coords_vels(self, parttype, files):
"""get the coordinates and velocities for all particles of a certain type"""
if not self.ParticleTypePresent[parttype]:
warnings.warn('Particle type is not present, returning empty arrays...')
return np.array([]), np.array([]), np.array([])
coords, velocities, indices = [], [], []
for file in files:
# load the file
thisfilecoords = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Coordinates'])
thisfilevels = np.array(h5py.File(file, 'r')['/PartType'+str(parttype)+'/Velocity'])
#store the coordinates and the indices of these particles in the file
coords.append(thisfilecoords)
velocities.append(thisfilevels)
return np.concatenate(coords), np.concatenate(velocities)
def get_dataset(self, parttype, dataset, physical=False, cgs=False):
""" get the data for a given entry in the HDF5 file for the given region """
if not self.ParticleTypePresent[parttype]:
warnings.warn('Particle type is not present, returning empty arrays...')
return np.array([])
key = os.path.join('/PartType'+str(parttype),dataset)
if physical:
#find conversion factor
factor = self._conversion_factor(key, self.a0, self.HubbleParam, cgs=cgs)
elif not physical and cgs:
factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
else:
#else just multiply by 1!
factor = 1.
out = []
for ii,file in enumerate(self.files):
# load this file and get the particles
out.append(np.array(h5py.File(file, 'r')[key]) * factor)
return np.concatenate(out)
def _conversion_factor(self, key, a, h, cgs=False):
aexp_scale, h_scale = self._get_conversion_factor_exponents(key)
if cgs:
cgs_factor = h5py.File(self.files[0], 'r')[key].attrs['CGSConversionFactor']
else:
cgs_factor = 1.
return a**(aexp_scale)*h**(h_scale)*cgs_factor
def _get_conversion_factor_exponents(self, key):
aexp_scale = h5py.File(self.files[0], 'r')[key].attrs['aexp-scale-exponent']
h_scale = h5py.File(self.files[0], 'r')[key].attrs['h-scale-exponent']
return aexp_scale, h_scale
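# [X/H] below is log10(X/H) - log10(X_sun/H_sun), computed from the element mass fractions
# relative to the solar mass fractions stored in the snapshot parameters.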
def _single_X_H(self,X,H,element):
solar = self.solar_abundances[element]
solarH = self.solar_abundances['Hydrogen']
return np.log10(X/H)-np.log10(solar/solarH)
def abundance_ratios(self,gas=False,smoothed=True):
""" Compute element abundance ratios for the region, returns a dict of [X/H] """
if smoothed:
e_key = 'SmoothedElementAbundance'
else:
e_key = 'ElementAbundance'
if gas:
parttype = 0
else:
parttype = 4
entries = []
H = self.get_dataset(parttype,os.path.join(e_key,'Hydrogen'))
for i in range(len(self.elements)):
if self.elements[i] == 'Hydrogen' or self.elements[i] == 'Sulphur':
continue
X = self.get_dataset(parttype,os.path.join(e_key,self.elements[i]))
entries.append((self.elements[i],self._single_X_H(X,H,self.elements[i])))
return dict(entries)
def t_lookback(self,a):
return a / (np.sqrt(self.Omega0 * a + self.OmegaLambda * (a ** 4)))
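# This integrand is dt/da = 1/(a*H(a)) with H(a) = H0*sqrt(Omega0*a**-3 + OmegaLambda) for a flat
# universe, rewritten as a/sqrt(Omega0*a + OmegaLambda*a**4); the prefactor
# (1/(HubbleParam*100)) * (3.086e19/3.1536e16) applied in z2age/a2age converts km/s/Mpc to Gyr.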
def z2age(self,z):
a = 1 / (1 + z)
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t
def a2age(self,a):
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t
def z2tau(self,z):
t_em = quad(self.t_lookback, 0., self.a0)[0]
t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em
a = 1 / (1 + z)
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)
def a2tau(self,a):
t_em = quad(self.t_lookback, 0., self.a0)[0]
t_em = (1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t_em
t = np.array([quad(self.t_lookback, x, self.a0)[0] for x in a])
return t_em - ((1 / (self.HubbleParam * 100)) * (3.086e19 / 3.1536e16) * t)
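# Minimal usage sketch (assumes EAGLE_BASE_PATH / EAGLE_ACCESS_TYPE are set and the snapshot
# files exist locally; the run/model/tag names below are only illustrative):
# snap = Snapshot('L0012N0188', 'Ref', '028_z000p000')
# star_mass = snap.get_dataset(4, 'Mass', physical=True, cgs=True)
# ages = snap.a2age(snap.get_dataset(4, 'StellarFormationTime'))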
class SnapshotRegion(Snapshot):
""" A class inheriting from SnapShot, which defines a region inside a larger simulation snapshot.
when initialised, this will read the files in that region, and get the indices of the particles inside the
desired region. The necessary datasets can then be loaded by using get_dataset.
arguments:
run - the run (e.g. L0012N0188)
model - an EAGLE model (e.g. Ref)
tag - a tag string specifying a snapshot output (e.g. 028_z000p000)
center - the center of the desired region
sidelength - the length of a side of the volume required
history:
written - Mackereth (UoB) - 22/11/2019
"""
def __init__(self, run, model, tag, center, sidelength, just_get_files=False):
#we want everything from SnapShot plus some extras
super().__init__(run, model, tag)
self.center = center
self.sidelength = sidelength
self.centered = False
self._index_region(self.center, self.sidelength, justfiles=just_get_files)
def _index_region(self, center, side_length, phgrid_n=70, justfiles=False):
""" Load a region defined by a central cordinate and a side length
arguments:
center - the [x,y,z] coordinate of the desired center (simulation units)
side_length - the desired side length (in the simulation units)
keyword arguments:
phgrid_n - the number of grid points along a side length to look for PH cells (default 70)
"""
#work out which files contain the desired region
grid = peano.coordinate_grid(center, side_length, self.BoxSize, n=phgrid_n)
keys = peano.get_unique_grid_keys(grid, self.HashGridCellSize, self.BoxSize, bits=self.HashBits)
particles_in_volume = self.ParticleTypes[self.ParticleTypePresent]
self.files_for_region = []
self.file_indices = []
coordinates = []
velocities = []
indices = []
for ii in self.ParticleTypes:
if not self.ParticleTypePresent[ii]:
continue
Nfiles = self._get_parttype_files(ii, keys)
if len(Nfiles) < 1:
#particle is not present in the region - remove from here
self.ParticleTypePresent[ii] = 0
continue
thisfiles = np.array(self.files)
import gym
import numpy as np
import random
import tensorflow as tf
import matplotlib.pyplot as plt
env = gym.make('FrozenLake-v0')
tf.reset_default_graph()
# Feed Forward
inputs1 = tf.placeholder(shape=[1, 16], dtype=tf.float32)
W = tf.Variable(tf.random_uniform([16,4], 0, 0.01))
Qout = tf.matmul(inputs1, W)
predict = tf.argmax(Qout, 1)
# loss = (targetQ - PredictQ)^2
nextQ = tf.placeholder(shape=[1,4], dtype=tf.float32)
loss = tf.reduce_sum(tf.square(nextQ - Qout))
trainer = tf.train.GradientDescentOptimizer(learning_rate=0.1)
updateModel = trainer.minimize(loss)
init = tf.global_variables_initializer()
y = .99
e = 0.1
num_episodes = 2000
jList = []
rList = []
with tf.Session() as sess:
sess.run(init)
for i in range(num_episodes):
s = env.reset()
rAll = 0
d = False
j = 0
while j < 99:
j += 1
# np.identity(16)[s:s+1] returns a 1x16 one-hot row vector encoding the agent's position s.
a, allQ = sess.run([predict, Qout], feed_dict={inputs1:np.identity(16)[s:s+1]})
if np.random.rand(1) < e:
a[0] = env.action_space.sample()
s1,r,d,_ = env.step(a[0])
Q1 = sess.run(Qout,feed_dict={inputs1:np.identity(16)[s1:s1+1]})
maxQ1 = np.max(Q1)
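# Standard Q-network update (sketch): targetQ = allQ; targetQ[0, a[0]] = r + y * maxQ1; then feed
# targetQ through the `nextQ` placeholder and run `updateModel` to take a gradient step.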
"""
This file contains specific functions for computing losses of FCOS
file
"""
import logging
import torch
from torch.nn import functional as F
from torch import nn
import os
from ..utils import concat_box_prediction_layers
from fcos_core.layers import IOULoss
from fcos_core.layers import SigmoidFocalLoss
from fcos_core.layers import sigmoid_focal_loss_bce
from fcos_core.modeling.matcher import Matcher
from fcos_core.modeling.utils import cat
from fcos_core.structures.boxlist_ops import boxlist_iou
from fcos_core.structures.boxlist_ops import cat_boxlist
INF = 100000000
def get_num_gpus():
return int(os.environ["WORLD_SIZE"]) if "WORLD_SIZE" in os.environ else 1
def reduce_sum(tensor):
if get_num_gpus() <= 1:
return tensor
import torch.distributed as dist
tensor = tensor.clone()
dist.all_reduce(tensor, op=dist.reduce_op.SUM)
return tensor
class FCOSLossComputation(object):
"""
This class computes the FCOS losses.
"""
def __init__(self, cfg):
self.cls_loss_func = SigmoidFocalLoss(
cfg.MODEL.FCOS.LOSS_GAMMA,
cfg.MODEL.FCOS.LOSS_ALPHA
)
self.fpn_strides = cfg.MODEL.FCOS.FPN_STRIDES
self.center_sampling_radius = cfg.MODEL.FCOS.CENTER_SAMPLING_RADIUS
self.iou_loss_type = cfg.MODEL.FCOS.IOU_LOSS_TYPE
self.norm_reg_targets = cfg.MODEL.FCOS.NORM_REG_TARGETS
# we make use of IOU Loss for bounding boxes regression,
# but we found that L1 in log scale can yield a similar performance
self.box_reg_loss_func = IOULoss(self.iou_loss_type)
self.centerness_loss_func = nn.BCEWithLogitsLoss(reduction="sum")
self.COUNT = [0, 0, 0, 0, 0]
def gmm_clustter_2(self, cls_loss):
from sklearn.mixture import GaussianMixture
import numpy as np
# mean = torch.mean(cls_loss)
# sigma = torch.std(cls_loss)
min_loss = torch.min(cls_loss).cpu().detach().numpy()
max_loss = torch.max(cls_loss).cpu().detach().numpy()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
cls_loss = cls_loss.view(-1, 1).cpu().detach().numpy()
gm = GaussianMixture(n_components=2, weights_init=[0.5, 0.5],
means_init=means_init, precisions_init= precisions_init)
gm.fit(cls_loss)
results = gm.predict(cls_loss)
assignments = results == 0
if len(np.nonzero(assignments)[0]) > 0:
scores = gm.score_samples(cls_loss)
score_fgs = scores[assignments]
fgs_inds = np.nonzero(assignments)[0]
fgs_thr_ind = np.argmax(score_fgs)
assignments_ = cls_loss.reshape(-1) <= cls_loss[fgs_inds[fgs_thr_ind]]
assignments = assignments & assignments_
return torch.from_numpy(assignments)
def gmm_clustter(self, cls_loss):
from sklearn.mixture import GaussianMixture
import numpy as np
topk = 12
topk = min(topk, torch.numel(cls_loss))
cls_loss = cls_loss.cpu().detach().numpy().flatten()
lenth = cls_loss.shape[0]
assign_topk = np.argpartition(cls_loss, topk - 1)[0:topk]
cls_loss = cls_loss[assign_topk]
min_loss = np.min(cls_loss)
max_loss = np.max(cls_loss)
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
cls_loss = cls_loss.reshape((-1, 1))
gm = GaussianMixture(n_components=2, weights_init=[0.5, 0.5],
means_init=means_init, precisions_init= precisions_init)
gm.fit(cls_loss)
results = gm.predict(cls_loss)
assign_temp = results == 0
assignments = np.zeros(lenth, dtype=np.bool)
assignments[assign_topk[assign_temp]] = True
# if len(np.nonzero(assignments)[0]) > 0:
# scores = gm.score_samples(cls_loss)
# score_fgs = scores[assignments]
# fgs_inds = np.nonzero(assignments)[0]
# fgs_thr_ind = np.argmax(score_fgs)
# assignments_ = cls_loss.reshape(-1) < cls_loss[fgs_inds[fgs_thr_ind]]
# assignments = assignments & assignments_
return torch.from_numpy(assignments)
def topk_clustter(self, cls_loss, k = 9):
import numpy as np
# mean = torch.mean(cls_loss)
# sigma = torch.std(cls_loss)
min_loss = torch.min(cls_loss).cpu().detach().numpy()
max_loss = torch.max(cls_loss).cpu().detach().numpy()
means_init = np.array([min_loss, max_loss]).reshape(2, 1)
precisions_init = np.array([0.1, 0.1]).reshape(2, 1, 1)
cls_loss = cls_loss.flatten()
k = min(k, len(cls_loss))
cls_loss = 0 - cls_loss
_, assignments = torch.topk(cls_loss, k)
return assignments
def avg_clustter(self, cls_loss):
mean = torch.mean(cls_loss)
sigma = torch.std(cls_loss)
assignments = cls_loss <= mean
return assignments
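# The gmm_/topk_/avg_ clustter helpers above all answer the same question -- which candidates count
# as positives -- from a vector of per-anchor classification losses: the GMM variants fit a
# two-component mixture and keep the low-loss component, topk_clustter keeps the k smallest losses,
# and avg_clustter keeps losses at or below the mean.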
def dbscan_clustter(self, loss):
from sklearn.cluster import DBSCAN
import numpy as np
def get_ious(self, pred, target):
pred_left = pred[:, 0]
pred_top = pred[:, 1]
pred_right = pred[:, 2]
pred_bottom = pred[:, 3]
target_left = target[:, 0]
target_top = target[:, 1]
target_right = target[:, 2]
target_bottom = target[:, 3]
target_area = (target_left + target_right) * \
(target_top + target_bottom)
pred_area = (pred_left + pred_right) * \
(pred_top + pred_bottom)
w_intersect = torch.min(pred_left, target_left) + torch.min(pred_right, target_right)
g_w_intersect = torch.max(pred_left, target_left) + torch.max(
pred_right, target_right)
h_intersect = torch.min(pred_bottom, target_bottom) + torch.min(pred_top, target_top)
g_h_intersect = torch.max(pred_bottom, target_bottom) + torch.max(pred_top, target_top)
ac_uion = g_w_intersect * g_h_intersect + 1e-7
area_intersect = w_intersect * h_intersect
area_union = target_area + pred_area - area_intersect
ious = (area_intersect + 1.0) / (area_union + 1.0)
gious = ious - (ac_uion - area_union) / ac_uion
return ious, gious
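# GIoU = IoU - |C \ (A u B)| / |C|, where C is the smallest axis-aligned box enclosing both boxes
# (ac_uion above); the +1.0 smoothing in the IoU term avoids division by zero for degenerate boxes.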
# -*- coding: utf-8 -*-
import itertools
import numpy as np
from . import builder
from . import converters
MOLAL_MASS_WATER = 18.01528 #g/mol
MOLALITY_WATER = 1e3*1/MOLAL_MASS_WATER #mol/kg
class SolutionResult():
"""
Class for solution of equilibria
Parameters
----------
TK: float
Temperature in Kelvin
base_species: List[str]
Base aqueous species in system
elements: List[str]
Elements in system
species: List[str]
Species in system
reactions: List[dict]
Reactions in system
solid_reactions: List[dict]
Solid Reactions in system
formula_matrix: ndarray
Formula matrix of system
stoich_matrix: ndarray
Stoichiometric matrix of system
solid_formula_matrix: ndarray
Solid formula matrix of system
solid_stoich_matrix: ndarray
Solid stoichiometric matrix of system
"""
def __init__(self, equilibrium_system, x, TK,
molals_solids=None, solid_phases_in=None,
molals_gases=None, gas_phases_in=None,
PATM=1.0):
self.TK = TK
self.PATM = PATM
self.solverlog = equilibrium_system.solverlog
self.solvertype = equilibrium_system.solvertype
self._x_molal = x
self._x_logact = equilibrium_system.activity_function(x, TK)
self._x_act = np.nan_to_num(10**self._x_logact)
self._molals_solids = molals_solids
self._solid_phases_in = solid_phases_in
self._molals_gases = molals_gases
self._gas_phases_in = gas_phases_in
self.base_species = equilibrium_system.base_species
self.species = equilibrium_system.species
self.reactions = equilibrium_system.reactions
self.solid_reactions = equilibrium_system.solid_reactions
self.gas_reactions = equilibrium_system.gas_reactions
self.elements = equilibrium_system.elements
self.formula_matrix = equilibrium_system.formula_matrix
self.stoich_matrix = equilibrium_system.stoich_matrix
self.solid_formula_matrix = equilibrium_system.solid_formula_matrix
self.solid_stoich_matrix = equilibrium_system.solid_stoich_matrix
self.gas_formula_matrix = equilibrium_system.gas_formula_matrix
self.gas_stoich_matrix = equilibrium_system.gas_stoich_matrix
self._logsatur = self._build_saturation_indexes()
def getlog(self):
separator = "\n" + "-"*40 + "\n"
conditions_block_init = f"CONDITIONS\n{self.solvertype}"
conditions_block = conditions_block_init + "\n" + self.solverlog
species_block = self._make_species_string()
properties_block = self._make_properties_string()
phases_block = self._make_phases_string()
saturation_block = self._make_saturation_string()
log = separator.join((conditions_block,
species_block,
properties_block,
phases_block,
saturation_block))
return log
def savelog(self, filename):
with open(filename, "w") as f:
f.write(self.getlog())
@property
def molals(self):
"""Molals"""
molals_dict = {'H2O': MOLALITY_WATER}
molals_dict.update(self.solute_molals)
return molals_dict
@property
def solute_molals(self):
"""Molals of solutes"""
molals_dict = {self.solutes[i]: self._x_molal[i]
for i in range(len(self._x_molal))}
return molals_dict
@property
def mole_fractions(self):
molal_sum = sum(self.molals.values())
return {key: value/molal_sum for key, value in self.molals.items()}
@property
def concentrations(self): # mM or mol/m^3
"""Equilibrium concentrations. Assumes water volue much greater than ionic volumes.
At high ionic concentration one should give preference to molals"""
return {sp: converters.molal_to_mmolar(val,
self.TK)
for sp, val in self.molals.items()}
@property
def concentrations_mgl(self):
return {sp: converters.molal_to_mgl(val,
sp,
self.TK)
for sp, val in self.molals.items()}
@property
def elements_mgl(self):
return {el: converters.molal_to_mgl(val,
el,
self.TK)
for el, val in self.elements_molals.items()}
@property
def activities(self):
"""Equilibrium activities"""
return {self.species[i]: self._x_act[i]
for i in range(len(self._x_act))}
@property
def saturation_indexes(self):
"""Saturation indexes for solids"""
return {self.solid_phase_names[i]: self._logsatur[i]
for i in range(len(self._logsatur))}
@property
def saturations(self):
return {k:10**v for k, v in self.saturation_indexes.items()}
@property
def ionic_strength(self):
"""Ionic strength of system"""
return 0.5*np.sum(
self._charge_vector[1:]**2*self._x_molal)
@property
def solute_elements(self): # Ignore H and O
"""Elements ignoring H and O"""
return self.elements[2:]
@property
def solutes(self): # Ignore H2O
"""Solutes"""
return self.species[1:]
@property
def solid_phase_names(self):
"""Names of solid phases"""
return [sol_reac['phase_name'] for sol_reac in self.solid_reactions]
@property
def solid_molals(self):
"""Solid molals"""
if self._solid_phases_in is None:
solid_molals_ = dict()
else:
solid_molals_ = dict(zip(self._solid_phases_in, self._molals_solids))
solid_molals = {k: solid_molals_.get(k, 0.0) for k in self.solid_phase_names}
return solid_molals
@property
def gas_phase_names(self):
"""Names of solid phases"""
return [gas_reac['phase_name'] for gas_reac in self.gas_reactions]
@property
def gas_molals(self):
"""Solid molals"""
if self._gas_phases_in is None:
gas_molals_ = dict()
else:
gas_molals_ = dict(zip(self._gas_phases_in, self._molals_gases))
gas_molals = {k: gas_molals_.get(k, 0.0) for k in self.gas_phase_names}
return gas_molals
@property
def elements_molals(self):
"""Molals for elements"""
balance_vector = self._balance_vector
return {k: balance_vector[i] for i, k in enumerate(self.elements)}
@property
def charge_density(self):
"""Charge density (e/kg)"""
return np.sum(self.formula_matrix[-1, 1:]*self._x_molal)
import numpy as np
import scipy as sp
import pandas as pd
from tqdm import tqdm
import copy
import time
from sklearn.model_selection import train_test_split
from jax.config import config
#config.update("jax_enable_x64", True)
import jax.numpy as jnp
from jax import random
from surv_copula.main_copula_survival import fit_copula_survival,fit_parametric_a0,\
predict_copula_survival,check_convergence_pr,predictive_resample_survival
from surv_copula.parametric_survival_functions import pr_lomax_smc,pr_lomax_IS
#Import data
data = pd.read_csv('./data/pbc.csv')
t = np.array(data['t'])
delta = np.array(data['delta'])
delta[delta == 1.] = 0
delta[delta == 2.] = 1
trt = np.array(data['trt'])
#Split into treatments (filtering NA)
t1 = t[trt == 1.]
delta1 = delta[trt==1.]
t2 = t[trt == 2.]
delta2 = delta[trt==2.]
#Initialize cv
rep_cv = 10
n1 = np.shape(t1)[0]
n_train1 = int(n1/2)
n_test1 = n1-n_train1
n2 = np.shape(t2)[0]
n_train2 = int(n2/2)
n_test2 = n2-n_train2
test_ll_cv1 = np.zeros(rep_cv)
test_ll_cv2 = np.zeros(rep_cv)
#!/usr/bin/env python
"""
Pylab Extras
============
This module contains various functions similar to those in
Matlab that are not in pylab.
- eps Compute spacing of floating point numbers.
- minmax Return range of array.
- realmax Return largest representable positive floating point number.
- realmin Return smallest representable positive floating point number.
"""
# Copyright (c) 2009-2015, <NAME>
# All rights reserved.
# Distributed under the terms of the BSD license:
# http://www.opensource.org/licenses/bsd-license
__all__ = ['eps', 'minmax', 'realmax', 'realmin']
from numpy import array, finfo, single, float, double, longdouble, \
floor, log2, abs, inf, isnan, isinf, NaN, min, max, shape, vstack
finfo_dict = {single:finfo(single),
float:finfo(float),
double:finfo(double),
longdouble:finfo(longdouble)}
def realmax(t=double):
"""Return the largest positive floating point number representable
with the specified precision on this computer. Double precision is
assumed if no floating point type is specified."""
if t not in finfo_dict:
raise ValueError('invalid floating point type')
else:
return finfo_dict[t].max
def realmin(t=double):
"""Return the smallest positive floating point number
representable with the specified precision on this computer.
Double precision is assumed if no floating point type is specified."""
if t not in finfo_dict:
raise ValueError('invalid floating point type')
else:
return finfo_dict[t].tiny.item()
def eps(x):
"""Compute the spacing of floating point numbers."""
t = type(x)
if t not in finfo_dict:
raise ValueError('invalid floating point type')
ibeta = int(finfo_dict[t].machar.ibeta)
maxexp = finfo_dict[t].maxexp
machep = finfo_dict[t].machep
minexp = finfo_dict[t].minexp
negep = finfo_dict[t].negep
xmax = finfo_dict[t].machar.xmax
xmin = finfo_dict[t].machar.xmin
x = abs(x)
if isinf(x) or isnan(x):
return NaN
elif x >= xmax:
return ibeta**(maxexp+negep)
elif x > xmin:
# Convert output of log2() to int to prevent
# imprecision from confusing floor():
return ibeta**(machep+int(floor(int(log2(x)))))
else:
return ibeta**(minexp+negep+1)
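# e.g. eps(1.0) == 2.0**-52 ~ 2.22e-16 for IEEE double precision, matching MATLAB's eps.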
def minmax(x):
"""Return the range of the given array. If the array has 2
dimensions, return an array containing the minima and maxima of
each of the rows."""
dims = len(shape(x))
if dims == 1:
return array((min(x), max(x)))
elif dims == 2:
return vstack((min(x, 1), max(x, 1))).T
import os
import warnings
import numpy as np
import matplotlib.pyplot as plt
import astropy.stats
import astropy.units as u
CLIGHT = 299792458.0 # m/s
def path_to_eazy_data():
return os.path.join(os.path.dirname(__file__), 'data')
def set_warnings(numpy_level='ignore', astropy_level='ignore'):
"""
Set global numpy and astropy warnings
Parameters
----------
numpy_level : {'ignore', 'warn', 'raise', 'call', 'print', 'log'}
Numpy error level (see `~numpy.seterr`).
astropy_level : {'error', 'ignore', 'always', 'default', 'module', 'once'}
Astropy error level (see `~warnings.simplefilter`).
"""
from astropy.utils.exceptions import AstropyWarning
np.seterr(all=numpy_level)
warnings.simplefilter(astropy_level, category=AstropyWarning)
def running_median(xi, yi, NBIN=10, use_median=True, use_nmad=True, reverse=False, bins=None,
                   x_func=astropy.stats.biweight_location, y_func=astropy.stats.biweight_location,
                   std_func=astropy.stats.biweight_midvariance, integrate=False):
"""
Running median/biweight/nmad
"""
NPER = xi.size // NBIN
if bins is None:
so = np.argsort(xi)
if reverse:
so = so[::-1]
bx = np.linspace(0,len(xi),NBIN+1)
bins = np.interp(bx, np.arange(len(xi)), xi[so])
if reverse:
bins = bins[::-1]
NBIN = len(bins)-1
xm = np.arange(NBIN)*1.
xs = xm*0
ym = xm*0
ys = xm*0
N = np.arange(NBIN)
if use_median:
y_func = np.median
if use_nmad:
std_func = astropy.stats.mad_std
#idx = np.arange(NPER, dtype=int)
for i in range(NBIN):
in_bin = (xi > bins[i]) & (xi <= bins[i+1])
N[i] = in_bin.sum() #N[i] = xi[so][idx+NPER*i].size
if integrate:
xso = np.argsort(xi[in_bin])
ma = xi[in_bin].max()
mi = xi[in_bin].min()
xm[i] = (ma+mi)/2.
dx = (ma-mi)
ym[i] = np.trapz(yi[in_bin][xso], xi[in_bin][xso])/dx
else:
xm[i] = x_func(xi[in_bin])
ym[i] = y_func(yi[in_bin])
ys[i] = std_func(yi[in_bin])
# if use_median:
# xm[i] = np.median(xi[in_bin]) # [so][idx+NPER*i])
# ym[i] = np.median(yi[in_bin]) # [so][idx+NPER*i])
# else:
# xm[i] = astropy.stats.biweight_location(xi[in_bin]) # [so][idx+NPER*i])
# ym[i] = astropy.stats.biweight_location(yi[in_bin]) # [so][idx+NPER*i])
#
# if use_nmad:
# mad = astropy.stats.median_absolute_deviation
# ys[i] = 1.4826*mad(yi[in_bin]) # [so][idx+NPER*i])
# else:
# ys[i] = astropy.stats.biweight_midvariance(yi[in_bin]) # [so][idx+NPER*i])
return xm, ym, ys, N
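# Usage sketch (x, y are assumed 1-D arrays): binned biweight location and scatter of y vs x
# xm, ym, ys, N = running_median(x, y, NBIN=16, use_median=False, use_nmad=False)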
def nmad(arr):
import astropy.stats
return 1.48*astropy.stats.median_absolute_deviation(arr)
def log_zgrid(zr=[0.7,3.4], dz=0.01):
"""Make a logarithmically spaced redshift grid
Parameters
----------
zr : [float, float]
Minimum and maximum of the desired grid
dz : float
Step size, dz/(1+z)
Returns
-------
zgrid : array-like
Redshift grid
"""
zgrid = np.exp(np.arange(np.log(1+zr[0]), np.log(1+zr[1]), dz))-1
return zgrid
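# Example: log_zgrid(zr=[0.7, 3.4], dz=0.01) returns a grid with uniform spacing in log(1+z),
# i.e. dz/(1+z) = 0.01 between consecutive points.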
def trapz_dx(x):
"""
Return trapezoid rule coefficients, useful for numerical integration
using a dot product
Parameters
----------
x : array-like
Independent variable
Returns
-------
dx : array_like
Coefficients for trapezoidal rule integration.
"""
dx = np.zeros_like(x)
diff = np.diff(x)/2.
dx[:-1] += diff
dx[1:] += diff
return dx
def clipLog(im, lexp=1000, cmap=[-1.4914, 0.6273], scale=[-0.1,10]):
"""
Return normalized array like DS9 log
"""
import numpy as np
contrast, bias = cmap
clip = (np.clip(im, scale[0], scale[1])-scale[0])/(scale[1]-scale[0])
clip_log = np.clip((np.log10(lexp*clip+1)/np.log10(lexp)-bias)*contrast+0.5, 0, 1)
return clip_log
def get_mw_dust(ra, dec, **kwargs):
"""
Wrapper around functions to try to query for the MW E(B-V)
"""
try:
ebv = get_dustmaps_dust(ra, dec, web=True)
return ebv
except:
pass
try:
ebv = get_dustmaps_dust(ra, dec, web=False)
return ebv
except:
pass
try:
ebv = get_irsa_dust(ra, dec, **kwargs)
return ebv
except:
pass
return 0.00
def get_dustmaps_dust(ra, dec, web=True, **kwargs):
"Use https://github.com/gregreen/dustmaps"
from dustmaps.sfd import SFDQuery, SFDWebQuery
from astropy.coordinates import SkyCoord
coords = SkyCoord(ra, dec, unit='deg', frame='icrs')
if web:
sfd = SFDWebQuery()
else:
sfd = SFDQuery()
ebv = sfd(coords)
return ebv
def get_irsa_dust(ra=53.1227, dec=-27.805089, type='SandF'):
"""
Get Galactic dust reddening from NED/IRSA at a given position
http://irsa.ipac.caltech.edu/applications/DUST/docs/dustProgramInterface.html
Parameters
----------
ra, dec : float
RA/Dec in decimal degrees.
type : 'SFD' or 'SandF'
Dust model, with
SandF = Schlafly & Finkbeiner 2011 (ApJ 737, 103)
SFD = Schlegel et al. 1998 (ApJ 500, 525)
Returns
-------
ebv : float
Color excess E(B-V), in magnitudes
"""
import os
import tempfile
import urllib.request
from astropy.table import Table
from lxml import objectify
query = 'http://irsa.ipac.caltech.edu/cgi-bin/DUST/nph-dust?locstr={0:.4f}+{1:.4f}+equ+j2000'.format(ra, dec)
req = urllib.request.Request(query)
response = urllib.request.urlopen(req)
resp_text = response.read().decode('utf-8')
root = objectify.fromstring(resp_text)
stats = root.result.statistics
if type == 'SFD':
return float(str(stats.refPixelValueSFD).split()[0])
else:
return float(str(stats.refPixelValueSandF).split()[0])
def fill_between_steps(x, y, z, ax=None, *args, **kwargs):
"""
Make `fill_between` work like linestyle='steps-mid'.
"""
so = np.argsort(x)
mid = x[so][:-1] + np.diff(x[so])/2.
xfull = np.append(np.append(x, mid), mid+np.diff(x[so])/1.e6)
yfull = np.append(np.append(y, y[:-1]), y[1:])
zfull = np.append(np.append(z, z[:-1]), z[1:])
so = np.argsort(xfull)
if ax is None:
ax = plt.gca()
ax.fill_between(xfull[so], yfull[so], zfull[so], *args, **kwargs)
class GalacticExtinction(object):
def __init__(self, EBV=0, Rv=3.1, force=None, radec=None, ebv_type='SandF'):
"""
Wrapper to use either `~specutils.extinction` or the `~extinction`
modules, which have different calling formats. The results from
both of these modules should be equivalent.
Parameters
----------
EBV : float
Galactic reddening, e.g., from `https://irsa.ipac.caltech.edu/applications/DUST/`.
Rv : float
Selective extinction ratio, `Rv=Av/(E(B-V))`.
radec : None or (float, float)
If provided, try to determine EBV based on these coordinates
with `get_irsa_dust(type=[ebv_type])` or `dustmaps`.
force : None, 'extinction', 'specutils.extinction'
Force use one or the other modules. If `None`, then first try
to import `~specutils.extinction` and if that fails use
`~extinction`.
"""
import importlib
# Import handler
if force == 'specutils.extinction':
import specutils.extinction
self.module = 'specutils.extinction'
elif force == 'extinction':
from extinction import Fitzpatrick99
self.module = 'extinction'
elif force == 'dust_extinction':
from dust_extinction.parameter_averages import F99
self.module = 'dust_extinction'
else:
modules = [['dust_extinction.parameter_averages', 'F99'],
['extinction','Fitzpatrick99'],
['specutils.extinction','ExtinctionF99']]
self.module = None
for (mod, cla) in modules:
try:
_F99 = getattr(importlib.import_module(mod), cla)
self.module = mod
break
except:
continue
if self.module is None:
raise ImportError("Couldn't import extinction module from "
"dust_extinction, extinction or specutils")
# try:
# from specutils.extinction import ExtinctionF99
# self.module = 'specutils.extinction'
# except:
# from extinction import Fitzpatrick99
# self.module = 'extinction'
if radec is not None:
self.EBV = get_mw_dust(ra=radec[0], dec=radec[1], type=ebv_type)
else:
self.EBV = EBV
self.Rv = Rv
if self.module == 'dust_extinction.parameter_averages':
self.f99 = _F99(Rv=self.Rv)
elif self.module == 'specutils.extinction':
self.f99 = _F99(self.Av)
#self.Alambda = f99(self.wave*u.angstrom)
else:
self.f99 = _F99(self.Rv)
#self.Alambda = f99(self.wave*u.angstrom, Av)
@property
def Av(self):
return self.EBV*self.Rv
@property
def info(self):
msg = ('F99 extinction with `{0}`: Rv={1:.1f}, '
'E(B-V)={2:.3f} (Av={3:.2f})')
return msg.format(self.module, self.Rv, self.EBV, self.Av)
def __call__(self, wave):
"""
Compute Fitzpatrick99 extinction.
Parameters
----------
wave : float or `~numpy.ndarray`
Observed-frame wavelengths. If no `unit` attribute available,
assume units are `~astropy.units.Angstrom`.
Returns
-------
Alambda : like `wave`
F99 extinction (mags) as a function of wavelength. Output will
be set to zero below 909 Angstroms and above 6 microns as the
extinction modules themselves don't compute outside that range.
"""
import astropy.units as u
if not hasattr(wave, 'unit'):
unit = u.Angstrom
else:
unit = 1
inwave = np.squeeze(wave).flatten()
if self.module == 'dust_extinction.parameter_averages':
clip = (inwave*unit > 1/10.*u.micron)
clip &= (inwave*unit < 1/0.3*u.micron)
else:
clip = (inwave*unit > 909*u.angstrom) & (inwave*unit < 6*u.micron)
Alambda = np.zeros(inwave.shape)
if clip.sum() == 0:
return Alambda
else:
if self.module == 'dust_extinction.parameter_averages':
flam = self.f99.extinguish(inwave[clip]*unit, Av=self.Av)
Alambda[clip] = -2.5*np.log10(flam)
elif self.module == 'specutils.extinction':
Alambda[clip] = self.f99(inwave[clip]*unit)
else:
Alambda[clip] = self.f99(inwave[clip]*unit, self.Av)
return Alambda
def abs_mag_to_luminosity(absmag, pivot=None, output_unit=u.L_sun):
"""
Convert absolute AB mag to luminosity units
Parameters
----------
absmag : array-like
Absolute AB magnitude.
pivot : float
Filter pivot wavelength associated with the magnitude. If no units,
then assume `~astropy.units.Angstrom`.
output_unit : `~astropy.units.core.Unit`
Desired output unit. Must specify a ``pivot`` wavelength for output
power units, e.g., `~astropy.unit.L_sun`.
"""
if pivot is None:
nu = 1.
else:
if hasattr(pivot, 'unit'):
wunit = 1
else:
wunit = u.Angstrom
nu = ((CLIGHT*u.m/u.second)/(pivot*wunit)).to(u.Hz)
fjy = 3631*u.jansky * 10**(-0.4*absmag)
d10 = (10*u.pc).to(u.cm)
f10 = fjy * 4 * np.pi * d10**2 * nu
return f10.to(output_unit)
def zphot_zspec(zphot, zspec, zlimits=None, zmin=0, zmax=4, axes=None, figsize=[6, 7],
                minor=0.5, skip=2, selection=None, catastrophic_limit=0.15, title=None,
                min_zphot=0.02, alpha=0.2, extra_xlabel='', extra_ylabel='',
                xlabel=r'$z_\mathrm{spec}$', ylabel=r'$z_\mathrm{phot}$',
                label_pos=(0.05, 0.95), label_kwargs=dict(ha='left', va='top', fontsize=10),
                label_prefix='', format_axes=True, color='k', point_label=None, **kwargs):
"""
Make zphot_zspec plot scaled by log(1+z) and show uncertainties
"""
import matplotlib.pyplot as plt
from matplotlib.gridspec import GridSpec
clip = (zphot > min_zphot) & (zspec > zmin) & (zspec <= zmax)
if selection is not None:
clip &= selection
dz = (zphot-zspec)/(1+zspec)
#izbest = np.argmin(self.fit_chi2, axis=1)
clip_cat = (np.abs(dz) < catastrophic_limit)
frac_cat = 1-(clip & clip_cat).sum() / clip.sum()
NOUT = (clip & ~clip_cat).sum()
gs = GridSpec(2,1, height_ratios=[6,1])
NEW_AXES = axes is None
if NEW_AXES:
fig = plt.figure(figsize=figsize)
ax = fig.add_subplot(gs[0,0])
else:
ax = axes[0]
fig = None
if title is not None:
ax.set_title(title)
if zlimits is not None:
yerr = np.log10(1+np.abs(zlimits.T - zphot))
ax.errorbar(np.log10(1+zspec[clip & ~clip_cat]),
np.log10(1+zphot[clip & ~clip_cat]),
yerr=yerr[:,clip & ~clip_cat], marker='.', alpha=alpha,
color='r', linestyle='None')
ax.errorbar(np.log10(1+zspec[clip & clip_cat]),
np.log10(1+zphot[clip & clip_cat]),
yerr=yerr[:,clip & clip_cat], marker='.', alpha=alpha,
color=color, linestyle='None', label=point_label)
else:
ax.scatter(np.log10(1+zspec[clip & ~clip_cat]),
np.log10(1+zphot[clip & ~clip_cat]),
marker='.', alpha=alpha, color='r')
ax.scatter(np.log10(1+zspec[clip & clip_cat]),
np.log10(1+zphot[clip & clip_cat]),
marker='.', alpha=alpha, color=color, label=point_label)
if NEW_AXES | format_axes:
xt = np.arange(zmin, zmax+0.1, minor)
xl = np.log10(1+xt)
"""Handling of individual transducers and their directivities.
This module contains classes describing how individual transducer elements radiate sound,
e.g. waveforms and directivities.
This is also where the various spatial properties, e.g. derivatives, are implemented.
Most calculations in this module are fully vectorized, so the models can calculate
sound fields for any number of source positions and receiver positions at once.
.. autosummary::
:nosignatures:
TransducerModel
PointSource
PlaneWaveTransducer
CircularPiston
CircularRing
TransducerReflector
"""
import numpy as np
import logging
from scipy.special import j0, j1
from scipy.special import spherical_jn, spherical_yn, sph_harm
from .materials import air
from . import utils
logger = logging.getLogger(__name__)
class TransducerModel:
"""Base class for ultrasonic single frequency transducers.
Parameters
----------
freq : float, default 40 kHz
The resonant frequency of the transducer.
p0 : float, default 6 Pa
The sound pressure created at maximum amplitude at 1m distance, in Pa.
Note: This is not an rms value!
medium : Material
The medium in which the array is operating.
physical_size : float, default 10e-3
The physical dimentions of the transducer. Mainly used for visualization
and some geometrical assumptions.
Attributes
----------
k : float
Wavenumber in the medium.
wavelength : float
Wavelength in the medium.
omega : float
Angular frequency.
freq : float
Wave frequency.
"""
_repr_fmt_spec = '{:%cls(freq=%freq, p0=%p0, medium=%mediumfull, physical_size=%physical_size)}'
_str_fmt_spec = '{:%cls(freq=%freq, p0=%p0, medium=%medium)}'
def __init__(self, freq=40e3, p0=6, medium=air, physical_size=10e-3):
self.medium = medium
self.freq = freq
self.p0 = p0
self.physical_size = physical_size
# The murata transducers are measured to 85 dB SPL at 1 V at 1 m, which corresponds to ~6 Pa at 20 V
# The datasheet specifies 120 dB SPL @ 0.3 m, which corresponds to ~6 Pa @ 1 m
def __format__(self, fmt_spec):
return fmt_spec.replace('%cls', self.__class__.__name__).replace('%freq', str(self.freq)).replace('%p0', str(self.p0)).replace('%mediumfull', repr(self.medium)).replace('%medium', str(self.medium)).replace('%physical_size', str(self.physical_size))
def __str__(self):
return self._str_fmt_spec.format(self)
def __repr__(self):
return self._repr_fmt_spec.format(self)
def _repr_pretty_(self, p, cycle):
p.text(str(self))
def __eq__(self, other):
return (
type(self) == type(other)
and np.allclose(self.p0, other.p0)
and np.allclose(self.omega, other.omega)
and np.allclose(self.k, other.k)
and self.medium == other.medium
and self.physical_size == other.physical_size
)
@property
def k(self):
return self.omega / self.medium.c
@k.setter
def k(self, value):
self._omega = value * self.medium.c
@property
def omega(self):
return self._omega
@omega.setter
def omega(self, value):
self._omega = value
@property
def freq(self):
return self.omega / 2 / np.pi
@freq.setter
def freq(self, value):
self.omega = value * 2 * np.pi
@property
def wavelength(self):
return 2 * np.pi / self.k
@wavelength.setter
def wavelength(self, value):
self.k = 2 * np.pi / value
def pressure(self, source_positions, source_normals, receiver_positions, **kwargs):
"""Calculate the complex sound pressure from the transducer.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
Returns
-------
out : numpy.ndarray
The pressure at the locations, shape `source_positions.shape[1:] + receiver_positions.shape[1:]`.
"""
return self.pressure_derivs(source_positions=source_positions, source_normals=source_normals, receiver_positions=receiver_positions, orders=0, **kwargs)[0]
def pressure_derivs(self, source_positions, source_normals, receiver_positions, orders=3, **kwargs):
"""Calculate the spatial derivatives of the greens function.
Calculates Cartesian spatial derivatives of the pressure Green's function. Should be implemented by concrete subclasses.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : numpy.ndarray
Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
raise NotImplementedError('Transducer model of type `{}` has not implemented cartesian pressure derivatives'.format(self.__class__.__name__))
class PointSource(TransducerModel):
r"""Point source transducers.
A point source is in this context defines as a spherically spreading wave,
optionally with a directivity. On its own this class defines a monopole,
but subclasses are free to change the directivity to other shapes.
The spherical spreading is defined as
.. math:: G(r) = {e^{ikr} \over r}
where :math:`r` is the distance from the source, and :math:`k` is the wavenumber of the wave.
"""
def directivity(self, source_positions, source_normals, receiver_positions):
"""Evaluate transducer directivity.
Subclasses will preferably implement this to create new directivity models.
Default implementation is omnidirectional sources.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
Returns
-------
out : numpy.ndarray
The amplitude (and phase) of the directivity, shape `source_positions.shape[1:] + receiver_positions.shape[1:]`.
"""
return np.ones(np.asarray(source_positions).shape[1:2] + np.asarray(receiver_positions).shape[1:])
def pressure_derivs(self, source_positions, source_normals, receiver_positions, orders=3, **kwargs):
"""Calculate the spatial derivatives of the greens function.
This is the combination of the derivative of the spherical spreading, and
the derivatives of the directivity, including source strength.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : numpy.ndarray
Array with the calculated derivatives. Has the `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
receiver_positions = np.asarray(receiver_positions)
if receiver_positions.shape[0] != 3:
raise ValueError('Incorrect shape of positions')
wavefront_derivatives = self.wavefront_derivatives(source_positions, receiver_positions, orders)
if type(self) == PointSource:
return wavefront_derivatives * self.p0
directivity_derivatives = self.directivity_derivatives(source_positions, source_normals, receiver_positions, orders)
derivatives = np.empty(wavefront_derivatives.shape, dtype=np.complex128)
derivatives[0] = wavefront_derivatives[0] * directivity_derivatives[0]
if orders > 0:
derivatives[1] = wavefront_derivatives[0] * directivity_derivatives[1] + directivity_derivatives[0] * wavefront_derivatives[1]
derivatives[2] = wavefront_derivatives[0] * directivity_derivatives[2] + directivity_derivatives[0] * wavefront_derivatives[2]
derivatives[3] = wavefront_derivatives[0] * directivity_derivatives[3] + directivity_derivatives[0] * wavefront_derivatives[3]
if orders > 1:
derivatives[4] = wavefront_derivatives[0] * directivity_derivatives[4] + directivity_derivatives[0] * wavefront_derivatives[4] + 2 * directivity_derivatives[1] * wavefront_derivatives[1]
derivatives[5] = wavefront_derivatives[0] * directivity_derivatives[5] + directivity_derivatives[0] * wavefront_derivatives[5] + 2 * directivity_derivatives[2] * wavefront_derivatives[2]
derivatives[6] = wavefront_derivatives[0] * directivity_derivatives[6] + directivity_derivatives[0] * wavefront_derivatives[6] + 2 * directivity_derivatives[3] * wavefront_derivatives[3]
derivatives[7] = wavefront_derivatives[0] * directivity_derivatives[7] + directivity_derivatives[0] * wavefront_derivatives[7] + wavefront_derivatives[1] * directivity_derivatives[2] + directivity_derivatives[1] * wavefront_derivatives[2]
derivatives[8] = wavefront_derivatives[0] * directivity_derivatives[8] + directivity_derivatives[0] * wavefront_derivatives[8] + wavefront_derivatives[1] * directivity_derivatives[3] + directivity_derivatives[1] * wavefront_derivatives[3]
derivatives[9] = wavefront_derivatives[0] * directivity_derivatives[9] + directivity_derivatives[0] * wavefront_derivatives[9] + wavefront_derivatives[2] * directivity_derivatives[3] + directivity_derivatives[2] * wavefront_derivatives[3]
if orders > 2:
derivatives[10] = wavefront_derivatives[0] * directivity_derivatives[10] + directivity_derivatives[0] * wavefront_derivatives[10] + 3 * (directivity_derivatives[4] * wavefront_derivatives[1] + wavefront_derivatives[4] * directivity_derivatives[1])
derivatives[11] = wavefront_derivatives[0] * directivity_derivatives[11] + directivity_derivatives[0] * wavefront_derivatives[11] + 3 * (directivity_derivatives[5] * wavefront_derivatives[2] + wavefront_derivatives[5] * directivity_derivatives[2])
derivatives[12] = wavefront_derivatives[0] * directivity_derivatives[12] + directivity_derivatives[0] * wavefront_derivatives[12] + 3 * (directivity_derivatives[6] * wavefront_derivatives[3] + wavefront_derivatives[6] * directivity_derivatives[3])
derivatives[13] = wavefront_derivatives[0] * directivity_derivatives[13] + directivity_derivatives[0] * wavefront_derivatives[13] + wavefront_derivatives[2] * directivity_derivatives[4] + directivity_derivatives[2] * wavefront_derivatives[4] + 2 * (wavefront_derivatives[1] * directivity_derivatives[7] + directivity_derivatives[1] * wavefront_derivatives[7])
derivatives[14] = wavefront_derivatives[0] * directivity_derivatives[14] + directivity_derivatives[0] * wavefront_derivatives[14] + wavefront_derivatives[3] * directivity_derivatives[4] + directivity_derivatives[3] * wavefront_derivatives[4] + 2 * (wavefront_derivatives[1] * directivity_derivatives[8] + directivity_derivatives[1] * wavefront_derivatives[8])
derivatives[15] = wavefront_derivatives[0] * directivity_derivatives[15] + directivity_derivatives[0] * wavefront_derivatives[15] + wavefront_derivatives[1] * directivity_derivatives[5] + directivity_derivatives[1] * wavefront_derivatives[5] + 2 * (wavefront_derivatives[2] * directivity_derivatives[7] + directivity_derivatives[2] * wavefront_derivatives[7])
derivatives[16] = wavefront_derivatives[0] * directivity_derivatives[16] + directivity_derivatives[0] * wavefront_derivatives[16] + wavefront_derivatives[3] * directivity_derivatives[5] + directivity_derivatives[3] * wavefront_derivatives[5] + 2 * (wavefront_derivatives[2] * directivity_derivatives[9] + directivity_derivatives[2] * wavefront_derivatives[9])
derivatives[17] = wavefront_derivatives[0] * directivity_derivatives[17] + directivity_derivatives[0] * wavefront_derivatives[17] + wavefront_derivatives[1] * directivity_derivatives[6] + directivity_derivatives[1] * wavefront_derivatives[6] + 2 * (wavefront_derivatives[3] * directivity_derivatives[8] + directivity_derivatives[3] * wavefront_derivatives[8])
derivatives[18] = wavefront_derivatives[0] * directivity_derivatives[18] + directivity_derivatives[0] * wavefront_derivatives[18] + wavefront_derivatives[2] * directivity_derivatives[6] + directivity_derivatives[2] * wavefront_derivatives[6] + 2 * (wavefront_derivatives[3] * directivity_derivatives[9] + directivity_derivatives[3] * wavefront_derivatives[9])
derivatives[19] = wavefront_derivatives[0] * directivity_derivatives[19] + wavefront_derivatives[19] * directivity_derivatives[0] + wavefront_derivatives[1] * directivity_derivatives[9] + wavefront_derivatives[2] * directivity_derivatives[8] + wavefront_derivatives[3] * directivity_derivatives[7] + directivity_derivatives[1] * wavefront_derivatives[9] + directivity_derivatives[2] * wavefront_derivatives[8] + directivity_derivatives[3] * wavefront_derivatives[7]
derivatives *= self.p0
return derivatives
def wavefront_derivatives(self, source_positions, receiver_positions, orders=3):
"""Calculate the spatial derivatives of the spherical spreading.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : ndarray
Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
source_positions = np.asarray(source_positions)
receiver_positions = np.asarray(receiver_positions)
if receiver_positions.shape[0] != 3:
raise ValueError('Incorrect shape of positions')
diff = receiver_positions.reshape((3,) + (1,) * (source_positions.ndim - 1) + receiver_positions.shape[1:]) - source_positions.reshape(source_positions.shape[:2] + (receiver_positions.ndim - 1) * (1,))
r = np.sum(diff**2, axis=0)**0.5
kr = self.k * r
jkr = 1j * kr
phase = np.exp(jkr)
derivatives = np.empty((utils.num_pressure_derivs[orders],) + r.shape, dtype=np.complex128)
derivatives[0] = phase / r
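# First derivatives of e^{ikr}/r: d/dx_i = (x_i - x_s,i) * (ikr - 1) * e^{ikr} / r^3; the
# higher-order blocks below follow from repeated differentiation of the same expression.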
if orders > 0:
coeff = (jkr - 1) * phase / r**3
derivatives[1] = diff[0] * coeff
derivatives[2] = diff[1] * coeff
derivatives[3] = diff[2] * coeff
if orders > 1:
coeff = (3 - kr**2 - 3 * jkr) * phase / r**5
const = (jkr - 1) * phase / r**3
derivatives[4] = diff[0]**2 * coeff + const
derivatives[5] = diff[1]**2 * coeff + const
derivatives[6] = diff[2]**2 * coeff + const
derivatives[7] = diff[0] * diff[1] * coeff
derivatives[8] = diff[0] * diff[2] * coeff
derivatives[9] = diff[1] * diff[2] * coeff
if orders > 2:
const = (3 - 3 * jkr - kr**2) * phase / r**5
coeff = (-15 + 15 * jkr + 6 * kr**2 - 1j * kr**3) * phase / r**7
derivatives[10] = diff[0] * (3 * const + diff[0]**2 * coeff)
derivatives[11] = diff[1] * (3 * const + diff[1]**2 * coeff)
derivatives[12] = diff[2] * (3 * const + diff[2]**2 * coeff)
derivatives[13] = diff[1] * (const + diff[0]**2 * coeff)
derivatives[14] = diff[2] * (const + diff[0]**2 * coeff)
derivatives[15] = diff[0] * (const + diff[1]**2 * coeff)
derivatives[16] = diff[2] * (const + diff[1]**2 * coeff)
derivatives[17] = diff[0] * (const + diff[2]**2 * coeff)
derivatives[18] = diff[1] * (const + diff[2]**2 * coeff)
derivatives[19] = diff[0] * diff[1] * diff[2] * coeff
return derivatives
def directivity_derivatives(self, source_positions, source_normals, receiver_positions, orders=3):
"""Calculate the spatial derivatives of the directivity.
The default implementation uses finite difference stencils to evaluate the
derivatives. In principle this means that customized directivity models
does not need to implement their own derivatives, but can do so for speed
and precision benefits.
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of derivatives to calculate. Currently three orders are supported.
Returns
-------
derivatives : numpy.ndarray
Array with the calculated derivatives. Has the shape `(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`.
where `M` is the number of spatial derivatives, see `num_spatial_derivatives` and `spatial_derivative_order`.
"""
source_positions = np.asarray(source_positions)
source_normals = np.asarray(source_normals)
receiver_positions = np.asarray(receiver_positions)
if receiver_positions.shape[0] != 3:
raise ValueError('Incorrect shape of positions')
finite_difference_coefficients = {'': (np.array([[0, 0, 0]]).T, np.array([1]))}
if orders > 0:
finite_difference_coefficients['x'] = (np.array([[1, 0, 0], [-1, 0, 0]]).T, np.array([0.5, -0.5]))
finite_difference_coefficients['y'] = (np.array([[0, 1, 0], [0, -1, 0]]).T, np.array([0.5, -0.5]))
finite_difference_coefficients['z'] = (np.array([[0, 0, 1], [0, 0, -1]]).T, np.array([0.5, -0.5]))
if orders > 1:
finite_difference_coefficients['xx'] = (np.array([[1, 0, 0], [0, 0, 0], [-1, 0, 0]]).T, np.array([1, -2, 1])) # Alt -- (np.array([[2, 0, 0], [0, 0, 0], [-2, 0, 0]]), [0.25, -0.5, 0.25])
finite_difference_coefficients['yy'] = (np.array([[0, 1, 0], [0, 0, 0], [0, -1, 0]]).T, np.array([1, -2, 1])) # Alt-- (np.array([[0, 2, 0], [0, 0, 0], [0, -2, 0]]), [0.25, -0.5, 0.25])
finite_difference_coefficients['zz'] = (np.array([[0, 0, 1], [0, 0, 0], [0, 0, -1]]).T, np.array([1, -2, 1])) # Alt -- (np.array([[0, 0, 2], [0, 0, 0], [0, 0, -2]]), [0.25, -0.5, 0.25])
finite_difference_coefficients['xy'] = (np.array([[1, 1, 0], [-1, -1, 0], [1, -1, 0], [-1, 1, 0]]).T, np.array([0.25, 0.25, -0.25, -0.25]))
finite_difference_coefficients['xz'] = (np.array([[1, 0, 1], [-1, 0, -1], [1, 0, -1], [-1, 0, 1]]).T, np.array([0.25, 0.25, -0.25, -0.25]))
finite_difference_coefficients['yz'] = (np.array([[0, 1, 1], [0, -1, -1], [0, -1, 1], [0, 1, -1]]).T, np.array([0.25, 0.25, -0.25, -0.25]))
if orders > 2:
finite_difference_coefficients['xxx'] = (np.array([[2, 0, 0], [-2, 0, 0], [1, 0, 0], [-1, 0, 0]]).T, np.array([0.5, -0.5, -1, 1])) # Alt -- (np.array([[3, 0, 0], [-3, 0, 0], [1, 0, 0], [-1, 0, 0]]), [0.125, -0.125, -0.375, 0.375])
finite_difference_coefficients['yyy'] = (np.array([[0, 2, 0], [0, -2, 0], [0, 1, 0], [0, -1, 0]]).T, np.array([0.5, -0.5, -1, 1])) # Alt -- (np.array([[0, 3, 0], [0, -3, 0], [0, 1, 0], [0, -1, 0]]), [0.125, -0.125, -0.375, 0.375])
finite_difference_coefficients['zzz'] = (np.array([[0, 0, 2], [0, 0, -2], [0, 0, 1], [0, 0, -1]]).T, np.array([0.5, -0.5, -1, 1])) # Alt -- (np.array([[0, 0, 3], [0, 0, -3], [0, 0, 1], [0, 0, -1]]), [0.125, -0.125, -0.375, 0.375])
finite_difference_coefficients['xxy'] = (np.array([[1, 1, 0], [-1, -1, 0], [1, -1, 0], [-1, 1, 0], [0, 1, 0], [0, -1, 0]]).T, np.array([0.5, -0.5, -0.5, 0.5, -1, 1])) # Alt -- (np.array([[2, 1, 0], [-2, -1, 0], [2, -1, 0], [-2, 1, 0], [0, 1, 0], [0, -1, 0]]), [0.125, -0.125, -0.125, 0.125, -0.25, 0.25])
finite_difference_coefficients['xxz'] = (np.array([[1, 0, 1], [-1, 0, -1], [1, 0, -1], [-1, 0, 1], [0, 0, 1], [0, 0, -1]]).T, np.array([0.5, -0.5, -0.5, 0.5, -1, 1])) # Alt -- (np.array([[2, 0, 1], [-2, 0, -1], [2, 0, -1], [-2, 0, 1], [0, 0, 1], [0, 0, -1]]), [0.125, -0.125, -0.125, 0.125, -0.25, 0.25])
finite_difference_coefficients['yyx'] = (np.array([[1, 1, 0], [-1, -1, 0], [-1, 1, 0], [1, -1, 0], [1, 0, 0], [-1, 0, 0]]).T, np.array([0.5, -0.5, -0.5, 0.5, -1, 1])) # Alt -- (np.array([[1, 2, 0], [-1, -2, 0], [-1, 2, 0], [1, -2, 0], [1, 0, 0], [-1, 0, 0]]), [0.125, -0.125, -0.125, 0.125, -0.25, 0.25])
finite_difference_coefficients['yyz'] = (np.array([[0, 1, 1], [0, -1, -1], [0, 1, -1], [0, -1, 1], [0, 0, 1], [0, 0, -1]]).T, np.array([0.5, -0.5, -0.5, 0.5, -1, 1])) # Alt -- (np.array([[0, 2, 1], [0, -2, -1], [0, 2, -1], [0, -2, 1], [0, 0, 1], [0, 0, -1]]), [0.125, -0.125, -0.125, 0.125, -0.25, 0.25])
finite_difference_coefficients['zzx'] = (np.array([[1, 0, 1], [-1, 0, -1], [-1, 0, 1], [1, 0, -1], [1, 0, 0], [-1, 0, 0]]).T, np.array([0.5, -0.5, -0.5, 0.5, -1, 1])) # Alt -- (np.array([[1, 0, 2], [-1, 0, -2], [-1, 0, 2], [1, 0, -2], [1, 0, 0], [-1, 0, 0]]), [0.125, -0.125, -0.125, 0.125, -0.25, 0.25])
finite_difference_coefficients['zzy'] = (np.array([[0, 1, 1], [0, -1, -1], [0, -1, 1], [0, 1, -1], [0, 1, 0], [0, -1, 0]]).T, np.array([0.5, -0.5, -0.5, 0.5, -1, 1])) # Alt -- (np.array([[0, 1, 2], [0, -1, -2], [0, -1, 2], [0, 1, -2], [0, 1, 0], [0, -1, 0]]), [0.125, -0.125, -0.125, 0.125, -0.25, 0.25])
finite_difference_coefficients['xyz'] = (np.array([[1, 1, 1], [-1, -1, -1], [1, -1, -1], [-1, 1, 1], [-1, 1, -1], [1, -1, 1], [-1, -1, 1], [1, 1, -1]]).T, np.array([1, -1, 1, -1, 1, -1, 1, -1]) * 0.125)
derivatives = np.empty((utils.num_pressure_derivs[orders],) + source_positions.shape[1:2] + receiver_positions.shape[1:], dtype=np.complex128)
h = 1 / self.k
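# Finite difference step size: h = 1/k, i.e. one radian of phase or wavelength/(2*pi).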
# For all derivatives needed:
for derivative, (shifts, weights) in finite_difference_coefficients.items():
# Create the finite difference grid for all positions simultaneously by inserting a new axis for them (axis 1).
# positions.shape = (3, n_difference_points, n_receiver_points)
positions = shifts.reshape([3, -1] + (receiver_positions.ndim - 1) * [1]) * h + receiver_positions[:, np.newaxis, ...]
# Calculate the directivity at all positions at once, and weight them with the correct weights
# weighted_values.shape = (n_difference_points, n_receiver_points)
weighted_values = self.directivity(source_positions, source_normals, positions) * weights.reshape((source_positions.ndim - 1) * [1] + [-1] + (receiver_positions.ndim - 1) * [1])
# sum the finite weighted points and store in the correct position in the output array.
derivatives[utils.pressure_derivs_order.index(derivative)] = np.sum(weighted_values, axis=(source_positions.ndim - 1)) / h**len(derivative)
return derivatives
def spherical_harmonics(self, source_positions, source_normals, receiver_positions, orders=0, **kwargs):
"""Expand sound field in spherical harmonics.
Performs a spherical harmonics expansion of the sound field created from the transducer model.
The expansion is centered at the receiver position(s), and calculated by translating spherical
wavefronts from the source position(s).
Parameters
----------
source_positions : numpy.ndarray
The location of the transducer, as a (3, ...) shape array.
source_normals : numpy.ndarray
The look direction of the transducer, as a (3, ...) shape array.
receiver_positions : numpy.ndarray
The location(s) at which to evaluate the radiation, shape (3, ...).
The first dimension must have length 3 and represent the coordinates of the points.
orders : int
How many orders of spherical harmonics coefficients to calculate.
Returns
-------
coefficients : numpy.ndarray
Array with the calculated expansion coefficients. Has the shape
`(M,) + source_positions.shape[1:] + receiver_positions.shape[1:]`,
where `M=len(SphericalHarmonicsIndexer(orders))`, see `~levitate.utils.SphericalHarmonicsIndexer`
for details on the structure of the coefficients.
"""
source_positions = np.asarray(source_positions)
source_normals = | np.asarray(source_normals) | numpy.asarray |
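# A minimal, self-contained sketch of how a (shifts, weights) stencil such as the 'xx' entry
# above approximates a second derivative: f''(x) ~ sum_i w_i * f(x + s_i * h) / h**2,
# checked here against f(x) = sin(x). The test point and step size are arbitrary.
import numpy as np
h = 1e-3
x0 = 0.7
shifts = np.array([1, 0, -1])   # same pattern as the 'xx' stencil along one axis
weights = np.array([1, -2, 1])
approx = np.sum(weights * np.sin(x0 + shifts * h)) / h**2
exact = -np.sin(x0)             # analytic second derivative of sin
print(approx, exact)            # the two values agree to roughly 1e-6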
import numpy as np
import matplotlib.pyplot as plt
def ArrowLine(fig,X,Y,color=[0.0,0.0,0.0],zorder=None,Spacing=20,SingleArrow=False,EndArrow=False,linewidth=1,linestyle='-',HeadWidth=0.05, HeadLength=0.1,Reverse=False,**kwargs):
if Reverse == True:
x=X[::-1]
y=Y[::-1]
else:
x=X
y=Y
if type(fig) == type(plt):
ax=fig.gca()
else:
#assume that axes have been provided instead of a figure
ax = fig
fig.plot(x,y,color=color,zorder=zorder,linewidth=linewidth,linestyle=linestyle,**kwargs)
if EndArrow:
n=1
i0=np.array([ | np.size(x) | numpy.size |
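# A quick usage sketch for ArrowLine above (the curve is made up): `fig` may be either the
# pyplot module or an Axes object, and EndArrow appears to place an arrow head at the end
# of the line rather than spacing arrows along it.
import numpy as np
import matplotlib.pyplot as plt
x = np.linspace(0, 2 * np.pi, 200)
y = np.sin(x)
ArrowLine(plt, x, y, color=[0.2, 0.2, 0.8], EndArrow=True, HeadWidth=0.08, HeadLength=0.15)
plt.show()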
import numpy as np
import pytest
import unittest
from desc.equilibrium import Equilibrium, EquilibriaFamily
from desc.grid import ConcentricGrid
from desc.profiles import PowerSeriesProfile, SplineProfile
from desc.geometry import (
FourierRZCurve,
FourierRZToroidalSurface,
ZernikeRZToroidalSection,
)
class TestConstructor(unittest.TestCase):
def test_defaults(self):
eq = Equilibrium()
self.assertEqual(eq.spectral_indexing, "ansi")
self.assertEqual(eq.NFP, 1)
self.assertEqual(eq.L, 1)
self.assertEqual(eq.M, 1)
self.assertEqual(eq.N, 0)
self.assertEqual(eq.sym, False)
self.assertTrue(eq.surface.eq(FourierRZToroidalSurface()))
self.assertIsInstance(eq.pressure, PowerSeriesProfile)
np.testing.assert_allclose(eq.p_l, [0])
self.assertIsInstance(eq.iota, PowerSeriesProfile)
| np.testing.assert_allclose(eq.i_l, [0]) | numpy.testing.assert_allclose |
import numpy as np
import copy
from math import pi, sin, exp
import scipy.constants as sp
class FDTD:
def __init__(self, mesh, pulse, time):
self.mesh=mesh
self.pulse=pulse
self.time=time
def boundarymur(self, ex, ex_old):
ncells, ddx= self.mesh.ncells, self.mesh.ddx
dt=self.mesh.ddx / (2*sp.c)
c_bound=(sp.c*dt-ddx)/(sp.c*dt+ddx)
ex[0]=ex_old[1] + c_bound * (ex[1]-ex_old[0])
ex[ncells]=ex_old[ncells-1] + c_bound * (ex[ncells-1]-ex_old[ncells])
def FDTDLoop(self,k1,k2):
dt=self.mesh.ddx / (2*sp.c)
nsteps= int(self.time / dt)
# NOTE: Better to remove nsteps and not always store everything...
ex=np.zeros(self.mesh.ncells+1)
hy=np.zeros(self.mesh.ncells+1)
ex_old=np.zeros(self.mesh.ncells+1)
ex_save_k1=np.empty(nsteps+1)
ex_save_k2=np.empty(nsteps+1)
ex_save_film=np.empty((nsteps+1,self.mesh.ncells+1))
ca=self.mesh.material()[0][1:-1]
cb=self.mesh.material()[1][1:-1]
for time_step in range(1, nsteps + 1):
ex_old=copy.deepcopy(ex)
ex[1:-1] = ca * ex[1:-1] + cb * (hy[:-2] - hy[1:-1])
# Store the values to be plotted
ex_save_film[time_step][:]=ex[:]
# Store the values used to compute the transform
ex_save_k1[time_step]=ex[k1]
ex_save_k2[time_step]=ex[k2]
ex[self.pulse.k_ini] += 0.5*self.pulse.pulse(time_step)
self.boundarymur(ex,ex_old)
hy[:-1] = hy[:-1] + 0.5 * (ex[:-1] - ex[1:])
t= time_step+1/2
hy[self.pulse.k_ini] += 0.25* self.pulse.pulse(t)
hy[self.pulse.k_ini-1] += 0.25* self.pulse.pulse(t)
return ex_save_k1, ex_save_k2, ex_save_film
class Source:
def __init__(self, sourcetype, t_0, s_0, k_ini):
self.sourcetype=sourcetype
self.t_0=t_0
self.s_0=s_0
self.k_ini=k_ini
def pulse(self, time):
self.time=time
if self.sourcetype == 'gauss':
pulse = exp(-0.5*( (self.t_0 - time) / self.s_0 )**2)
return pulse
# Class for the Fast Fourier Transform
# NOTE: This is more of a namespace than a class.
# NOTE: The less state, the better.
class Utilities:
def FFT(self,e1tk1_total,e2tk1,e1tk2,e2tk2):
#Hay que cancelar la parte incidente
e1tk1_reflected = e1tk1_total - e2tk1
e1wk1= | np.fft.fft(e1tk1_reflected) | numpy.fft.fft |
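# A hedged sketch of the post-processing idea behind Utilities.FFT: subtract the incident
# field recorded in an empty run from the total field, then compare spectra to estimate a
# reflection coefficient. The signals below are synthetic stand-ins, not simulator output.
import numpy as np
n = np.arange(2048)
incident = np.exp(-0.5 * ((n - 500) / 60.0) ** 2)
total = incident + 0.3 * np.exp(-0.5 * ((n - 900) / 60.0) ** 2)   # fake reflected pulse
reflected = total - incident
R = np.abs(np.fft.fft(reflected)) / np.abs(np.fft.fft(incident))
print(R[:5])   # roughly 0.3, matching the amplitude of the fake reflected pulse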
# Licensed under a 3-clause BSD style license - see LICENSE.rst
# -*- coding: utf-8 -*-
import numpy as np
import glob
try:
from astropy.tests.compat import assert_allclose
except ImportError:
from numpy.testing.utils import assert_allclose
from astropy.tests.helper import raises
from astropy.utils.data import get_pkg_data_filename
from os.path import basename, dirname, join
from ..file_lines import file_lines
from ..median import median
from ..pcomp import pcomp
from ..rebin import rebin
from ..smooth import smooth
from ..uniq import uniq
class TestPydl(object):
"""Test the top-level pydl functions.
"""
def setup(self):
pass
def teardown(self):
pass
def test_file_lines(self):
#
# Find the test files
#
line_numbers = (1, 42, 137)
plainfiles = [get_pkg_data_filename('t/this-file-contains-{0:d}-lines.txt'.format(l)) for l in line_numbers]
gzfiles = [get_pkg_data_filename('t/this-file-contains-{0:d}-lines.txt.gz'.format(l)) for l in line_numbers]
for i, p in enumerate(plainfiles):
n = file_lines(p)
assert n == line_numbers[i]
for i, p in enumerate(gzfiles):
n = file_lines(p, compress=True)
assert n == line_numbers[i]
#
# Test list passing
#
n = file_lines(plainfiles)
assert tuple(n) == line_numbers
n = file_lines(gzfiles, compress=True)
assert tuple(n) == line_numbers
#
# Make sure empty files work
#
n = file_lines(get_pkg_data_filename('t/this-file-is-empty.txt'))
assert n == 0
def test_median(self):
odd_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13],
dtype=np.float32)
even_data = np.array([1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12],
dtype=np.float32)
assert median(odd_data) == 7
assert median(odd_data, even=True) == 7
assert median(even_data) == 7
assert median(even_data, even=True) == 6.5
assert (median(odd_data, 3) == odd_data).all()
with raises(ValueError):
foo = median(np.ones((9, 9, 9)), 3)
odd_data2 = np.vstack((odd_data, odd_data, odd_data, odd_data, odd_data))
assert (median(odd_data2, 3) == odd_data2).all()
assert (median(odd_data2, axis=0) == odd_data).all()
assert (median(odd_data2, axis=1) ==
7*np.ones((odd_data2.shape[0],), dtype=odd_data2.dtype)).all()
def test_pcomp(self):
test_data_file = get_pkg_data_filename('t/pcomp_data.txt')
test_data = np.loadtxt(test_data_file, dtype='d', delimiter=',')
with raises(ValueError):
foo = pcomp(np.arange(10))
pcomp_data = test_data[0:20, :]
m = 4
n = 20
means = np.tile(pcomp_data.mean(0), n).reshape(pcomp_data.shape)
newarray = pcomp_data - means
foo = pcomp(newarray, covariance=True)
#
# This array is obtained from the IDL version of PCOMP.
# It is only accurate up to an overall sign on each column.
#
derived = test_data[20:40, :]
for k in range(m):
assert_allclose(abs(foo.derived[:, k]), abs(derived[:, k]), 1e-4)
coefficients = test_data[40:44, :]
coefficientsT = coefficients.T
for k in range(m):
assert_allclose(abs(foo.coefficients[:, k]),
abs(coefficientsT[:, k]),
1e-4)
eigenvalues = test_data[44, :]
assert_allclose(foo.eigenvalues, eigenvalues, 1e-4)
variance = test_data[45, :]
assert_allclose(foo.variance, variance, 1e-4)
#
# Test the standardization.
#
foo = pcomp(pcomp_data, standardize=True, covariance=True)
# for k in range(m):
# assert_allclose(abs(foo.derived[:, k]), abs(derived[:, k]), 1e-4)
# for k in range(m):
# assert_allclose(abs(foo.coefficients[:, k]),
# abs(coefficientsT[:, k]),
# 1e-4)
eigenvalues = test_data[46, :]
assert_allclose(foo.eigenvalues, eigenvalues, 1e-4)
variance = test_data[47, :]
assert_allclose(foo.variance, variance, 1e-4)
# assert_allclose(foo.derived[0, :], np.array([-1.64153312,
# -9.12322038,
# 1.41790708,
# -8.29359322]))
#
# Make sure correlation is working at least.
#
foo = pcomp(pcomp_data, standardize=True)
assert_allclose(foo.eigenvalues, np.array([2.84968632e+00,
1.00127640e+00,
1.48380121e-01,
6.57156222e-04]))
assert_allclose(foo.variance, np.array([7.12421581e-01,
2.50319100e-01,
3.70950302e-02,
1.64289056e-04]))
def test_rebin(self):
x = np.arange(40)
with raises(ValueError):
r = rebin(x, d=(10, 10))
with raises(ValueError):
r = rebin(x, d=(70,))
with raises(ValueError):
r = rebin(x, d=(30,))
x = np.array([[1.0, 2.0], [2.0, 3.0]])
rexpect = np.array([[1.0, 2.0], [1.5, 2.5], [2.0, 3.0], [2.0, 3.0]])
r = rebin(x, d=(4, 2))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 1.5, 2.0, 2.0], [2.0, 2.5, 3.0, 3.0]])
r = rebin(x, d=(2, 4))
assert np.allclose(r, rexpect)
rexpect = np.array([[1.0, 2.0], [1.0, 2.0], [2.0, 3.0], [2.0, 3.0]])
r = rebin(x, d=(4, 2), sample=True)
assert | np.allclose(r, rexpect) | numpy.allclose |
import numpy as np
import unittest
import chainer
from chainer.datasets import TupleDataset
from chainer.iterators import SerialIterator
from chainer import testing
from chainercv.extensions import InstanceSegmentationVOCEvaluator
class _InstanceSegmentationStubLink(chainer.Link):
def __init__(self, masks, labels):
super(_InstanceSegmentationStubLink, self).__init__()
self.count = 0
self.masks = masks
self.labels = labels
def predict(self, imgs):
n_img = len(imgs)
masks = self.masks[self.count:self.count + n_img]
labels = self.labels[self.count:self.count + n_img]
scores = [np.ones_like(l) for l in labels]
self.count += n_img
return masks, labels, scores
class TestInstanceSegmentationVOCEvaluator(unittest.TestCase):
def setUp(self):
masks = np.random.uniform(size=(10, 5, 32, 48)) > 0.5
labels = np.ones((10, 5), dtype=np.int32)
self.dataset = TupleDataset(
| np.random.uniform(size=(10, 3, 32, 48)) | numpy.random.uniform |
# ******************************************************************************
# Copyright 2017-2018 Intel Corporation
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ******************************************************************************
"""
Defines text dataset preprocessing routines
"""
from future import standard_library
standard_library.install_aliases() # triggers E402, hence noqa below
from builtins import map # noqa
import numpy as np # noqa
import re # noqa
from neon.util.compat import pickle # noqa
def clean_string(base):
"""
Tokenization/string cleaning.
Original from https://github.com/yoonkim/CNN_sentence/blob/master/process_data.py
"""
base = re.sub(r"[^A-Za-z0-9(),!?\'\`]", " ", base)
base = re.sub(r"\'re", " \'re", base)
base = re.sub(r"\'d", " \'d", base)
base = re.sub(r"\'ll", " \'ll", base)
base = re.sub(r"\'s", " \'s", base)
base = re.sub(r"\'ve", " \'ve", base)
base = re.sub(r"n\'t", " n\'t", base)
base = re.sub(r"!", " ! ", base)
base = re.sub(r",", " , ", base)
base = re.sub(r"\)", " \) ", base)
base = re.sub(r"\(", " \( ", base)
base = re.sub(r"\?", " \? ", base)
base = re.sub(r"\s{2,}", " ", base)
return base.strip().lower()
def pad_sentences(sentences, sentence_length=None, dtype=np.int32, pad_val=0.):
lengths = [len(sent) for sent in sentences]
nsamples = len(sentences)
if sentence_length is None:
sentence_length = np.max(lengths)
X = (np.ones((nsamples, sentence_length)) * pad_val).astype(dtype=dtype)
for i, sent in enumerate(sentences):
trunc = sent[-sentence_length:]
X[i, -len(trunc):] = trunc
return X
def pad_data(path, vocab_size=20000, sentence_length=100, oov=2,
start=1, index_from=3, seed=113, test_split=0.2):
f = open(path, 'rb')
X, y = pickle.load(f)
f.close()
np.random.seed(seed)
| np.random.shuffle(X) | numpy.random.shuffle |
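# A quick usage sketch for pad_sentences above (the token ids are made up): shorter
# sentences are left-padded with pad_val, longer ones keep only their last tokens.
sentences = [[4, 8, 15], [16, 23, 42, 7, 9]]
padded = pad_sentences(sentences, sentence_length=4, pad_val=0.)
# padded -> [[ 0,  4,  8, 15],
#            [23, 42,  7,  9]]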
import os
os.chdir('seqFISH_AllenVISp/')
import numpy as np
import pandas as pd
import scipy.stats as st
import pickle
seqFISH_data = pd.read_csv('data/seqFISH/sourcedata/cortex_svz_counts.csv',header=0)
seqFISH_meta = pd.read_csv('data/seqFISH/sourcedata/cortex_svz_cellcentroids.csv',header=0)
seqFISH_data = seqFISH_data.iloc[np.where(seqFISH_meta['Region'] == 'Cortex')[0],:]
seqFISH_meta = seqFISH_meta.iloc[ | np.where(seqFISH_meta['Region'] == 'Cortex') | numpy.where |
import PIL
from PIL import Image
import numpy as np
import cPickle as p
import itertools as iter
import random
import sys
np.random.seed(0)
random.seed(0)
# number of test images to generate
num_images = 1000
# camera FOV in degrees
# should be less than 180
# should also be less than maximum catalog angle
fov_y = 10
# image size in pixels
num_pixels_x = 1024
num_pixels_y = 1024
# maximum number of stars allowed per image
max_num_stars_per_image = 25
# number of stars in star vectors file
num_star_vectors = 5904
# name of star vectors file
star_vectors_file = "stars"
# calculate diagonal FOV in degrees from fov_y
fov_diag = np.arctan(np.tan(fov_y * np.pi / 180) * np.sqrt(num_pixels_x ** 2 + num_pixels_y ** 2) / num_pixels_y) * 180 / np.pi
# star vectors data format
star_vectors_data_type = [("i", np.float64),("j", np.float64),("k", np.float64),("mag", np.float64),("id", np.uint32),("pad", np.uint32)]
# import catalog vectors
catalog_vectors = np.memmap(star_vectors_file, dtype=star_vectors_data_type, mode="r", shape=(num_star_vectors,))
# find vectors of matched stars
matched_vectors = [np.array((catalog_vectors[star][0], catalog_vectors[star][1], catalog_vectors[star][2])) for star in range(num_star_vectors)]
# create star hash table for fast nearby star lookup
star_hash = {}
star_hash_max_dot = np.cos((np.pi / 360) * fov_diag + 2*np.arcsin(np.sqrt(3) / 40))
for x in range(20):
for y in range(20):
for z in range(20):
icv = ((float(x) / 10.0) - .95, (float(y) / 10.0) - .95, (float(z) / 10.0) - .95)
icv = icv / np.sqrt(np.dot(icv,icv))
star_hash[(x,y,z)] = [vector for vector in matched_vectors if np.dot(icv,vector) > star_hash_max_dot]
# create reverse vector catalog
star_catalog = {}
for star in range(num_star_vectors):
star_catalog[str(catalog_vectors[star][0]) + "," + str(catalog_vectors[star][1]) + "," + str(catalog_vectors[star][2])] = star
# create memmap of image data for fast processing with C
image_data_file = 'image_data' + '.p'
image_data = np.memmap(image_data_file, dtype=np.uint16, mode="w+", shape=(max_num_stars_per_image * num_images,))
image_data_index = 0
# create memmap of rotation matrices
pointing_data_file = 'pointing_data' + '.p'
pointing_data = np.memmap(pointing_data_file, dtype=np.float32, mode="w+", shape=(num_images, 3, 3))
pointing_data_index = 0
# create memmap of centroid data for fast processing with C
centroid_data_file = 'centroid_data' + '.p'
centroid_data = np.memmap(centroid_data_file, dtype=np.float32, mode="w+", shape=(max_num_stars_per_image * num_images, 2))
centroid_data_index = 0
# precompute some useful values
center_x = float(num_pixels_x) / 2
center_y = float(num_pixels_y) / 2
scale_factor = np.tan(fov_y * np.pi / 360) / center_y
max_dot = np.cos((np.pi / 360) * fov_diag)
for image_number in range(num_images):
# # yaw, declination, roll of camera
# yaw = (random.random() - .5) * 360
# declination = (random.random() - .5) * 180
# roll = (random.random() - .5) * 360
# # convert declination to pitch
# pitch = -declination
# # convert to radians
# yaw = yaw * np.pi / 180
# pitch = pitch * np.pi / 180
# roll = roll * np.pi / 180
# # obtain rotation matrix of camera frame from yaw, pitch, roll
# rotation_matrix = np.matrix([[np.cos(yaw)*np.cos(pitch),np.cos(yaw)*np.sin(pitch)*np.sin(roll)-np.sin(yaw)*np.cos(roll),np.cos(yaw)*np.sin(pitch)*np.cos(roll)+np.sin(yaw)*np.sin(roll)],[np.sin(yaw)*np.cos(pitch),np.sin(yaw)*np.sin(pitch)*np.sin(roll)+np.cos(yaw)*np.cos(roll),np.sin(yaw)*np.sin(pitch)*np.cos(roll)-np.cos(yaw)*np.sin(roll)],[-np.sin(pitch),np.cos(pitch)*np.sin(roll),np.cos(pitch)*np.cos(roll)]]).T
ax = np.array([random.gauss(0,10), random.gauss(0,10), random.gauss(0,10)]);
ax = ax / np.linalg.norm(ax)
x = ax[0]
y = ax[1]
z = ax[2]
ro = np.random.uniform(0,2*np.pi)
c = np.cos(ro)
s = | np.sin(ro) | numpy.sin |
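# A hedged sketch: one standard way (not necessarily the one this script uses) to turn the
# random axis `ax` and angle `ro` above into a rotation matrix is Rodrigues' formula,
# R = I + sin(ro) * K + (1 - cos(ro)) * K.K, with K the skew-symmetric cross-product matrix.
import numpy as np
def axis_angle_to_matrix(axis, angle):
    x, y, z = axis / np.linalg.norm(axis)
    K = np.array([[0, -z, y],
                  [z, 0, -x],
                  [-y, x, 0]])
    return np.eye(3) + np.sin(angle) * K + (1 - np.cos(angle)) * np.dot(K, K)
R = axis_angle_to_matrix(np.array([0.0, 0.0, 1.0]), np.pi / 2)
print(np.round(R, 6))   # 90 degree rotation about z: [[0,-1,0],[1,0,0],[0,0,1]]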
"""dynamic user-input-responsive part of mood, and mood graphs"""
from datetime import datetime
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import matplotlib.dates as mdates
from pandas.plotting import register_matplotlib_converters
register_matplotlib_converters()
from scipy.signal import lsim, lti
from scipy.signal.ltisys import StateSpaceContinuous
from tqdm.autonotebook import tqdm
from IPython.display import display
from persistence.response_cache import (
ResponseCache,
UserInputIdentifier,
)
from feels.mood import (
random_mood_at_pst_datetime,
logit_diff_to_pos_sent,
pos_sent_to_logit_diff,
)
from util.past import MILESTONE_TIMES
from util.times import now_pst, fromtimestamp_pst
MOOD_IMAGE_DIR = "data/mood_images/"
STEP_SEC = 30 * 1
TAU_SEC = 3600 * 12
TAU_SEC_2ND = 60 * 60
WEIGHTED_AVG_START_TIME = pd.Timestamp("2021-01-04 09:10:00")
WEIGHTED_AVG_P75_WEIGHT = 0.5
RESPONSE_SCALE_BASE = 0.15 # 0.1 # 0.2 #0.5
DETERMINER_CENTER = -3.1 # -2.4 # -1.5 #-2
DETERMINER_CENTER_UPDATES = {
pd.Timestamp("2020-08-20 01:00:00"): -2.4,
pd.Timestamp("2020-08-25 14:00:00"): -2.0,
pd.Timestamp("2020-08-31 09:15:00"): -2.4,
pd.Timestamp("2020-09-16 06:00:00"): -2.1,
pd.Timestamp("2020-10-28 17:00:00"): -2.4,
pd.Timestamp("2020-11-04 11:00:00"): -2.78,
pd.Timestamp("2020-11-13 19:00:00"): -2.7,
pd.Timestamp("2020-11-15 07:30:00"): -2.6,
pd.Timestamp("2020-12-04 07:00:00"): -2.5,
pd.Timestamp("2020-12-10 08:35:00"): -2.35,
pd.Timestamp("2020-12-10 23:45:00"): -2.0,
pd.Timestamp("2020-12-18 15:35:00"): -2.2,
pd.Timestamp("2020-12-21 15:25:00"): -2.3,
WEIGHTED_AVG_START_TIME: 0.0,
pd.Timestamp("2021-02-08 09:25:00"): -0.25,
pd.Timestamp("2021-02-14 17:55:00"): -0.125,
pd.Timestamp("2021-02-15 17:25:00"): 0,
pd.Timestamp("2021-02-16 17:45:00"): 0.5,
pd.Timestamp("2021-02-17 12:45:00"): 0,
pd.Timestamp("2021-02-26 17:30:00"): 0.5,
pd.Timestamp("2021-02-27 16:05:00"): 0.,
pd.Timestamp("2021-03-15 09:55:00"): -0.2,
pd.Timestamp("2021-03-15 19:50:00"): -0.4,
pd.Timestamp("2021-03-20 06:55:00"): 0.,
pd.Timestamp("2021-03-24 22:40:00"): -0.3,
pd.Timestamp("2021-03-31 12:25:00"): -0.5,
pd.Timestamp("2021-04-09 07:10:00"): -0.25,
pd.Timestamp("2021-05-05 17:00:00"): 0.,
pd.Timestamp("2021-05-07 18:15:00"): -0.25,
pd.Timestamp("2021-05-12 07:50:00"): 0.,
pd.Timestamp("2021-05-22 09:50:00"): -0.125,
pd.Timestamp("2021-05-23 07:15:00"): -0.25,
pd.Timestamp("2021-06-05 12:05:00"): -0.5,
pd.Timestamp("2021-06-07 22:35:00"): -0.3,
pd.Timestamp("2021-06-08 13:15:00"): 0.,
pd.Timestamp("2021-06-14 06:55:00"): -0.25,
pd.Timestamp("2021-06-15 18:08:00"): 0.,
pd.Timestamp("2021-06-16 13:00:00"): 0.125,
pd.Timestamp("2021-06-26 07:35:00"): 0.25,
pd.Timestamp("2021-06-30 08:40:00"): 0.,
pd.Timestamp("2021-08-06 00:45:00"): -0.125,
pd.Timestamp("2021-09-21 08:25:00"): 0.,
pd.Timestamp("2021-09-22 17:45:00"): -0.075,
pd.Timestamp("2021-10-24 12:15:00"): -0.,
pd.Timestamp("2021-10-24 08:40:00"): 0.125,
pd.Timestamp("2021-10-25 17:55:00"): 0.25,
pd.Timestamp("2021-10-28 22:40:00"): 0.125,
pd.Timestamp("2021-10-31 18:10:00"): 0.05,
pd.Timestamp("2021-11-02 20:40:00"): 0.,
pd.Timestamp("2021-11-15 19:20:00"): 0.05,
pd.Timestamp("2021-11-17 09:10:00"): 0.1,
pd.Timestamp("2021-11-19 14:50:00"): 0.,
pd.Timestamp("2021-12-24 14:45:00"): 0.1,
pd.Timestamp("2021-12-30 09:55:00"): 0.05,
}
DETERMINER_MULTIPLIER_UPDATES = {
pd.Timestamp("2020-08-25 17:00:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-10-21 21:15:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-16 10:45:00"): 0.0667 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-25 11:30:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-11-27 08:55:00"): 0.15 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-12-04 07:00:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-12-09 19:50:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2020-12-20 23:30:00"): 0.05 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-08 08:55:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-08 09:10:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-13 09:20:00"): 0.15 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-14 08:00:00"): 0.2 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-14 20:35:00"): 0.15 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-01-20 07:40:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-02-08 09:25:00"): 0.125 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-02-15 06:55:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-02-18 23:45:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-03-16 08:55:00"): 0.0667 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-04-03 12:00:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-05-11 08:15:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-05-15 08:35:00"): 0.15 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-05-23 07:15:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-06-05 12:05:00"): 0.175 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-06-07 22:35:00"): 0.1 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-06-16 23:20:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-06-26 07:35:00"): 0.0667 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-07-08 06:45:00"): 0.075 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-07-25 09:00:00"): 0.1125 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-09-16 08:35:00"): 0.175 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-10-25 17:55:00"): 0.15 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-11-08 08:30:00"): 0.175 / RESPONSE_SCALE_BASE,
pd.Timestamp("2021-12-24 08:00:00"): 0.125 / RESPONSE_SCALE_BASE,
}
MOOD_NAME_TO_DYNAMIC_MOOD_VALUE_MAP = {
"only_sad": 0.094,
"only_non_happy": 0.37,
"meh": 0.7,
"unrestricted": 0.7,
"only_non_sad": 0.9,
"only_happy": 0.99,
}
_ordered_cutoffs = sorted(MOOD_NAME_TO_DYNAMIC_MOOD_VALUE_MAP.values())
_ordered_cutoffs_moods = [
(c, [k for k, v in MOOD_NAME_TO_DYNAMIC_MOOD_VALUE_MAP.items() if v == c][0])
for c in _ordered_cutoffs
]
GENERATED_TS_FIRST_STABLE = pd.Timestamp("2020-05-26 19:00:00")
DUPLICATES_BUGFIX_START_TS = pd.Timestamp("2020-12-19 11:00:00")
WINDOW_LENGTH_DAYS = 2.5
def convert_p75_generated_logit_diff_to_user_input_logit_diff(x):
# one-time empirical (lr fit)
return 1.24462721 * x - 1.4965600283833032
def convert_user_input_logit_diff_to_p75_generated_logit_diff(x):
# one-time empirical (lr fit)
return (x + 1.4965600283833032) / 1.24462721
def mood_buff_v2(x, y):
prod = pos_sent_to_logit_diff(x) * pos_sent_to_logit_diff(y)
result = logit_diff_to_pos_sent(np.sign(prod) * np.sqrt(np.abs(prod))) # in (0, 1)
return 2 * (result - 0.5) # in (-1, 1)
def dynamic_mood_value_to_mood_interp(value, verbose=True) -> str:
upper_name = None
upper_dist = None
for tup in _ordered_cutoffs_moods[::-1]:
if value < tup[0]:
upper_name = tup[1]
upper_dist = tup[0] - value
lower_name = None
lower_dist = None
for tup in _ordered_cutoffs_moods:
if value >= tup[0]:
lower_name = tup[1]
lower_dist = value - tup[0]
if lower_name is None:
# below lowest
return _ordered_cutoffs_moods[0][1]
if upper_name is None:
# above highest
return _ordered_cutoffs_moods[-1][1]
lower_frac = upper_dist / (lower_dist + upper_dist)
upper_frac = 1.0 - lower_frac
interp_name = (
f"interp_{lower_name}__{upper_name}__{lower_frac:.2f}__{upper_frac:.2f}"
)
if verbose:
print(
f"interpolating between {lower_frac:.1%} {lower_name} and {upper_frac:.1%} {upper_name}"
)
return interp_name
class DynamicMoodSystem:
def __init__(
self,
step_sec: float = STEP_SEC,
tau_sec: float = TAU_SEC,
tau_sec_2nd: float = TAU_SEC_2ND,
response_scale_base: float = RESPONSE_SCALE_BASE,
determiner_center: float = DETERMINER_CENTER,
determiner_center_updates: dict = DETERMINER_CENTER_UPDATES,
determiner_multiplier_updates: dict = DETERMINER_MULTIPLIER_UPDATES,
):
self.step_sec = step_sec
self.tau_sec = tau_sec
self.tau_sec_2nd = tau_sec_2nd
self.response_scale_base = response_scale_base
self.determiner_center = determiner_center
self.determiner_center_updates = determiner_center_updates
self.determiner_multiplier_updates = determiner_multiplier_updates
@property
def response_scale(self) -> float:
return self.response_scale_base * (self.step_sec / self.tau_sec_2nd)
@property
def system_matrices(self):
return (
[
[-self.step_sec / self.tau_sec, 1],
[0, -self.step_sec / self.tau_sec_2nd],
],
[[0], [self.response_scale]],
[[1, 0]],
[[0]],
)
@property
def lti_system(self) -> StateSpaceContinuous:
system = lti(*self.system_matrices)
return system
@property
def forcing_system(self) -> StateSpaceContinuous:
matrices = self.system_matrices
system = lti(matrices[0], matrices[1], [[0, 1]], matrices[3])
return system
def determiner_center_series(self, determiner: pd.Series) -> pd.Series:
determiner_center_s = pd.Series(
[self.determiner_center for _ in determiner.index], index=determiner.index
)
start = determiner.index.min()
for time in sorted(self.determiner_center_updates.keys()):
determiner_center_s.loc[max(start, time):] = self.determiner_center_updates[time]
return determiner_center_s
def determiner_multiplier_series(self, determiner: pd.Series) -> pd.Series:
determiner_multiplier_s = pd.Series(
[1.0 for _ in determiner.index], index=determiner.index
)
for time in sorted(self.determiner_multiplier_updates.keys()):
determiner_multiplier_s.loc[time:] = self.determiner_multiplier_updates[
time
]
return determiner_multiplier_s
def set_centered_scaled_determiner(self,
mood_inputs: pd.DataFrame,
) -> pd.DataFrame:
mood_inputs["centered_determiner"] = mood_inputs["determiner"] - self.determiner_center_series(mood_inputs["determiner"])
mood_inputs["scaled_determiner"] = self.determiner_multiplier_series(mood_inputs["centered_determiner"]) * mood_inputs["centered_determiner"]
return mood_inputs
def compute_determiner_legacy(row):
return convert_p75_generated_logit_diff_to_user_input_logit_diff(
row.p75_generated_logit_diff
)
def compute_determiner_weighted_avg(row):
weighted_avg = ((1 - WEIGHTED_AVG_P75_WEIGHT) * row.logit_diff) + (
WEIGHTED_AVG_P75_WEIGHT * row.p75_generated_logit_diff
)
# one time empirical lr fit, see `sentiment_refresh_2021.ipynb`
weighted_avg_fitted = (0.61029747 * weighted_avg) + 0.4252486735525668
return weighted_avg_fitted
def compute_dynamic_mood_inputs(
response_cache: ResponseCache,
weighted_avg_start_time: pd.Timestamp = WEIGHTED_AVG_START_TIME,
system: DynamicMoodSystem = None,
) -> pd.DataFrame:
if system is None:
system = DynamicMoodSystem()
df = pd.DataFrame.from_records(
[
{
"timestamp": ident.timestamp,
"blog_name": ident.blog_name,
"logit_diff": sent["logit_diff"],
"generated_logit_diff": sent.get("generated_logit_diff")
if sent.get("generated_logit_diff")
else (
[
pos_sent_to_logit_diff(entry)
for entry in sent.get("generated_pos_sent")
]
if "generated_pos_sent" in sent
else None
),
"p75_generated_logit_diff": sent.get("p75_generated_logit_diff"),
"text_for_sentiment": sent.get("text_for_sentiment"),
"generated_ts": sent.get("generated_ts"),
}
for ident, sent in response_cache.user_input_sentiments.items()
]
).drop_duplicates(subset=["timestamp"])
_filter = df.generated_logit_diff.notnull() & df.p75_generated_logit_diff.isnull()
df.loc[_filter, "p75_generated_logit_diff"] = df.loc[
_filter, "generated_logit_diff"
].apply(lambda l: | np.percentile(l, 75) | numpy.percentile |
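# A hedged sketch (variable names here are illustrative) of how the second-order LTI system
# defined by DynamicMoodSystem.system_matrices responds to a single unit input, using the
# default STEP_SEC / TAU_SEC / TAU_SEC_2ND / RESPONSE_SCALE_BASE constants from above.
import numpy as np
from scipy.signal import lsim, lti
step_sec, tau_sec, tau_sec_2nd = 30.0, 3600.0 * 12, 3600.0
response_scale = 0.15 * (step_sec / tau_sec_2nd)
A = [[-step_sec / tau_sec, 1], [0, -step_sec / tau_sec_2nd]]
B = [[0], [response_scale]]
system = lti(A, B, [[1, 0]], [[0]])
T = np.arange(0, 5000, dtype=float)
U = np.zeros_like(T)
U[10] = 1.0                      # a single user input
tout, yout, xout = lsim(system, U, T)
print(yout.max())                # the mood signal rises, peaks, then slowly decays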
'''
This example script computes the finite difference density
derivative using both numerical integration and complex pole
summation. It serves as a demonstration and a validation of
the pole summation method. For the given parameters below,
the numerical energy grid contains 708 energy points (it would
be larger if the lower bound wasn't truncated by Emin) and the
complex pole summation contains only 26. Not only do these
results agree within 1 part in 10,000 at 1/16th the computational
cost; due to its construction, the complex pole summation is also
more accurate. Where possible, the complex pole
summation method should be used over numerical integration.
'''
import matplotlib.pyplot as plt
import numpy as np
import nanonet.tb as tb
from nanonet.negf.greens_functions import simple_iterative_greens_function, surface_greens_function
from nanonet.negf import pole_summation_method
from nanonet.negf.pole_summation_method import fermi_fun
# First we design a tight-binding model. We choose a 15 site model
# so that it is symmetric and that features may be clearly ob-
# served; one- or two-site models would look too similar to clearly
# differentiate whether something erroneously similar was happening
a = tb.Orbitals('A')
a.add_orbital('s', 0)
tb.Orbitals.orbital_sets = {'A': a}
tb.set_tb_params(PARAMS_A_A={'ss_sigma': -1})
xyz_file = """15
A cell
A1 0.0000000000 0.0000000000 0.0000000000
A2 1.0000000000 0.0000000000 0.0000000000
A3 2.0000000000 0.0000000000 0.0000000000
A4 3.0000000000 0.0000000000 0.0000000000
A5 4.0000000000 0.0000000000 0.0000000000
A6 5.0000000000 0.0000000000 0.0000000000
A7 6.0000000000 0.0000000000 0.0000000000
A8 7.0000000000 0.0000000000 0.0000000000
A9 8.0000000000 0.0000000000 0.0000000000
A10 9.0000000000 0.0000000000 0.0000000000
A11 10.0000000000 0.0000000000 0.0000000000
A12 11.0000000000 0.0000000000 0.0000000000
A13 12.0000000000 0.0000000000 0.0000000000
A14 13.0000000000 0.0000000000 0.0000000000
A15 14.0000000000 0.0000000000 0.0000000000
"""
h = tb.Hamiltonian(xyz=xyz_file, nn_distance=1.1)
h.initialize()
h.set_periodic_bc([[0, 0, 1.0]])
h_l, h_0, h_r = h.get_hamiltonians()
# Now that the Hamiltonian is constructed within the TB
# framework, we set our numerical parameters to be
# examined. We choose two endpoints mu +/- dmu,
# the temperature being evaluated, the relative tolerance
# of the pole summation and the numerical integration
# Emin = -3.98, is the point where there are
# no more states to the left, we can use this
# to reduce number of points in the evaluation
Emin = -3.98
muL = -3.9175
muR = -3.9025
muC = 0.5*(muL + muR) # This is the energy the derivative is being evaluated at
kT = 0.001
reltol = 10**-8
p = np.ceil(-np.log(reltol)) # Integer number of kT away to get the desired relative tolerance.
lowbnd = max(Emin, muL - p*kT)
uppbnd = muR + p*kT
# We chose to have our energy spacing be at most 3*kT/40
numE = round((uppbnd-lowbnd)/(0.075*kT)) + 1
# numE = round((muR - muL + 2*p*kT)/(0.075*kT)) + 1
# We generate our grid for numerical integration, paying mind about the FD tails at muL-p*kT and muR + p*kT.
energy = | np.linspace(lowbnd, uppbnd, numE) | numpy.linspace |
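# A hedged sketch (fermi() below is a local stand-in, not nanonet's fermi_fun): the integrand
# for the finite-difference density derivative is essentially the difference of the two Fermi
# functions, which is why the grid can safely be truncated at muL - p*kT and muR + p*kT.
import numpy as np
def fermi(E, mu, kT):
    return 1.0 / (1.0 + np.exp((E - mu) / kT))
E_grid = np.linspace(lowbnd, uppbnd, numE)
window = fermi(E_grid, muR, kT) - fermi(E_grid, muL, kT)
print(window.max(), window[0], window[-1])   # ~1 between muL and muR, ~1e-8 at the endpoints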
"""
In this file we run ours models one by one
"""
# Imports
import random
from random import shuffle
import numpy as np
import os
import scipy.sparse as sp
import torch
from tqdm import tqdm
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
from torch.utils.data import DataLoader
import pickle
from models import MLP_With_Average_Voting, PretrainedDensenet, PretrainedResnet, CNN_With_Average_Voting, \
MLP_With_Max_Pooling, CNN_MLP_Average_Voting, CNN_MLP_Max_Pooling, PretrainedDensenetAverageVoting, \
PretrainedDensenetRELU, PretrainedDensenetAverageVotingRELU, CNN_With_Average_VotingRELU, \
CNN_MLP_Average_VotingRELU, CNN_MLP_Max_PoolingRELU, CNN_With_Max_Pooling, CNN_With_Max_PoolingRELU
from sklearn.metrics import roc_curve, auc, roc_auc_score, average_precision_score
import re
import argparse
import logging
import pandas as pd
import json
from dataloader import get_study_level_data, get_dataloaders
# Seed for our experiments
seed = 1997
random.seed(seed)
np.random.seed(seed)
torch.manual_seed(seed)
# Setting cuda for GPU if it is available
use_cuda = torch.cuda.is_available()
if use_cuda:
torch.cuda.manual_seed(seed)
# Base directory for checkpoints
odir_checkpoint = '/mnt/data/sotiris/checkpoints/'
# odir_checkpoint = 'drive/My Drive/MURA Project/checkpoints/'
# Initialize the logger handle to None
hdlr = None
# Initialize names of the body parts for the MURA dataset
study_wrist = 'XR_WRIST'
study_elbow = 'XR_ELBOW'
study_finger = 'XR_FINGER'
study_forearm = 'XR_FOREARM'
study_hand = 'XR_HAND'
study_humerus = 'XR_HUMERUS'
study_shoulder = 'XR_SHOULDER'
# Set checkpoints for each model
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'frozen_densenet_mlp_maxpooling_relu.pth.tar'
# progress_checkpoint = 'frozen_densenet_mlp_maxpooling_relu_progress.pth.tar'
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# best_checkpoint_name = 'resnet_mlp_maxpooling.pth.tar'
# progress_checkpoint = 'resnet_mlp_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_maxpooling_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mlp_averagevoting_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mlp_averagevoting_relu_progress.pth.tar'
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU-- OUR LOSS
# best_checkpoint_name = 'cnn_2layers_mpl_maxpooling_relu.pth.tar'
# progress_checkpoint = 'cnn_2layers_mpl_maxpooling_relu_progress.pth.tar'
# THIS IS FOR MLP + AVERAGE POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_averagevoting.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_progress.pth.tar'
# best_checkpoint_name = 'mlp_averagevoting_nodropout.pth.tar'
# progress_checkpoint = 'mlp_averagevoting_nodropout_progress.pth.tar'
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# best_checkpoint_name = 'mlp_maxpooling.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_progress.pth.tar'
# best_checkpoint_name = 'mlp_maxpooling_nodropout.pth.tar'
# progress_checkpoint = 'mlp_maxpooling_nodropout_progress.pth.tar'
# FOR TESTING
# best_checkpoint_name = 'testing.pth.tar'
# progress_checkpoint = 'testing_progress.pth.tar'
# FOR BEST MODEL
best_checkpoint_name = 'densenet_maxpooling_relu/hyperopt_trial_0.pth.tar'
progress_checkpoint = None
# Create the checkpoints directory
if not os.path.exists(odir_checkpoint):
os.makedirs(odir_checkpoint)
def print_params(model):
'''
It just prints the number of parameters in the model.
:param model: The pytorch model
:return: Nothing.
'''
print(40 * '=')
print(model)
print(40 * '=')
logger.info(40 * '=')
logger.info(model)
logger.info(40 * '=')
trainable = 0
untrainable = 0
for parameter in model.parameters():
# print(parameter.size())
v = 1
for s in parameter.size():
v *= s
if parameter.requires_grad:
trainable += v
else:
untrainable += v
total_params = trainable + untrainable
print(40 * '=')
print('trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params))
print(40 * '=')
logger.info(40 * '=')
logger.info('trainable:{} untrainable:{} total:{}'.format(trainable, untrainable, total_params))
logger.info(40 * '=')
logger.info('')
logger.info('')
def save_checkpoint(state, filename='checkpoint.pth.tar'):
"""
Save the torch checkpoint
:param state: The state/checkpoint to save
:param filename: The path and filename
:return: Nothing
"""
torch.save(state, filename)
def init_the_logger(hdlr):
"""
Initializes the logger
:param hdlr: The handler for the logger
:return: The logger and its handler
"""
# Create the checkpoints folder
if not os.path.exists(odir_checkpoint):
os.makedirs(odir_checkpoint)
# Set the logger base directory
od = odir_checkpoint.split('/')[-1]
logger = logging.getLogger(od)
# Remove the previous handler
if (hdlr is not None):
logger.removeHandler(hdlr)
# Create the handler for the logger for each experiment
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_averagevoting.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_averagevoting_relu.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_maxpooling.log'))
# THIS IS FOR DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'densenet_mlp_maxpooling_relu.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_averagevoting.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_averagevoting_relu.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_maxpooling.log'))
# THIS IS FOR FROZEN DENSENET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'frozen_densenet_mlp_maxpooling_relu.log'))
# THIS IS FOR RESNET PRETRAINED WITH MLP WITH 1 LAYER AND MAX POOLING OVER THE VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'resnet_mlp_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_averagevoting.log'))
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mlp_averagevoting.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mpl_maxpooling.log'))
# THIS IS FOR CNN 2 LAYERS + AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_averagevoting_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_maxpooling_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + AVERAGE VOTING WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mlp_averagevoting_relu.log'))
# THIS IS FOR CNN 2 LAYERS + MLP + MAX POOLING OVER VIEWS WITH RELU -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'cnn_2layers_mpl_maxpooling_relu.log'))
# THIS IS FOR MLP + AVERAGE VOTING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_averagevoting.log'))
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_averagevoting_nodropout.log'))
# THIS IS FOR MLP + MAX POOLING -- OUR LOSS
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_maxpooling.log'))
# hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'mlp_maxpooling_nodropout.log'))
# FOR TESTING
hdlr = logging.FileHandler(os.path.join(odir_checkpoint, 'testing.log'))
# Set the format for the logger
formatter = logging.Formatter('%(asctime)s %(levelname)s %(message)s')
hdlr.setFormatter(formatter)
# Add the handler to the logger
logger.addHandler(hdlr)
logger.setLevel(logging.INFO)
return logger, hdlr
# Initialize the logger
logger, hdlr = init_the_logger(hdlr)
def back_prop(batch_costs):
"""
Perform back propagation for a batch
:param batch_costs: The costs for the batch
:return: The average cost of the batch
"""
batch_cost = sum(batch_costs) / float(len(batch_costs))
batch_cost.backward()
optimizer.step()
optimizer.zero_grad()
batch_aver_cost = batch_cost.cpu().item()
return batch_aver_cost
# HERE YOU PASS POSITIVE AND NEGATIVE WEIGHTS
# IT IS THE LOSS FROM THE PAPER
# def weighted_binary_cross_entropy(output, target, weights=None):
# if weights is not None:
# assert len(weights) == 2
# loss = weights[1] * (target * torch.log(output)) + weights[0] * ((1 - target) * torch.log(1 - output))
# else:
# loss = target * torch.log(output) + (1 - target) * torch.log(1 - output)
# return torch.neg(torch.mean(loss))
print()
print('Loading Data...')
print()
print('Loading ELBOW')
study_data_elbow = get_study_level_data(study_elbow)
print('Loading FINGER')
study_data_finger = get_study_level_data(study_finger)
print('Loading FOREARM')
study_data_forearm = get_study_level_data(study_forearm)
print('Loading HAND')
study_data_hand = get_study_level_data(study_hand)
print('Loading WRIST')
study_data_wrist = get_study_level_data(study_wrist)
print('Loading SHOULDER')
study_data_shoulder = get_study_level_data(study_shoulder)
print('Loading HUMERUS')
study_data_humerus = get_study_level_data(study_humerus)
print()
print('Data Loaded!')
print()
frames_train = [study_data_elbow['train'],
study_data_finger['train'],
study_data_forearm['train'],
study_data_hand['train'],
study_data_wrist['train'],
study_data_shoulder['train'],
study_data_humerus['train']]
frames_dev = [study_data_elbow['valid'],
study_data_finger['valid'],
study_data_forearm['valid'],
study_data_hand['valid'],
study_data_wrist['valid'],
study_data_shoulder['valid'],
study_data_humerus['valid']]
for_test_dev = pd.concat(frames_dev)
# Shuffle it first and then split it
# Set random state so the shuffling will always have the same result
for_test_dev = for_test_dev.sample(frac=1, random_state=seed)
study_data = {'train': pd.concat(frames_train), 'valid': for_test_dev.iloc[700:], 'test': for_test_dev.iloc[:700]}
# FOR TESTING PURPOSES -- PER STUDY
# study_data_elbow = get_study_level_data(study_elbow)
# frames_train = [study_data_elbow['train']]
# frames_dev = [study_data_elbow['valid']]
# study_data_finger = get_study_level_data(study_finger)
# frames_train = [study_data_finger['train']]
# frames_dev = [study_data_finger['valid']]
# study_data_forearm = get_study_level_data(study_forearm)
# frames_train = [study_data_forearm['train']]
# frames_dev = [study_data_forearm['valid']]
# study_data_hand = get_study_level_data(study_hand)
# frames_train = [study_data_hand['train']]
# frames_dev = [study_data_hand['valid']]
# study_data_wrist = get_study_level_data(study_wrist)
# frames_train = [study_data_wrist['train']]
# frames_dev = [study_data_wrist['valid']]
# study_data_shoulder = get_study_level_data(study_shoulder)
# frames_train = [study_data_shoulder['train']]
# frames_dev = [study_data_shoulder['valid']]
# study_data_humerus = get_study_level_data(study_humerus)
# frames_train = [study_data_humerus['train']]
# frames_dev = [study_data_humerus['valid']]
# for_test_dev = pd.concat(frames_dev)
# for_test_dev = for_test_dev.sample(frac=1, random_state=seed)
# study_data = {'train': pd.concat(frames_train), 'valid': for_test_dev.iloc[70:], 'test': for_test_dev.iloc[:70]}
# END FOR TESTING PURPOSES
# Create the dataloaders for the data
data_cat = ['train', 'valid', 'test']
dataloaders, image_shape = get_dataloaders(study_data, batch_size=1)
dataset_sizes = {x: len(study_data[x]) for x in data_cat}
# Find the weight for the positive class (pos_weight); this loss differs from the paper's.
# It makes sense to compute the weight from the training split only.
# Abnormal is our positive class; count how many views are abnormal and normal.
train_dataframe = study_data['train']
num_abnormal_images = train_dataframe[train_dataframe['Path'].str.contains('positive')]['Count'].sum()
num_normal_images = train_dataframe[train_dataframe['Path'].str.contains('negative')]['Count'].sum()
# Abnormal weight
pos_weight = torch.FloatTensor(np.array(num_abnormal_images / (num_abnormal_images + num_normal_images)))
# normal weight
# neg_weight = torch.FloatTensor(np.array(num_normal_images / (num_abnormal_images + num_normal_images)))
# weights for weighted binary cross entropy
# weights = [neg_weight, pos_weight]
# Set the learning rate, batch size, epochs and patience
lr = 0.001
batch_size = 64
epochs = 20
max_patience = 5
# Set if you want to resume the training
resume = False
# Set if you want to just evaluate the test dataset
eval_test = True
# ================================== DEFINE MODEL ================================== #
# model = PretrainedDensenetAverageVoting(hidden_size=500, num_class=1)
# model = PretrainedDensenetAverageVotingRELU(hidden_size=500, num_class=1)
# model = PretrainedDensenet(hidden_size=500, num_class=1)
# model = PretrainedDensenetRELU(hidden_size=500, num_class=1)
# model = PretrainedDensenetAverageVoting(hidden_size=500, num_class=1, frozen=False)
# model = PretrainedDensenetAverageVotingRELU(hidden_size=500, num_class=1, frozen=False)
# model = PretrainedDensenet(hidden_size=500, num_class=1, frozen=False)
model = PretrainedDensenetRELU(hidden_size=500, num_class=1, frozen=False)
# model = PretrainedResnet(hidden_size=500, num_class=1)
# model = MLP_With_Average_Voting(input_dim=3 * image_shape[0] * image_shape[1],
# n_classes=1,
# hidden_1=500,
# hidden_2=200,
# hidden_3=100,
# dropout=0.3)
# model = MLP_With_Max_Pooling(input_dim=3 * image_shape[0] * image_shape[1],
# n_classes=1,
# hidden_1=500,
# hidden_2=200,
# hidden_3=100,
# dropout=0.3)
# model = CNN_With_Average_Voting(input_channels=3, input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# dropout=0.3)
# model = CNN_With_Max_Pooling(input_channels=3, input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# dropout=0.3)
# model = CNN_MLP_Average_Voting(input_channels=3, input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# hidden_size=500,
# dropout=0.3)
# model = CNN_MLP_Max_Pooling(input_channels=3,
# input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# hidden_size=500,
# dropout=0.3)
# model = CNN_With_Average_VotingRELU(input_channels=3, input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# dropout=0.3)
# model = CNN_With_Max_PoolingRELU(input_channels=3, input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# dropout=0.3)
# model = CNN_MLP_Average_VotingRELU(input_channels=3, input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# hidden_size=500,
# dropout=0.3)
# model = CNN_MLP_Max_PoolingRELU(input_channels=3,
# input_shape=image_shape,
# n_classes=1,
# n_filters_1=10,
# n_filters_2=20,
# hidden_size=500,
# dropout=0.3)
# ================================== ================================== #
# Print the parameters of the model
print_params(model)
# Get the model parameters
paramaters = model.parameters()
# Set the loss function
loss = nn.BCEWithLogitsLoss(pos_weight=pos_weight)
# Set the optimizer
optimizer = torch.optim.Adam(params=paramaters, lr=lr)
# Get the dataset iterators
train_iterator = dataloaders['train']
dev_iterator = dataloaders['valid']
test_iterator = dataloaders['test']
# Initialize values for best auc and best epoch
best_auc = -1000.0
best_epoch = 0
# Load json file to store model results or create an empty dictionary
results_mura = None
if os.path.exists('/mnt/data/sotiris/results_mura.json'):
with open('/mnt/data/sotiris/results_mura.json') as fi:
results_mura = json.load(fi)
else:
results_mura = dict()
if use_cuda:
print()
print('GPU available!!')
print()
model = model.cuda()
def evaluate(iterator, model):
"""
Method that evaluates the dev/test sets
:param iterator: The dataset iterator
:param model: The model
:return: Metrics for the set
"""
# Perform all actions without keeping gradients
with torch.no_grad():
# Set the model to evaluation mode
model.eval()
# Initialize values and lists
batch_preds = []
eval_costs = []
eval_cost = 0.0
batch_labels = []
aucs = []
aps = []
# Iterate the set
for ev_batch in iterator:
# Get the images and the labels
dev_images = ev_batch['images']
dev_labels = ev_batch['labels'].float()
# Cast them to cuda if necessary
if use_cuda:
dev_images = dev_images.cuda()
dev_labels = dev_labels.cuda()
# Reset the gradients in the optimizer
optimizer.zero_grad()
# Pass the images through the model to get the predictions
dev_preds = model(dev_images)
# Calculate the accumulated loss
eval_cost += float(loss(dev_preds, dev_labels).cpu().item())
# Append the labels and preds to the batch lists
batch_labels.append(dev_labels)
batch_preds.append(dev_preds)
# If we have reached the batch size
if len(batch_preds) == batch_size:
# Get the average of the losses and append it to the list
eval_costs.append(eval_cost / batch_size)
# Set the accumulated loss to 0
eval_cost = 0
# Pass the batch predictions through a sigmoid
sigmoid_dev_preds = torch.sigmoid(torch.stack(batch_preds))
# Calculate auc score
dev_auc_easy = roc_auc_score(torch.stack(batch_labels).cpu().numpy(),
sigmoid_dev_preds.cpu().numpy())
# Calculate average precision
average_precision = average_precision_score(torch.stack(batch_labels).cpu().numpy(),
sigmoid_dev_preds.cpu().numpy())
# Append scores to the lists
aucs.append(dev_auc_easy)
aps.append(average_precision)
# Reset the lists
batch_labels = []
batch_preds = []
# Return metrics
return dev_auc_easy, aucs, aps, eval_costs
def evaluate_cam(iterator, model, num_of_images):
"""
Method that evaluates the dev/test set and also creates the gradCAM images
:param iterator: The dataset iterator
:param model: The model
:param num_of_images: The number of images to get for CAM
:return: Metrics for the set
"""
# Set the model to evaluation mode
model.eval()
# Initialize values and lists
batch_preds = []
eval_costs = []
eval_cost = 0.0
batch_labels = []
aucs = []
aps = []
img_i = 0
dev_auc_easy = 0
# Iterate the set
for ev_batch in iterator:
# Get the images and the labels
dev_images = ev_batch['images']
dev_labels = ev_batch['labels'].float()
# Cast them to cuda if necessary
if use_cuda:
dev_images = dev_images.cuda()
dev_labels = dev_labels.cuda()
# Reset the gradients in the optimizer
optimizer.zero_grad()
# Create gradCAM images only for the first n instances
if img_i <= num_of_images:
# Generate heatmap
# as in: https://medium.com/@stepanulyanin/implementing-grad-cam-in-pytorch-ea0937c31e82
import cv2
# Get the instance's path to the image file
pathImageFiles = ev_batch['paths']
# Set the output image's file
pathOutputFile = 'cam_images/test{}.jpg'.format(img_i)
# Increment the output image id
img_i += 1
# Get predictions with hook on the gradients
cam_output = model.forward_cam(dev_images)
# Legacy for dev -- so that we don't pass it 2 times
dev_preds = cam_output
eval_cost += float(loss(dev_preds, dev_labels).cpu().item())
# Get the gradient of the output with respect to the parameters of the model
cam_output.backward()
# Pull the gradients out of the model
gradients = model.get_activations_gradient()
# Pool the gradients across the channels
pooled_gradients = torch.mean(gradients, dim=[2, 3])
# Get the activations of the last convolutional layer
activations = model.get_activations(dev_images).detach()
# Weight the channels by corresponding gradients
for v in range(len(ev_batch['paths'][0])):
for i in range(activations.shape[1]):
activations[v, i, :, :] *= pooled_gradients[v, i]
# Average the channels of the activations
heatmaps = torch.mean(activations, dim=1)
# Create plot for the heatmaps and the superposed image
import matplotlib.pyplot as plt
fig, axis = plt.subplots(len(ev_batch['paths']), 2)
if len(ev_batch['paths']) == 1:
axis = axis.reshape(1, 2)
fig.suptitle('/'.join(ev_batch['paths'][0][0].split('/')[5:-1]) +
'\nTrue: {} -- Predicted: {:.3f}'.format(dev_labels.cpu().item(),
F.sigmoid(cam_output).cpu().item()))
# For every view in the instance
for v in range(len(ev_batch['paths'])):
# leaky relu on top of the heatmap
# or maybe better use relu
# heatmap = F.leaky_relu(heatmaps[v])
# Pass the heatmaps from a relu to throw negative scores
heatmap = F.relu(heatmaps[v])
# Normalize the heatmap
h_max = torch.max(heatmap)
if h_max != 0.0:
heatmap /= h_max
# Save the heatmaps -- for debugging
# plt.matshow(heatmap.cpu().numpy())
# plt.savefig('{}_matrix.png'.format(v))
# plt.clf()
# Add the heatmap for hte view in the plot
axis[v, 0].matshow(heatmap.cpu().numpy())
axis[v, 0].axis('off')
# Read the image from the path
imgOriginal = cv2.imread(pathImageFiles[v][0])
# Resize the heatmap to the image's dimensions
heatmap = cv2.resize(heatmap.cpu().numpy(), (imgOriginal.shape[1], imgOriginal.shape[0]))
# Cast heatmap values to [0,255] ints
heatmap = | np.uint8(255 * heatmap) | numpy.uint8 |
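# A hedged sketch of a common way (not necessarily this script's) to finish a Grad-CAM
# overlay once the heatmap has been resized and cast to uint8: colour-map it and blend it
# with the original radiograph. The arrays below are stand-ins for the real images.
import cv2
import numpy as np
img = np.zeros((224, 224, 3), dtype=np.uint8)                        # stand-in original image
cam = np.random.randint(0, 256, (224, 224), dtype=np.uint8)          # stand-in resized heatmap
coloured = cv2.applyColorMap(cam, cv2.COLORMAP_JET)                  # uint8 map -> BGR colours
overlay = cv2.addWeighted(img, 0.6, coloured, 0.4, 0)                # superimpose the heatmap
cv2.imwrite('cam_overlay_example.jpg', overlay)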
import time
import numpy as np
from scipy.signal import lfilter
import scipy.io.wavfile as wav
from .audio import AudioProcess
class WaveProcessor(object):
"""Applying Filters to wav file
This is for pre-FFT / FFT / post-FFT processing of a wav file using filters.
It provides both parallel and cascade structures. The details of testing are in
test.py.
Parameters
----------
wavfile_path (str): path to wav file
TODO LIST
---------
- integrate data format (ex. Q1.30, int16, etc.)
The current data format is float32, but it has not been tested.
- self.process_time_domain: Fixed point(ex. Q1.30)
Reference
---------
https://dsp.stackexchange.com/questions/20194/concept-of-combining-multiple-fir-filters-into-1-fir-filter
https://kr.mathworks.com/help/audio/ref/graphiceq.html
https://kr.mathworks.com/help/audio/ug/GraphicEQModel.html
"""
def __init__(self, wavfile_path) -> None:
if isinstance(wavfile_path, str):
self.wavfile_path = wavfile_path
self.sampleing_freq, self.data_wav = wav.read(wavfile_path)
elif isinstance(wavfile_path, np.ndarray):
self.wavfile_path = None
self.data_wav = wavfile_path
self.sampleing_freq = 48000
else:
raise ValueError("wavfile_path must be str or np.ndarray")
self._bias = None
self._filter_time_domain_list = []
self._filter_freq_domain_list = []
self.zi = []
self.time_filter_time = []
self.time_filter_freq = []
# for testing
self.frame_prev = []
self.output_prev = []
self.frame_counter = 0
self.graphical_equalizer = False
@property
def filter_time_domain_list(self) -> list:
return self._filter_time_domain_list
@filter_time_domain_list.setter
def filter_time_domain_list(self, coeff):
if not isinstance(coeff, np.ndarray):
coeff = | np.array(coeff) | numpy.array |
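# --- Added sketch (not part of the WaveProcessor class above) ---
# The WaveProcessor docstring mentions both cascade and parallel filter
# structures. A minimal stand-alone illustration of the two topologies with
# scipy.signal.lfilter is given below; the coefficients are arbitrary toy FIR
# sections, not the filters used by WaveProcessor.
import numpy as np
from scipy.signal import lfilter

def apply_cascade(x, sections):
    """Run the signal through each (b, a) section in series."""
    y = x
    for b, a in sections:
        y = lfilter(b, a, y)
    return y

def apply_parallel(x, sections):
    """Run the signal through each (b, a) section and sum the branch outputs."""
    return np.sum([lfilter(b, a, x) for b, a in sections], axis=0)

if __name__ == "__main__":
    x = np.random.randn(48000).astype(np.float32)            # 1 s of noise at 48 kHz
    sections = [([0.5, 0.5], [1.0]), ([1.0, -0.5], [1.0])]   # two toy FIR sections
    y_cascade = apply_cascade(x, sections)
    y_parallel = apply_parallel(x, sections)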
import numpy as np
import matplotlib.pyplot as plt
import itertools
import time
import os
from numpy.fft import fft, ifft, fft2, ifft2, fftn, ifftn, fftshift, ifftshift
from IPython import display
from scipy.ndimage import uniform_filter
from concurrent.futures import ProcessPoolExecutor
from .util import *
from .optics import *
from .background_estimator import *
def intensity_mapping(img_stack):
img_stack_out = np.zeros_like(img_stack)
img_stack_out[0] = img_stack[0].copy()
img_stack_out[1] = img_stack[4].copy()
img_stack_out[2] = img_stack[3].copy()
img_stack_out[3] = img_stack[1].copy()
img_stack_out[4] = img_stack[2].copy()
return img_stack_out
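# --- Added usage note (not part of the original module) ---
# intensity_mapping() is just a fixed permutation of the first (channel) axis,
# equivalent to img_stack[[0, 4, 3, 1, 2]]. A quick check with a dummy stack:
#
#     import numpy as np
#     dummy = np.arange(5).reshape(5, 1, 1)
#     intensity_mapping(dummy).ravel()   # -> array([0, 4, 3, 1, 2])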
def instrument_matrix_and_source_calibration(I_cali_mean, handedness = 'RCP'):
_, N_cali = I_cali_mean.shape
# Source intensity
I_tot = np.sum(I_cali_mean,axis=0)
# Calibration matrix
theta = np.r_[0:N_cali]/N_cali*2*np.pi
C_matrix = np.array([np.ones((N_cali,)), np.cos(2*theta), np.sin(2*theta)])
# offset calibration
I_cali_norm = I_cali_mean/I_tot
offset_est = np.transpose(np.linalg.pinv(C_matrix.transpose()).dot(np.transpose(I_cali_norm[0,:])))
alpha = np.arctan2(-offset_est[2], offset_est[1])/2
# Source calibration
C_matrix_offset = np.array([np.ones((N_cali,)), np.cos(2*(theta+alpha)), np.sin(2*(theta+alpha))])
S_source = np.linalg.pinv(C_matrix_offset.transpose()).dot(I_tot[:,np.newaxis])
S_source_norm = S_source/S_source[0]
Ax = np.sqrt((S_source_norm[0]+S_source_norm[1])/2)
Ay = np.sqrt((S_source_norm[0]-S_source_norm[1])/2)
del_phi = np.arccos(S_source_norm[2]/2/Ax/Ay)
if handedness == 'RCP':
E_in = np.array([Ax, Ay*np.exp(1j*del_phi)])
elif handedness == 'LCP':
E_in = np.array([Ax, Ay*np.exp(-1j*del_phi)])
else:
raise TypeError("handedness type must be 'LCP' or 'RCP'")
# Instrument matrix calibration
A_matrix = np.transpose(np.linalg.pinv(C_matrix_offset.transpose()).dot(np.transpose(I_cali_norm)))
theta_fine = np.r_[0:360]/360*2*np.pi
C_matrix_offset_fine = np.array([np.ones((360,)), np.cos(2*(theta_fine+alpha)), np.sin(2*(theta_fine+alpha))])
print('Calibrated source field:\n' + str(np.round(E_in,4)))
print('Calibrated instrument matrix:\n' + str(np.round(A_matrix,4)))
fig,ax = plt.subplots(2,2,figsize=(20,20))
ax[0,0].plot(theta/np.pi*180,np.transpose(I_cali_mean))
ax[0,0].legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
ax[0,0].set_title('Calibration curve without normalization')
ax[0,0].set_xlabel('Orientation of LP (deg)')
ax[0,0].set_ylabel('Raw intensity')
ax[0,1].plot(theta/np.pi*180,I_tot)
ax[0,1].plot(theta_fine/np.pi*180,np.transpose(C_matrix_offset_fine).dot(S_source))
ax[0,1].legend(['Mean source intensity', 'Fitted source intensity'])
ax[0,1].set_title('Source calibration curve')
ax[0,1].set_xlabel('Orientation of LP (deg)')
ax[0,1].set_ylabel('Mean intensity from 4 linear channels')
ax[1,0].plot(theta/np.pi*180,np.transpose(I_cali_mean/I_tot))
ax[1,0].legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
ax[1,0].set_title('Normalized calibration curve')
ax[1,0].set_xlabel('Orientation of LP (deg)')
ax[1,0].set_ylabel('Normalized intensity')
ax[1,1].plot(theta/np.pi*180,np.transpose(I_cali_norm))
ax[1,1].plot(theta_fine/np.pi*180,np.transpose(A_matrix.dot(C_matrix_offset_fine)))
ax[1,1].legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
ax[1,1].set_xlabel('Orientation of LP (deg)')
ax[1,1].set_ylabel('Normalized intensity')
ax[1,1].set_title('Fitted calibration curves')
return E_in, A_matrix, np.transpose(A_matrix.dot(C_matrix_offset_fine))
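# --- Added sketch (not part of the original module) ---
# Both calibration routines in this module reduce to least-squares fitting each
# normalized calibration curve with the model a0 + a1*cos(2*theta) + a2*sin(2*theta)
# via a pseudo-inverse (numpy is already imported at the top of this module).
# A tiny synthetic example of that core step, with made-up coefficients that are
# independent of any real calibration data:
def _fit_calibration_curve(I_curve, theta):
    """Least-squares fit of a0 + a1*cos(2t) + a2*sin(2t) to one intensity curve."""
    S = np.array([np.ones_like(theta), np.cos(2 * theta), np.sin(2 * theta)])
    return np.linalg.pinv(S.transpose()).dot(I_curve)

if __name__ == "__main__":
    _theta = np.r_[0:24] / 24 * 2 * np.pi
    _true = np.array([1.0, 0.3, -0.2])                       # arbitrary test coefficients
    _curve = _true[0] + _true[1] * np.cos(2 * _theta) + _true[2] * np.sin(2 * _theta)
    print(np.round(_fit_calibration_curve(_curve, _theta), 4))   # ~ [ 1.   0.3 -0.2]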
def instrument_matrix_calibration(I_cali_norm, I_meas):
_, N_cali = I_cali_norm.shape
theta = np.r_[0:N_cali]/N_cali*2*np.pi
S_matrix = np.array([np.ones((N_cali,)), np.cos(2*theta), np.sin(2*theta)])
A_matrix = np.transpose(np.linalg.pinv(S_matrix.transpose()).dot(np.transpose(I_cali_norm)))
if I_meas.ndim == 3:
I_mean = np.mean(I_meas,axis=(1,2))
elif I_meas.ndim == 4:
I_mean = np.mean(I_meas,axis=(1,2,3))
I_tot = np.sum(I_mean)
A_matrix_S3 = I_mean/I_tot-A_matrix[:,0]
I_corr = (I_tot/4)*(A_matrix_S3)/np.mean(A_matrix[:,0])
print('Calibrated instrument matrix:\n' + str(np.round(A_matrix,4)))
print('Last column of instrument matrix:\n' + str(np.round(A_matrix_S3.reshape((4,1)),4)))
plt.plot(np.transpose(I_cali_norm))
plt.plot(np.transpose(A_matrix.dot(S_matrix)))
plt.xlabel('Orientation of LP (deg)')
plt.ylabel('Normalized intensity')
plt.title('Fitted calibration curves')
plt.legend(['$I_0$', '$I_{45}$', '$I_{90}$', '$I_{135}$'])
return A_matrix, I_corr
class waveorder_microscopy:
'''
waveorder_microscopy contains methods to compute weak object transfer function
for label-free image reconstruction with various types of dataset:
1) 2D/3D phase reconstruction with a single brightfield defocused stack (Transport of intensity, TIE)
2) 2D/3D phase reconstruction with intensities of asymmetric illumination
(differential phase contrast, DPC)
3) 2D/3D joint phase and polarization (2D orientation) reconstruction
with brightfield-illuminated polarization-sensitive intensities (QLIPP)
4) 2D/3D joint phase and polarization (uniaxial permittivity tensor) reconstruction
with asymmetrically-illuminated polarization-sensitive intensities (uPTI)
Parameters
----------
img_dim : tuple
shape of the computed 2D space with size of (N, M)
lambda_illu : float
wavelength of the incident light
ps : float
xy pixel size of the image space
psz : float
z step size of the image space
NA_obj : float
numerical aperture of the detection objective
NA_illu : float
numerical aperture of the illumination condenser
z_defocus : numpy.ndarray
1D array of defocused z positions corresponding to the intensity stack
(matters for 2D reconstruction; the direction of positive z matters for 3D reconstruction)
chi : float
swing of the illumination or detection polarization state (in radian)
n_media : float
refractive index of the immersing media
cali : bool
'True' for the orientation convention of QLIPP data,
'False' for the orientation convention of uPTI data
bg_option : str
'local' for estimating background with scipy uniform filter
'local_fit' for estimating background with polynomial fit
other string for normal background subtraction with the provided background
A_matrix : numpy.ndarray
self-provided instrument matrix converting polarization-sensitive intensity images into Stokes parameters
with shape of (N_channel, N_Stokes)
If None is provided, the instrument matrix is determined by the QLIPP convention with the swing specified by chi
QLIPP_birefringence_only : bool
'True' to skip pre-processing functions for phase/uPTI reconstruction
'False' to continue with pre-processing functions for phase/uPTI reconstruction
bire_in_plane_deconv : str
string contains the dimension of 2D birefringence deconvolution
'2D' for 2D deconvolution of 2D birefringence
'3D' for 3D deconvolution of 2D birefringence
inc_recon : str
option for constructing settings for 3D orientation reconstruction
'2D-vec-WOTF' for 2D diffractive reconstruction of 3D anisotropy
'3D' for 3D diffractive reconstruction of 3D anisotropy
phase_deconv : str
string contains the phase reconstruction dimension
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
ph_deconv_layer : int
number of layers included for each layer of semi-3D phase reconstruction
illu_mode : str
string to set the pattern of illumination source
'BF' for brightfield illumination with source pattern specified by NA_illu
'PH' for phase contrast illumination with the source pattern specified by NA_illu and NA_illu_in
'Arbitrary' for self-defined source pattern of dimension (N_pattern, N, M)
NA_illu_in : float
numerical aperture of the inner circle for phase contrast ring illumination
Source : numpy.ndarray
illumination source pattern with dimension of (N_pattern, N, M)
Source_PolState : numpy.ndarray
illumination polarization states (Ex, Ey) for each illumination pattern with dimension of (N_pattern, 2)
If provided with size of (2,), a single state is used for all illumination patterns
pad_z : int
number of z-layers to pad (reflection boundary condition) for 3D deconvolution
use_gpu : bool
option to use gpu or not
gpu_id : int
number referring to which gpu will be used
'''
def __init__(self, img_dim, lambda_illu, ps, NA_obj, NA_illu, z_defocus, chi=None,\
n_media=1, cali=False, bg_option='global',
A_matrix=None, QLIPP_birefringence_only = False, bire_in_plane_deconv=None, inc_recon=None,
phase_deconv=None, ph_deconv_layer = 5,
illu_mode='BF', NA_illu_in=None, Source=None, Source_PolState=np.array([1, 1j]),
pad_z=0, use_gpu=False, gpu_id=0):
'''
initialize the system parameters for phase and orders microscopy
'''
t0 = time.time()
# GPU/CPU
self.use_gpu = use_gpu
self.gpu_id = gpu_id
if self.use_gpu:
globals()['cp'] = __import__("cupy")
cp.cuda.Device(self.gpu_id).use()
# Basic parameter
self.N, self.M = img_dim
self.n_media = n_media
self.lambda_illu = lambda_illu/n_media
self.ps = ps
self.z_defocus = z_defocus.copy()
if len(z_defocus) >= 2:
self.psz = np.abs(z_defocus[0] - z_defocus[1])
self.G_tensor_z_upsampling = np.ceil(self.psz/(self.lambda_illu/2))
self.pad_z = pad_z
self.NA_obj = NA_obj/n_media
self.NA_illu = NA_illu/n_media
self.N_defocus = len(z_defocus)
self.N_defocus_3D = self.N_defocus + 2*self.pad_z
self.chi = chi
self.cali = cali
self.bg_option = bg_option
self.phase_deconv = phase_deconv
if QLIPP_birefringence_only == False:
# setup microscope variables
self.xx, self.yy, self.fxx, self.fyy = gen_coordinate((self.N, self.M), ps)
self.Pupil_obj = gen_Pupil(self.fxx, self.fyy, self.NA_obj, self.lambda_illu)
self.Pupil_support = self.Pupil_obj.copy()
# illumination setup
self.illumination_setup(illu_mode, NA_illu_in, Source, Source_PolState)
# Defocus kernel initialization
self.Hz_det_setup(self.phase_deconv, ph_deconv_layer, bire_in_plane_deconv, inc_recon)
# select either 2D or 3D model for phase deconvolution
self.phase_deconv_setup(self.phase_deconv)
# instrument matrix for polarization detection
self.instrument_matrix_setup(A_matrix)
# select either 2D or 3D model for 2D birefringence deconvolution
self.bire_in_plane_deconv_setup(bire_in_plane_deconv)
# inclination reconstruction model selection
self.inclination_recon_setup(inc_recon)
else:
# instrument matrix for polarization detection
self.instrument_matrix_setup(A_matrix)
############## constructor function group ##############
def illumination_setup(self, illu_mode, NA_illu_in, Source, Source_PolState):
'''
setup illumination source function for transfer function computing
Parameters
----------
illu_mode : str
string to set the pattern of illumination source
'BF' for brightfield illumination with source pattern specified by NA_illu
'PH' for phase contrast illumination with the source pattern specified by NA_illu and NA_illu_in
'Arbitrary' for self-defined source pattern of dimension (N_pattern, N, M)
NA_illu_in : float
numerical aperture of the inner circle for phase contrast ring illumination
Source : numpy.ndarray
illumination source pattern with dimension of (N_pattern, N, M)
Source_PolState : numpy.ndarray
illumination polarization states (Ex, Ey) for each illumination pattern with dimension of (N_pattern, 2)
'''
if illu_mode == 'BF':
self.Source = gen_Pupil(self.fxx, self.fyy, self.NA_illu, self.lambda_illu)
self.N_pattern = 1
elif illu_mode == 'PH':
if NA_illu_in is None:
raise ValueError('No inner rim NA specified in the PH illumination mode')
else:
self.NA_illu_in = NA_illu_in/self.n_media
inner_pupil = gen_Pupil(self.fxx, self.fyy, self.NA_illu_in/self.n_media, self.lambda_illu)
self.Source = gen_Pupil(self.fxx, self.fyy, self.NA_illu, self.lambda_illu)
self.Source -= inner_pupil
Pupil_ring_out = gen_Pupil(self.fxx, self.fyy, self.NA_illu/self.n_media, self.lambda_illu)
Pupil_ring_in = gen_Pupil(self.fxx, self.fyy, self.NA_illu_in/self.n_media, self.lambda_illu)
self.Pupil_obj = self.Pupil_obj*np.exp((Pupil_ring_out-Pupil_ring_in)*(np.log(0.7)-1j*(np.pi/2 - 0.0*np.pi)))
self.N_pattern = 1
elif illu_mode == 'Arbitrary':
self.Source = Source.copy()
if Source.ndim == 2:
self.N_pattern = 1
else:
self.N_pattern = len(Source)
self.Source_PolState = np.zeros((self.N_pattern, 2), complex)
if Source_PolState.ndim == 1:
for i in range(self.N_pattern):
self.Source_PolState[i] = Source_PolState/(np.sum(np.abs(Source_PolState)**2))**(1/2)
else:
if len(Source_PolState) != self.N_pattern:
raise ValueError('The length of Source_PolState needs to be either 1 or the same as N_pattern')
for i in range(self.N_pattern):
self.Source_PolState[i] = Source_PolState[i]/(np.sum(np.abs(Source_PolState[i])**2))**(1/2)
def Hz_det_setup(self, phase_deconv, ph_deconv_layer, bire_in_plane_deconv, inc_recon):
'''
setup defocus kernels for deconvolution with the corresponding dimensions
Parameters
----------
phase_deconv : str
string contains the dimension of the phase reconstruction
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
ph_deconv_layer : int
number of layers included for each layer of semi-3D phase reconstruction
bire_in_plane_deconv : str
string contains the dimension of 2D birefringence deconvolution
'2D' for 2D deconvolution of 2D birefringence
'3D' for 3D deconvolution of 2D birefringence
inc_recon : str
option for constructing settings for 3D orientation reconstruction
'2D-geometric' for 2D non-diffractive reconstruction of 3D anisotropy
'2D-vec-WOTF' for 2D diffractive reconstruction of 3D anisotropy
'3D' for 3D diffractive reconstruction of 3D anisotropy
'''
if phase_deconv == '2D' or bire_in_plane_deconv == '2D' or inc_recon == '2D-vec-WOTF':
# generate defocus kernel based on Pupil function and z_defocus
self.Hz_det_2D = gen_Hz_stack(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, self.z_defocus)
if phase_deconv == 'semi-3D':
self.ph_deconv_layer = ph_deconv_layer
if self.z_defocus[0] - self.z_defocus[1] >0:
z_deconv = -(np.r_[:self.ph_deconv_layer]-self.ph_deconv_layer//2)*self.psz
else:
z_deconv = (np.r_[:self.ph_deconv_layer]-self.ph_deconv_layer//2)*self.psz
self.Hz_det_semi_3D = gen_Hz_stack(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z_deconv)
self.G_fun_z_semi_3D = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z_deconv)
if phase_deconv == '3D' or bire_in_plane_deconv == '3D' or inc_recon == '3D':
# generate defocus kernel and Green's function
if self.z_defocus[0] - self.z_defocus[1] >0:
z = -ifftshift((np.r_[0:self.N_defocus_3D]-self.N_defocus_3D//2)*self.psz)
else:
z = ifftshift((np.r_[0:self.N_defocus_3D]-self.N_defocus_3D//2)*self.psz)
self.Hz_det_3D = gen_Hz_stack(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z)
self.G_fun_z_3D = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z)
def phase_deconv_setup(self, phase_deconv):
'''
setup transfer functions for phase deconvolution with the corresponding dimensions
Parameters
----------
phase_deconv : str
string contains the dimension of the phase reconstruction
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
ph_deconv_layer : int
number of layers included for each layer of semi-3D phase reconstruction
'''
if phase_deconv == '2D':
# compute 2D phase transfer function
self.gen_WOTF()
elif phase_deconv == 'semi-3D':
self.gen_semi_3D_WOTF()
elif phase_deconv == '3D':
# compute 3D phase transfer function
self.gen_3D_WOTF()
def bire_in_plane_deconv_setup(self, bire_in_plane_deconv):
'''
setup transfer functions for 2D birefringence deconvolution with the corresponding dimensions
Parameters
----------
bire_in_plane_deconv : str
string contains the dimension of 2D birefringence deconvolution
'2D' for 2D deconvolution of 2D birefringence
'3D' for 3D deconvolution of 2D birefringence
'''
if bire_in_plane_deconv == '2D':
# generate 2D vectorial transfer function for 2D birefringence deconvolution in 2D space
self.gen_2D_vec_WOTF(False)
elif bire_in_plane_deconv == '3D':
# generate 3D vectorial transfer function for 2D birefringence deconvolution in 3D space
self.gen_3D_vec_WOTF(False)
def inclination_recon_setup(self, inc_recon):
'''
setup transfer functions for uPTI reconstruction
Parameters
----------
phase_deconv : str
string contains the phase reconstruction dimension
'2D' for 2D phase deconvolution
'3D' for 3D phase deconvolution
inc_recon : str
option for constructing settings for 3D orientation reconstruction
'2D-geometric' for 2D non-diffractive reconstruction of 3D anisotropy
'2D-vec-WOTF' for 2D diffractive reconstruction of 3D anisotropy
'3D' for 3D diffractive reconstruction of 3D anisotropy
'''
if inc_recon is not None and inc_recon != '3D':
if inc_recon == '2D-geometric':
wave_vec_norm_x = self.lambda_illu*self.fxx
wave_vec_norm_y = self.lambda_illu*self.fyy
wave_vec_norm_z = (np.maximum(0,1 - wave_vec_norm_x**2 - wave_vec_norm_y**2))**(0.5)
incident_theta = np.arctan2((wave_vec_norm_x**2 + wave_vec_norm_y**2)**(0.5), wave_vec_norm_z)
incident_phi = np.arctan2(wave_vec_norm_y,wave_vec_norm_x)
self.geometric_inc_matrix, self.geometric_inc_matrix_inv = gen_geometric_inc_matrix(incident_theta, incident_phi, self.Source)
elif inc_recon == '2D-vec-WOTF':
# generate 2D vectorial transfer function for 2D uPTI
self.gen_2D_vec_WOTF(True)
# compute the AHA matrix for later 2D inversion
self.inc_AHA_2D_vec = np.zeros((7,7,self.N,self.M),complex)
for i,j,p in itertools.product(range(7), range(7), range(self.N_Stokes)):
self.inc_AHA_2D_vec[i,j] += np.sum(np.conj(self.H_dyadic_2D_OTF[p,i])*self.H_dyadic_2D_OTF[p,j],axis=2)
elif inc_recon == '3D':
# generate 3D vectorial transfer function for 3D uPTI
self.gen_3D_vec_WOTF(True)
self.inc_AHA_3D_vec = np.zeros((7,7,self.N,self.M,self.N_defocus_3D), dtype='complex64')
# compute the AHA matrix for later 3D inversion
for i,j,p in itertools.product(range(7), range(7), range(self.N_Stokes)):
self.inc_AHA_3D_vec[i,j] += np.sum(np.conj(self.H_dyadic_OTF[p,i])*self.H_dyadic_OTF[p,j],axis=0)
def instrument_matrix_setup(self, A_matrix):
'''
setup instrument matrix
Parameters
----------
A_matrix : numpy.ndarray
self-provided instrument matrix converting polarization-sensitive intensity images into Stokes parameters
with shape of (N_channel, N_Stokes)
If None is provided, the instrument matrix is determined by the QLIPP convention with the swing specified by chi
'''
if A_matrix is None:
self.N_channel = 5
self.N_Stokes = 4
self.A_matrix = 0.5*np.array([[1,0,0,-1], \
[1, np.sin(self.chi), 0, -np.cos(self.chi)], \
[1, 0, np.sin(self.chi), -np.cos(self.chi)], \
[1, -np.sin(self.chi), 0, -np.cos(self.chi)], \
[1, 0, -np.sin(self.chi), -np.cos(self.chi)]])
else:
self.N_channel = A_matrix.shape[0]
self.N_Stokes = A_matrix.shape[1]
self.A_matrix = A_matrix.copy()
############## constructor assisting function group ##############
def gen_WOTF(self):
'''
generate 2D phase transfer functions
'''
self.Hu = np.zeros((self.N, self.M, self.N_defocus*self.N_pattern),complex)
self.Hp = np.zeros((self.N, self.M, self.N_defocus*self.N_pattern),complex)
if self.N_pattern == 1:
for i in range(self.N_defocus):
self.Hu[:,:,i], self.Hp[:,:,i] = WOTF_2D_compute(self.Source, self.Pupil_obj * self.Hz_det_2D[:,:,i], \
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
else:
for i,j in itertools.product(range(self.N_defocus), range(self.N_pattern)):
idx = i*self.N_pattern+j
self.Hu[:,:,idx], self.Hp[:,:,idx] = WOTF_2D_compute(self.Source[j], self.Pupil_obj * self.Hz_det_2D[:,:,i], \
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
def gen_semi_3D_WOTF(self):
'''
generate semi-3D phase transfer functions
'''
self.Hu = np.zeros((self.N, self.M, self.ph_deconv_layer*self.N_pattern),complex)
self.Hp = np.zeros((self.N, self.M, self.ph_deconv_layer*self.N_pattern),complex)
for i,j in itertools.product(range(self.ph_deconv_layer), range(self.N_pattern)):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[j].copy()
idx = i*self.N_pattern+j
self.Hu[:,:,idx], self.Hp[:,:,idx] = WOTF_semi_3D_compute(Source_current, Source_current, self.Pupil_obj, self.Hz_det_semi_3D[:,:,i], \
self.G_fun_z_semi_3D[:,:,i]*4*np.pi*1j/self.lambda_illu, \
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
def gen_3D_WOTF(self):
'''
generate 3D phase transfer functions
'''
self.H_re = np.zeros((self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
self.H_im = np.zeros((self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
for i in range(self.N_pattern):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[i].copy()
self.H_re[i], self.H_im[i] = WOTF_3D_compute(Source_current.astype('float32'), Source_current.astype('float32'), self.Pupil_obj.astype('complex64'), \
self.Hz_det_3D.astype('complex64'), self.G_fun_z_3D.astype('complex64'), self.psz,\
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
self.H_re = np.squeeze(self.H_re)
self.H_im = np.squeeze(self.H_im)
def gen_2D_vec_WOTF(self, inc_option):
'''
generate 2D vectorial transfer functions for 2D QUTIPP
'''
if inc_option == True:
self.H_dyadic_2D_OTF = np.zeros((self.N_Stokes, 7, self.N, self.M, self.N_defocus*self.N_pattern),dtype='complex64')
else:
self.H_dyadic_2D_OTF_in_plane = np.zeros((2, 2, self.N, self.M, self.N_defocus*self.N_pattern),dtype='complex64')
# angle-dependent electric field components due to focusing effect
fr = (self.fxx**2 + self.fyy**2)**(0.5)
cos_factor = (1-(self.lambda_illu**2)*(fr**2)*self.Pupil_support)**(0.5)*self.Pupil_support
dc_idx = (fr==0)
nondc_idx = (fr!=0)
E_field_factor = np.zeros((5, self.N, self.M))
E_field_factor[0, nondc_idx] = ((self.fxx[nondc_idx]**2)*cos_factor[nondc_idx]+ self.fyy[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[0, dc_idx] = 1
E_field_factor[1, nondc_idx] = (self.fxx[nondc_idx]*self.fyy[nondc_idx] * (cos_factor[nondc_idx]-1)) / fr[nondc_idx]**2
E_field_factor[2, nondc_idx] = ((self.fyy[nondc_idx]**2)*cos_factor[nondc_idx] + self.fxx[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[2, dc_idx] = 1
E_field_factor[3, nondc_idx] = -self.lambda_illu*self.fxx[nondc_idx]
E_field_factor[4, nondc_idx] = -self.lambda_illu*self.fyy[nondc_idx]
# generate dyadic Green's tensor
G_fun_z = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, self.z_defocus)
G_tensor_z = gen_dyadic_Greens_tensor_z(self.fxx, self.fyy, G_fun_z, self.Pupil_support, self.lambda_illu)
# compute transfer functions
OTF_compute = lambda x, y, z, w: WOTF_semi_3D_compute(x, y, self.Pupil_obj, w, \
z, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
for i,j in itertools.product(range(self.N_defocus), range(self.N_pattern)):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[j].copy()
idx = i*self.N_pattern+j
# focusing electric field components
Ex_field = self.Source_PolState[j,0]*E_field_factor[0] + self.Source_PolState[j,1]*E_field_factor[1]
Ey_field = self.Source_PolState[j,0]*E_field_factor[1] + self.Source_PolState[j,1]*E_field_factor[2]
Ez_field = self.Source_PolState[j,0]*E_field_factor[3] + self.Source_PolState[j,1]*E_field_factor[4]
IF_ExEx = np.abs(Ex_field)**2
IF_ExEy = Ex_field * np.conj(Ey_field)
IF_ExEz = Ex_field * np.conj(Ez_field)
IF_EyEy = np.abs(Ey_field)**2
IF_EyEz = Ey_field * np.conj(Ez_field)
Source_norm = Source_current*(IF_ExEx + IF_EyEy)
# intermediate transfer functions
ExEx_Gxx_re, ExEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEy_Gxy_re, ExEy_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEx_Gyx_re, EyEx_Gyx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEy_Gyy_re, EyEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEx_Gxy_re, ExEx_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEy_Gxx_re, ExEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEx_Gyy_re, EyEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEy_Gyx_re, EyEy_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEx_Gyy_re, ExEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEy_Gxx_re, EyEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
EyEx_Gxx_re, EyEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i]) #
ExEy_Gyy_re, ExEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i]) #
if inc_option == True:
ExEz_Gxz_re, ExEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gyz_re, EyEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEx_Gxz_re, ExEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gxx_re, ExEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i])
EyEx_Gyz_re, EyEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gyx_re, EyEz_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i])
ExEy_Gxz_re, ExEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gxy_re, ExEz_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,1,:,:,i], self.Hz_det_2D[:,:,i])
EyEy_Gyz_re, EyEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gyy_re, EyEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gyz_re, ExEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gxz_re, EyEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEx_Gxz_re, EyEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
EyEz_Gxx_re, EyEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,0,:,:,i], self.Hz_det_2D[:,:,i])
ExEy_Gyz_re, ExEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEz_Gyy_re, ExEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,1,:,:,i], self.Hz_det_2D[:,:,i])
EyEy_Gxz_re, EyEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,2,:,:,i], self.Hz_det_2D[:,:,i])
ExEx_Gyz_re, ExEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,2,:,:,i], self.Hz_det_2D[:,:,i])
# 2D vectorial transfer functions
self.H_dyadic_2D_OTF[0,0,:,:,idx] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re + EyEx_Gyx_re + EyEy_Gyy_re + EyEz_Gyz_re
self.H_dyadic_2D_OTF[0,1,:,:,idx] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im + EyEx_Gyx_im + EyEy_Gyy_im + EyEz_Gyz_im
self.H_dyadic_2D_OTF[0,2,:,:,idx] = ExEx_Gxx_re - ExEy_Gxy_re + EyEx_Gyx_re - EyEy_Gyy_re
self.H_dyadic_2D_OTF[0,3,:,:,idx] = ExEx_Gxy_re + ExEy_Gxx_re + EyEx_Gyy_re + EyEy_Gyx_re
self.H_dyadic_2D_OTF[0,4,:,:,idx] = ExEx_Gxz_re + ExEz_Gxx_re + EyEx_Gyz_re + EyEz_Gyx_re
self.H_dyadic_2D_OTF[0,5,:,:,idx] = ExEy_Gxz_re + ExEz_Gxy_re + EyEy_Gyz_re + EyEz_Gyy_re
self.H_dyadic_2D_OTF[0,6,:,:,idx] = ExEz_Gxz_re + EyEz_Gyz_re
self.H_dyadic_2D_OTF[1,0,:,:,idx] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re - EyEx_Gyx_re - EyEy_Gyy_re - EyEz_Gyz_re
self.H_dyadic_2D_OTF[1,1,:,:,idx] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im - EyEx_Gyx_im - EyEy_Gyy_im - EyEz_Gyz_im
self.H_dyadic_2D_OTF[1,2,:,:,idx] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_2D_OTF[1,3,:,:,idx] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF[1,4,:,:,idx] = ExEx_Gxz_re + ExEz_Gxx_re - EyEx_Gyz_re - EyEz_Gyx_re
self.H_dyadic_2D_OTF[1,5,:,:,idx] = ExEy_Gxz_re + ExEz_Gxy_re - EyEy_Gyz_re - EyEz_Gyy_re
self.H_dyadic_2D_OTF[1,6,:,:,idx] = ExEz_Gxz_re - EyEz_Gyz_re
self.H_dyadic_2D_OTF[2,0,:,:,idx] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re + EyEx_Gxx_re + EyEy_Gyx_re + EyEz_Gxz_re
self.H_dyadic_2D_OTF[2,1,:,:,idx] = ExEx_Gxy_im + ExEy_Gyy_im + ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_2D_OTF[2,2,:,:,idx] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF[2,3,:,:,idx] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
self.H_dyadic_2D_OTF[2,4,:,:,idx] = ExEx_Gyz_re + ExEz_Gxy_re + EyEx_Gxz_re + EyEz_Gxx_re
self.H_dyadic_2D_OTF[2,5,:,:,idx] = ExEy_Gyz_re + ExEz_Gyy_re + EyEy_Gxz_re + EyEz_Gyx_re
self.H_dyadic_2D_OTF[2,6,:,:,idx] = ExEz_Gyz_re + EyEz_Gxz_re
# transfer functions for S3
if self.N_Stokes == 4:
self.H_dyadic_2D_OTF[3,0,:,:,idx] = -ExEx_Gxy_im - ExEy_Gyy_im - ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_2D_OTF[3,1,:,:,idx] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re - EyEx_Gxx_re - EyEy_Gyx_re - EyEz_Gxz_re
self.H_dyadic_2D_OTF[3,2,:,:,idx] = -ExEx_Gxy_im + ExEy_Gyy_im + EyEx_Gxx_im - EyEy_Gyx_im
self.H_dyadic_2D_OTF[3,3,:,:,idx] = -ExEx_Gyy_im - ExEy_Gxy_im + EyEx_Gyx_im + EyEy_Gxx_im
self.H_dyadic_2D_OTF[3,4,:,:,idx] = -ExEx_Gyz_im - ExEz_Gxy_im + EyEx_Gxz_im + EyEz_Gxx_im
self.H_dyadic_2D_OTF[3,5,:,:,idx] = -ExEy_Gyz_im - ExEz_Gyy_im + EyEy_Gxz_im + EyEz_Gyx_im
self.H_dyadic_2D_OTF[3,6,:,:,idx] = -ExEz_Gyz_im + EyEz_Gxz_im
else:
self.H_dyadic_2D_OTF_in_plane[0,0,:,:,idx] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_2D_OTF_in_plane[0,1,:,:,idx] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF_in_plane[1,0,:,:,idx] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_2D_OTF_in_plane[1,1,:,:,idx] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
def gen_3D_vec_WOTF(self, inc_option):
'''
generate 3D vectorial transfer functions for 3D QUTIPP
'''
if inc_option == True:
self.H_dyadic_OTF = np.zeros((self.N_Stokes, 7, self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
else:
self.H_dyadic_OTF_in_plane = np.zeros((2, 2, self.N_pattern, self.N, self.M, self.N_defocus_3D),dtype='complex64')
# angle-dependent electric field components due to focusing effect
fr = (self.fxx**2 + self.fyy**2)**(0.5)
cos_factor = (1-(self.lambda_illu**2)*(fr**2)*self.Pupil_support)**(0.5)*self.Pupil_support
dc_idx = (fr==0)
nondc_idx = (fr!=0)
E_field_factor = np.zeros((5, self.N, self.M))
E_field_factor[0, nondc_idx] = ((self.fxx[nondc_idx]**2)*cos_factor[nondc_idx]+ self.fyy[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[0, dc_idx] = 1
E_field_factor[1, nondc_idx] = (self.fxx[nondc_idx]*self.fyy[nondc_idx] * (cos_factor[nondc_idx]-1)) / fr[nondc_idx]**2
E_field_factor[2, nondc_idx] = ((self.fyy[nondc_idx]**2)*cos_factor[nondc_idx] + self.fxx[nondc_idx]**2) / fr[nondc_idx]**2
E_field_factor[2, dc_idx] = 1
E_field_factor[3, nondc_idx] = -self.lambda_illu*self.fxx[nondc_idx]
E_field_factor[4, nondc_idx] = -self.lambda_illu*self.fyy[nondc_idx]
# generate dyadic Green's tensor
N_defocus = self.G_tensor_z_upsampling*self.N_defocus_3D
psz = self.psz/self.G_tensor_z_upsampling
if self.z_defocus[0] - self.z_defocus[1] >0:
z = -ifftshift((np.r_[0:N_defocus]-N_defocus//2)*psz)
else:
z = ifftshift((np.r_[0:N_defocus]-N_defocus//2)*psz)
G_fun_z = gen_Greens_function_z(self.fxx, self.fyy, self.Pupil_support, self.lambda_illu, z)
G_real = fftshift(ifft2(G_fun_z, axes=(0,1))/self.ps**2)
G_tensor = gen_dyadic_Greens_tensor(G_real, self.ps, psz, self.lambda_illu, space='Fourier')
G_tensor_z = (ifft(G_tensor, axis=4)/psz)[...,::int(self.G_tensor_z_upsampling)]  # built-in int(): np.int was removed in NumPy 1.24+
# compute transfer functions
OTF_compute = lambda x, y, z: WOTF_3D_compute(x.astype('float32'), y.astype('complex64'),
self.Pupil_obj.astype('complex64'), self.Hz_det_3D.astype('complex64'), \
z.astype('complex64'), self.psz,\
use_gpu=self.use_gpu, gpu_id=self.gpu_id)
for i in range(self.N_pattern):
if self.N_pattern == 1:
Source_current = self.Source.copy()
else:
Source_current = self.Source[i].copy()
# focusing electric field components
Ex_field = self.Source_PolState[i,0]*E_field_factor[0] + self.Source_PolState[i,1]*E_field_factor[1]
Ey_field = self.Source_PolState[i,0]*E_field_factor[1] + self.Source_PolState[i,1]*E_field_factor[2]
Ez_field = self.Source_PolState[i,0]*E_field_factor[3] + self.Source_PolState[i,1]*E_field_factor[4]
IF_ExEx = np.abs(Ex_field)**2
IF_ExEy = Ex_field * np.conj(Ey_field)
IF_ExEz = Ex_field * np.conj(Ez_field)
IF_EyEy = np.abs(Ey_field)**2
IF_EyEz = Ey_field * np.conj(Ez_field)
Source_norm = Source_current*(IF_ExEx + IF_EyEy)
# intermediate transfer functions
ExEx_Gxx_re, ExEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,0]) #
ExEy_Gxy_re, ExEy_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,1]) #
EyEx_Gyx_re, EyEx_Gyx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,1]) #
EyEy_Gyy_re, EyEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,1]) #
ExEx_Gxy_re, ExEx_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,1]) #
ExEy_Gxx_re, ExEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,0]) #
EyEx_Gyy_re, EyEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,1]) #
EyEy_Gyx_re, EyEy_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,1]) #
ExEy_Gyy_re, ExEy_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,1]) #
EyEx_Gxx_re, EyEx_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,0]) #
ExEx_Gyy_re, ExEx_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,1]) #
EyEy_Gxx_re, EyEy_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,0]) #
if inc_option == True:
ExEz_Gxz_re, ExEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,2])
EyEz_Gyz_re, EyEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,2])
ExEx_Gxz_re, ExEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[0,2])
ExEz_Gxx_re, ExEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,0])
EyEx_Gyz_re, EyEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[1,2])
EyEz_Gyx_re, EyEz_Gyx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,1])
ExEy_Gxz_re, ExEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[0,2])
ExEz_Gxy_re, ExEz_Gxy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[0,1])
EyEy_Gyz_re, EyEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[1,2])
EyEz_Gyy_re, EyEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[1,1])
ExEz_Gyz_re, ExEz_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,2])
EyEz_Gxz_re, EyEz_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,2])
EyEx_Gxz_re, EyEx_Gxz_im = OTF_compute(Source_norm, Source_current*IF_ExEy.conj(), G_tensor_z[0,2])
EyEz_Gxx_re, EyEz_Gxx_im = OTF_compute(Source_norm, Source_current*IF_EyEz, G_tensor_z[0,0])
ExEy_Gyz_re, ExEy_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEy, G_tensor_z[1,2])
ExEz_Gyy_re, ExEz_Gyy_im = OTF_compute(Source_norm, Source_current*IF_ExEz, G_tensor_z[1,1])
EyEy_Gxz_re, EyEy_Gxz_im = OTF_compute(Source_norm, Source_current*IF_EyEy, G_tensor_z[0,2])
ExEx_Gyz_re, ExEx_Gyz_im = OTF_compute(Source_norm, Source_current*IF_ExEx, G_tensor_z[1,2])
# 3D vectorial transfer functions
self.H_dyadic_OTF[0,0,i] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re + EyEx_Gyx_re + EyEy_Gyy_re + EyEz_Gyz_re
self.H_dyadic_OTF[0,1,i] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im + EyEx_Gyx_im + EyEy_Gyy_im + EyEz_Gyz_im
self.H_dyadic_OTF[0,2,i] = ExEx_Gxx_re - ExEy_Gxy_re + EyEx_Gyx_re - EyEy_Gyy_re
self.H_dyadic_OTF[0,3,i] = ExEx_Gxy_re + ExEy_Gxx_re + EyEx_Gyy_re + EyEy_Gyx_re
self.H_dyadic_OTF[0,4,i] = ExEx_Gxz_re + ExEz_Gxx_re + EyEx_Gyz_re + EyEz_Gyx_re
self.H_dyadic_OTF[0,5,i] = ExEy_Gxz_re + ExEz_Gxy_re + EyEy_Gyz_re + EyEz_Gyy_re
self.H_dyadic_OTF[0,6,i] = ExEz_Gxz_re + EyEz_Gyz_re
self.H_dyadic_OTF[1,0,i] = ExEx_Gxx_re + ExEy_Gxy_re + ExEz_Gxz_re - EyEx_Gyx_re - EyEy_Gyy_re - EyEz_Gyz_re
self.H_dyadic_OTF[1,1,i] = ExEx_Gxx_im + ExEy_Gxy_im + ExEz_Gxz_im - EyEx_Gyx_im - EyEy_Gyy_im - EyEz_Gyz_im
self.H_dyadic_OTF[1,2,i] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_OTF[1,3,i] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_OTF[1,4,i] = ExEx_Gxz_re + ExEz_Gxx_re - EyEx_Gyz_re - EyEz_Gyx_re
self.H_dyadic_OTF[1,5,i] = ExEy_Gxz_re + ExEz_Gxy_re - EyEy_Gyz_re - EyEz_Gyy_re
self.H_dyadic_OTF[1,6,i] = ExEz_Gxz_re - EyEz_Gyz_re
self.H_dyadic_OTF[2,0,i] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re + EyEx_Gxx_re + EyEy_Gyx_re + EyEz_Gxz_re
self.H_dyadic_OTF[2,1,i] = ExEx_Gxy_im + ExEy_Gyy_im + ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_OTF[2,2,i] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_OTF[2,3,i] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
self.H_dyadic_OTF[2,4,i] = ExEx_Gyz_re + ExEz_Gxy_re + EyEx_Gxz_re + EyEz_Gxx_re
self.H_dyadic_OTF[2,5,i] = ExEy_Gyz_re + ExEz_Gyy_re + EyEy_Gxz_re + EyEz_Gyx_re
self.H_dyadic_OTF[2,6,i] = ExEz_Gyz_re + EyEz_Gxz_re
# transfer functions for S3
if self.N_Stokes == 4:
self.H_dyadic_OTF[3,0,i] = -ExEx_Gxy_im - ExEy_Gyy_im - ExEz_Gyz_im + EyEx_Gxx_im + EyEy_Gyx_im + EyEz_Gxz_im
self.H_dyadic_OTF[3,1,i] = ExEx_Gxy_re + ExEy_Gyy_re + ExEz_Gyz_re - EyEx_Gxx_re - EyEy_Gyx_re - EyEz_Gxz_re
self.H_dyadic_OTF[3,2,i] = -ExEx_Gxy_im + ExEy_Gyy_im + EyEx_Gxx_im - EyEy_Gyx_im
self.H_dyadic_OTF[3,3,i] = -ExEx_Gyy_im - ExEy_Gxy_im + EyEx_Gyx_im + EyEy_Gxx_im
self.H_dyadic_OTF[3,4,i] = -ExEx_Gyz_im - ExEz_Gxy_im + EyEx_Gxz_im + EyEz_Gxx_im
self.H_dyadic_OTF[3,5,i] = -ExEy_Gyz_im - ExEz_Gyy_im + EyEy_Gxz_im + EyEz_Gyx_im
self.H_dyadic_OTF[3,6,i] = -ExEz_Gyz_im + EyEz_Gxz_im
else:
self.H_dyadic_OTF_in_plane[0,0,i] = ExEx_Gxx_re - ExEy_Gxy_re - EyEx_Gyx_re + EyEy_Gyy_re
self.H_dyadic_OTF_in_plane[0,1,i] = ExEx_Gxy_re + ExEy_Gxx_re - EyEx_Gyy_re - EyEy_Gyx_re
self.H_dyadic_OTF_in_plane[1,0,i] = ExEx_Gxy_re - ExEy_Gyy_re + EyEx_Gxx_re - EyEy_Gyx_re
self.H_dyadic_OTF_in_plane[1,1,i] = ExEx_Gyy_re + ExEy_Gxy_re + EyEx_Gyx_re + EyEy_Gxx_re
############## polarization computing function group ##############
def Stokes_recon(self, I_meas):
'''
reconstruct Stokes parameters from polarization-sensitive intensity images
Parameters
----------
I_meas : numpy.ndarray
polarization-sensitive intensity images with the size of (N_channel, ...)
Returns
-------
S_image_recon : numpy.ndarray
reconstructed Stokes parameters with the size of (N_Stokes, ...)
'''
img_shape = I_meas.shape
A_pinv = np.linalg.pinv(self.A_matrix)
S_image_recon = np.reshape(np.dot(A_pinv, I_meas.reshape((self.N_channel, -1))), (self.N_Stokes,)+img_shape[1:])
return S_image_recon
def Stokes_transform(self, S_image_recon):
'''
transform Stokes parameters into normalized Stokes parameters
Parameters
----------
S_image_recon : numpy.ndarray
reconstructed Stokes parameters with the size of (N_Stokes, ...)
Returns
-------
S_transformed : numpy.ndarray
normalized Stokes parameters with the size of (3, ...) or (5, ...)
'''
if self.use_gpu:
S_image_recon = cp.array(S_image_recon)
if self.N_Stokes == 4:
S_transformed = cp.zeros((5,)+S_image_recon.shape[1:])
elif self.N_Stokes == 3:
S_transformed = cp.zeros((3,)+S_image_recon.shape[1:])
else:
if self.N_Stokes == 4:
S_transformed = np.zeros((5,)+S_image_recon.shape[1:])
elif self.N_Stokes == 3:
S_transformed = np.zeros((3,)+S_image_recon.shape[1:])
S_transformed[0] = S_image_recon[0]
if self.N_Stokes == 4:
S_transformed[1] = S_image_recon[1] / S_image_recon[3]
S_transformed[2] = S_image_recon[2] / S_image_recon[3]
S_transformed[3] = S_image_recon[3]
S_transformed[4] = (S_image_recon[1]**2 + S_image_recon[2]**2 + S_image_recon[3]**2)**(1/2) / S_image_recon[0] # DoP
elif self.N_Stokes == 3:
S_transformed[1] = S_image_recon[1] / S_image_recon[0]
S_transformed[2] = S_image_recon[2] / S_image_recon[0]
if self.use_gpu:
S_transformed = cp.asnumpy(S_transformed)
return S_transformed
def Polscope_bg_correction(self, S_image_tm, S_bg_tm, kernel_size=400, poly_order=2):
'''
QLIPP background correction algorithm
Parameters
----------
S_image_tm : numpy.ndarray
normalized Stokes parameters with the size of (3, ...) or (5, ...)
S_bg_tm : numpy.ndarray
normalized background Stokes parameters
kernel_size : int
size of smoothing window for background estimation in 'local' method
poly_order : int
order of polynomial fitting for background estimation in 'local_fit' method
Returns
-------
S_image_tm : numpy.ndarray
background corrected normalized Stokes parameters with the same size as the input Stokes parameters
'''
if self.use_gpu:
S_image_tm = cp.array(S_image_tm)
S_bg_tm = cp.array(S_bg_tm)
dim = S_image_tm.ndim
if dim == 3:
S_image_tm[0] /= S_bg_tm[0]
S_image_tm[1] -= S_bg_tm[1]
S_image_tm[2] -= S_bg_tm[2]
if self.N_Stokes == 4:
S_image_tm[4] /= S_bg_tm[4]
else:
S_image_tm[0] /= S_bg_tm[0,:,:,np.newaxis]
S_image_tm[1] -= S_bg_tm[1,:,:,np.newaxis]
S_image_tm[2] -= S_bg_tm[2,:,:,np.newaxis]
if self.N_Stokes == 4:
S_image_tm[4] /= S_bg_tm[4,:,:,np.newaxis]
if self.bg_option == 'local':
if dim == 3:
S_image_tm[1] -= uniform_filter_2D(S_image_tm[1], size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
S_image_tm[2] -= uniform_filter_2D(S_image_tm[2], size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
else:
if self.use_gpu:
S1_bg = uniform_filter_2D(cp.mean(S_image_tm[1],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
S2_bg = uniform_filter_2D(cp.mean(S_image_tm[2],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
else:
S1_bg = uniform_filter_2D(np.mean(S_image_tm[1],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
S2_bg = uniform_filter_2D(np.mean(S_image_tm[2],axis=-1), size=kernel_size, use_gpu=self.use_gpu, gpu_id=self.gpu_id)
for i in range(self.N_defocus):
S_image_tm[1,:,:,i] -= S1_bg
S_image_tm[2,:,:,i] -= S2_bg
elif self.bg_option == 'local_fit':
if self.use_gpu:
bg_estimator = BackgroundEstimator2D_GPU(gpu_id=self.gpu_id)
if dim != 3:
S1_bg = bg_estimator.get_background(cp.mean(S_image_tm[1],axis=-1), order=poly_order, normalize=False)
S2_bg = bg_estimator.get_background(cp.mean(S_image_tm[2],axis=-1), order=poly_order, normalize=False)
else:
bg_estimator = BackgroundEstimator2D()
if dim != 3:
S1_bg = bg_estimator.get_background(np.mean(S_image_tm[1],axis=-1), order=poly_order, normalize=False)
S2_bg = bg_estimator.get_background(np.mean(S_image_tm[2],axis=-1), order=poly_order, normalize=False)
if dim ==3:
S_image_tm[1] -= bg_estimator.get_background(S_image_tm[1], order=poly_order, normalize=False)
S_image_tm[2] -= bg_estimator.get_background(S_image_tm[2], order=poly_order, normalize=False)
else:
for i in range(self.N_defocus):
S_image_tm[1,:,:,i] -= S1_bg
S_image_tm[2,:,:,i] -= S2_bg
if self.use_gpu:
S_image_tm = cp.asnumpy(S_image_tm)
return S_image_tm
def Polarization_recon(self, S_image_recon):
'''
reconstruction of polarization-related physical properties in QLIPP
Parameters
----------
S_image_recon : numpy.ndarray
normalized Stokes parameters with the size of (3, ...) or (5, ...)
Returns
-------
Recon_para : numpy.ndarray
reconstructed polarization-related physical properties
channel 0 is retardance
channel 1 is in-plane orientation
channel 2 is brightfield
channel 3 is degree of polarization
'''
if self.use_gpu:
S_image_recon = cp.array(S_image_recon)
Recon_para = cp.zeros((self.N_Stokes,)+S_image_recon.shape[1:])
else:
Recon_para = np.zeros((self.N_Stokes,)+S_image_recon.shape[1:])
if self.use_gpu:
if self.N_Stokes == 4:
ret_wrapped = cp.arctan2((S_image_recon[1]**2 + S_image_recon[2]**2)**(1/2) * \
S_image_recon[3], S_image_recon[3]) # retardance
elif self.N_Stokes == 3:
ret_wrapped = cp.arcsin(cp.minimum((S_image_recon[1]**2 + S_image_recon[2]**2)**(0.5),1))
if self.cali == True:
sa_wrapped = 0.5*cp.arctan2(-S_image_recon[1], -S_image_recon[2]) % np.pi # slow-axis
else:
sa_wrapped = 0.5*cp.arctan2(-S_image_recon[1], S_image_recon[2]) % np.pi # slow-axis
else:
if self.N_Stokes == 4:
ret_wrapped = np.arctan2((S_image_recon[1]**2 + S_image_recon[2]**2)**(1/2) * \
S_image_recon[3], S_image_recon[3]) # retardance
elif self.N_Stokes == 3:
ret_wrapped = np.arcsin(np.minimum((S_image_recon[1]**2 + S_image_recon[2]**2)**(0.5),1))
if self.cali == True:
sa_wrapped = 0.5*np.arctan2(-S_image_recon[1], -S_image_recon[2]) % np.pi # slow-axis
else:
sa_wrapped = 0.5*np.arctan2(-S_image_recon[1], S_image_recon[2]) % np.pi # slow-axis
sa_wrapped[ret_wrapped<0] += np.pi/2
ret_wrapped[ret_wrapped<0] += np.pi
Recon_para[0] = ret_wrapped.copy()
Recon_para[1] = sa_wrapped%np.pi
Recon_para[2] = S_image_recon[0] # transmittance
if self.N_Stokes == 4:
Recon_para[3] = S_image_recon[4] # DoP
if self.use_gpu:
Recon_para = cp.asnumpy(Recon_para)
return Recon_para
def Birefringence_recon(self, S1_stack, S2_stack, reg = 1e-3):
# Birefringence deconvolution with slowly varying transmission approximation
if self.use_gpu:
Hu = cp.array(self.Hu, copy=True)
Hp = cp.array(self.Hp, copy=True)
AHA = [cp.sum(cp.abs(Hu)**2 + cp.abs(Hp)**2, axis=2) + reg, \
cp.sum(Hu*cp.conj(Hp) - cp.conj(Hu)*Hp, axis=2), \
-cp.sum(Hu*cp.conj(Hp) - cp.conj(Hu)*Hp, axis=2), \
cp.sum(cp.abs(Hu)**2 + cp.abs(Hp)**2, axis=2) + reg]
S1_stack_f = cp.fft.fft2(cp.array(S1_stack), axes=(0,1))
if self.cali:
S2_stack_f = cp.fft.fft2(-cp.array(S2_stack), axes=(0,1))
else:
S2_stack_f = cp.fft.fft2(cp.array(S2_stack), axes=(0,1))
b_vec = [cp.sum(-cp.conj(Hu)*S1_stack_f + cp.conj(Hp)*S2_stack_f, axis=2), \
cp.sum(cp.conj(Hp)*S1_stack_f + cp.conj(Hu)*S2_stack_f, axis=2)]
else:
AHA = [np.sum( | np.abs(self.Hu) | numpy.abs |
# Copyright 2021 Google LLC
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""Tests for neural_structured_learning.research.neural_clustering.data_generators.partition."""
from absl.testing import absltest
from absl.testing import parameterized
from neural_clustering.data_generators import partition
import numpy as np
class PartitionTest(parameterized.TestCase):
@parameterized.parameters({
'n': 100,
'batch_size': 16,
'alpha': 1
}, {
'n': 100,
'batch_size': 16,
'alpha': 0.1
}, {
'n': 1,
'batch_size': 16,
'alpha': 1
}, {
'n': 100,
'batch_size': 1,
'alpha': 1
})
def test_crp_generator_generate_batch(self, n, batch_size, alpha):
crp_generator = partition.CRPGenerator(alpha=alpha)
partitions = crp_generator.generate_batch(n, batch_size)
self.assertLen(partitions, batch_size)
partition_sums = np.array([pt.sum() for pt in partitions])
np.testing.assert_array_equal(partition_sums, np.array([n] * batch_size))
invalid_partitions = np.array([ | np.sum(pt < 1) | numpy.sum |
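# --- Added sketch (not part of the original test file) ---
# The tests above exercise partition.CRPGenerator, whose implementation is not
# shown here. A minimal Chinese Restaurant Process sampler with the same
# observable behaviour (each partition is an array of positive cluster sizes
# summing to n) might look like the sketch below; this is illustrative only and
# not the actual neural_clustering implementation.
import numpy as np


class SimpleCRPGenerator(object):
  """Samples partitions of n points from a CRP with concentration alpha."""

  def __init__(self, alpha, seed=None):
    self.alpha = alpha
    self.rng = np.random.RandomState(seed)

  def generate(self, n):
    counts = []
    for i in range(n):
      # Customer i joins table k with prob counts[k]/(i+alpha),
      # or opens a new table with prob alpha/(i+alpha).
      probs = np.array(counts + [self.alpha], dtype=float) / (i + self.alpha)
      k = self.rng.choice(len(probs), p=probs)
      if k == len(counts):
        counts.append(1)
      else:
        counts[k] += 1
    return np.array(counts)

  def generate_batch(self, n, batch_size):
    return [self.generate(n) for _ in range(batch_size)]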
#-*-coding:utf-8-*-
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import collections
import itertools
from graph_nets import graphs
from graph_nets import utils_np
from graph_nets import utils_tf
import networkx as nx
import numpy as np
from scipy import spatial
import tensorflow as tf
import random
#@title Helper functions { form-width: "30%" }
# pylint: disable=redefined-outer-name
DISTANCE_WEIGHT_NAME = "distance" # The name for the distance edge attribute.
def pairwise(iterable):
"""s -> (s0,s1), (s1,s2), (s2, s3), ..."""
a, b = itertools.tee(iterable)
next(b, None)
return zip(a, b)
def set_diff(seq0, seq1):
"""Return the set difference between 2 sequences as a list."""
return list(set(seq0) - set(seq1))
def to_one_hot(indices, max_value, axis=-1):
one_hot = np.eye(max_value)[indices]
if axis not in (-1, one_hot.ndim):
one_hot = np.moveaxis(one_hot, -1, axis)
return one_hot
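# --- Added usage note (not part of the original demo) ---
# to_one_hot() works by fancy-indexing an identity matrix, e.g.
#     to_one_hot(np.array([0, 2, 1]), 3)
# returns
#     array([[1., 0., 0.],
#            [0., 0., 1.],
#            [0., 1., 0.]])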
def get_node_dict(graph, attr):
"""Return a `dict` of node:attribute pairs from a graph."""
return {k: v[attr] for k, v in graph.node.items()}
def generate_graph_zero(rand,
num_nodes_min_max,
dimensions=2,
theta=1000.0,
rate=1.0):
"""Creates a connected graph.
The graphs are geographic threshold graphs, but with added edges via a
minimum spanning tree algorithm, to ensure all nodes are connected.
Args:
rand: A random seed for the graph generator. Default= None.
num_nodes_min_max: A sequence [lower, upper) number of nodes per graph.
dimensions: (optional) An `int` number of dimensions for the positions.
Default= 2.
theta: (optional) A `float` threshold parameter for the geographic
threshold graph's threshold. Large values (1000+) make mostly trees. Try
20-60 for good non-trees. Default=1000.0.
rate: (optional) A rate parameter for the node weight exponential sampling
distribution. Default= 1.0.
Returns:
The graph.
"""
# Sample num_nodes.
# num_nodes = rand.randint(*num_nodes_min_max)
num_nodes = 8
# Create geographic threshold graph.
pos_array = rand.uniform(size=(num_nodes, dimensions))
pos = dict(enumerate(pos_array))
weight = dict(enumerate(rand.exponential(rate, size=num_nodes)))
geo_graph = nx.geographical_threshold_graph(
num_nodes, theta, pos=pos, weight=weight)
# Create minimum spanning tree across geo_graph's nodes.
distances = spatial.distance.squareform(spatial.distance.pdist(pos_array))
i_, j_ = np.meshgrid(range(num_nodes), range(num_nodes), indexing="ij")
weighted_edges = list(zip(i_.ravel(), j_.ravel(), distances.ravel()))
mst_graph = nx.Graph()
mst_graph.add_weighted_edges_from(weighted_edges, weight=DISTANCE_WEIGHT_NAME)
mst_graph = nx.minimum_spanning_tree(mst_graph, weight=DISTANCE_WEIGHT_NAME)
# Put geo_graph's node attributes into the mst_graph.
for i in mst_graph.nodes():
mst_graph.nodes[i].update(geo_graph.nodes[i])
# Compose the graphs.
combined_graph = nx.compose_all((mst_graph, geo_graph.copy()))
# Put all distance weights into edge attributes.
for i, j in combined_graph.edges():
combined_graph.get_edge_data(i, j).setdefault(DISTANCE_WEIGHT_NAME,
distances[i, j])
return combined_graph, mst_graph, geo_graph
def generate_graph(rand,
num_nodes_min_max,
dimensions=2,
theta=1000.0,
rate=1.0):
"""Creates a connected graph.
The graphs are geographic threshold graphs, but with added edges via a
minimum spanning tree algorithm, to ensure all nodes are connected.
Args:
rand: A random seed for the graph generator. Default= None.
num_nodes_min_max: A sequence [lower, upper) number of nodes per graph.
dimensions: (optional) An `int` number of dimensions for the positions.
Default= 2.
theta: (optional) A `float` threshold parameter for the geographic
threshold graph's threshold. Large values (1000+) make mostly trees. Try
20-60 for good non-trees. Default=1000.0.
rate: (optional) A rate parameter for the node weight exponential sampling
distribution. Default= 1.0.
Returns:
The graph.
"""
# Sample num_nodes.
num_nodes = rand.randint(*num_nodes_min_max)
# Create geographic threshold graph.
pos_array = rand.uniform(size=(num_nodes, dimensions))
pos = dict(enumerate(pos_array))
weight = dict(enumerate(rand.exponential(rate, size=num_nodes)))
geo_graph = nx.geographical_threshold_graph(
num_nodes, theta, pos=pos, weight=weight)
dg = nx.generators.directed.gn_graph(num_nodes)
geo_graph = nx.compose_all([dg.copy(), geo_graph.copy()])
# Create minimum spanning tree across geo_graph's nodes.
distances = spatial.distance.squareform(spatial.distance.pdist(pos_array))
i_, j_ = np.meshgrid(range(num_nodes), range(num_nodes), indexing="ij")
weighted_edges = list(zip(i_.ravel(), j_.ravel(), distances.ravel()))
mst_graph = nx.Graph()
mst_graph.add_weighted_edges_from(weighted_edges, weight=DISTANCE_WEIGHT_NAME)
mst_graph = nx.minimum_spanning_tree(mst_graph, weight=DISTANCE_WEIGHT_NAME)
# Put geo_graph's node attributes into the mst_graph.
for i in mst_graph.nodes():
mst_graph.nodes[i].update(geo_graph.nodes[i])
# Compose the graphs.
combined_graph = nx.compose_all((geo_graph.copy(), mst_graph))
# Put all distance weights into edge attributes.
for i, j in combined_graph.edges():
combined_graph.get_edge_data(i, j).setdefault(DISTANCE_WEIGHT_NAME,
distances[i, j])
return combined_graph, mst_graph, geo_graph
# return geo_graph, combined_graph, mst_graph
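# --- Added usage sketch (not part of the original demo) ---
# `rand` is expected to be a numpy RandomState (it is used for .uniform,
# .exponential and .randint above). A minimal call with arbitrary parameters:
#
#     rand = np.random.RandomState(seed=1)
#     combined, mst, geo = generate_graph(rand, (8, 17), theta=20.0)
#     print(combined.number_of_nodes(), combined.number_of_edges())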
def add_shortest_path(rand, graph, min_length=1):
"""Samples a shortest path from A to B and adds attributes to indicate it.
Args:
rand: A random seed for the graph generator. Default= None.
graph: A `nx.Graph`.
min_length: (optional) An `int` minimum number of edges in the shortest
path. Default= 1.
Returns:
The `nx.DiGraph` with the source, reachability and path attributes added.
Raises:
ValueError: All shortest paths are below the minimum length
"""
node_connected = nx.all_pairs_node_connectivity(graph)
# path = nx.all_simple_paths(graph, 1, 4)
paths = []
path_nodes = []
# print
# print("node_connected_list", list(node_connected))
# print(type(node_connected))
i = random.choice(list(node_connected))
source = i
# print(i)
node_connected_pair = {}
node_reachable = []
for x, yy in node_connected.items():
for y, l in yy.items():
if x == i and l > 0:
node_connected_pair[x, y] = l
path = nx.all_simple_paths(graph, x, y)
node_reachable.append(y)
for p in list(path):
paths.extend(list(pairwise(p)))
node_pairs = list(node_connected_pair)
paths = set(paths)
path_nodes = set(path_nodes)
digraph = graph
digraph.add_node(source, source=True)
digraph.add_nodes_from(set_diff(digraph.nodes(), [source]), source=False)
digraph.add_nodes_from(node_reachable, reachable=True)
digraph.add_nodes_from(set_diff(digraph.nodes(), node_reachable), reachable=False)
digraph.add_nodes_from(set_diff(digraph.nodes(), path_nodes), solution=False)
digraph.add_nodes_from(path_nodes, solution=True)
digraph.add_edges_from(set_diff(digraph.edges(), paths), solution=False)
digraph.add_edges_from(paths, solution=True)
return digraph
def graph_to_input_target(graph):
"""Returns 2 graphs with input and target feature vectors for training.
Args:
graph: An `nx.DiGraph` instance.
Returns:
The input `nx.DiGraph` instance.
The target `nx.DiGraph` instance.
Raises:
ValueError: unknown node type
"""
def create_feature(attr, fields):
return np.hstack([np.array(attr[field], dtype=float) for field in fields])
input_node_fields = ("weight", "pos", "source", "reachable")
input_edge_fields = ("distance",)
target_node_fields = ("solution",)
target_edge_fields = ("solution",)
input_graph = graph.copy()
target_graph = graph.copy()
solution_length = 0
for node_index, node_feature in graph.nodes(data=True):
input_graph.add_node(
node_index, features=create_feature(node_feature, input_node_fields))
target_node = to_one_hot(
create_feature(node_feature, target_node_fields).astype(int), 2)[0]
target_graph.add_node(node_index, features=target_node)
solution_length += int(node_feature["solution"])
solution_length /= graph.number_of_nodes()
for sender, receiver, features in graph.edges(data=True):
input_graph.add_edge(
sender, receiver, features=create_feature(features, input_edge_fields))
target_edge = to_one_hot(
create_feature(features, target_edge_fields).astype(int), 2)[0]
target_graph.add_edge(sender, receiver, features=target_edge)
input_graph.graph["features"] = np.array([0.0])
target_graph.graph["features"] = np.array([solution_length], dtype=float)
return input_graph, target_graph
def generate_networkx_graphs(rand, num_examples, num_nodes_min_max, theta):
"""Generate graphs for training.
Args:
rand: A random seed (np.RandomState instance).
num_examples: Total number of graphs to generate.
num_nodes_min_max: A 2-tuple with the [lower, upper) number of nodes per
graph. The number of nodes for a graph is uniformly sampled within this
range.
theta: (optional) A `float` threshold parameter for the geographic
threshold graph. Default= the number of nodes.
Returns:
input_graphs: The list of input graphs.
target_graphs: The list of output graphs.
graphs: The list of generated graphs.
"""
input_graphs = []
target_graphs = []
graphs = []
for _ in range(num_examples):
graph = generate_graph(rand, num_nodes_min_max, theta=theta)[0]
graph = add_shortest_path(rand, graph)
input_graph, target_graph = graph_to_input_target(graph)
input_graphs.append(input_graph)
target_graphs.append(target_graph)
graphs.append(graph)
return input_graphs, target_graphs, graphs
def create_placeholders(rand, batch_size, num_nodes_min_max, theta):
"""Creates placeholders for the model training and evaluation.
Args:
rand: A random seed (np.RandomState instance).
batch_size: Total number of graphs per batch.
num_nodes_min_max: A 2-tuple with the [lower, upper) number of nodes per
graph. The number of nodes for a graph is uniformly sampled within this
range.
theta: A `float` threshold parameter for the geographic threshold graph.
Default= the number of nodes.
Returns:
input_ph: The input graph's placeholders, as a graph namedtuple.
target_ph: The target graph's placeholders, as a graph namedtuple.
"""
# Create some example data for inspecting the vector sizes.
input_graphs, target_graphs, _ = generate_networkx_graphs(
rand, batch_size, num_nodes_min_max, theta)
input_ph = utils_tf.placeholders_from_networkxs(input_graphs)
target_ph = utils_tf.placeholders_from_networkxs(target_graphs)
return input_ph, target_ph
def create_feed_dict(rand, batch_size, num_nodes_min_max, theta, input_ph,
target_ph):
"""Creates placeholders for the model training and evaluation.
Args:
rand: A random seed (np.RandomState instance).
batch_size: Total number of graphs per batch.
num_nodes_min_max: A 2-tuple with the [lower, upper) number of nodes per
graph. The number of nodes for a graph is uniformly sampled within this
range.
theta: A `float` threshold parameter for the geographic threshold graph.
Default= the number of nodes.
input_ph: The input graph's placeholders, as a graph namedtuple.
target_ph: The target graph's placeholders, as a graph namedtuple.
Returns:
feed_dict: The feed `dict` of input and target placeholders and data.
raw_graphs: The list of raw networkx graphs.
"""
inputs, targets, raw_graphs = generate_networkx_graphs(
rand, batch_size, num_nodes_min_max, theta)
input_graphs = utils_np.networkxs_to_graphs_tuple(inputs)
target_graphs = utils_np.networkxs_to_graphs_tuple(targets)
feed_dict = {input_ph: input_graphs, target_ph: target_graphs}
return feed_dict, raw_graphs
def compute_accuracy(target, output, use_nodes=True, use_edges=False):
"""Calculate model accuracy.
Returns the number of correctly predicted shortest path nodes and the number
of completely solved graphs (100% correct predictions).
Args:
target: A `graphs.GraphsTuple` that contains the target graph.
output: A `graphs.GraphsTuple` that contains the output graph.
use_nodes: A `bool` indicator of whether to compute node accuracy or not.
use_edges: A `bool` indicator of whether to compute edge accuracy or not.
Returns:
correct: A `float` fraction of correctly labeled nodes/edges.
solved: A `float` fraction of graphs that are completely correctly labeled.
Raises:
ValueError: Nodes or edges (or both) must be used
"""
if not use_nodes and not use_edges:
raise ValueError("Nodes or edges (or both) must be used")
tdds = utils_np.graphs_tuple_to_data_dicts(target)
odds = utils_np.graphs_tuple_to_data_dicts(output)
cs = []
ss = []
for td, od in zip(tdds, odds):
xn = np.argmax(td["nodes"], axis=-1)
yn = np.argmax(od["nodes"], axis=-1)
xe = np.argmax(td["edges"], axis=-1)
ye = np.argmax(od["edges"], axis=-1)
c = []
if use_nodes:
c.append(xn == yn)
if use_edges:
c.append(xe == ye)
c = np.concatenate(c, axis=0)
s = np.all(c)
cs.append(c)
ss.append(s)
correct = np.mean(np.concatenate(cs, axis=0))
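# Illustrative sketch (added, not part of the original): the per-graph logic
# above, stripped of graph_nets, on fake one-hot node labels.
#   td = np.array([[1, 0], [0, 1], [0, 1]])          # target one-hot
#   od = np.array([[.9, .1], [.2, .8], [.6, .4]])    # model output
#   c = np.argmax(td, -1) == np.argmax(od, -1)       # -> [True, True, False]
#   c.mean() -> 0.667 ("correct"), c.all() -> False ("solved")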
from model import Model
import torch
import torch.nn.functional as F
from torch.utils.data import Sampler, BatchSampler
import os
import shutil
import time
import logging
import copy
import types
import importlib.machinery
import numpy as np
import h5py
from dataset import ShapeNetPartDataset, BalancedSampler
import hydra
data_path = "shapenet_part_seg_hdf5_data"
N_PARTS = 50
N_CATS = 16
seg_classes = {'Earphone': [16, 17, 18], 'Motorbike': [30, 31, 32, 33, 34, 35], 'Rocket': [41, 42, 43], 'Car': [8, 9, 10, 11], 'Laptop': [28, 29], 'Cap': [6, 7], 'Skateboard': [44, 45, 46], 'Mug': [36, 37], 'Guitar': [19, 20, 21], 'Bag': [4, 5], 'Lamp': [24, 25, 26, 27], 'Table': [47, 48, 49], 'Airplane': [0, 1, 2, 3], 'Pistol': [38, 39, 40], 'Chair': [12, 13, 14, 15], 'Knife': [22, 23]}
seg_label_to_cat = {} # {0:Airplane, 1:Airplane, ...49:Table}
for cat in seg_classes.keys():
for label in seg_classes[cat]:
seg_label_to_cat[label] = cat
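# Illustrative sanity check (added, not in the original): the reverse mapping
# sends any part label back to its category name.
assert seg_label_to_cat[17] == 'Earphone' and seg_label_to_cat[49] == 'Table'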
def load_train_set(**kwargs):
# load data
f0 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train0.h5')), 'r')
f1 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train1.h5')), 'r')
f2 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train2.h5')), 'r')
f3 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train3.h5')), 'r')
f4 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train4.h5')), 'r')
f5 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_train5.h5')), 'r')
f6 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_val0.h5')), 'r')
f = [f0, f1, f2, f3, f4, f5, f6]
data = f[0]['data'][:]
label = f[0]['label'][:]
seg = f[0]['pid'][:]
for i in range(1, 7):
data = np.concatenate((data, f[i]['data'][:]), axis=0)
label = np.concatenate((label, f[i]['label'][:]), axis=0)
seg = np.concatenate((seg, f[i]['pid'][:]), axis=0)
for ff in f:
ff.close()
print(data.shape, label.shape, seg.shape)
return ShapeNetPartDataset(data, label, seg, **kwargs)
def load_test_set(**kwargs):
# load data
f0 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_test0.h5')), 'r')
f1 = h5py.File(hydra.utils.to_absolute_path(os.path.join(data_path, 'ply_data_test1.h5')), 'r')
f = [f0, f1]
data = f[0]['data'][:]
label = f[0]['label'][:]
seg = f[0]['pid'][:]
for i in range(1, 2):
data = np.concatenate((data, f[i]['data'][:]), axis=0)
label = np.concatenate((label, f[i]['label'][:]), axis=0)
seg = np.concatenate((seg, f[i]['pid'][:]), axis=0)
for ff in f:
ff.close()
print(data.shape, label.shape, seg.shape)
return ShapeNetPartDataset(data, label, seg, **kwargs)
import hydra
from multiprocessing import cpu_count
@hydra.main(config_path='config', config_name='shapenet')
def main(cfg):
log_dir, batch_size, resume, num_workers = '.', cfg.batch_size, cfg.resume, cpu_count() // 2
if not os.path.isdir(log_dir):
os.mkdir(log_dir)
logger = logging.getLogger(__name__)
logger.info(cfg)
torch.backends.cudnn.benchmark = True
# Load the model
model = Model(50, cfg)
model.cuda()
if resume > 0:
model.load_state_dict(torch.load(os.path.join(log_dir, "state%d.pkl" % resume)))
logger.info("{} paramerters in total".format(sum(x.numel() for x in model.parameters())))
train_set = load_train_set(rand_rot=(cfg.rot[:2] == 'AR'), aug=True, bw=cfg.bw)
test_set = load_test_set(rand_rot=(cfg.rot[2:] == 'AR'), aug=False, bw=cfg.bw)
sampler = BatchSampler(BalancedSampler(train_set), batch_size, False)
train_loader = torch.utils.data.DataLoader(train_set, batch_sampler=sampler, shuffle=False, num_workers=num_workers,
pin_memory=True, drop_last=False)
optimizer = torch.optim.Adam(model.parameters(), lr=0)
test_loader = torch.utils.data.DataLoader(test_set, batch_size=batch_size, shuffle=False, num_workers=num_workers, pin_memory=False, drop_last=False)
def train_step(data, target_index, target, cat_onehot, backward=True):
model.train()
data, target_index, target = data.cuda(), target_index.cuda(), target.cuda()
_, prediction = model(data, target_index, cat_onehot)
prediction = prediction.view(-1, 50)
target = target.view(-1)
loss = F.nll_loss(prediction, target)
if backward:
optimizer.zero_grad()
loss.backward()
# torch.nn.utils.clip_grad_norm(model.parameters(), 1e-4)
optimizer.step()
correct = prediction.data.max(1)[1].eq(target.data).float().cpu().mean()
# pred = prediction.data.argmax(1).cpu().numpy()
# print(','.join([str(np.count_nonzero(pred == i)) for i in range(N_PARTS)]))
return loss.item(), correct.item()
def get_learning_rate(epoch):
limits = [5, 10, 15, 20, 40]
lrs = [0.01, 0.005, 0.001, 0.0005, 0.0001, 5e-5]
assert len(lrs) == len(limits) + 1
for lim, lr in zip(limits, lrs):
if epoch < lim:
return lr
return lrs[-1]
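# Example values of the schedule above (illustrative, not in the original):
# get_learning_rate(0) -> 0.01, get_learning_rate(7) -> 0.005,
# get_learning_rate(12) -> 0.001, get_learning_rate(18) -> 0.0005,
# get_learning_rate(25) -> 0.0001, get_learning_rate(50) -> 5e-5.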
for epoch in range(resume, cfg.max_epoch):
np.random.seed(epoch)
lr = get_learning_rate(epoch)
logger.info("learning rate = {} and batch size = {}".format(lr, cfg.batch_size))
for p in optimizer.param_groups:
p['lr'] = lr
total_loss = 0
total_correct = 0
time_before_load = time.perf_counter()
for batch_idx, (data, target_index, target, _, category) in enumerate(train_loader):
# Transform category labels to one_hot.
category_labels = torch.LongTensor(category)
one_hot_labels = torch.zeros(category.size(0), 16).scatter_(1, category_labels, 1).cuda()
time_after_load = time.perf_counter()
time_before_step = time.perf_counter()
loss, correct = train_step(data, target_index, target, one_hot_labels, True)
total_loss += loss
total_correct += correct
logger.info("[{}:{}/{}] LOSS={:.2} <LOSS>={:.2} ACC={:.2} <ACC>={:.2} time={:.2}+{:.2}".format(
epoch, batch_idx, len(train_loader),
loss, total_loss / (batch_idx + 1),
correct, total_correct / (batch_idx + 1),
time_after_load - time_before_load,
time.perf_counter() - time_before_step))
time_before_load = time.perf_counter()
# test
model.eval()
total_correct = 0
mean_correct = 0
shape_ious = {cat: [] for cat in seg_classes.keys()}
for batch_idx, (data, target_index, target, pt_cloud, category) in enumerate(test_loader):
model.eval()
# Transform category labels to one_hot.
category_labels = torch.LongTensor(category)
one_hot_labels = torch.zeros(category.size(0), 16).scatter_(1, category_labels, 1).cuda()
data, target_index, target = data.cuda(), target_index.cuda(), target.cuda()
with torch.no_grad():
_, prediction = model(data, target_index, one_hot_labels)
prediction = prediction.view(-1, 2048, 50)
target = target.view(-1, 2048)
for j in range(target.size(0)):
cat = seg_label_to_cat[target.cpu().numpy()[j][0]]
prediction_np = prediction.cpu().numpy()[j][:, seg_classes[cat]].argmax(1) + seg_classes[cat][0]
target_np = target.cpu().numpy()[j]
correct = np.mean((prediction_np == target_np).astype(np.float32))
total_correct += correct
segp = prediction_np
segl = target_np
part_ious = [0.0 for _ in range(len(seg_classes[cat]))]
for l in seg_classes[cat]:
if (np.sum(segl == l) == 0) and (np.sum(segp == l) == 0):
import copy
import logging
import math
import matplotlib.pyplot as plt
import multiprocessing as mp
import numpy as np
import random
import time
import torch
import torch.multiprocessing as tmp
import torch.nn.functional as F
import torch.tensor as tt
from torchvision.utils import save_image
from dist import Master, Worker
from net import CAModel
from pool import CustomPool
from utils import load_emoji, to_rgb, visualize_batch, append_file, write_file, export_model, dmg
from weight_updates import hebbian_update
HIDDEN_SIZE = None
class EvolutionStrategy:
"""Master class for performing an evolution.
Keeps track of hyperparameters, weights/coeffs.
Contains methods for running the environment, evaluate performances and update parameters.
"""
def __init__(self, args):
self.iterations = args.iter
self.learning_rate = args.lr
self.sigma = args.sigma
self.pop_size = args.pop_size
self.fire_rate = args.fire_rate
self.target_size = args.size
self.target_padding = args.pad
self.new_size = self.target_size + 2 * self.target_padding
self.channel_n = args.channels
self.hidden_size = args.hidden_size
HIDDEN_SIZE = self.hidden_size
self.target_img = load_emoji(args.emoji, self.target_size)
self.use_hebb = args.hebb
self.use_pool = args.pool
self.damage = args.damage
self.damageChannels = args.damageChannels
self.use_mp = args.use_mp
self.decay_state = 0
self.log_main_every = 10
self.hit_goal = False
self.cross_machine = args.cross_machine
self.is_master = args.master
self.nodes = args.nodes
if self.damage > 0:
if (not self.use_pool) and self.damage > 3:
raise ValueError("use_pool needs to be true and damage_bottom_n < 4.")
if self.cross_machine:
if self.is_master:
self.master = Master(nodes=args.nodes)
else:
self.worker = Worker(run_id=0)
p = self.target_padding
self.pad_target = F.pad(tt(self.target_img), (0, 0, p, p, p, p))
h, w = self.pad_target.shape[:2]
self.seed = np.zeros([h, w, self.channel_n], np.float64)
self.seed[h // 2, w // 2, 3:] = 1.0
if self.use_pool:
self.pool_size = 1024
self.batch_size = 4
self.pool = CustomPool(self.seed, self.pool_size)
else:
self.batch_size = 1
if self.use_hebb:
self.coefficients_per_synapse = 5
plastic_weights = 3 * self.channel_n * self.hidden_size + self.hidden_size * self.channel_n
self.coeffs_start_interval = 0.001
self.coeffs = np.random.uniform(-self.coeffs_start_interval, self.coeffs_start_interval,
(plastic_weights, self.coefficients_per_synapse))
self.net = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size, use_hebb=True)
else:
self.net = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size)
self.parameters_shape = [tuple(w.shape) for w in self.net.parameters()]
self.log_folder = args.log_folder
logging.basicConfig(filename=self.log_folder + "/logging.txt", format='%(message)s', filemode="w",
level=logging.INFO)
if args.pre_trained != "":
if self.use_hebb:
self.coeffs = np.load(args.pre_trained)
else:
self.load_model(args.pre_trained)
logging.info("lr/(pop*sigma) at start: " + str(self.learning_rate / (self.pop_size * self.sigma)))
# For logging
self.x_range = []
self.y_lin = []
self.avg = []
self.avg_iter = []
self.losses_main = []
self.iter_main = []
t_rgb = to_rgb(self.pad_target).permute(2, 0, 1)
save_image(t_rgb, self.log_folder + "/target_image.png")
def load_model(self, path):
"""Load a PyTorch model from path."""
self.net.load_state_dict(torch.load(path))
self.net.double()
def fitness_shaping(self, x):
"""Sort x and and map x to linear values between -0.5 and 0.5
Return standard score of x
"""
shaped = np.zeros(len(x))
shaped[x.argsort()] = np.arange(len(x), dtype=np.float64)
shaped /= (len(x) - 1)
shaped -= 0.5
shaped = (shaped - shaped.mean()) / shaped.std()
return shaped
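# Worked example (illustrative, not in the original): for x = [0.1, 0.5, 0.3]
# the ranks are [0, 2, 1], rescaled to [-0.5, 0.5, 0.0] and standardised to
# roughly [-1.22, 1.22, 0.0], so only the relative ordering of fitnesses
# matters, not their scale.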
def update_coeffs(self, fitnesses, epsilons):
"""Update parent Hebbian coefficients using evaluated mutants and fitness."""
fitnesses = self.fitness_shaping(fitnesses)
for index, c in enumerate(self.coeffs):
layer_population = np.array([p[index] for p in epsilons])
update_factor = self.learning_rate / (self.pop_size * self.sigma)
self.coeffs[index] = c + update_factor * np.dot(layer_population.T, fitnesses).T
def update_parameters(self, fitnesses, epsilons):
"""Update parent network weights using evaluated mutants and fitness."""
fitnesses = self.fitness_shaping(fitnesses)
for i, e in enumerate(epsilons):
for j, w in enumerate(self.net.parameters()):
w.data += self.learning_rate * 1 / (self.pop_size * self.sigma) * fitnesses[i] * e[j]
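# Note (added): this is the standard OpenAI-ES estimator; each weight tensor
# moves by lr / (pop_size * sigma) * sum_i f_i * eps_i, a Monte Carlo estimate
# of the gradient of expected (shaped) fitness with respect to the weights.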
def get_population(self, use_seed=None):
"""Return an array with values sampled from N(0, sigma).
The shape of the array is (pop_size, (layer1_size, layer2_size)) using ES and (pop_size, plastic_weights, 5)
"""
if use_seed is not None:
np.random.seed(use_seed)
temp_pop = self.pop_size
if self.is_master:
temp_pop /= self.nodes
eps = []
if self.use_hebb:
layers = self.coeffs
for i in range(int(temp_pop / 2)):
e = []
e2 = []
for w in layers:
j = np.random.randn(*w.shape) * self.sigma
e.append(j)
e2.append(-j)
eps.append(e)
eps.append(e2)
else:
layers = self.parameters_shape
for i in range(int(temp_pop / 2)):
e = []
e2 = []
for w in layers:
j = np.random.randn(*w) * self.sigma
e.append(j)
e2.append(-j)
eps.append(e)
eps.append(e2)
return np.array(eps, dtype=np.object)
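# Note (added): perturbations are generated in antithetic pairs (e, -e), which
# reduces the variance of the ES gradient estimate for a given population size.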
def train_step_hebb(self, model_try, coeffs_try, x):
"""Perform a generation of CA. Initialize a random net and update weights in every update step using
trained coeffs.
Return output x and loss
"""
torch.seed()
losses = torch.zeros(x.shape[0])
for j, x0 in enumerate(x): # Iterate over batch
model_try.apply(weights_init)
model_try.fc1.weight.zero_()
x0 = x0[None, ...]
weights1_2, weights2_3 = list(model_try.parameters())
weights1_2 = weights1_2.detach().numpy()
weights2_3 = weights2_3.detach().numpy()
iter_n = torch.randint(30, 40, (1,)).item() # Episode
for i in range(iter_n):
o0, o1, x0 = model_try(x0)
weights1_2, weights2_3 = hebbian_update(coeffs_try, weights1_2, weights2_3, o0.numpy(),
o1.numpy(), x0.numpy())
(a, b) = (0, 1)
list(model_try.parameters())[a].data /= list(model_try.parameters())[a].__abs__().max()
list(model_try.parameters())[b].data /= list(model_try.parameters())[b].__abs__().max()
list(model_try.parameters())[a].data *= 0.4
list(model_try.parameters())[b].data *= 0.4
loss = model_try.loss_f(x0, self.pad_target)
loss = torch.mean(loss)
losses[j] = loss.item()
x[j] = x0[0, ...]
loss = torch.mean(losses)
return x, loss.item()
def train_step_es(self, model_try, x):
"""Perform a generation of CA using trained net.
Return output x and loss
"""
torch.seed()
iter_n = torch.randint(30, 40, (1,)).item()
for i in range(iter_n): # Episode
x = model_try(x)
loss = self.net.loss_f(x, self.pad_target)
loss = torch.mean(loss)
return x, loss.item()
def get_fitness_hebb(self, epsilon, x0, pid, q=None):
"""Method that start a generation of Hebbian ES.
Return output from generation x and its fitness
"""
model_try = CAModel(channel_n=self.channel_n, fire_rate=self.fire_rate, new_size_pad=self.new_size,
disable_grad=True, hidden_size=self.hidden_size, batch_size=self.batch_size, use_hebb=True)
torch.seed()
model_try.apply(weights_init)
coeffs_try = self.coeffs.copy()
coeffs_try += epsilon
x, loss = self.train_step_hebb(model_try, coeffs_try, x0.clone())
fitness = -loss
if not math.isfinite(fitness):
raise ValueError('Fitness ' + str(fitness) + '. Loss: ' + str(loss))
if self.use_mp:
q.put((x, fitness, pid))
return
return x, fitness
def get_fitness_es(self, epsilon, x0, pid, q=None):
"""Method that start a generation of ES.
Return output from generation x and its fitness
"""
model_try = copy.deepcopy(self.net)
if epsilon is not None:
for i, w in enumerate(model_try.parameters()):
w.data += torch.tensor(epsilon[i])
x, loss = self.train_step_es(model_try, x0)
fitness = -loss
if not math.isfinite(fitness):
raise ValueError('Encountered non-number value in loss. Fitness ' + str(fitness) + '. Loss: ' + str(loss))
if self.use_mp:
q.put((x, fitness, pid))
return
return x, fitness
def evaluate_main(self, x0):
"""Return output and fitness from a generation using unperturbed weights/coeffs"""
if self.use_hebb:
x_main, loss_main = self.train_step_hebb(self.net, self.coeffs, x0.clone())
fit_main = - loss_main
else:
x_main, loss_main = self.train_step_es(self.net, x0.clone())
fit_main = - loss_main
return x_main, fit_main
def create_plots(self, x_range, y_lin, avg_iter, avg, iter_main, losses_main):
"""Plot population's fitnesses, average fitnesses and main network's fitnesses.
Two plots, one for all iterations so far, and one for the last 100 iterations.
"""
plt.clf()
plt.scatter(x_range, np.log10(y_lin), color="blue", s=0.5)
plt.plot(avg_iter, np.log10(avg), color='pink')
plt.plot(iter_main, np.log10(losses_main), color='red', alpha=0.7)
plt.title("Log-loss for " + self.log_folder)
plt.savefig(self.log_folder + "/log_loss_over_time.png")
if len(x_range) >= 100 * self.pop_size:
# log 10, last 100 iters
plt.clf()
plt.scatter(x_range[-100 * self.pop_size:], np.log10(y_lin[-100 * self.pop_size:]), s=0.5)
plt.plot(avg_iter[-100:], np.log10(avg[-100:]), color='red')
plt.title("Log-loss last 100 for " + self.log_folder)
plt.savefig(self.log_folder + "/log_loss_over_time_last100.png")
def save_data(self, buffer, x_range, y_lin, iter_main, losses_main, iteration):
"""Save raw population and main network fitnesses to a csv file on the format: iteration, fitness"""
if len(x_range) > 0:
points = buffer * self.pop_size
append_file(self.log_folder + '/raw/losses.csv', x_range[-points:], y_lin[-points:])
# this one overwrites
write_file(self.log_folder + '/raw/main_losses.csv', iter_main, losses_main)
if self.use_hebb:
np.save(self.log_folder + "/models/" + str(iteration) + '.npy', self.coeffs)
else:
export_model(self.net, self.log_folder + "/models/saved_model_" + str(iteration) + ".pt")
def log(self, fitnesses, iteration, x0=None, xs=None):
"""Function to add fitnesses to arrays and plot/save data at iteration intervals."""
if x0 is None:
x0 = tt(np.repeat(self.seed[None, ...], self.batch_size, 0))
# Logging/plotting
for k, fit in enumerate(fitnesses):
self.x_range.append(iteration)
self.y_lin.append(-fit)
self.avg.append(-np.average(fitnesses))
self.avg_iter.append(iteration)
# Evaluate main net/coeffs
if iteration % self.log_main_every == 0:
x_main, fit_main = self.evaluate_main(x0.clone())
self.losses_main.append(-fit_main)
self.iter_main.append(iteration)
# Visualize batch and plot points
if iteration % 500 == 0:
if xs is None:
visualize_batch([x_main], iteration, self.log_folder, nrow=self.batch_size)
else:
selected = xs[np.argmax(fitnesses)]
visualize_batch([x0.clone(), selected, x_main], iteration, self.log_folder, nrow=self.batch_size)
self.create_plots(self.x_range, self.y_lin, self.avg_iter, self.avg, self.iter_main, self.losses_main)
# Save points and weights/coeffs to file
buffer = 1000
if iteration % buffer == 0:
self.save_data(buffer, self.x_range, self.y_lin, self.iter_main, self.losses_main, iteration)
mean_fit = np.mean(fitnesses)
# Decay learning rate
if mean_fit >= -0.03 and self.decay_state == 0:
self.learning_rate *= 0.3
self.decay_state += 1
logging.info("Setting lr to " + str(self.learning_rate) + " at iter " + str(iteration))
elif mean_fit >= -0.01 and self.decay_state == 1:
self.learning_rate *= 0.5
self.decay_state += 1
logging.info("Setting lr to " + str(self.learning_rate) + " at iter " + str(iteration))
print('step: %d, mean fitness: %.3f, best fitness: %.3f' % (iteration, mean_fit, np.max(fitnesses)))
# check = 250
# if (len(self.losses_main) > check//self.log_main_every) and not self.hit_goal:
# mean_main_loss = np.mean(self.losses_main[-(check//self.log_main_every):])
# if mean_main_loss <= 0.001:
# logging.info("Hit goal at " + str(iteration))
# if self.use_hebb:
# np.save(self.log_folder + "/models/" + str(iteration) + "good" + '.npy', self.coeffs)
# else:
# export_model(self.net, self.log_folder + "/models/saved_model_" + str(iteration) + "good" + ".pt")
# self.hit_goal = True
def run_master(self):
"""Send weights/coeffs to worker nodes and poll for results.
Update weights/coeffs when all results are present.
"""
# ticM = time.time()
for iter in range(self.iterations):
# logging.info("Sending weights")
weights_to_send = self.coeffs if self.use_hebb else self.net.state_dict()
self.master.send_weights(weights_to_send)
# logging.info("Waiting for results...")
fitnesses, seeds = self.master.wait_for_results()
# logging.info("Got all results!")
fitnesses = np.array(fitnesses)
import hoki.age_utils as au
import hoki.load as load
import pkg_resources
import numpy as np
import pandas as pd
import pytest
from hoki.utils.exceptions import HokiFatalError, HokiUserWarning, HokiFormatError
# Loading Data
data_path = pkg_resources.resource_filename('hoki', 'data')
hr_file = data_path + '/hrs-sin-imf_chab100.zem4.dat'
cmd_file = data_path + '/cmd_bv_z002_bin_imf135_300'
myhrd = load.model_output(hr_file, hr_type='TL')
mycmd = load.unpickle(cmd_file)
# Creating Test Inputs
fake_hrd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'star3'],
'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input = pd.DataFrame.from_dict({'logT': np.array(['bla']),
'logL': np.array([4.83])})
no_name_input = pd.DataFrame.from_dict({'logT': np.array([4.58, 4.48, 4.14]),
'logL': np.array([4.83, 5.07, 5.40])})
bad_hrd_input2 = pd.DataFrame.from_dict({'logT': np.array([4.58, 'bla']),
'logL': np.array([4.83, 2.0])})
fake_cmd_input = pd.DataFrame.from_dict({'name': ['star1', 'star2', 'STAR3'],
'col': np.array([-0.3, 0.5, -0.25]),
'mag': np.array([-5, -10, -1])})
bad_cmd_input = pd.DataFrame.from_dict({'col': np.array(['bla']),
'mag': np.array([-5])})
# Testing Suite
class TestAgeWizard(object):
def test_init_basic(self):
assert au.AgeWizard(obs_df=fake_hrd_input, model=hr_file), "Loading HRD file path failed"
assert au.AgeWizard(obs_df=fake_hrd_input, model=myhrd), "Loading with hoki.hrdiagrams.HRDiagram failed"
assert au.AgeWizard(obs_df=fake_cmd_input, model=mycmd), 'Loading with hoki.cmd.CMD'
assert au.AgeWizard(obs_df=fake_cmd_input, model=cmd_file), 'Loading CMD from frile failed'
def test_bad_init(self):
with pytest.raises(HokiFatalError):
__, __ = au.AgeWizard(obs_df=fake_cmd_input, model='sdfghj'), 'HokiFatalError should be raised'
with pytest.raises(HokiFormatError):
__, __ = au.AgeWizard(obs_df='edrftgyhu', model=cmd_file), 'HokiFormatError should be raised'
def test_combine_pdfs_not_you(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf(not_you=['star1'])
cpdf = wiz.sample_pdf.pdf
assert np.sum(np.isclose([cpdf[0], cpdf[9]], [0.0, 0.7231526323765232])) == 2, "combined pdf is not right"
def test_most_likely_age(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
assert np.isclose(wiz.most_likely_age[0], 6.9), "Most likely age wrong"
def test_most_likely_ages(self):
wiz = au.AgeWizard(obs_df=fake_hrd_input, model=hr_file)
a = wiz.most_likely_ages
assert np.sum(np.isclose([a[0], a[1], a[2]], [6.9, 6.9, 6.9])) == 3, "Most likely ages not right"
def test_combine_pdfs(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
wiz.calculate_sample_pdf()
assert np.isclose(wiz.sample_pdf.pdf[9],0.551756734145878), "Something is wrong with the combined_Age PDF"
def test_calculate_p_given_age_range(self):
wiz = au.AgeWizard(fake_hrd_input, myhrd)
probas = wiz.calculate_p_given_age_range([6.7, 6.9])
assert np.sum(np.isclose([probas[0], probas[1], probas[2]],
[0.515233714952414, 0.7920611550946726, 0.6542441096583737])) == 3, \
"probability given age range is messed up"
class TestFindCoordinates(object):
def test_hrd_input(self):
T_coord, L_coord = au.find_coordinates(obs_df=fake_hrd_input, model=myhrd)
assert np.sum(
np.isclose([T_coord[0], T_coord[1], T_coord[2]], [45, 44, 40])) == 3, "Temperature coordinates wrong"
assert np.sum(
np.isclose([L_coord[0], L_coord[1], L_coord[2]], [77, 80, 83])) == 3, "Luminosity coordinates wrong"
def test_cmd_input(self):
col_coord, mag_range = au.find_coordinates(obs_df=fake_cmd_input, model=mycmd)
assert np.sum(
np.isclose([col_coord[0], col_coord[1], col_coord[2]], [27, 35, 27])) == 3, "color coordinates wrong"
assert np.sum(
np.isclose([mag_range[0], mag_range[1], mag_range[2]], [90, 40, 130])) == 3, "magnitude coordinates wrong"
class TestFindCMDCoordinates(object):
def test_fake_input(self):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=fake_cmd_input, mycmd=mycmd)
assert np.sum(
np.isclose([col_coord[0], col_coord[1], col_coord[2]], [27, 35, 27])) == 3, "color coordinates wrong"
assert np.sum(
np.isclose([mag_range[0], mag_range[1], mag_range[2]], [90, 40, 130])) == 3, "magnitude coordinates wrong"
def test_bad_input(self):
with pytest.raises(HokiFormatError):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=bad_hrd_input, mycmd=mycmd)
def test_bad_input_2(self):
col_coord, mag_range = au._find_cmd_coordinates(obs_df=bad_cmd_input, mycmd=mycmd)
#assert np.siz(col_coord[0]), "This should be a nan"
assert np.isclose(mag_range[0], 90), "This L coordinate is wrong - test_bad_input."
class TestFindHRDCoordinates(object):
def test_fake_input(self):
T_coord, L_coord = au._find_hrd_coordinates(obs_df=fake_hrd_input, myhrd=myhrd)
assert np.sum(
np.isclose([T_coord[0], T_coord[1], T_coord[2]], [45, 44, 40])) == 3, "Temperature coordinates wrong"
assert np.sum(
np.isclose([L_coord[0], L_coord[1], L_coord[2]], [77, 80, 83])) == 3, "Luminosity coordinates wrong"
def test_bad_input(self):
with pytest.raises(HokiFormatError):
__, __ = au._find_hrd_coordinates(obs_df=bad_cmd_input, mycmd=mycmd)
def test_bad_input_2(self):
T_coord, L_coord = au._find_hrd_coordinates(obs_df=bad_hrd_input, myhrd=myhrd)
#assert np.isnan(T_coord[0]), "This should be a nan"
assert np.isclose(L_coord[0], 77), "This L coordinate is wrong - test_bad_input."
class TestNormalise1D(object):
def test_it_runs(self):
au.normalise_1d(np.array([0, 1, 4, 5, 0, 1, 7, 8]), crop_the_future=False)
def test_basic(self):
norm = au.normalise_1d(np.array([0, 0, 1, 0, 0, 0, 0]), crop_the_future=False)
assert norm[2] == 1, 'Normalisation done wrong'
assert sum(norm) == 1, "Normalisaton done wrong"
class TestCalculatePDFs(object):
def test_fake_input(self):
pdf_df = au.calculate_individual_pdfs(fake_hrd_input, myhrd)
assert 'star1' in pdf_df.columns, "Column name issue"
assert int(sum(pdf_df.star1)) == 1, "PDF not calculated correctly"
def test_input_without_name(self):
pdf_df = au.calculate_individual_pdfs(no_name_input, myhrd)
assert 's1' in pdf_df.columns, "Column names not created right"
def test_bad_input(self):
pdf_df = au.calculate_individual_pdfs(bad_hrd_input2, myhrd)
assert not np.isnan(sum(pdf_df.s0)), "something went wrong"
#assert np.isnan(sum(distributions_df.s1)), "somwthing went wrong"
class TestCalculateSamplePDF(object):
def test_basic(self):
distributions = au.calculate_distributions(fake_hrd_input, myhrd)
combined = au.calculate_sample_pdf(distributions)
assert np.isclose(combined.pdf[9], 0.2715379752638662), "combined PDF not right"
def test_drop_bad(self):
distributions = au.calculate_distributions(fake_hrd_input, myhrd)
combined = au.calculate_sample_pdf(distributions, not_you=[3])
assert np.isclose(combined.pdf[9], 0.2715379752638662), "combined PDF not right"
def test_drop_good(self):
distributions = au.calculate_distributions(fake_hrd_input, myhrd)
combined = au.calculate_sample_pdf(distributions, not_you=['star1'])
assert np.isclose(combined.pdf[9], 0.774602971512809)
import numpy as np
import theano
import theano.tensor as T
__event_x = theano.shared(np.zeros((1,), dtype="float64"), 'event_x')
__event_y = theano.shared(np.zeros((1,), dtype="float64"), 'event_y')
__event_z = theano.shared(np.zeros((1,), dtype="float64"), 'event_z')
__event = [__event_x, __event_y, __event_z]
def set_event(e):
__event_x.set_value(e[:, 0])
__event_y.set_value(e[:, 1])
__event_z.set_value(e[:, 2])
__sigma = theano.shared(0.05, 'sigma', allow_downcast=True)
def set_sigma(s):
__sigma.set_value(s * s)
def __hessian(cost, variables):
hessians = []
for input1 in variables:
d_cost_d_input1 = T.grad(cost, input1)
hessians.append([
T.grad(d_cost_d_input1, input2) for input2 in variables
])
return hessians
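# Hedged illustration (added, not in the original): __hessian builds the full
# matrix of second derivatives by nesting T.grad. For cost = theta**2 * phi,
# the entry d2cost/(dtheta dphi) is the symbolic graph for 2*theta; each entry
# is compiled into its own theano function further below.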
__theta = T.dscalar("theta")
__phi = T.dscalar("phi")
### Normalized direction vector
__n_x = T.sin(__theta)
__n_y = T.cos(__theta) * T.sin(__phi)
__n_z = T.cos(__theta) * T.cos(__phi)
__z0 = theano.shared(0.0, 'z0', allow_downcast=True)
def set_z0(z0):
__z0.set_value(z0)
_n = [__n_x, __n_y, __n_z]
__scalar = (__event_z - __z0) / __n_z
### Difference between xy-projection of n and hit
__delta_square = (__scalar * __n_x - __event_x) ** 2 + (__scalar * __n_y - __event_y) ** 2
__r = T.sum(T.exp(-__delta_square / __sigma))
__linear_retina_response = theano.function([__theta, __phi], __r)
linear_retina_response = lambda params: __linear_retina_response(*params)
neg_linear_retina_response = lambda params: -__linear_retina_response(*params)
__linear_retina_response_jac = theano.function([__theta, __phi], theano.gradient.jacobian(__r, [__theta, __phi]))
linear_retina_response_jac = lambda params: np.array(__linear_retina_response_jac(*params))
neg_linear_retina_response_jac = lambda params: -np.array(__linear_retina_response_jac(*params))
__second_derivatives = [
[theano.function([__theta, __phi], d) for d in dd]
for dd in __hessian(__r, [__theta, __phi])
]
linear_retina_response_hess = lambda params: np.array([
[dd(*params) for dd in dddd ]
for dddd in __second_derivatives
])
neg_linear_retina_response_hess = lambda params: -linear_retina_response_hess(params)
linear_retina_response_vec = np.vectorize(__linear_retina_response)
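# Hedged usage sketch (added, not part of the original module): with two hits
# on the z axis and z0 = 0, the direction (theta, phi) = (0, 0) points straight
# along z, so every hit contributes exp(0) = 1 to the response.
#   set_event(np.array([[0.0, 0.0, 1.0], [0.0, 0.0, 2.0]]))
#   set_sigma(0.05); set_z0(0.0)
#   linear_retina_response([0.0, 0.0])  # -> 2.0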
import numpy as np
from mesonh_atm.mesonh_atmosphere import MesoNHAtmosphere
import matplotlib.pyplot as plt
from scipy.interpolate import RegularGridInterpolator
import modules.cloud as ModCloud
#Old Data without advection
path = "/net/skyscanner/volume1/data/mesoNH/ARM_OneHour3600files_No_Horizontal_Wind/"
mfiles = [path+"U0K10.1.min{:02d}.{:03d}_diaKCL.nc".format(minute, second)
for minute in range(1, 60)
for second in range(1, 61)]
mtstep = 1
atm = MesoNHAtmosphere(mfiles, 1)
font = {'size' : 26}
plt.rc('font', **font)
#######################################################################
########################### cloud example #############################
#######################################################################
# Example Data of two variables with the coordinates of a rough bounding box of a cloud
# RCT = liquid water content, WT = vertical wind
lwc_data=atm.data['RCT'][449:599,75:125,60:200,110:250]
zwind_data=atm.data['WT'][449:599,75:125,60:200,110:250]
ids,counter,clouds=ModCloud.cloud_segmentation(lwc_data)
clouds=list(set(clouds.values()))
length_point_clds = np.ndarray((0,1))
for each_cloud in clouds:
print(len(each_cloud.points))
temp = len(each_cloud.points)
length_point_clds = np.vstack((length_point_clds,temp))
# Get cloud with the biggest amount of points in the bounding box
cloud = clouds[np.argmax(length_point_clds)]
cloud.calculate_attributes(lwc_data,zwind_data)
lwc_cloud = np.zeros(lwc_data.shape)
for point in cloud.points:
lwc_cloud[point] = 1
#Coordinates of the rough bounding box of the example cloud
xr = np.arange(0.005 + 60*0.01, 0.005 + 200*0.01,0.01)
yr = np.arange(0.005 + 110*0.01, 0.005 + 250*0.01,0.01)
all_Zs = atm.data["VLEV"][:,0,0]
zr = all_Zs[75:125]
tr = np.arange(449,599)
origin_xy = [60,110]
zspan = np.arange(0,16)
# Plotting three different cross-sections including the center of geometry COG and the center of masses
# of the vertical wind and liquid water content
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,15].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,15].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,15]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud Example, z={}km, t={}s".format(np.round(float(zr[15]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,19].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,19].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,19]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud Example, z={}km, t={}s".format(np.round(float(zr[19]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
plt.figure()
plt.xlabel("x coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.contour(zwind_data[0,30].T,origin="lower",label='zwind',extent=[xr[0], xr[-1], yr[0], yr[-1]],linewidths=2)
cbar=plt.colorbar()
cbar.set_label('m/s')
plt.contour(lwc_cloud[0,30].T,V=[0,1],origin='lower',extent=[xr[0], xr[-1], yr[0], yr[-1]],alpha=0.6,cmap='Greys')
COG_2D = cloud.COG_2D_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COG_2D[0],COG_2D[1],'ro',markersize=8,label='COG 2D')
COM_2D_zwind = cloud.COM_2D_zwind_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_zwind[0],COM_2D_zwind[1],'gx',markersize=8, label='COM 2D zwind')
COM_2D_lwc = cloud.COM_2D_lwc_tz[0,30]*0.01 + np.array([0.005 + origin_xy[0]*0.01,0.005 + origin_xy[1]*0.01])
plt.plot(COM_2D_lwc[0],COM_2D_lwc[1],'b>',markersize=8, label='COM 2D lwc')
plt.title("Zwind Cross-section Cloud, z={}km, t={}s".format(np.round(float(zr[30]),3),tr[0]))
plt.xlim(xr[0], xr[-1])
plt.ylim(yr[0], yr[-1])
plt.legend()
# Center of masses and Geometry, for each cross-section
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("y coordinate(km)")
plt.plot(zr,cloud.COG_2D_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01,label='COG 2D',linewidth=3)
plt.plot(zr,cloud.COM_2D_lwc_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D lwc',linewidth=3)
plt.plot(zr,cloud.COM_2D_zwind_tz[0,:,1]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D zwind',linewidth=3)
plt.legend()
plt.title('Center of masses and geometry Cloud, t = {}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("x coordinate(km)")
plt.plot(zr,cloud.COG_2D_tz[0,:,0]*0.01 + 0.005 + origin_xy[1]*0.01,label='COG 2D',linewidth=3)
plt.plot(zr,cloud.COM_2D_lwc_tz[0,:,0]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D lwc',linewidth=3)
plt.plot(zr,cloud.COM_2D_zwind_tz[0,:,0]*0.01 + 0.005 + origin_xy[1]*0.01, label='COM 2D zwind',linewidth=3)
plt.legend()
plt.title('Center of masses and geometry Cloud, t = {}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Surface(100$m^2$)")
plt.plot(zr,cloud.area_cs_tz[0],linewidth=3)
plt.title('Surface Area of Cloud, t={}s'.format(tr[0]))
plt.figure()
plt.xlabel("time(s)")
plt.ylabel("Volume(1000 $m^3$)")
plt.plot(tr,cloud.volumen_t,linewidth=3)
plt.title('Volume of Cloud')
####### Visualizing max vertical wind as a function of z
zwind_maxz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_max = np.max(zwind_data[0,z][lwc_cloud[0,z]>0])
zwind_maxz = np.vstack((zwind_maxz,zwind_max))
####### Visualizing mean vertical wind as a function of z
zwind_meanz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_mean = np.mean(zwind_data[0,z][lwc_cloud[0,z]>0])
zwind_meanz = np.vstack((zwind_meanz,zwind_mean))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Max zwind(m/s)")
plt.plot(zr[4:],zwind_maxz,linewidth=3)
plt.title('Max Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Mean Zwind (m/s)")
plt.plot(zr[4:],zwind_meanz,linewidth=3)
plt.title('Mean Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
################# Variance behaviour of vertical wind in dependence of z
zwind_varz = np.ndarray((0,1))
for z in range(int(cloud.zmin_t[0]),int(cloud.zmax_t[0])+1):
zwind_var = zwind_data[0,z][lwc_cloud[0,z]>0].var()
zwind_varz = np.vstack((zwind_varz,zwind_var))
plt.figure()
plt.xlabel("z coordinate(km)")
plt.ylabel("Variance Zwind")
plt.plot(zr[4:],zwind_varz,linewidth=3)
plt.title('Mean Zwind per z cross-section Cloud,t={}s'.format(tr[0]))
##########################################
############# Variogram Analysis #########
##########################################
##############################################################
##### creating moving bounding box that follows center #######
##############################################################
xbound_max = int(np.max(cloud.xsize_t))
ybound_max = int(np.max(cloud.ysize_t))
from __future__ import print_function
import string
import sys
import os
from collections import deque
import pandas as pd
import numpy as np
import matplotlib.pyplot as plt
plt.switch_backend('Agg')
import tensorflow as tf
import keras
keras.backend.image_data_format()
from keras import backend as K
from keras import regularizers
from keras.layers import Input, Dense, Reshape, Lambda, Conv1D, Flatten, MaxPooling1D, UpSampling1D, GlobalMaxPooling1D
from keras.layers import LSTM, Bidirectional, BatchNormalization, Dropout, Concatenate, Embedding, Activation, Dot, dot
from keras.models import Model, clone_model, Sequential
from keras.optimizers import Adam
from keras.callbacks import EarlyStopping,ModelCheckpoint
from keras.constraints import unitnorm
from keras_layer_normalization import LayerNormalization
tf.keras.backend.set_floatx('float32')
import sklearn as sk
from sklearn.base import BaseEstimator, _pprint
from sklearn.utils import check_array, check_random_state
from sklearn.utils.validation import check_is_fitted
from sklearn.preprocessing import StandardScaler
from sklearn.manifold import LocallyLinearEmbedding, MDS, Isomap, TSNE
from sklearn.decomposition import PCA, IncrementalPCA, KernelPCA, SparsePCA, TruncatedSVD, FastICA, NMF, MiniBatchDictionaryLearning
from sklearn.random_projection import GaussianRandomProjection, SparseRandomProjection
from sklearn.linear_model import LinearRegression, LogisticRegression
from sklearn.ensemble import RandomForestRegressor
from sklearn.model_selection import KFold, GroupKFold, train_test_split
from sklearn.metrics import mean_squared_error, explained_variance_score, mean_absolute_error, median_absolute_error, r2_score
from sklearn.metrics import average_precision_score, precision_score, recall_score, f1_score, roc_auc_score, matthews_corrcoef
from sklearn.metrics import roc_curve, precision_recall_curve, RocCurveDisplay, PrecisionRecallDisplay
from sklearn.metrics import roc_auc_score,accuracy_score,matthews_corrcoef
from scipy import stats
from scipy.stats import multivariate_normal, kurtosis, skew, pearsonr, spearmanr
import processSeq
from processSeq import load_seq_1, kmer_dict, load_signal_1, load_seq_2, load_seq_2_kmer, load_seq_altfeature
import xgboost
import pickle
import os.path
from optparse import OptionParser
import time
from timeit import default_timer as timer
import utility_1
from utility_1 import mapping_Idx
import h5py
import json
# generate sequences
# idx_sel_list: chrom, serial
# seq_list: relative positions
def generate_sequences(idx_sel_list, gap_tol=5, region_list=[]):
chrom = idx_sel_list[:,0]
chrom_vec = np.unique(chrom)
chrom_vec = np.sort(chrom_vec)
seq_list = []
print(len(chrom),chrom_vec)
for chrom_id in chrom_vec:
b1 = np.where(chrom==chrom_id)[0]
t_serial = idx_sel_list[b1,1]
prev_serial = t_serial[0:-1]
next_serial = t_serial[1:]
distance = next_serial-prev_serial
b2 = np.where(distance>gap_tol)[0]
if len(b2)>0:
if len(region_list)>0:
# print('region_list',region_list,len(b2))
b_1 = np.where(region_list[:,0]==chrom_id)[0]
# print(b2)
t_serial = idx_sel_list[b2,1]
if len(b_1)>0:
# b2 = np.setdiff1d(b2,region_list[b_1,1])
# print(region_list,region_list[b_1,1],len(b2))
t_id1 = utility_1.mapping_Idx(t_serial,region_list[b_1,1])
t_id1 = t_id1[t_id1>=0]
t_id2 = b2[t_id1]
b2 = np.setdiff1d(b2,t_id2)
# print(len(b2))
# print(idx_sel_list[b2])
# return
# print('gap',len(b2))
if len(b2)>0:
t_seq = list(np.vstack((b2[0:-1]+1,b2[1:])).T)
t_seq.insert(0,np.asarray([0,b2[0]]))
t_seq.append(np.asarray([b2[-1]+1,len(b1)-1]))
else:
t_seq = [np.asarray([0,len(b1)-1])]
# print(t_seq)
# print(chrom_id,len(t_seq),max(distance))
seq_list.extend(b1[np.asarray(t_seq)])
return np.asarray(seq_list)
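# Toy illustration (added, not in the original): two contiguous runs of serials
# separated by a gap larger than gap_tol map to start/stop row indices.
#   toy = np.asarray([[1, 0], [1, 1], [1, 2], [1, 20], [1, 21]])
#   generate_sequences(toy, gap_tol=5)  # -> [[0, 2], [3, 4]]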
# select sample
def sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=5, L=5):
num_sample = len(idx_sel_list)
num1 = len(seq_list)
size1 = 2*L+1
print(num_sample,num1,size1)
feature_dim = x_mtx.shape[1]
vec1_local = np.zeros((num_sample,size1),dtype=int)
vec1_serial = np.zeros((num_sample,size1),dtype=int)
feature_mtx = np.zeros((num_sample,size1,feature_dim),dtype=np.float32)
signal_mtx = np.zeros((num_sample,size1))
ref_serial = idx_sel_list[:,1]
id_vec = np.zeros(num_sample,dtype=np.int8)
for i in range(0,num1):
s1, s2 = seq_list[i][0], seq_list[i][1]+1
serial = ref_serial[s1:s2]
id_vec[s1:s2] = 1
# print('start stop',s1,s2,serial)
num2 = len(serial)
t1 = np.outer(list(range(s1,s2)),np.ones(size1))
t2 = t1 + np.outer(np.ones(num2),list(range(-L,L+1)))
t2[t2<s1] = s1
t2[t2>=s2] = s2-1
idx = np.int64(t2)
# print(idx)
vec1_local[s1:s2] = idx
vec1_serial[s1:s2] = ref_serial[idx]
feature_mtx[s1:s2] = x_mtx[idx]
signal_mtx[s1:s2] = y[idx]
# if i%10000==0:
# print(i,num2,vec1_local[s1],vec1_serial[s1])
id1 = np.where(id_vec>0)[0]
num2 = len(id1)
if num2<num_sample:
feature_mtx, signal_mtx = feature_mtx[id1], signal_mtx[id1]
# vec1_serial, vec1_local = vec1_serial[id1], vec1_local[id1]
vec1_serial = vec1_serial[id1]
id_1 = -np.ones(num_sample,dtype=np.int64)
id_1[id1] = np.arange(num2)
vec1_local = id_1[vec1_local]
b1 = np.where(vec1_local<0)[0]
if len(b1)>0:
print('error!',b1)
return -1
# signal_mtx = signal_mtx[:,np.newaxis]
signal_mtx = np.expand_dims(signal_mtx, axis=-1)
# signal_mtx = np.expand_dims(signal_ntx, axis=-1)
return feature_mtx, signal_mtx, vec1_serial, vec1_local
def score_2a(y, y_predicted):
score1 = mean_squared_error(y, y_predicted)
score2 = pearsonr(y, y_predicted)
score3 = explained_variance_score(y, y_predicted)
score4 = mean_absolute_error(y, y_predicted)
score5 = median_absolute_error(y, y_predicted)
score6 = r2_score(y, y_predicted)
score7, pvalue = spearmanr(y,y_predicted)
# vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6]
vec1 = [score1, score2[0], score2[1], score3, score4, score5, score6, score7, pvalue]
return vec1
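# Note (added): the returned vector packs, in order, MSE, Pearson r and its
# p-value, explained variance, MAE, median absolute error, R^2, Spearman rho
# and its p-value; e.g. a perfect (non-constant) prediction gives
# [0, 1, 0, 1, 0, 0, 1, 1, 0].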
def read_phyloP(species_name):
path1 = './'
filename1 = '%s/estimate_rt/estimate_rt_%s.txt'%(path1,species_name)
# filename2a = 'test_seq_%s.1.txt'%(species_name)
file1 = pd.read_csv(filename1,sep='\t')
col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1['serial'])
num_sample = len(chrom_ori)
chrom_vec = np.unique(chrom_ori)
chrom_vec = ['chr22']
for chrom_id in chrom_vec:
filename1 = '%s/phyloP/hg19.phyloP100way.%s.bedGraph'%(path1,chrom_id)
data1 = pd.read_csv(filename1,header=None,sep='\t')
chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
len1 = stop-start
b = np.where(chrom_ori==chrom_id)[0]
num_sample1 = len(b)
vec1 = np.zeros((num_sample1,16))
print(chrom_id,len(chrom),len(b))
cnt = 0
b1 = [-1]
for i in b:
t1 = b1[-1]+1
b1 = np.where((start[t1:]>=start_ori[i])&(stop[t1:]<stop_ori[i]))[0]+t1
if len(b1)==0:
b1 = [-1]
continue
t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
s1 = 0
s2 = np.sum(t_len1)
i1 = cnt
for j in range(0,12):
temp1 = (j-8)*2.5
b2 = np.where((t_score<temp1+2.5)&(t_score>=temp1))[0]
print(b2)
vec1[i1,j] = np.sum(t_len1[b2])*1.0/s2
s1 = s1+temp1*vec1[i1,j]
vec1[i1,12] = s1 # average
vec1[i1,13] = np.median(t_score)
vec1[i1,14] = np.max(t_score)
vec1[i1,15] = np.min(t_score)
cnt += 1
if cnt%1000==0:
print(cnt,len(b1),s2,vec1[i1,12:16])
break
# dict1 = dict()
# dict1['vec'], dict1['index'] = vec1,b
# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
fields = ['index']
for j in range(0,12):
temp1 = (j-8)*2.5
fields.append('%s-%s'%(temp1,temp1+2.5))
fields.extend(range(0,4))
data1 = pd.DataFrame(data = np.hstack((b[:,np.newaxis],vec1)),columns=fields)
data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
return vec1
def read_phyloP_1(ref_filename,header,file_path,chrom_vec,n_level=15,offset=10,magnitude=2):
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
colnames = list(file1)
col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
num_sample = len(chrom_ori)
# chrom_vec = np.unique(chrom_ori)
# chrom_vec = [chrom_id]
# n_level, offset, magnitude = 15, 10, 2
score_max = (n_level-offset)*magnitude
for chrom_id in chrom_vec:
# filename1 = '%s/hg19.phyloP100way.%s.bedGraph'%(file_path,chrom_id)
filename1 = '%s/chr%s.phyloP100way.bedGraph'%(file_path,chrom_id)
data1 = pd.read_csv(filename1,header=None,sep='\t')
chrom, start, stop, score = data1[0], data1[1], data1[2], data1[3]
len1 = stop-start
chrom_id1 = 'chr%s'%(chrom_id)
b = np.where(chrom_ori==chrom_id1)[0]
num_sample1 = len(b)
vec1 = np.zeros((num_sample1,n_level+4))
print(chrom_id,len(chrom),len(b))
cnt = 0
m_idx = len(start)-1
start_idx = 0
print("number of regions", len(b))
for i in b:
t_start, t_stop = start_ori[i], stop_ori[i] # position of zero region
position = [t_start,t_stop]
if start_idx<=m_idx:
b1, start_idx = utility_1.search_region_include(position, start, stop, m_idx, start_idx)
# print(count,t_start,t_stop,t_stop-t_start,start_idx,len(id3))
if len(b1)==0:
continue
t_len1, t_score = np.asarray(len1[b1]), np.asarray(score[b1])
t_score[t_score>score_max] = score_max-1e-04
s1 = 0
s2 = np.sum(t_len1)
for j in range(0,n_level):
temp1 = (j-offset)*magnitude
b2 = np.where((t_score<temp1+magnitude)&(t_score>=temp1))[0]
# print(b2)
vec1[cnt,j] = np.sum(t_len1[b2])*1.0/s2
s1 = s1+temp1*vec1[cnt,j]
vec1[cnt,n_level:n_level+4] = [s1,np.median(t_score),np.max(t_score),np.min(t_score)]
cnt += 1
pre_b1 = b1
if cnt%1000==0:
print(chrom_id,cnt,len(b1),s2,vec1[cnt,-4:])
# break
# dict1 = dict()
# dict1['vec'], dict1['index'] = vec1,b
# np.save('phyloP_%s'%(chrom_id),dict1,allow_pickle=True)
fields = ['index']
for j in range(0,n_level):
temp1 = (j-offset)*magnitude
fields.append('%s-%s'%(temp1,temp1+magnitude))
fields.extend(range(0,4))
idx = serial_ori[b]
data1 = pd.DataFrame(data = np.hstack((idx[:,np.newaxis],vec1)),columns=fields)
data1.to_csv('phyloP_%s.txt'%(chrom_id),sep='\t',index=False)
return vec1
def read_motif_1(filename,output_filename=-1):
data1 = pd.read_csv(filename,sep='\t')
colnames = list(data1)
col1, col2, col3 = colnames[0], colnames[1], colnames[2]
chrom, start, stop = np.asarray(data1[col1]), np.asarray(data1[col2]), np.asarray(data1[col3])
region_len = stop-start
m1, m2, median_len = np.max(region_len), np.min(region_len), np.median(region_len)
b1 = np.where(region_len!=median_len)[0]
print(m1,m2,median_len,len(b1))
bin_size = median_len
motif_name = colnames[3:]
mtx1 = np.asarray(data1.loc[:,motif_name])
mtx1 = mtx1*1000.0/np.outer(region_len,np.ones(mtx1.shape[1]))
print('motif',len(motif_name))
print(mtx1.shape)
print(np.max(mtx1),np.min(mtx1),np.median(mtx1))
if output_filename!=-1:
fields = colnames
data1 = pd.DataFrame(columns=fields)
data1[colnames[0]], data1[colnames[1]], data1[colnames[2]] = chrom, start, stop
num1 = len(fields)-3
for i in range(0,num1):
data1[colnames[i+3]] = mtx1[:,i]
data1.to_csv(output_filename,header=True,index=False,sep='\t')
print(output_filename, data1.shape)
return mtx1, chrom, start, stop, colnames
def read_gc_1(ref_filename,header,filename,output_filename):
sel_idx = []
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
f_list = load_seq_altfeature(filename,sel_idx)
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
colnames = list(file1)
col1, col2, col3, col4 = colnames[0], colnames[1], colnames[2], colnames[3]
chrom_ori, start_ori, stop_ori, serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col4])
num_sample = len(chrom_ori)
if num_sample!=f_list.shape[0]:
print('error!',num_sample,f_list.shape[0])
fields = ['chrom','start','stop','serial','GC','GC_N','GC_skew']
file2 = pd.DataFrame(columns=fields)
file2['chrom'], file2['start'], file2['stop'], file2['serial'] = chrom_ori, start_ori, stop_ori, serial_ori
for i in range(0,3):
file2[fields[i+4]] = f_list[:,i]
file2.to_csv(output_filename,index=False,sep='\t')
return f_list
def generate_serial(filename1,chrom,start,stop):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,23):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
print(chrom_vec)
# print(chrom)
print(len(chrom))
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
serial_start = 0
serial_vec = np.zeros(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
for chrom_id in chrom_vec:
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
b2 = np.where(chrom==chrom_id)[0]
if len(b1)>0:
size1 = int(np.ceil(t_size*1.0/bin_size))
serial = np.int64(start[b2]/bin_size)+serial_start
serial_vec[b2] = serial
print(chrom_id,b2,len(serial),serial_start,size1)
serial_start = serial_start+size1
else:
print("error!")
return
return np.int64(serial_vec)
def generate_serial_local(filename1,chrom,start,stop,chrom_num):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,chrom_num+1):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
chrom_vec += ['chrM']
print(chrom_vec)
print(chrom)
print(len(chrom))
t_chrom = np.unique(chrom)
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
# serial_start = np.zeros(len(chrom))
serial_start = 0
serial_start_1 = dict()
serial_vec = np.zeros(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
for chrom_id in chrom_vec:
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
serial_start_1[chrom_id] = serial_start
size1 = int(np.ceil(t_size*1.0/bin_size))
serial_start = serial_start+size1
for chrom_id in t_chrom:
b2 = np.where(chrom==chrom_id)
serial = np.int64(start[b2]/bin_size)+serial_start_1[chrom_id]
serial_vec[b2] = serial
return np.int64(serial_vec)
def generate_serial_start(filename1,chrom,start,stop,chrom_num=19):
# chrom_vec = np.sort(np.unique(chrom))
# print(chrom_vec)
chrom_vec = []
for i in range(1,chrom_num+1):
chrom_vec.append('chr%d'%(i))
chrom_vec += ['chrX']
chrom_vec += ['chrY']
print(chrom_vec)
print(chrom)
print(len(chrom))
data1 = pd.read_csv(filename1,header=None,sep='\t')
ref_chrom, chrom_size = np.asarray(data1[0]), np.asarray(data1[1])
serial_start = 0
serial_vec = -np.ones(len(chrom))
bin_size = stop[1]-start[1]
print(bin_size)
start_vec = dict()
for chrom_id in chrom_vec:
start_vec[chrom_id] = serial_start
b1 = np.where(ref_chrom==chrom_id)[0]
t_size = chrom_size[b1[0]]
b2 = np.where(chrom==chrom_id)[0]
if len(b1)>0:
size1 = int(np.ceil(t_size*1.0/bin_size))
serial = np.int64(start[b2]/bin_size)+serial_start
serial_vec[b2] = serial
print(chrom_id,b2,len(serial),serial_start,size1)
serial_start = serial_start+size1
else:
print("error!")
return
return np.int64(serial_vec), start_vec
def shuffle_array(vec):
num1 = len(vec)
idx = np.random.permutation(num1)
vec = vec[idx]
return vec, idx
# input: estimated attention, type_id: training, validation, or test data
# output: ranking of attention
def select_region1_sub(filename,type_id):
data1 = pd.read_csv(filename,sep='\t')
colnames = list(data1)
# chrom start stop serial signal predicted_signal predicted_attention
chrom, start, serial = data1['chrom'], data1['start'], data1['serial']
chrom, start, serial = np.asarray(chrom), np.asarray(start), np.asarray(serial)
predicted_attention = data1['predicted_attention']
predicted_attention = np.asarray(predicted_attention)
ranking = stats.rankdata(predicted_attention,'average')/len(predicted_attention)
rank1 = np.zeros((len(predicted_attention),2))
rank1[:,0] = ranking
chrom_vec = np.unique(chrom)
for t_chrom in chrom_vec:
b1 = np.where(chrom==t_chrom)[0]
t_attention = predicted_attention[b1]
t_ranking = stats.rankdata(t_attention,'average')/len(t_attention)
rank1[b1,1] = t_ranking
data1['Q1'] = rank1[:,0] # rank across all the included chromosomes
data1['Q2'] = rank1[:,1] # rank by each chromosome
data1['typeId'] = np.int8(type_id*np.ones(len(rank1)))
return data1,chrom_vec
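# Usage sketch (the file name is hypothetical): given an estimation file with columns
# chrom/start/stop/serial/signal/predicted_signal/predicted_attention,
#   data1, chrom_vec = select_region1_sub('estimation_test.txt', type_id=1)
# returns the table with added columns Q1 (attention quantile across all included chromosomes),
# Q2 (quantile within each chromosome) and typeId (0: training, 1: test, 2: validation).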
# merge estimated attention from different training/test splits
# type_id1: chromosome order; type_id2: training: 0, test: 1, valid: 2
def select_region1_merge(filename_list,output_filename,type_id1=0,type_id2=1):
list1 = []
chrom_numList = []
# b1 = np.where((self.chrom!='chrX')&(self.chrom!='chrY'))[0]
# ref_chrom, ref_start, ref_serial = self.chrom[b1], self.start[b1], self.serial[b1]
# num_sameple = len(ref_chrom)
i = 0
serial1 = []
num1 = len(filename_list)
vec1 = list(range(num1))
if type_id1==1:
vec1 = list(range(num1-1,-1,-1))
for i in vec1:
filename1 = filename_list[i]
# data1: chrom, start, stop, serial, signal, predicted_signal, predicted_attention, Q1, Q2, typeId
# typeId: training: 0, test: 1, valid: 2
data1, chrom_vec = select_region1_sub(filename1,type_id2)
print(filename1,len(data1))
# list1.append(data1)
# if i==0:
# serial1 = np.asarray(data1['serial'])
t_serial = np.asarray(data1['serial'],dtype=np.int64)
t_serial2 = np.setdiff1d(t_serial,serial1)
serial1 = np.union1d(serial1,t_serial)
id1 = mapping_Idx(t_serial,t_serial2)
colnames = list(data1)
data1 = data1.loc[id1,colnames]
list1.append(data1)
chrom_numList.append(chrom_vec)
data2 = pd.concat(list1, axis=0, join='outer', ignore_index=True,
keys=None, levels=None, names=None, verify_integrity=False, copy=True)
print('sort')
data2 = data2.sort_values(by=['serial'])
data2.to_csv(output_filename,index=False,sep='\t')
return data2, chrom_numList
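# Usage sketch (file names are hypothetical):
#   data2, chrom_numList = select_region1_merge(['split1_est.txt','split2_est.txt'],'merged_est.txt')
# Files are processed in order (or in reverse when type_id1==1); each later file only contributes
# serials not seen before (np.setdiff1d), and the concatenated table is sorted by serial before
# being written to the output file.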
class Reader(object):
def __init__(self, ref_filename, feature_idvec = [1,1,1,1]):
# Initializes the Reader with a reference serial file and a feature indicator vector
self.ref_filename = ref_filename
self.feature_idvec = feature_idvec
def generate_serial(self,filename1,filename2,output_filename,header=None):
data1 = pd.read_csv(filename2, header=header, sep='\t')
colnames = list(data1)
chrom, start, stop = np.asarray(data1[colnames[0]]), np.asarray(data1[colnames[1]]), np.asarray(data1[colnames[2]])
serial_vec, start_vec = generate_serial_start(filename1,chrom,start,stop)
if output_filename!=None:
colnames2 = colnames[0:3]+['serial']+colnames[3:]
data2 = pd.DataFrame(columns=colnames2)
data2['serial'] = serial_vec
for colname1 in colnames:
data2[colname1] = data1[colname1]
flag = False
if header!=None:
flag = True
data2.to_csv(output_filename,header=flag,index=False,sep='\t')
return serial_vec, start_vec
def load_motif(self,filename1,motif_filename,output_filename):
# output_filename = None
# ref_filename = 'hg38.5k.serial.bed'
# motif_filename = 'hg38.motif.count.txt'
# output_filename1 = None
mtx1, chrom, start, stop, colnames = read_motif_1(motif_filename)
serial_vec, start_vec = generate_serial_start(filename1,chrom,start,stop)
if output_filename!=None:
colnames2 = ['chrom','start','stop','serial']
data2 = pd.DataFrame(columns=colnames2)
data2['chrom'], data2['start'], data2['stop'], data2['serial'] = chrom, start, stop, serial_vec
data3 = pd.DataFrame(columns=colnames[3:],data=mtx1)
data1 = pd.concat([data2,data3], axis=1, join='outer', ignore_index=True,
keys=None, levels=None, names=None, verify_integrity=False, copy=True)
data1.to_csv(output_filename,header=True,index=False,sep='\t')
print('data1',data1.shape)
return True
class ConvergenceMonitor(object):
_template = "{iter:>10d} {logprob:>16.4f} {delta:>+16.4f}"
def __init__(self, tol, n_iter, verbose):
self.tol = tol
self.n_iter = n_iter
self.verbose = verbose
self.history = deque(maxlen=2)
self.iter = 0
def __repr__(self):
class_name = self.__class__.__name__
params = dict(vars(self), history=list(self.history))
return "{0}({1})".format(
class_name, _pprint(params, offset=len(class_name)))
def report(self, logprob):
if self.verbose:
delta = logprob - self.history[-1] if self.history else np.nan
message = self._template.format(
iter=self.iter + 1, logprob=logprob, delta=delta)
print(message, file=sys.stderr)
self.history.append(logprob)
self.iter += 1
@property
def converged(self):
return (self.iter == self.n_iter or
(len(self.history) == 2 and
self.history[1] - self.history[0] < self.tol))
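# Minimal usage sketch (run_one_iteration is a hypothetical helper returning a log-likelihood):
#   monitor = ConvergenceMonitor(tol=1e-3, n_iter=100, verbose=True)
#   while not monitor.converged:
#       logprob = run_one_iteration()
#       monitor.report(logprob)
# converged becomes True after n_iter reported iterations, or once the improvement between the
# last two reports drops below tol.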
class _Base1(BaseEstimator):
def __init__(self, file_path, species_id, resolution, run_id, generate,
chromvec,test_chromvec,
featureid,type_id,cell,method,ftype,ftrans,tlist,
flanking,normalize,
config,
attention=1,feature_dim_motif=1,
kmer_size=[6,5]):
# Initializes RepliSeq
self.run_id = run_id
self.cell = cell
self.generate = generate
self.train_chromvec = chromvec
self.chromosome = chromvec[0]
print('train_chromvec',self.train_chromvec)
print('test_chromvec',test_chromvec)
self.test_chromvec = test_chromvec
self.config = config
self.n_epochs = config['n_epochs']
self.species_id = species_id
self.type_id = type_id
self.cell_type = cell
self.cell_type1 = config['celltype_id']
self.method = method
self.ftype = ftype
self.ftrans = ftrans[0]
self.ftrans1 = ftrans[1]
self.t_list = tlist
self.flanking = flanking
self.flanking1 = 3
self.normalize = normalize
self.batch_size = config['batch_size']
# config = dict(output_dim=hidden_unit,fc1_output_dim=fc1,fc2_output_dim=fc2,units1=units1[0],
# units2=units1[1],n_epochs=n_epochs,batch_size=batch_size)
# config['feature_dim_vec'] = units1[2:]
self.tol = config['tol']
self.attention = attention
self.attention_vec = [12,17,22,32,51,52,58,60]
self.attention_vec1 = [1]
self.lr = config['lr']
self.step = config['step']
self.feature_type = -1
self.kmer_size = kmer_size
self.activation = config['activation']
self.min_delta = config['min_delta']
self.chromvec_sel = chromvec
self.feature_dim_transform = config['feature_dim_transform']
feature_idvec = [1,1,1,1]
# ref_filename = 'hg38_5k_serial.bed'
if 'ref_filename' in config:
ref_filename = config['ref_filename']
else:
ref_filename = 'hg38_5k_serial.bed'
self.reader = Reader(ref_filename, feature_idvec)
self.predict_type_id = 0
self.method = method
self.train = self.config['train_mode']
self.path = file_path
self.model_path = '%s/test_%d.h5'%(self.path,run_id)
self.pos_code = config['pos_code']
self.feature_dim_select1 = config['feature_dim_select']
self.method_vec = [[11,31],[22,32,52,17,51,58,60],[56,62]]
self.resolution = resolution
# if self.species_id=='mm10':
# self.cell_type1 = config['cell_type1']
if 'cell_type1' in self.config:
self.cell_type1 = config['cell_type1']
if ('load_type' in self.config) and (self.config['load_type']==1):
self.load_type = 1
else:
self.load_type = 0
if (method>10) and (method not in [56]):
self.predict_context = 1
else:
self.predict_context = 0
if ftype[0]==-5:
self.feature_idx1= -5 # full dimensions
elif ftype[0]==-6:
self.feature_idx1 = -6 # frequency dimensions
else:
self.feature_idx1 = ftype
if 'est_attention_type1' in self.config:
self.est_attention_type1 = self.config['est_attention_type1']
else:
self.est_attention_type1 = 1
if 'est_attention_sel1' in self.config:
self.est_attention_sel1 = self.config['est_attention_sel1']
else:
self.est_attention_sel1 = 0
# self.feature_idx = [0,2]
self.feature_idx = featureid
self.x, self.y = dict(), dict() # feature matrix and signals
self.vec = dict() # serial
self.vec_local = dict()
if self.species_id.find('hg')>=0:
self.chrom_num = 22
elif self.species_id.find('mm')>=0:
self.chrom_num = 19
else:
self.chrom_num = -1
self.region_list_test, self.region_list_train, self.region_list_valid = [],[],[]
if 'region_list_test' in config:
self.region_list_test = config['region_list_test']
if 'region_list_train' in config:
self.region_list_train = config['region_list_train']
if 'region_list_valid' in config:
self.region_list_valid = config['region_list_valid']
flag = False
if 'scale' in config:
flag = True
self.scale = config['scale']
else:
self.scale = [0,1]
if ('activation_basic' in config) and (config['activation_basic']=='tanh'):
if (flag==True) and (self.scale[0]>=0):
flag = False
if flag==False:
self.scale = [-1,1]
self.region_boundary = []
self.serial_vec = []
self.f_mtx = []
print('scale',self.scale)
print(self.test_chromvec)
filename1 = '%s_chr%s-chr%s_chr%s-chr%s'%(self.cell_type, self.train_chromvec[0], self.train_chromvec[-1], self.test_chromvec[0], self.test_chromvec[-1])
self.filename_load = filename1
print(self.filename_load,self.method,self.predict_context,self.attention)
self.set_generate(generate,filename1)
def load_ref_serial(self, ref_filename, header=None):
if header==None:
file1 = pd.read_csv(ref_filename,header=header,sep='\t')
else:
file1 = pd.read_csv(ref_filename,sep='\t')
colnames = list(file1)
# col1, col2, col3 = '%s.chrom'%(species_name), '%s.start'%(species_name), '%s.stop'%(species_name)
col1, col2, col3, col_serial = colnames[0], colnames[1], colnames[2], colnames[3]
self.chrom_ori, self.start_ori, self.stop_ori, self.serial_ori = np.asarray(file1[col1]), np.asarray(file1[col2]), np.asarray(file1[col3]), np.asarray(file1[col_serial])
print('load ref serial', self.serial_ori.shape)
return self.serial_ori
# load local serial and signal
def load_local_serial(self, filename1, header=None, region_list=[], type_id2=1, signal_normalize=1,region_list_1=[]):
if header==None:
file2 = pd.read_csv(filename1,header=header,sep='\t')
else:
file2 = pd.read_csv(filename1,sep='\t')
colnames = list(file2)
col1, col2, col3, col_serial = colnames[0], colnames[1], colnames[2], colnames[3]
# sort the table by serial
file2 = file2.sort_values(by=[col_serial])
self.chrom, self.start, self.stop, self.serial = np.asarray(file2[col1]), np.asarray(file2[col2]), np.asarray(file2[col3]), np.asarray(file2[col_serial])
b = np.where((self.chrom!='chrX')&(self.chrom!='chrY')&(self.chrom!='chrM'))[0]
self.chrom, self.start, self.stop, self.serial = self.chrom[b], self.start[b], self.stop[b], self.serial[b]
if self.chrom_num>0:
chrom_num = self.chrom_num
else:
chrom_num = len(np.unique(self.chrom))
chrom_vec = [str(i) for i in range(1,chrom_num+1)]
print('chrom_vec', chrom_vec)
self.bin_size = self.stop[1]-self.start[1]
scale = self.scale
if len(colnames)>=5:
col_signal = colnames[4]
self.signal = np.asarray(file2[col_signal])
self.signal = self.signal[b]
self.signal_pre = self.signal.copy()
if signal_normalize==1:
if self.run_id>10:
# self.signal = signal_normalize(self.signal,[0,1]) # normalize signals
self.signal_pre1, id1, signal_vec1 = self.signal_normalize_chrom(self.chrom,self.signal,chrom_vec,scale)
if not('train_signal_update' in self.config) or (self.config['train_signal_update']==1):
train_signal, id2, signal_vec2 = self.signal_normalize_chrom(self.chrom,self.signal,self.train_chromvec,scale)
id_1 = mapping_Idx(id1,id2)
self.signal = self.signal_pre1.copy()
self.signal[id_1] = train_signal
else:
self.signal = self.signal_pre1.copy()
else:
print('signal_normalize_bychrom')
self.signal, id1, signal_vec = self.signal_normalize_bychrom(self.chrom,self.signal,chrom_vec,scale)
else:
self.signal = np.ones(len(b))
# print(self.signal.shape)
print('load local serial', self.serial.shape, self.signal.shape, np.max(self.signal), np.min(self.signal))
if 'tol_region_search' in self.config:
tol = self.config['tol_region_search']
else:
tol = 2
# only train or predict on some regions
print('load_local_serial',len(self.chrom))
if len(region_list_1)>0:
num1 = len(region_list_1)
list1 = []
for i in range(num1):
t_region = region_list_1[i]
t_chrom, t_start, t_stop = 'chr%d'%(t_region[0]), t_region[1], t_region[2]
t_id1 = np.where((self.chrom==t_chrom)&(self.start<t_stop)&(self.stop>t_start))[0]
list1.extend(t_id1)
b1 = np.asarray(list1)
self.chrom, self.start, self.stop, self.serial = self.chrom[b1], self.start[b1], self.stop[b1], self.serial[b1]
print('load_local_serial',num1,len(self.chrom))
print(region_list_1)
if len(region_list)>0:
# print('load_local_serial',region_list)
# id1, region_list = self.region_search_1(chrom,start,stop,serial,region_list)
id1, region_list = self.region_search_1(self.chrom,self.start,self.stop,self.serial,region_list,type_id2,tol)
self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id1], self.start[id1], self.stop[id1], self.serial[id1], self.signal[id1]
id2 = self.region_search_boundary(self.chrom,self.start,self.stop,self.serial,region_list)
# print('region_search_boundary', id2[:,0], self.start[id2[:,1:3]],self.stop[id2[:,1:3]])
self.region_boundary = id2
# print(self.serial[id2[:,1:3]])
print('region_boundary',id2)
# return
else:
print('load_local_serial',region_list)
# assert len(region_list)>0
# return
return self.serial, self.signal
# training, validation and test data index
def prep_training_test(self,train_sel_list_ori):
train_id1, test_id1, y_signal_train1, y_signal_test, train1_sel_list, test_sel_list = self.generate_train_test_1(train_sel_list_ori)
self.idx_list = {'test':test_id1}
self.y_signal = {'test':y_signal_test}
if len(y_signal_test)>0:
print('y_signal_test',np.max(y_signal_test),np.min(y_signal_test))
if len(y_signal_train1)>0:
print('y_signal_train',np.max(y_signal_train1),np.min(y_signal_train1))
self.idx_list.update({'train':[],'valid':[]})
else:
return
# y_signal_test_ori = signal_normalize(y_signal_test,[0,1])
# shuffle array
# x_test_trans, shuffle_id2 = shuffle_array(x_test_trans)
# test_sel_list = test_sel_list[shuffle_id2]
# x_train1_trans, shuffle_id1 = shuffle_array(x_train1_trans)
# train_sel_list = train_sel_list[shuffle_id1]
print(train1_sel_list[0:5])
# split training and validation data
if 'ratio1' in self.config:
ratio = self.config['ratio1']
else:
ratio = 0.95
if 'type_id1' in self.config:
type_id_1 = self.config['type_id1']
else:
type_id_1 = 0
idx_train, idx_valid, idx_test = self.generate_index_1(train1_sel_list, test_sel_list, ratio, type_id_1)
print('idx_train,idx_valid,idx_test', len(idx_train), len(idx_valid), len(idx_test))
if (len(self.region_list_train)>0) or (len(self.region_list_valid)>0):
idx_train, idx_valid = self.generate_train_test_2(train1_sel_list,idx_train,idx_valid)
print('idx_train,idx_valid', len(idx_train), len(idx_valid))
train_sel_list, val_sel_list = train1_sel_list[idx_train], train1_sel_list[idx_valid]
self.idx_list.update({'train':train_id1[idx_train],'valid':train_id1[idx_valid]})
self.idx_train_val = {'train':idx_train,'valid':idx_valid}
self.y_signal.update({'train':y_signal_train1[idx_train],'valid':y_signal_train1[idx_valid]})
return train_sel_list, val_sel_list, test_sel_list
# prepare data from predefined features: kmer frequency feature and motif feature
def prep_data_sub2(self,path1,file_prefix,type_id2,feature_dim1,feature_dim2,flag_1):
species_id = self.species_id
celltype_id = self.cell_type1
if species_id=='mm10':
kmer_dim_ori, motif_dim_ori = 100, 50
filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,kmer_dim_ori,motif_dim_ori)
# filename2 = 'test_%s_genome%d_kmer7.h5'%(species_id,celltype_id)
filename2 = '%s_%d_kmer7_0_200_trans.h5'%(species_id,celltype_id)
else:
kmer_dim_ori, motif_dim_ori = 50, 50
filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,kmer_dim_ori,motif_dim_ori)
# filename2 = 'test_%s_kmer7.h5'%(species_id)
filename2 = '%s_kmer7_0_200_trans.h5'%(species_id)
kmer_size1, kmer_size2, kmer_size3 = 5,6,7
x_train1_trans, train_sel_list_ori = [], []
flag1, flag2 = 0, 0
flag3 = True
# if kmer_size2 in self.kmer_size:
if flag3==True:
if os.path.exists(filename1)==True:
print("loading data...")
data1 = np.load(filename1,allow_pickle=True)
data_1 = data1[()]
x_train1_trans_ori, train_sel_list_ori = np.asarray(data_1['x1']), np.asarray(data_1['idx'])
print('train_sel_list',train_sel_list_ori.shape)
print('x_train1_trans',x_train1_trans_ori.shape)
if kmer_size2 in self.kmer_size:
flag1 = 1
serial1 = train_sel_list_ori[:,1]
dim1 = x_train1_trans_ori.shape[1]
if (self.feature_dim_motif==0) or (flag_1==True):
x_train1_trans = x_train1_trans_ori[:,0:-motif_dim_ori]
else:
# d1 = np.min((dim1-motif_dim_ori+feature_dim2,d1))
# d2 = dim1-motif_dim_ori
# sel_id1 = list(range(21))+list(range(21,21+feature_dim1))
# x_train1_trans_1 = x_train1_trans[:,sel_id1]
# x_train1_trans_2 = x_train1_trans[:,d2:d1]
x_train1_trans_1 = x_train1_trans_ori[:,0:dim1-motif_dim_ori]
x_train1_trans_2 = x_train1_trans_ori[:,dim1-motif_dim_ori:]
else:
print('data not found!')
print(filename1)
return x_train1_trans, train_sel_list_ori
if kmer_size3 in self.kmer_size:
with h5py.File(filename2,'r') as fid:
serial2 = fid["serial"][:]
feature_mtx = fid["vec"][:]
# feature_mtx = feature_mtx[:,0:kmer_dim_ori]
print(serial2)
print(len(serial2),feature_mtx.shape)
flag2 = 1
if flag1==1:
if flag2==1:
t_serial = np.intersect1d(serial1,serial2)
id1 = mapping_Idx(serial1,t_serial)
id2 = mapping_Idx(serial2,t_serial)
if 'feature_dim_transform_1' in self.config:
sel_idx = self.config['feature_dim_transform_1']
else:
# default split (assumption): 10 dimensions from the precomputed transform, the rest from the kmer7 features
sel_idx = [10, feature_dim1-10]
# keep the first 21 columns plus the next sel_idx[0] transformed dimensions (cf. the commented selection above)
sel_id1 = list(range(21))+list(range(21,21+sel_idx[0]))
sel_id2 = list(range(sel_idx[1]))
if (self.feature_dim_motif==0) or (flag_1==True):
x_train1_trans = np.hstack((x_train1_trans[id1][:,sel_id1],feature_mtx[id2][:,sel_id2]))
else:
x_train1_trans = np.hstack((x_train1_trans_1[id1][:,sel_id1],feature_mtx[id2][:,sel_id2],x_train1_trans_2[id1,0:feature_dim2]))
train_sel_list_ori = train_sel_list_ori[id1]
else:
pass
elif flag2==1:
t_serial = np.intersect1d(serial1,serial2)
id1 = mapping_Idx(serial1,t_serial)
id2 = mapping_Idx(serial2,t_serial)
x_train1_trans = np.hstack((x_train1_trans_ori[id1,0:2],feature_mtx[id2,0:feature_dim1]))
train_sel_list_ori = train_sel_list_ori[id1]
self.feature_dim_select1 = -1
if (self.feature_dim_motif==1) and (flag_1==False):
x_train1_trans = np.hstack((x_train1_trans,x_train1_trans_2[id1,0:feature_dim2]))
# id1 = mapping_Idx(self.serial_ori,serial2)
# b1 = (id1>=0)
# id1 = id1[b1]
# serial2, feature_mtx = serial2[b1], feature_mtx[b1]
# chrom1 = self.chrom_ori[id1]
# chrom2 = np.zeros(len(serial2),dtype=np.int32)
# chrom_vec = np.unique(chrom1)
# for chrom_id in chrom_vec:
# b2 = np.where(chrom1==chrom_id)[0]
# chrom_id1 = int(chrom_id[3:])
# chrom2[b2] = chrom_id1
# x_train1_trans = feature_mtx[:,0:feature_dim1]
# trans_sel_list_ori = np.vstack((chrom2,serial2)).T
else:
print('data not found!')
return x_train1_trans, train_sel_list_ori
# prepare data from predefined features
def prep_data_sub1(self,path1,file_prefix,type_id2,feature_dim_transform,load_type=0):
self.feature_dim_transform = feature_dim_transform
# map_idx = mapping_Idx(serial_ori,serial)
sub_sample_ratio = 1
shuffle = 0
normalize, flanking, attention, run_id = self.normalize, self.flanking, self.attention, self.run_id
config = self.config
vec2 = dict()
tol = self.tol
L = flanking
# np.save(filename1)
print("feature transform")
# filename1 = '%s/%s_%d_%d_%d.npy'%(path1,file_prefix,type_id2,feature_dim_transform[0],feature_dim_transform[1])
print(self.species_id)
t_featuredim1, t_featuredim2 = feature_dim_transform[0], feature_dim_transform[1]
flag1 = False
if self.species_id=='hg38':
if 'motif_trans_typeid' in self.config:
flag1 = True
if (self.species_id=='mm10'):
flag1 = True
if (t_featuredim1>0) or (flag1==False):
x_train1_trans, train_sel_list_ori = self.prep_data_sub2(path1,file_prefix,type_id2,t_featuredim1,t_featuredim2,flag1)
if len(x_train1_trans)==0:
print('data not found!')
return -1
if t_featuredim2>0:
print('train_sel_list',train_sel_list_ori.shape)
print('x_train1_trans',x_train1_trans.shape)
if (self.feature_dim_motif>=1) and (flag1==True):
if self.species_id=='mm10':
annot1 = '%s_%d_motif'%(self.species_id,self.cell_type1)
else:
annot1 = '%s_motif'%(self.species_id)
motif_trans_typeid = self.config['motif_trans_typeid']
motif_featuredim = self.config['motif_featuredim']
motif_filename = '%s_%d_%d_trans.h5'%(annot1,motif_trans_typeid,motif_featuredim)
if motif_featuredim<t_featuredim2:
print('error! %d %d'%(motif_featuredim,t_featuredim2))
t_featuredim2 = motif_featuredim
with h5py.File(motif_filename,'r') as fid:
serial_1 = fid["serial"][:]
motif_data = fid["vec"][:]
print(len(serial_1),motif_data.shape)
serial1 = train_sel_list_ori[:,1]
serial2 = serial_1
t_serial = np.intersect1d(serial1,serial2)
id1 = mapping_Idx(serial1,t_serial)
id2 = mapping_Idx(serial2,t_serial)
x_train1_trans = np.hstack((x_train1_trans[id1],motif_data[id2,0:t_featuredim2]))
train_sel_list_ori = train_sel_list_ori[id1]
# train_sel_list_ori2 = serial_1[id2]
else:
print("data not found!")
return
x_train1_trans = self.feature_dim_select(x_train1_trans,feature_dim_transform)
# feature loaded not specific to cell type
if load_type==1:
return x_train1_trans, train_sel_list_ori
list1 = ['motif_feature','feature2']
for t_feature in list1:
if (t_feature in self.config) and (self.config[t_feature]==1):
if t_feature=='feature2':
pre_config = self.config['pre_config']
if self.chrom_num>0:
chrom_num = self.chrom_num
else:
chrom_num = len(np.unique(self.chrom))
chrom_vec = list(range(1,chrom_num+1))
feature_mtx2, serial_2 = self.prep_data_sequence_3(pre_config,chrom_vec)
else:
x = 1
x_train1_trans_ori1 = x_train1_trans.copy()
train_sel_list_ori1 = train_sel_list_ori.copy()
serial1 = train_sel_list_ori[:,1]
serial2 = serial_2[:,1]
t_serial = np.intersect1d(serial1,serial2)
id1 = mapping_Idx(serial1,t_serial)[0]
id2 = mapping_Idx(serial2,t_serial)[0]
x_train1_trans = np.hstack((x_train1_trans[id1],feature_mtx2[id2]))
train_sel_list_ori = train_sel_list_ori[id1]
train_sel_list_ori2 = serial_2[id2]
b1 = np.where(train_sel_list_ori[:,0]!=train_sel_list_ori2[:,0])[0]
if len(b1)>0:
print('error! train_sel_list_ori',len(b1))
if ('centromere' in self.config) and (self.config['centromere']==1):
regionlist_filename = 'hg38.centromere.bed'
serial1 = train_sel_list_ori[:,1]
serial_list1, centromere_serial = self.select_region(serial1, regionlist_filename)
id1 = mapping_Idx(serial1,serial_list1)
id1 = id1[id1>=0]
x_train1_trans = x_train1_trans[id1]
train_sel_list_ori = train_sel_list_ori[id1]
print(x_train1_trans.shape,train_sel_list_ori.shape)
print('positional encoding', self.pos_code)
print('feature dim',x_train1_trans.shape)
self.feature_dim = x_train1_trans.shape[1]
start = time.time()
if self.pos_code ==1:
x_train1_trans = self.positional_encoding1(x_train1_trans,train_sel_list_ori,self.feature_dim)
print(x_train1_trans.shape)
stop = time.time()
print('positional encoding', stop-start)
## shuffle array
if ('shuffle' in self.config) and (self.config['shuffle']==1):
x_train1_trans, shuffle_id1 = shuffle_array(x_train1_trans)
print('array shuffled')
# np.random.shuffle(x_tran1_trans)
# train_sel_list = train_sel_list[shuffle_id1]
elif ('noise' in self.config) and (self.config['noise']>0):
if self.config['noise']==1:
x_train1_trans = np.zeros_like(x_train1_trans)
print('x_train1_trans, noise 1', x_train1_trans[0:5])
elif self.config['noise']==2:
x_train1_trans = np.random.uniform(0,1,x_train1_trans.shape)
else:
x_train1_trans = np.random.normal(0,1,x_train1_trans.shape)
else:
pass
if 'sub_sample_ratio' in self.config:
sub_sample_ratio = self.config['sub_sample_ratio']
num_sample = len(train_sel_list_ori)
sub_sample = int(num_sample*sub_sample_ratio)
train_sel_list_ori = train_sel_list_ori[0:sub_sample]
x_train1_trans = x_train1_trans[0:sub_sample]
# align train_sel_list_ori and serial
print(train_sel_list_ori.shape,len(self.serial))
id1 = mapping_Idx(train_sel_list_ori[:,1],self.serial)
id2 = (id1>=0)
print('mapping',len(self.serial),np.sum(id2),len(self.serial),len(id2))
# self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id2], self.start[id2], self.stop[id2], self.serial[id2], self.signal[id2]
self.local_serial_1(id2)
id1 = id1[id2]
train_sel_list_ori = train_sel_list_ori[id1]
x_train1_trans = x_train1_trans[id1]
self.x_train1_trans = x_train1_trans
self.train_sel_list = train_sel_list_ori
return x_train1_trans, train_sel_list_ori
def output_generate_sequences(self,idx_sel_list,seq_list):
num1 = len(seq_list)
t_serial1 = idx_sel_list[:,1]
seq_list = np.asarray(seq_list)
t_serial = t_serial1[seq_list]
id1 = mapping_Idx(self.serial,t_serial[:,0])
chrom1, start1, stop1 = self.chrom[id1], self.start[id1], self.stop[id1]
id2 = mapping_Idx(self.serial,t_serial[:,1])
chrom2, start2, stop2 = self.chrom[id2], self.start[id2], self.stop[id2]
fields = ['chrom','start','stop','serial1','serial2']
data1 = pd.DataFrame(columns=fields)
data1['chrom'], data1['start'], data1['stop'] = chrom1, start1, stop2
data1['serial1'], data1['serial2'] = t_serial[:,0], t_serial[:,1]
data1['region_len'] = t_serial[:,1]-t_serial[:,0]+1
output_filename = 'test_seqList_%d_%d.txt'%(idx_sel_list[0][0],idx_sel_list[0][1])
data1.to_csv(output_filename,index=False,sep='\t')
return True
# prepare data from predefined features
def prep_data(self,path1,file_prefix,type_id2,feature_dim_transform):
x_train1_trans, train_sel_list_ori = self.prep_data_sub1(path1,file_prefix,type_id2,feature_dim_transform)
train_sel_list, val_sel_list, test_sel_list = self.prep_training_test(train_sel_list_ori)
# keys = ['train','valid','test']
keys = ['train','valid']
# self.idx_sel_list = {'train':train1_sel_list,'valid':val_sel_list,'test':test_sel_list}
idx_sel_list = {'train':train_sel_list,'valid':val_sel_list,'test':test_sel_list}
# self.idx_sel_list = idx_sel_list
# seq_list_train, seq_list_valid: both locally calculated
self.seq_list = dict()
start = time.time()
for i in keys:
self.seq_list[i] = generate_sequences(idx_sel_list[i],region_list=self.region_boundary)
print(len(self.seq_list[i]))
self.output_generate_sequences(idx_sel_list[i],self.seq_list[i])
stop = time.time()
print('generate_sequences', stop-start)
# generate initial state index
self.init_id = dict()
self.init_index(keys)
# training and validation data
# x_train1_trans = self.x_train1_trans
for i in keys:
idx = self.idx_list[i]
if self.method<5 or self.method in [56]:
self.x[i] = x_train1_trans[idx]
self.y[i] = self.y_signal[i]
print(self.x[i].shape, self.y[i].shape)
else:
idx_sel_list = self.train_sel_list[idx]
start = time.time()
x, y, self.vec[i], self.vec_local[i] = sample_select2a1(x_train1_trans[idx],self.y_signal[i],
idx_sel_list, self.seq_list[i], self.tol, self.flanking)
stop = time.time()
print('sample_select2a1',stop-start)
# concate context for baseline methods
if self.method<=10:
# x_train, x_valid, y_train, y_valid = train_test_split(x_train1, y_train1, test_size=0.2, random_state=42)
x = x.reshape(x.shape[0],x.shape[1]*x.shape[-1])
y = y[:,self.flanking]
self.x[i], self.y[i] = x, y
print(self.x[i].shape, self.y[i].shape)
return True
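# Note on the reshaping above for baseline methods (method<=10): with flanking L, the context
# window has 2*L+1 positions, so x of shape (num_sample, 2*L+1, feature_dim) is flattened to
# (num_sample, (2*L+1)*feature_dim) and y[:, self.flanking] keeps only the label of the centre
# position of each window.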
# prepare data from predefined features
def prep_data_1(self,path1,file_prefix,type_id2,feature_dim_transform,
n_fold=5, ratio=0.9, type_id=1):
x_train1_trans, train_sel_list_ori = self.prep_data_sub1(path1,file_prefix,type_id2,feature_dim_transform)
print(train_sel_list_ori)
id1 = mapping_Idx(train_sel_list_ori[:,1],self.serial)
id2 = (id1>=0)
print('mapping',len(self.serial),np.sum(id2))
self.chrom, self.start, self.stop, self.serial, self.signal = self.chrom[id2], self.start[id2], self.stop[id2], self.serial[id2], self.signal[id2]
id1 = id1[id2]
train_sel_list_ori = train_sel_list_ori[id1]
self.x_train1_trans = self.x_train1_trans[id1]
print(train_sel_list_ori.shape,self.x_train1_trans.shape)
id_vec = self.generate_index_2(train_sel_list_ori, n_fold=n_fold, ratio=ratio, type_id=type_id)
return id_vec
def find_serial_ori_1_local(self,chrom_vec,type_id2=1):
# filename1 = 'mm10_%d_%s_encoded1.h5'%(self.config['cell_type1'],chrom_id1)
self.species_id = 'mm10'
self.cell_type1 = self.config['cell_type1']
file_path1 = '/work/magroup/yy3/data1/replication_timing3/mouse'
# filename1 = '%s/mm10_5k_seq_genome%d_1.txt'%(file_path1,self.config['cell_type1'])
chrom_id1 = 'chr1'
filename1 = '%s_%d_%s_encoded1.h5'%(self.species_id,self.cell_type1,chrom_id1)
list1, list2 = [], []
serial_vec = []
print(filename1)
if os.path.exists(filename1)==False:
# prepare data from predefined features
# one hot encoded feature vectors for each chromosome
self.prep_data_sequence_ori()
print('prep_data_sequence_ori',filename1)
for chrom_id in chrom_vec:
# if chrom_id<22:
# continue
chrom_id1 = 'chr%s'%(chrom_id)
# if self.config['species_id']==0:
# filename2 = 'mm10_%d_%s_encoded1.h5'%(self.config['cell_type1'],chrom_id1)
# else:
# filename2 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
filename2 = '%s_%d_%s_encoded1.h5'%(self.species_id,self.cell_type1,chrom_id1)
with h5py.File(filename2,'r') as fid:
serial1 = fid["serial"][:]
if type_id2==1:
seq1 = fid["vec"][:]
list2.extend(seq1)
list1.extend([chrom_id]*len(serial1))
serial_vec.extend(serial1)
print(chrom_id,len(serial1))
list1, serial_vec = np.asarray(list1), np.asarray(serial_vec)
serial_vec = np.hstack((list1[:,np.newaxis],serial_vec))
f_mtx = np.asarray(list2)
# data_1 = pd.read_csv(filename1,sep='\t')
# colnames = list(data_1)
# local_serial = np.asarray(data_1['serial'])
# local_seq = np.asarray(data_1['seq'])
# print('local_seq', local_seq.shape)
# serial_vec = local_serial
# f_mtx = local_seq
# filename2 = '%s/mm10_5k_serial.bed'%(file_path1)
# file2 = pd.read_csv(filename2,header=None,sep='\t')
# ref_chrom, ref_start, ref_stop, ref_serial = np.asarray(file2[0]), np.asarray(file2[1]), np.asarray(file2[2]), np.asarray(file2[3])
# # assert list(local_serial==list(ref_serial))
# id_vec1 = []
# for chrom_id in chrom_vec:
# # if chrom_id<22:
# # continue
# # chrom_id1 = 'chr%s'%(chrom_id)
# id1 = np.where(ref_chrom=='chr%d'%(chrom_id))[0]
# id_vec1.extend(id1)
# print(chrom_id,len(id1))
# id_vec1 = np.asarray(id_vec1)
# ref_chrom_1, ref_serial_1 = ref_chrom[id_vec1], ref_serial[id_vec1]
# print('ref chrom local', len(ref_chrom_1), len(ref_serial_1))
# id1 = utility_1.mapping_Idx(ref_serial_1,local_serial)
# id2 = np.where(id1>=0)[0]
# id1 = id1[id2]
# # assert len(id2)==len(id1)
# chrom1 = ref_chrom_1[id1]
# local_chrom = [int(chrom1[3:]) for chrom1 in ref_chrom_1]
# local_chrom = np.asarray(local_chrom)
# local_serial, local_seq = local_serial[id2], local_seq[id2]
# serial_vec = np.column_stack((local_chrom,local_serial))
# f_mtx = np.asarray(local_seq)
return serial_vec, f_mtx
# find serial and feature vectors
# input: type_id1: load sequence feature or kmer frequency feature, motif feature
# type_id2: load serial or feature vectors
def find_serial_ori_1(self,file_path,file_prefix,chrom_vec,type_id1=0,type_id2=0,select_config={}):
# load the sequences
if type_id1==0:
# list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8)
filename1 = '%s_serial_2.txt'%(self.species_id)
list1, list2 = [], []
serial_vec = []
if (os.path.exists(filename1)==False) or (type_id2==1):
if self.config['species_id']==0:
serial_vec, list2 = self.find_serial_ori_1_local(chrom_vec)
else:
for chrom_id in chrom_vec:
# if chrom_id<22:
# continue
chrom_id1 = 'chr%s'%(chrom_id)
filename2 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
with h5py.File(filename2,'r') as fid:
serial1 = fid["serial"][:]
if type_id2==1:
seq1 = fid["vec"][:]
list2.extend(seq1)
list1.extend([chrom_id]*len(serial1))
serial_vec.extend(serial1)
print(chrom_id,len(serial1))
list1, serial_vec = np.asarray(list1), np.asarray(serial_vec)
serial_vec = np.hstack((list1[:,np.newaxis],serial_vec))
np.savetxt(filename1,serial_vec,fmt='%d',delimiter='\t')
else:
serial_vec = np.loadtxt(filename1,dtype=np.int64)
if serial_vec.shape[-1]>2:
cnt1 = serial_vec[:,-1]
b1 = np.where(cnt1>0)[0]
ratio1 = len(b1)/len(serial_vec)
print('sequence with N', len(b1),len(serial_vec),ratio1)
# serial_vec = serial_vec[:,0]
f_mtx = np.asarray(list2)
elif type_id1==2:
filename1 = select_config['input_filename1']
layer_name = select_config['layer_name']
with h5py.File(filename1,'r') as fid:
f_mtx = np.asarray(fid[layer_name][:],dtype=np.float32)
print(f_mtx.shape)
serial_vec = fid["serial"][:]
assert len(serial_vec )==f_mtx.shape[0]
print(serial_vec[0:5])
else:
# load kmer frequency features and motif features
load_type_id2 = 0
x_train1_trans, train_sel_list_ori = self.prep_data_sub1(file_path,file_prefix,load_type_id2,self.feature_dim_transform,load_type=1)
# serial_vec = train_sel_list_ori[:,1]
serial_vec = np.asarray(train_sel_list_ori)
f_mtx = np.asarray(x_train1_trans)
return serial_vec, f_mtx
def find_serial_ori(self,file_path,file_prefix,type_id1=0,type_id2=0,select_config={}):
chrom_vec = np.unique(self.chrom)
chrom_vec1 = []
for chrom_id in chrom_vec:
try:
id1 = chrom_id.find('chr')
if id1>=0:
chrom_id1 = int(chrom_id[3:])
chrom_vec1.append(chrom_id1)
except:
continue
chrom_vec1 = np.sort(chrom_vec1)
serial_vec, f_mtx = self.find_serial_ori_1(file_path,file_prefix,chrom_vec1,
type_id1=type_id1,type_id2=type_id2,
select_config=select_config)
self.serial_vec = serial_vec
self.f_mtx = f_mtx
# list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8)
print(len(self.chrom),len(self.serial))
# cnt1 = serial_vec[:,1]
# b1 = np.where(cnt1>0)[0]
# ratio1 = len(b1)/len(serial_vec)
# print(len(b1),len(serial_vec),ratio1)
id1 = mapping_Idx(serial_vec[:,1],self.serial)
b1 = np.where(id1>=0)[0]
self.local_serial_1(b1,type_id=0)
print(len(self.chrom),len(self.serial))
return True
def prep_data_2(self,file_path,file_prefix,seq_len_thresh=50):
self.find_serial_ori(file_path,file_prefix)
chrom_vec = np.unique(self.chrom)
chrom_vec1 = []
for chrom_id in chrom_vec:
try:
id1 = chrom_id.find('chr')
if id1>=0:
chrom_id1 = int(chrom_id[3:])
chrom_vec1.append(chrom_id1)
except:
continue
chrom_vec1 = np.sort(chrom_vec1)
sample_num = len(self.chrom)
idx_sel_list = -np.ones((sample_num,2),dtype=np.int64)
for chrom_id in chrom_vec1:
chrom_id1 = 'chr%d'%(chrom_id)
b1 = np.where(self.chrom==chrom_id1)[0]
idx_sel_list[b1,0] = [chrom_id]*len(b1)
idx_sel_list[b1,1] = self.serial[b1]
id1 = idx_sel_list[:,0]>=0
# apply the boolean mask before reassigning idx_sel_list, so the mask length still matches
y = self.signal[id1]
idx_sel_list = idx_sel_list[id1]
sample_num = len(idx_sel_list)
x_mtx = idx_sel_list
seq_list = generate_sequences(idx_sel_list, gap_tol=5, region_list=[])
seq_len = seq_list[:,1]-seq_list[:,0]+1
thresh1 = seq_len_thresh
b1 = np.where(seq_len>thresh1)[0]
print(len(seq_list),len(b1))
seq_list = seq_list[b1]
seq_len1 = seq_list[:,1]-seq_list[:,0]+1
print(sample_num,np.sum(seq_len1),seq_list.shape,np.max(seq_len),np.min(seq_len),np.median(seq_len),np.max(seq_len1),np.min(seq_len1),np.median(seq_len1))
self.output_generate_sequences(idx_sel_list,seq_list)
t_mtx, signal_mtx, vec1_serial, vec1_local = sample_select2a1(x_mtx, y, idx_sel_list, seq_list, tol=self.tol, L=self.flanking)
t_serial = vec1_serial[:,self.flanking]
context_size = vec1_serial.shape[1]
id1 = mapping_Idx(idx_sel_list[:,1],t_serial)
b1 = np.where(id1>=0)[0]
if len(b1)!=len(vec1_serial):
print('error!',len(b1),len(vec1_serial))
return -1
sel_id1 = id1[b1]
# idx_sel_list1 = idx_sel_list[sel_id1]
# label1 = y[sel_id1]
t_chrom = idx_sel_list[sel_id1,0]
print(t_chrom,t_serial)
print(t_chrom.shape,t_serial.shape)
print(vec1_serial.shape)
list_ID = []
cnt1 = 0
interval = 200
list1, list2 = [],[]
list3 = []
# region_unit_size = 5000
# list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8)
for chrom_id in chrom_vec1:
# if chrom_id<22:
# continue
chrom_id1 = 'chr%s'%(chrom_id)
filename1 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
t_id1 = np.where(t_chrom==chrom_id)[0]
t_serial1 = t_serial[t_id1] # serial by chromosome
sample_num1 = len(t_serial1)
num_segment = int(np.ceil(sample_num1/interval))
print(chrom_id1,num_segment,interval,sample_num1)
with h5py.File(filename1,'r') as fid:
serial1 = fid["serial"][:]
seq1 = fid["vec"][:]
serial1 = serial1[:,0]
print(serial1.shape, seq1.shape)
id1 = utility_1.mapping_Idx(serial1,t_serial1)
id2 = np.where(id1>=0)[0]
num1 = len(id2)
segment_id = 0
t_signal_mtx = signal_mtx[t_id1[id2]]
list3.extend(t_signal_mtx)
for i in range(num1):
cnt2 = i+1
t_id2 = id2[i]
label_serial = t_serial1[t_id2]
t_vec1_serial = vec1_serial[t_id1[t_id2]]
id_1 = mapping_Idx(serial1,t_vec1_serial)
b1 = np.where(id_1>=0)[0]
if len(b1)!=context_size:
b2 = np.where(id_1<0)[0]
print('error!',chrom_id1,label_serial,t_vec1_serial[b2],len(b1),context_size)
np.savetxt('temp1.txt',serial1,fmt='%d',delimiter='\t')
np.savetxt('temp2.txt',t_vec1_serial,fmt='%d',delimiter='\t')
return -1
t_mtx = seq1[id_1[b1]]
list1.append(t_vec1_serial)
list2.append(t_mtx)
local_id = cnt2%interval
label_id = cnt1
output_filename = 'test1_%s_%s_%d.h5'%(self.cell,chrom_id1,segment_id)
if (cnt2%interval==0) or (cnt2==num1):
output_filename1 = '%s/%s'%(file_path,output_filename)
list1 = np.asarray(list1)
list2 = np.asarray(list2,dtype=np.int8)
print(chrom_id1,segment_id,local_id,label_id,label_serial,list1.shape,list2.shape)
# with h5py.File(output_filename1,'w') as fid:
# fid.create_dataset("serial", data=list1, compression="gzip")
# fid.create_dataset("vec", data=list2, compression="gzip")
# dict1 = {'serial':list1.tolist(),'vec':list2.tolist()}
# np.save(output_filename,dict1,allow_pickle=True)
# with open(output_filename, "w") as fid:
# json.dump(dict1,fid)
# with open(output_filename,"w",encoding='utf-8') as fid:
# json.dump(dict1,fid,separators=(',', ':'), sort_keys=True, indent=4)
list1, list2 = [], []
segment_id += 1
cnt1 = cnt1+1
list_ID.append([label_id,label_serial,output_filename,local_id])
# if cnt2%interval==0:
# break
# with open(output_filename, "r") as fid:
# dict1 = json.load(fid)
# serial1, vec1 = np.asarray(dict1['serial']), np.asarray(dict1['vec'])
# print(serial1.shape,vec1.shape)
# with h5py.File(output_filename1,'r') as fid:
# serial1 = fid["serial"][:]
# vec1 = fid["vec"][:]
# print(serial1.shape,vec1.shape)
fields = ['label_id','label_serial','filename','local_id']
list_ID = np.asarray(list_ID)
data1 = pd.DataFrame(columns=fields,data=list_ID)
output_filename = '%s/%s_label_ID_1'%(file_path,self.cell)
data1.to_csv(output_filename+'.txt',index=False,sep='\t')
# np.save(output_filename,data1,allow_pickle=True)
output_filename = '%s/%s_label.h5'%(file_path,self.cell)
list3 = np.asarray(list3)
print(list3.shape)
with h5py.File(output_filename,'w') as fid:
fid.create_dataset("vec", data=np.asarray(list3), compression="gzip")
return list_ID
# find serial for training and validation data
def prep_data_2_sub1(self,file_path,file_prefix,type_id1=0,type_id2=0,gap_tol=5,seq_len_thresh=5,select_config={}):
if type_id1>=0:
self.find_serial_ori(file_path,file_prefix,
type_id1=type_id1,type_id2=type_id2,
select_config=select_config)
chrom_vec = np.unique(self.chrom)
chrom_vec1 = []
for chrom_id in chrom_vec:
try:
id1 = chrom_id.find('chr')
if id1>=0:
chrom_id1 = int(chrom_id[3:])
chrom_vec1.append(chrom_id1)
except:
continue
chrom_vec1 = np.sort(chrom_vec1)
sample_num = len(self.chrom)
idx_sel_list = -np.ones((sample_num,2),dtype=np.int64)
if 'gap_thresh' in self.config:
gap_tol = self.config['gap_thresh']
if 'seq_len_thresh' in self.config:
seq_len_thresh = self.config['seq_len_thresh']
for chrom_id in chrom_vec1:
chrom_id1 = 'chr%d'%(chrom_id)
b1 = np.where(self.chrom==chrom_id1)[0]
idx_sel_list[b1,0] = [chrom_id]*len(b1)
idx_sel_list[b1,1] = self.serial[b1]
id1 = idx_sel_list[:,0]>=0
# apply the boolean mask before reassigning idx_sel_list, so the mask length still matches
y = self.signal[id1]
idx_sel_list = idx_sel_list[id1]
sample_num = len(idx_sel_list)
x_mtx = idx_sel_list
self.train_sel_list_ori = idx_sel_list
self.y_signal_1 = y
ref_serial = idx_sel_list[:,1]
# train_sel_list, val_sel_list = train1_sel_list[idx_train], train1_sel_list[idx_valid]
# self.idx_list.update({'train':train_id1[idx_train],'valid':train_id1[idx_valid]})
# self.idx_train_val = {'train':idx_train,'valid':idx_valid}
# self.y_signal.update({'train':y_signal_train1[idx_train],'valid':y_signal_train1[idx_valid]})
train_sel_list, val_sel_list, test_sel_list = self.prep_training_test(idx_sel_list)
print(len(train_sel_list),len(val_sel_list),len(test_sel_list))
keys = ['train','valid','test']
# keys = ['train','valid']
# self.idx_sel_list = {'train':train1_sel_list,'valid':val_sel_list,'test':test_sel_list}
self.idx_sel_list_ori = {'train':train_sel_list,'valid':val_sel_list,'test':test_sel_list}
# self.idx_sel_list = idx_sel_list
# seq_list_train, seq_list_valid: both locally calculated
self.seq_list = dict()
start = time.time()
# seq_len_thresh = 20
self.local_serial_dict = dict()
for i in keys:
# self.seq_list[i] = generate_sequences(idx_sel_list1[i],region_list=self.region_boundary)
# print(len(self.seq_list[i]))
# self.output_generate_sequences(idx_sel_list[i],self.seq_list[i])
idx_sel_list1 = self.idx_sel_list_ori[i]
# region_list_id = 'region_list_%s'%(i)
# if region_list_id in self.config:
# region_list = self.config[region_list_id]
# else:
# region_list = []
# region_list = np.asarray(region_list)
# print(region_list_id,region_list)
# if i=='test':
# region_boundary = self.region_boundary
# else:
# region_boundary = []
region_boundary = self.region_boundary
print('region_boundary',region_boundary)
# assert len(region_boundary)==0
seq_list = generate_sequences(idx_sel_list1, gap_tol=gap_tol, region_list=region_boundary)
# seq_len = seq_list[:,1]-seq_list[:,0]+1
# thresh1 = seq_len_thresh
# b1 = np.where(seq_len>thresh1)[0]
# print(len(seq_list),len(b1))
# seq_list = seq_list[b1]
# seq_len1 = seq_list[:,1]-seq_list[:,0]+1
# print(sample_num,np.sum(seq_len1),len(seq_list),np.max(seq_len),np.min(seq_len),np.median(seq_len),np.max(seq_len1),np.min(seq_len1),np.median(seq_len1))
# reselect the regions according to the subsequence length
# recalculate seq_list
idx_sel_list1, seq_list = self.select_region_local_1(idx_sel_list1,seq_list,
gap_tol=gap_tol,
seq_len_thresh=seq_len_thresh,
region_list=[])
self.idx_sel_list_ori[i] = idx_sel_list1
self.seq_list[i] = seq_list
x1 = idx_sel_list1
sel_id = utility_1.mapping_Idx(ref_serial,idx_sel_list1[:,1])
y1 = self.y_signal_1[sel_id]
x, y, t_vec_serial, t_vec_local = sample_select2a1(x1,y1,
idx_sel_list1, seq_list, self.tol, self.flanking)
t_serial1 = t_vec_serial[:,self.flanking]
# if np.sum(t_serial1!=sel_idx_list1[:,1])>0:
# print('error!',i)
# return
id1 = utility_1.mapping_Idx(idx_sel_list1[:,1],t_serial1)
b1 = np.where(id1>=0)[0]
if len(b1)!=len(t_serial1):
print('error!',i)
return
idx_sel_list1 = idx_sel_list1[id1[b1]]
self.local_serial_dict[i] = [idx_sel_list1,y1,y,t_vec_serial,t_vec_local]
print(i,t_serial1.shape,y.shape)
stop = time.time()
print('generate_sequences', stop-start)
return self.local_serial_dict
# load feature
def load_feature_local(self,chrom_vec,type_id=0,select_config={}):
# load sequences
if type_id==0:
serial_vec = []
list1, list2 = [],[]
# list2 = np.zeros((interval,region_unit_size,4),dtype=np.int8)
if self.config['species_id']==0:
serial_vec, f_mtx = self.find_serial_ori_1_local(chrom_vec)
else:
for chrom_id in chrom_vec:
# if chrom_id<22:
# continue
chrom_id1 = 'chr%s'%(chrom_id)
filename1 = '%s_%s_encoded1.h5'%(self.species_id,chrom_id1)
with h5py.File(filename1,'r') as fid:
serial1 = fid["serial"][:]
seq1 = fid["vec"][:]
serial_vec.extend(serial1)
list1.extend([chrom_id]*len(serial1))
list2.extend(seq1)
print(len(serial1),seq1.shape)
list1 = np.asarray(list1)
serial_vec = np.hstack((list1[:,np.newaxis],serial_vec))
f_mtx = np.asarray(list2)
# kmer frequency and motif feature
elif type_id==1:
if len(self.serial_vec)>0 and (len(self.f_mtx)>0):
serial_vec = self.serial_vec
f_mtx = self.f_mtx
else:
type_id2 = 0
x_train1_trans, train_sel_list_ori = self.prep_data_sub1(self.file_path,self.file_prefix,type_id2,self.feature_dim_transform,load_type=1)
# serial_vec = train_sel_list_ori[:,1]
serial_vec = np.asarray(train_sel_list_ori)
f_mtx = np.asarray(x_train1_trans)
else:
filename1 = select_config['input_filename1']
layer_name = select_config['layer_name']
with h5py.File(filename1,'r') as fid:
f_mtx = np.asarray(fid[layer_name][:],dtype=np.float32)
print(f_mtx.shape)
serial_vec = fid["serial"][:]
assert len(serial_vec )==f_mtx.shape[0]
print(serial_vec[0:5])
return serial_vec, f_mtx
# find serial
def find_serial_local(self,ref_serial,vec_serial_ori,sel_id):
serial_1 = vec_serial_ori[:,self.flanking]
# print(len(ref_serial),ref_serial)
# print(len(serial_1),serial_1)
assert np.max(np.abs(ref_serial-serial_1))==0
t_vec_serial = np.ravel(vec_serial_ori[sel_id])
serial1 = np.unique(t_vec_serial)
id1 = mapping_Idx(ref_serial,serial1)
b1 = np.where(id1<0)[0]
if len(b1)>0:
print('error!',len(b1))
print(serial1[b1])
b_1 = np.where(id1>=0)[0]
id1 = id1[b_1]
sample_num = len(ref_serial)
id2 = np.setdiff1d(np.arange(sample_num),id1)
if len(id2)>0:
t_serial2 = ref_serial[id2]
id_2 = mapping_Idx(serial_1,t_serial2)
sel_id = list(sel_id)+list(id_2)
sel_id = np.unique(sel_id)
print('find serial local',len(sel_id),len(id_2))
return sel_id
# load training and validation data
def prep_data_2_sub2(self,type_id1=0,keys=['train','valid'],stride=1,type_id=0,select_config={}):
chrom1 = []
for i in range(0,len(keys)):
key1 = keys[i]
idx_sel_list, y_ori, y, vec_serial, vec_local = self.local_serial_dict[key1]
chrom1.extend(idx_sel_list[:,0])
chrom_vec1 = np.sort(np.unique(chrom1))
serial_vec, f_mtx = self.load_feature_local(chrom_vec1,type_id=type_id1,select_config=select_config)
print('load feature local', serial_vec.shape, f_mtx.shape)
if serial_vec.shape[1]>2:
cnt1 = serial_vec[:,-1]
b1 = np.where(cnt1>0)[0]
ratio1 = len(b1)/len(serial_vec)
print(len(b1),len(serial_vec),ratio1)
ref_serial = serial_vec[:,1]
for i in range(0,len(keys)):
key1 = keys[i]
idx_sel_list, y_ori, y, vec_serial, vec_local = self.local_serial_dict[key1]
num1 = len(idx_sel_list)
if stride>1:
id1 = list(range(0,num1,stride))
# the windows cover the positions
print(num1,stride)
if type_id==1:
id1 = self.find_serial_local(idx_sel_list[:,1],vec_serial,id1)
y, vec_serial, vec_local = y[id1], vec_serial[id1], vec_local[id1]
self.local_serial_dict[key1] = [idx_sel_list, y_ori, y, vec_serial, vec_local]
id2 = mapping_Idx(ref_serial,idx_sel_list[:,1])
print(key1,len(ref_serial),len(idx_sel_list))
print(ref_serial[0:5])
print(idx_sel_list[0:5,1])
b1 = np.where(id2<0)[0]
if len(b1)>0:
print('error!',len(b1),key1)
# return
print('mapping',len(id2))
# update
b_1 = np.where(id2>=0)[0]
id2 = id2[b_1]
idx_sel_list, y_ori = idx_sel_list[b_1], y_ori[b_1]
y, vec_serial, vec_local = y[b_1], vec_serial[b_1], vec_local[b_1]
self.local_serial_dict[key1] = [idx_sel_list, y_ori, y, vec_serial, vec_local]
self.x[key1] = f_mtx[id2]
self.idx[key1] = id2
return True
# training and predition with sequences
def control_pre_test1_repeat(self,path1,file_prefix,run_id_load=-1):
self.prep_data_2_sub1(path1,file_prefix)
config = self.config.copy()
units1=[50,50,32,25,50,25,0,0]
flanking = 50
context_size = 2*flanking+1
n_step_local_ori = 5000
region_unit_size = 1
feature_dim = 4
local_conv_list1 = []
regularizer2, bnorm, activation = 1e-04, 1, 'relu'
if self.run_id==110001:
config_vec1 = [[64, 15, 5, 1, 2, 2, 0.2, 0],
[32, 5, 1, 1, 10, 10, 0.2, 0],
[32, 3, 1, 1, 5, 5, 0.2, 0]]
for t1 in config_vec1:
n_filters, kernel_size1, stride, dilation_rate1, pool_length1, stride1, drop_out_rate, boundary = t1
conv_1 = [n_filters, kernel_size1, stride, regularizer2, dilation_rate1, boundary, bnorm, activation, pool_length1, stride1, drop_out_rate]
local_conv_list1.append(conv_1)
config['local_conv_list1'] = local_conv_list1
print(local_conv_list1)
feature_dim1, feature_dim2, return_sequences_flag1, sample_local, pooling_local = 32, 25, True, 0, 0
n_step_local1 = 10
feature_dim3 = []
local_vec_1 = [feature_dim1, feature_dim2, feature_dim3, return_sequences_flag1, sample_local, pooling_local]
attention2_local = 0
select2 = 1
concatenate_1, concatenate_2 = 0, 1
hidden_unit = 32
regularizer2_2 = 1e-04
config.update({'attention1':0,'attention2':1,'select2':select2,'context_size':context_size,'n_step_local':n_step_local1,'n_step_local_ori':n_step_local_ori})
config.update({'local_vec_1':local_vec_1,'attention2_local':attention2_local})
config['feature_dim_vec'] = units1[2:]
config['feature_dim_vec_basic'] = units1[2:]
config.update({'local_conv_list1':local_conv_list1,'local_vec_1':local_vec_1})
config.update({'attention1':0,'attention2':1,'context_size':context_size,
'n_step_local_ori':n_step_local_ori})
config.update({'select2':select2,'attention2_local':attention2_local})
config.update({'concatenate_1':concatenate_1,'concatenate_2':concatenate_2})
config.update({'feature_dim':feature_dim,'output_dim':hidden_unit,'regularizer2_2':regularizer2_2})
model = utility_1.get_model2a1_attention_1_2_2_sample5(config)
# find feature vectors with the serial
self.x = dict()
self.idx = dict()
self.prep_data_2_sub2(type_id1=0,keys=['train','valid'],stride=1)
mtx_train = self.x['train']
idx_sel_list_train, y_train_ori_1, y_train_ori, vec_serial_train, vec_local_train = self.local_serial_dict['train']
mtx_valid = self.x['valid']
idx_sel_list_valid, y_valid_ori_1, y_valid_ori, vec_serial_valid, vec_local_valid = self.local_serial_dict['valid']
train_num1, valid_num1 = len(y_train_ori), len(y_valid_ori)
print('train',len(idx_sel_list_train),len(y_train_ori),mtx_train.shape)
print('valid',len(idx_sel_list_valid),len(y_valid_ori),mtx_valid.shape)
x_valid = mtx_valid[vec_local_valid]
y_valid = y_valid_ori
print(x_valid.shape,y_valid.shape)
type_id2 = 2
MODEL_PATH = 'test%d.h5'%(self.run_id)
n_epochs = 1
BATCH_SIZE = 32
n_step_local = n_step_local_ori
earlystop = EarlyStopping(monitor='val_loss', min_delta=self.min_delta, patience=self.step, verbose=1, mode='auto')
checkpointer = ModelCheckpoint(filepath=MODEL_PATH, monitor='val_loss', verbose=1, save_best_only=True, save_weights_only=False)
num_sample1 = 1
interval = 2500
select_num = int(np.ceil(train_num1/interval))
# select_num1 = select_num*interval
# print(num_sample1,select_num,interval,select_num1)
if select_num>1:
t1 = np.arange(0,train_num1,interval)
pos = np.vstack((t1,t1+interval)).T
pos[-1][1] = train_num1
print(train_num1,select_num,interval)
print(pos)
else:
pos = [[0,train_num1]]
start2 = time.time()
train_id_1 = np.arange(train_num1)
valid_id_1 = np.arange(valid_num1)
np.random.shuffle(valid_id_1)
#!/usr/bin/env python3
import argparse
import os
import sys
import re
import math
import warnings
import time
import struct
from collections import defaultdict
import pandas as pd
import numpy as np
import hicstraw
import cooler
from scipy.stats import expon
from scipy.ndimage import gaussian_filter
# scipy.ndimage.filters and scipy.ndimage.measurements are deprecated submodule paths
from scipy.ndimage import maximum_filter
from scipy.signal import convolve2d
import scipy.ndimage as scipy_measurements
from scipy import sparse
from statsmodels.stats.multitest import multipletests
from multiprocessing import Process, Manager
def parseBP(s):
"""
:param s: string
:return: the value in base pairs as an int, taking kb/mb suffixes into account; False if the string cannot be parsed
"""
if not s:
return False
if s.isnumeric():
return int(s)
s = s.lower()
if "kb" in s:
n = s.split("kb")[0]
if not n.isnumeric():
return False
return int(n) * 1000
elif "mb" in s:
n = s.split("mb")[0]
if not n.isnumeric():
return False
return int(n) * 1000000
return False
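# Examples: parseBP("5") -> 5, parseBP("5kb") -> 5000, parseBP("2Mb") -> 2000000;
# parseBP("5.5kb") and parseBP("") return False because only integer prefixes are accepted.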
def parse_args(args):
parser = argparse.ArgumentParser(description="Check the help flag")
parser.add_argument("-f",
"--file",
dest="f_path",
help="REQUIRED: Contact map",
required=False)
parser.add_argument("-d",
"--distance",
dest="distFilter",
help="REQUIRED: Maximum distance (in bp) allowed between loop loci",
required=False)
parser.add_argument("-o",
"--outfile",
dest="outdir",
help="REQUIRED: Name of the output file.\
Output is a numpy binary.",
required=True)
parser.add_argument("-r",
"--resolution",
dest="resolution",
help="REQUIRED: Resolution used for the contact maps",
required=True)
parser.add_argument("-bed", "--bed", dest="bed",
help="BED file for HiC-Pro type input",
default="",
required=False)
parser.add_argument("-m", "--matrix", dest="mat",
help="MATRIX file for HiC-Pro type input",
default="",
required=False)
parser.add_argument("-b", "--biases", dest="biasfile",
help="RECOMMENDED: biases calculated by\
ICE or KR norm for each locus for contact map are read from BIASFILE",
required=False)
parser.add_argument(
"-cz",
"--chromosomeSize",
default="",
dest="chrSize_file",
help="RECOMMENDED: .hic corressponfing chromosome size file.",
required=False)
parser.add_argument(
"-norm",
"--normalization",
default=False,
dest="norm_method",
help="RECOMMENDED: Hi-C normalization method (KR, VC,...).",
required=False)
# parser.add_argument("-cb",
# '--cooler-balance',
# dest='cooler_balance',
# default=False,
# #action='store_false',
# required=False,
# help="OPTIONAL: The cooler data was normalized prior to creating the .cool file.")
# parser.set_defaults(cooler_balance=False)
parser.add_argument(
"-st",
"--sparsityThreshold",
dest="st",
type=float,
default=0.88,
help="OPTIONAL: Mustache filters out contacts in sparse areas, you can relax this for sparse datasets(i.e. -st 0.8). Default value is 0.88.",
required=False)
parser.add_argument(
"-pt",
"--pThreshold",
dest="pt",
type=float,
default=0.2,
help="OPTIONAL: P-value threshold for the results in the final output. Default is 0.2",
required=False)
parser.add_argument(
"-sz",
"--sigmaZero",
dest="s_z",
type=float,
default=1.6,
help="OPTIONAL: sigma0 value for the method. DEFAULT is 1.6. \
Experimentally chosen for 5Kb resolution",
required=False)
parser.add_argument("-oc", "--octaves", dest="octaves", default=2,
type=int,
help="OPTIONAL: Octave count for the method. \
DEFAULT is 2.",
required=False)
parser.add_argument("-i", "--iterations", dest="s", default=10,
type=int,
help="OPTIONAL: iteration count for the method. \
DEFAULT is 10. Experimentally chosen for \
5Kb resolution",
required=False)
parser.add_argument("-p", "--processes", dest="nprocesses", default=4, type=int,
help="OPTIONAL: Number of parallel processes to run. DEFAULT is 4. Increasing this will also increase the memory usage",
required=False)
# parser.add_argument("-c",
# "--changefile",
# dest="changedir",
# help="...",
# required=False,
# default="")
parser.add_argument(
"-ch",
"--chromosome",
dest="chromosome",
nargs='+',
help="REQUIRED: Specify which chromosome to run the program for. Optional for cooler files.",
default='n',
required=False)
parser.add_argument(
"-ch2",
"--chromosome2",
dest="chromosome2",
nargs='+',
help="Optional: Specify the second chromosome for interchromosomal analysis.",
default='n',
required=False)
parser.add_argument("-v",
"--verbose",
dest="verbose",
type=bool,
default=True,
help="OPTIONAL: Verbosity of the program",
required=False)
return parser.parse_args()
def kth_diag_indices(a, k):
rows, cols = np.diag_indices_from(a)
if k < 0:
return rows[-k:], cols[:k]
elif k > 0:
return rows[:-k], cols[k:]
else:
return rows, cols
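# Example: for a 4x4 matrix a, kth_diag_indices(a, 1) returns rows [0,1,2] and cols [1,2,3]
# (the first super-diagonal); k=0 gives the main diagonal and k<0 selects sub-diagonals.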
def is_chr(s, c):
# if 'X' == c or 'chrX':
# return 'X' in c
# if 'Y' == c:
# return 'Y' in c
return str(c).replace('chr', '') == str(s).replace('chr', '') # re.findall("[1-9][0-9]*", str(s))
def get_sep(f):
"""
:param f: file path
:return: Guesses the value separator in the file.
"""
with open(f) as file:
for line in file:
if "\t" in line:
return '\t'
if " " in line.strip():
return ' '
if "," in line:
return ','
if len(line.split(' ')) == 1:
return ' '
break
raise FileNotFoundError
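# Example: for a file whose first line is "chr1\t0\t1.02", get_sep returns '\t'; a file with one
# value per line falls back to ' '.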
def read_bias(f, chromosome, res):
"""
:param f: Path to the bias file
:return: Dictionary where keys are the bin coordinates and values are the bias values.
"""
d = defaultdict(lambda: 1.0)
if f:
sep = get_sep(f)
with open(f) as file:
for pos, line in enumerate(file):
line = line.strip().split(sep)
if len(line) == 3:
if is_chr(line[0], chromosome):
val = float(line[2])
if not np.isnan(val):
if val < 0.2:
d[(float(line[1]) // res)] = np.Inf
else:
d[(float(line[1]) // res)] = val
else:
d[(float(line[1]) // res)] = np.Inf
elif len(line) == 1:
val = float(line[0])
if not np.isnan(val):
if val < 0.2:
d[pos] = np.Inf
else:
d[pos] = val
else:
d[pos] = np.Inf
return d
return False
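# The returned defaultdict maps bin index (position // res) to its normalization factor and
# defaults to 1.0 for unlisted bins; bins with NaN or bias < 0.2 are set to np.Inf so their
# contacts become 0 after division and are filtered out in read_pd. Hypothetical example:
#   bias = read_bias('chr1_KR.bias', 'chr1', 5000)
#   bias[10.0]   # factor of the bin starting at 50kb, or 1.0 if the bin is absent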
def read_pd(f, distance_in_bp, bias, chromosome, res):
sep = get_sep(f)
df = pd.read_csv(f, sep=sep, header=None)
df.dropna(inplace=True)
if df.shape[1] == 5:
df = df[np.vectorize(is_chr)(df[0], chromosome)]
if df.shape[0] == 0:
print('Couldn\'t read any interaction for this chromosome!')
return
df = df[np.vectorize(is_chr)(df[2], chromosome)]
df = df.loc[np.abs(df[1] - df[3]) <= ((distance_in_bp / res + 1) * res), :]
df[1] //= res
df[3] //= res
bias = read_bias(bias, chromosome, res)
if bias:
factors = np.vectorize(bias.get)(df[1], 1)
df[4] = np.divide(df[4], factors)
factors = np.vectorize(bias.get)(df[3], 1)
df[4] = np.divide(df[4], factors)
df = df.loc[df[4] > 0, :]
x = np.min(df.loc[:, [1, 3]], axis=1)
y = np.max(df.loc[:, [1, 3]], axis=1)
val = np.array(df[4])
elif df.shape[1] == 3:
df = df.loc[np.abs(df[1] - df[0]) <= ((distance_in_bp / res + 1) * res), :]
df[0] //= res
df[1] //= res
bias = read_bias(bias, chromosome, res)
if bias:
factors = np.vectorize(bias.get)(df[0], 1)
df[2] = np.divide(df[2], factors)
factors = np.vectorize(bias.get)(df[1], 1)
df[2] = np.divide(df[2], factors)
df = df.loc[df[2] > 0, :]
x = np.min(df.loc[:, [0, 1]], axis=1)
y = np.max(df.loc[:, [0, 1]], axis=1)
val = np.array(df[2])
return x, y, val
def read_hic_file(f, norm_method, CHRM_SIZE, distance_in_bp, chr1, chr2, res):
"""
:param f: .hic file path
:param chr: Which chromosome to read the file for
:param res: Resolution to extract information from
    :return: Arrays of contact bin positions (x, y) and counts (val)
"""
if not CHRM_SIZE:
hic = hicstraw.HiCFile(f)
chromosomes = hic.getChromosomes()
chrSize_in_bp = {}
for i in range(1, len(chromosomes)):
chrSize_in_bp["chr" + str(chromosomes[i].name).replace("chr", '')] = chromosomes[i].length
CHRM_SIZE = chrSize_in_bp["chr" + chr1.replace("chr", '')]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHRM_SIZE, CHUNK_SIZE * res) # CHUNK_SIZE*res
result = []
val = []
while start < CHRM_SIZE:
print(int(start), int(end))
if not norm_method:
temp = hicstraw.straw("observed", "KR", f, str(chr1) + ":" + str(int(start)) + ":" + str(int(end)),
str(chr2) + ":" + str(int(start)) + ":" + str(int(end)), "BP", res)
else:
temp = hicstraw.straw("observed", str(norm_method), f,
str(chr1) + ":" + str(int(start)) + ":" + str(int(end)),
str(chr2) + ":" + str(int(start)) + ":" + str(int(end)), "BP", res)
if len(temp) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
        if result == []:
            # store as three column lists (binX, binY, counts) to match how the
            # later chunks and the final np.array(result[0]) / result[1] / result[2]
            # accesses expect the data to be laid out
            result += [[int(record.binX) for record in temp],
                       [int(record.binY) for record in temp],
                       [record.counts for record in temp]]
            prev_block = set([(int(record.binX), int(record.binY), record.counts) for record in temp])
else:
cur_block = set([(int(record.binX), int(record.binY), record.counts) for record in temp])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
x = np.array(result[0]) // res
y = np.array(result[1]) // res
val = np.array(result[2])
nan_indx = np.logical_or.reduce((np.isnan(result[0]), np.isnan(result[1]), np.isnan(result[2])))
x = x[~nan_indx]
y = y[~nan_indx]
val = val[~nan_indx]
x = x.astype(int)
y = y.astype(int)
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
    if len(val) > 0:
return x, y, val
else:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
def read_cooler(f, distance_in_bp, chr1, chr2, cooler_balance):
"""
:param f: .cool file path
:param chr: Which chromosome to read the file for
    :return: Arrays of contact bin positions (x, y), counts (val) and the file resolution
"""
clr = cooler.Cooler(f)
res = clr.binsize
print(f'Your cooler data resolution is {res}')
if chr1 not in clr.chromnames or chr2 not in clr.chromnames:
raise NameError('wrong chromosome name!')
CHRM_SIZE = clr.chromsizes[chr1]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHUNK_SIZE * res, CHRM_SIZE) # CHUNK_SIZE*res
result = []
val = []
###########################
if chr1 == chr2:
# try:
# normVec = clr.bins()['weight'].fetch(chr1)
# result = clr.matrix(balance=True,sparse=True).fetch(chr1)#as_pixels=True, join=True
while start < CHRM_SIZE:
print(int(start), int(end))
if not cooler_balance:
temp = clr.matrix(balance=True, sparse=True).fetch((chr1, int(start), int(end)))
else:
temp = clr.matrix(balance=cooler_balance, sparse=True).fetch((chr1, int(start), int(end)))
temp = sparse.triu(temp)
np.nan_to_num(temp, copy=False, nan=0, posinf=0, neginf=0)
start_in_px = int(start / res)
if len(temp.row) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [list(start_in_px + temp.row), list(start_in_px + temp.col), list(temp.data)]
prev_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
else:
cur_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
# except:
# raise NameError('Reading from the file failed!')
if len(result) == 0:
            print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
x = np.array(result[0])
y = np.array(result[1])
val = np.array(result[2])
else:
result = clr.matrix(balance=True, sparse=True).fetch(chr1, chr2)
result = sparse.triu(result)
np.nan_to_num(result, copy=False, nan=0, posinf=0, neginf=0)
x = result.row
y = result.col
val = result.data
##########################
if len(val) == 0:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
else:
val[np.isnan(val)] = 0
if (chr1 == chr2):
dist_f = np.logical_and(np.abs(x - y) <= distance_in_bp / res, val > 0)
x = x[dist_f]
y = y[dist_f]
val = val[dist_f]
# return np.array(x),np.array(y),np.array(val), res, normVec
    if len(val) > 0:
return np.array(x), np.array(y), np.array(val), res
else:
        print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], [], res
def read_mcooler(f, distance_in_bp, chr1, chr2, res, cooler_balance):
"""
:param f: .cool file path
:param chr: Which chromosome to read the file for
:param res: Resolution to extract information from
    :return: Arrays of contact bin positions (x, y) and counts (val)
"""
uri = '%s::/resolutions/%s' % (f, res)
# uri = '%s::/7' % (f)
clr = cooler.Cooler(uri)
# print(clr.bins()[:100])
if chr1 not in clr.chromnames or chr2 not in clr.chromnames:
raise NameError('wrong chromosome name!')
CHRM_SIZE = clr.chromsizes[chr1]
CHUNK_SIZE = max(2 * distance_in_bp / res, 2000)
start = 0
end = min(CHRM_SIZE, CHUNK_SIZE * res) # CHUNK_SIZE*res
result = []
val = []
if chr1 == chr2:
try:
# result = clr.matrix(balance=True,sparse=True).fetch(chr1)#as_pixels=True, join=True
while start < CHRM_SIZE:
print(int(start), int(end))
if not cooler_balance:
temp = clr.matrix(balance=True, sparse=True).fetch((chr1, int(start), int(end)))
else:
temp = clr.matrix(balance=cooler_balance, sparse=True).fetch((chr1, int(start), int(end)))
temp = sparse.triu(temp)
np.nan_to_num(temp, copy=False, nan=0, posinf=0, neginf=0)
start_in_px = int(start / res)
if len(temp.row) == 0:
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
continue
if result == []:
result += [list(start_in_px + temp.row), list(start_in_px + temp.col), list(temp.data)]
prev_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
# print('result==[]')
else:
cur_block = set(
[(x, y, v) for x, y, v in zip(start_in_px + temp.row, start_in_px + temp.col, temp.data)])
to_add_list = list(cur_block - prev_block)
del prev_block
result[0] += [x[0] for x in to_add_list]
result[1] += [x[1] for x in to_add_list]
result[2] += [x[2] for x in to_add_list]
prev_block = cur_block
del cur_block
start = min(start + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE)
if end == CHRM_SIZE - 1:
break
else:
end = min(end + CHUNK_SIZE * res - distance_in_bp, CHRM_SIZE - 1)
except:
raise NameError('Reading from the file failed!')
if len(result) == 0:
            print(f'There is no contact in chromosome {chr1} to work on.')
return [], [], []
x = np.array(result[0])
        y = np.array(result[1])
import numpy as np
def groupby(keys, values, agg):
    aggregated = {key: agg(values[keys == key]) for key in np.unique(keys)}
    return aggregated
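# Added example (illustrative): group values by key and aggregate each group
# with np.sum.
def _example_groupby():
    keys = np.array(["a", "b", "a"])
    values = np.array([1.0, 2.0, 3.0])
    assert groupby(keys, values, np.sum) == {"a": 4.0, "b": 2.0}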
"""
Tests of the pipeline for topography containers
"""
import numpy as np
from SurfaceTopography import read_container
def test_scale_dependent_statistical_property_uniform(file_format_examples):
c, = read_container(f'{file_format_examples}/container1.zip')
s = c.scale_dependent_statistical_property(lambda x, y: np.var(x), n=1, distance=[0.01, 0.1, 1.0, 10], unit='um')
    assert (np.diff(s) < 0).all()
#!/usr/bin/env python3
from multiprocessing.pool import ThreadPool
import os, time, subprocess, sys
import numpy as np
from numpy import pi
import os.path
of = False
def calculate_error(_params, _erads, _eres, log=None):
'''
    _params -- vector of model parameters
0: na in control sample (Noggin2 production)
1: nx in all samples (Noggin2 degradation in absence of MMP3)
2: mmp3 amount in control sample
3: sa in all samples (SMAD cascade intensity)
4: sb in all samples (BMP amount for SMAD switch)
5: smad_threshold determines bifurcation point for SMAD amount (can be
correlated with *sa*)
6: nu determines rate of delta reaction to other agents
7: h determines the strength of delta hysteresis
8: tb determines the strength of chordin degradation inhibition by mmp3.
Less tb - more inhibition.
9: ca, production rate of Chordin in all samples
10: cx, degratation rate of Chordin in all samples (in absence of MMP3)
11: s0 for definition of de_init parameter
12: (psa-cth) delta of threshold determines value of SMAD activation, which defines the edge of
the somitic mesoderm (CACT expr. area)
13: efa, ENAF production rate
14: efx, ENAF degradation rate
15: eft, ENAF threshold for cell fate switching
_erads -- vector of equisurf. radii
_eres -- vector of results
@RETURN: float
'''
T = 10
pna = _params[0]; pnx = _params[1]; pm3 = _params[2]; psa = _params[3]
psb = _params[4]; pst = _params[5]; pnu = _params[6]; ph = _params[7]
ptb = _params[8]; pca = _params[9]; pcx = _params[10]; s0 = _params[11]
pct = psa-_params[12]; efa = _params[13]; efx = _params[14]; eft = _params[15]
'''
T L na nx mmp3 sa sb smthr nu h tb ca cx dinit
0 1 2 3 4 5 6 7 8 9 10 11 12 13
wh. 0 T pi*r pna pnx pm3 psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
hf. 1 T pi*r pna pnx pm3/10. psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
wh. -N 2 T pi*r pna/5. pnx pm3 psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
hf. -N 3 T pi*r pna/5. pnx pm3/10. psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
wh. 4 T pi*r pna pnx pm3 psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
wh. -M 5 T pi*r pna pnx pm3/5. psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
wh. -M-N 6 T pi*r pna/5. pnx pm3/5. psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
wh. 7 T pi*r pna pnx pm3 psa psb pst pnu ph ptb pca pcx 5*s0/r^2/pi
wh. -C 8 T pi*r pna pnx pm3 psa psb pst pnu ph ptb pca/5. pcx 5*s0/r^2/pi
wh. -C-N 9 T pi*r pna/5. pnx pm3 psa psb pst pnu ph ptb pca/5. pcx 5*s0/r^2/pi
PCT is not forwarded to C++ code because it acts at the stage of result interpretation
'''
_r = _erads
param_array = np.array([ \
[ T, pi*_r[0] ,pna , pnx , pm3 , psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[0]**2, efa, efx, eft ],\
[ T, pi*_r[1] ,pna , pnx , pm3/10., psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[1]**2, efa, efx, eft ],\
[ T, pi*_r[2] ,pna/5., pnx , pm3 , psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[2]**2, efa, efx, eft ],\
[ T, pi*_r[3] ,pna/5., pnx , pm3/10., psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[3]**2, efa, efx, eft ],\
[ T, pi*_r[4] ,pna , pnx , pm3 , psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[4]**2, efa, efx, eft ],\
[ T, pi*_r[5] ,pna , pnx , pm3/5. , psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[5]**2, efa, efx, eft ],\
[ T, pi*_r[6] ,pna/5., pnx , pm3/5. , psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[6]**2, efa, efx, eft ],\
[ T, pi*_r[7] ,pna , pnx , pm3 , psa, psb, pst, pnu, ph, ptb, pca , pcx, 1.59e-5*s0*_r[7]**2, efa, efx, eft ],\
[ T, pi*_r[8] ,pna , pnx , pm3 , psa, psb, pst, pnu, ph, ptb, pca/5., pcx, 1.59e-5*s0*_r[8]**2, efa, efx, eft ],\
[ T, pi*_r[9] ,pna/5., pnx , pm3 , psa, psb, pst, pnu, ph, ptb, pca/5., pcx, 1.59e-5*s0*_r[9]**2, efa, efx, eft ],\
])
nexp = param_array.shape[0]
assert(nexp == _eres.shape[1])
log.write('Parameters vector: %s\n' % (' '.join(map(str, _params))) )
log.write('Radii vector: %s\n' % (' '.join(map(str, _erads))) )
# param_strings - parameters ready for getting to apply_async
param_strings = [[] for x in range(nexp)]
for i in range(nexp):
param_strings[i] += [ "-O" ]
param_strings[i] += [ "_current_/exp%d.txt" % (i) ]
param_strings[i] += [ "--input-params" ]
param_strings[i] += map(str, param_array[i,:])
# check _current_ folder
#print(param_strings)
try:
_fd = open('_current_/1', 'w')
_fd.write('check')
_fd.close()
except IOError as e:
print("Cant write files into _current_")
print(e)
sys.exit(1)
try:
pass
perform_simulations(param_strings, log)
except Exception as e:
print("Error in performing simulations. Killing..")
print(e)
sys.exit(1)
try:
g_results = extract_results_delta(param_array[:,1], "_current_", log)
s_results = extract_results_smad(param_array[:,1], "_current_", log, \
psa, psb, pct)
except Exception as e:
print("Error in extracting results. Killing..")
raise e
# sum CHD ignoring None
rSQ = 0; _n = 0
for ires in range(nexp):
if not (_eres[0,ires] is None):
rSQ += (_eres[0,ires] - g_results[ires])**2
_n += 1
rVal = 3*rSQ/_n
# sum CACT ignoring None
rSQ = 0; _n = 0;
for ires in range(nexp):
if not (_eres[1,ires] is None):
rSQ += (_eres[1,ires] - s_results[ires])**2
_n += 1
rVal += rSQ/_n
print(' RESULT DELTA VEC: %s\n' % (' '.join(map(str, g_results)) ))
print(' RESULT SMAD VEC: %s\n' % (' '.join(map(str, s_results)) ))
log.write('Resulting Delta: %s\n' % \
(' '.join(map(lambda x: '%5.1f' % x, g_results))) )
log.write('Resulting Smad: %s\n' % \
(' '.join(map(lambda x: '%5.1f' % x, s_results))) )
log.write('Summary result: %f\n' % (rVal))
log.flush()
return rVal
# performing single simulation series
# parallelizing it across ncores
# paramSL - list of strings
# log - opened descriptor for log writing
def perform_simulations(paramSL, log):
program_file = '../solver/tw.x'
try:
num_cores = int(os.environ['L1M_NTHREADS'])
except:
num_cores = 2
print('Use %d number of cores' % (num_cores) )
nexp = len(paramSL)
tp = ThreadPool(num_cores)
for i in range(nexp):
# launch(program_file, params, repeats)
# launch(program_file, i, params_list[i,:] )
tp.apply_async(launch, ( program_file, paramSL[i] ) )
tp.close()
tp.join()
pass
# Extracting the main result from the trajectories
# Ls - array of simulation lengths, L (mcm)
# dirname - folder with trajectory files
# log - log descriptor
def extract_results_delta(Ls, dirname, log):
nexp = len(Ls)
res = np.zeros(nexp)
try:
for i in range(nexp):
try:
a = np.loadtxt(os.path.join(dirname, 'exp%d.txt' % (i)))
except ValueError as e:
print('Can\'t load the matrix')
print('THE SOLUTION DIVERGES!')
# Do not break calculation in this case;
# just put -666 for identification of the divergence
res[i] = -666
continue
Nx = (a.shape[1]-1) / 5.
assert(abs(Nx - int(Nx)) < 1e-8)
Nx = int(Nx)
print('Matrix loaded: ', Nx)
xx = np.linspace(0, Ls[i], Nx)
de = a[:,1+3*Nx:1+4*Nx] # delta distribution
            grd = np.gradient(de, axis=1)
"""
Some utility functions for the data analysis project.
"""
import numpy as np
import healpy as hp
import pylab as plt
import os
from pixell import curvedsky
from pspy import pspy_utils, so_cov, so_spectra, so_mcm, so_map_preprocessing
from pspy.cov_fortran.cov_fortran import cov_compute as cov_fortran
from pspy.mcm_fortran.mcm_fortran import mcm_compute as mcm_fortran
from pixell import enmap
import gc
def get_filtered_map(orig_map, binary, filter, inv_pixwin_lxly=None, weighted_filter=False, tol=1e-4, ref=0.9):
"""Filter the map in Fourier space using a predefined filter. Note that we mutliply the maps by a binary mask before
doing this operation in order to remove pathological pixels
We also include an option for removing the pixel window function
Parameters
---------
orig_map: ``so_map``
the map to be filtered
binary: ``so_map``
a binary mask removing pathological pixels
filter: 2d array
a filter applied in fourier space
inv_pixwin_lxly: 2d array
the inverse of the pixel window function in fourier space
weighted_filter: boolean
        whether to use the weighted filter (a la Sigurd)
    tol, ref: floats
        only used for the weighted filter; these arguments
        remove extreme pixel values in the applied weight
"""
if weighted_filter == False:
if inv_pixwin_lxly is not None:
orig_map = fourier_mult(orig_map, binary, filter * inv_pixwin_lxly)
else:
orig_map = fourier_mult(orig_map, binary, filter)
else:
orig_map.data *= binary.data
one_mf = (1 - filter)
rhs = enmap.ifft(one_mf * enmap.fft(orig_map.data, normalize=True), normalize=True).real
gc.collect()
div = enmap.ifft(one_mf * enmap.fft(binary.data, normalize=True), normalize=True).real
del one_mf
gc.collect()
div = np.maximum(div, np.percentile(binary.data[::10, ::10], ref * 100) * tol)
orig_map.data -= rhs / div
del rhs
del div
gc.collect()
if inv_pixwin_lxly is not None:
ft = enmap.fft(orig_map.data, normalize=True)
ft *= inv_pixwin_lxly
orig_map.data = enmap.ifft(ft, normalize=True).real
gc.collect()
return orig_map
def fourier_mult(orig_map, binary, fourier_array):
"""do a fourier multiplication of the FFT of the orig_map with a fourier array, binary help to remove pathological pixels
Parameters
---------
orig_map: ``so_map``
the map to be filtered
binary: ``so_map``
a binary mask removing pathological pixels
fourier_array: 2d array
the fourier array we want to multiply the FFT of the map with
"""
orig_map.data *= binary.data
ft = enmap.fft(orig_map.data, normalize=True)
ft *= fourier_array
orig_map.data = enmap.ifft(ft, normalize=True).real
return orig_map
def get_coadded_map(orig_map, coadd_map, coadd_mask):
"""Co-add a map with another map given its associated mask.
Parameters
---------
orig_map: ``so_map``
the original map without point sources
coadd_map: ``so_map``
the map to be co-added
coadd_mask: ``so_map``
the mask associated to the coadd_map
"""
if coadd_map.ncomp == 1:
coadd_map.data *= coadd_mask.data
else:
coadd_map.data[:] *= coadd_mask.data
orig_map.data += coadd_map.data
return orig_map
def fill_sym_mat(mat):
"""Make a upper diagonal or lower diagonal matrix symmetric
Parameters
----------
mat : 2d array
the matrix we want symmetric
"""
return mat + mat.T - np.diag(mat.diagonal())
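# Added example (illustrative): filling the lower triangle of an upper-triangular
# matrix so the result is symmetric.
def _example_fill_sym_mat():
    upper = np.array([[1.0, 2.0], [0.0, 3.0]])
    assert np.allclose(fill_sym_mat(upper), np.array([[1.0, 2.0], [2.0, 3.0]]))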
def get_nspec(dict):
surveys = dict["surveys"]
nspec = {}
for kind in ["cross", "noise", "auto"]:
nspec[kind] = 0
for id_sv1, sv1 in enumerate(surveys):
arrays_1 = dict["arrays_%s" % sv1]
for id_ar1, ar1 in enumerate(arrays_1):
for id_sv2, sv2 in enumerate(surveys):
arrays_2 = dict["arrays_%s" % sv2]
for id_ar2, ar2 in enumerate(arrays_2):
if (id_sv1 == id_sv2) & (id_ar1 > id_ar2) : continue
if (id_sv1 > id_sv2) : continue
if (sv1 != sv2) & (kind == "noise"): continue
if (sv1 != sv2) & (kind == "auto"): continue
nspec[kind] += 1
return nspec
def get_noise_matrix_spin0and2(noise_dir, survey, arrays, lmax, nsplits):
"""This function uses the measured noise power spectra
and generate a three dimensional array of noise power spectra [n_arrays, n_arrays, lmax] for temperature
and polarisation.
The different entries ([i,j,:]) of the arrays contain the noise power spectra
for the different array pairs.
for example nl_array_t[0,0,:] => nl^{TT}_{ar_{0},ar_{0}), nl_array_t[0,1,:] => nl^{TT}_{ar_{0},ar_{1})
this allows to consider correlated noise between different arrays.
Parameters
----------
noise_data_dir : string
the folder containing the noise power spectra
survey : string
the survey to consider
arrays: 1d array of string
the arrays we consider
lmax: integer
the maximum multipole for the noise power spectra
n_splits: integer
the number of data splits we want to simulate
nl_per_split= nl * n_{splits}
"""
spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
n_arrays = len(arrays)
nl_array_t = np.zeros((n_arrays, n_arrays, lmax))
nl_array_pol = np.zeros((n_arrays, n_arrays, lmax))
for c1, ar1 in enumerate(arrays):
for c2, ar2 in enumerate(arrays):
if c1>c2 : continue
l, nl = so_spectra.read_ps("%s/mean_%sx%s_%s_noise.dat" % (noise_dir, ar1, ar2, survey), spectra=spectra)
nl_t = nl["TT"][:lmax]
nl_pol = (nl["EE"][:lmax] + nl["BB"][:lmax])/2
l = l[:lmax]
nl_array_t[c1, c2, :] = nl_t * nsplits * 2 * np.pi / (l * (l + 1))
nl_array_pol[c1, c2, :] = nl_pol * nsplits * 2 * np.pi / (l * (l + 1))
for i in range(lmax):
nl_array_t[:,:,i] = fill_sym_mat(nl_array_t[:,:,i])
nl_array_pol[:,:,i] = fill_sym_mat(nl_array_pol[:,:,i])
return l, nl_array_t, nl_array_pol
def get_foreground_matrix(fg_dir, all_freqs, lmax):
"""This function uses the best fit foreground power spectra
    and generate a three dimensional array of foreground power spectra [nfreqs, nfreqs, lmax].
The different entries ([i,j,:]) of the array contains the fg power spectra for the different
frequency channel pairs.
for example fl_array_T[0,0,:] => fl_{f_{0},f_{0}), fl_array_T[0,1,:] => fl_{f_{0},f_{1})
this allows to have correlated fg between different frequency channels.
    (Note that for now, no fg is included in pol)
Parameters
----------
fg_dir : string
the folder containing the foreground power spectra
all_freqs: 1d array of string
the frequencies we consider
lmax: integer
the maximum multipole for the noise power spectra
"""
nfreqs = len(all_freqs)
fl_array = np.zeros((nfreqs, nfreqs, lmax))
for c1, freq1 in enumerate(all_freqs):
for c2, freq2 in enumerate(all_freqs):
if c1 > c2 : continue
l, fl_all = np.loadtxt("%s/fg_%sx%s_TT.dat"%(fg_dir, freq1, freq2), unpack=True)
fl_all *= 2 * np.pi / (l * (l + 1))
fl_array[c1, c2, 2:lmax] = fl_all[:lmax-2]
for i in range(lmax):
fl_array[:,:,i] = fill_sym_mat(fl_array[:,:,i])
return l, fl_array
def multiply_alms(alms, bl, ncomp):
"""This routine mutliply the alms by a function bl
Parameters
----------
alms : 1d array
the alms to be multiplied
bl : 1d array
the function to multiply the alms
    ncomp: integer
the number of components
ncomp = 3 if T,Q,U
ncomp = 1 if T only
"""
alms_mult = alms.copy()
if ncomp == 1:
alms_mult = hp.sphtfunc.almxfl(alms_mult, bl)
else:
for i in range(ncomp):
alms_mult[i] = hp.sphtfunc.almxfl(alms_mult[i], bl)
return alms_mult
def generate_noise_alms(nl_array_t, lmax, n_splits, ncomp, nl_array_pol=None, dtype=np.complex128):
"""This function generates the alms corresponding to the noise power spectra matrices
    nl_array_t, nl_array_pol. The function returns a dictionary nlms["T", i].
    The entries of the dictionary are for example nlms["T", i] where i is the index of the split.
note that nlms["T", i] is a (narrays, size(alm)) array, it is the harmonic transform of
the noise realisation for the different frequencies.
Parameters
----------
nl_array_t : 3d array [narrays, narrays, lmax]
noise power spectra matrix for temperature data
lmax : integer
the maximum multipole for the noise power spectra
n_splits: integer
the number of data splits we want to simulate
    ncomp: integer
the number of components
ncomp = 3 if T,Q,U
ncomp = 1 if T only
nl_array_pol : 3d array [narrays, narrays, lmax]
noise power spectra matrix for polarisation data
(in use if ncomp==3)
"""
nlms = {}
if ncomp == 1:
for k in range(n_splits):
nlms[k] = curvedsky.rand_alm(nl_array_t,lmax=lmax, dtype=dtype)
else:
for k in range(n_splits):
nlms["T", k] = curvedsky.rand_alm(nl_array_t, lmax=lmax, dtype=dtype)
nlms["E", k] = curvedsky.rand_alm(nl_array_pol, lmax=lmax, dtype=dtype)
nlms["B", k] = curvedsky.rand_alm(nl_array_pol, lmax=lmax, dtype=dtype)
return nlms
def remove_mean(so_map, window, ncomp):
"""This function removes the mean value of the map after having applied the
window function
Parameters
----------
so_map : so_map
the map we want to subtract the mean from
window : so_map or so_map tuple
the window function, if ncomp=3 expect
(win_t,win_pol)
ncomp : integer
the number of components
ncomp = 3 if T,Q,U
ncomp = 1 if T only
"""
if ncomp == 1:
so_map.data -= np.mean(so_map.data * window.data)
else:
so_map.data[0] -= np.mean(so_map.data[0] * window[0].data)
so_map.data[1] -= np.mean(so_map.data[1] * window[1].data)
so_map.data[2] -= np.mean(so_map.data[2] * window[1].data)
return so_map
def deconvolve_tf(lb, ps, tf1, tf2, ncomp, lmax=None):
"""This function deconvolves the transfer function
Parameters
----------
ps : dict or 1d array
the power spectra with tf applied
tf1 : 1d array
transfer function of map1
tf2 : 1d array
transfer function of map2
ncomp : integer
the number of components
ncomp = 3 if T,Q,U
ncomp = 1 if T only
"""
tf = tf1 * tf2
if lmax is not None:
id = np.where(lb < lmax)
tf = tf[id]
if ncomp == 1:
ps /= tf
else:
spectra = ["TT", "TE", "TB", "ET", "BT", "EE", "EB", "BE", "BB"]
for spec in spectra:
ps[spec] /= tf
return ps
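# Added example (illustrative): dividing out a flat transfer function of 0.5 in
# one map (and 1.0 in the other) doubles the spectrum in the ncomp=1 case.
def _example_deconvolve_tf():
    lb = np.arange(10)
    ps = np.ones(10)
    out = deconvolve_tf(lb, ps, np.full(10, 0.5), np.full(10, 1.0), ncomp=1)
    assert np.allclose(out, 2.0)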
def is_symmetric(mat, tol=1e-8):
return np.all(np.abs(mat-mat.T) < tol)
def is_pos_def(mat):
return np.all(np.linalg.eigvals(mat) > 0)
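# Added example (illustrative): the identity matrix is both symmetric and
# positive definite.
def _example_matrix_checks():
    assert is_symmetric(np.eye(2))
    assert is_pos_def(np.eye(2))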
def fast_cov_coupling(sq_win_alms_dir,
na_r,
nb_r,
nc_r,
nd_r,
lmax,
l_exact=None,
l_band=None,
l_toep=None):
if l_toep is None: l_toep = lmax
if l_band is None: l_band = lmax
if l_exact is None: l_exact = lmax
try:
alm_TaTc = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, na_r, nc_r))
except:
alm_TaTc = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, nc_r, na_r))
try:
alm_TbTd = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, nb_r, nd_r))
except:
alm_TbTd = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, nd_r, nb_r))
try:
alm_TaTd = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, na_r, nd_r))
except:
alm_TaTd = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, nd_r, na_r))
try:
alm_TbTc = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, nb_r, nc_r))
except:
alm_TbTc = np.load("%s/alms_%sx%s.npy" % (sq_win_alms_dir, nc_r, nb_r))
wcl = {}
wcl["TaTcTbTd"] = hp.alm2cl(alm_TaTc, alm_TbTd)
wcl["TaTdTbTc"] = hp.alm2cl(alm_TaTd, alm_TbTc)
l = np.arange(len(wcl["TaTcTbTd"]))
wcl["TaTcTbTd"] *= (2 * l + 1) / (4 * np.pi)
wcl["TaTdTbTc"] *= (2 * l + 1) / (4 * np.pi)
coupling = np.zeros((2, lmax, lmax))
cov_fortran.calc_cov_spin0(wcl["TaTcTbTd"], wcl["TaTdTbTc"], l_exact, l_band, l_toep, coupling.T)
coupling_dict = {}
for id_cov, name in enumerate(["TaTcTbTd", "TaTdTbTc"]):
if l_toep < lmax:
coupling[id_cov] = so_mcm.format_toepliz_fortran(coupling[id_cov], l_toep, lmax)
mcm_fortran.fill_upper(coupling[id_cov].T)
coupling_dict[name] = coupling[id_cov]
list1 = ["TaTcTbTd", "PaPcPbPd", "TaTcPbPd", "PaPcTbTd",
"TaPcTbPd", "TaTcTbPd", "TaPcTbTd", "TaPcPbTd",
"TaPcPbPd", "PaPcTbPd", "TaTcPbTd", "PaTcTbTd",
"PaTcPbTd", "PaTcTbPd", "PaTcPbPd", "PaPcPbTd"]
list2 = ["TaTdTbTc", "PaPdPbPc", "TaPdPbTc", "PaTdTbPc",
"TaPdTbPc", "TaPdTbTc", "TaTdTbPc", "TaTdPbPc",
"TaPdPbPc", "PaPdTbPc", "TaTdPbTc", "PaTdTbTc",
"PaTdPbTc", "PaPdTbTc", "PaPdPbTc", "PaTdPbPc"]
for id1 in list1:
coupling_dict[id1] = coupling_dict["TaTcTbTd"]
for id2 in list2:
coupling_dict[id2] = coupling_dict["TaTdTbTc"]
return coupling_dict
def covariance_element(coupling, id_element, ns, ps_all, nl_all, binning_file, mbb_inv_ab, mbb_inv_cd):
"""
This routine deserves some explanation
We want to compute the covariance between two power spectra
C1 = Wa * Xb, C2 = Yc * Zd
Here W, X, Y, Z can be either T or E and a,b,c,d will be an index
corresponding to the survey and array we consider so for example a = s17_pa5_150 or a = dr6_pa4_090
The formula for the analytic covariance of C1, C2 is given by
Cov( Wa * Xb, Yc * Zd) = < Wa Yc> <Xb Zd> + < Wa Zd> <Xb Yc> (this is just from the wick theorem)
In practice we need to include the effect of the mask (so we have to introduce the coupling dict D)
and we need to take into account that we use only the cross power spectra, that is why we use the chi function
Cov( Wa * Xb, Yc * Zd) = D(Wa*Yc,Xb Zd) chi(Wa,Yc,Xb Zd) + D(Wa*Zd,Xb*Yc) chi(Wa,Zd,Xb,Yc)
Parameters
----------
    coupling : dictionary
        a dictionary that contains the coupling terms arising from the window functions
id_element : list
a list of the form [a,b,c,d] where a = dr6_pa4_090, etc, this identify which pair of power spectrum we want the covariance of
ns: dict
        this dictionary contains the number of splits we consider for each survey
ps_all: dict
        this dict contains the theoretical best power spectra, convolved with the beam for example
ps["dr6&pa5_150", "dr6&pa4_150", "TT"] = bl_dr6_pa5_150 * bl_dr6_pa4_150 * (Dl^{CMB}_TT + fg_TT)
nl_all: dict
        this dict contains the estimated noise power spectra, note that it corresponds to the noise power spectrum per split
e.g nl["dr6&pa5_150", "dr6&pa4_150", "TT"]
binning_file:
a binning file with three columns bin low, bin high, bin mean
mbb_inv_ab and mbb_inv_cd:
the inverse mode coupling matrices corresponding to the C1 = Wa * Xb and C2 = Yc * Zd power spectra
"""
na, nb, nc, nd = id_element
lmax = coupling["TaTcTbTd"].shape[0]
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
nbins = len(bin_hi)
speclist = ["TT", "TE", "ET", "EE"]
nspec = len(speclist)
analytic_cov = np.zeros((nspec * nbins, nspec * nbins))
for i, (W, X) in enumerate(speclist):
for j, (Y, Z) in enumerate(speclist):
id0 = W + "a" + Y + "c"
id1 = X + "b" + Z + "d"
id2 = W + "a" + Z + "d"
id3 = X + "b" + Y + "c"
M = coupling[id0.replace("E","P") + id1.replace("E","P")] * chi(na, nc, nb, nd, ns, ps_all, nl_all, W + Y + X + Z)
M += coupling[id2.replace("E","P") + id3.replace("E","P")] * chi(na, nd, nb, nc, ns, ps_all, nl_all, W + Z + X + Y)
analytic_cov[i * nbins: (i + 1) * nbins, j * nbins: (j + 1) * nbins] = so_cov.bin_mat(M, binning_file, lmax)
mbb_inv_ab = so_cov.extract_TTTEEE_mbb(mbb_inv_ab)
mbb_inv_cd = so_cov.extract_TTTEEE_mbb(mbb_inv_cd)
analytic_cov = np.dot(np.dot(mbb_inv_ab, analytic_cov), mbb_inv_cd.T)
return analytic_cov
def covariance_element_beam(id_element, ps_all, norm_beam_cov, binning_file, lmax):
"""
    This routine computes the contribution from beam errors to the analytical covariance of the power spectra
We want to compute the beam covariance between the two spectra
C1 = Wa * Xb, C2 = Yc * Zd
Here W, X, Y, Z can be either T or E and a,b,c,d will be an index
corresponding to the survey and array we consider so for example a = dr6_pa5_150 or a = dr6_pa4_090
The formula for the analytic covariance of C1, C2 is given by
let's denote the normalised beam covariance <BB>_ac = < delta B_a delta B_c >/np.outer(B_a, B_c)
Cov( Wa * Xb, Yc * Zd) = Dl^{WaXb} Dl^{YcZd} ( <BB>_ac + <BB>_ad + <BB>_bc + <BB>_bd )
Parameters
----------
id_element : list
a list of the form [a,b,c,d] where a = dr6_pa4_090, etc, this identify which pair of power spectrum we want the covariance of
ps_all: dict
        this dict contains the theoretical best power spectra, convolved with the beam for example
ps["dr6&pa5_150", "dr6&pa4_150", "TT"] = bl_dr6_pa5_150 * bl_dr6_pa4_150 * (Dl^{CMB}_TT + fg_TT)
norm_beam_cov: dict
this dict contains the normalized beam covariance for each survey and array
binning_file: str
a binning file with three columns bin low, bin high, bin mean
lmax: int
the maximum multipole to consider
"""
na, nb, nc, nd = id_element
sv_alpha, ar_alpha = na.split("&")
sv_beta, ar_beta = nb.split("&")
sv_gamma, ar_gamma = nc.split("&")
sv_eta, ar_eta = nd.split("&")
bin_lo, bin_hi, bin_c, bin_size = pspy_utils.read_binning_file(binning_file, lmax)
nbins = len(bin_hi)
speclist = ["TT", "TE", "ET", "EE"]
nspec = len(speclist)
    analytic_cov_from_beam = np.zeros((nspec * nbins, nspec * nbins))
import numpy as np
import csv
def most(x):
most = x[0]
pos = 0
for i in range(len(x)):
if(x[i] > most):
most = x[i]
pos = i
return pos
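# Added example (illustrative): most() is a hand-rolled argmax over a 1-D list.
def _example_most():
    x = [0.1, 0.7, 0.2]
    assert most(x) == 1 == int(np.argmax(x))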
def l_relu(x):
return max(0.01 * x, x)
def l_relu_dx(x):
return 1 if x > 0 else 0.01
def forward(val, w1, w2, b1, b2):
x = np.matmul(w1, val) + b1
for i in range(len(x)):
x[i][0] = l_relu(x[i][0])
y = np.matmul(w2, x) + b2
for i in range(len(y)):
y[i][0] = l_relu(y[i][0])
z = softmax(y)
return [most(z), x, y]
def softmax(x):
return np.exp(x) / np.sum(np.exp(x), axis=0)
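# Added note (not in the original): np.exp can overflow for large activations; a
# numerically safer softmax subtracts the per-column maximum first and yields the
# same probabilities.
def _stable_softmax(x):
    shifted = x - np.max(x, axis=0)
    return np.exp(shifted) / np.sum(np.exp(shifted), axis=0)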
def backprop(actual, x0, x1, x2, w1, w2, b1, b2, alpha):
y = np.array([[0.0] for i in range(10)])
y[actual] = 1
    r = np.matmul(w2, x1)
import json
import cv2
import glob
import matplotlib.pyplot as plt
import re
import numpy as np
from imantics import Polygons, Mask
visual = False # only use True with 1 image for testing because there is a bug in openCV drawing
stop = True
data = None
def load_image(addr):
img = cv2.imread(addr, -1)
# if visual == True:
# print(np.unique(img))
# # cv2.imshow('img', img)
# # cv2.waitKey(100)
# plt.imshow(img)
# plt.show()
return img
def is_edge_point(img, row, col):
rows, cols = img.shape
value = (int)(img[row, col])
if value == 0:
return False
count = 0
for i in range(-1, 2):
for j in range(-1, 2):
if row + i >= 0 and row + i < rows and col + j >= 0 and col + j < cols:
value_neib = (int)(img[row + i, col + j])
if value_neib == value:
count = count + 1
if count > 2 and count < 8:
return True
return False
def edge_downsample(img):
rows, cols = img.shape
for row in range(rows):
for col in range(cols):
if img[row, col] > 0:
for i in range(-2, 3):
for j in range(-2, 3):
if i == 0 and j == 0:
continue
roww = row + i
coll = col + j
if roww >= 0 and roww < rows and coll >= 0 and coll < cols:
if img[roww, coll] == img[row, col]:
img[roww, coll] = 0
return img
def next_edge(img, obj_id, row, col):
rows, cols = img.shape
incre = 1
while (incre < 10):
for i in range(-incre, incre + 1, 2 * incre):
for j in range(-incre, incre + 1, 1):
roww = row + i
coll = col + j
if roww >= 0 and roww < rows and coll >= 0 and coll < cols:
value = img[roww, coll]
if value == obj_id:
return True, roww, coll
for i in range(-incre + 1, incre, 1):
for j in range(-incre, incre + 1, 2 * incre):
roww = row + i
coll = col + j
if roww >= 0 and roww < rows and coll >= 0 and coll < cols:
value = img[roww, coll]
if value == obj_id:
return True, roww, coll
incre = incre + 1
return False, row, col
def find_region(img, classes_label, obj_id, row, col):
region = {}
region['region_attributes'] = {}
region['shape_attributes'] = {}
rows, cols = img.shape
roww = row
coll = col
edges_x = []
edges_y = []
find_edge = True
    poly_img = np.zeros((rows, cols), np.uint8)
import numpy as np
import pandas as pd
import plotly.graph_objects as go
import toolsClass
import multiprocessing
import time
from scipy.interpolate import interp1d
import scipy.integrate as integrate
#from tqdm.contrib.concurrent import process_map #for process bar. very slow...
tools = toolsClass.tools()
import logging
log = logging.getLogger(__name__)
class GYRO:
def __init__(self, rootDir, dataPath):
"""
rootDir is root location of python modules (where dashGUI.py lives)
dataPath is the location where we write all output to
"""
self.rootDir = rootDir
tools.rootDir = self.rootDir
self.dataPath = dataPath
tools.dataPath = self.dataPath
return
def allowed_class_vars(self):
"""
Writes a list of recognized class variables to HEAT object
Used for error checking input files and for initialization
Here is a list of variables with description:
testvar dummy for testing
"""
self.allowed_vars = [
'N_gyroSteps',
'gyroDeg',
'gyroT_eV',
'N_vSlice',
'N_vPhase',
'N_gyroPhase',
'ionMassAMU',
'vMode',
'ionFrac'
]
return
def setTypes(self):
"""
Set variable types for the stuff that isnt a string from the input file
"""
integers = [
'N_gyroSteps',
'gyroDeg',
'N_vSlice',
'N_vPhase',
'N_gyroPhase',
]
floats = [
'ionFrac',
'gyroT_eV',
'ionMassAMU',
]
for var in integers:
if (getattr(self, var) is not None) and (~np.isnan(float(getattr(self, var)))):
try:
setattr(self, var, int(getattr(self, var)))
except:
print("Error with input file var "+var+". Perhaps you have invalid input values?")
log.info("Error with input file var "+var+". Perhaps you have invalid input values?")
for var in floats:
if var is not None:
if (getattr(self, var) is not None) and (~np.isnan(float(getattr(self, var)))):
try:
setattr(self, var, float(getattr(self, var)))
except:
print("Error with input file var "+var+". Perhaps you have invalid input values?")
log.info("Error with input file var "+var+". Perhaps you have invalid input values?")
return
def setupConstants(self, ionMassAMU=2.014):
"""
Sets up constants
default mass is deuterium 2.014 MeV/c^2
"""
#unit conversions
self.kg2eV = 5.609e35 #1kg = 5.609e35 eV/c^2
self.eV2K = 1.160e4 #1ev=1.160e4 K
#constants
self.AMU = 931.494e6 #ev/c^2
self.kB = 8.617e-5 #ev/K
self.e = 1.602e-19 # C
self.c = 299792458 #m/s
self.diamag = -1 #diamagnetism = -1 for ions, 1 for electrons
self.mass_eV = ionMassAMU * self.AMU
self.Z=1 #assuming isotopes of hydrogen here
return
def temp2thermalVelocity(self, T_eV):
"""
Calculates thermal velocity from a temperature, where thermal velocity
is defined as the most probable speed
T_eV is temperature in eV
can also be found with: d/dv( v*f(v) ) = 0
note that this is for v, not vPerp or v||
"""
return np.sqrt(2.0*T_eV/(self.mass_eV/self.c**2))
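    # Added example (illustrative, not part of HEAT): sanity-check of the thermal
    # speed for a 100 eV deuterium ion, v_th = sqrt(2*T/(m/c^2)) ~ 9.8e4 m/s.
    def _example_temp2thermalVelocity(self):
        self.setupConstants(ionMassAMU=2.014)
        vth = self.temp2thermalVelocity(100.0)
        assert abs(vth - 9.79e4) < 2e3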
def setupFreqs(self, B):
"""
Calculates frequencies, periods, that are dependent upon B
These definitions follow Freidberg Section 7.7.
B is magnetic field magnitude
"""
self.omegaGyro = self.Z * self.e * B / (self.mass_eV / self.kg2eV)
if np.isscalar(self.omegaGyro):
self.omegaGyro = np.array([self.omegaGyro])
self.fGyro = np.abs(self.omegaGyro)/(2*np.pi)
self.TGyro = 1.0/self.fGyro
return
def setupRadius(self, vPerp):
"""
calculates gyro radius.
rGyro has a column for each MC run (N_MC columns), and a
row for each point on the PFC (N_pts), so it is a matrix
of shape: N_pts X N_MC
"""
N_pts = len(self.omegaGyro)
#get number of vPerps
if np.isscalar(vPerp):
vPerp = np.array([vPerp])
N_MC = 1
else:
N_MC = len(vPerp)
self.rGyro = np.zeros((N_pts,N_MC))
for i in range(N_MC):
self.rGyro[:,i] = vPerp[i] / np.abs(self.omegaGyro)
return
def setupVelocities(self, N):
"""
sets up velocities based upon vMode input from GUI
N is the number of source mesh elements (ie len(PFC.centers) )
len(self.t1) is number of points in divertor we are calculating HF on
"""
#get velocity space phase angles
self.uniformVelPhaseAngle()
if self.vMode == 'single':
print("Gyro orbit calculation from single plasma temperature")
log.info("Gyro orbit calculation from single plasma temperature")
self.T0 = np.ones((N))*self.gyroT_eV
#get average velocity for each temperature point
self.vThermal = self.temp2thermalVelocity(self.T0)
#set upper bound of v*f(v) (note that this cuts off high energy particles)
self.vMax = 5 * self.vThermal
#get 100 points to initialize functional form of f(v) (note this is a 2D matrix cause vMax is 2D)
self.vScan = np.linspace(0,self.vMax,10000).T
#get velocity slices for each T0
self.pullEqualProbabilityVelocities()
else:
#TO ADD THIS YOU WILL NEED TO PASS IN XYZ COORDINATES OF CTRS AND INTERPOLATE
print("3D plasma temperature interpolation from file not yet supported. Run gyro orbits in single mode")
log.info("3D plasma temperature interpolation from file not yet supported. Run gyro orbits in single mode")
return
def pullEqualProbabilityVelocities(self):
"""
creates vSlices: array of velocities indexed to match T0 array (or PFC.centers)
each vSlice is positioned at a place in the PDF so it has an equal probability
of occuring. ie the area under the PDF curve between each vSlice is equal.
in loop, i is mesh element index
"""
self.vSlices = np.ones((len(self.T0),self.N_vSlice))*np.nan
self.energySlices = np.zeros((len(self.T0),self.N_vSlice))
self.energyIntegrals = np.zeros((len(self.T0),self.N_vSlice))
self.energyFracs = np.zeros((len(self.T0),self.N_vSlice))
self.vBounds = np.zeros((len(self.T0),self.N_vSlice+1))
for i in range(len(self.T0)):
#get speed range for this T0
v = self.vScan[i,:]
#generate the (here maxwellian) velocity vector PDF
#pdf = lambda x: (self.mass_eV/self.c**2) / (self.T0[i]) * np.exp(-(self.mass_eV/self.c**2 * x**2) / (2*self.T0[i]) )
pdf = lambda x: ( (self.mass_eV/self.c**2) / (2 * np.pi * self.T0[i]) )**(3.0/2.0) * np.exp(-(self.mass_eV/self.c**2 * x**2) / (2*self.T0[i]) )
#speed pdf (integrate over solid angle)
v_pdf = 4*np.pi * v**2 * pdf(v)
#generate the CDF
v_cdf = np.cumsum(v_pdf[1:])*np.diff(v)
v_cdf = np.insert(v_cdf, 0, 0)
#create bspline interpolators for the cdf and cdf inverse
inverseCDF = interp1d(v_cdf, v, kind='linear')
forwardCDF = interp1d(v, v_cdf, kind='linear')
#CDF location of vSlices and bin boundaries
cdfBounds = np.linspace(0,v_cdf[-1],self.N_vSlice+1)
#CDF location of velocity bin bounds omitting 0 and 1
#old method does not make vSlices truly bin centers
#cdfBounds = np.linspace(0,1,self.N_vSlice+1)[1:-1]
#old method 2 spaces centers uniformly
# #calculate N_vSlice velocities for each pdf each with equal area (probability)
# cdfMax = v_cdf[-1]
# cdfMin = v_cdf[0]
# sliceWidth = cdfMax / (self.N_vSlice+1)
# #CDF location of vSlices omitting 0 and 1
# cdfSlices = np.linspace(0,1,self.N_vSlice+2)[1:-1]
# #CDF location of velocity bin bounds omitting 0 and 1
# #old method does not make vSlices truly bin centers
# #cdfBounds = np.linspace(0,1,self.N_vSlice+1)[1:-1]
# #new method makes vSlices bin centers, except for the end bins
# cdfBounds = np.diff(cdfSlices)/2.0 + cdfSlices[:-1]
# #vSlices are Maxwellian distribution sample locations (@ bin centers)
# self.vSlices[i,:] = inverseCDF(cdfSlices)
# vBounds = inverseCDF(cdfBounds)
# vBounds = np.insert(vBounds,0,0)
# vBounds = np.append(vBounds,self.vMax[i])
#new method spaces bins uniformly, then makes vSlices center of these bins in CDF space
cdfSlices = np.diff(cdfBounds)/2.0 + cdfBounds[:-1]
#vSlices are Maxwellian distribution sample locations (@ bin centers)
self.vSlices[i,:] = inverseCDF(cdfSlices)
vBounds = inverseCDF(cdfBounds)
self.vBounds[i,:] = vBounds
#print(cdfBounds)
#print(cdfSlices)
#print(self.vBounds)
#print(self.vSlices)
#Now find energies that correspond to these vSlices
#we integrate: v**2 * f(v)
#energy pdf (missing 1/2*mass but that gets divided out later anyways )
#EofV = lambda x: x**2 * pdf(x)
#EofV = lambda x: 4*np.pi * x**4 * pdf(x)
f_E = lambda x: 2 * np.sqrt(x / np.pi) * (self.T0[i])**(-3.0/2.0) * np.exp(-x / self.T0[i])
#energy slices that correspond to velocity slices
self.energySlices[i,:] = f_E(0.5 * (self.mass_eV/self.c**2) * self.vSlices[i,:]**2)
#energy integrals
for j in range(self.N_vSlice):
Elo = 0.5 * (self.mass_eV/self.c**2) * vBounds[j]**2
Ehi = 0.5 * (self.mass_eV/self.c**2) * vBounds[j+1]**2
self.energyIntegrals[i,j] = integrate.quad(f_E, Elo, Ehi)[0]
energyTotal = self.energyIntegrals[i,:].sum()
#for testing
#if i==0:
# print("Integral Test===")
# print(energyTotal)
# print(integrate.quad(f_E, 0.0, self.vMax[i])[0])
#energy fractions
for j in range(self.N_vSlice):
self.energyFracs[i,j] = self.energyIntegrals[i,j] / energyTotal
print("Found N_vPhase velocities of equal probability")
log.info("Found N_vPhase velocities of equal probability")
return
def uniformGyroPhaseAngle(self):
"""
Uniform sampling of a uniform distribution between 0 and 2pi
returns angles in radians
"""
self.gyroPhases = np.linspace(0,2*np.pi,self.N_gyroPhase+1)[:-1]
return
def uniformVelPhaseAngle(self):
"""
Sampling of a uniform distribution between 0 and pi/2 (only forward velocities)
vPerp is x-axis of velocity space
vParallel is y-axis of velocity space
returns angles in radians
"""
self.vPhases = np.linspace(0.0,np.pi/2,self.N_vPhase+2)[1:-1]
return
def singleGyroTrace(self,vPerp,vParallel,gyroPhase,N_gyroSteps,
BtraceXYZ,controlfilePath,TGyro,rGyro,omegaGyro,
verbose=True):
"""
Calculates the gyro-Orbit path and saves to .csv and .vtk
        vPerp and vParallel are velocities in [m/s]
gyroPhase [degrees] is initial orbit phase angle
N_gyroSteps is number of discrete line segments per gyro period
BtraceXYZ is the points of the Bfield trace that we will gyrate about
"""
print("Calculating gyro trace...")
#Loop thru B field trace while tracing gyro orbit
helixTrace = None
for i in range(len(BtraceXYZ)-1):
#points in this iteration
p0 = BtraceXYZ[i,:]
p1 = BtraceXYZ[i+1,:]
#vector
delP = p1 - p0
#magnitude or length of line segment
magP = np.sqrt(delP[0]**2 + delP[1]**2 + delP[2]**2)
#time it takes to transit line segment
delta_t = magP / (vParallel)
#Number of steps in line segment
Tsample = self.TGyro / N_gyroSteps
Nsteps = int(delta_t / Tsample)
#length (in time) along guiding center
t = np.linspace(0,delta_t,Nsteps+1)
#guiding center location
xGC = np.linspace(p0[0],p1[0],Nsteps+1)
yGC = np.linspace(p0[1],p1[1],Nsteps+1)
zGC = np.linspace(p0[2],p1[2],Nsteps+1)
# construct orthogonal system for coordinate transformation
w = delP
if np.all(w==[0,0,1]):
u = np.cross(w,[0,1,0]) #prevent failure if bhat = [0,0,1]
else:
u = np.cross(w,[0,0,1]) #this would fail if bhat = [0,0,1] (rare)
v = np.cross(w,u)
#normalize
u = u / np.sqrt(u.dot(u))
v = v / np.sqrt(v.dot(v))
w = w / np.sqrt(w.dot(w))
xfm = np.vstack([u,v,w]).T
#get helix path along (proxy) z axis reference frame
x_helix = self.rGyro*np.cos(self.omegaGyro*t + gyroPhase)
y_helix = self.diamag*self.rGyro*np.sin(self.omegaGyro*t + gyroPhase)
z_helix = np.zeros((len(t)))
#perform rotation to field line reference frame
helix = np.vstack([x_helix,y_helix,z_helix]).T
helix_rot = np.zeros((len(helix),3))
for j,coord in enumerate(helix):
helix_rot[j,:] = helix[j,0]*u + helix[j,1]*v + helix[j,2]*w
#perform translation to field line reference frame
helix_rot[:,0] += xGC
helix_rot[:,1] += yGC
helix_rot[:,2] += zGC
#update gyroPhase variable so next iteration starts here
gyroPhase = self.omegaGyro*t[-1] + gyroPhase
#append to helix trace
if helixTrace is None:
helixTrace = helix_rot
else:
helixTrace = np.vstack([helixTrace,helix_rot])
helixTrace*=1000.0 #scale for ParaView
print("Saving data to CSV and VTK formats")
#save data to csv format
head = 'X[mm],Y[mm],Z[mm]'
np.savetxt(controlfilePath+'helix.csv', helixTrace, delimiter=',', header=head)
#save data to vtk format
tools.createVTKOutput(controlfilePath+'helix.csv', 'trace', 'Gyro_trace')
if verbose==True:
print("V_perp = {:f} [m/s]".format(vPerp))
print("V_parallel = {:f} [m/s]".format(vParallel))
print("Cyclotron Freq = {:f} [rad/s]".format(self.omegaGyro[0]))
print("Cyclotron Freq = {:f} [Hz]".format(self.fGyro[0]))
print("Gyro Radius = {:f} [m]".format(self.rGyro[0][0]))
print("Number of gyro points = {:f}".format(len(helixTrace)))
print("Longitudinal dist between gyro points = {:f} [m]".format(magP/float(Nsteps)))
print("Each line segment length ~ {:f} [m]".format(magP))
return
def gyroTraceParallel(self, i, mode='MT'):
"""
parallelized gyro trace. called by multiprocessing.pool.map()
i is index of parallel run from multiprocessing, corresponds to a mesh face
we are tracing in the ROI
writes helical trace to self.helixTrace[i] in 2D matrix format:
columns = X,Y,Z
rows = steps up helical trace
also updates self.lastPhase for use in next iteration step
mode options are:
-Signed Volume Loop: 'SigVolLoop'
-Signed Volume Matrix: 'SigVolMat'
-Moller-Trumbore Algorithm: 'MT'
"""
#vector
delP = self.p1[i] - self.p0[i]
#magnitude
magP = np.sqrt(delP[0]**2 + delP[1]**2 + delP[2]**2)
#time it takes to transit line segment
delta_t = magP / (self.vParallelMC[self.GYRO_HLXmap][i])
#Number of steps in line segment
Tsample = self.TGyro[self.GYRO_HLXmap][i] / self.N_gyroSteps
Nsteps = int(delta_t / Tsample)
#length (in time) along guiding center
t = np.linspace(0,delta_t,Nsteps+1)
#guiding center location
xGC = np.linspace(self.p0[i,0],self.p1[i,0],Nsteps+1)
yGC = np.linspace(self.p0[i,1],self.p1[i,1],Nsteps+1)
zGC = np.linspace(self.p0[i,2],self.p1[i,2],Nsteps+1)
arrGC = np.vstack([xGC,yGC,zGC]).T
# construct orthogonal system for coordinate transformation
w = delP
if np.all(w==[0,0,1]):
u = np.cross(w,[0,1,0]) #prevent failure if bhat = [0,0,1]
else:
u = np.cross(w,[0,0,1]) #this would fail if bhat = [0,0,1] (rare)
v = np.cross(w,u)
#normalize
u = u / np.sqrt(u.dot(u))
v = v / np.sqrt(v.dot(v))
w = w / np.sqrt(w.dot(w))
xfm = np.vstack([u,v,w]).T
#get helix path along (proxy) z axis reference frame
rGyro = self.rGyroMC[self.GYRO_HLXmap][i]
omega = self.omegaGyro[self.GYRO_HLXmap][i]
theta = self.lastPhase[self.GYRO_HLXmap][i]
x_helix = rGyro*np.cos(omega*t + theta)
y_helix = self.diamag*rGyro*np.sin(omega*t + theta)
z_helix = np.zeros((len(t)))
#perform rotation to field line reference frame
helix = np.vstack([x_helix,y_helix,z_helix]).T
helix_rot = np.zeros((len(helix),3))
for j,coord in enumerate(helix):
helix_rot[j,:] = helix[j,0]*u + helix[j,1]*v + helix[j,2]*w
#perform translation to field line reference frame
helix_rot[:,0] += xGC
helix_rot[:,1] += yGC
helix_rot[:,2] += zGC
#shift entire helix to ensure we capture intersections in p0 plane
helix_rot[:,0] += w[0]*0.0003
helix_rot[:,1] += w[1]*0.0003
helix_rot[:,2] += w[2]*0.0003
#update gyroPhase variable so next iteration starts here
lastPhase = omega*t[-1] + theta
#=== intersection checking ===
q1 = helix_rot[:-1,:]
q2 = helix_rot[1:,:]
#Filter by psi
if self.psiFilterSwitch == True:
psiP1 = self.PFC_psiP1
psiP2 = self.PFC_psiP2
psiP3 = self.PFC_psiP3
psiMin = self.psiMin[i]
psiMax = self.psiMax[i]
#account for psi sign convention
if psiMin > psiMax:
pMin = psiMax
pMax = psiMin
else:
pMin = psiMin
pMax = psiMax
#target faces outside of this toroidal slice
test0 = np.logical_and(psiP1 < pMin,
psiP2 < pMin,
psiP3 < pMin)
test1 = np.logical_and(psiP1 > pMax,
psiP2 > pMax,
psiP3 > pMax)
test = np.logical_or(test0,test1)
usePsi = np.where(test == False)[0]
else:
usePsi = np.arange(len(self.PFC_t1))
#Filter by toroidal angle
if self.phiFilterSwitch == True:
phiP1 = self.PFC_phiP1
phiP2 = self.PFC_phiP2
phiP3 = self.PFC_phiP3
phiMin = self.phiMin[i]
phiMax = self.phiMax[i]
#angle wrap cases (assumes we never trace in MAFOT steps larger than 10degrees)
if np.abs(phiMin-phiMax) > np.radians(5):
phiP1[phiP1<0] += 2*np.pi
phiP2[phiP2<0] += 2*np.pi
phiP3[phiP3<0] += 2*np.pi
if phiMin < 0: phiMin+=2*np.pi
if phiMax < 0: phiMax+=2*np.pi
#account for toroidal sign convention
if phiMin > phiMax:
pMin = phiMax
pMax = phiMin
else:
pMin = phiMin
pMax = phiMax
#target faces outside of this toroidal slice
test0 = np.logical_and(phiP1 < pMin,
phiP2 < pMin,
phiP3 < pMin)
test1 = np.logical_and(phiP1 > pMax,
phiP2 > pMax,
phiP3 > pMax)
test = np.logical_or(test0,test1)
usePhi = np.where(test == False)[0]
else:
usePhi = np.arange(len(self.PFC_t1))
#combine filter algorithms
        use = np.intersect1d(usePsi,usePhi)
#!/usr/bin/env python
"""
Copyright (c) 2018 Intel Corporation
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import os
import time
import numpy as np
from argparse import ArgumentParser
import torch
from transformers import AutoTokenizer, AutoModelForCausalLM
from openvino.inference_engine import IECore
def generate_greedy_pytorch(tokens, model, n):
complete_seq = tokens.permute((1, 0)).tolist()
for _ in range(n):
out = model(tokens)
next_tokens = torch.argmax(out.logits[:, -1], dim = -1).unsqueeze(1)
tokens = torch.cat([tokens, next_tokens], dim=-1)
tokens = tokens[:, 1:]
complete_seq.extend(next_tokens.tolist())
return np.array(complete_seq).T.tolist()
def generate_greedy_openvino(tokens, exec_net, n, logits_dict_key = "2859"):
complete_seq = tokens.T.tolist()
for _ in range(n):
        out = exec_net.infer(inputs={"0": tokens})[logits_dict_key]
next_tokens = np.argmax(out[:, -1], axis=-1).reshape(-1, 1)
tokens = np.hstack((tokens, next_tokens))
tokens = tokens[:, 1:]
complete_seq.extend(next_tokens.tolist())
    return np.array(complete_seq).T.tolist()
import numpy as np
from sklearn.base import BaseEstimator
from HTMLParser import HTMLParser
class FeatureMapper:
def __init__(self, features):
self.features = features
def fit(self, X, y=None):
for feature_name, column_name, extractor in self.features:
extractor.fit(X[column_name], y)
def transform(self, X):
extracted = []
for feature_name, column_name, extractor in self.features:
fea = extractor.transform(X[column_name])
if hasattr(fea, "toarray"):
extracted.append(fea.toarray())
else:
extracted.append(fea)
if len(extracted) > 1:
return np.concatenate(extracted, axis=1)
else:
return extracted[0]
def fit_transform(self, X, y=None):
extracted = []
for feature_name, column_name, extractor in self.features:
fea = extractor.fit_transform(X[column_name], y)
if hasattr(fea, "toarray"):
extracted.append(fea.toarray())
else:
extracted.append(fea)
if len(extracted) > 1:
            return np.concatenate(extracted, axis=1)
        else:
            return extracted[0]
import numpy as np
import cv2
import glob
import random
import math
import matplotlib.pyplot as plt
from ReadCameraModel import*
from mpl_toolkits.mplot3d import Axes3D
# Find skew matrix
def skew(imagePoint0,imagePoint1):
a = imagePoint0[0]
b = imagePoint0[1]
c = imagePoint0[2]
skew1 = np.array([[0,-c,b],[c,0,-a],[-b,a,0]])
a = imagePoint1[0]
b = imagePoint1[1]
c = imagePoint1[2]
skew2 = np.array([[0,-c,b],[c,0,-a],[-b,a,0]])
return skew1,skew2
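# Added example (illustrative): the skew-symmetric matrix built from a vector v
# reproduces the cross product, i.e. skew1 @ w == np.cross(v, w).
def _example_skew():
    v = np.array([1.0, 2.0, 3.0])
    w = np.array([4.0, 5.0, 6.0])
    s, _ = skew(v, v)
    assert np.allclose(s @ w, np.cross(v, w))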
# Calculate Linear Triangulation
def linearTriangulate(K,P0,P1,P2,P3,P4,imagePoint0,imagePoint1):
X1 = []
X2 = []
X3 = []
X4 = []
pose1 = np.dot(K,P0) #3X4
pose21 = np.dot(K,P1) #3X4
pose22 = np.dot(K,P2) #3X4
pose23 = np.dot(K,P3) #3X4
pose24 = np.dot(K,P4) #3X4
for i in range(0,imagePoint0.shape[0]):
skew1,skew2 = skew(imagePoint0[i],imagePoint1[i]) #3X3
A1 = np.dot(skew1,pose1) #3X4
A21 = np.dot(skew2,pose21) #3X4
A22 = np.dot(skew2,pose22)
A23 = np.dot(skew2,pose23)
A24 = np.dot(skew2,pose24)
A_1 = np.vstack((A1,A21)) #6X4
A_2 = np.vstack((A1,A22))
A_3 = np.vstack((A1,A23))
A_4 = np.vstack((A1,A24))
u1,s1,v1 = np.linalg.svd(A_1) #6X6,6X4,4X4
u2,s2,v2 = np.linalg.svd(A_2)
u3,s3,v3 = np.linalg.svd(A_3)
u4,s4,v4 = np.linalg.svd(A_4)
X1.append(v1[-1])
X2.append(v2[-1])
X3.append(v3[-1])
X4.append(v4[-1])
X1 = np.array(X1)
X1 = X1.T[:3,:]/X1.T[3,:] #Xt is 3xn
X2 = np.array(X2)
X2 = X2.T[:3,:]/X2.T[3,:] #Xt is 3xn
X3 = np.array(X3)
X3 = X3.T[:3,:]/X3.T[3,:] #Xt is 3xn
X4 = np.array(X4)
X4 = X4.T[:3,:]/X4.T[3,:] #Xt is 3xn
return X1, X2, X3, X4
# Find Projection Matrix
def findPMatrix(C,R,K):
T = np.array([[1.0,0,0,-C[0][0]],[0,1.0,0,-C[1][0]],[0,0,1.0,-C[2][0]]])
P = np.matmul(K,np.matmul(R,T))
return P
# Check Cheirality
def checkChieralty(X,R,C):
sub = X - C
r3 = R[2,:]
r3 = r3.reshape((1,3))
triangu = np.matmul(r3 ,sub)
test = triangu.T
numberOfpoints = len(np.where(test > 0)[0])
return numberOfpoints
# Finds L1 Norm of a Matrix
def findL1Norm(F):
norm = np.max(F.sum(axis= 0))
return norm
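# Added example (illustrative): findL1Norm returns the largest column sum (no
# absolute value is taken, so it matches the induced 1-norm only for matrices
# with non-negative entries).
def _example_findL1Norm():
    F = np.array([[1.0, 4.0], [2.0, 3.0]])
    assert findL1Norm(F) == 7.0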
# Implements Zhang's Method of Fundamental Matrix
def zhangsMethod(imageW, imageH, points1, points2):
dw = int(imageW/8)
dh = int(imageH/8)
gridPoints1 = []
for i in range(0,8):
for j in range(0,8):
points1X1 = np.where(points1[:,0] < (i+1)*dw)
testX1 = points1[points1X1]
points1X2 = np.where(testX1[:,0] > i*dw)
testX2 = testX1[points1X2]
points2X1 = np.where(testX2[:,1] < (j+1)*dh)
testX3 = testX2[points2X1]
points2X2 = np.where(testX3[:,1] > j*dh)
testX4 = testX3[points2X2]
gridPoints1.append(testX4)
gridPoints1N = list(filter(lambda x: x.size != 0, gridPoints1))
return gridPoints1N
# Function to Normalize the data for the 8-point Algorithm
def normalizeData(points):
meanX = np.mean(points[:,0])
meanY = np.mean(points[:,1])
d = np.sqrt((points[:,0] - meanX)**2 + (points[:,1] - meanY)**2)
dMean = np.mean(d)
scale = math.sqrt(2)/dMean
T = np.array([[scale, 0 , -meanX*scale],[0, scale, -meanY*scale],[0, 0, 1]])
normalizedPoints = np.matmul(T, points.T)
return normalizedPoints.T, T #returns points as nx3
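# Added example (illustrative): after normalizeData the point centroid sits at the
# origin and the mean distance from it is sqrt(2) (Hartley-style normalization).
def _example_normalizeData():
    pts = np.array([[0.0, 0.0, 1.0],
                    [2.0, 0.0, 1.0],
                    [2.0, 2.0, 1.0],
                    [0.0, 2.0, 1.0]])
    norm_pts, _ = normalizeData(pts)
    d = np.sqrt(norm_pts[:, 0]**2 + norm_pts[:, 1]**2)
    assert abs(np.mean(d) - np.sqrt(2)) < 1e-9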
# Function implementing Normalized F Matrix Calculation Method
def normalizeFMethod(points1, points2):
points1Normalize, T1 = normalizeData(points1)
points2Normalize, T2 = normalizeData(points2)
in1, in2, in3, in4, in5, in6, in7, in8, in9 = points2Normalize[:,0]*points1Normalize[:,0],points2Normalize[:,0]*points1Normalize[:,1],points2Normalize[:,0],points2Normalize[:,1]*points1Normalize[:,0],points2Normalize[:,1]*points1Normalize[:,1],points2Normalize[:,1],points1Normalize[:,0], points1Normalize[:,1], np.ones(len(points1Normalize[:,1]))
a = np.vstack((in1, in2, in3, in4, in5, in6, in7, in8, in9))
A = a.T
u, s, V = np.linalg.svd(A)
Fmatrix = np.reshape(V[8,:], (3, 3))
u1,s1,v1 = np.linalg.svd(Fmatrix)
avg = (s1[1]+s1[0])/2
s1[0], s1[1] = avg, avg
s1[2] = 0
fmat = np.matmul(u1,np.matmul(np.diag(s1),v1))
F = np.matmul(T2.T, np.matmul(fmat, T1))
Fnormal = F/findL1Norm(F)
Fnormal = Fnormal/Fnormal[2][2]
if Fnormal[2,2] < 0:
Fnormal = -1 * Fnormal
return Fnormal
# Finding FMatrix using RANSAC
def findRANSAC(points1, points2, imageW, imageH):
gridPoints1N = zhangsMethod(imageW, imageH, points1, points2)
it = 0
numb = 0
while(it < 250):
blockNumber = []
i = 0
if(len(gridPoints1N) <=8):
while(i < 8):
b = random.randint(0,len(gridPoints1N)-1)
blockNumber.append(b)
i += 1
else:
while(i < 8):
b = random.randint(0,len(gridPoints1N)-1)
if not b in blockNumber:
blockNumber.append(b)
else:
i = i - 1
i += 1
pnts1 = []
pnts2 = []
for i in blockNumber:
itr = random.randint(0, len(gridPoints1N[i])-1)
pnts1.append(list(gridPoints1N[i][itr,:]))
pos = 0
for p in range(0 , points1.shape[0]):
if(points1[p][0] == gridPoints1N[i][itr,0] and points1[p][1] == gridPoints1N[i][itr,1]):
pos = p
pnts2.append(list(points2[pos]))
pnts1 = np.array(pnts1)
pnts2 = np.array(pnts2)
F = normalizeFMethod(pnts1, pnts2)
checkInliner = np.matmul(points2 , np.matmul(F, points1.T))
diagonalOfInliners = checkInliner.diagonal()
inliers = np.where(abs(diagonalOfInliners) <= 0.05)[0]
numberOfInliners = len(inliers)
if(numberOfInliners > numb):
numb = numberOfInliners
Ffinal = F
inliersPoints1 = points1[inliers]
inliersPoints2 = points2[inliers]
it += 1
Fn = normalizeFMethod(inliersPoints1, inliersPoints2)
return Fn
# Finding essential matrix
def findEssentialMatrix(F, Ml, Mr):
E = np.matmul(Mr, np.matmul(F, Ml))
u,s,v = np.linalg.svd(E)
s = np.eye(3)
s[2][2] = 0
E = np.dot(u,np.dot(s,v))
E = E/findL1Norm(E)
return E
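# A minimal sketch of how the routines above are typically chained for one image pair.
# Everything here is illustrative: K (3x3 intrinsics), the image size and the matched
# point arrays are assumed to come from the caller, and both cameras are assumed to
# share the same intrinsics. Points are expected as Nx3 homogeneous rows [x, y, 1].
def exampleTwoViewStep(points1, points2, K, imageW, imageH):
    F = findRANSAC(points1, points2, imageW, imageH)  # fundamental matrix from bucketed RANSAC
    E = findEssentialMatrix(F, K, K.T)                # E = Mr.F.Ml = K^T F K given the signature above
    return F, E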
# Getting Camera Poses
def getCameraPose(E):
u, s, v = np.linalg.svd(E)
W = | np.array([[0, -1, 0],[1, 0, 0],[0, 0, 1]]) | numpy.array |
import numpy as np
import pytest
from napari._tests.utils import check_layer_world_data_extent
from napari.layers import Surface
def test_random_surface():
"""Test instantiating Surface layer with random 2D data."""
np.random.seed(0)
vertices = np.random.random((10, 2))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 2
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert np.all(layer.vertices == vertices)
assert np.all(layer.faces == faces)
assert np.all(layer.vertex_values == values)
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
def test_random_3D_surface():
"""Test instantiating Surface layer with random 3D data."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 3
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
layer.dims.ndisplay = 3
assert layer._data_view.shape[1] == 3
assert layer._view_vertex_values.ndim == 1
def test_random_4D_surface():
"""Test instantiating Surface layer with random 4D data."""
np.random.seed(0)
vertices = np.random.random((10, 4))
faces = np.random.randint(10, size=(6, 3))
values = np.random.random(10)
data = (vertices, faces, values)
layer = Surface(data)
assert layer.ndim == 4
assert np.all([np.all(ld == d) for ld, d in zip(layer.data, data)])
assert layer._data_view.shape[1] == 2
assert layer._view_vertex_values.ndim == 1
layer.dims.ndisplay = 3
assert layer._data_view.shape[1] == 3
assert layer._view_vertex_values.ndim == 1
def test_random_3D_timeseries_surface():
"""Test instantiating Surface layer with random 3D timeseries data."""
np.random.seed(0)
vertices = np.random.random((10, 3))
faces = np.random.randint(10, size=(6, 3))
values = | np.random.random((22, 10)) | numpy.random.random |
from __future__ import annotations
from typing import Callable, Iterable, Optional, Tuple, Union
import copy
import materia as mtr
from ..utils import memoize
import numpy as np
import scipy.integrate
import scipy.interpolate
# import matplotlib.pyplot as plt
import warnings
# __all__ = []
class DataSeries:
def __init__(self, x: mtr.Quantity, y: mtr.Quantity) -> None:
self.x = x
self.y = y
# def plot(self):
# plt.plot(self.x.value, self.y.value)
# plt.show()
def broaden_gaussian(
self, fwhm: mtr.Quantity
    ) -> Callable[[mtr.Quantity], Iterable[Union[int, float]]]:
def f(energies: mtr.Quantity) -> Iterable[Union[int, float]]:
s = 0
for excitation in self.excitations:
x = (energies - excitation.energy) / fwhm
x = np.array([e.value for e in x])
s += excitation.oscillator_strength * np.exp(-0.5 * x ** 2)
return s / (np.sqrt(2 * np.pi) * fwhm)
return f
    @staticmethod
    def broaden_lorentzian(gamma):
def _f(omega, gamma, w):
return gamma * omega / ((w ** 2 - omega ** 2) ** 2 + omega ** 2 * gamma ** 2)
def f(omega, fs, ws):
return (fs[None, :] @ np.vstack([_f(omega, gamma, w) for w in ws])).squeeze()
return f
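# Illustrative use of the Lorentzian broadener above (hypothetical numbers): it returns a
# closure over plain numpy arrays, so no Quantity objects are required.
#
#     broaden = DataSeries.broaden_lorentzian(gamma=0.1)
#     omega = np.linspace(0.5, 5.0, 200)   # evaluation grid
#     ws = np.array([1.0, 2.5])            # excitation energies
#     fs = np.array([0.8, 0.3])            # oscillator strengths
#     spectrum = broaden(omega, fs, ws)    # shape (200,)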
class DeltaSeries:
def __init__(self, x: mtr.Quantity, y: mtr.Quantity) -> None:
self.x = x
self.y = y / x.unit
def broaden(self, peak_func, x_eval, in_place=True):
value = peak_func(x=self.x, y=self.y, x_eval=x_eval)
if in_place:
self.x = x_eval * self.x.unit
self.y = value * self.y.unit
return self
else:
new_series = copy.deepcopy(self)
new_series.x = x_eval * self.x.unit
new_series.y = value * self.y.unit
return new_series
# def plot(self):
# plt.scatter(self.x.value, self.y.value)
# plt.show()
class TimeSeries(DataSeries):
@property
def dt(self) -> mtr.Quantity:
# FIXME: should we even check for uniform spacing since this method,
# like most/all methods, is susceptible to floating point errors?
spacings = np.diff(self.x.value)
dt, *_ = spacings
if not np.allclose(spacings, dt):
raise ValueError(
"Time array does not have a unique spacing.\
Consider interpolating to a uniformly spaced time array first."
)
else:
return dt * self.x.unit
@property
def T(self) -> mtr.Quantity:
return self.x[-1] - self.x[0]
def damp(self) -> None: # , final_damp_value):
final_damp_value = 1e-4 # Quantity(value=1e-4,unit=self.x.unit)
etasq = -np.log(final_damp_value) / self.T ** 2
damp = np.exp(-(etasq * self.x ** 2).value)
self.y *= damp
def fourier_transform(self, pad_len=None):
if pad_len is None:
pad_len = len(self.x)
fft_value = np.fft.fft(a=self.y.value, n=pad_len) / pad_len
fft_real = fft_value.real * self.y.unit
fft_imag = fft_value.imag * self.y.unit
        fft_freq = np.fft.fftfreq(n=pad_len, d=self.dt.value) / self.x.unit  # dt is a property, not a method
return (
mtr.Spectrum(x=fft_freq, y=fft_real),
mtr.Spectrum(x=fft_freq, y=fft_imag),
)
class Spectrum(DataSeries):
def match(self, match_to, in_place=True, interp_method="cubic_spline"):
return self.extrapolate(x_extrap_to=match_to.x, in_place=in_place).interpolate(
x_interp_to=match_to.x, in_place=in_place, method=interp_method
)
def extrapolate(self, x_extrap_to, in_place=True):
if self.x.unit != x_extrap_to.unit:
raise ValueError(
"X-axis units do not match. Change units before extrapolating."
)
x_extrap, y_extrap = mtr.extrapolate(
x=self.x.value, y=self.y.value, x_extrap_to=x_extrap_to.value
)
x_extrap = x_extrap * self.x.unit
y_extrap = y_extrap * self.y.unit
if in_place:
self.x, self.y = x_extrap, y_extrap
return self
else:
new_spectrum = copy.deepcopy(self)
new_spectrum.x = x_extrap
new_spectrum.y = y_extrap
return new_spectrum
# return self.__class__(x=x_extrap,y=y_extrap)
def interpolate(self, x_interp_to, in_place=True, method="cubic_spline"):
if self.x.unit != x_interp_to.unit:
raise ValueError(
"X-axis units do not match. Change units before interpolating."
)
x_interp, y_interp = mtr.interpolate(
x=self.x.value, y=self.y.value, x_interp_to=x_interp_to.value, method=method
)
x_interp = x_interp * self.x.unit
y_interp = y_interp * self.y.unit
if in_place:
self.x, self.y = x_interp, y_interp
return self
else:
new_spectrum = copy.deepcopy(self)
new_spectrum.x = x_interp
new_spectrum.y = y_interp
return new_spectrum
# return self.__class__(x=x_interp, y=y_interp)
# def plot(self, x=None):
# # FIXME: add axes labels, title
# if x is not None:
# plt.plot(x, self.y.match(match_to=x, in_place=False))
# else:
# plt.plot(self.x, self.y)
# plt.xlabel(f"Units: {self.x.unit}")
# plt.ylabel(f"Units: {self.y.unit}")
# plt.show()
class PolarizabilitySpectrum(Spectrum):
def __init__(self, x, y):
super().__init__(x=x, y=y)
class AbsorptionSpectrum(Spectrum):
def __init__(self, x, y):
super().__init__(x=x, y=y)
class ReflectanceSpectrum(Spectrum):
def reflect_illuminant(self, illuminant):
new_spectrum = copy.deepcopy(illuminant)
new_spectrum.x = self.x
new_spectrum.y = self.y * illuminant.match(match_to=self, in_place=False).y
return new_spectrum
class TransmittanceSpectrum(Spectrum):
def transmit_illuminant(self, illuminant):
new_spectrum = copy.deepcopy(illuminant)
new_spectrum.x = self.x
new_spectrum.y = self.y * illuminant.match(match_to=self, in_place=False).y
return new_spectrum
def avt(self):
        from .data import ASTMG173, PhotopicResponse  # assumes PhotopicResponse also lives in .data (it is used below)
photopic = PhotopicResponse().match(match_to=self)
astmg = ASTMG173().match(match_to=self)
num = scipy.integrate.simps(
y=(self.y * astmg.y * photopic.y).value, x=self.x.value
)
denom = scipy.integrate.simps(y=(astmg.y * photopic.y).value, x=astmg.x.value)
return num / denom
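# In effect, avt() above computes the average visible transmittance
#     AVT = \int T(lambda) S(lambda) V(lambda) dlambda / \int S(lambda) V(lambda) dlambda
# with T the transmittance, S the ASTM G-173 solar spectrum and V the photopic response,
# both matched onto this spectrum's wavelength grid and integrated with Simpson's rule.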
class SPDSpectrum(Spectrum):
@property
@memoize
def XYZ(self) -> Tuple[float, float, float]:
# FIXME: this is an ugly workaround to avoid circular import - change it!!
from .data import (
CIE1931ColorMatchingFunctionX,
CIE1931ColorMatchingFunctionY,
CIE1931ColorMatchingFunctionZ,
)
xbar = CIE1931ColorMatchingFunctionX().match(match_to=self, in_place=True)
ybar = CIE1931ColorMatchingFunctionY().match(match_to=self, in_place=True)
zbar = CIE1931ColorMatchingFunctionZ().match(match_to=self, in_place=True)
X = scipy.integrate.simps(y=(self.y * xbar.y).value, x=self.x.value)
Y = scipy.integrate.simps(y=(self.y * ybar.y).value, x=self.x.value)
Z = scipy.integrate.simps(y=(self.y * zbar.y).value, x=self.x.value)
return X / Y, 1.0, Z / Y
# FIXME: verify correctness with test case
def von_kries_XYZ(self, source_illuminant, destination_illuminant):
source_lms = np.array(source_illuminant.LMS())
destination_lms = np.array(destination_illuminant.LMS())
LMS = np.array(self.LMS())[:, None]
hpe = hunt_pointer_estevez_transform()
adapted_LMS = np.diag(destination_lms / source_lms) @ LMS
adapted_XYZ = np.linalg.inv(hpe) @ adapted_LMS
adapted_X, adapted_Y, adapted_Z = adapted_XYZ.squeeze()
return adapted_X, adapted_Y, adapted_Z
def UVW(self, white_point=None):
X, Y, Z = self.XYZ
return self._XYZ_to_UVW(X=X, Y=Y, Z=Z, white_point=white_point)
def von_kries_UVW(
self, source_illuminant, destination_illuminant, white_point=None
):
X, Y, Z = self.von_kries_XYZ(
source_illuminant=source_illuminant,
destination_illuminant=destination_illuminant,
)
return self._XYZ_to_UVW(X=X, Y=Y, Z=Z, white_point=white_point)
# FIXME: shouldn't the white_point actually be
# the desination illuminant's white point?
def _XYZ_to_UVW(self, X, Y, Z, white_point=None):
if white_point is None:
U = 2 * X / 3
V = Y
W = 0.5 * (-X + 3 * Y + Z)
else:
u0, v0 = white_point
u, v = self.uv
W = 25 * | np.power(Y, 1 / 3) | numpy.power |
#!/usr/bin/env python
import roslib; roslib.load_manifest('robot_kf')
import rospy
import math
import numpy as np
import scipy.optimize
from nav_msgs.msg import Odometry
from robot_kf.msg import WheelOdometry
from sensor_msgs.msg import Imu
class OdometryCalibrator:
def __init__(self):
self.time_gps = list()
self.time_compass = list()
self.time_odom = list()
self.data_gps = list()
self.data_compass = list()
self.data_odom = list()
def setup(self, topic_odom, topic_gps, topic_compass):
self.sub_odom = rospy.Subscriber(topic_odom, WheelOdometry, self.callback_odom)
self.sub_gps = rospy.Subscriber(topic_gps, Odometry, self.callback_gps)
self.sub_compass = rospy.Subscriber(topic_compass, Imu, self.callback_compass)
def callback_gps(self, msg):
datum_gps = self._get_odom_pose(msg)
self.time_gps.append(msg.header.stamp)
self.data_gps.append(datum_gps[0:2])
def callback_compass(self, msg):
datum_compass = self._get_compass_yaw(msg)
self.time_compass.append(msg.header.stamp)
self.data_compass.append(datum_compass)
def callback_odom(self, msg):
datum_odom = self._get_wheel_movement(msg)
self.time_odom.append(msg.header.stamp)
self.data_odom.append(datum_odom)
def optimize(self, guess, alpha):
gps = np.array(self.data_gps)
compass = np.array(self.data_compass)
odom = np.array(self.data_odom)
# Find the distance between subsequent GPS samples.
zero = | np.zeros((1, 2)) | numpy.zeros |
#!/usr/bin/env python3
import sklearn.metrics
import numpy as np
from sklearn.preprocessing import normalize # version : 0.17
def Y_2_allocation(Y):
i = 0
allocation = np.array([])
for m in range(Y.shape[0]):
allocation = np.hstack((allocation, np.where(Y[m] == 1)[0][0]))
i += 1
return allocation
def Allocation_2_Y(allocation):
N = np.size(allocation)
unique_elements = np.unique(allocation)
num_of_classes = len(unique_elements)
class_ids = np.arange(num_of_classes)
i = 0
Y = np.zeros(num_of_classes)
for m in allocation:
class_label = np.where(unique_elements == m)[0]
a_row = np.zeros(num_of_classes)
a_row[class_label] = 1
Y = np.hstack((Y, a_row))
Y = np.reshape(Y, (N+1,num_of_classes))
Y = np.delete(Y, 0, 0)
return Y
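# Minimal round-trip sketch for the two helpers above (hypothetical labels):
def example_allocation_roundtrip():
    labels = np.array([0, 1, 1, 2])
    Y = Allocation_2_Y(labels)       # 4x3 one-hot matrix
    recovered = Y_2_allocation(Y)    # array([0., 1., 1., 2.])
    return Y, recovered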
def Kx_D_given_W(db, setX=None, setW=None):
    # Use the supplied X/W if given, otherwise fall back to the stored data and projection.
    X = db['Dloader'].X if setX is None else setX
    W = db['W'] if setW is None else setW
    outX = X.dot(W)
if db['kernel_type'] == 'rbf':
Kx = rbk_sklearn(outX, db['Dloader'].σ)
elif db['kernel_type'] == 'rbf_slow':
Kx = rbk_sklearn(outX, db['Dloader'].σ)
elif db['kernel_type'] == 'linear':
Kx = outX.dot(outX.T)
elif db['kernel_type'] == 'polynomial':
        Kx = poly_sklearn(outX, db['poly_power'], db['poly_constant'])
np.fill_diagonal(Kx, 0) # Set diagonal of adjacency matrix to 0
D = compute_inverted_Degree_matrix(Kx)
return [Kx, D]
def poly_sklearn(data, p, c):
poly = sklearn.metrics.pairwise.polynomial_kernel(data, degree=p, coef0=c)
return poly
def normalized_rbk_sklearn(X, σ):
Kx = rbk_sklearn(X, σ)
D = compute_inverted_Degree_matrix(Kx)
return D.dot(Kx).dot(D)
def rbk_sklearn(data, sigma):
gammaV = 1.0/(2*sigma*sigma)
rbk = sklearn.metrics.pairwise.rbf_kernel(data, gamma=gammaV)
np.fill_diagonal(rbk, 0) # Set diagonal of adjacency matrix to 0
return rbk
def Ku_kernel(labels):
Y = Allocation_2_Y(labels)
Ky = Y.dot(Y.T)
return Ky
def double_center(M, H):
HMH = H.dot(M).dot(H)
return HMH
def nomalized_by_Degree_matrix(M, D):
D2 = np.diag(D)
DMD = M*( | np.outer(D2, D2) | numpy.outer |
import os
import sys
import yaml
import numpy as np
import torch
import torch.utils.data as data
import numpy.random as npr
import cv2
import copy
import glob
import scipy
import datasets
from config.config import cfg
from transforms3d.quaternions import mat2quat, quat2mat
from utils.se3 import *
from utils.pose_error import *
from utils.cython_bbox import bbox_overlaps
_SUBJECTS = [
'20200709-subject-01',
'20200813-subject-02',
'20200820-subject-03',
'20200903-subject-04',
'20200908-subject-05',
'20200918-subject-06',
'20200928-subject-07',
'20201002-subject-08',
'20201015-subject-09',
'20201022-subject-10',
]
_SERIALS = [
'836212060125',
'839512060362',
'840412060917',
'841412060263',
'932122060857',
'932122060861',
'932122061900',
'932122062010',
]
_YCB_CLASSES = {
1: '002_master_chef_can',
2: '003_cracker_box',
3: '004_sugar_box',
4: '005_tomato_soup_can',
5: '006_mustard_bottle',
6: '007_tuna_fish_can',
7: '008_pudding_box',
8: '009_gelatin_box',
9: '010_potted_meat_can',
10: '011_banana',
11: '019_pitcher_base',
12: '021_bleach_cleanser',
13: '024_bowl',
14: '025_mug',
15: '035_power_drill',
16: '036_wood_block',
17: '037_scissors',
18: '040_large_marker',
19: '051_large_clamp',
20: '052_extra_large_clamp',
21: '061_foam_brick',
}
_MANO_JOINTS = [
'wrist',
'thumb_mcp',
'thumb_pip',
'thumb_dip',
'thumb_tip',
'index_mcp',
'index_pip',
'index_dip',
'index_tip',
'middle_mcp',
'middle_pip',
'middle_dip',
'middle_tip',
'ring_mcp',
'ring_pip',
'ring_dip',
'ring_tip',
'little_mcp',
'little_pip',
'little_dip',
'little_tip'
]
_MANO_JOINT_CONNECT = [
[0, 1], [ 1, 2], [ 2, 3], [ 3, 4],
[0, 5], [ 5, 6], [ 6, 7], [ 7, 8],
[0, 9], [ 9, 10], [10, 11], [11, 12],
[0, 13], [13, 14], [14, 15], [15, 16],
[0, 17], [17, 18], [18, 19], [19, 20],
]
_BOP_EVAL_SUBSAMPLING_FACTOR = 4
class dex_ycb_dataset(data.Dataset):
def __init__(self, setup, split, obj_list):
self._setup = setup
self._split = split
self._color_format = "color_{:06d}.jpg"
self._depth_format = "aligned_depth_to_color_{:06d}.png"
self._label_format = "labels_{:06d}.npz"
self._height = 480
self._width = 640
# paths
self._name = 'dex_ycb_' + setup + '_' + split
self._image_set = split
self._dex_ycb_path = self._get_default_path()
path = os.path.join(self._dex_ycb_path, 'data')
self._data_dir = path
self._calib_dir = os.path.join(self._data_dir, "calibration")
self._model_dir = os.path.join(self._data_dir, "models")
self._obj_file = {
k: os.path.join(self._model_dir, v, "textured_simple.obj")
for k, v in _YCB_CLASSES.items()
}
# define all the classes
self._classes_all = ('002_master_chef_can', '003_cracker_box', '004_sugar_box', '005_tomato_soup_can', '006_mustard_bottle', \
'007_tuna_fish_can', '008_pudding_box', '009_gelatin_box', '010_potted_meat_can', '011_banana', '019_pitcher_base', \
'021_bleach_cleanser', '024_bowl', '025_mug', '035_power_drill', '036_wood_block', '037_scissors', '040_large_marker', \
'051_large_clamp', '052_extra_large_clamp', '061_foam_brick')
self._num_classes_all = len(self._classes_all)
self._class_colors_all = [(255, 0, 0), (0, 255, 0), (0, 0, 255), (255, 255, 0), (255, 0, 255), (0, 255, 255), \
(128, 0, 0), (0, 128, 0), (0, 0, 128), (128, 128, 0), (128, 0, 128), (0, 128, 128), \
(64, 0, 0), (0, 64, 0), (0, 0, 64), (64, 64, 0), (64, 0, 64), (0, 64, 64),
(192, 0, 0), (0, 192, 0), (0, 0, 192)]
self._extents_all = self._load_object_extents()
self._posecnn_class_indexes = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16, 17, 18, 20, 21]
# compute class index
class_index = []
for name in obj_list:
for i in range(self._num_classes_all):
if name == self._classes_all[i]:
class_index.append(i)
break
print('class index:', class_index)
self._class_index = class_index
# select a subset of classes
self._classes = obj_list
self._num_classes = len(self._classes)
self._class_colors = [self._class_colors_all[i] for i in class_index]
self._extents = self._extents_all[class_index]
self._points, self._points_all = self._load_object_points(self._classes, self._extents)
# Seen subjects, camera views, grasped objects.
if self._setup == 's0':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i % 5 != 4]
if self._split == 'val':
subject_ind = [0, 1]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i % 5 == 4]
if self._split == 'test':
subject_ind = [2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i % 5 == 4]
# Unseen subjects.
if self._setup == 's1':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = list(range(100))
if self._split == 'val':
subject_ind = [6]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = list(range(100))
if self._split == 'test':
subject_ind = [7, 8]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = list(range(100))
# Unseen camera views.
if self._setup == 's2':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5]
sequence_ind = list(range(100))
if self._split == 'val':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [6]
sequence_ind = list(range(100))
if self._split == 'test':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [7]
sequence_ind = list(range(100))
# Unseen grasped objects.
if self._setup == 's3':
if self._split == 'train':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [
i for i in range(100) if i // 5 not in (3, 7, 11, 15, 19)
]
if self._split == 'val':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i // 5 in (3, 19)]
if self._split == 'test':
subject_ind = [0, 1, 2, 3, 4, 5, 6, 7, 8, 9]
serial_ind = [0, 1, 2, 3, 4, 5, 6, 7]
sequence_ind = [i for i in range(100) if i // 5 in (7, 11, 15)]
self._subjects = [_SUBJECTS[i] for i in subject_ind]
self._serials = [_SERIALS[i] for i in serial_ind]
self._intrinsics = []
for s in self._serials:
intr_file = os.path.join(self._calib_dir, "intrinsics", "{}_{}x{}.yml".format(s, self._width, self._height))
with open(intr_file, 'r') as f:
intr = yaml.load(f, Loader=yaml.FullLoader)
intr = intr['color']
self._intrinsics.append(intr)
# build mapping
self._sequences = []
self._mapping = []
self._ycb_ids = []
offset = 0
for n in self._subjects:
seq = sorted(os.listdir(os.path.join(self._data_dir, n)))
seq = [os.path.join(n, s) for s in seq]
assert len(seq) == 100
seq = [seq[i] for i in sequence_ind]
self._sequences += seq
for i, q in enumerate(seq):
meta_file = os.path.join(self._data_dir, q, "meta.yml")
with open(meta_file, 'r') as f:
meta = yaml.load(f, Loader=yaml.FullLoader)
c = np.arange(len(self._serials))
f = np.arange(meta['num_frames'])
f, c = np.meshgrid(f, c)
c = c.ravel()
f = f.ravel()
s = (offset + i) * np.ones_like(c)
m = np.vstack((s, c, f)).T
self._mapping.append(m)
self._ycb_ids.append(meta['ycb_ids'])
offset += len(seq)
self._mapping = np.vstack(self._mapping)
# sample a subset for training
if split == 'train':
self._mapping = self._mapping[::10]
# dataset size
self._size = len(self._mapping)
print('dataset %s with images %d' % (self._name, self._size))
def __len__(self):
return self._size
def get_bop_id_from_idx(self, idx):
s, c, f = map(lambda x: x.item(), self._mapping[idx])
scene_id = s * len(self._serials) + c
im_id = f
return scene_id, im_id
def __getitem__(self, idx):
s, c, f = self._mapping[idx]
is_testing = f % _BOP_EVAL_SUBSAMPLING_FACTOR == 0
if self._split == 'test' and not is_testing:
sample = {'is_testing': is_testing}
return sample
scene_id, im_id = self.get_bop_id_from_idx(idx)
video_id = '%04d' % (scene_id)
image_id = '%06d' % (im_id)
# posecnn result path
posecnn_result_path = os.path.join(self._dex_ycb_path, 'results_posecnn', self._name, 'vgg16_dex_ycb_epoch_16.checkpoint.pth',
video_id + '_' + image_id + '.mat')
d = os.path.join(self._data_dir, self._sequences[s], self._serials[c])
roidb = {
'color_file': os.path.join(d, self._color_format.format(f)),
'depth_file': os.path.join(d, self._depth_format.format(f)),
'label_file': os.path.join(d, self._label_format.format(f)),
'intrinsics': self._intrinsics[c],
'ycb_ids': self._ycb_ids[s],
'posecnn': posecnn_result_path,
}
# Get the input image blob
im_color, im_depth = self._get_image_blob(roidb['color_file'], roidb['depth_file'])
# build the label blob
im_label, intrinsic_matrix, poses, gt_boxes, poses_result, rois_result, labels_result \
= self._get_label_blob(roidb, self._num_classes)
is_syn = 0
im_scale = 1.0
im_info = np.array([im_color.shape[1], im_color.shape[2], im_scale, is_syn], dtype=np.float32)
sample = {'image_color': im_color[:, :, (2, 1, 0)],
'image_depth': im_depth,
'label': im_label,
'intrinsic_matrix': intrinsic_matrix,
'gt_poses': poses,
'gt_boxes': gt_boxes,
'poses_result': poses_result,
'rois_result': rois_result,
'labels_result': labels_result,
'extents': self._extents,
'points': self._points_all,
'im_info': im_info,
'video_id': video_id,
'image_id': image_id}
if self._split == 'test':
sample['is_testing'] = is_testing
return sample
def _get_image_blob(self, color_file, depth_file):
# rgba
rgba = cv2.imread(color_file, cv2.IMREAD_UNCHANGED)
if rgba.shape[2] == 4:
im = np.copy(rgba[:,:,:3])
alpha = rgba[:,:,3]
I = np.where(alpha == 0)
im[I[0], I[1], :] = 0
else:
im = rgba
im_color = im.astype('float') / 255.0
# depth image
im_depth = cv2.imread(depth_file, cv2.IMREAD_UNCHANGED)
im_depth = im_depth.astype('float') / 1000.0
return im_color, im_depth
def _get_label_blob(self, roidb, num_classes):
""" build the label blob """
# parse data
cls_indexes = roidb['ycb_ids']
classes = np.array(self._class_index)
fx = roidb['intrinsics']['fx']
fy = roidb['intrinsics']['fy']
px = roidb['intrinsics']['ppx']
py = roidb['intrinsics']['ppy']
intrinsic_matrix = np.eye(3, dtype=np.float32)
intrinsic_matrix[0, 0] = fx
intrinsic_matrix[1, 1] = fy
intrinsic_matrix[0, 2] = px
intrinsic_matrix[1, 2] = py
label = np.load(roidb['label_file'])
# label image
im_label = label['seg']
# poses
poses = label['pose_y']
if len(poses.shape) == 2:
poses = np.reshape(poses, (1, 3, 4))
num = poses.shape[0]
assert num == len(cls_indexes), 'number of poses not equal to number of objects'
# bounding boxes
gt_boxes = np.zeros((num, 5), dtype=np.float32)
for i in range(num):
cls = int(cls_indexes[i]) - 1
ind = np.where(classes == cls)[0]
if len(ind) > 0:
R = poses[i, :, :3]
T = poses[i, :, 3]
# compute box
x3d = np.ones((4, self._points_all.shape[1]), dtype=np.float32)
x3d[0, :] = self._points_all[ind,:,0]
x3d[1, :] = self._points_all[ind,:,1]
x3d[2, :] = self._points_all[ind,:,2]
RT = np.zeros((3, 4), dtype=np.float32)
RT[:3, :3] = R
RT[:, 3] = T
x2d = np.matmul(intrinsic_matrix, np.matmul(RT, x3d))
x2d[0, :] = np.divide(x2d[0, :], x2d[2, :])
x2d[1, :] = np.divide(x2d[1, :], x2d[2, :])
gt_boxes[i, 0] = np.min(x2d[0, :])
gt_boxes[i, 1] = np.min(x2d[1, :])
gt_boxes[i, 2] = np.max(x2d[0, :])
gt_boxes[i, 3] = | np.max(x2d[1, :]) | numpy.max |
# Copyright (c) 2015-2021 <NAME> and contributors.
# MC3 is open-source software under the MIT license (see LICENSE).
import pytest
import numpy as np
import mc3
import mc3.stats as ms
# Preamble for time-averaging runs:
np.random.seed(12)
N = 1000
white = np.random.normal(0, 5, N)
red = np.sin(np.arange(N)/(0.1*N))*np.random.normal(1.0, 1.0, N)
data = white + red
expected_red_rms = np.array(
[5.20512494, 2.36785563, 1.72466452, 1.49355819, 1.52934937,
1.35774105, 1.11881588, 1.13753563, 1.16566184, 1.03510878,
1.11692786, 0.95551055, 1.04041202, 0.86876758, 0.93962365,
0.95093077, 0.86283389, 0.89332354, 0.95500342, 0.82927083])
expected_red_rmslo = np.array(
[0.11639013, 0.12995296, 0.1285489 , 0.13412548, 0.15774034,
0.15574358, 0.13041103, 0.14351302, 0.1550736 , 0.14721337,
0.16700106, 0.15015152, 0.1685249 , 0.14533717, 0.1627079 ,
0.16987469, 0.1604309 , 0.17348578, 0.19451647, 0.17348533])
expected_red_rmshi = np.array(
[0.11639013, 0.12995296, 0.1285489 , 0.13412548, 0.15774034,
0.15574358, 0.1611256 , 0.18169027, 0.20020244, 0.19264249,
0.22147211, 0.20384028, 0.23076986, 0.2007309 , 0.22759927,
0.24306181, 0.23335404, 0.25645724, 0.29446565, 0.26262799])
expected_red_stderr = np.array(
[5.20664133, 2.13096763, 1.57786671, 1.31163 , 1.14789132,
1.03429558, 0.94962841, 0.8838618 , 0.83021424, 0.78624182,
0.74867937, 0.71682123, 0.68816067, 0.66305576, 0.64091963,
0.62131904, 0.60393775, 0.58855564, 0.57504053, 0.55986528])
expected_binsz = np.array(
[ 1., 6., 11., 16., 21., 26., 31., 36., 41., 46., 51., 56., 61.,
66., 71., 76., 81., 86., 91., 96.])
expected_white_rms = np.array(
[5.13108371, 2.24264189, 1.54890969, 1.32144868, 1.3520051 ,
1.16925098, 0.88639028, 0.91812782, 0.93234654, 0.8127796 ,
0.86662417, 0.7447655 , 0.81963664, 0.68330918, 0.65699017,
0.73730708, 0.62304519, 0.65482596, 0.7385728 , 0.60835201])
expected_white_rmslo = np.array(
[0.11473452, 0.12308096, 0.11544891, 0.11866959, 0.13944868,
0.13412229, 0.10331912, 0.11583223, 0.12403454, 0.11559367,
0.1295761 , 0.11703448, 0.13276393, 0.11431161, 0.11376628,
0.13171286, 0.11584582, 0.12716893, 0.15043357, 0.12726862])
expected_white_rmshi = np.array(
[0.11473452, 0.12308096, 0.11544891, 0.11866959, 0.13944868,
0.13412229, 0.12765297, 0.14664586, 0.16013053, 0.15126515,
0.17184018, 0.15888177, 0.18180051, 0.15788028, 0.15913869,
0.18845872, 0.16850302, 0.18798885, 0.22773145, 0.19266356])
expected_white_stderr = np.array(
[5.13332205, 2.1009596 , 1.55564739, 1.29315979, 1.13172685,
1.01973075, 0.93625586, 0.87141536, 0.81852327, 0.77517006,
0.73813656, 0.70672705, 0.67847008, 0.65371869, 0.63189428,
0.6125697 , 0.59543317, 0.58026767, 0.56694288, 0.55198132])
expected_daub4_inverse = np.array([
-0.0301851821, -0.0522822690, -0.0662912607, -0.0824674511, -0.0905555462,
-0.1008108399, -0.1132333322, -0.1250751254, 0.1325825215, 0.3180280110,
0.4312613433, 0.5638438647, 0.1412513157, -0.1325825215, -0.2576576469,
-0.4225925490, -0.1671021007, -0.0242642855, 0.0059208966, 0.0662912607,
0.0140089918, -0.0080880952, 0.0000000000, 0.0000000000, 0.0000000000,
0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000,
0.0000000000, 0.0000000000,])
expected_daub4_forward = np.array([
0.1625300592, 0.0874699408, -0.0463140877, 0.2795672632, -0.0905555462,
0.0000000000, 0.0140089918, 0.1412513157, 0.3537658774, -0.0625000000,
0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000,
-0.1082531755, 0.0000000000, 0.8365163037, -0.1294095226, 0.0000000000,
0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000,
0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000, 0.0000000000,
0.0000000000, 0.0000000000,])
def test_bin_array_unweighted():
data = np.array([0,1,2, 3,3,3, 3,3,4])
binsize = 3
bindata = ms.bin_array(data, binsize)
np.testing.assert_allclose(bindata,
np.array([1.0, 3.0, np.mean([3,3,4])]))
def test_bin_array_weighted():
data = np.array([0,1,2, 3,3,3, 3,3,4])
unc = np.array([3,1,1, 1,2,3, 2,2,4])
binsize = 3
bindata, binstd = ms.bin_array(data, binsize, unc)
np.testing.assert_allclose(bindata,
np.array([1.42105263, 3.0, 3.11111111]))
np.testing.assert_allclose(binstd,
np.array([0.68824720, 0.85714286, 1.33333333]))
def test_residuals_no_priors():
data = np.array([1.1, 1.2, 0.9, 1.0])
model = np.array([1.0, 1.0, 1.0, 1.0])
uncert = np.array([0.1, 0.1, 0.1, 0.1])
res = ms.residuals(model, data, uncert)
np.testing.assert_allclose(res, np.array([-1.0, -2.0, 1.0, 0.0]))
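# Worked check: the expected values correspond to (model - data)/uncert, i.e.
# (1.0-1.1)/0.1 = -1.0, (1.0-1.2)/0.1 = -2.0, (1.0-0.9)/0.1 = 1.0, (1.0-1.0)/0.1 = 0.0.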
def test_residuals():
data = np.array([1.1, 1.2, 0.9, 1.0])
model = np.array([1.0, 1.0, 1.0, 1.0])
uncert = np.array([0.1, 0.1, 0.1, 0.1])
params = np.array([2.5, 5.5])
priors = np.array([2.0, 5.0])
plow = np.array([0.0, 1.0])
pup = np.array([0.0, 1.0])
res = ms.residuals(model, data, uncert, params, priors, plow, pup)
np.testing.assert_allclose(res, np.array([-1.0, -2.0, 1.0, 0.0, 0.5]))
def test_chisq():
data = np.array([1.1, 1.2, 0.9, 1.0])
model = np.array([1.0, 1.0, 1.0, 1.0])
uncert = np.array([0.1, 0.1, 0.1, 0.1])
chisq = ms.chisq(model, data, uncert)
assert chisq == 6.0
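# Worked check: chisq = sum(((model - data)/uncert)**2) = 1.0 + 4.0 + 1.0 + 0.0 = 6.0.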
def test_chisq_priors():
data = np.array([1.1, 1.2, 0.9, 1.0])
model = np.array([1.0, 1.0, 1.0, 1.0])
uncert = np.array([0.1, 0.1, 0.1, 0.1])
params = np.array([2.5, 5.5])
priors = np.array([2.0, 5.0])
plow = np.array([0.0, 1.0])
pup = np.array([0.0, 1.0])
chisq = ms.chisq(model, data, uncert, params, priors, plow, pup)
assert chisq == 6.25
def test_dwt_chisq():
data = np.array([2.0, 0.0, 3.0, -2.0, -1.0, 2.0, 2.0, 0.0])
model = np.ones(8)
params = np.array([1.0, 0.1, 0.1])
chisq = ms.dwt_chisq(model, data, params)
np.testing.assert_allclose(chisq, 1693.22308882)
def test_dwt_chisq_priors():
data = np.array([2.0, 0.0, 3.0, -2.0, -1.0, 2.0, 2.0, 0.0])
model = np.ones(8)
params = np.array([1.0, 0.1, 0.1])
priors = np.array([1.0, 0.2, 0.3])
plow = np.array([0.0, 0.0, 0.1])
pup = np.array([0.0, 0.0, 0.1])
chisq = ms.dwt_chisq(model, data, params, priors, plow, pup)
np.testing.assert_allclose(chisq, 1697.2230888243134)
def test_dwt_chisq_params_error():
data = np.array([2.0, 0.0, 3.0, -2.0, -1.0, 2.0, 2.0, 0.0])
model = np.ones(8)
params = np.array([1.0, 0.1])
with pytest.raises(SystemExit):
chisq = ms.dwt_chisq(model, data, params)
def test_log_prior_uniform():
post = np.array([[3.0, 2.0], [3.1, 1.0], [3.6, 1.5]])
prior = np.array([3.5, 0.0])
priorlow = np.array([0.0, 0.0])
priorup = np.array([0.0, 0.0])
pstep = np.array([1.0, 1.0])
log_prior = ms.log_prior(post, prior, priorlow, priorup, pstep)
np.testing.assert_equal(log_prior, np.array([0.0, 0.0, 0.0]))
def test_log_prior_gaussian():
post = np.array([[3.0, 2.0], [3.1, 1.0], [3.6, 1.5]])
prior = np.array([3.5, 0.0])
priorlow = np.array([0.1, 0.0])
priorup = | np.array([0.1, 0.0]) | numpy.array |
import os
from astro_ghost.PS1QueryFunctions import find_all, get_PS1_Pic, get_PS1_type, get_PS1_mask, query_ps1_noname
from astro_ghost.NEDQueryFunctions import getNEDInfo
from datetime import datetime
from astropy import units as u
from astropy.coordinates import SkyCoord
import pandas as pd
import numpy as np
import pickle
from astropy.io import ascii
from collections import Counter
import scipy
from scipy import ndimage
from matplotlib import pyplot as plt
from astropy.table import Table
from matplotlib.colors import LogNorm
from astropy.utils.data import get_pkg_data_filename
#from astro_ghost import DLR as dlr
from photutils import Background2D
import numpy.ma as ma
from astropy.io import fits
import warnings
from astropy.utils.exceptions import AstropyUserWarning
from astropy.utils.exceptions import AstropyWarning
from matplotlib import colors
from scipy import interpolate
from astropy.wcs import WCS
from astropy.stats import mad_std
from astropy.stats import sigma_clipped_stats
from astropy.visualization.mpl_normalize import ImageNormalize
from photutils import CircularAperture
from astropy.visualization import SqrtStretch
from photutils import DAOStarFinder
from photutils import MedianBackground, MeanBackground
from astropy.stats import SigmaClip
from astropy.coordinates import Angle      # used by plot_DLR_vectors_GD below
import matplotlib.patches as mpatches      # used by plot_ellipse below
############# functions ####################################
def updateStep(px, gradx, grady, step, point, size):
max_x = px
max_y = px
grad = np.array([gradx[point[0], point[1]], grady[point[0], point[1]]])
#make sure we move at least one unit in grid spacing - so the grad must have len 1
# if grad[0] + grad[1] > 0:
ds = step/np.sqrt(grad[0]**2 + grad[1]**2)
ds = np.nanmin([ds, step])
# else:
# ds = step
newPoint = [point[0] + ds*grad[0], point[1] + ds*grad[1]]
newPoint = [int(newPoint[0]), int(newPoint[1])] #round to nearest index
if (newPoint[0] >= max_x) or (newPoint[1] >= max_y) or (newPoint[0] < 0) or (newPoint[1] < 0):
#if we're going to go out of bounds, don't move
return point
elif ((newPoint == point) and (size == 'large')): #if we're stuck, perturb one pixel in a random direction:
a = np.random.choice([-1, 0, 1], 2)#
newPoint = [newPoint[0] + a[0], newPoint[1] + a[1]]
return newPoint
def dist(p1, p2):
return np.sqrt((p1[0] - p2[0])**2 + (p1[1] - p2[1])**2)
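# Toy illustration of the stepping helper above on a smooth synthetic bump
# (a hypothetical stand-in for the real PS1 surface-brightness background):
def example_gradient_ascent():
    px = 100
    yy, xx = np.mgrid[0:px, 0:px]
    background = -((xx - 70.0)**2 + (yy - 60.0)**2)   # single smooth peak
    gradx, grady = np.gradient(background.T)          # same convention as gradientAscent below
    point = [px // 2, px // 2]
    for _ in range(200):
        point = updateStep(px, gradx, grady, 5, point, 'medium')
    return point                                      # walks toward the brightest region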
def plot_DLR_vectors_GD(size, path, transient, transient_df, host_dict_candidates, host_dict_final, host_df, R_dict, ra_dict, df = "TNS", dual_axes=0, scale=1, postCut=0):
hostList = host_dict_candidates[str(transient)]
#os.chdir(path)
if type(hostList) is np.ndarray:
if len(hostList) > 1:
chosen = host_dict_final[transient]
else:
chosen = hostList[0]
else:
chosen = hostList
hostList = np.array(hostList)
band = 'r'
px = int(size*scale)
row = transient_df[transient_df['Name'] == transient]
tempRA = Angle(row.RA, unit=u.degree)
tempDEC = Angle(row.DEC, unit=u.degree)
transientRA = tempRA.degree[0]
transientDEC = tempDEC.degree[0]
print(transientRA)
print(transientDEC)
searchRA = transientRA
searchDEC = transientDEC
a = find_all("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(searchRA, searchDEC, int(px*0.25), band), '.')
if not a:
get_PS1_Pic(searchRA, searchDEC, px, band)
a = find_all("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(searchRA, searchDEC, int(px*0.25), band), '.')
#a = find_all("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(transientRA, transientDEC, int(px*0.25), band), '.')
#if not a:
# get_PS1_Pic(transientRA, transientDEC, px, band)
# a = find_all("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(transientRA, transientDEC, int(px*0.25), band), '.')
hdu = fits.open(a[0])[0]
image_file = get_pkg_data_filename(a[0])
image_data = fits.getdata(image_file, ext=0)
wcs = WCS(hdu.header)
    fig = plt.figure(num=None, figsize=(20,20), facecolor='w', edgecolor='k')
#ax = plt.subplot(projection=wcs)
    # fig.add_axes(projection=wcs)  # add_axes needs a rect; the call two lines below supplies one
axes_coords = [0, 0, 1, 1] # plotting full width and height
ax = fig.add_axes(axes_coords, projection=wcs)
axes_coords2 = [-0.045, -0.03, 1.06, 1.08]
ax_grads = fig.add_axes(axes_coords2, projection=None)
plt.axis('off')
for host in hostList:
hostDF = host_df[host_df['objID'] == host]
band = choose_band_SNR(hostDF)
XX = hostDF[band + 'momentXX'].values[0]
YY = hostDF[band + 'momentYY'].values[0]
XY = hostDF[band + 'momentXY'].values[0]
        U = float(XY)                 # np.float was removed in NumPy 1.24; builtin float is equivalent here
        Q = float(XX) - float(YY)
if (Q == 0):
r_a = 1.e-5
else:
phi = 0.5*np.arctan(U/Q)
kappa = Q**2 + U**2
a_over_b = (1 + kappa + 2*np.sqrt(kappa))/(1 - kappa)
r_a = ra_dict[host]
r_b = r_a/(a_over_b)
hostDF['r_a'] = r_a
hostDF['r_b'] = r_b
hostDF['phi'] = phi
hostRA = host_df.loc[host_df['objID'] == host,'raMean'].values[0]
hostDEC = host_df.loc[host_df['objID'] == host,'decMean'].values[0]
hostDLR = R_dict[host]
c = '#666dc9'
c2 = 'red'
if (host == chosen):
c = c2 = '#d308d0'
hostDF['raMean'], hostDF['decMean']
plot_ellipse(ax, px, hostDF, searchRA, searchDEC, c)
#plot_ellipse(ax, px, hostDF, transientRA, transientDEC, c)
# in arcseconds
dx = float(hostRA - transientRA)*3600
dy = float(hostDEC - transientDEC)*3600
dist = np.sqrt(dx**2 + dy**2)
if hostDLR == 10000000000.0:
hostDLR = 0.0
else:
hostDLR = dist/hostDLR
#in arcseconds
scale_factor = hostDLR/dist
DLR_RA = float(hostRA) - dx*scale_factor/3600
DLR_DEC = float(hostDEC) - dy*scale_factor/3600
pointRA = [hostRA, DLR_RA]
pointDEC = [hostDEC, DLR_DEC]
ax.plot(pointRA, pointDEC, transform=ax.get_transform('fk5'), lw=6, color= c)
# ax.imshow(image_data, norm=colors.LogNorm(), cmap='gray_r')
ax.imshow(image_data, norm=colors.PowerNorm(gamma = 0.5, vmin=1, vmax=1.e4), cmap='gray')
plt.axis('off')
return ax_grads
def plot_ellipse(ax, px, s, ra, dec, color):
i=0
size = px #PS cutout image size, 240*sidelength in arcmin
x0, y0 = ((ra-s['raMean'])*4*3600*np.cos(s['decMean']/180*np.pi)+(size/2)), (s['decMean']-dec)*4*3600+(size/2)
i=i+1
y, x = np.mgrid[0:size, 0:size]# 4 pixel for 1 arcsec for PS1, here image size is set to be 20"*20", depend on your cutout image size
#make fitted image
n_radius=2
theta1 = s['phi']#rot angle
a1= s['r_a']
b1= s['r_b']
e1 = mpatches.Ellipse((x0, y0), 4*n_radius*a1, 4*n_radius*b1, theta1, lw=6, ls='--', edgecolor=color,
facecolor='none', label='source 1')
ax.add_patch(e1)
def denoise(img, weight=0.1, eps=1e-3, num_iter_max=200):
"""Perform total-variation denoising on a grayscale image.
Parameters
----------
img : array
2-D input data to be de-noised.
weight : float, optional
Denoising weight. The greater `weight`, the more de-noising (at
the expense of fidelity to `img`).
eps : float, optional
Relative difference of the value of the cost function that determines
the stop criterion. The algorithm stops when:
(E_(n-1) - E_n) < eps * E_0
num_iter_max : int, optional
Maximal number of iterations used for the optimization.
Returns
-------
out : array
De-noised array of floats.
Notes
-----
Rudin, Osher and Fatemi algorithm.
"""
u = np.zeros_like(img)
px = np.zeros_like(img)
py = np.zeros_like(img)
nm = np.prod(img.shape[:2])
tau = 0.125
i = 0
while i < num_iter_max:
u_old = u
# x and y components of u's gradient
ux = np.roll(u, -1, axis=1) - u
uy = np.roll(u, -1, axis=0) - u
# update the dual variable
px_new = px + (tau / weight) * ux
py_new = py + (tau / weight) * uy
norm_new = np.maximum(1, np.sqrt(px_new **2 + py_new ** 2))
px = px_new / norm_new
py = py_new / norm_new
# calculate divergence
rx = np.roll(px, 1, axis=1)
ry = np.roll(py, 1, axis=0)
div_p = (px - rx) + (py - ry)
# update image
u = img + weight * div_p
# calculate error
error = np.linalg.norm(u - u_old) / np.sqrt(nm)
if i == 0:
err_init = error
err_prev = error
else:
# break if error small enough
if np.abs(err_prev - error) < eps * err_init:
break
else:
                err_prev = error
# don't forget to update iterator
i += 1
return u
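# Quick illustrative use of the TV denoiser above on synthetic data (hypothetical image):
def example_denoise():
    rng = np.random.RandomState(0)
    clean = np.zeros((64, 64))
    clean[16:48, 16:48] = 1.0                               # bright square
    noisy = clean + 0.3 * rng.standard_normal(clean.shape)  # plus Gaussian noise
    return denoise(noisy, weight=0.2)                       # larger weight => smoother result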
def get_clean_img(ra, dec, px, band):
#first, mask the data
a = find_all("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(ra, dec, int(px*0.25), band), '.')
if not a:
get_PS1_Pic(0, ra, dec, px, band)
a = find_all("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(ra, dec, int(px*0.25), band), '.')
b = find_all("PS1_ra={}_dec={}_{}arcsec_{}_mask.fits".format(ra, dec, int(px*0.25), band), '.')
if not b:
get_PS1_mask(ra, dec, px, band)
b = find_all("PS1_ra={}_dec={}_{}arcsec_{}_mask.fits".format(ra, dec, int(px*0.25), band), '.')
c = find_all("PS1_ra={}_dec={}_{}arcsec_{}_stack.num.fits".format(ra, dec, int(px*0.25), band), '.')
if not c:
get_PS1_type(ra, dec, px, band, 'stack.num')
c = find_all("PS1_ra={}_dec={}_{}arcsec_{}_stack.num.fits".format(ra, dec, int(px*0.25), band), '.')
#d = find_all("PS1_ra={}_dec={}_{}arcsec_{}_wt.fits".format(ra, dec, int(px*0.25), band), '.')
#if not d:
# get_PS1_wt(ra, dec, px, band)
# d = find_all("PS1_ra={}_dec={}_{}arcsec_{}_wt.fits".format(ra, dec, int(px*0.25), band), '.')
image_data_mask = fits.open(b[0])[0].data
image_data_num = fits.open(c[0])[0].data
#image_data_wt = fits.open(d[0])[0].data
image_data = fits.open(a[0])[0].data
hdu = fits.open(a[0])[0]
wcs = WCS(hdu.header)
bit = image_data_mask
mask = image_data_mask
for i in np.arange(np.shape(bit)[0]):
for j in np.arange(np.shape(bit)[1]):
if image_data_mask[i][j] == image_data_mask[i][j]:
bit[i][j] = "{0:016b}".format(int(image_data_mask[i][j]))
tempBit = str(bit[i][j])[:-2]
if len(str(int(bit[i][j]))) > 12:
if (tempBit[-6] == 1) or (tempBit[-13] == 1):
mask[i][j] = np.nan
elif len(str(int(bit[i][j]))) > 5:
if (tempBit[-6] == 1):
mask[i][j] = np.nan
mask = ~np.isnan(image_data_mask)
mask_num = image_data_num
#weighted
#image_data *= image_data_wt
image_masked = ma.masked_array(image_data, mask=mask)
image_masked_num = ma.masked_array(image_masked, mask=mask_num)
#edited to PASS BACK THE MASKED ARRAY!!
#then return the data
return np.array(image_masked_num), wcs, hdu
def getSteps(SN_dict, SN_names, hostDF):
steps = []
hostDF.replace(-999, np.nan, inplace=True)
for name in SN_names:
hostList = SN_dict[name]
if (type(hostList) is np.int64 or type(hostList) is float):
hostList = [hostList]
checkNan = [x == x for x in hostList]
if np.sum(checkNan) > 0:
hostRadii = hostDF.loc[hostDF['objID'].isin(hostList), 'rKronRad'].values
mean = np.nanmean(hostRadii)
if mean == mean:
# print(mean)
# mean /= 2
mean = np.max([mean,2])
step = np.min([mean, 50])
steps.append(step) #find some proper scaling factor between the mean and the step size
else:
steps.append(5)
else:
steps.append(5)
return steps
############# end functions ####################################
def gradientAscent(path, SN_dict, SN_dict_postDLR, SN_names, hostDF, transientDF, fn, plot=1):
#os.chdir(path)
warnings.filterwarnings('ignore', category=AstropyUserWarning)
warnings.filterwarnings('ignore', category=AstropyWarning)
#warnings.filterwarnings("ignore", category=AstropyUserWarning)
#debugging purposes
step_sizes = getSteps(SN_dict, SN_names, hostDF)
unchanged = []
#r = 0 #counter for occasionally saving to file
N_associated = 0
f = open(fn, 'w')
print("Starting size of data frame: %i" % len(hostDF), file=f)
try:
os.makedirs('quiverMaps')
except:
print("Already have the folder quiverMaps!")
for i in np.arange(len(step_sizes)):
try:
# if True:
transient_name = SN_names[i]
print("Transient: %s"% transient_name, file=f)
ra = transientDF.loc[transientDF['Name'] == transient_name, 'RA'].values[0]
dec = transientDF.loc[transientDF['Name'] == transient_name, 'DEC'].values[0]
px = 800
g_img, wcs, g_hdu = get_clean_img(ra, dec, px, 'g')
g_mask = np.ma.masked_invalid(g_img).mask
r_img, wcs, r_hdu = get_clean_img(ra, dec, px, 'r')
r_mask = np.ma.masked_invalid(r_img).mask
i_img, wcs, i_hdu = get_clean_img(ra, dec, px, 'i')
i_mask = np.ma.masked_invalid(i_img).mask
#cleanup - remove the fits files when we're done using them
for band in ['g', 'r', 'i']:
os.remove("PS1_ra={}_dec={}_{}arcsec_{}_stack.num.fits".format(ra, dec, int(px*0.25), band))
os.remove("PS1_ra={}_dec={}_{}arcsec_{}_mask.fits".format(ra, dec, int(px*0.25), band))
os.remove("PS1_ra={}_dec={}_{}arcsec_{}.fits".format(ra, dec, int(px*0.25), band))
#os.chdir(path)
# if e.errno != errno.EEXIST:
# raise
#os.chdir("./quiverMaps")
nancount = 0
obj_interp = []
for obj in [g_img, r_img, i_img]:
data = obj
mean, median, std = sigma_clipped_stats(data, sigma=20.0)
daofind = DAOStarFinder(fwhm=3.0, threshold=20.*std)
sources = daofind(data - median)
try:
xvals = np.array(sources['xcentroid'])
yvals = np.array(sources['ycentroid'])
# for col in sources.colnames:
# sources[col].info.format = '%.8g' # for consistent table output
for k in np.arange(len(xvals)):
tempx = xvals[k]
tempy = yvals[k]
yleft = np.max([int(tempy) - 7, 0])
yright = np.min([int(tempy) + 7, np.shape(data)[1]-1])
xleft = np.max([int(tempx) - 7, 0])
xright = np.min([int(tempx) + 7, np.shape(data)[1]-1])
for r in np.arange(yleft,yright+1):
for j in np.arange(xleft, xright+1):
if dist([xvals[k], yvals[k]], [j, r]) < 5:
data[r, j] = np.nan
nancount += np.sum(np.isnan(data))
positions = np.transpose((sources['xcentroid'], sources['ycentroid']))
apertures = CircularAperture(positions, r=5.)
norm = ImageNormalize(stretch=SqrtStretch())
if plot:
fig = plt.figure(figsize=(10,10))
ax = fig.gca()
ax.imshow(data)
apertures.plot(color='blue', lw=1.5, alpha=0.5)
plt.axis('off')
plt.savefig("quiverMaps/detectedStars_%s.png"%transient_name, bbox_inches='tight')
plt.close()
except:
print("No stars here!", file=f)
backx = np.arange(0,data.shape[1])
backy = np.arange(0, data.shape[0])
backxx, backyy = np.meshgrid(backx, backy)
#mask invalid values
array = np.ma.masked_invalid(data)
x1 = backxx[~array.mask]
y1 = backyy[~array.mask]
newarr = array[~array.mask]
data = interpolate.griddata((x1, y1), newarr.ravel(), (backxx, backyy), method='cubic')
obj_interp.append(data)
#gvar = np.var(obj_interp[0])
#gmean = np.nanmedian(obj_interp[0])
gMax = np.nanmax(obj_interp[0])
g_ZP = g_hdu.header['ZPT_0001']
r_ZP = r_hdu.header['ZPT_0001']
i_ZP = i_hdu.header['ZPT_0001']
#combining into a mean img -
# m = -2.5*log10(F) + ZP
gmag = -2.5*np.log10(obj_interp[0]) + g_ZP
rmag = -2.5*np.log10(obj_interp[1]) + r_ZP
imag = -2.5*np.log10(obj_interp[2]) + i_ZP
#now the mean can be taken
mean_zp = (g_ZP + r_ZP + i_ZP)/3
meanMag = (gmag + rmag + imag)/3
meanImg = 10**((mean_zp-meanMag)/2.5) #convert back to flux
#meanImg = (obj_interp[0] + obj_interp[0] + obj_interp[0])/3
print("NanCount = %i"%nancount,file=f)
#mean_center = np.nanmean([g_img[int(px/2),int(px/2)], i_img[int(px/2),int(px/2)], i_img[int(px/2),int(px/2)]])
#if mean_center != mean_center:
# mean_center = 1.e-30
mean_center = meanImg[int(px/2),int(px/2)]
print("Mean_center = %f" % mean_center,file=f)
#mean, median, std = sigma_clipped_stats(meanImg, sigma=10.0)
meanImg[meanImg != meanImg] = 1.e-30
mean, median, std = sigma_clipped_stats(meanImg, sigma=10.0)
print("mean image = %e"% mean, file=f)
aboveCount = np.sum(meanImg > 1.)
aboveCount2 = np.sum(meanImg[int(px/2)-100:int(px/2)+100, int(px/2)-100:int(px/2)+100] > 1.)
aboveFrac2= aboveCount2/40000
print("aboveCount = %f"% aboveCount,file=f)
print("aboveCount2 = %f "% aboveCount2, file=f)
totalPx = px**2
aboveFrac = aboveCount/totalPx
print("aboveFrac= %f" % aboveFrac, file=f)
print("aboveFrac2 = %f "% aboveFrac2, file=f)
#meanImg[meanImg < 1.e-5] = 0
if ((median <15) and (np.round(aboveFrac2, 2) < 0.70)) or ((mean_center > 1.e3) and (np.round(aboveFrac,2) < 0.60) and (np.round(aboveFrac2,2) < 0.75)):
bs = 15
fs = 1
if aboveFrac2 < 0.7:
step_sizes[int(i)] = 2.
else:
step_sizes[int(i)] = 10.
print("Small filter", file=f)
size = 'small'
elif ((mean_center > 40) and (median > 500) and (aboveFrac > 0.60)) or ((mean_center > 300) and (aboveFrac2 > 0.7)):
bs = 75 #the big sources
fs = 3
print("Large filter", file=f)
step_sizes[int(i)] = np.max([step_sizes[int(i)], 50])
size = 'large'
#if step_sizes[int(i)] == 5:
# step_sizes[int(i)] *= 5
# step_sizes[int(i)] = np.min([step_sizes[int(i)], 50])
#if mean_center < 200: #far from the center with a large host
# fs = 5
#elif mean_center < 5000:
# step_sizes[int(i)] = np.max([step_sizes[int(i)], 50])
#size = 'large'
else:
bs = 40 #everything in between
fs = 3
print("Medium filter", file=f)
#if step_sizes[int(i)] == 5:
# step_sizes[int(i)] *= 3
# step_sizes[int(i)] = np.max([step_sizes[int(i)], 25])
step_sizes[int(i)] = np.max([step_sizes[int(i)], 15])
size = 'medium'
# step_sizes[int(i)] *= 3
#if (median)
sigma_clip = SigmaClip(sigma=15.)
bkg_estimator = MeanBackground()
#bkg_estimator = BiweightLocationBackground()
bkg3_g = Background2D(g_img, box_size=bs, filter_size=fs,
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
bkg3_r = Background2D(r_img, box_size=bs, filter_size=fs,
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
bkg3_i = Background2D(i_img, box_size=bs, filter_size=fs,
sigma_clip=sigma_clip, bkg_estimator=bkg_estimator)
#pretend the background is in counts too (I think it is, right?) and average in mags
bkg3_g.background[bkg3_g.background < 0] = 1.e-30
bkg3_r.background[bkg3_r.background < 0] = 1.e-30
bkg3_i.background[bkg3_i.background < 0] = 1.e-30
backmag_g = -2.5*np.log10(bkg3_g.background) + g_ZP
backmag_r = -2.5*np.log10(bkg3_r.background) + r_ZP
backmag_i = -2.5*np.log10(bkg3_i.background) + i_ZP
mean_zp = (g_ZP + r_ZP + i_ZP)/3.
backmag = 0.333*backmag_g + 0.333*backmag_r + 0.333*backmag_i
            background = 10**((mean_zp - backmag)/2.5)  # parenthesized as in the meanImg conversion above
if plot:
fig, axs = plt.subplots(1, 3, sharex=True, sharey=True,figsize=(20,10))
axs[0].imshow(bkg3_g.background)
axs[0].axis('off')
axs[1].imshow(bkg3_r.background)
axs[1].axis('off')
axs[2].imshow(bkg3_i.background)
axs[2].axis('off')
plt.savefig("quiverMaps/backgrounds_%s.png" % transient_name, bbox_inches='tight')
plt.close()
mean, median, std = sigma_clipped_stats(meanImg, sigma=1.0)
meanImg[meanImg <= (mean)] = 1.e-30
meanImg[meanImg < 0] = 1.e-30
if plot:
fig = plt.figure(figsize=(10,10))
ax = fig.gca()
ax.imshow((meanImg)/np.nanmax(meanImg))
plt.axis('off')
plt.savefig("quiverMaps/normalizedMeanImage_%s.png" % transient_name, bbox_inches='tight')
plt.close()
fig = plt.figure(figsize=(10,10))
ax = fig.gca()
ax.imshow(background/np.nanmax(background))
plt.axis('off')
plt.savefig("quiverMaps/normalizedMeanBackground_%s.png" % transient_name, bbox_inches='tight')
plt.close()
if nancount > 1.e5:
imgWeight = 0
elif (mean_center > 1.e4): #and (size is not 'large'):
imgWeight = 0.75
elif size == 'medium':
imgWeight = 0.33
else:
imgWeight = 0.10
print("imgWeight= %f"%imgWeight, file=f)
fullbackground = ((1-imgWeight)*background/np.nanmax(background) + imgWeight*meanImg/np.nanmax(meanImg))*np.nanmax(background)
# background = (0.66*background/np.max(background) + imgWeight*meanImg/np.nanmax(meanImg))*np.max(background)
n = px
X, Y = np.mgrid[0:n, 0:n]
dx, dy = np.gradient(fullbackground.T)
n_plot = 10
dx_small = dx[::n_plot, ::n_plot]
dy_small = dy[::n_plot, ::n_plot]
print("step = %f"% step_sizes[int(i)], file=f)
start = [[int(px/2),int(px/2)]] #the center of the grid
if True:
#if background[int(px/2),int(px/2)] > 0: #if we have some background flux (greater than 3 stdevs away from the median background), follow the gradient
start.append(updateStep(px, dx, dy, step_sizes[int(i)], start[-1], size))
for j in np.arange(1.e3):
start.append(updateStep(px, dx, dy, step_sizes[int(i)], start[-1], size))
it_array = np.array(start)
endPoint = start[-1]
if plot:
fig = plt.figure(figsize=(10,10))
ax = fig.gca()
ax.imshow(fullbackground)
plt.axis("off")
plt.savefig("quiverMaps/fullBackground_%s.png"%transient_name, bbox_inches='tight')
plt.close()
coords = wcs.wcs_pix2world(endPoint[0], endPoint[1], 0., ra_dec_order = True) # Note the third argument, set to 0, which indicates whether the pixel coordinates should be treated as starting from (1, 1) (as FITS files do) or from (0, 0)
print("Final ra, dec after GD : %f %f"% (coords[0], coords[1]), file=f)
col = '#D34E24'
col2 = '#B54A24'
#lookup by ra, dec
try:
if size == 'large':
a = query_ps1_noname(float(coords[0]), float(coords[1]), 20)
else:
a = query_ps1_noname(float(coords[0]), float(coords[1]), 5)
except TypeError:
continue
if a:
print("Found a host here!", file=f)
a = ascii.read(a)
a = a.to_pandas()
a = a[a['nDetections'] > 1]
#a = a[a['ng'] > 1]
#a = a[a['primaryDetection'] == 1]
smallType = ['AbLS', 'EmLS' , 'EmObj', 'G', 'GammaS', 'GClstr', 'GGroup', 'GPair', 'GTrpl', 'G_Lens', 'IrS', 'PofG', 'RadioS', 'UvES', 'UvS', 'XrayS', '', 'QSO', 'QGroup', 'Q_Lens']
medType = ['G', 'IrS', 'PofG', 'RadioS', 'GPair', 'GGroup', 'GClstr', 'EmLS', 'RadioS', 'UvS', 'UvES', '']
largeType = ['G', 'PofG', 'GPair', 'GGroup', 'GClstr']
if len(a) > 0:
a = getNEDInfo(a)
if (size == 'large'):# and (np.nanmax(a['rKronRad'].values) > 5)):
# print("L: picking the largest >5 kronRad host within 10 arcsec", file=f)
print("L: picking the closest NED galaxy within 20 arcsec", file=f)
#a = a[a['rKronRad'] == np.nanmax(a['rKronRad'].values)]
tempA = a[a['NED_type'].isin(largeType)]
if len(tempA) > 0:
a = tempA
tempA = a[a['NED_type'] == 'G']
if len(tempA) > 0:
a = tempA
#tempA = a[a['NED_mag'] == np.nanmin(a['NED_mag'])]
#if len(tempA) > 0:
# a = tempA
if len(a) > 1:
a = a.iloc[[0]]
elif (size == 'medium'):
#print("M: Picking the largest host within 5 arcsec", file=f)
print("M: Picking the closest NED galaxy within 5 arcsec", file=f)
#a = a[a['rKronRad'] == np.nanmax(a['rKronRad'].values)]
tempA = a[a['NED_type'].isin(medType)]
if len(tempA) > 0:
a = tempA
if len(a) > 1:
a = a.iloc[[0]]
else:
tempA = a[a['NED_type'].isin(smallType)]
if len(tempA) > 0:
a = tempA
a = a.iloc[[0]]
print("S: Picking the closest non-stellar source within 5 arcsec", file=f)
#else:
# f.flush()
# continue
#threshold = [1, 1, 0, 0, 0, 0]
#flag = ['nDetections', 'nr', 'rPlateScale', 'primaryDetection', 'rKronRad', 'rKronFlux']
#j = 0
#while len(a) > 1:
# if np.sum(a[flag[int(j)]] > threshold[int(j)]) > 0:
# tempA = a[a[flag[int(j)]] > threshold[int(j)]]
# j += 1
# a = tempA
# if (j == 6):
# break
# else:
# break
#if len(a) > 1:
# if len(~a['rKronRad'].isnull()) > 0:
# a = a[a['rKronRad'] == np.nanmax(a['rKronRad'].values)]
# else:
# a = a.iloc[0]
print("Nice! Host association chosen.", file=f)
print("NED type: %s" % a['NED_type'].values[0], file=f)
print(a['objID'].values[0], file=f)
print("Chosen Host RA and DEC: %f %f"% (a['raMean'], a['decMean']), file=f)
SN_dict_postDLR[transient_name] = a['objID'].values[0]
print("Dict value: %i"%SN_dict_postDLR[transient_name],file=f)
N = len(hostDF)
hostDF = pd.concat([hostDF, a], ignore_index=True)
N2 = len(hostDF)
if N2 != (N+1):
print("ERROR! Value not concatenated!!", file=f)
return
finalRA = | np.array(a['raMean']) | numpy.array |
import numpy as np
import collections
import pickle
from tqdm import tqdm
from nltk.corpus import stopwords
from numpy.linalg import norm
from memory_profiler import profile
import time
import traceback
class InfoRetrievalSetup:
"""
Index creation
- dictionary: entities vocabulary from all entities
- input: list of all KB entities
- logic: iterate over all tokens contained in the entity, update dictionary
for each token, keep track of tokens already updated
- output: dictionary (k,v) where k= token, v= list of entities
the token is in
"""
def __init__(self, index_file_path= None):
if index_file_path:
self.loadIndex(index_file_path)
def loadIndex(self, index_file):
self.index = pickle.load(open(index_file, "rb"))
print('Index initialized.')
def build_index(self, entities_list):
print('Constructing index...')
entities_list = set(entities_list)
token_entities_index = collections.defaultdict(list)
# helper mapping structures
# entities maps
ids_to_entities = collections.defaultdict(list)
entities_to_ids = collections.defaultdict(int)
# vocab tokens maps
ids_to_tokens = collections.defaultdict(list)
tokens_to_ids = collections.defaultdict(int)
if not isinstance(entities_list, list):
entities_list = list(entities_list) # make sure we're considering only the unique cases
for i in range(len(entities_list)):
existing = set()
entity_id = i
entity_text = entities_list[i]
ids_to_entities[entity_id] = entity_text
entities_to_ids[entity_text] = entity_id
entity_text = entity_text.split('_')
if ('category:' in entity_text):
continue
for token in entity_text:
if (token not in existing) and not (token == 'kb'): # idea is to consider only the first occurence of the term in the entity
token_entities_index[token].append(entity_id)
existing.add(token)
# Build a token=>int map for the vocabulary and replace the token term with its id
token_id = 0
token_entities_index_numeric = collections.defaultdict(list)
for token, posting_list in token_entities_index.items():
tokens_to_ids[token] = token_id
ids_to_tokens[token_id] = token
token_entities_index_numeric[token_id] = posting_list
token_id += 1
self.index = token_entities_index_numeric
self.tokens_to_ids = tokens_to_ids
self.ids_to_tokens = ids_to_tokens
self.ids_to_entities = ids_to_entities
self.entities_to_ids = entities_to_ids
self.N = len(entities_list)
self.entities = entities_list
print('Index length...', len(self.index))
return token_entities_index_numeric, ids_to_entities, entities_to_ids, ids_to_tokens, tokens_to_ids
def store_ir_datastructures(self, output_folder_path):
pickle.dump(self.index, open(output_folder_path + "/tokens_entities_index.p", "wb"))
pickle.dump(self.ids_to_entities, open(output_folder_path + "/ids_to_entities_map.p", "wb"))
pickle.dump(self.entities_to_ids, open(output_folder_path + "/entities_to_ids_map.p", "wb"))
pickle.dump(self.ids_to_tokens, open(output_folder_path + "/ids_to_tokens_map.p", "wb"))
pickle.dump(self.tokens_to_ids, open(output_folder_path + "/tokens_to_ids_map.p", "wb"))
pickle.dump(self.term_idfs_dict, open(output_folder_path + "/term_idf_scores_dict.p", "wb"))
pickle.dump(self.norms_idfs_dict, open(output_folder_path + "/entities_norms_dict.p", "wb"))
pickle.dump(self.most_frq_terms, open(output_folder_path + "/most_frq_terms_in_entities_dict.p", "wb"))
pickle.dump(self.norms_tfidfs_dict, open(output_folder_path + "/entities_norms_tfidf_dict.p", "wb"))
def loadIndex(self, index_file):
# Shadows the earlier loadIndex used by __init__, so cache the loaded index on
# the instance (as __init__ expects) and also return it.
self.index = pickle.load(open(index_file, "rb"))
return self.index
def storeIndex(self, path, index):
pickle.dump(index, open(path + "index.p", "wb"))
def build_idf_mapping(self, index = None, N = None):
if index:
self.index = index
if N:
self.N = N
terms_idfs = collections.defaultdict(int)
for term_id in self.index:
terms_idfs[term_id] = self.compute_idf(term_id)
self.term_idfs_dict = terms_idfs
return terms_idfs
def compute_entities_norms_idf(self):
norms_dict = collections.defaultdict(float)
for entity in self.entities:
entity_id = self.entities_to_ids[entity]
entities_text = [elem for elem in entity.split('_') if not elem == 'kb']
idf_vector = []
for token in entities_text:
token_id = self.tokens_to_ids[token]
idf_vector = | np.append(idf_vector, self.term_idfs_dict[token_id]) | numpy.append |
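# Hypothetical usage sketch (toy entities, not real KB data): build a tiny inverted
# index with the class above and look up which entities contain a given token.
example_setup = InfoRetrievalSetup()
example_index, example_ids_to_entities, _, _, example_tokens_to_ids = example_setup.build_index(["kb_barack_obama", "kb_michelle_obama", "kb_chicago"])
example_token_id = example_tokens_to_ids["obama"]
example_matches = [example_ids_to_entities[eid] for eid in example_index[example_token_id]]
# example_matches contains both *_obama entities; "kb_chicago" is not in the posting list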
'''
Useful functions - specifically will be used for feed training images
and model inference.
'''
import numpy as np
from os import listdir, mkdir, sep, path, walk
from os.path import join, exists, splitext
from scipy.misc import imread, imsave, imresize
def list_images(directory):
images = []
for file in listdir(directory):
name = file.lower()
if name.endswith('.png'):
images.append(join(directory, file))
elif name.endswith('.jpg'):
images.append(join(directory, file))
elif name.endswith('.jpeg'):
images.append(join(directory, file))
return images
def get_train_images(paths, resize_len=512, crop_height=256, crop_width=256):
images = []
for path in paths:
image = imread(path, mode='RGB')
height, width, _ = image.shape
if height < width:
new_height = resize_len
new_width = int(width * new_height / height)
else:
new_width = resize_len
new_height = int(height * new_width / width)
image = imresize(image, [new_height, new_width], interp='nearest')
# crop the image
start_h = | np.random.choice(new_height - crop_height + 1) | numpy.random.choice |
import sys
assert sys.version_info >= (3, 5)
import sklearn
assert sklearn.__version__ >= "0.20"
import numpy as np
import os
import matplotlib as mpl
import matplotlib.pyplot as plt
from sklearn.decomposition import PCA
from matplotlib.patches import FancyArrowPatch
from mpl_toolkits.mplot3d import proj3d
from mpl_toolkits.mplot3d import Axes3D
####################################################################################################################################################
### Preparation ####################################################################################################################################
####################################################################################################################################################
| np.random.seed(42) | numpy.random.seed |
# -*- coding: utf-8 -*-
"""
This module provides basic geometry elements generally geared towards (geo)json usage.
"""
from collections import OrderedDict
from uuid import uuid4
from typing import Union, List
import json
import logging
import numpy
from sarpy.io.kml import Document as KML_Document
def _compress_identical(coords):
"""
Eliminate consecutive points with same first two coordinates.
Parameters
----------
coords : numpy.ndarray
Returns
-------
numpy.ndarray
coords array with consecutive identical points supressed (last point retained)
"""
if coords.shape[0] < 2:
return coords
include = numpy.zeros((coords.shape[0], ), dtype=numpy.bool)
include[-1] = True
for i, (first, last) in enumerate(zip(coords[:-1, :], coords[1:, :])):
if not (first[0] == last[0] and first[1] == last[1]):
include[i] = True
return coords[include, :]
def _validate_contain_arguments(pts_x, pts_y):
# helper method for Polygon functionality
if not isinstance(pts_x, numpy.ndarray):
pts_x = numpy.array(pts_x, dtype=numpy.float64)
if not isinstance(pts_y, numpy.ndarray):
pts_y = numpy.array(pts_y, dtype=numpy.float64)
if pts_x.shape != pts_y.shape:
raise ValueError(
'pts_x and pts_y must be the same shape. Got {} and {}'.format(pts_x.shape, pts_y.shape))
return pts_x, pts_y
def _validate_grid_contain_arguments(grid_x, grid_y):
# helper method for Polygon functionality
if not isinstance(grid_x, numpy.ndarray):
grid_x = numpy.array(grid_x, dtype=numpy.float64)
if not isinstance(grid_y, numpy.ndarray):
grid_y = numpy.array(grid_y, dtype=numpy.float64)
if len(grid_x.shape) != 1 or len(grid_y.shape) != 1:
raise ValueError('grid_x and grid_y must be one dimensional.')
if numpy.any((grid_x[1:] - grid_x[:-1]) <= 0):
raise ValueError('grid_x must be monotonically increasing')
if numpy.any((grid_y[1:] - grid_y[:-1]) <= 0):
raise ValueError('grid_y must be monotonically increasing')
return grid_x, grid_y
def _get_kml_coordinate_string(coordinates, transform):
# type: (numpy.ndarray, Union[None, callable]) -> str
def identity(x):
return x
if transform is None:
transform = identity
if coordinates.ndim == 1:
return '{0:0.9f},{1:0.9f}'.format(*transform(coordinates)[:2])
return ' '.join(
'{0:0.9f},{1:0.9f}'.format(*el[:2]) for el in transform(coordinates))
class _Jsonable(object):
"""
Abstract class for json serializability.
"""
_type = '_Jsonable'
@property
def type(self):
"""
The type identifier.
Returns
-------
str
"""
return self._type
@classmethod
def from_dict(cls, the_json):
"""
Deserialize from json.
Parameters
----------
the_json : Dict
Returns
-------
"""
raise NotImplementedError
def to_dict(self, parent_dict=None):
"""
Deserialize from json.
Parameters
----------
parent_dict : None|Dict
Returns
-------
Dict
"""
raise NotImplementedError
def __str__(self):
return '{}(**{})'.format(self.__class__.__name__, json.dumps(self.to_dict(), indent=1))
def __repr__(self):
return '{}(**{})'.format(self.__class__.__name__, self.to_dict())
def copy(self):
"""
Make a deep copy of the item.
Returns
-------
"""
the_type = self.__class__
return the_type.from_dict(self.to_dict())
class Feature(_Jsonable):
"""
Generic feature class - basic geojson functionality. Should generally be extended
to coherently handle properties for specific use case.
"""
__slots__ = ('_uid', '_geometry', '_properties')
_type = 'Feature'
def __init__(self, uid=None, geometry=None, properties=None):
self._geometry = None
self._properties = None
if uid is None:
self._uid = str(uuid4())
elif not isinstance(uid, str):
raise TypeError('uid must be a string.')
else:
self._uid = uid
self.geometry = geometry
self.properties = properties
@property
def uid(self):
"""
The feature unique identifier.
Returns
-------
str
"""
return self._uid
@property
def geometry(self):
"""
The geometry object.
Returns
-------
Geometry
"""
return self._geometry
@geometry.setter
def geometry(self, geometry):
if geometry is None:
self._geometry = None
elif isinstance(geometry, Geometry):
self._geometry = geometry
elif isinstance(geometry, dict):
self._geometry = Geometry.from_dict(geometry)
else:
raise TypeError('geometry must be an instance of Geometry base class')
@property
def properties(self): # type: () -> _Jsonable
"""
The properties.
Returns
-------
"""
return self._properties
@properties.setter
def properties(self, properties):
self._properties = properties
@classmethod
def from_dict(cls, the_json):
typ = the_json['type']
if typ != cls._type:
raise ValueError('Feature cannot be constructed from {}'.format(the_json))
return cls(uid=the_json.get('id', None),
geometry=the_json.get('geometry', None),
properties=the_json.get('properties', None))
def to_dict(self, parent_dict=None):
if parent_dict is None:
parent_dict = OrderedDict()
parent_dict['type'] = self.type
parent_dict['id'] = self.uid
if self.geometry is None:
parent_dict['geometry'] = None
else:
parent_dict['geometry'] = self.geometry.to_dict()
parent_dict['properties'] = self.properties
return parent_dict
def add_to_kml(self, doc, coord_transform, parent=None):
"""
Add this feature to the kml document. **Note that coordinates or transformed
coordinates are assumed to be WGS-84 coordinates in longitude, latitude order.**
Currently only the first two (i.e. longitude and latitude) are used in
this export.
Parameters
----------
doc : sarpy.io.kml.Document
coord_transform : None|callable
If callable, the the transform will be applied to the coordinates before
adding to the document.
parent : None|minidom.Element
The parent node.
Returns
-------
None
"""
params = {}
if self.uid is not None:
params['id'] = self.uid
if self.properties is not None:
params['description'] = str(self.properties)
placemark = doc.add_container(par=parent, typ='Placemark', **params)
if self.geometry is not None:
self.geometry.add_to_kml(doc, placemark, coord_transform)
class FeatureList(_Jsonable):
"""
Generic FeatureList class - basic geojson functionality. Should generally be
extended to coherently handle specific Feature extension.
"""
__slots__ = ('_features', '_feature_dict')
_type = 'FeatureList'
def __init__(self, features=None):
self._features = None
self._feature_dict = None
if features is not None:
self.features = features
def __len__(self):
if self._features is None:
return 0
return len(self._features)
def __getitem__(self, item): # type: () -> Feature|List[Feature]
if isinstance(item, str):
return self._feature_dict[item]
return self._features[item]
@property
def features(self):
"""
The features list.
Returns
-------
List[Feature]
"""
return self._features
@features.setter
def features(self, features):
if features is None:
self._features = None
self._feature_dict = None
return
if not isinstance(features, list):
raise TypeError('features must be a list of features. Got {}'.format(type(features)))
for entry in features:
if isinstance(entry, Feature):
self.add_feature(entry)
elif isinstance(entry, dict):
self.add_feature(Feature.from_dict(entry))
else:
raise TypeError(
'Entries of features are required to be instances of Feature or '
'dictionary to be deserialized. Got {}'.format(type(entry)))
@classmethod
def from_dict(cls, the_json):
typ = the_json['type']
if typ != cls._type:
raise ValueError('FeatureList cannot be constructed from {}'.format(the_json))
return cls(features=the_json['features'])
def to_dict(self, parent_dict=None):
if parent_dict is None:
parent_dict = OrderedDict()
parent_dict['type'] = self.type
if self._features is None:
parent_dict['features'] = None
else:
parent_dict['features'] = [entry.to_dict() for entry in self._features]
return parent_dict
def add_feature(self, feature):
"""
Add a feature.
Parameters
----------
feature : Feature
Returns
-------
None
"""
if not isinstance(feature, Feature):
raise TypeError('This requires a Feature instance, got {}'.format(type(feature)))
if self._features is None:
self._feature_dict = {feature.uid: 0}
self._features = [feature, ]
else:
self._feature_dict[feature.uid] = len(self._features)
self._features.append(feature)
def export_to_kml(self, file_name, coord_transform=None, **params):
"""
Export to a kml document. **Note that underlying geometry coordinates or
transformed coordinates are assumed in longitude, latitude order.**
Currently only the first two (i.e. longitude and latitude) are used in this export.
Parameters
----------
file_name : str|zipfile.ZipFile|file like
coord_transform : None|callable
The coordinate transform function.
params : dict
Returns
-------
None
"""
with KML_Document(file_name=file_name, **params) as doc:
if self.features is not None:
for feat in self.features:
feat.add_to_kml(doc, coord_transform)
class Geometry(_Jsonable):
"""
Abstract Geometry base class.
"""
_type = 'Geometry'
@classmethod
def from_dict(cls, geometry):
"""
Deserialize from json.
Parameters
----------
geometry : Dict
Returns
-------
"""
typ = geometry['type']
if typ == 'GeometryCollection':
return GeometryCollection.from_dict(geometry)
else:
return GeometryObject.from_dict(geometry)
def to_dict(self, parent_dict=None):
raise NotImplementedError
def add_to_kml(self, doc, parent, coord_transform):
"""
Add the geometry to the kml document. **Note that coordinates or transformed
coordinates are assumed in longitude, latitude order.**
Parameters
----------
doc : sarpy.io.kml.Document
parent : xml.dom.minidom.Element
coord_transform : None|callable
Returns
-------
None
"""
raise NotImplementedError
class GeometryCollection(Geometry):
"""
Geometry collection - following the geojson structure
"""
__slots__ = ('_geometries', )
_type = 'GeometryCollection'
def __init__(self, geometries):
"""
Parameters
----------
geometries : None|List[Geometry]
"""
self._geometries = None
if geometries is not None:
self.geometries = geometries
@property
def geometries(self):
"""
The geometry collection.
Returns
-------
None|List[Geometry]
"""
return self._geometries
@geometries.setter
def geometries(self, geometries):
if geometries is None:
self._geometries = None
return
elif not isinstance(geometries, list):
raise TypeError(
'geometries must be None or a list of Geometry objects. Got type {}'.format(type(geometries)))
elif len(geometries) < 2:
raise ValueError('geometries must have length greater than 1.')
for entry in geometries:
if not isinstance(entry, Geometry):
raise TypeError(
'geometries must be a list of Geometry objects. Got an element of type {}'.format(type(entry)))
self._geometries = geometries
@classmethod
def from_dict(cls, geometry): # type: (Union[None, dict]) -> GeometryCollection
typ = geometry.get('type', None)
if typ != cls._type:
raise ValueError('GeometryCollection cannot be constructed from {}'.format(geometry))
geometries = []
for entry in geometry['geometries']:
if isinstance(entry, Geometry):
geometries.append(entry)
elif isinstance(entry, dict):
geometries.append(Geometry.from_dict(entry))
else:
raise TypeError(
'The geometries attribute must contain either a Geometry or json serialization of a Geometry. '
'Got an entry of type {}'.format(type(entry)))
return cls(geometries)
def to_dict(self, parent_dict=None):
if parent_dict is None:
parent_dict = OrderedDict()
parent_dict['type'] = self.type
if self.geometries is None:
parent_dict['geometries'] = None
else:
parent_dict['geometries'] = [entry.to_dict() for entry in self.geometries]
return parent_dict
def add_to_kml(self, doc, parent, coord_transform):
if self.geometries is None:
return
multigeometry = doc.add_multi_geometry(parent)
for geometry in self.geometries:
if geometry is not None:
geometry.add_to_kml(doc, multigeometry, coord_transform)
class GeometryObject(Geometry):
"""
Abstract geometry object class - mirrors basic geojson functionality
"""
_type = 'Geometry'
def get_coordinate_list(self):
"""
The geojson style coordinate list.
Returns
-------
list
"""
raise NotImplementedError
@classmethod
def from_dict(cls, geometry): # type: (dict) -> GeometryObject
typ = geometry.get('type', None)
if typ is None:
raise ValueError('Poorly formed json for GeometryObject {}'.format(geometry))
elif typ == 'Point':
return Point(coordinates=geometry['coordinates'])
elif typ == 'MultiPoint':
return MultiPoint(coordinates=geometry['coordinates'])
elif typ == 'LineString':
return LineString(coordinates=geometry['coordinates'])
elif typ == 'MultiLineString':
return MultiLineString(coordinates=geometry['coordinates'])
elif typ == 'Polygon':
return Polygon(coordinates=geometry['coordinates'])
elif typ == 'MultiPolygon':
return MultiPolygon(coordinates=geometry['coordinates'])
else:
raise ValueError('Unknown type {} for GeometryObject from json {}'.format(typ, geometry))
def to_dict(self, parent_dict=None):
if parent_dict is None:
parent_dict = OrderedDict()
parent_dict['type'] = self.type
parent_dict['coordinates'] = self.get_coordinate_list()
return parent_dict
def add_to_kml(self, doc, parent, coord_transform):
raise NotImplementedError
class Point(GeometryObject):
"""
A geometric point.
"""
__slots__ = ('_coordinates', )
_type = 'Point'
def __init__(self, coordinates=None):
self._coordinates = None
if coordinates is not None:
self.coordinates = coordinates
@property
def coordinates(self):
return self._coordinates
@coordinates.setter
def coordinates(self, coordinates): # type: (Union[None, list, tuple, numpy.ndarray]) -> None
if coordinates is None:
self._coordinates = None
return
if not isinstance(coordinates, numpy.ndarray):
coordinates = numpy.array(coordinates, dtype=numpy.float64)
if coordinates.ndim != 1:
raise ValueError(
'coordinates must be a one-dimensional array. Got shape {}'.format(coordinates.shape))
elif not (2 <= coordinates.size <= 4):
raise ValueError(
'coordinates must have between 2 and 4 entries. Got shape {}'.format(coordinates.shape))
else:
self._coordinates = coordinates
def get_coordinate_list(self):
if self._coordinates is None:
return None
else:
return self._coordinates.tolist()
@classmethod
def from_dict(cls, geometry): # type: (dict) -> Point
if not geometry.get('type', None) == cls._type:
raise ValueError('Poorly formed json {}'.format(geometry))
return cls(coordinates=geometry['coordinates'])
def add_to_kml(self, doc, parent, coord_transform):
if self.coordinates is None:
return
doc.add_point(_get_kml_coordinate_string(self.coordinates, coord_transform), par=parent)
class MultiPoint(GeometryObject):
"""
A collection of geometric points.
"""
_type = 'MultiPoint'
__slots__ = ('_points', )
def __init__(self, coordinates=None):
self._points = None
if coordinates is not None:
self.points = coordinates
@property
def points(self):
return self._points
@points.setter
def points(self, points):
if points is None:
self._points = None
return
if not isinstance(points, list):
raise TypeError(
'Multipoint requires that points is None or a list of points. '
'Got type {}'.format(type(points)))
if len(points) < 2:
raise ValueError(
'A MultiPoint requires at least two point components. '
'Got {}.'.format(len(points)))
self._points = [Point(coordinates=entry) for entry in points]
def get_coordinate_list(self):
if self._points is None:
return None
return [point.get_coordinate_list() for point in self._points]
@classmethod
def from_dict(cls, geometry): # type: (dict) -> MultiPoint
if not geometry.get('type', None) == cls._type:
raise ValueError('Poorly formed json {}'.format(geometry))
return cls(coordinates=geometry['coordinates'])
def add_to_kml(self, doc, parent, coord_transform):
if self._points is None:
return
multigeometry = doc.add_multi_geometry(parent)
for geometry in self._points:
if geometry is not None:
geometry.add_to_kml(doc, multigeometry, coord_transform)
class LineString(GeometryObject):
"""
A geometric line.
"""
__slots__ = ('_coordinates', )
_type = 'LineString'
def __init__(self, coordinates=None):
self._coordinates = None
if coordinates is not None:
self.coordinates = coordinates
@property
def coordinates(self):
return self._coordinates
@coordinates.setter
def coordinates(self, coordinates): # type: (Union[None, list, tuple, numpy.ndarray]) -> None
if coordinates is None:
self._coordinates = None
return
if not isinstance(coordinates, numpy.ndarray):
coordinates = numpy.array(coordinates, dtype=numpy.float64)
if coordinates.ndim != 2:
raise ValueError(
'coordinates must be a two-dimensional array. '
'Got shape {}'.format(coordinates.shape))
if not (2 <= coordinates.shape[1] <= 4):
raise ValueError(
'The second dimension of coordinates must have between 2 and 4 entries. '
'Got shape {}'.format(coordinates.shape))
if coordinates.shape[0] < 2:
raise ValueError(
'coordinates must consist of at least 2 points. '
'Got shape {}'.format(coordinates.shape))
coordinates = _compress_identical(coordinates)
if coordinates.shape[0] < 2:
raise ValueError(
'coordinates must consist of at least 2 points after suppressing '
'consecutive repeated points. Got shape {}'.format(coordinates.shape))
self._coordinates = coordinates
def get_coordinate_list(self):
if self._coordinates is None:
return None
else:
return self._coordinates.tolist()
@classmethod
def from_dict(cls, geometry): # type: (dict) -> LineString
if not geometry.get('type', None) == cls._type:
raise ValueError('Poorly formed json {}'.format(geometry))
return cls(coordinates=geometry['coordinates'])
def get_length(self):
"""
Gets the length of the line.
Returns
-------
None|float
"""
if self._coordinates is None:
return None
diffs = self._coordinates[1:, :] - self._coordinates[:-1, :]
return float(numpy.sum(numpy.sqrt(diffs[:, 0]*diffs[:, 0] + diffs[:, 1]*diffs[:, 1])))
def add_to_kml(self, doc, parent, coord_transform):
if self.coordinates is None:
return
doc.add_line_string(_get_kml_coordinate_string(self.coordinates, coord_transform), par=parent)
class MultiLineString(GeometryObject):
"""
A collection of geometric lines.
"""
__slots__ = ('_lines', )
_type = 'MultiLineString'
def __init__(self, coordinates=None):
self._lines = None
if coordinates is not None:
self.lines = coordinates
@property
def lines(self):
return self._lines
@lines.setter
def lines(self, lines):
if lines is None:
self._lines = None
return
if not isinstance(lines, list):
raise TypeError(
'MultiLineString requires that lines is None or a list of LineStrings. '
'Got type {}'.format(type(lines)))
if len(lines) < 2:
raise ValueError(
'A MultiLineString requires at least two LineString components. '
'Got {}.'.format(len(lines)))
self._lines = [LineString(coordinates=entry) for entry in lines]
def get_coordinate_list(self):
if self._lines is None:
return None
return [line.get_coordinate_list() for line in self._lines]
@classmethod
def from_dict(cls, geometry): # type: (dict) -> MultiLineString
if not geometry.get('type', None) == cls._type:
raise ValueError('Poorly formed json {}'.format(geometry))
return cls(coordinates=geometry['coordinates'])
def get_length(self):
"""
Gets the length of the lines.
Returns
-------
None|float
"""
if self._lines is None:
return None
return sum(entry.get_length() for entry in self._lines)
def add_to_kml(self, doc, parent, coord_transform):
if self._lines is None:
return
multigeometry = doc.add_multi_geometry(parent)
for geometry in self._lines:
if geometry is not None:
geometry.add_to_kml(doc, multigeometry, coord_transform)
class LinearRing(LineString):
"""
This is not directly a valid geojson member, but plays the role of a single
polygonal element, and is only used as a Polygon constituent.
"""
__slots__ = ('_coordinates', '_diffs', '_bounding_box', '_segmentation')
_type = 'LinearRing'
def __init__(self, coordinates=None):
self._coordinates = None
self._diffs = None
self._bounding_box = None
self._segmentation = None
super(LinearRing, self).__init__(coordinates)
def get_coordinate_list(self):
if self._coordinates is None:
return None
else:
return self._coordinates.tolist()
def reverse_orientation(self):
if self._coordinates is None:
return
self._coordinates = self._coordinates[::-1, :]
self._diffs *= -1
@property
def bounding_box(self):
"""
The bounding box of the form [[x_min, x_max], [y_min, y_max]].
*Note that this would be extremely misleading for a naively constructed
lat/lon polygon crossing the boundary of discontinuity and/or surrounding a pole.*
Returns
-------
numpy.ndarray
"""
return self._bounding_box
def get_perimeter(self):
"""
Gets the perimeter of the linear ring.
Returns
-------
float
"""
return self.get_length()
def get_area(self):
"""
Gets the area of the polygon. If a polygon is self-intersecting, then this
result may be pathological. If this is positive, then the orientation is
counter-clockwise. If this is negative, then the orientation is clockwise.
If zero, then this polygon is degenerate.
Returns
-------
float
"""
return float(0.5*numpy.sum(self._coordinates[:-1, 0]*self._coordinates[1:, 1] -
self._coordinates[1:, 0]*self._coordinates[:-1, 1]))
def get_centroid(self):
"""
Gets the centroid of the polygon - note that this may not actually lie in
the polygon interior for non-convex polygon. This will result in an undefined value
if the polygon is degenerate.
Returns
-------
numpy.ndarray
"""
arr = self._coordinates[:-1, 0]*self._coordinates[1:, 1] - \
self._coordinates[1:, 0]*self._coordinates[:-1, 1]
area = 0.5*numpy.sum(arr) # signed area
x = numpy.sum(0.5*(self._coordinates[:-1, 0] + self._coordinates[1:, 0])*arr)
y = numpy.sum(0.5*(self._coordinates[:-1, 1] + self._coordinates[1:, 1])*arr)
return numpy.array([x, y], dtype=numpy.float64)/(3*area)
@property
def coordinates(self):
"""
Gets the coordinates array.
Returns
-------
numpy.ndarray
"""
return self._coordinates
@coordinates.setter
def coordinates(self, coordinates):
self.set_coordinates(coordinates)
def set_coordinates(self, coordinates):
if coordinates is None:
self._coordinates = None
self._bounding_box = None
self._segmentation = None
self._diffs = None
return
if not isinstance(coordinates, numpy.ndarray):
coordinates = numpy.array(coordinates, dtype=numpy.float64)
if len(coordinates.shape) != 2:
raise ValueError(
'coordinates must be two-dimensional. Got shape {}'.format(coordinates.shape))
if not (2 <= coordinates.shape[1] <= 4):
raise ValueError('The second dimension of coordinates must have between 2 and 4 entries. '
'Got shape {}'.format(coordinates.shape))
if coordinates.shape[0] < 3:
raise ValueError('coordinates must consist of at least 3 points. '
'Got shape {}'.format(coordinates.shape))
coordinates = _compress_identical(coordinates)
if (coordinates[0, 0] != coordinates[-1, 0]) or \
(coordinates[0, 1] != coordinates[-1, 1]):
coordinates = numpy.vstack((coordinates, coordinates[0, :]))
if coordinates.shape[0] < 4:
raise ValueError(
'After compressing repeated (in sequence) points and ensuring first and '
'last point are the same, coordinates must contain at least 4 points. '
'Got shape {}'.format(coordinates.shape))
self._coordinates = coordinates
# construct bounding box
self._bounding_box = numpy.empty((2, 2), dtype=coordinates.dtype)
self._bounding_box[0, :] = (numpy.min(coordinates[:, 0]), numpy.max(coordinates[:, 0]))
self._bounding_box[1, :] = (numpy.min(coordinates[:, 1]), numpy.max(coordinates[:, 1]))
# construct diffs
self._diffs = coordinates[1:, :] - coordinates[:-1, :]
self._segmentation = {
'x': self._construct_segmentation(coordinates[:, 0], coordinates[:, 1]),
'y': self._construct_segmentation(coordinates[:, 1], coordinates[:, 0])
}
@staticmethod
def _construct_segmentation(coords, o_coords):
# helper method
def overlap(fst, lst, segment):
if fst == lst and fst == segment['min']:
return True
if segment['max'] <= fst or lst <= segment['min']:
return False
return True
def do_min_val_value(segment, val1, val2):
segment['min_value'] = min(val1, val2, segment['min_value'])
segment['max_value'] = max(val1, val2, segment['max_value'])
inds = | numpy.argsort(coords[:-1]) | numpy.argsort |
import errno
import os
import pickle
import numpy
import numpy as np
from numpy import linalg as LA
def emb_normalize(embeddings,
center=False,
unit_var=False,
pca=False,
drop_first_n=0,
reduce_dim=0,
max_norm=False):
if drop_first_n > 0 or reduce_dim > 0:
pca = True
print(f"∥μ∥:{LA.norm(embeddings.mean(axis=0))}")
print(f"var:{embeddings.var()}")
if center:
# zero-center the data
print("Centering word embeddings (mean subtraction) ...")
embeddings -= embeddings.mean(axis=0)
if unit_var:
print("Normalizing word embeddings (unit variance) ...")
embeddings /= embeddings.std(axis=0)
if max_norm:
print("Scaling to max norm 1 ...")
embeddings /= max(LA.norm(embeddings, axis=1))
if pca:
# get the data covariance matrix
cov = np.dot(embeddings.T, embeddings) / embeddings.shape[0]
# plt.imshow(cov, cmap='hot', interpolation='nearest')
# plt.show()
U, S, V = | np.linalg.svd(cov) | numpy.linalg.svd |
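# Hypothetical usage sketch (assumes the remainder of the PCA branch above): since
# emb_normalize mutates the array in place through -= and /=, the caller's matrix
# ends up centred and scaled to max row norm 1 without using a return value.
example_emb = np.random.randn(1000, 50)
emb_normalize(example_emb, center=True, max_norm=True)
assert np.allclose(example_emb.mean(axis=0), 0.0, atol=1e-8)
assert np.isclose(np.max(LA.norm(example_emb, axis=1)), 1.0)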
#from planenet code is adapted for planercnn code
import cv2
import numpy as np
WIDTH = 256
HEIGHT = 192
ALL_TITLES = ['PlaneNet']
ALL_METHODS = [('sample_np10_hybrid3_bl0_dl0_ds0_crfrnn5_sm0', '', 0, 2)]
def predict3D(folder, index, image, depth, segmentation, planes, info):
writePLYFile(folder, index, image, depth, segmentation, planes, info)
#writePLYFile(options.test_dir, image_index + options.startIndex, segmentationImageBlended, pred_dict['depth'][image_index], segmentation, pred_dict['plane'][image_index], pred_dict['info'][image_index])
print("done")
def getCameraFromInfo(info):
camera = {}
camera['fx'] = info[0]
camera['fy'] = info[5]
camera['cx'] = info[2]
camera['cy'] = info[6]
camera['width'] = info[16]
camera['height'] = info[17]
camera['depth_shift'] = info[18]
return camera
def writePLYFile(folder, index, image, depth, segmentation, planes, info):
imageFilename = str(index) + '_model_texture.png'
cv2.imwrite(folder + '/' + imageFilename, image)
width = image.shape[1]
height = image.shape[0]
numPlanes = planes.shape[0]
camera = getCameraFromInfo(info)
#camera = getNYURGBDCamera()
#camera = getSUNCGCamera()
urange = (np.arange(width, dtype=np.float32) / width * camera['width'] - camera['cx']) / camera['fx']
urange = urange.reshape(1, -1).repeat(height, 0)
vrange = (np.arange(height, dtype=np.float32) / height * camera['height'] - camera['cy']) / camera['fy']
vrange = vrange.reshape(-1, 1).repeat(width, 1)
X = depth * urange
Y = depth
Z = -depth * vrange
XYZ = np.stack([X, Y, Z], axis=2)
#focalLength = 517.97
faces = []
#minDepthDiff = 0.15
#maxDepthDiff = 0.3
#occlusionBoundary = boundaries[:, :, 1]
betweenRegionThreshold = 0.1
nonPlanarRegionThreshold = 0.02
planesD = np.linalg.norm(planes, axis=1, keepdims=True)
planeNormals = -planes / np.maximum(planesD, 1e-4)
croppingRatio = -0.05
dotThreshold = np.cos(np.deg2rad(30))
for y in range(height - 1):
for x in range(width - 1):
if y < height * croppingRatio or y > height * (1 - croppingRatio) or x < width * croppingRatio or x > width * (1 - croppingRatio):
continue
segmentIndex = segmentation[y][x]
if segmentIndex == -1:
continue
point = XYZ[y][x]
#neighborPixels = []
validNeighborPixels = []
for neighborPixel in [(x, y + 1), (x + 1, y), (x + 1, y + 1)]:
neighborSegmentIndex = segmentation[neighborPixel[1]][neighborPixel[0]]
if neighborSegmentIndex == segmentIndex:
if segmentIndex < numPlanes:
validNeighborPixels.append(neighborPixel)
else:
neighborPoint = XYZ[neighborPixel[1]][neighborPixel[0]]
if | np.linalg.norm(neighborPoint - point) | numpy.linalg.norm |
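# Hypothetical toy illustration (camera values are made up): with the pinhole model
# used in writePLYFile, the pixel at the principal point with depth d back-projects
# to (X, Y, Z) = (0, d, 0) in the convention above.
example_info = np.zeros(20)
example_info[0] = example_info[5] = 500.0          # fx, fy
example_info[2], example_info[6] = 128.0, 96.0     # cx, cy
example_info[16], example_info[17] = 256.0, 192.0  # sensor width, height
example_info[18] = 1000.0                          # depth_shift
example_cam = getCameraFromInfo(example_info)
example_u = (128.0 / WIDTH * example_cam['width'] - example_cam['cx']) / example_cam['fx']
example_v = (96.0 / HEIGHT * example_cam['height'] - example_cam['cy']) / example_cam['fy']
example_d = 2.5
# (example_d * example_u, example_d, -example_d * example_v) evaluates to (0.0, 2.5, -0.0)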
#!/usr/bin/env python2
# -*- coding: utf-8 -*-
"""
Created on Mon Oct 15 15:57:46 2018
@author:
<NAME>
MIT Kavli Institute for Astrophysics and Space Research,
Massachusetts Institute of Technology,
77 Massachusetts Avenue,
Cambridge, MA 02109,
USA
Email: <EMAIL>
Web: www.mnguenther.com
"""
#::: plotting settings
import seaborn as sns
sns.set(context='paper', style='ticks', palette='deep', font='sans-serif', font_scale=1.5, color_codes=True)
sns.set_style({"xtick.direction": "in","ytick.direction": "in"})
sns.set_context(rc={'lines.markeredgewidth': 1})
#::: modules
import numpy as np
import matplotlib.pyplot as plt
import ellc
from pprint import pprint
np.random.seed(42)
###############################################################################
#::: params
###############################################################################
params = {
'b_radius_1':0.1,
'b_radius_2':0.01,
'b_sbratio':0.,
'b_incl':89.,
'b_epoch':1.1,
'b_period':3.4,
'b_K':0.1,
'b_q':1,
'ld_1_Leonardo':'quad',
'ldc_1_Leonardo':[0.3,0.1],
'ld_1_Michelangelo':'quad',
'ldc_1_Michelangelo':[0.5,0.4]
}
a_1 = 0.019771142 * params['b_K'] * params['b_period']
params['b_a'] = (1.+1./params['b_q'])*a_1
pprint(params)
q1 = (0.3 + 0.1)**2
q2 = 0.5*0.3*(0.3 + 0.1)**(-1)
print('Leonardo q1 = '+str(q1))
print('Leonardo q2 = '+str(q2))
q1 = (0.5 + 0.4)**2
q2 = 0.5*0.5*(0.5 + 0.4)**(-1)
print('Michelangelo q1 = '+str(q1))
print('Michelangelo q2 = '+str(q2))
###############################################################################
#::: "truth" signals
###############################################################################
planet = 'b'
inst = 'Leonardo'
time_Leonardo = np.arange(0,10,5./60./24.)[::3]
time_Leonardo = time_Leonardo[ (time_Leonardo<2) | (time_Leonardo>4) ]
flux_Leonardo = ellc.lc(
t_obs = time_Leonardo,
radius_1 = params[planet+'_radius_1'],
radius_2 = params[planet+'_radius_2'],
sbratio = params[planet+'_sbratio'],
incl = params[planet+'_incl'],
t_zero = params[planet+'_epoch'],
period = params[planet+'_period'],
ld_1 = params['ld_1_'+inst],
ldc_1 = params['ldc_1_'+inst]
)
flux_Leonardo += 3e-4*np.sin(time_Leonardo/2.7) #red noise
flux_Leonardo += np.random.normal(0,2e-3,size=len(flux_Leonardo)) #white noise
flux_err_Leonardo = 2e-3*np.ones_like(flux_Leonardo) #white noise
header = 'time,flux,flux_err'
X = np.column_stack(( time_Leonardo, flux_Leonardo, flux_err_Leonardo ))
np.savetxt('allesfit/Leonardo.csv', X, delimiter=',', header=header)
inst = 'Michelangelo'
time_Michelangelo = np.arange(52,52.25,2./60./24.)[::2]
flux_Michelangelo = ellc.lc(
t_obs = time_Michelangelo,
radius_1 = params[planet+'_radius_1'],
radius_2 = params[planet+'_radius_2'],
sbratio = params[planet+'_sbratio'],
incl = params[planet+'_incl'],
t_zero = params[planet+'_epoch'],
period = params[planet+'_period'],
ld_1 = params['ld_1_'+inst],
ldc_1 = params['ldc_1_'+inst]
)
flux_Michelangelo += 2e-3*np.sin(time_Michelangelo*8) #red noise
flux_Michelangelo += np.random.normal(0,5e-4,size=len(flux_Michelangelo)) #white noise
flux_err_Michelangelo = 5e-4*np.ones_like(flux_Michelangelo) #white noise
header = 'time,flux,flux_err'
X = np.column_stack(( time_Michelangelo, flux_Michelangelo, flux_err_Michelangelo ))
| np.savetxt('allesfit/Michelangelo.csv', X, delimiter=',', header=header) | numpy.savetxt |
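# Hypothetical cross-check of the limb-darkening reparameterisation printed above
# (Kipping 2013): inverting (q1, q2) recovers the quadratic coefficients (u1, u2).
u1, u2 = params['ldc_1_Leonardo']
q1_check = (u1 + u2)**2
q2_check = 0.5 * u1 / (u1 + u2)
assert np.isclose(2. * np.sqrt(q1_check) * q2_check, u1)
assert np.isclose(np.sqrt(q1_check) * (1. - 2. * q2_check), u2)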
from skimage import segmentation, exposure
import matplotlib.pyplot as plt
import matplotlib.colors as colors
import numpy as np
from sklearn.ensemble import RandomForestClassifier
from sklearn import metrics
from lib import utils
def display_image(raw,watershed,classified,type):
# Save a color
empty_color = [.1,.1,.1] #Almost black
snow_color = [.9,.9,.9] #Almost white
pond_color = [.31,.431,.647] #Blue
gray_color = [.65,.65,.65] #Gray
water_color = [0.,0.,0.] #Black
shadow_color = [.100, .545, .0] #Dark green
custom_colormap = [empty_color,snow_color,gray_color,pond_color,water_color,shadow_color]
custom_colormap = colors.ListedColormap(custom_colormap)
#Making sure there is at least one of every pixel so the colors map properly (only changes
# display image, not saved data)
classified[0][0] = 0
classified[1][0] = 1
classified[2][0] = 2
classified[3][0] = 3
classified[4][0] = 4
classified[5][0] = 5
# Figure that show 3 images: raw, segmented, and classified
if type == 1:
# Creating the watershed display image with borders highlighted
ws_bound = segmentation.find_boundaries(watershed)
ws_display = utils.create_composite([raw,raw,raw])
ws_display[:,:,0][ws_bound] = 255
ws_display[:,:,1][ws_bound] = 255
ws_display[:,:,2][ws_bound] = 22
fig, axes = plt.subplots(1,3,subplot_kw={'xticks':[], 'yticks':[]})
fig.subplots_adjust(left=0.05,right=0.99,bottom=0.05,top=0.90,wspace=0.02,hspace=0.2)
tnrfont = {'fontname':'Times New Roman'}
axes[0].imshow(raw,cmap='gray',interpolation='None')
axes[0].set_title("Raw Image", **tnrfont)
axes[1].imshow(ws_display,interpolation='None')
axes[1].set_title("Image Segments", **tnrfont)
axes[2].imshow(classified,cmap=custom_colormap,interpolation='None')
axes[2].set_title("Classification Output", **tnrfont)
# Figure that shows 2 images: raw and classified.
if type == 2:
fig, axes = plt.subplots(1,2,subplot_kw={'xticks':[], 'yticks':[]})
fig.subplots_adjust(hspace=0.3,wspace=0.05)
axes[0].imshow(raw,interpolation='None')
axes[0].set_title("Raw Image")
axes[1].imshow(classified,cmap=custom_colormap,interpolation='None')
axes[1].set_title("Classification Output")
plt.show()
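# A hypothetical alternative to seeding one pixel of every class above: a BoundaryNorm
# pins each integer label 0-5 to a fixed colormap entry, so the rendered colours no
# longer depend on which classes happen to be present in the image.
class_boundaries = np.arange(7) - 0.5
class_norm = colors.BoundaryNorm(class_boundaries, 6)
# e.g. axes[2].imshow(classified, cmap=custom_colormap, norm=class_norm, interpolation='None')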
# Plots a watershed image on top of and beside the original image
## Used for debugging
def display_watershed(original_data, watershed_data, block=5):
# block = 5
watershed = watershed_data[block]
original_1 = original_data[6][block]
original_2 = original_data[4][block]
original_3 = original_data[1][block]
# randcolor = colors.ListedColormap(np.random.rand(256,3))
ws_bound = segmentation.find_boundaries(watershed)
ws_display = utils.create_composite([original_1,original_2,original_3])
ws_display[:,:,0][ws_bound] = 240
ws_display[:,:,1][ws_bound] = 80
ws_display[:,:,2][ws_bound] = 80
display_im = utils.create_composite([original_1,original_2,original_3])
fig, axes = plt.subplots(1,2,subplot_kw={'xticks':[], 'yticks':[]})
fig.subplots_adjust(hspace=0.3,wspace=0.05)
# axes[1].imshow(self.sobel_image,interpolation='none',cmap='gray')
axes[0].imshow(display_im,interpolation='none')
axes[1].imshow(ws_display,interpolation='none')
plt.show()
def display_histogram(image_band):
'''
Displays a histogram of the given band's data.
Ignores zero values.
'''
hist, bin_centers = exposure.histogram(image_band[image_band>0],nbins=1000)
plt.figure(1)
plt.bar(bin_centers, hist)
# plt.xlim((0,np.max(image_band)))
# plt.ylim((0,100000))
plt.xlabel("Pixel Intensity")
plt.ylabel("Frequency")
plt.show()
# Method to assess the training set and classification tree used for this classification
def test_training(label_vector, training_feature_matrix):
print("Size of training set: %i" %len(label_vector))
print( | np.shape(training_feature_matrix) | numpy.shape |
import pytest
import numpy as np
import lentil
def test_collect_charge_scalar():
img = np.random.uniform(low=0, high=100, size=(10, 10))
qe = np.random.uniform()
out = lentil.detector.collect_charge(img, 1, qe)
assert np.array_equal(out, img*qe)
def test_collect_charge_array():
img = np.random.uniform(low=0, high=100, size=(5, 10, 10))
qe = np.random.uniform(size=5)
out = lentil.detector.collect_charge(img, np.ones(5), qe)
assert np.array_equal(out, np.einsum('ijk,i->jk', img, qe))
def test_collect_charge_spectrum():
img = np.random.uniform(low=0, high=100, size=(5, 10, 10))
wave = np.arange(1, 6)
value = np.random.uniform(size=5)
qe = lentil.radiometry.Spectrum(wave, value)
out = lentil.detector.collect_charge(img, wave, qe)
assert np.array_equal(out, np.einsum('ijk,i->jk', img, value))
def test_collect_charge_bayer_even():
img = np.random.uniform(low=0, high=100, size=(5, 2, 2))
qe_red = np.random.uniform(size=5)
qe_green = np.random.uniform(size=5)
qe_blue = np.random.uniform(size=5)
bayer_pattern = [['R', 'G'], ['G', 'B']]
out = lentil.detector.collect_charge_bayer(img, np.ones(5), qe_red,
qe_green, qe_blue, bayer_pattern)
ul = np.sum(img[:, 0, 0]*qe_red)
ur = np.sum(img[:, 0, 1]*qe_green)
ll = np.sum(img[:, 1, 0]*qe_green)
lr = np.sum(img[:, 1, 1]*qe_blue)
assert np.array_equal(out, np.array([[ul, ur], [ll, lr]]))
def test_collect_charge_bayer_odd():
img = np.random.uniform(low=0, high=100, size=(5, 3, 3))
qe_red = np.random.uniform(size=5)
qe_green = np.random.uniform(size=5)
qe_blue = np.random.uniform(size=5)
bayer_pattern = [['R', 'G'], ['G', 'B']]
out = lentil.detector.collect_charge_bayer(img, np.ones(5), qe_red,
qe_green, qe_blue, bayer_pattern)
a = np.sum(img[:, 0, 0]*qe_red)
b = np.sum(img[:, 0, 1]*qe_green)
c = np.sum(img[:, 0, 2]*qe_red)
d = np.sum(img[:, 1, 0]*qe_green)
e = np.sum(img[:, 1, 1]*qe_blue)
f = np.sum(img[:, 1, 2]*qe_green)
g = np.sum(img[:, 2, 0]*qe_red)
h = np.sum(img[:, 2, 1]*qe_green)
i = np.sum(img[:, 2, 2]*qe_red)
assert np.array_equal(out, np.array([[a, b, c], [d, e, f], [g, h, i]]))
def test_collect_charge_bayer_oversample():
img = np.random.uniform(low=0, high=100, size=(5, 4, 4))
qe_red = np.random.uniform(size=5)
qe_green = np.random.uniform(size=5)
qe_blue = np.random.uniform(size=5)
bayer_pattern = [['R', 'G'], ['G', 'B']]
out = lentil.detector.collect_charge_bayer(img, np.ones(5), qe_red,
qe_green, qe_blue, bayer_pattern,
oversample=2)
ul = np.einsum('ijk,i->jk', img[:, 0:2, 0:2], qe_red)
ur = np.einsum('ijk,i->jk', img[:, 0:2, 2:4], qe_green)
ll = np.einsum('ijk,i->jk', img[:, 2:4, 0:2], qe_green)
lr = np.einsum('ijk,i->jk', img[:, 2:4, 2:4], qe_blue)
assert np.array_equal(out, np.block([[ul, ur], [ll, lr]]))
def test_pixel_unitary():
a = np.random.uniform(low=0, high=1, size=(100, 100))
b = lentil.detector.pixel(a, oversample=1)
assert np.isclose(np.sum(a), np.sum(b))
def test_pixelate():
# we've already tested rescale and the pixel MTF separately, so we're just
# going to ensure the returned image has the right shape here
img = np.ones((10, 10))
out = lentil.detector.pixelate(img, 2)
assert out.shape == (5, 5)
def test_adc_saturation_capacity():
img = 10 * np.ones((10, 10))
out = lentil.detector.adc(img, gain=1, saturation_capacity=1)
assert np.array_equal(out, np.ones((10, 10)))
def test_adc_warn_saturate():
with pytest.warns(UserWarning):
img = 10 * np.ones((10, 10))
out = lentil.detector.adc(img, gain=1, saturation_capacity=1,
warn_saturate=True)
def test_adc_float_gain():
g = np.random.uniform(size=1)
img = 10*np.random.uniform(size=(10, 10))
out = lentil.detector.adc(img, g, saturation_capacity=None)
assert np.array_equal(out, np.floor(g*img))
def test_adc_array_gain():
g = np.random.uniform(size=(10, 10))
img = 10*np.random.uniform(size=(10, 10))
out = lentil.detector.adc(img, g, saturation_capacity=None)
assert np.array_equal(out, np.floor(g*img))
def test_adc_nonlinear_gain():
g = np.random.uniform(low=0.9, high=1.5, size=4)
img = 10*np.ones((10, 10))
out = lentil.detector.adc(img, g, saturation_capacity=None)
assert np.isclose(out[0, 0], np.floor(np.polyval(np.append(g, 0), 10)))
def test_adc_nonlinear_gain_cube():
g = np.random.uniform(low=0.9, high=1.5, size=(4, 10, 10))
img = 10*np.ones((10, 10))
out = lentil.detector.adc(img, g, saturation_capacity=None)
index = np.random.uniform(low=0, high=9, size=2)
r = int(index[0])
c = int(index[1])
assert np.isclose(out[r, c], np.floor(np.polyval(np.append(g[:, r, c], 0), 10)))
def test_adc_invalid_gain():
img = 10*np.ones((10, 10))
with pytest.raises(ValueError):
out = lentil.detector.adc(img, gain=np.ones((1, 1, 1, 1)))
def test_adc_dtype():
g = np.random.uniform(size=1)
img = 10*np.random.uniform(size=(10, 10))
out = lentil.detector.adc(img, g, dtype=np.int8)
assert out.dtype == np.int8
def test_shot_noise_poisson():
img = np.random.uniform(low=0, high=1e5, size=(2, 2))
shot1 = lentil.detector.shot_noise(img, method='poisson')
shot2 = lentil.detector.shot_noise(img, method='poisson')
assert np.all(shot1 != shot2)
assert shot1.shape == (2, 2)
def test_shot_noise_poisson_seed():
img = np.random.uniform(low=0, high=1e5, size=(2, 2))
shot1 = lentil.detector.shot_noise(img, method='poisson', seed=12345)
shot2 = lentil.detector.shot_noise(img, method='poisson', seed=12345)
assert | np.array_equal(shot1, shot2) | numpy.array_equal |
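# Hypothetical standalone illustration of the weighting the collect_charge tests above
# rely on: integrating a spectral cube against a QE curve is a wavelength-weighted sum,
# i.e. einsum('ijk,i->jk').
example_cube = np.random.uniform(size=(5, 4, 4))
example_qe = np.random.uniform(size=5)
example_expected = sum(example_qe[i] * example_cube[i] for i in range(5))
assert np.allclose(example_expected, np.einsum('ijk,i->jk', example_cube, example_qe))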
import datetime
import io
import pathlib
import pickle
import re
import uuid
import gym
import numpy as np
import tensorflow as tf
import tensorflow.compat.v1 as tf1
import tensorflow_probability as tfp
from tensorflow.keras.mixed_precision import experimental as prec
from tensorflow_probability import distributions as tfd
class AttrDict(dict):
__setattr__ = dict.__setitem__
__getattr__ = dict.__getitem__
class Module(tf.Module):
def save(self, filename):
values = tf.nest.map_structure(lambda x: x.numpy(), self.variables)
with pathlib.Path(filename).open('wb') as f:
pickle.dump(values, f)
def load(self, filename):
with pathlib.Path(filename).open('rb') as f:
values = pickle.load(f)
tf.nest.map_structure(lambda x, y: x.assign(y), self.variables, values)
def get(self, name, ctor, *args, **kwargs):
# Create or get layer by name to avoid mentioning it in the constructor.
if not hasattr(self, '_modules'):
self._modules = {}
if name not in self._modules:
self._modules[name] = ctor(*args, **kwargs)
return self._modules[name]
def nest_summary(structure):
if isinstance(structure, dict):
return {k: nest_summary(v) for k, v in structure.items()}
if isinstance(structure, list):
return [nest_summary(v) for v in structure]
if hasattr(structure, 'shape'):
return str(structure.shape).replace(', ', 'x').strip('(), ')
return '?'
def graph_summary(writer, fn, *args):
step = tf.summary.experimental.get_step()
def inner(*args):
tf.summary.experimental.set_step(step)
with writer.as_default():
fn(*args)
return tf.numpy_function(inner, args, [])
def video_summary(name, video, step=None, fps=20):
name = name if isinstance(name, str) else name.decode('utf-8')
if np.issubdtype(video.dtype, np.floating):
video = np.clip(255 * video, 0, 255).astype(np.uint8)
B, T, H, W, C = video.shape
try:
frames = video.transpose((1, 2, 0, 3, 4)).reshape((T, H, B * W, C))
summary = tf1.Summary()
image = tf1.Summary.Image(height=B * H, width=T * W, colorspace=C)
image.encoded_image_string = encode_gif(frames, fps)
summary.value.add(tag=name + '/gif', image=image)
tf.summary.experimental.write_raw_pb(summary.SerializeToString(), step)
except (IOError, OSError) as e:
print('GIF summaries require ffmpeg in $PATH.', e)
frames = video.transpose((0, 2, 1, 3, 4)).reshape((1, B * H, T * W, C))
tf.summary.image(name + '/grid', frames, step)
def encode_gif(frames, fps):
from subprocess import Popen, PIPE
h, w, c = frames[0].shape
pxfmt = {1: 'gray', 3: 'rgb24'}[c]
cmd = ' '.join([
f'ffmpeg -y -f rawvideo -vcodec rawvideo',
f'-r {fps:.02f} -s {w}x{h} -pix_fmt {pxfmt} -i - -filter_complex',
f'[0:v]split[x][z];[z]palettegen[y];[x]fifo[x];[x][y]paletteuse',
f'-r {fps:.02f} -f gif -'])
proc = Popen(cmd.split(' '), stdin=PIPE, stdout=PIPE, stderr=PIPE)
for image in frames:
proc.stdin.write(image.tostring())
out, err = proc.communicate()
if proc.returncode:
raise IOError('\n'.join([' '.join(cmd), err.decode('utf8')]))
del proc
return out
def simulate(agent, envs, steps=0, episodes=0, state=None):
# Initialize or unpack simulation state.
if state is None:
step, episode = 0, 0
done = np.ones(len(envs), np.bool)
length = np.zeros(len(envs), np.int32)
obs = [None] * len(envs)
agent_state = None
else:
step, episode, done, length, obs, agent_state = state
import time
while (steps and step < steps) or (episodes and episode < episodes):
# Reset envs if necessary.
print("step, episode: ", step, episode)
start = time.time()
if done.any():
indices = [index for index, d in enumerate(done) if d]
promises = [envs[i].reset(blocking=False) for i in indices]
for index, promise in zip(indices, promises):
obs[index] = promise()
# Step agents.
obs = {k: np.stack([o[k] for o in obs]) for k in obs[0]}
action, agent_state = agent(obs, done, agent_state)
action = np.array(action)
print(action)
print("get action time: ", time.time()-start)
start = time.time()
assert len(action) == len(envs)
# Step envs.
promises = [e.step(a, blocking=False) for e, a in zip(envs, action)]
obs, _, done = zip(*[p()[:3] for p in promises])
obs = list(obs)
done = | np.stack(done) | numpy.stack |
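# Hypothetical quick check of nest_summary (not from the original repo): shapes in a
# nested observation structure are rendered as compact strings.
example_obs = {'image': np.zeros((64, 64, 3)), 'reward': np.zeros((16,)), 'extra': [np.zeros((2, 3))]}
assert nest_summary(example_obs) == {'image': '64x64x3', 'reward': '16', 'extra': ['2x3']}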
import torch
from torch.utils.data import Dataset
from sklearn.preprocessing import LabelEncoder
import pandas as pd
import os
from pyntcloud import PyntCloud
import numpy as np
import random
def translate_pointcloud(pointcloud):
xyz1 = np.random.uniform(low=2. / 3., high=3. / 2., size=[3])
xyz2 = np.random.uniform(low=-0.2, high=0.2, size=[3])
translated_pointcloud = np.add(np.multiply(pointcloud, xyz1), xyz2).astype('float32')
return translated_pointcloud
def jitter_pointcloud(pointcloud, sigma=1, clip=0.02):
N, C = pointcloud.shape
rotation = np.copy(pointcloud)
rotation += np.clip(sigma * np.random.randn(N, C), -1 * clip, clip)
return rotation
def generate_24_rotations():
res = []
for id in [[0, 1, 2], [1, 2, 0], [2, 0, 1]]:
R = np.identity(3)[:, id].astype(int)
R1= np.asarray([R[:, 0], R[:, 1], R[:, 2]]).T
R2 = np.asarray([-R[:, 0], -R[:, 1], R[:, 2]]).T
R3 = np.asarray([-R[:, 0], R[:, 1], -R[:, 2]]).T
R4 = np.asarray([R[:, 0], -R[:, 1], -R[:, 2]]).T
res += [R1, R2, R3, R4]
for id in [[0, 2, 1], [1, 0, 2], [2, 1, 0]]:
R = np.identity(3)[:, id].astype(int)
R1 = np.asarray([-R[:, 0], -R[:, 1], -R[:, 2]]).T
R2 = np.asarray([-R[:, 0], R[:, 1], R[:, 2]]).T
R3 = np.asarray([R[:, 0], -R[:, 1], R[:, 2]]).T
R4 = np.asarray([R[:, 0], R[:, 1], -R[:, 2]]).T
res += [R1, R2, R3, R4]
return res
def rotate_pointcloud(pointcloud):
# theta = np.random.normal(0, (np.pi**2)/16, 1)[0]
# print(theta)
theta = np.pi * 2 * np.random.choice(24) / 24
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta)], [np.sin(theta), np.cos(theta)]])
rotation = np.copy(pointcloud)
rotation[:, [0, 2]] = pointcloud[:, [0, 2]].dot(rotation_matrix) # random rotation (x,z)
return rotation, theta
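# Hypothetical sanity check (not part of the original pipeline): rotate_pointcloud is a
# pure rotation about the y-axis, so per-point norms are preserved up to float32 error.
example_pts = np.random.randn(128, 3).astype('float32')
example_rotated, example_theta = rotate_pointcloud(example_pts)
assert np.allclose(np.linalg.norm(example_pts, axis=1), np.linalg.norm(example_rotated, axis=1), atol=1e-5)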
def three_d_rotation(pointcloud):
alpha = np.pi * 2 * np.random.choice(24) / 24
beta = np.pi * 2 * np.random.choice(24) / 24
gamma = np.pi * 2 * np.random.choice(24) / 24
rotation_matrix = np.array(
[[np.cos(beta) * np.cos(gamma),
(np.sin(alpha) * np.sin(beta) * np.cos(gamma)) - (np.cos(alpha) * np.sin(gamma)),
(np.cos(alpha) * np.sin(beta) * np.cos(gamma)) + (np.sin(alpha) * np.sin(gamma))],
[np.cos(beta) * np.sin(gamma),
(np.sin(alpha) * np.sin(beta) * np.sin(gamma)) + (np.cos(alpha) * np.cos(gamma)),
(np.cos(alpha) * np.sin(beta) * np.sin(gamma)) - (np.sin(alpha) * np.cos(gamma))],
[-np.sin(beta),
np.sin(alpha) * np.cos(beta),
np.cos(alpha) * np.cos(beta)]]
)
rotation = np.copy(pointcloud)
rotation[:, ] = pointcloud[:, ].dot(rotation_matrix)
return rotation, (alpha, beta, gamma)
class PointCloudDataset(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=False,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.centring_only = centring_only
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
img_path = os.path.join(
self.img_dir, treatment, self.new_df.loc[idx, "serialNumber"]
)
image = PyntCloud.from_file(img_path + ".ply")
image = torch.tensor(image.points.values)
# TODO: take away after testing
if self.centring_only:
mean = torch.mean(image, 0)
# mean = torch.tensor([[13.4828, 26.5144, 24.4187]])
# std = torch.tensor([[9.2821, 20.4512, 18.9049]])
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (image - mean) / std
# / std
# TODO: _____________________________________________
else:
mean = torch.tensor([[13.4828, 26.5144, 24.4187]])
std = torch.tensor([[9.2821, 20.4512, 18.9049]])
image = (image - mean) / std
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
return image, label, feats
class PointCloudDatasetAll(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.centring_only = centring_only
self.cell_component = cell_component
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
if self.cell_component == "cell":
component_path = "stacked_pointcloud"
else:
component_path = "stacked_pointcloud_nucleus"
img_path = os.path.join(
self.img_dir,
plate_num,
component_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
image = PyntCloud.from_file(img_path + ".ply")
image = torch.tensor(image.points.values)
# TODO: take away after testing
if self.centring_only:
mean = torch.mean(image, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (image - mean) / std
else:
mean = torch.tensor([[13.4828, 26.5144, 24.4187]])
std = torch.tensor([[9.2821, 20.4512, 18.9049]])
image = (image - mean) / std
# TODO: _____________________________________________
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, label, serial_number
class PointCloudDatasetAllBoth(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=False,
cell_component="cell",
proximal=1,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
& (
(self.annot_df.Treatment == "Nocodazole")
| (self.annot_df.Treatment == "Blebbistatin")
)
& (self.annot_df.Proximal == self.proximal)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
full = torch.cat((cell[:1024], nuc[:1024]))
mean = torch.mean(full, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (full - mean) / std
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
return image, label, feats
class PointCloudDatasetAllBothNotSpec(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=False,
cell_component="cell",
proximal=1,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
full = torch.tensor(np.concatenate((cell[:1024], nuc[:1024])))
mean = torch.mean(full, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (full - mean) / std
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, label, serial_number
class PointCloudDatasetAllBothNotSpec1024(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
proximal=1,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
full = torch.tensor(np.concatenate((cell[:512], nuc[:512])))
mean = torch.mean(full, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (full - mean) / std
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, label, serial_number
class PointCloudDatasetAll1024(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.centring_only = centring_only
self.cell_component = cell_component
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
if self.cell_component == "cell":
component_path = "stacked_pointcloud"
else:
component_path = "stacked_pointcloud_nucleus"
img_path = os.path.join(
self.img_dir,
plate_num,
component_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
image = PyntCloud.from_file(img_path + ".ply")
image = torch.tensor(image.points.values)
# TODO: take away after testing
if self.centring_only:
image = image[:1024]
mean = torch.mean(image, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (image - mean) / std
# TODO: _____________________________________________
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, label, serial_number
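# Minimal usage sketch for the dataset classes above (not part of the original
# file). The CSV path, point-cloud directory and batch size below are
# placeholders and assume the annotations/stacked_pointcloud layout that the
# constructors expect.
#
#   from torch.utils.data import DataLoader
#   dataset = PointCloudDatasetAll1024("annotations.csv", "/path/to/plates")
#   loader = DataLoader(dataset, batch_size=16, shuffle=True)
#   for image, label, serial_number in loader:
#       pass  # image: (B, 1024, 3) centred and scaled point clouds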
class PointCloudDatasetAllRotation1024(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.centring_only = centring_only
self.cell_component = cell_component
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
if self.cell_component == "cell":
component_path = "stacked_pointcloud"
else:
component_path = "stacked_pointcloud_nucleus"
img_path = os.path.join(
self.img_dir,
plate_num,
component_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
image = PyntCloud.from_file(img_path + ".ply")
image = torch.tensor(image.points.values)
# TODO: take away after testing
image = image[:1024]
mean = torch.mean(image, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (image - mean) / std
        # convert to numpy for the rotation helper and back to tensors,
        # matching the other *Rotation dataset classes below
        rotated_image, angles = three_d_rotation(image.numpy())
        rotated_image = torch.tensor(rotated_image)
        angles = torch.tensor(angles)
# TODO: _____________________________________________
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, rotated_image, angles, serial_number
class PointCloudDatasetAllBothNotSpecRotation(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=False,
cell_component="cell",
proximal=1,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
full = torch.tensor(np.concatenate((cell[:1024], nuc[:1024])))
mean = torch.mean(full, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (full - mean) / std
rotated_image, angles = three_d_rotation(image.numpy())
rotated_image = torch.tensor(rotated_image)
angles = torch.tensor(angles)
# TODO: _____________________________________________
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, rotated_image, angles, serial_number
class PointCloudDatasetAllBothNotSpecRotation1024(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
proximal=1,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
full = torch.tensor(np.concatenate((cell[:512], nuc[:512])))
mean = torch.mean(full, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (full - mean) / std
rotated_image, angles = three_d_rotation(image.numpy())
rotated_image = torch.tensor(rotated_image)
angles = torch.tensor(angles)
# TODO: _____________________________________________
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, rotated_image, angles, serial_number
class PointCloudDatasetAllBothNotSpec2DRotation1024(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
proximal=1,
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
full = torch.tensor(np.concatenate((cell[:512], nuc[:512])))
mean = torch.mean(full, 0)
std = torch.tensor([[20.0, 20.0, 20.0]])
image = (full - mean) / std
rotated_image, angles = rotate_pointcloud(image.numpy())
rotated_image = torch.tensor(rotated_image)
angles = torch.tensor(angles)
# TODO: _____________________________________________
# return encoded label as tensor
label = self.new_df.loc[idx, "label_col_enc"]
label = torch.tensor(label)
# return the classical features as torch tensor
feats = self.new_df.iloc[idx, 16:-4]
feats = torch.tensor(feats)
serial_number = self.new_df.loc[idx, "serialNumber"]
return image, rotated_image, angles, serial_number
class PointCloudDatasetAllBothKLDivergranceRotation1024(Dataset):
def __init__(
self,
annotations_file,
img_dir,
img_size=400,
label_col="Treatment",
transform=None,
target_transform=None,
centring_only=True,
cell_component="cell",
proximal=1,
rotation_matrices=generate_24_rotations(),
):
self.annot_df = pd.read_csv(annotations_file)
self.img_dir = img_dir
self.img_size = img_size
self.label_col = label_col
self.transform = transform
self.target_transform = target_transform
self.cell_component = cell_component
self.proximal = proximal
self.rotation_matrices = rotation_matrices
self.new_df = self.annot_df[
(self.annot_df.xDim <= self.img_size)
& (self.annot_df.yDim <= self.img_size)
& (self.annot_df.zDim <= self.img_size)
].reset_index(drop=True)
# encode label
le = LabelEncoder()
label_col_enc = self.new_df.loc[:, self.label_col]
label_col_enc = le.fit_transform(label_col_enc)
self.new_df["label_col_enc"] = label_col_enc
def __len__(self):
return len(self.new_df)
def __getitem__(self, idx):
# read the image
treatment = self.new_df.loc[idx, "Treatment"]
plate_num = "Plate" + str(self.new_df.loc[idx, "PlateNumber"])
cell_path = "stacked_pointcloud"
nuc_path = "stacked_pointcloud_nucleus"
cell_img_path = os.path.join(
self.img_dir,
plate_num,
cell_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
nuc_img_path = os.path.join(
self.img_dir,
plate_num,
nuc_path,
treatment,
self.new_df.loc[idx, "serialNumber"],
)
cell = PyntCloud.from_file(cell_img_path + ".ply")
nuc = PyntCloud.from_file(nuc_img_path + ".ply")
cell = torch.tensor(cell.points.values)
nuc = torch.tensor(nuc.points.values)
        full = torch.tensor(np.concatenate((cell[:512], nuc[:512])))
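        # NOTE: the original file is truncated at this point. The remainder of
        # __getitem__ below is a sketch reconstructed from the sibling
        # *Rotation1024 classes, not the original code; the random choice from
        # self.rotation_matrices and the returned rotation index are assumptions.
        mean = torch.mean(full, 0)
        std = torch.tensor([[20.0, 20.0, 20.0]])
        image = (full - mean) / std
        # apply one of the 24 pre-generated axis-aligned rotations at random
        rot_idx = np.random.randint(len(self.rotation_matrices))
        rotation = torch.tensor(self.rotation_matrices[rot_idx], dtype=image.dtype)
        rotated_image = image @ rotation.T
        # return the encoded label and serial number as in the other datasets
        label = torch.tensor(self.new_df.loc[idx, "label_col_enc"])
        serial_number = self.new_df.loc[idx, "serialNumber"]
        return image, rotated_image, rot_idx, serial_number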
##############################################################################
#
# Copyright (c) 2003-2018 by The University of Queensland
# http://www.uq.edu.au
#
# Primary Business: Queensland, Australia
# Licensed under the Apache License, version 2.0
# http://www.apache.org/licenses/LICENSE-2.0
#
# Development until 2012 by Earth Systems Science Computational Center (ESSCC)
# Development 2012-2013 by School of Earth Sciences
# Development from 2014 by Centre for Geoscience Computing (GeoComp)
#
##############################################################################
from __future__ import print_function, division
__copyright__="""Copyright (c) 2003-2018 by The University of Queensland
http://www.uq.edu.au
Primary Business: Queensland, Australia"""
__license__="""Licensed under the Apache License, version 2.0
http://www.apache.org/licenses/LICENSE-2.0"""
__url__="https://launchpad.net/escript-finley"
"""
test for util operations for unary operations with tagged data
:remark: see `test_util`
:var __author__: name of author
:var __copyright__: copyrights
:var __license__: licence agreement
:var __url__: url entry point on documentation
:var __version__: version
:var __date__: date of the version
"""
__author__="<NAME>, <EMAIL>"
import esys.escriptcore.utestselect as unittest
import numpy
from esys.escript import *
from test_util_base import Test_util_base
haveLapack = hasFeature('lapack')
class Test_util_unary_with_tagged_data(Test_util_base):
"""
test for unary operations. only tagged data are tested.
"""
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank0(self):
arg=Data(55.1652630602,self.functionspace)
arg.setTaggedValue(1,10.5828519405)
res=log10(arg)
ref=Data(1.74166569349,self.functionspace)
ref.setTaggedValue(1,1.02460272017)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank1(self):
arg=Data(numpy.array([72.282406932431243, 35.729324148665768]),self.functionspace)
arg.setTaggedValue(1,numpy.array([23.804409029158563, 82.472345107833661]))
res=log10(arg)
ref=Data(numpy.array([1.8590326057050119, 1.5530248012211607]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.3766574041024322, 1.916308343937587]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank2(self):
arg=Data(numpy.array([[51.453194171526192, 13.570707659190413, 60.521549437302234, 31.599538694833306,
14.891175253445139], [98.173449179193497, 5.0087678798438278, 52.481382734493792, 29.128158011918146, 98.064075237764598],
[36.407550507350827, 89.884167676960288, 39.308304837547745, 75.538185852569995, 33.601340111371606], [63.889377928887228,
4.6186118848356488, 69.136277385337451, 2.6710200091532696, 63.918258275478514]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[3.7796743359003022, 13.180984629177244, 59.461828020927761, 20.406986279454642,
72.171015597628937], [1.9587215571356407, 17.280986015551932, 84.05693756007831, 45.547880329201142, 32.870521541704392],
[26.737813837799116, 37.531132826532321, 51.180765330858762, 78.2056706013324, 52.489986316281318], [63.141114740929247,
23.684128984789403, 81.88613234855724, 36.918777925154153, 19.245705222936365]]))
res=log10(arg)
ref=Data(numpy.array([[1.7114123405757837, 1.1326024950044853, 1.7819100380467305, 1.4996807426262928,
1.1729289748471519], [1.9919940495580575, 0.69973090574844821, 1.7200052689371468, 1.464313021855508, 1.991509937212202],
[1.5611914606717532, 1.9536832012319965, 1.5944843153649138, 1.8781665504719267, 1.5263565985615033], [1.8054286595616771,
0.66451146877260103, 1.8397059914266514, 0.4266771414217112, 1.8056249322687863]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.57745438179108843, 1.1199478535847189, 1.7742382564867842, 1.309778872484638,
1.8583628167754329], [0.29197270307270162, 1.2375685187503027, 1.9245735633402872, 1.6584681709302851, 1.516806594890489],
[1.4271258951849872, 1.5743916732030367, 1.7091067758161855, 1.8932382443844189, 1.7200764595510651], [1.8003122449866822,
1.3744574176029094, 1.9132103589710923, 1.5672473166998622, 1.2843338296326507]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank3(self):
arg=Data(numpy.array([[[90.690037899671296, 23.785411821546496], [91.533710749831201, 35.110847145935921]],
[[31.790987742717761, 80.73086172262245], [76.879402834581697, 85.744103669605451]], [[8.1269631331611549, 52.871037837294452],
[9.1059277056430368, 62.81826904111756]], [[91.343888112038101, 42.790045009836057], [98.923113540366373, 76.508459842843422]],
[[75.511345908209677, 53.05029465716877], [21.756665086458423, 84.34627478690912]], [[20.881744462723443, 21.498514780242811],
[94.039541086706947, 63.040386118170531]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[49.039591873498914, 47.203472254904376], [13.39618662616329, 80.407751980277737]],
[[94.740716086709412, 71.680553195598819], [22.063626045323556, 78.003840393051846]], [[32.738459746566498,
34.410070237534583], [90.599810283330726, 38.951280733947229]], [[97.583590849819274, 21.088714492740912], [60.799357198434329,
9.2997265651169414]], [[92.165943907973187, 12.913360305242884], [89.764291870306224, 11.704176719145334]],
[[33.563051881776232, 10.411945777444954], [23.411376390403166, 48.768282109713994]]]))
res=log10(arg)
ref=Data(numpy.array([[[1.9575595833906394, 1.3763106752125882], [1.9615810688900812, 1.5454413081428469]],
[[1.50230402160926, 1.9070395881750126], [1.885810001264896, 1.9332042647871273]], [[0.90992828964073624, 1.7232178355155006],
[0.95932419800764912, 1.7980859652326844]], [[1.9606794936916605, 1.6313427433082643], [1.9952977770394453,
1.8837094594664194]], [[1.8780122111672353, 1.7246878004452211], [1.3375923264330749, 1.9260659064246666]],
[[1.3197667768440355, 1.3324084578214161], [1.9733105010994656, 1.7996188645144506]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.6905468471583012, 1.6739739462120611], [1.1269811891924277, 1.9052979203898317]],
[[1.9765366629687919, 1.855401348340298], [1.3436768880708625, 1.8921159850039784]], [[1.5150582432096382, 1.5366855590999351],
[1.9571272882619251, 1.59052174202249]], [[1.9893767950999692, 1.3240501072063162], [1.7838989877160136, 0.96847017941343561]],
[[1.964570475228363, 1.1110392687676565], [1.9531036091864897, 1.0683408704968049]], [[1.5258614442649203, 1.0175318977566594],
[1.3694269472799274, 1.6881374575925516]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log10_taggedData_rank4(self):
arg=Data(numpy.array([[[[90.916803177883821, 54.525338275396493, 13.364424356447465, 51.314413977784419],
[80.231561588327807, 82.162456273997748, 8.3566662162590539, 94.730819165987], [77.473176646131762, 20.972609574388059,
74.896335161378119, 88.10387415596874]], [[11.595133878605829, 29.493110939671826, 31.490679142790391, 39.161104201178077],
[32.61379799879397, 58.334287443171277, 88.246072805422187, 35.746693154753984], [88.031724708015759, 42.086782575753446,
76.30576562684233, 41.664454917294485]]], [[[73.805732338880929, 29.722157924518495, 11.979308129040467, 56.678829139293917],
[6.1110346378486105, 61.420099159473246, 24.460988572874975, 9.9837108208795708], [29.304214355701266, 69.239538294798919,
43.726703031386528, 96.453481611027584]], [[83.748022272324235, 32.953465755838039, 34.11675054427031, 16.642877884588994],
[64.574790966313543, 42.938611636354324, 46.810954363884647, 91.97971646326387], [96.485547539718311, 42.855584051837369,
73.227470310618529, 73.565844556183777]]], [[[88.201355962594207, 41.836289548798113, 69.197678273827108, 31.32522051118902],
[44.933739003053383, 82.304262181531868, 46.662125485783939, 25.216812874514684], [37.715123702749331, 0.33654002188789439,
77.616411403471773, 19.152072401340583]], [[92.715182555824981, 51.479018971675195, 58.389552448640487, 11.079825716836668],
[66.120381536086015, 54.696122559623113, 74.602124135737157, 46.764404847359458], [92.441508878592927, 49.13843332363826,
84.277334637691311, 61.375020008040991]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[62.928329908936867, 78.142232594489769, 23.694870846158736, 77.379623356172573],
[11.343774294284144, 5.0141456599208922, 65.791042346980248, 72.904521530203226], [46.165649854154751, 46.031503262450066,
64.12361229840144, 51.813579296266198]], [[22.679300826571335, 62.977332064943198, 13.771125130940399, 59.844651806488763],
[14.177003870203592, 30.872939480711043, 76.89257820864357, 21.359624412764553], [64.357528521726167, 45.754541308463061,
86.917154454162898, 62.525134832715636]]], [[[75.962390497323355, 70.145694672660483, 76.932538896196164, 61.719435975622567],
[77.812655042655194, 15.285729007526603, 50.390239206343267, 62.704163646191077], [49.67778501460851, 46.415926037609047,
56.588556029575471, 27.934863117344474]], [[76.060984285811514, 81.295553924710816, 69.918265989518105, 83.458206572989525],
[63.469111974419398, 69.954750106734039, 31.380947651740421, 19.198733624736676], [64.480248540295207, 13.727409292553201,
31.845984674993723, 65.803516596296177]]], [[[99.122756107881074, 86.808131124216828, 1.4321294301423275, 8.3438957972984138],
[34.503440819741336, 35.67099265092223, 48.831668912254365, 14.139212054299726], [98.020513665211695, 25.954655701381547,
1.3758192696653002, 95.607029783574006]], [[49.7055983523964, 12.62977930442664, 26.742962982817151, 83.708869974268808],
[40.504846807543508, 68.747127993174473, 99.752608339104768, 95.244092191429729], [53.238233591188212, 34.920347644790411,
10.5293904374393, 9.2580418923770118]]]]))
res=log10(arg)
ref=Data(numpy.array([[[[1.958644156632638, 1.7365983686236373, 1.1259502572038609, 1.7102393735278381],
[1.9043452451825991, 1.9146734141614923, 0.92203305583094985, 1.9764912927969005], [1.8891513637839643, 1.3216524721079508,
1.8744605672571355, 1.9449950058872605]], [[1.0642757674116168, 1.4697205844431256, 1.4981820269365049, 1.5928549288337999],
[1.5134013769682526, 1.7659238973023306, 1.9456953871829765, 1.5532358724009305], [1.9446392105900805, 1.6241457263418644,
1.8825573542762462, 1.6197657044633726]]], [[[1.8680900938889471, 1.4730803374132002, 1.0784317358816056, 1.7534208700934903],
[0.78611474534376435, 1.7883105132528869, 1.3884740047472364, 0.99929199314012029], [1.4669300824636733, 1.8403541632373015,
1.640746733021623, 1.9843179086742269]], [[1.9229745598717729, 1.5179010966078323, 1.5329676600131747, 1.2212284266018456],
[1.8100630090021725, 1.6328479979852315, 1.670347495447966, 1.9636920664836188], [1.9844622657395339, 1.6320074182054687,
1.8646740314178081, 1.8666762251564302]]], [[[1.9454752618046733, 1.6215531600863007, 1.8400915232171524, 1.4958941368555099],
[1.652572558384511, 1.9154223260262504, 1.6689645172636371, 1.4016901956994581], [1.5765155365073076, -0.47296328136118848,
1.8899535592530265, 1.2822157748461316]], [[1.9671508577724528, 1.7116302620231869, 1.766335146430758, 1.0445329290917416],
[1.8203353509269165, 1.7379565400327668, 1.8727511932541194, 1.6699154116173478], [1.9658670256463047, 1.6914213057918996,
1.9257107921182273, 1.787991646709524]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.7988462059099257, 1.8928858152534429, 1.3746543458291129, 1.8886266114703387],
[1.0547575768197655, 0.70019694597262339, 1.8181667671078665, 1.8627544640480471], [1.6643189533177833, 1.6630551579903574,
1.8070179796377215, 1.7144435945053167]], [[1.355629661691661, 1.7991842584914508, 1.1389694245123259, 1.7770253444715181],
[1.1515844579186774, 1.4895779815869408, 1.8858844230448231, 1.329593611797627], [1.808599357832434, 1.660434205884993,
1.9391054996884767, 1.7960546369405848]]], [[[1.8805986233038012, 1.8460010205206747, 1.886110065083187, 1.7904219485998918],
[1.8910502341136941, 1.1842861559437698, 1.7023464199899885, 1.7972963795632348], [1.6961622236947298, 1.6666670194238626,
1.7527286121825749, 1.4461465476300437]], [[1.8811619412061271, 1.9100667945637591, 1.8445906490239146, 1.9214690479550947],
[1.8025624219423253, 1.8448172095369506, 1.4966660544104895, 1.2832725829781746], [1.8094267027962698, 1.1375885826765326,
1.5030546817757668, 1.8182491033032764]]], [[[1.9961733690924155, 1.9385604064782116, 0.15598226954758712,
0.92136887176784188], [1.5378624067983235, 1.5523151950355665, 1.6887015673518488, 1.1504252079046915], [1.9913169740527836,
1.4142152721675985, 0.1385613878659461, 1.9804898262088639]], [[1.6964053061705062, 1.1013957616632397, 1.4272095231877091,
1.9227714792212125], [1.6075069939758382, 1.8372545596256498, 1.9989242604737723, 1.9788380467284004], [1.7262236374426407,
1.5430785586149354, 1.0224032299396042, 0.96651914162145247]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank0(self):
arg=Data(-77.2124777804,self.functionspace)
arg.setTaggedValue(1,-76.5223591123)
res=wherePositive(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,0.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank1(self):
arg=Data(numpy.array([95.507404522977254, 85.699228977736311]),self.functionspace)
arg.setTaggedValue(1,numpy.array([22.570768490261898, -91.124851922506281]))
res=wherePositive(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 0.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank2(self):
arg=Data(numpy.array([[-76.258485344070493, -13.031569421642786, 56.331314317015853, 76.297860126066155,
88.130561052243763], [-6.1668191468469757, 54.331037054715466, -81.929096086646751, 11.266746882647325, 48.963064080280049],
[60.302120288359191, -98.222376211103324, 24.902263686516406, 76.321693298041907, -15.612529577369273], [-89.36373926383007,
-99.797095905565556, 55.669412249479365, 73.050408854136265, 58.641360635396893]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[84.521756116539905, -21.50523951337766, -71.758529828844189, -31.116527593639944,
73.480533241007663], [61.356497085608538, 87.01679183964643, 73.718163356212273, 65.825276106677222, -67.838266379557695],
[94.900878893099161, 6.6085152597015195, -16.431260589637816, -60.728182658412621, -20.252278299611689], [16.99540909074102,
-92.327824606679144, -84.194337061595093, -99.086577441520987, 1.4609814172980435]]))
res=wherePositive(arg)
ref=Data(numpy.array([[0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 1.0,
1.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0, 0.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 0.0, 0.0], [1.0,
0.0, 0.0, 0.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank3(self):
arg=Data(numpy.array([[[80.508473585546227, -73.527464319061067], [-63.562066924370612, -27.980541518495002]],
[[-41.335157425780203, -43.662467900732139], [19.157012696460683, -20.184083339906508]], [[35.870058595838856,
72.278036946039947], [75.339493834805268, -9.1707737241088836]], [[-68.38683588297539, -47.88605412318423],
[-20.399875642984753, -29.241844531878812]], [[-67.76044429517556, 55.107326245665774], [59.476906111528308,
65.132080499441145]], [[39.011636203343926, 68.793212772548998], [-5.2117301620619116,
-37.964739068093408]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-43.212061592928961, 66.640597663557344], [-43.214739911125989, 71.228530019395464]],
[[86.72455453389685, 0.070521918497504998], [18.131949004592585, 67.642647170226724]], [[-20.485683043230935,
-76.185964145658346], [1.5005108312435596, 24.688848573063282]], [[86.368146458112335, 12.287053770624041],
[65.053528607732602, -40.176824870036555]], [[67.412368199122028, 93.02485737256805], [2.3354688446274565,
-77.333138418682523]], [[68.799317717343797, 50.656492146642165], [-11.239017823949453, -0.61920809407223487]]]))
res=wherePositive(arg)
ref=Data(numpy.array([[[1.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [1.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[0.0, 0.0], [0.0,
0.0]], [[0.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [0.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.0, 1.0], [0.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [1.0, 1.0]], [[1.0,
1.0], [1.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_wherePositive_taggedData_rank4(self):
arg=Data(numpy.array([[[[88.580956068056565, 14.00279382125251, 99.108011223826338, 45.511681652958828],
[82.854668978990787, -71.320284703817521, -47.231688078520051, -87.449857804724985], [93.257616694753921, 52.060412772871643,
18.537844019590111, 53.497901549669848]], [[-46.91724689666372, -75.312992998386903, 24.379946633037505, 3.6136809288494618],
[55.419075241203274, 64.655875544211057, 5.6929661553654682, -80.668713367017716], [9.6958327067133041, -69.61412534721569,
-39.445219790469352, 87.567956888590658]]], [[[-73.321457711307843, 10.82305253374048, -40.400284930212905,
-92.490874982007981], [-79.425928971727643, -75.222388693443605, 89.503284861115134, 83.502961391303643], [-88.448173270777147,
-50.193426055655976, -70.923108466792598, -25.976738197547292]], [[-8.5488119421924864, -1.9838167877165915,
-56.838230691876412, -35.192343099118673], [-14.387471763442306, -65.661449017261418, 75.22011478664038, -84.87320516882086],
[98.450531686197365, -81.019483890591289, -94.982842703436916, -49.156850403858819]]], [[[75.118284154717031,
-51.311615796136792, -89.182477325683962, 55.44041573353897], [-80.77776129565197, -34.097004704596088, 75.361574051712552,
-16.248241864062635], [84.169901667127789, 74.398090927221261, 86.145123497406473, 88.071402053067715]], [[93.648624647556488,
1.6348597078223719, 6.0244027607641755, 19.383999786570769], [-41.76041050584827, 10.228798270020405, -47.174639917060254,
-7.0250102695790275], [-48.870699185907625, -19.365332607009293, 51.663276846691986,
-68.319951789687167]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[43.072392507789715, 25.44897255914222, -92.365292140693199, -72.28679201432702],
[-35.283625661883562, 51.080784351734991, 94.294048609912153, -48.875639845246745], [54.267354274548921, -77.595839033594572,
13.255608540993677, -79.509755448949591]], [[76.273739615561396, -51.362336359893511, -85.195354636261797,
-4.4124416020654849], [-97.854643345235729, -10.568395289402361, -79.904773298276851, -37.753686446232606],
[64.250602682004057, -79.115735111510105, -32.503923559859047, 90.214123166503839]]], [[[39.304515721103343, 85.49840367909664,
60.966173887100808, 4.4734960523447711], [53.114809276601221, -14.423789459082229, -13.61152991089152, -96.486812903270419],
[-52.378017052068572, -0.16685024940963444, 2.2217407671002007, 2.7128133952607953]], [[58.208300545381121, -23.46308457904766,
-67.68416120310016, -35.150913017323049], [-18.407699905877124, 1.6451869874854879, -1.401899624666143, -87.412868064712512],
[-65.336170807327917, 68.755684784091613, 85.913136752325443, 27.997231935596872]]], [[[-66.686788600040472,
6.9245385685220668, -75.689596750307246, -73.922470171071836], [-56.830071118701973, -87.957208168819264, 15.670539647819766,
-25.0926801353923], [-9.3946841261667942, 81.217979881426032, 31.881116652908219, -94.330057102451676]], [[-13.101408221863963,
5.3815053309403993, -42.53780805955558, -33.796637768394746], [72.590706488145599, -33.171908847280093, 38.102432612245622,
-71.169285857339815], [-54.513514454446252, -15.087111212827736, 68.23763859582499, -73.63388136632733]]]]))
res=wherePositive(arg)
ref=Data(numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 1.0, 1.0]], [[0.0, 0.0, 1.0, 1.0], [1.0,
1.0, 1.0, 0.0], [1.0, 0.0, 0.0, 1.0]]], [[[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0]], [[1.0, 0.0, 0.0,
0.0], [0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]], [[1.0,
0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]]], [[[0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0], [0.0, 1.0, 1.0,
0.0]], [[0.0, 1.0, 0.0, 0.0], [1.0, 0.0, 1.0, 0.0], [0.0, 0.0, 1.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank0(self):
arg=Data(-19.2850342868,self.functionspace)
arg.setTaggedValue(1,-31.7600922133)
res=whereNegative(arg)
ref=Data(1.0,self.functionspace)
ref.setTaggedValue(1,1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank1(self):
arg=Data(numpy.array([-69.991852168228164, -51.135726516141467]),self.functionspace)
arg.setTaggedValue(1,numpy.array([64.371955068626278, 56.155825493201263]))
res=whereNegative(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.0, 0.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank2(self):
arg=Data(numpy.array([[48.844728607913282, -72.785354714899881, 53.3081372120038, 18.65599332913655,
-46.488345451249288], [-82.483069621758148, -33.022373579278181, -62.408982644197899, -30.801150776046654,
-3.1747181449523367], [68.051986644816708, -10.324492516248156, -35.538799676186628, -76.221649010357453, -10.365176815811154],
[12.925649512488647, -69.48406607854993, -14.171821915240514, 66.552057082826508, -98.385243996883332]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[63.457797271160132, 61.751200439630537, 75.390084871615102, -50.818227552321105,
64.870767673506009], [-37.606099936006679, -75.587774814583128, -30.927974509536099, -95.537626302784801, 85.46249790652584],
[-78.74170463945444, -96.246956122658901, 26.26558952019225, -96.296602166810459, 28.778665120929929], [13.299637195309444,
63.658102616485678, 86.796794951252622, 49.68308177081957, -86.280121323311391]]))
res=whereNegative(arg)
ref=Data(numpy.array([[0.0, 1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0, 1.0, 1.0], [0.0, 1.0, 1.0,
0.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 0.0, 0.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0, 0.0], [1.0, 1.0, 0.0, 1.0, 0.0], [0.0,
0.0, 0.0, 0.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank3(self):
arg=Data(numpy.array([[[69.353620509386616, -74.080076314847517], [-30.249703014852415, -95.672525613399003]],
[[-8.5039415761560377, 60.253313051648774], [-13.801342152251323, 40.764779434191979]], [[-36.581197219625516,
75.047667541458054], [-77.793778451165309, -72.594277712095419]], [[-72.619314204148793, -14.806208252588647],
[87.915581023315411, 95.105365322376201]], [[15.147306304672597, 14.666885700887903], [81.180471023319853,
85.165436080616928]], [[43.823915191016482, -49.98290658400564], [-72.588576349996117,
96.137982642309737]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-1.2728722356096398, -60.492198430412984], [24.24667632089907, 27.091663987424013]],
[[-32.679381620480711, -97.47865445886741], [-56.075348674988426, 38.715773862053993]], [[16.009087713355214,
-68.066576558113326], [25.559656695696759, -9.5774290533191078]], [[-52.544021441893761, 47.869839568114628],
[-72.606586250159438, 18.849506685859737]], [[-73.113930006549779, 4.602906873284013], [-56.38605187693679,
-27.367675802071062]], [[70.16996004059547, 60.366327688828079], [15.101213546349101, 72.59226569598178]]]))
res=whereNegative(arg)
ref=Data(numpy.array([[[0.0, 1.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 0.0]], [[1.0, 0.0], [1.0, 1.0]], [[1.0, 1.0], [0.0,
0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 1.0], [1.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[0.0, 1.0], [0.0, 1.0]], [[1.0,
0.0], [1.0, 0.0]], [[1.0, 0.0], [1.0, 1.0]], [[0.0, 0.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNegative_taggedData_rank4(self):
arg=Data(numpy.array([[[[78.567215316552364, 73.624001898494328, 6.6089006903017093, -16.066074308908668],
[4.6493463096686014, 90.440517625816341, -39.347037075742385, -33.297969929859519], [64.699949994741132, 29.115653652245214,
37.822018084896769, 40.128249957713678]], [[-55.450069727212096, 81.466719888892953, -14.393705514447504, 50.041870644315622],
[-26.112384871019117, 91.0126228352174, -89.730765921875076, 49.059538764459973], [38.483838119837088, -96.69931018125024,
20.572376725250095, -19.453405707808002]]], [[[13.464674311866403, -63.957052627899927, 27.769891567982711,
-33.550495063440906], [43.131655313012601, 4.7880717355257048, 11.709216606284343, -73.375330948322741], [24.471638138818889,
-70.587099709547374, 42.297065361106633, -34.039431318624949]], [[21.857294994809905, -19.704682667449276, -86.108666845333829,
-75.436492450552578], [87.94303965840291, 97.530458057774098, 25.97064557505557, -36.945527429857819], [90.911480668328323,
6.1671903724853223, 25.709485934911285, -21.355346056419705]]], [[[67.782998886788846, 70.917380141486149, 13.823579458254926,
18.402548374224679], [-15.060657679519679, 82.09963819729029, -31.92180428664399, -97.536671374116139], [37.669076925828392,
-75.58771930699335, 45.895049803831114, 35.48815045691137]], [[12.714019161158106, -57.944653564941675, 62.430334339808155,
-66.857496337271897], [-6.4429554795663648, -8.3994486590568158, -80.192945429058966, -93.438462560326158],
[34.875330751872951, 69.588212740586386, -70.503265404744013, 35.080768936736405]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[89.523458495532452, 6.8140581760945906, 52.278561982811112, 1.9103504285077975],
[-31.826956501731047, 58.297165172424911, 29.759024667767875, 0.92087730333993534], [28.957424646854918, 94.135111344573943,
-32.433995320712384, 67.081234380844705]], [[11.644557903097066, 56.050511369559786, -11.185754045196305, -94.014631510042364],
[-89.421458369162281, -27.806019206551923, 42.132488895560329, 37.020232240255524], [43.230885088291984, -83.189373937963836,
-74.094138681022528, -14.531760465098415]]], [[[-26.981360981714403, 24.064730821609444, -21.105581216059704,
-97.174757209589899], [33.300290491855606, 10.01590267931401, 51.489118545402135, -96.912506834915362], [47.653206939723475,
64.688747326811637, 94.943693671280016, 47.03846492475401]], [[-35.473632387755515, 72.503085095886973, 4.845984081191105,
64.852159504672017], [-19.964052254250646, 84.483169362896547, 73.78740822181058, 45.240727131786315], [-13.995221221821026,
-34.521569172453638, 98.500596615631622, 66.324330733855049]]], [[[-16.964585323232882, 26.406760086703088, 20.25984200782429,
-62.287754490513514], [-2.4701333556092777, -77.61548111631889, 86.671403323307715, 50.284535309177016], [-39.214050892482689,
-36.902295671557624, 26.750130444414737, 91.76317471624742]], [[50.615056318343221, -90.898178535525375, 94.958720223937036,
-93.80724680506188], [4.8266070012118405, 10.075720310299204, 42.099211642413536, 10.006938668548315], [55.032904164362009,
11.263981513981918, -63.130755368899848, 81.657868184177858]]]]))
res=whereNegative(arg)
ref=Data(numpy.array([[[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[1.0, 0.0, 1.0, 0.0], [1.0,
0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 1.0]]], [[[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0]], [[0.0, 1.0, 1.0,
1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 1.0]]], [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0]], [[0.0,
1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]], [[0.0, 0.0, 1.0,
1.0], [1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]]], [[[1.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0]], [[1.0,
0.0, 0.0, 0.0], [1.0, 0.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0,
0.0]], [[0.0, 1.0, 0.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank0(self):
arg=Data(-78.1544615646,self.functionspace)
arg.setTaggedValue(1,-78.4151851666)
res=whereNonNegative(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,0.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank1(self):
arg=Data(numpy.array([17.602013218893518, 29.547786128150307]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.26203337714731845, 57.479799350895149]))
res=whereNonNegative(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank2(self):
arg=Data(numpy.array([[-0.38327384036398371, -34.645139490102878, 65.429233454558641, 95.235253228502785,
29.097950082051085], [-43.855663256862009, 55.686325731330783, -67.824366444401477, 16.702344987904212, 22.218041380401374],
[78.969508595512451, -60.305312026473089, -59.523292190062982, 74.808651981782504, 79.872897022513683], [63.606277951467064,
-76.462470884188775, -72.691576180524351, -49.079190521880697, 45.394053081951711]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-55.552381227989599, -54.271211889675719, -12.582403003252466, 8.5917236679262601,
-22.455892824367908], [-16.776071759898258, -19.553013266124879, -21.813530512599172, 9.6881948240011582, 98.11892272389133],
[-4.3416722922198403, 38.725023582219706, 1.4757500981863529, -39.708613920267013, -80.375084634623164], [-99.616497105650254,
-57.007203450971453, 22.87724914844766, 83.97051539516184, 37.346070066579273]]))
res=whereNonNegative(arg)
ref=Data(numpy.array([[0.0, 0.0, 1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0,
0.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 0.0, 0.0, 1.0, 0.0], [0.0, 0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 1.0, 0.0, 0.0], [0.0,
0.0, 1.0, 1.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank3(self):
arg=Data(numpy.array([[[-13.892573110308774, 43.478804863362512], [-36.040457512639264, -51.981819940572649]],
[[-62.169201833468925, 57.986370401331953], [-4.1036429741114802, 48.023435568940073]], [[-80.645707040180412,
-10.141695439237907], [-4.8056301100563417, 48.75486098147897]], [[-91.963242822660888, 88.059478204857612],
[43.320911501208769, -22.30145015628095]], [[-86.786948436003428, 31.120205822215894], [34.433146395475489,
87.18740518030657]], [[-9.4220225035139435, -20.184163123649284], [-19.921535324926339,
25.857031424846014]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[41.114617505053531, 77.617269604848303], [-73.085362575419381, -7.1084361894678381]],
[[43.213365294039818, 70.52545134609511], [99.082934876352368, -17.971939602273878]], [[33.829613730905436,
30.404114402478598], [-57.246747638382956, 34.541916089376258]], [[42.317171529871842, -54.768491746554183],
[-23.879054879709557, -50.383761075240805]], [[-57.28165027876075, -45.225575620770144], [-31.710104697280144,
-75.917892701858989]], [[19.07744929226061, -71.495870306203571], [-10.602129940209977, 68.760350259599107]]]))
res=whereNonNegative(arg)
ref=Data(numpy.array([[[0.0, 1.0], [0.0, 0.0]], [[0.0, 1.0], [0.0, 1.0]], [[0.0, 0.0], [0.0, 1.0]], [[0.0, 1.0], [1.0,
0.0]], [[0.0, 1.0], [1.0, 1.0]], [[0.0, 0.0], [0.0, 1.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [0.0, 0.0]], [[1.0, 1.0], [1.0, 0.0]], [[1.0, 1.0], [0.0, 1.0]], [[1.0,
0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[1.0, 0.0], [0.0, 1.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonNegative_taggedData_rank4(self):
arg=Data(numpy.array([[[[31.064058622394185, -23.221839301023678, 68.763472697005085, 59.65832614821349],
[-40.149807242440751, 55.03145271535027, -64.4959578656334, -2.1518498706666946], [55.404104858000437, 49.396652662387567,
-49.624666695838648, -62.848812119970042]], [[-91.389556659029211, 72.332633311747458, -52.32546838722876,
-38.634885477981307], [-71.603348675002792, -2.4791274164933697, 24.130510966659983, -5.0112124293864042],
[-25.546226826848041, 27.236363562768304, -61.886267845384936, -60.880197873757623]]], [[[27.658384194987519,
84.510658257152215, -58.927887590834139, 49.108023674331434], [-70.267015166041148, -79.457401602686048, 56.127202277200126,
25.839278403805395], [26.875154742009613, 1.4487959347651866, -88.070139902975072, -38.510490284412093]],
[[-47.959868897350646, -5.7819206338316036, -56.800386597248306, -55.237835036677076], [28.516757929967042, 90.778719180512979,
15.739130130788766, -74.040382579111679], [-71.111296754698344, 12.107778742152561, -79.104526891456999,
-41.005699875297388]]], [[[18.567303332583634, -73.676314650547354, -97.749794073567415, 59.159591299795522],
[29.97705193558275, 90.413624368349787, 24.306766472883965, -69.797371947362393], [-39.289453466471549, -40.65110745107021,
-59.028684721855718, -20.270569577803272]], [[83.125187150431088, 27.563776588259614, -9.5176832989115212, -90.5959013499223],
[-69.524801083902418, -31.838028445081164, 14.626332705121882, 38.303372039757761], [-9.2077846008452156, -84.558594581494532,
-15.169395910120073, 39.522809676227837]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-69.876802801564537, -5.9375511403534773, 37.812297768315801, 17.556587568915376],
[53.958312857432333, -19.376205062615014, -22.920706818379031, 68.341061958106707], [49.048432585399325, -25.760395137889574,
-82.684148984451667, 70.938532287692567]], [[79.604355123625282, 28.576654650134685, 50.589177240580227, 31.904425649228699],
[49.30738620973429, 22.581941862820869, 70.638562695750181, 65.557713647175206], [58.121816082877899, -69.657739078881974,
57.259334058428038, 37.031029061370617]]], [[[-57.067719073541355, -83.590170196214359, -87.889020994197423,
77.03678490031848], [23.421242774091994, -64.962452658702915, 43.744442519067377, 67.095949407897251], [-9.97401351270058,
-84.15959986185959, 46.118179052522692, -52.922820472672541]], [[52.186352698780212, -8.0958035404479176, -33.331663389002927,
-76.607138954123229], [-20.87488584894281, -63.126524249384097, 8.0428232453640902, 52.19000132579842], [-60.91173907515013,
18.081845081324616, -44.231668576405255, -37.550260961693603]]], [[[-27.309398591668639, -5.5219138202315321,
-87.956648017701525, 10.89423659338236], [32.139714674893639, -17.347998935818666, -41.884445570079933, -22.512510804223936],
[45.623599790055323, -34.102558427374177, 87.032277901218464, -25.231126136650801]], [[-82.687583433642246, 10.301272646701861,
-80.557394277641677, 58.389873199971959], [61.375478497215084, 78.589623746356949, -90.675956160020263, -73.180287451090507],
[-60.580572035442451, 60.154646880978504, 59.209979266176958, 79.32948990654927]]]]))
res=whereNonNegative(arg)
ref=Data(numpy.array([[[[1.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [1.0, 1.0, 0.0, 0.0]], [[0.0, 1.0, 0.0, 0.0], [0.0,
0.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]]], [[[1.0, 1.0, 0.0, 1.0], [0.0, 0.0, 1.0, 1.0], [1.0, 1.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [1.0, 1.0, 1.0, 0.0], [0.0, 1.0, 0.0, 0.0]]], [[[1.0, 0.0, 0.0, 1.0], [1.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[1.0,
1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 0.0, 0.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 0.0, 1.0, 1.0], [1.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 0.0, 1.0, 1.0]]], [[[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0, 0.0]], [[1.0,
0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 1.0], [1.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0,
0.0]], [[0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 0.0], [0.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank0(self):
arg=Data(59.5300640359,self.functionspace)
arg.setTaggedValue(1,-2.15432794908)
res=whereNonPositive(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank1(self):
arg=Data(numpy.array([-35.653771579383431, -57.809515571795679]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-58.726261256725685, -4.9867937639187971]))
res=whereNonPositive(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank2(self):
arg=Data(numpy.array([[66.312754795349093, 82.034605718677227, -46.821806782974143, -87.117091329676626,
79.995435394657591], [23.928044089256971, 37.587150540719591, 8.7201565013642579, 16.527262198522521, 43.468010592942164],
[-58.095116913299293, 29.439827568578721, -0.091616442994578051, -54.761434852877166, -11.808816784702444],
[-69.299763869285897, -13.113050785108982, -5.1976088703165289, 21.099974177713761, 6.0733045244008679]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[73.896405389024125, -46.001844583629413, 98.809723817761267, 30.537291415620416,
40.574672076255666], [55.468937066548705, 11.801387698915406, 20.974309113460365, 77.660614559427415, 28.161039265035498],
[70.623954948137481, -37.457034114261312, -40.898398662139201, 26.109057449542121, 10.398162551919015], [-63.730141883353532,
62.137449485782696, -90.96748737577029, -20.908383264888286, -70.74195335323418]]))
res=whereNonPositive(arg)
ref=Data(numpy.array([[0.0, 0.0, 1.0, 1.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
0.0, 0.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0, 0.0], [1.0,
0.0, 1.0, 1.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank3(self):
arg=Data(numpy.array([[[25.436500051631739, -52.748846289150244], [41.085054249733929, 37.28010897113333]],
[[99.576478076717791, -9.9679696571838718], [-61.344641894951302, -3.7437435600148774]], [[-50.906089775590772,
64.943203676394404], [42.050588774194182, 63.118383844777753]], [[-35.778055648047726, -63.920957612224157],
[15.37985889218254, -68.424348417967053]], [[-56.55550570286416, 42.304324718922885], [70.622324649491162,
-12.596055870540511]], [[34.100758417960179, 8.1628573265152085], [-32.962482469141108,
81.284708270077232]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-43.479659251216304, 54.528183443358017], [-57.05484443799638, -63.991441300924265]],
[[-26.9579882337963, 26.355076456251851], [-46.233015073952679, 36.53708746210657]], [[-24.786797130371284,
18.160754379725191], [34.747212955275302, 87.480335155520635]], [[28.625134809911913, -60.971379031499382],
[-88.66690636480152, -2.9400365936678128]], [[-51.567858740292259, 4.1984826727889129], [-31.243781268304645,
-95.989957539907223]], [[-17.551752211418361, -80.268436137583237], [75.208631120335241, 46.121751987400842]]]))
res=whereNonPositive(arg)
ref=Data(numpy.array([[[0.0, 1.0], [0.0, 0.0]], [[0.0, 1.0], [1.0, 1.0]], [[1.0, 0.0], [0.0, 0.0]], [[1.0, 1.0], [0.0,
1.0]], [[1.0, 0.0], [0.0, 1.0]], [[0.0, 0.0], [1.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 0.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 0.0]], [[1.0, 0.0], [0.0, 0.0]], [[0.0,
1.0], [1.0, 1.0]], [[1.0, 0.0], [1.0, 1.0]], [[1.0, 1.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonPositive_taggedData_rank4(self):
arg=Data(numpy.array([[[[-97.589886785634022, 75.704136798826028, 72.570441980077703, 41.142807006068949],
[52.186886198585341, -81.13583733115226, 19.645361321685243, 25.969196500252664], [90.98310621636935, 91.207424784752419,
32.52582221573229, -46.376838969725199]], [[-24.243208827988809, 20.526435158270147, 46.439239753806106, -33.65061411585431],
[-50.041229640131604, -49.061380582231109, -39.874744210873516, -36.550968632191804], [32.736481368599613, -75.040028568765322,
46.201877528037613, 66.414419319197265]]], [[[2.5037656480006802, -29.171488642086899, 37.367016013632622,
-70.265534203155468], [-5.0615104556033259, -75.110477824991989, 7.1287630039422965, -17.239036014080057],
[-28.096242015816813, -7.528715826985021, -85.497229220965764, 22.546758335502105]], [[74.817640632876163, 40.267037402027995,
10.981140051252439, -15.739056364934243], [-11.202000266050078, 76.223681897029763, -17.41622944432541, -3.2765461050635594],
[-25.659541213077148, 80.506749270081087, -1.000794733449311, 98.399202561993803]]], [[[-46.153348025882913,
64.301872580934884, 67.551433419371364, 86.776352830516998], [28.558361132430576, 78.958726721940224, -35.553376040555037,
-17.553520543738372], [11.165619248232318, -97.969411066483929, 50.903682207966739, -10.289318584097984]],
[[22.570215658384171, 75.89748134306177, -89.388639375388706, -54.6472406344094], [-33.869164257400811, 38.645420950299723,
-3.8841219051897156, 28.668559253094486], [-82.413441576756185, -78.700513819287238, -75.50816982500163,
-52.061106946967861]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[36.219375399209952, -17.824472494521174, 82.058503767107737, -71.379484930391683],
[13.550225541993569, -67.628807348691709, 20.055125227804993, -29.234664858312229], [-18.292530127300381, -39.897170713739548,
-39.328945564903783, 2.857031958593808]], [[49.335977298056065, 80.508916415103982, -18.493351689982831, 51.621759203569923],
[33.973864190922626, -7.4031669533306257, 11.727041061390153, 64.639807865470686], [42.573651614588044, 41.852110298728377,
-64.065734918246676, -6.9916640699874506]]], [[[41.50002565593519, 87.840585919409989, 2.0581894170644546,
-3.9639341014441811], [8.7028087633685089, 69.156286173220167, -83.153991217442822, 14.406484280025737], [-34.420051853304614,
94.578630567732802, -48.230261021352902, 53.242310400679315]], [[-84.442282958911122, -99.822594295799561, -39.959520090517287,
-90.546856339981431], [20.518433145652864, -98.471982254610907, 22.178227167774111, 71.388198500404911], [69.127077441526353,
43.428513943743894, -71.615864538073225, 20.113448559972809]]], [[[89.953272044597895, 43.16167804611743, 53.919371581222919,
1.3311125255161187], [-95.465237294020739, 67.804004576510494, -14.742900384283658, -27.263059934517742], [69.754390418730139,
79.35923926098971, -51.386888599874567, 51.913251831821356]], [[16.947530613873013, -39.040428548927153, -46.681825859807603,
-77.418328228167098], [62.579502644870047, 54.635165987247035, 10.68424789801503, 66.321201110893043], [78.476241287880896,
-29.449312093617081, -59.013155676678885, 6.6196016328634357]]]]))
res=whereNonPositive(arg)
ref=Data(numpy.array([[[[1.0, 0.0, 0.0, 0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 0.0, 1.0]], [[1.0, 0.0, 0.0, 1.0], [1.0,
1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0]]], [[[0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 0.0]], [[0.0, 0.0, 0.0,
1.0], [1.0, 0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0]]], [[[1.0, 0.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0], [0.0, 1.0, 0.0, 1.0]], [[0.0,
0.0, 1.0, 1.0], [1.0, 0.0, 1.0, 0.0], [1.0, 1.0, 1.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 1.0, 0.0, 1.0], [0.0, 1.0, 0.0, 1.0], [1.0, 1.0, 1.0, 0.0]], [[0.0, 0.0, 1.0,
0.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 1.0]]], [[[0.0, 0.0, 0.0, 1.0], [0.0, 0.0, 1.0, 0.0], [1.0, 0.0, 1.0, 0.0]], [[1.0,
1.0, 1.0, 1.0], [0.0, 1.0, 0.0, 0.0], [0.0, 0.0, 1.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [1.0, 0.0, 1.0, 1.0], [0.0, 0.0, 1.0,
0.0]], [[0.0, 1.0, 1.0, 1.0], [0.0, 0.0, 0.0, 0.0], [0.0, 1.0, 1.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
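# The whereZero tests follow the same pattern: whereZero(arg) returns 1.0 where a
# component of arg is (numerically) zero and 0.0 elsewhere. Since every argument
# generated below is nonzero, all reference values are 0.0.
# Minimal sketch with hypothetical values:
#   whereZero(Data(numpy.array([0.0, 2.5]), self.functionspace))   # components [1.0, 0.0]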
def test_whereZero_taggedData_rank0(self):
arg=Data(-60.3073206028,self.functionspace)
arg.setTaggedValue(1,-63.6162748199)
res=whereZero(arg)
ref=Data(0.0,self.functionspace)
ref.setTaggedValue(1,0.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank1(self):
arg=Data(numpy.array([57.478569368864356, 25.206882696210428]),self.functionspace)
arg.setTaggedValue(1,numpy.array([26.231910549413783, -27.085991237832573]))
res=whereZero(arg)
ref=Data(numpy.array([0.0, 0.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.0, 0.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank2(self):
arg=Data(numpy.array([[23.125617746775646, -41.981698421134659, 88.634083806667888, 85.022668924224405,
35.388273276293091], [-85.767444923711466, 47.859030088870099, -69.395187041220851, 35.5734575739055, 24.859215630808464],
[45.04844052116951, -95.695008500839691, -94.766369979921919, -29.956871512177429, -11.074586010585591], [-20.148312524898017,
-79.433644676490502, -19.87738780106119, 58.95117313559922, 50.971789815159298]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-81.469429807666714, 11.07859262128126, 33.849563441706891, 14.848734531164027,
-42.320200232359248], [37.962288693888382, -43.238362945841821, -64.391556397361285, -7.8460700293939283, -20.730397433363208],
[97.393519560018603, -94.167885954290782, 94.002103086540188, 51.422088904276251, 63.729022355064359], [95.391379833296668,
62.703543794846581, -70.921738135430985, 70.232187871319354, 86.806722655888649]]))
res=whereZero(arg)
ref=Data(numpy.array([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0, 0.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0, 0.0], [0.0,
0.0, 0.0, 0.0, 0.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank3(self):
arg=Data(numpy.array([[[3.4027245130060919, -61.09284878347885], [-57.399713635858582, -71.643573213948272]],
[[-11.523234389426221, -44.578343090400388], [83.731526181974061, -65.104529547136394]], [[-12.081015577295332,
-4.172878078793758], [-89.292376152335635, -25.743293720522729]], [[53.199172477134539, 60.186513433215026],
[67.767250940092424, 89.139480682860551]], [[17.059488076066813, 37.190593835102874], [-19.912772973905007,
-65.497513416201755]], [[-25.360470411847373, 43.142714268731936], [21.199116504341944,
37.635585100309612]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-11.26010310706846, 34.128222291421537], [-68.434470407451215, 70.638428466633115]],
[[-77.544908385112606, 29.23464957312018], [-3.0407974101420763, -31.841776259248377]], [[-4.4176903627463133,
37.991200497907613], [-9.6402073259949077, -9.0856737835734833]], [[26.730099650557975, -65.247161722597966],
[-46.62552821590311, -56.733831760674391]], [[-36.874008752740004, -2.7797064670085092], [-64.175546396086474,
-99.28541091199989]], [[-5.5337745528672997, -45.378676661048623], [-90.349005740211496, 97.078047761501324]]]))
res=whereZero(arg)
ref=Data(numpy.array([[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0,
0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0,
0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]], [[0.0, 0.0], [0.0, 0.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereZero_taggedData_rank4(self):
arg=Data(numpy.array([[[[93.831260447485874, 7.7407513574382989, 78.739371854572369, -68.363475865430772],
[72.824139460523099, 69.006620095233416, -67.000181405109601, 95.691658959713436], [-46.580816493258205, -19.62723037000471,
7.1964131702848562, -94.003505042058904]], [[85.06939733259054, -12.729485711394545, -95.590773804649672, 66.129455034149544],
[52.5485700301343, 1.8469556361458359, 59.238187186745563, 0.89938435519005111], [50.763202555174757, 38.515989700998915,
14.509412952688436, 19.098524401100889]]], [[[15.441947994447844, -87.547935450574357, -15.093661946970599,
-34.577822306130841], [47.655788884739167, -13.593073478163831, 73.901883902793401, 50.766658802389429], [93.106292386838589,
-26.449736171409441, -32.523468497737113, -36.825111629796645]], [[-71.149835259772914, -77.966052917274098,
-40.594142361637765, -93.497294871292127], [-37.049924286179639, -49.307577406565684, 68.805856372840026, -83.077598973248371],
[-35.950769604858124, 53.444154742123146, -29.736934427716307, -0.43882835811794507]]], [[[-28.804291026424494,
36.420207954120713, 44.975880956788671, -18.160405554758484], [-15.015205668084675, -36.844405430803782, -55.648827533689385,
-63.666847070332658], [-38.323848308813055, -86.094993931559884, -47.504890814498715, 75.386260617980327]],
[[-49.43361721674431, -48.467520039782322, -13.393183500735859, 33.478259837688171], [-46.591630982573548, -15.732761279461855,
55.398884354877111, 42.656388373806152], [20.973563827725044, -83.810921836893868, 37.036944354976555,
95.055268401462797]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-72.44920515300953, -5.1325597489963855, -70.544555413458653, -21.868457284041369],
[28.059209379223262, -97.012837923502218, 17.364646849616733, 69.883388876193692], [-88.768250111578212, -49.421061117246538,
33.314147890655022, -43.967461259845294]], [[35.724082924424522, 21.20468417199848, 5.277992247636206, 77.828098329437609],
[83.375548593581215, 72.566063535932898, -39.58180677719443, 85.382749592078113], [72.093057622870248, -28.938840377791905,
-22.476983470220873, -96.655105739800831]]], [[[-95.917035925462301, -0.3656737198951987, 78.260689518762831,
-26.841902628320639], [69.925254995666364, 62.325571622342864, 47.660763937485541, 32.260139442261902], [-2.2726094824157173,
8.5002090466555558, -41.642153397299793, 33.220453104115677]], [[22.661303145423545, -52.489538131795044, -89.151747983141831,
18.242363722336137], [-25.178052459037687, -20.34523575497515, 25.391874579437612, -58.809820165710214], [-60.790728856888791,
37.195293760072531, -41.479538487050348, -21.114525244725101]]], [[[-49.820373222887085, -49.810943103539486,
-24.582970051099622, -22.599787936123761], [76.777056975485948, -58.811863993488878, 77.842740611399165, 18.640966616664173],
[-19.158614872609775, -72.976807090542167, -86.531194215051471, 48.429555859657114]], [[85.258816970664725,
-15.780961333046449, 49.948813051783191, 53.155720106784031], [-85.9905021073629, -0.23998617994342908, 82.190464755424955,
63.007615196139739], [-23.037986153437245, -37.536769208240784, 75.375056084992167, -10.052811879961808]]]]))
res=whereZero(arg)
ref=Data(numpy.array([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0,
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0,
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0, 0.0, 0.0,
0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]], [[0.0,
0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]], [[[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0,
0.0]], [[0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0], [0.0, 0.0, 0.0, 0.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
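# whereNonZero is the complement of whereZero: 1.0 where a component is nonzero and
# 0.0 where it is zero. All arguments below are nonzero, so the references are all 1.0.
# Minimal sketch with hypothetical values:
#   whereNonZero(Data(numpy.array([0.0, 2.5]), self.functionspace))   # components [0.0, 1.0]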
def test_whereNonZero_taggedData_rank0(self):
arg=Data(-1.9710533403,self.functionspace)
arg.setTaggedValue(1,99.5842297151)
res=whereNonZero(arg)
ref=Data(1.0,self.functionspace)
ref.setTaggedValue(1,1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank1(self):
arg=Data(numpy.array([-59.49925191560812, 86.042241301467669]),self.functionspace)
arg.setTaggedValue(1,numpy.array([42.554874917129013, -64.6377412204602]))
res=whereNonZero(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank2(self):
arg=Data(numpy.array([[-6.8281814038072071, -59.600553597455487, -83.253618903284348, -55.555714436842266,
-58.64125306605785], [-54.669636874026729, -40.668963536281467, 48.151126090125331, -82.810118365697718, -2.0625309958108886],
[-50.145961443418784, 33.196540210116666, 18.939660902081542, -35.312472223501246, 45.025825447182569], [-91.787750952036063,
-19.219817615082405, 30.739339723723532, 46.808225524785058, -53.347997111730059]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-6.1766328777260071, 12.353983207929858, 87.643004443775652, 0.1054506713352481,
-49.898039891751097], [-6.464367133652658, -99.376428379787214, 81.830552974282909, 3.3503835694606181, 99.424767953367194],
[-23.667088793561319, 65.145198516233364, -10.00780177932495, -69.125279379621645, -70.19911439214637], [-56.661848212973823,
-78.777508065420989, -44.257423096893753, 23.610690037963238, 52.764627315898679]]))
res=whereNonZero(arg)
ref=Data(numpy.array([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
1.0, 1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0, 1.0], [1.0,
1.0, 1.0, 1.0, 1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank3(self):
arg=Data(numpy.array([[[-77.400629617637264, -7.8585991909028223], [79.37731461196492, -78.311221968888802]],
[[-2.2871031341482109, 61.613282578413418], [14.239083629293276, -86.60455578579527]], [[32.648402678335373,
67.120663891666482], [-16.40250641841989, -45.441109646024543]], [[-88.814372300408252, 96.863741115845073],
[-53.568931159701449, 61.772732453745817]], [[12.78118059732283, 61.665805717605537], [81.736967250564334,
-79.838957222371846]], [[-45.746992316765287, -50.359908369594095], [67.084057007701773,
-77.367125763337725]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[20.916835852467159, 98.359772976470907], [-65.641953951612976, 48.35339386982011]],
[[72.394336553366969, 0.15850039108870817], [-37.64849616557904, -37.7540766591151]], [[4.7508355327704663,
31.008319102711397], [-54.917295021552114, -24.534459964429843]], [[19.525286161344553, 63.669539108570319],
[-1.0431050089863732, -17.966268638209357]], [[-79.076564771286044, -45.063188127277719], [-57.520467509927364,
-69.399848959156472]], [[74.966631181955592, -21.675113256460349], [47.3018877491821, -95.419016191439553]]]))
res=whereNonZero(arg)
ref=Data(numpy.array([[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0,
1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0,
1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]], [[1.0, 1.0], [1.0, 1.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_whereNonZero_taggedData_rank4(self):
arg=Data(numpy.array([[[[-56.530041249956064, -75.595773656477377, 57.930726925789344, -35.064711718518922],
[89.927579325782204, -71.292052004298981, 74.426753570770302, 9.9254833972331795], [66.426699960830888, -79.419436482993362,
70.191990839107291, 25.664966939851837]], [[91.588972225813166, 75.24780363003768, 22.708376398136238, 79.829437424982274],
[78.426467246737957, 34.311496583740819, -45.051218540773853, 21.126634987063937], [0.93545608150542137, -64.07783608516722,
50.0880392185164, 3.0383401979216416]]], [[[33.521385232890651, 65.495547288295967, -36.26432133678432, 43.817943477527393],
[25.622359237391734, 46.879767530213655, 44.264770652047645, -82.611848465548164], [8.6931324018650855, 98.705476157468638,
-69.064996470241397, -82.140570519506227]], [[50.341676972542217, -57.113225217844878, 23.496128915773994, -84.4500434098574],
[-42.826308284507533, -40.068614099685277, -64.107129980786979, -64.752370052337284], [-68.77258294388686, -65.093744454055411,
-2.0441147238691144, 38.710454571834248]]], [[[23.964849311323277, -9.8504539937835318, -24.01385095241659,
-65.609734597116542], [-34.482955634964824, -55.357317162052141, -29.891659032054235, -59.722077669432629],
[24.465604000129801, -3.1488488615906647, 90.185493485946637, -9.9469471059439201]], [[47.887647758738581, -18.650373829652906,
-88.084195156027434, -50.216336238949744], [84.137883656735909, -12.149905093038768, 96.23351445652429, -70.450849093320912],
[79.18622708472455, 93.448904090150648, 15.570836279018437, -91.458357264821544]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-12.894979773801623, 48.187305922131372, -22.228691774958477, 71.32250925591336],
[88.198575928561297, -82.879750265641363, 6.0047803940490638, -75.104784095705114], [95.651210249923281, 99.227840476768279,
38.201888712638237, -85.338045871397298]], [[61.890764559936457, -99.084864794308373, 19.908026187604563, 76.48683075285868],
[15.244819962785968, 81.134443755015496, 91.695315009752335, 53.656684202280047], [-13.910795126783682, 62.546356367686997,
57.939671348548501, -16.711735701291104]]], [[[-17.647299335238117, 75.459048312325109, -41.034997451013353,
22.776483937861556], [-97.792781150657731, -89.213116628864611, -36.170629374287323, 76.170885998109583], [51.302094246614928,
73.764119678021643, 2.9931334740095537, -6.7949120092559525]], [[-81.233259812949598, 86.178813783813297, -0.82062800096618105,
95.276937599720668], [25.56603608019212, -69.150407154520252, -97.002071851697821, -38.857149391397485], [86.964544699076953,
-44.217066849378782, -92.21466310897317, -30.366338991012668]]], [[[66.127428481144136, 84.702864167161209, 53.320435341942385,
34.341339969042622], [75.475890485661608, 6.568751254456501, -32.661380753798539, 73.048056732159722], [8.3932556720025104,
86.553622630163773, -96.143353218643952, -12.061654127884765]], [[53.325736920559024, 24.80213757617615, -70.674103395487791,
-11.797716418097565], [-39.44141732563584, -42.670437444648911, 79.49977026651581, 79.898615913406843], [-32.436244300917423,
63.389192944364225, 48.691557489453828, 91.496017284059604]]]]))
res=whereNonZero(arg)
ref=Data(numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0, 1.0], [1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0, 1.0, 1.0,
1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]], [[1.0,
1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0,
1.0]], [[1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
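# From here on the elementwise trigonometric functions are tested. sin(arg) applies
# the sine component by component (arguments in radians) to both the default and the
# tagged value of a Data object; the shape and the tags of the argument are preserved.
# Minimal sketch with a hypothetical value:
#   sin(Data(0.0, self.functionspace))   # 0.0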
def test_sin_taggedData_rank0(self):
arg=Data(-24.7105931613,self.functionspace)
arg.setTaggedValue(1,-37.638505349)
res=sin(arg)
ref=Data(0.40972088744,self.functionspace)
ref.setTaggedValue(1,0.0605693981609)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank1(self):
arg=Data(numpy.array([19.860974255803598, 48.899013130941427]),self.functionspace)
arg.setTaggedValue(1,numpy.array([14.319017737469665, -59.326252904429587]))
res=sin(arg)
ref=Data(numpy.array([0.84758534887649317, -0.97919776342443343]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.98351066065067827, -0.3560220138624291]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank2(self):
arg=Data(numpy.array([[37.890743553866486, -52.175642782800914, 61.917008025503975, -8.538416676807941,
-94.304749798245496], [-17.787570828089727, -19.048274463511873, -8.2634570563295142, -56.253500812466228, 87.627404284894396],
[-14.454217499387354, 73.713310630128319, -52.818033941567855, 90.807246316901796, 59.632923220807299], [2.3430650859352511,
56.726750975618302, -69.98474018040875, -30.128841460819984, 0.11683572211893534]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-60.059131288860598, -78.931098378024842, -99.522738887570867, -9.6007074071729619,
-66.250286193785655], [15.651568400631106, 57.654505938017678, -21.858524591969015, -92.849176312405305, -45.214082756051297],
[-85.045751900057368, 10.170104148330267, 85.540180625403167, 34.743740334373229, 27.680023474288177], [72.313181060961483,
-93.451973592336017, 68.715544032783157, -57.013152797460179, 69.395677045629242]]))
res=sin(arg)
ref=Data(numpy.array([[0.19046098975424755, -0.94296657311066345, -0.7924680880494267, -0.77477635663664268,
-0.056939378452443026], [0.87332421967504115, -0.1974132538348578, -0.91732979816211846, 0.29089958624583467,
-0.33083665313437571], [-0.95015908369497537, -0.99349574918962724, -0.55556403598677151, 0.29450799309098907,
0.057305786038470398], [0.71632946014175625, 0.17714342493014262, -0.76413661669322097, 0.96002319680218495,
0.11657009080686483]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.36056248350542847, 0.38137387117559401, 0.84588700623357704, 0.17502331279847,
0.2733177892111176], [0.056364979209719938, 0.89384032979663164, -0.13223553506078178, 0.98521137495670197,
-0.94308411592712293], [0.22091275938263169, -0.67821155091384655, -0.65726119089937152, -0.18514670365491534,
0.5599375367095778], [-0.056519892938693105, 0.7144278132655969, -0.38895255454538685, -0.44796245325739548,
0.2769693862212248]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank3(self):
arg=Data(numpy.array([[[-55.804968616645148, 58.560992915206214], [42.652192703056301, -91.42882116095241]],
[[39.310441995226739, 33.870993341596233], [80.401033234710297, 73.000828209637689]], [[5.7360515152169285,
82.330874482727353], [-75.426134234758621, 7.5453684113771118]], [[-19.895965390103115, -88.950469683568315],
[31.355932404642459, 36.487846363447858]], [[67.666456279782437, 58.020389340319895], [-37.89476101041673,
-42.399630457776482]], [[-23.830782444196501, 6.0849055767691738], [91.294861085921525,
-52.847710860098182]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-18.125779887526264, -95.420123122001257], [-8.1568796731757516,
32.219735537825017]], [[26.245851241680057, 96.102520961925848], [14.02431043315147, -9.9572364002324321]],
[[17.181359346351925, 47.963801042849468], [-95.527667200507665, -64.204019349910141]], [[-98.658267090216341,
-7.0939733146426107], [-41.783037015039959, -46.517744129299096]], [[-57.202627940362859, 79.223818560607498],
[-70.017222005175654, 23.987327490175844]], [[71.375583584624991, 89.788775552486129], [98.882752617270086,
21.455679838723768]]]))
res=sin(arg)
ref=Data(numpy.array([[[0.67701499649890673, 0.90409941974537544], [-0.97117328078000487, 0.31706594876811195]],
[[0.99917861697072197, 0.63385392022976472], [-0.95812352836612924, -0.67738144661254696]], [[-0.52024157266400017,
0.60476080407034305], [-0.027906925031102141, 0.95275570243286156]], [[-0.8656310935760867, -0.83375573809919057],
[-0.059958148294456545, -0.93606199112953326]], [[-0.99254113222478446, 0.99509611012034227], [-0.19440335819459723,
0.99992954803650558]], [[0.96408030914008547, -0.19698305893325982], [-0.18755672118757361,
-0.53064744896026339]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.66221880833880609, -0.92166239454618104], [-0.95447604942411934,
0.72000463780845769]], [[0.89707725485516432, 0.95995773196345946], [0.9936384606300962, 0.50765297031738676]],
[[-0.99526034460150747, -0.74458387237142676], [-0.95798362538691173, -0.98033782538997782]], [[0.95477283132130442,
-0.72483027934968591], [0.80893159374577939, -0.56970402250150143]], [[-0.6083342836862955, -0.63189999754289639],
[-0.78468240482370322, -0.9108809171944825]], [[0.77140583862359613, 0.96806942866170043], [-0.99700488005111876,
0.51024422262880564]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sin_taggedData_rank4(self):
arg=Data(numpy.array([[[[95.057014858860498, -50.023546676067944, -19.520550887074563, -14.915416158905259],
[-72.276262048791182, -57.778697211921127, -10.589425732964969, 25.789691603835237], [71.432158308304565, 83.59773672148529,
32.062292384526415, -22.527648292677569]], [[-39.074624167039062, 92.303231204531414, -1.4192851682411742,
-68.883629510294497], [46.825962360174429, 85.58307574133471, 83.176640836526758, -93.888543574320394], [59.195891546840528,
-43.884372920271829, 46.885208516026694, -24.330067940056807]]], [[[-85.391419005371418, -52.993590690134319, 41.1653245235631,
70.963880995127738], [-6.8099927112965162, 14.755258748362692, -77.400445539133742, -3.6276152651411877], [-88.775588041032492,
89.080955577757038, 0.97522108268380236, 11.904044693769748]], [[28.114564123404421, 17.406751514150216, -90.824155259332073,
66.590378374684491], [-23.314357242078572, 66.459924224373196, 1.190010463508969, -19.129294185825657], [12.759163310131783,
94.16098679455763, -80.470912052594556, -87.769040453881502]]], [[[-68.103901459227245, 96.524362598603318,
-3.2834594710336376, -25.520289808877067], [56.393892750276962, 17.548302326605253, 15.721717465663659, 76.20380788007958],
[-65.13810360798314, -4.9406764890286041, 65.373960553505867, -11.670204391287569]], [[54.171569268655503, 53.359368061868707,
-46.012260984758143, -78.151318891542985], [20.615711960999178, 40.160242458140658, -80.640118059335776, -94.602105820605374],
[58.356391780158305, -78.074396086921837, 69.50163735189372, -68.547938015025153]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[1.279534719127895, 77.967895548901566, 56.093855457217416, 55.241022797731574],
[-99.18622666243968, -10.886097986772711, 44.708474069781573, -26.616286906326849], [-92.350922530980355, 54.858168650538886,
51.906834850649233, 77.865437338097109]], [[24.233171382130436, -49.713594059906626, 75.407909417958365, 86.691179170294532],
[96.871157363423322, 23.148017134014822, -29.421912304159292, -58.976308318504977], [-5.4545343262298189, -89.036199846063681,
-83.742519983327739, 35.372319522991887]]], [[[-95.511576008994936, -83.10806319229718, 63.825192259702391, 92.80910275607684],
[44.426319323500707, 88.815074429332554, -18.021325932633019, -69.917789857742505], [-84.319087816871672, -30.317627038327316,
-38.345827346198959, -81.91833965828738]], [[11.186751110650022, -54.257619696250828, 84.729270493118236, -8.0244377640246114],
[77.805655721275429, -14.229050163525699, 32.671007471850089, -96.882778316793122], [-56.456065533953058, -25.01675593935984,
65.68053871510449, -14.266571167222295]]], [[[-39.965547886942353, 19.317802794261738, 80.566440631464729, 43.745566353754214],
[28.366421988006579, 68.970448204763755, -64.475182800936267, 20.331319130101249], [-87.117125888478327, 91.992851667866603,
30.281916963531046, -39.27414258485895]], [[93.364522015716602, 78.717156004724472, 61.222491284093536, 86.104631528043967],
[29.395392816847448, 16.532063410538484, 10.319065205651995, 10.917748038478663], [-92.263775558488874, 50.00911791017316,
-6.6661922286034354, -51.536766809586055]]]]))
res=sin(arg)
ref=Data(numpy.array([[[[0.72375966950305115, 0.23958247784190015, -0.62176555507714748, -0.71214373694684574],
[0.019629755360536964, -0.94249864392330207, 0.9186492389762253, 0.61070482468498899], [0.73418989352805997,
0.9408956015682427, 0.60228932671485913, 0.51113062555018463]], [[-0.98099259974089892, -0.93096397739276848,
-0.98854412433116323, 0.22934906491349744], [0.29353958469160507, -0.68897514951619387, 0.99714636265035372,
0.35155913660386867], [0.47447618324952018, 0.097767803237166412, 0.23642150489763264, 0.71921602354647907]]],
[[[0.53829894600069828, -0.4018025366804851, -0.31894868102661073, 0.96159352277239407], [-0.50277617521839357,
0.8149857013480003, -0.90837056606621547, 0.46711279434815001], [-0.7249719991634016, 0.89850915924785046, 0.82782593247756842,
-0.61495265106171171]], [[0.15909090210144514, -0.99182021016560207, -0.27830764816239967, -0.57853598042401821],
[0.96950642440138313, -0.46751584043254807, 0.92837285606475217, -0.27610409657055596], [0.19160059401890014,
-0.086683885904533534, 0.93579131574343599, 0.19430985516438759]]], [[[0.84743598014352139, 0.76110153119788515,
0.14139142291111614, -0.37791993461054291], [-0.15415780695875661, -0.96389276338977681, -0.013753764053628931,
0.72123543134514545], [-0.74152608990617153, 0.97405537665333219, 0.56421749260196419, 0.78093804490852481]],
[[-0.69217200421492153, 0.04768895464089825, -0.89642177035913229, -0.37879826270669459], [0.98097790580385125,
0.62915222544178329, 0.86305700540000696, -0.34695851228481017], [0.97206368588434766, -0.44879839287225581,
0.37704058161193998, 0.53718857715535373]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.9578823534926072, 0.54124816795457964, -0.43929370911716231,
-0.96557502749159507], [0.97455749834004068, 0.99401345246169626, 0.66401590677623057, -0.99619607619697281],
[0.94731148656605035, -0.99284417199371577, 0.99751195316042607, 0.62441237853622522]], [[-0.78305944951663353,
0.52429620121273146, 0.0096855803621312825, -0.95610712166060408], [0.49533020111560083, -0.9155481075478985,
0.91177208072521287, -0.65482579444370448], [0.73702027966623906, -0.8779702346494217, -0.88218472628424938,
-0.72758863915572014]]], [[[-0.95324448144023388, -0.9896294977803074, 0.83785349282853971, -0.99128491043904499],
[0.42957507779222781, 0.75159719546211767, 0.73673567820434016, -0.71927034677937474], [-0.48285656690859402,
0.89043473057109679, -0.60256841133763539, -0.23472014974367561]], [[-0.98178130166608535, 0.75163971078732728,
0.093593967784617274, -0.98550749523114423], [0.66996424044290459, -0.99578170573160452, 0.95057449576530817,
-0.48520180467023327], [0.092469940703161432, 0.11572541384732027, 0.2887366377307638, -0.99163895037731464]]],
[[[-0.7676438791646546, 0.45132255753166978, -0.89789686094785226, -0.23452586033429529], [-0.091958006320412053,
-0.14408689254970225, -0.99737060586631121, 0.99603916939064607], [0.74960719408299126, -0.77496816002780011,
-0.9061156382123059, -0.99999103487825647]], [[-0.77281036418314564, -0.17641158915267149, -0.99925644477650222,
-0.95848189929893357], [-0.90056129662048501, -0.73393688041745886, -0.77976304128985197, -0.99697306576558797],
[0.91583747858958031, -0.25356559568768045, -0.37371120994166129, -0.95548059670784435]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
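# cos(arg) behaves like sin above: the cosine is taken component by component
# (radians), and the result keeps the shape and tags of the argument.
# Minimal sketch with a hypothetical value:
#   cos(Data(0.0, self.functionspace))   # 1.0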
def test_cos_taggedData_rank0(self):
arg=Data(52.3923651613,self.functionspace)
arg.setTaggedValue(1,92.6499316384)
res=cos(arg)
ref=Data(-0.527866301451,self.functionspace)
ref.setTaggedValue(1,-0.0270483432209)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank1(self):
arg=Data(numpy.array([-74.897126998165533, 76.673400450800756]),self.functionspace)
arg.setTaggedValue(1,numpy.array([99.065445380314515, -86.748306948983256]))
res=cos(arg)
ref=Data(numpy.array([0.87705625402072684, 0.29133259267605394]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.10508243263067833, 0.34712991573165969]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank2(self):
arg=Data(numpy.array([[40.593544538866865, -8.8839015039393558, -49.468879573084436, -24.708042838510465,
20.413703995745891], [-79.108713409558405, -68.647136982462371, -80.858963259372672, -43.373193372132903, -19.507573187625411],
[64.214585816318845, -78.826300537435486, 57.661889712775803, 95.493641862455291, -48.386749127960769], [67.334847000926004,
-34.70671409523483, -36.873199353443709, 3.6386929918643176, 35.181153901083945]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-30.787970015064928, 82.074060959202797, 25.479756845345577, 10.895119259966464,
63.74412167304564], [-60.035262414428935, 54.332578347635263, 18.293985264200202, -9.7571535510820695, -70.419305661969503],
[-66.629926110044835, -43.57208065884415, 57.437026616340574, 20.73240225691022, -80.496461940478952], [19.883318148806438,
-98.729450313914597, 73.172600335425471, -53.114967436072469, 41.781624603862156]]))
res=cos(arg)
ref=Data(numpy.array([[-0.96961115090719441, -0.85725773398423355, 0.69913962613000602, 0.91116305149951837,
0.0066482036132297587], [-0.84249563945044104, 0.8925167529418252, 0.68043098049226469, 0.82016110495020733,
0.79120632545455827], [0.18695697167168232, -0.95924330257900359, 0.44177331900046657, 0.3192463084776716,
-0.3030933270774539], [-0.20786820316301155, -0.98889106925546555, 0.67788641598410604, -0.87896904245554386,
-0.81176118995632829]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.80922972543055438, 0.92389756193357364, 0.9403918665642913, -0.10028616065108438,
0.6119531025181365], [-0.9410747774886985, -0.60143262098807782, 0.8495995978399381, -0.94526988131298229,
0.26338463011163266], [-0.79214526943475394, 0.91703450582859369, 0.63068646834096875, -0.30701028605243086,
0.3763461069696134], [0.5115898554852758, -0.22867682408200724, -0.60902205590663616, -0.95763905321643927,
-0.5890447354610614]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank3(self):
arg=Data(numpy.array([[[-36.309518950317376, 0.93081070250144649], [31.019427711604664, -74.09863093545404]],
[[-38.496677203305893, -85.824133574935331], [95.203836891504238, 22.838846451350705]], [[60.75609230931488,
6.003670139700219], [-31.49567872236139, -63.206983059929222]], [[-9.6812822737183666, 0.078728886948780996],
[66.900652835446493, -94.869473621923703]], [[-6.6770163744116076, 22.876520146740972], [-55.737787303088737,
6.2425399184533319]], [[-81.429470177177521, -81.6116687923749], [-97.082967034525325,
-67.37269287178016]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-76.097111647315714, 55.656786197463788], [11.140883121429439, 54.147885791873421]],
[[-24.180524234728694, -45.703945118544723], [42.10979532559989, -22.79785029244421]], [[67.322737034238003,
18.304852118006011], [7.015704936158869, -94.401853589660817]], [[35.279952362677818, -7.8217175297602637],
[-81.23891082515344, 54.069639284286751]], [[4.2395499199061106, -11.974337349322099], [-77.095389819359994,
26.711493864407473]], [[-66.565935528207518, 41.011773246282445], [-62.114425668075299, -64.456999774045073]]]))
res=cos(arg)
ref=Data(numpy.array([[[0.18021343448473101, 0.59718391060744369], [0.92241876797360978, 0.26789121482669265]],
[[0.69845114643777229, -0.53934947535935607], [0.57674535988171327, -0.66171093596184249]], [[-0.48377631503826568,
0.96118931155712628], [0.99682147963778267, 0.93046000296873854]], [[-0.967282744906935, 0.99690248160545425],
[-0.60007048402362761, 0.81289300751647198]], [[0.92344574856902162, -0.63300189293768494], [0.68886045878833047,
0.99917408990060674]], [[0.96843093014337922, 0.99756913767256539], [-0.95342403093885042,
-0.17070899404470352]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.76555810931252977, 0.62794880060935965], [0.14479801574845574,
-0.73792168383216183]], [[0.57987832415218665, -0.15028015665535041], [-0.29714910041836201, -0.69188358205701828]],
[[-0.21969811854411034, 0.85528080687409014], [0.74349177468513394, 0.98815406589512933]], [[-0.75019910814961466,
0.032258506831785543], [0.90368477270236081, -0.78841749829099372]], [[-0.45541567307928488, 0.82980773284067688],
[-0.12603373471688831, -0.0079562249516561077]], [[-0.82955287573817371, -0.9854033904934385], [0.75349957000523238,
-0.054323621236985108]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cos_taggedData_rank4(self):
arg=Data(numpy.array([[[[37.610264246462435, -85.560668463311075, 92.790982866326573, -21.753674410293172],
[-23.164181244709354, 64.496397223384463, 69.277186049494105, 6.3927475799028457], [67.583896168477764, 36.111360823700437,
30.266912701944563, -54.963319263159384]], [[-58.145969583496672, -97.225443498755453, -56.934313916342269,
35.421162068171839], [65.866615855863898, -57.072034755161027, -95.488754117534285, 81.149953518095799], [-18.30949886526929,
-89.680457620572071, -73.87886392983259, 81.259366551703209]]], [[[1.8677491996480029, 36.828382975770609, -80.40672114911041,
-49.292595896369647], [-37.983864569797767, 35.583525872048824, -42.451157688857613, 33.755615612774108], [32.674252940671579,
90.058275023987306, -96.26155980692819, -90.500098763836021]], [[90.079955965660446, -70.687430685137031, -51.111371179982747,
-74.109677346578138], [-32.896920002886091, 62.26499948195692, -59.833741060334056, 11.794198300820895], [43.437452546746755,
97.455115222231768, 87.354131572829402, 3.2818247457694412]]], [[[78.306182680183269, -64.892175839143391, -55.104588214315342,
-96.744717049677931], [-38.337933398739985, -72.796076467431135, 60.379171901212146, -81.927733276050247], [63.885059436029167,
-31.980639093805863, -57.261994523508044, 17.357515328643643]], [[77.429908518363192, 9.5882415367278355, 72.484182388500756,
63.089077313098954], [84.07047179403375, -21.092477779767819, 41.614178023999727, -98.204118862286279], [-71.275012546567766,
78.730240012789466, -11.573247145900382, 33.098945113087012]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-40.533710954365063, -21.161469670738327, -69.120302774135837, -14.38267699844107],
[-91.158843533364944, -85.491074434980831, 87.152587959079909, -33.044835488961624], [-68.672525163755367, -1.8217843916724235,
-33.6594071845862, -94.719797609019921]], [[5.7039466733317994, 69.091962753216535, 42.000508648719546, 63.142145355765422],
[79.524244986771464, 62.133683756888729, -63.061242691543782, 51.048740976244147], [-88.653022332832293, -81.214225577010723,
35.550248226917518, 76.160743630564809]]], [[[-63.524226576744191, -56.896009941669014, 63.19522201987138, 66.388629592533931],
[-56.646135485855687, 8.9406032636504307, 89.111063185551444, 12.201705041404125], [64.844923341968638, 93.705153189621086,
42.451679671109446, 55.611996897559266]], [[-50.4500969589295, -56.48304920853591, -43.397487648713806, 24.970484957830536],
[10.247946263340424, 53.164372653170489, 20.567237785266812, 9.4104989925598801], [-56.157152366568909, 42.552761052044843,
56.078516299029076, 18.940543407164128]]], [[[-33.632224346804193, -69.405810068119834, 44.225943185591831,
95.213025790079087], [-38.509288601106675, -62.938695142627999, 82.460256045254965, -40.372955049612315], [71.091785922673608,
-67.332900637102753, 99.968681344820283, 87.597127665814384]], [[-15.352405373769315, 13.607690117097107, -27.035258608117374,
-88.065123343235953], [46.351984421658017, 40.175457656434133, 90.498104230403385, -29.926375524616702], [89.955509906700911,
75.738059235642481, 92.170833583735543, 28.373336853066405]]]]))
res=cos(arg)
ref=Data(numpy.array([[[[0.99605564800414159, -0.74003978479165422, 0.11375282452021319, -0.97193527337579688],
[-0.38735756213150352, -0.09361056558100582, 0.98688284383678593, 0.99400405568827155], [0.039643724768732305,
-0.016953880290015939, 0.40938737891293392, -0.014551661058647967]], [[-0.026502388661538694, -0.98659374258249288,
0.92655557103089836, -0.6496724166496719], [-0.99429911442146879, 0.86614127304686683, 0.32387445454567942,
0.86207036562540851], [0.85767934528605649, -0.14455871961558475, 0.051413892338749011, 0.91225420946081004]]],
[[[-0.29260780746660703, 0.64426928163414932, 0.29180026978390428, 0.56291609637951678], [0.95973114604298926,
-0.51823819807950822, 0.039646471705651949, -0.69533990552752001], [0.3074098932007972, -0.49938120217558235,
-0.42863722843891311, -0.82188268941215192]], [[-0.51804635050563663, -0.0015959786891496938, 0.66306628166652848,
0.27851730967463495], [0.089682205138485488, 0.84359470875913611, -0.98972429938704287, 0.71639675290673688],
[0.85520784080680545, -0.99783970980023062, 0.81938284847117593, -0.9901835826774219]]], [[[-0.9728315762339087,
-0.47020799835491661, 0.12637793045273601, -0.79930700568503443], [0.80279896236009785, -0.85799367135344373,
-0.77193843948759455, 0.96981526124126383], [0.49478722014669613, 0.84474246657231211, 0.75618918968201176,
0.078674345855532332]], [[-0.4447440791197817, -0.98666955220849251, -0.97422171127622192, 0.96709984785995873],
[-0.7299620834657633, -0.62265062060091214, -0.71548829876243725, -0.68605277637160145], [-0.55567766942870023,
-0.98192413256230948, 0.54607588753058456, -0.11198684728536197]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.9532464104225401, -0.67511267180012213, 0.99998614310380007,
-0.24305111882757385], [-0.99861396263192814, -0.78501698113748208, 0.68804387880332429, -0.058079923319158462],
[0.90367832948274707, -0.24836118666524837, -0.62308777688597838, 0.89065254831191276]], [[0.8368796543112903,
0.99973376956186366, -0.39951907575386714, 0.95224436545888613], [-0.55333911666953939, 0.76602026469360152,
0.97380536659847572, 0.70861811863632929], [0.77224567755776974, 0.89284034215998742, -0.54640627400327102,
0.72309766743732995]]], [[[0.76973301984773468, 0.94028075556243462, 0.93470472120159609, -0.91504217164360968],
[0.99525378084999272, -0.8850594108800458, 0.4117079619272091, 0.93424308242332088], [-0.42799571611881249,
0.85635538858959426, 0.040168038134317727, 0.59247321999590674]], [[0.98300708870591891, 0.99784787493267124,
0.83381726388896149, 0.9868653057824498], [-0.67990129583749026, -0.97069204733006775, -0.14635792168284548,
-0.9998980572655366], [0.92433186130108524, 0.14079090200582819, 0.89149968128133894, 0.99586349366046734]]],
[[[-0.60159909873717321, 0.9580229239875917, 0.97046484832780555, 0.56921448509653549], [0.68937039860779081,
0.99429781336097167, 0.71172390998364865, -0.89258527400759391], [-0.39489284771819805, -0.20977165731745406,
0.84603990208711677, 0.93324045680470225]], [[-0.93745242426842468, 0.5050818721371243, -0.32567073631884641,
0.99495120989406094], [-0.71658296354855922, -0.78676444613630847, -0.82074485588505508, 0.081155964310960879],
[-0.40786936285205416, 0.94280949490341759, -0.48481330596382211, -0.99510320754671833]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
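# tan(arg) likewise applies the tangent component by component (radians) to the
# default and tagged values; the reference values can become large in magnitude where
# an argument lies close to an odd multiple of pi/2.
# Minimal sketch with a hypothetical value:
#   tan(Data(0.0, self.functionspace))   # 0.0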
def test_tan_taggedData_rank0(self):
arg=Data(81.2949649872,self.functionspace)
arg.setTaggedValue(1,12.3613553191)
res=tan(arg)
ref=Data(-0.406904128478,self.functionspace)
ref.setTaggedValue(1,-0.207936773642)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank1(self):
arg=Data(numpy.array([-95.851047486395899, -87.804262570020512]),self.functionspace)
arg.setTaggedValue(1,numpy.array([35.849126399037175, 13.927401673303507]))
res=tan(arg)
ref=Data(numpy.array([30.785362859177258, 0.16171984883073159]),self.functionspace)
ref.setTaggedValue(1,numpy.array([3.4882512502950971, 4.6971057126849036]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank2(self):
arg=Data(numpy.array([[-6.2123974990845596, 78.448990575809376, -99.326922204393099, 34.466953331401896,
75.081637288912191], [43.152743095243096, -23.515299958653429, -45.139362755422809, 37.555578473020233, 83.674961808589416],
[-20.329004079626117, -89.68698187313413, 63.797873117120815, -97.852830452441481, -20.352637389791738], [73.846890877324569,
34.650839207487195, 41.531549740281122, 42.117481567836307, -14.893287864506703]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[32.462416722884939, -48.149844207496905, -56.126695807148465, 60.69249745488338,
-88.519356123058287], [-77.612992699512802, -82.951630694447118, 41.156303211277162, 76.793150351335555, -72.585387811636906],
[91.644542807801855, 51.947395321575783, -56.950928573202873, 18.806113794978117, -2.4791441058101356], [13.946361780608797,
-89.75004089425245, -7.8270132480697612, -75.115117512127185, -17.025098103456472]]))
res=tan(arg)
ref=Data(numpy.array([[0.07090628276813575, -0.091076341049522724, 2.6032644546506951, -0.090814285504178624,
-0.32760519036327246], [-1.0924542795462242, -21.423000533021405, -2.2775209371153795, -0.14452724439561776,
-2.2228009589039441], [-10.916660613554196, 6.5460789983613727, 1.4468186297845727, -0.49976325011584999, -14.745229039106976],
[-51.371685347992141, 0.093591861362473022, 0.82675810400212302, 3.3025618500686895, 1.0603382585364733]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.7292248126420149, -1.6500818693323478, 0.44893985316262996, 1.5650975398719362,
-0.61967622915013898], [1.3320242181202921, -3.2261605736834746, 0.32651185794691795, 5.6272845624900736, -0.3411363783892129],
[0.59719690743737996, -8.962491127283629, -0.42546071042084166, -0.043469475465786209, 0.78003579386887978],
[5.1771917698562682, 4.5869701687908737, -37.07146436191114, 0.29092035884983614, -3.8573430010424579]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank3(self):
arg=Data(numpy.array([[[-31.363348163296223, -63.428729589813429], [-96.507312106299665, 31.782542686119456]],
[[-34.681083479682911, 94.077182149079221], [15.294827923512429, -2.7318887141934312]], [[40.882434195822839,
-64.18724581426693], [11.572828840371855, 90.497867255100772]], [[-28.641061694503762, -87.171931801575766],
[-11.020127312733962, -30.967479102653854]], [[-7.3186224758889296, -50.786266134306011], [27.048134010190211,
2.5490774473318112]], [[61.949209649501199, 85.202746332031268], [-96.092429938290053,
-18.405433004645062]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-56.051822909334504, 24.794825821519993], [-0.67715445796852691,
83.973745178864789]], [[-67.137455945524025, -23.605845271936829], [-50.37296104694218, 61.52765962606091]],
[[89.358776831997943, -14.338540458285891], [-29.266662560421409, -3.8651288601611213]], [[34.695285195870127,
90.166487346465175], [-10.666151183063903, 29.826418400042996]], [[35.897311083718819, -5.6908691923045893],
[81.788877267557382, -28.486351983044273]], [[-10.244239444046215, 61.596990071263548], [9.0672269007921784,
5.6119735254265208]]]))
res=tan(arg)
ref=Data(numpy.array([[[0.052626876950351224, -0.67956116470517647], [1.2147515603304766, 0.38397534066424899]],
[[-0.12419702009054982, -0.17227194869809437], [-0.43836392791494461, 0.43427926215657442]], [[0.041753938290720764,
-4.5704240024751872], [-1.5355049455338108, -0.69642068661122147]], [[-0.38410347124596128, 1.0146352438416113],
[40.719994162243026, 0.48114165299842049]], [[-1.6859462819034383, -0.57360288493474976], [-2.7861599010747122,
-0.67320458364148295]], [[-1.2161965333027973, 0.39911670622190515], [3.5598363671486943,
0.47582704782624802]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.54221273929241065, -0.35139316545369953], [-0.80396583079821793,
-1.1366271548317615]], [[-2.3211791986595185, 22.764215950312941], [-0.10789436233823439, -3.6616056049070829]],
[[5.6030778046056575, 4.8985896068181933], [-1.5314392133091446, -0.88334381751181845]], [[0.13864424980142789,
-1.3683712672276451], [-2.9249993656191324, 53.435949643775317]], [[4.2516444503647923, 0.6729153019338332],
[0.10788392698491613, -0.2152531375981952]], [[-1.0705573488747255, -2.8639517816691598], [-0.37360951726234692,
-0.79422844785382163]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tan_taggedData_rank4(self):
arg=Data(numpy.array([[[[-97.656975158803732, 86.62989131986555, -77.554231481640841, 3.8669983308735141],
[-46.038298726138535, 40.067291705031494, -98.270364197569336, -94.6969458479483], [87.514637704319199, -92.077863733525263,
-3.2419643949078676, -63.397042107941658]], [[-78.795717243110204, -15.279620056917338, 80.460981482647753,
58.412764427127541], [78.818186435655377, 37.682189940374087, 44.849206889045604, 44.848625721891608], [-35.5574347957388,
-12.199741839763533, 97.654780429841566, -80.88961765682032]]], [[[-22.652246910060853, -67.38886650009394,
-23.477307930487527, -10.646755410960651], [28.156205936499333, 61.620904094908099, -68.627816293502761, 67.122379266164245],
[-98.867312915684863, -67.45476737752476, -25.299310914293784, 37.934440895232058]], [[49.380808935348227, -39.408417085726846,
-20.541815407391638, 83.649278193509474], [-87.485520624890597, 58.270434291856475, 94.943963892353963, -72.626165639298705],
[76.160653617847743, -56.169631642306463, -99.798183422398054, -90.426229350215536]]], [[[88.117152264551066,
52.841153774969399, 66.022106559130634, 40.622509829181638], [-4.2428278873716465, -39.006278992668378, 25.439473540797223,
61.543987328178218], [-15.166974840542792, -90.535188054136981, 84.651661558032657, -16.693664047828548]],
[[74.767862181117096, -78.437587118526309, -79.957730051825251, -82.440913401255031], [13.979534083611483, 37.832449897143647,
-41.090148981970032, -8.6289016011243689], [41.053492503522762, 8.9183240359808309, -66.578380761411694,
66.20182099550621]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-65.426437737609433, -42.513342752929837, -61.194113194155307, -64.800952933449537],
[-99.624865579613584, -3.2191662753780008, -52.527343047996354, -63.282633267519969], [39.453397429286866, -27.130144978241091,
-56.965824807007913, 74.720174006700404]], [[-8.9359752895985025, -12.273022000155564, 79.215464783067716, 54.215640736250037],
[-10.545731014079962, 39.462823926104164, 40.564010266889511, 92.013499250506641], [20.430731908343816, -93.73852236295572,
38.616446665550825, 16.303457902544551]]], [[[-8.6131335963080886, 91.666532748276779, 67.718239380865299,
-24.067799387278825], [2.6235650197201892, 84.44637358608324, 55.396761917366206, 30.029168851594335], [3.7411742822091298,
7.2609312887790338, -64.032694520213113, 71.454721297104697]], [[63.195820999024335, 10.691306316136078, 70.801143686977696,
34.146817443572871], [-81.881989121896652, 39.550920228819763, 4.1371918628010178, 93.50590280236878], [97.927125099631667,
64.442557162542499, 81.251825689023093, -72.766391730318389]]], [[[-99.988258264364134, 6.3957796419498578,
-83.783069257657814, -42.282613168159912], [36.226072219705117, 53.514293085963089, 55.242986060328917, 30.924108560738716],
[48.213793181692523, 33.583178486196687, -17.776417123041654, -79.70940504468291]], [[-26.186562848112317, 35.053508602494276,
-74.15973561349287, -8.998608974101586], [72.625731679153461, -45.763185712759238, 96.157419130429048, 82.60958774405006],
[28.466122898236932, 13.661431663776554, -64.305636252969435, 78.126668760850237]]]]))
res=tan(arg)
ref=Data(numpy.array([[[[-0.27417913318913278, -4.1566210200104123, 1.5091093500762029, 0.88667753844569219],
[1.8966516810504024, -0.97631203132464983, -1.2121104814243526, -0.48202716829016767], [-0.48300153499471354,
-1.4644610945344889, -0.10071016976344437, -0.63420186225059838]], [[-0.26163711886038371, 0.45661727740971819,
-2.7363785710173922, -3.3111423421076758], [0.2857904741045893, -0.016923518088534008, 1.1779198979973127, 1.1765333132126685],
[-1.5571186702709128, 0.38398982640921775, 0.27182083255769302, 1.0128687939892587]]], [[[-0.77786631973260933,
-6.3841435669149291, -11.786941762312257, -2.749590141226586], [-0.11868049092962783, -2.6579585972745292, 0.52982517743967028,
2.2281208753465154], [-10.738431613965821, -11.146506047949773, -0.16812749423430426, 0.23977164577867416]],
[[-1.2212418866506991, 7.1735230424816061, 8.192421452643833, -2.3846593272264083], [0.51943402007653894, -6.5734237237147752,
0.83578647210017276, -0.38732785222465582], [0.95508720601143937, 0.39829550561729016, 0.89993916898678605,
0.80859125720920677]]], [[[0.15375262833206235, -0.63522917927167522, 0.048699277776481524, -0.22172457509128218],
[-1.9707778487603462, -3.7049205779162753, 0.3167282752575496, -3.4396185005903077], [0.60077400519944801, 0.64237874378765658,
-0.17303671645831173, -1.5094893398587974]], [[-0.72966855935529684, 0.10258684244697755, -6.4899256428743657,
-0.94950861550679122], [6.2912235198952393, 0.13413392183447914, -0.25475027110751947, 1.0211790862248971],
[0.21605885016652787, -0.55471233347047222, -0.69140625670162514, 0.23243020992993024]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[0.60899603533502777, 9.7851675904442956, -14.915637546865332, 2.3764544607985711],
[1.2760657540936704, -0.077729601321507483, 1.2090043292697605, -0.48401767690643016], [-5.3886094617159346, 2.200117216306007,
-0.44316694059504153, -0.80544075052469299]], [[0.53185115511734737, 0.30206325695557501, 0.80148938350550525,
1.047442535039711], [-2.0709860162340474, -5.1191444088641491, -0.28397859569936534, 1.2792587767882238], [-96.338809394137982,
0.55838397029774967, 1.3060273034426464, 0.67754301675915884]]], [[[1.0539199784527833, 0.62743135353607671,
-5.6890981603187942, 1.8052864811134648], [-0.56994582002442928, -0.39550809900908157, -2.2459675092850895,
-5.3721562872445512], [0.68352279680128936, 1.4837179824840887, -2.5785739645551788, -1.033580665481149]],
[[0.38093975110296807, 3.1845224145186783, -8.6338809194109842, -0.43546576800373976], [-0.20331407344276023,
-3.4643985918583193, 1.5424352550195062, -0.91653700413563366], [0.59637912459294451, -25.044476766299347,
-0.45811677917613758, -0.55904458203375451]]], [[[0.60311480417088692, 0.11307256431662512, 1.7033508050940664,
-7.7156847407432672], [-10.196871172372907, 0.1076307213120567, -3.6831649770066708, -0.53572561021033649],
[1.9166401734031362, -1.4728710216778025, 1.8407229850668281, -2.3572817040493903]], [[-1.758855002901736, 0.54110631448591939,
2.8976606059699086, 0.45399233058581712], [0.38682887197477689, 4.6895764808665765, -2.8373957569747743, 1.3357904861442966],
[0.19417566417476489, 1.9409854797509967, -10.275523383878257, -0.43837851346057438]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
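   # asin() on tagged Data, ranks 0-4. All arguments lie inside [-1, 1] so the
   # inverse sine is well defined; the tagged value is verified independently
   # of the default value, using the same shape and Lsup-norm checks as above.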
def test_asin_taggedData_rank0(self):
arg=Data(0.989657679547,self.functionspace)
arg.setTaggedValue(1,-0.473489993439)
res=asin(arg)
ref=Data(1.42685052415,self.functionspace)
ref.setTaggedValue(1,-0.49324888058)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank1(self):
arg=Data(numpy.array([-0.60614764750793948, -0.76269603799552499]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.074431252427650141, 0.46633079704255254]))
res=asin(arg)
ref=Data(numpy.array([-0.65120801803032125, -0.86747147905375954]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.074500149323603215, 0.48513840344368775]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank2(self):
arg=Data(numpy.array([[-0.38725529068582309, -0.035002566048380368, 0.29215359547572062, -0.74047728151511116,
0.86375088045015236], [-0.32624879922924277, 0.2147493204278017, 0.69593358057302734, -0.67043661608803573,
-0.36870988653065229], [0.47510384739864087, -0.76918433519434637, -0.34537195722060143, 0.011183356922037646,
-0.11541581215659935], [0.27260362912724068, 0.61437603398068741, -0.25663483045732682, -0.041872856264680069,
-0.91618191033723884]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.41390194006847647, 0.47906669770750199, -0.65223346998753673, -0.15143835662945238,
-0.33784007340735844], [-0.70843138277904028, -0.54438594011729147, 0.034207507853760921, -0.32612382903828285,
0.70186023759432148], [0.46194569999679191, 0.20862219804974647, 0.43258225592841981, 0.89408040795536747,
-0.21133552002226552], [0.47668525800656281, -0.98272511924427908, 0.12392756171121211, -0.74247467351461138,
-0.70926856758101375]]))
res=asin(arg)
ref=Data(numpy.array([[-0.39765272784734212, -0.035009717396994572, 0.29647790574666982, -0.8337802351353637,
1.0426662706606511], [-0.33233251116675155, 0.21643515475826403, 0.76971914249277162, -0.73479708883031969,
-0.37762073948486119], [0.49508204578845194, -0.87756375238241691, -0.35263510222938133, 0.011183590047521475,
-0.1156735991133048], [0.27609811766175707, 0.66159490472202953, -0.25953880511997846, -0.04188510212750262,
-1.1584472025690309]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.42673623857902859, 0.49959114862575316, -0.71052717518368491, -0.1520232503062508,
-0.3446210935366435], [-0.78727319189839995, -0.57565690165345518, 0.034214182708838151, -0.33220031031626085,
0.77800568719018426], [0.4801877492089463, 0.21016594677352965, 0.44735491475086192, 1.1063738876172791, -0.21294114010121065],
[0.49688012059430242, -1.3846520382811431, 0.12424698835488085, -0.83675707078112471, -0.78846008349356012]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.27078670807121452, -0.6521353392582292], [-0.98935156671000035, 0.71924091352997421]],
[[-0.596001253659459, 0.61191364730020958], [0.66651667689067051, 0.90368733535070822]], [[0.73399023574476341,
0.70767255658646566], [-0.13936321871982982, -0.62987342971179183]], [[-0.4948738057441634, 0.49128944743073721],
[-0.88331247766967902, -0.86621949805759135]], [[0.14384806329256028, 0.095935576215879115], [-0.69504930628562311,
-0.41646976607569797]], [[-0.39226444326380583, 0.2452138366202059], [-0.1518719139549316,
0.93521490540250141]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.22465455750635011, -0.64081370087964307], [0.6605109979083339,
-0.54615750594210355]], [[0.56019147828484561, -0.67363253611107687], [-0.26666555499697531, 0.5822337693194235]],
[[0.96028563934974787, -0.53648605635740176], [0.016439994710971462, 0.30498182045115629]], [[-0.27256730376760929,
0.25969816117226885], [-0.64645590181357815, -0.097098197456417124]], [[0.090976011478984375, 0.61837879336933055],
[-0.69346965532985538, -0.27865603672245576]], [[-0.18099305537471033, -0.21217824413547803], [-0.24741889558471541,
-0.10075712603260512]]]))
res=asin(arg)
ref=Data(numpy.array([[[-0.27421017834129852, -0.71039772542571444], [-1.4247320578861771, 0.80270911325546157]],
[[-0.63851198939279763, 0.65847783965152196], [0.72952644193099725, 1.1283041433740428]], [[0.82417870001472116,
0.78619861108497946], [-0.13981832890670637, -0.68139024128778247]], [[-0.51768960196670843, 0.51356956389655972],
[-1.0828819297210799, -1.0475858703421546]], [[0.1443488307389203, 0.096083348390609224], [-0.76848847834177259,
-0.42955884388247545]], [[-0.4030920504517207, 0.2477402572487051], [-0.15246188107240444,
1.2088655180324399]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.22658851752026385, -0.69555772267891613], [0.72149914702634521,
-0.57777028882908332]], [[0.59461693452585995, -0.73911287993369601], [-0.26993164239658068, 0.62147348780168055]],
[[1.2880241486013853, -0.56626768327750376], [0.016440735350003028, 0.30991934023703022]], [[-0.27606036257642469,
0.26270962685432236], [-0.7029299940282141, -0.097251423034157833]], [[0.091101977070680226, 0.66667810639918901],
[-0.76629367807938997, -0.28239443270507703]], [[-0.1819960893458433, -0.21380341867513972], [-0.25001541559304535,
-0.1009283906282767]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asin_taggedData_rank4(self):
arg=Data(numpy.array([[[[0.17667641262684564, -0.20528415608852835, -0.86377896580752189, -0.5808064056077008],
[-0.53575131452410674, -0.94973341635526076, -0.94934619991031122, -0.36012502776308042], [0.30866069414823993,
-0.22625586744343318, 0.89172233386738275, 0.041379170795829534]], [[-0.74767149169546465, 0.50300329169060554,
-0.77484404377979366, -0.8352964117129088], [-0.30013599718442951, -0.10886586640435891, 0.2730681338127674,
-0.1615077727930434], [-0.41658644426176838, 0.87939475296716063, -0.86922788571063614, -0.69906009036185479]]],
[[[0.12016629383182509, -0.70534585776132253, -0.87839274516803956, 0.2480882290553843], [0.31790318495990477,
0.03461698975366434, -0.027498912345320314, -0.66291073124981814], [0.50626012778834562, 0.77210638834559853,
0.32650848757464579, 0.9203611453694176]], [[-0.47360390489237758, 0.85676222423230053, -0.015787865739628981,
-0.37070260942360855], [-0.72961058537924894, -0.14494860353517136, -0.52932600855417877, 0.88281683601885486],
[-0.6909459206541444, 0.69160226630289623, -0.54290210856405896, 0.34524619417902236]]], [[[0.36542406478716893,
-0.96793946499057182, 0.38442480479198515, -0.57435150997595197], [0.12545758270235607, 0.96934407264412958,
-0.24044762690293819, -0.340989156148089], [-0.44460870104468952, 0.88803936519219207, 0.55053765965557822,
-0.5546454459624317]], [[0.83841967081685675, 0.50019773286874503, 0.22967486065013221, 0.56354640088505747],
[-0.65856710498790327, -0.90765207375799539, -0.58747426169848049, -0.53180638195873375], [-0.83276666060509819,
-0.68616293259457728, -0.17418580748690327, -0.62859194512462024]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.45006916215173465, -0.92518399158900699, -0.04783796036863075,
-0.19626369399103549], [0.09169070885520969, -0.87029170445041015, -0.0045652336502262081, -0.32329394607483464],
[0.55866492213457741, -0.22397258908984941, 0.052303570015150402, -0.7530677681629222]], [[0.54997830254931301,
0.35346830466133805, -0.40403225037158907, -0.04482659680963097], [-0.11974859579133346, 0.94132610730205091,
-0.66324538714156511, -0.22519453019335234], [-0.70728958122137842, 0.92386346743545333, -0.6458871899721651,
-0.4609281078649099]]], [[[0.12930111020374491, -0.98520462255645291, -0.40152660242615856, 0.010632521180307775],
[0.031150770980220788, 0.55887463503362822, -0.54295081925718014, 0.050402433196199858], [0.013662223054701439,
0.40718009215619322, -0.71883819418052053, -0.96296533562944775]], [[-0.1927124007905795, -0.68855068933515107,
0.6316010981181337, -0.041869003739051891], [-0.046326825303942165, 0.92598846850093763, -0.72356399590221465,
0.25634295052044487], [0.22051227192098355, 0.90166643600746443, -0.06859211365531892, -0.045149621190890721]]],
[[[0.39410312014393267, -0.044987876742481614, 0.8979457783429603, 0.72166466731038081], [-0.83729280796152195,
-0.97851172484586679, 0.17112070485897912, 0.2107396926071694], [-0.22810606176169324, -0.92777597337878248,
0.58581791765258862, -0.57511066270834021]], [[-0.30425553857922449, 0.63784070657640024, 0.76802042170834328,
0.56358714499635787], [0.76594910306777875, 0.85231338535685475, -0.94636186542722056, 0.77240089163366621],
[0.31982221170560687, -0.32750948889637299, -0.034744253720429996, -0.50257821297680039]]]]))
res=asin(arg)
ref=Data(numpy.array([[[[0.17760871448276636, -0.20675403804646908, -1.0427220071893906, -0.61971896050972197],
[-0.56539728309730153, -1.2523832523933589, -1.2511486870629975, -0.36840190993020011], [0.31378465192018662,
-0.2282321452737586, 1.1011365828156379, 0.041390988386389355]], [[-0.84454869639849361, 0.52707016378896132,
-0.8864683510196445, -0.98867169122207821], [-0.30483522099816734, -0.10908206377238625, 0.27658094036046738,
-0.16221829359885875], [-0.42968718537947675, 1.0745894259046815, -1.0536384870903868, -0.7740822073469662]]],
[[[0.12045738832689482, -0.78291093220064634, -1.072488853750152, 0.25070628836034903], [0.32351712207393363,
0.034623907281987255, -0.027502379259771104, -0.72469982402898492], [0.53084255928695823, 0.88214908246269863,
0.33260724413419246, 1.1690029652625629]], [[-0.4933782124574862, 1.0289582745627337, -0.015788521685103963,
-0.3797654160853966], [-0.8177523437209665, -0.14546102725991955, -0.55780595912809805, 1.0818256899500005],
[-0.76279673253995905, 0.76370505307109449, -0.57388899520112124, 0.35250109635513183]]], [[[0.37408833395292318,
-1.3168934796373459, 0.39458466710266937, -0.61181174217240464], [0.12578904459759122, 1.3225470507143868,
-0.24282698097084726, -0.34796891551730025], [-0.46073736173792446, 1.0930630422580974, 0.58300815127146821,
-0.58793680336412102]], [[0.99437717008195303, 0.52382711290120032, 0.23174359978487752, 0.59867256987095474],
[-0.71891304682557911, -1.137655796129406, -0.6279341788586954, -0.56073216019431915], [-0.98408642456901807,
-0.75620114331540811, -0.17507887513122425, -0.67974142884518618]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.46684278730023371, -1.1815201100750479, -0.047856225143968707,
-0.19754604320899144], [0.091819674168062873, -1.0557942602491508, -0.0045652495079866228, -0.32920830092822206],
[0.59277522179959163, -0.22588871606051758, 0.052327446914480526, -0.85271238565039864]], [[0.58233825824309027,
0.36127616478278501, -0.41592063812310764, -0.044841623005844253], [-0.12003665216187458, 1.2265382433727618,
-0.72514689608315608, -0.22714269023447864], [-0.78565671511328383, 1.1780552671570363, -0.70218481885046757,
-0.47904074383403694]]], [[[0.12966414087424233, -1.3985638911620022, -0.41318311251144624, 0.010632721525817797],
[0.031155811146442221, 0.59302809425903769, -0.57394699929636273, 0.050423798063665655], [0.013662648113827933,
0.41936448841432711, -0.8021296351981142, -1.2977925042003347]], [[-0.19392561536532552, -0.75948862127879602,
0.68361662963146053, -0.041881246220352109], [-0.046343412238644396, 1.1836453452376237, -0.80895172981266905,
0.25923682302888701], [0.22233963948715654, 1.1236078001339596, -0.06864601410630812, -0.045164974774920652]]],
[[[0.40509178676205976, -0.045003065808640254, 1.1150794983658745, 0.80620405929955008], [-0.99231290343135414,
-1.3631154327855153, 0.17196703968158128, 0.2123315842232695], [-0.23013201668885772, -1.1884076378143387, 0.62588891338210939,
-0.6127394135964227]], [[-0.30915681905645181, 0.69169133505319369, 0.87574435958173658, 0.59872189267169562],
[0.87251631631408044, 1.0203925151294579, -1.2417837981042867, 0.88261263771725229], [0.32554183754839172,
-0.33366648106510866, -0.034751247851029093, -0.52657840683397406]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
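   # acos() on tagged Data, ranks 0-4. As with asin(), arguments are restricted
   # to [-1, 1]; results fall in [0, pi] and are compared against references
   # with the relative Lsup-norm tolerance RES_TOL.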
def test_acos_taggedData_rank0(self):
arg=Data(-0.291587772644,self.functionspace)
arg.setTaggedValue(1,0.0219832082111)
res=acos(arg)
ref=Data(1.86668265093,self.functionspace)
ref.setTaggedValue(1,1.54881134759)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank1(self):
arg=Data(numpy.array([0.12826451971761799, 0.33491528109011126]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.22031555171954387, 0.13047651014043127]))
res=acos(arg)
ref=Data(numpy.array([1.4421774808126324, 1.2292810095841651]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.3486583675462354, 1.4399467435143751]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank2(self):
arg=Data(numpy.array([[-0.41152758157568659, 0.50648377704044156, 0.004765718148047382, -0.19506763365502267,
0.27315738992922611], [0.33882351517959264, -0.029041529921664644, 0.28655425515791988, -0.58345582451173839,
-0.95744850153173044], [0.32067870881780935, -0.59696574288858684, 0.80001515403024226, 0.20766175365526407,
-0.92537824876724606], [-0.62608849117550203, 0.49584899163835461, -0.91012458136569108, -0.28964137555570646,
-0.019645711019819267]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.30161380606367949, 0.47771896537569658, -0.4069706892444982, 0.36563120333238408,
0.34635703099859194], [0.77283471581548135, 0.39085192259836066, -0.91464145652686901, -0.5785113219336232,
-0.55834836602534121], [-0.89226447891919258, -0.005420603335181351, -0.23047363216965455, -0.31627282725938288,
0.24708083627782318], [-0.36674157132228213, 0.84284509604242497, -0.55735470644132779, 0.28132905259474161,
-0.54756243112029979]]))
res=acos(arg)
ref=Data(numpy.array([[1.9949258434411383, 1.0396944084450979, 1.566030590606778, 1.7671227348451009,
1.2941226028750279], [1.2251301602101246, 1.5998419405884083, 1.2801680039289014, 2.1937737283320473, 2.8488240011834036],
[1.2443503750305041, 2.2105099844323077, 0.64347585165092913, 1.3616123305271102, 2.7528286077860806], [2.2473230240603672,
1.0519841271516832, 2.7143809675072594, 1.8646484580550975, 1.5904433017575812]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.2644114932860036, 1.0727399259992247, 1.9899315573166447, 1.196485454770198,
1.2171113576324784], [0.68750036295992678, 1.1692393692469043, 2.7254164558807252, 2.1876987449669811, 2.1631899258148524],
[2.6731322180353185, 1.5762169566759732, 1.8033607174166493, 1.8925943802877971, 1.3211298029884102], [1.9463004481873596,
0.56824804065117174, 2.1619926599703043, 1.2856175077339616, 2.1502446915093585]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.0018967734613412057, 0.59300273345453158], [-0.81381641137830263, 0.11977023446026669]],
[[-0.037555176051128791, 0.021140469012053797], [0.19984949833653998, -0.11442177154206457]], [[0.34269780843852971,
-0.29360044358101034], [0.53220357878081814, -0.36059448682460116]], [[-0.78355112918456538, -0.083700661501167062],
[0.32350069086320987, -0.38066114197452472]], [[-0.97705826933358608, 0.40808141278379839], [-0.42109501987534292,
0.38704401675830336]], [[-0.38925229608360801, -0.94561123734124619], [0.38734879630697328,
-0.84315118092500463]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.81502930234987203, 0.0050248424447604245], [-0.39601071676781818,
0.78146079161187321]], [[-0.40269237311187456, 0.59630968314412591], [0.43146464177692678, 0.28202596494731069]],
[[-0.88345581362782388, -0.40846927031376268], [-0.65907710124008867, -0.51535297922329859]], [[0.3791891533095284,
-0.1878703433573069], [-0.550755371929043, 0.00053164521361281913]], [[-0.67703943464546334, -0.71143279160208883],
[0.1862121811026225, -0.84162834520638197]], [[-0.77834727203948528, -0.40520983099715968], [-0.82342473982088238,
0.50868059221882711]]]))
res=acos(arg)
ref=Data(numpy.array([[[1.5726931013935923, 0.93601340923207366], [2.5214858775304476, 1.4507378791003851]],
[[1.6083603363688714, 1.5496542827850082], [1.3695920087141873, 1.6854692556999198]], [[1.2210092255140392,
1.8687874345506976], [1.009595077794389, 1.9397015089412193]], [[2.4711571166083619, 1.654595029389675], [1.2413695403820464,
1.9613074868225726]], [[2.9269767557006974, 1.1504447893795458], [2.0054485856592787, 1.1733727421774625]],
[[1.970616056541908, 2.8102648289377887], [1.1730421776900686, 2.5739136323477916]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.61801656176645547, 1.5657714632044866], [1.9779646207779984, 0.67379274631597552]],
[[1.9852526825543613, 0.93190017823864835], [1.1246806413386898, 1.2848911859657939]], [[2.6539841054134725,
1.9915727464827593], [2.2903872908337721, 2.1122158133671496]], [[1.1818764764700775, 1.7597897566840754], [2.1540652930004773,
1.5702646815562391]], [[2.3145286806652781, 2.3623312612316218], [1.3834908482456931, 2.5710876379804022]],
[[2.4628254031317334, 1.9880046639617615], [2.5382167551116179, 1.0371447242973137]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acos_taggedData_rank4(self):
arg=Data(numpy.array([[[[0.17419378414794906, 0.20877755349477622, -0.9560863974612771, -0.14865727072192525],
[-0.17733095703209234, -0.075638384984418994, -0.95407857695419507, 0.6554653414860343], [-0.61341371635489927,
-0.52092549325400384, 0.56479500388669734, 0.6212897490792797]], [[0.62874023072485619, 0.23625020456583901,
-0.16020531855055364, 0.55591840537558723], [-0.79872077551867249, 0.04557168403173395, 0.3613949288270315,
-0.95572639016521221], [0.79815227797346733, 0.17234806171331019, 0.26868955082890489, 0.98424717569008591]]],
[[[-0.54543833436541767, 0.13415007696775616, 0.13534722231297658, -0.72252620207541485], [0.72328095946739612,
-0.13862718470167823, 0.044552606563023067, -0.69905432254503375], [-0.085225036679073551, 0.98164131149719402,
0.76464095979981717, -0.86551052241781756]], [[0.62239954920343643, -0.70181949407386546, -0.8168814556558559,
-0.13547995725989515], [0.82528036114158154, 0.87633827050160984, -0.080143289194266831, 0.83348769953227908],
[0.32539673255685431, 0.93803075579628348, -0.27823622244460522, -0.39447875543393573]]], [[[-0.59356695628956446,
0.42068072139579571, -0.2966816048473806, -0.7687956041808558], [-0.71878760019674037, 0.97463077881539117,
-0.31728131209986821, -0.73484475981245589], [-0.12727988625513686, -0.94231248176633886, -0.77734468240253696,
-0.90444340163288839]], [[-0.18269489759149571, 0.29439735776753762, -0.27185087948144659, -0.62048852806225785],
[-0.98053876278297158, -0.26218270695695034, -0.68261964802157793, -0.019759295107996899], [-0.089430250213055507,
0.74948889896974036, -0.75852786166420638, 0.35370400678788205]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.1619684522755932, -0.10284993380175211, 0.25821458903539396,
-0.82364767575665576], [-0.36584913630709737, -0.72676741961971969, -0.97076126343096136, 0.91364000367151532],
[-0.77168739033605294, 0.39113009993279912, 0.48535347425417696, -0.52215067436842233]], [[-0.84535167107707998,
-0.87641054599151036, -0.4118154689589868, -0.72256509448603845], [-0.81670007180109772, -0.19694267578855462,
-0.27574844635108997, 0.95030776906209558], [0.76565743374572093, 0.46912480629116415, 0.50374256986893307,
0.093162354426354455]]], [[[-0.73310899789237172, 0.2875723021943497, -0.81245888640465547, 0.58799309157190205],
[-0.90714187533421775, -0.073098886394030282, 0.67818356310455719, 0.26455797708589723], [-0.66652933651616353,
-0.23032652509234064, 0.22892282028568234, 0.44787613065694942]], [[-0.44317080986172785, 0.31925123670093725,
-0.18287302844160769, 0.89497686805988264], [-0.30669668515287685, -0.75162807334334503, 0.61189143325364581,
0.053990654942066563], [-0.11090521653913066, -0.24677109143944231, -0.70304586767346078, 0.54736846949774476]]],
[[[-0.59873211647357749, -0.31774376245292935, 0.43070766306328645, -0.016079803275450555], [0.5261614333552953,
0.6797542937501353, -0.68473148542300333, -0.41052525916288962], [0.68024025081334183, -0.40577339681915869,
0.4087295894868801, 0.61958252572202288]], [[-0.72011815326102413, -0.10455144248350179, 0.75334594465757676,
-0.56149938171962099], [0.16913646946275085, -0.45034525250556334, -0.36739372725079256, -0.041181300029745849],
[-0.64556849960251772, -0.040795848776974841, 0.85515343427832669, 0.6096018411341606]]]]))
res=acos(arg)
ref=Data(numpy.array([[[[1.3957093511629617, 1.360471526581287, 2.8441407383080222, 1.7200066442732735],
[1.7490700866368647, 1.6465070213809823, 2.8373649283858922, 0.85599770843850642], [2.2311721630105854, 2.1187311401011293,
0.97061150484655345, 0.90040872753615897]], [[0.89086421992331588, 1.3322913334783058, 1.7316949814428857, 0.9813289023048628],
[2.495962524203899, 1.5252088542888533, 1.2010328242314052, 2.8429148515570239], [0.64657435379029982, 1.3975834206961624,
1.2987640322677911, 0.1777321440297753]]], [[[2.1477083304186579, 1.4362405897686086, 1.4350324256625075, 2.3782457505851391],
[0.76225455373160755, 1.7098714069908214, 1.5262289680482763, 2.3448704680068988], [1.6561248714124477, 0.19191216072142248,
0.70031230156130786, 2.616965031785782]], [[0.89899161244489156, 2.3487448168313412, 2.5267798820772609, 1.7066941968405251],
[0.60009784920496412, 0.50258924678280592, 1.6510256575906948, 0.5854062256837147], [1.2393650626053583, 0.3538928296356465,
1.8527536586887519, 1.9762968634509979]]], [[[2.2062801487665875, 1.136600790119608, 1.8720122511782569, 2.4477519846112199],
[2.3728531862116133, 0.22573089755729248, 1.8936576221249246, 2.3962340910120021], [1.6984224020679184, 2.8002691893334375,
2.4612300776159772, 2.7008694873643462]], [[1.7545231138284718, 1.2719714573571184, 1.8461121498570447, 2.2401618278670212],
[2.9439834930659066, 1.8360796668105717, 2.3221377460874395, 1.5905569078982662], [1.6603462151776323, 0.72350662179783332,
2.4318473355004873, 1.2092681825027514]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.7334814462425481, 1.673828455510769, 1.3096226655494099, 2.5386097574192203],
[1.9453413548263998, 2.3844003298346768, 2.8991783717530111, 0.41864650952806914], [2.4522863458860922, 1.1689371321766686,
1.0640289187834875, 2.1201670767225766]], [[2.5780194523086384, 2.6391534710101032, 1.9952417407117879, 2.3783020085117652],
[2.526465489342848, 1.7690348671872353, 1.8501645764491155, 0.31657329697684766], [0.69873356468677461, 1.0824968212721804,
1.0428705855020852, 1.4774986804685288]]], [[[2.3936783941171464, 1.279105226241779, 2.5191536430590338, 0.94222086861618171],
[2.7072381573266595, 1.6439604702314694, 0.82550822412311597, 1.3030507874849933], [2.3003397504688494, 1.8032095431448985,
1.3398253531931346, 1.1064078436784477]], [[2.0299290447557947, 1.2458570544086836, 1.7547042970540918, 0.46241697896834122],
[1.8825168384735331, 2.4213232683150521, 0.91234657301380784, 1.5167794070081579], [1.6819301661171466, 1.8201432078142425,
2.3504678570451745, 0.99157974394862303]]], [[[2.2127135213118359, 1.8941453082545316, 1.125519574857567, 1.5868768230835029],
[1.0167160158190489, 0.82336874945488769, 2.3250315257063354, 1.9938263516346553], [0.82270597341644369, 1.9886211901601889,
1.149734692650326, 0.90258559607088251]], [[2.3747689167871977, 1.6755391877344084, 0.71766104149909937, 2.1669930068530197],
[1.4008428772515369, 2.0379483121019257, 1.9470015447043691, 1.6119892756074885], [2.2725637754510268, 1.6116035001525102,
0.54494956853218945, 0.91523810991933396]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
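   # atan() on tagged Data, ranks 0-4. Arguments are unrestricted reals; for
   # the large-magnitude inputs used here the references approach +/-pi/2, as
   # expected for the arctangent.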
def test_atan_taggedData_rank0(self):
arg=Data(-14.7254534244,self.functionspace)
arg.setTaggedValue(1,-30.821724654)
res=atan(arg)
ref=Data(-1.50299080856,self.functionspace)
ref.setTaggedValue(1,-1.53836305618)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank1(self):
arg=Data(numpy.array([79.645526314923757, 31.251712111300833]),self.functionspace)
arg.setTaggedValue(1,numpy.array([17.802002898972049, -11.766456436013172]))
res=atan(arg)
ref=Data(numpy.array([1.5582413534867612, 1.5388089940682592]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.5146818449820139, -1.4860127074426057]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank2(self):
arg=Data(numpy.array([[-51.014136430165856, -10.355356517133202, 28.251309295456821, 13.133655639607198,
-4.4836594247888968], [67.588862677322936, -35.526211499854085, 57.141721412265554, 3.5594116807501166, 24.644697631626315],
[-59.211703959261456, -72.046541293224493, -68.738506813922058, 36.183945043854038, 40.813283481240802], [-84.224967550292789,
2.382009962300117, -97.667999664168718, -17.641085801592737, -0.094387632167098445]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[71.722254489170183, -77.163739464663479, -62.935449615094207, 71.421618282216969,
-92.086755719577582], [86.1279234487221, -75.927119138293932, -53.426711624869739, 78.243007256108257, -93.228905473462987],
[51.146224478610861, 18.135315836848292, 25.055391871257115, -63.299860388548915, 43.155037911250076], [82.260960323640546,
-59.034950659315768, 43.419415178367842, -96.721347394404589, 69.939661568517351]]))
res=atan(arg)
ref=Data(numpy.array([[-1.5511964273421315, -1.4745264598757113, 1.5354145091343874, 1.494802690678932,
-1.3513557426844944], [1.5560020692406058, -1.542655526646358, 1.553297765207124, 1.2969112580541855, 1.5302418945935552],
[-1.5539093791983924, -1.5569173012079685, -1.5562494664061093, 1.5431667932509703, 1.5462994014411897], [-1.5589239207380323,
1.1733268586785741, -1.5605579164505456, -1.5141710727583415, -0.094108820426104287]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.5568545563458982, -1.5578375973438927, -1.55490836758775, 1.5567958786717648,
-1.5599374287193295], [1.5591862121193236, -1.5576265635211364, -1.5520812830291499, 1.5580163277677324, -1.5600704510968921],
[1.5512470322905925, 1.5157110808099072, 1.5309059297790453, -1.5549998176728896, 1.5476282073492109], [1.5586404907725899,
-1.5538588285718626, 1.5477692261183333, -1.5604577159532746, 1.5564992621352502]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank3(self):
arg=Data(numpy.array([[[-51.439521199073958, -42.19343667612452], [-57.017857592051982, 42.357194430644654]],
[[-94.668069377568997, -97.267849956585323], [95.97751393208145, 72.083118529080309]], [[13.931243087317895,
48.314113039294057], [16.667854995256334, 7.2354641537036457]], [[-35.24080770006239, 6.1151782689095882], [7.2809362943481659,
-47.744411262592919]], [[-20.3055738368638, 38.892625438958902], [13.233199681154034, -53.915986490531978]],
[[-94.858405410928896, 99.82234948258116], [48.503882060032311, 30.917187302988793]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[91.878551047153621, -29.510183345226608], [-83.224925312144791, 83.845227550590039]],
[[88.225493436926286, 11.532409624377109], [2.5458204819547916, -60.296569325231573]], [[54.038333175852074,
-4.1384279748112789], [23.760565206323776, 21.555672180440922]], [[-33.836072987446173, -77.826802360528504],
[42.386301929980959, -28.853499934767314]], [[-92.851208362115315, 51.187485401450829], [-17.065728694813885,
53.278176745218417]], [[-99.442133011258861, -62.662157473770883], [-58.469826126068639, 17.824111168192687]]]))
res=atan(arg)
ref=Data(numpy.array([[[-1.5513584698690774, -1.5471003942988308], [-1.5532597596608479, 1.5471919713913882]],
[[-1.5602334958935613, -1.5605157996684915], [1.5603775966626152, 1.5569243429573056]], [[1.4991381283805512,
1.5501013957909033], [1.5108724340083037, 1.433458236465541]], [[-1.5424277452541746, 1.4087034902352189], [1.4343053177252822,
-1.5498545289386529]], [[-1.5215885210143809, 1.5450901753003594], [1.4953721636022326, -1.5522510785229557]],
[[-1.5602546889363347, 1.5607788652232746], [1.5501823406963127, 1.5384631301370586]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.559912823466391, -1.5369226810631742], [-1.5587812739091436, 1.5588701549951265]],
[[1.5594622198376147, 1.4843005338925794], [1.1965116310113704, -1.554213155607542]], [[1.5522930567093209,
-1.3337034661793217], [1.5287346084657718, 1.5244380631272727]], [[-1.5412506699602671, -1.5579479899433375],
[1.547208174948046, -1.5361523528598342]], [[-1.5600268240968616, 1.5512627864671464], [-1.5122662848001094,
1.5520291193633591]], [[-1.5607405660888007, -1.5548390862365009], [-1.5536951554558593, 1.5147513012507214]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atan_taggedData_rank4(self):
arg=Data(numpy.array([[[[-60.786516054960018, -97.443926672116945, -59.730786105649635, -60.552672183608202],
[0.067178496327429116, 76.097886671263439, -10.611506241697001, 62.520570069561273], [98.734673799525353, 84.863778955168158,
38.252515267672806, -5.1350082880344985]], [[77.589460335333513, 84.577328211722374, 67.229876917205758, -27.884169005126182],
[-55.428762309397705, -42.909045438315019, 5.4478832792561178, 98.719071343277363], [50.041890457414098, -61.217337389541605,
-31.625752675276402, 68.601618873426759]]], [[[-25.801223779222397, -29.445798766093745, -44.798995576077047,
-13.060782989795968], [-24.868508822871931, 54.456897228862431, -69.005823306352426, -38.013099364990332], [52.520288283056431,
75.090539051454812, 88.11571157196164, 29.940468771848515]], [[-69.126425092990985, 78.527356119425946, -53.114343044690806,
83.848543031621091], [-33.685382028364685, 20.306687112361828, 32.650151957007466, -81.059070647169236], [83.469680236467951,
97.861914954668038, -43.032363052419306, 45.804898833159399]]], [[[72.4991519048626, -92.751679859513942, 26.266552717005069,
26.556036707565454], [-65.853696462593632, 49.53468229171304, -4.1588537709789364, -98.080606358862553], [-75.680355306374167,
28.449553815517618, -86.195746809070101, -79.665759321116923]], [[-3.0262101017673757, -86.441575573369178,
-58.423640738004678, 16.659518568604952], [61.02394512053786, -82.308850756533687, -63.48331127418183, 77.928338187268736],
[13.711361913844101, -40.319664743955784, -20.625042794109746, -40.404672515921902]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[34.440676365049114, -8.928215565852156, 82.549857592202045, -23.402816990850781],
[81.444943705518057, 24.836590977242707, 12.529228281117838, -30.233298854898962], [-27.744182872849407, 52.425228768719734,
61.267860951786759, -33.849170571356012]], [[-31.944123108883531, -77.558579126251928, -86.569996998801585,
-22.877811832681274], [-68.796333830065777, 38.3887156141069, 55.836989989650135, -88.366850454930685], [30.300995863327671,
4.9007881921832848, -27.507754163421041, 76.045942075681637]]], [[[64.072551296634714, 74.888616072380785, -33.075348532233932,
-32.445757899913886], [42.326950917626533, 85.004838459602013, -52.797797801406674, 61.710543752406068], [95.848068180331182,
-60.15064788445914, 16.670945317241291, -65.666197079729741]], [[76.675531866034845, -46.804170258023724, -79.050686910748411,
20.250281649863666], [44.623682968710199, -23.035506964204686, -54.724272963446111, 40.878243061765005], [53.368726075681678,
-99.662799699787371, -0.39118677248215761, 29.872163016238545]]], [[[23.227160380259122, -40.824445575015567,
9.9914772245777641, -70.767759555201451], [43.089621482280933, -84.683624327833257, -72.192063858769018, 55.872440414467803],
[-58.127478581268498, 34.87567327109511, 10.580861885981534, -58.73857948325918]], [[77.723769033663018, -18.880641278910758,
-80.850165897272433, -58.112001436928253], [53.595081958851097, 5.6247875895140282, -29.124774133157288, -98.409076696537625],
[46.599025926989782, -63.970578322466842, -22.107674051037463, 45.27636423071533]]]]))
res=atan(arg)
ref=Data(numpy.array([[[[-1.554346793780754, -1.5605343748051601, -1.5540561053483741, -1.5542832800750914],
[0.06707771134963493, 1.5576561137349634, -1.4768364848555173, 1.5548029547769511], [1.5606685188986997, 1.5590132818747466,
1.5446602078043219, -1.3784619450341296]], [[1.5579086919105407, 1.5589733781788726, 1.5559230843172502, -1.5349490465472442],
[-1.5527571087994305, -1.5474954351984298, 1.3892596545420326, 1.560666918317682], [1.5508157282893953, -1.5544625378689159,
-1.5391870576196998, 1.5562204436506224]]], [[[-1.53205786478674, -1.5368486716248135, -1.5484781040920352,
-1.4943803282798005], [-1.5306064810252038, 1.5524352435915465, -1.5563058104960985, -1.5444956717247109], [1.5517583661301895,
1.5574798570707853, 1.5594481000782767, 1.5374091272945045]], [[-1.5563310879232202, 1.5580625993455297, -1.5519712446798413,
1.5588706265268419], [-1.5411185754107688, 1.5215912143863453, 1.540178169450183, -1.5584602703189787], [1.5588165018096578,
1.5605782026465265, -1.547562184377139, 1.5489680683305043]]], [[[1.5570039366370194, -1.5600152686245654, 1.5327434733828007,
1.5331578889109807], [-1.5556123173747718, 1.550611193130053, -1.3348250771073642, -1.5606009845295099], [-1.5575836270993995,
1.5356608560533649, -1.559195346750025, -1.5582445417846158]], [[-1.2516463284150974, -1.5592283355033394, -1.5536816391795429,
1.5108425198654833], [1.5544107833647154, -1.5586475628061378, -1.5550454580714337, 1.5579647286965086], [1.4979930117462867,
-1.5459996168270638, -1.5223495175888366, -1.5460517655438717]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.541769047752825, -1.4592567339075215, 1.5586830280198429, -1.5280924056594469],
[1.5585187106853997, 1.5305548882051436, 1.4911517816513042, -1.5377322682685537], [-1.5347683314855143, 1.5517238541043337,
1.5544760048742774, -1.5412620956961551]], [[-1.539501884061409, -1.5579035610577179, -1.5592454944007521, -1.527113658719671],
[-1.5562616921013832, 1.544752894838392, 1.5528889663387913, -1.5594803490192568], [1.5378060847306976, 1.3695107286356141,
-1.5344589427920325, 1.5576471391021702]]], [[[1.5551902865231184, 1.5574439559923015, -1.5405715366363946,
-1.5399854101404904], [1.5471751118445947, 1.5590328331973433, -1.5518584072127872, 1.5545930587248358], [1.5603635268687615,
-1.5541729333474901, 1.5108835156383955, -1.5555689685427048]], [[1.5577550959903859, -1.5494339594471369, -1.5581468900546853,
1.5214543797077005], [1.5483904520035858, -1.5274123226048211, -1.5525249334605411, 1.5463383139491345], [1.5520609537393462,
-1.5607628293839917, -0.37288575485185699, 1.5373328409638529]]], [[[1.5277698519717662, -1.5463060966366591,
1.4710432191222749, -1.5566665386668717], [1.5475930473251533, -1.5589882171355174, -1.5569452745436581, 1.552900325927786],
[-1.5535944562222297, 1.5421308993211618, 1.4765659635152497, -1.5537733852560058]], [[1.5579309596200162, -1.5178814663571647,
-1.5584283990382004, -1.5535898757003221], [1.552140063159527, 1.3948501935545574, -1.5364747794967917, -1.5606350122506203],
[1.5493399440355367, -1.5551654135988773, -1.5255939766631132, 1.5487133380455611]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
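   # sinh() on tagged Data, ranks 0-4. Hyperbolic sine grows exponentially, so
   # the reference values are much larger in magnitude than the arguments for
   # |x| > 1; the sign of the result follows the sign of the argument.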
def test_sinh_taggedData_rank0(self):
arg=Data(1.99198759591,self.functionspace)
arg.setTaggedValue(1,2.30052765476)
res=sinh(arg)
ref=Data(3.59683227352,self.functionspace)
ref.setTaggedValue(1,4.93962040641)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank1(self):
arg=Data(numpy.array([3.7121592231366236, -4.5205237974298704]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.30788026299313653, 2.5568342424834434]))
res=sinh(arg)
ref=Data(numpy.array([20.45884479360328, -45.93641525360411]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.31276737629464152, 6.408690275759203]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank2(self):
arg=Data(numpy.array([[-4.8144899441545528, 0.78267245526398277, -3.9902900754326529, 3.5628490203251033,
2.1183518970045636], [-2.5070220439451694, 2.22072356217594, -2.3873817066715231, -3.8412251092257801, -4.7972608158325922],
[-3.1935509834404807, -4.3564582503990001, -2.9952608734330868, 2.2478968274432152, -3.5759483501851563], [-1.3426146737839293,
4.3069146426030791, -4.4619207942108039, -0.17652673360272075, -3.3503263854852361]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-1.0103477084057824, -1.4588465915190465, 2.8809504601572611, 2.3288760653961047,
2.2303693881343687], [1.0183167542036395, 1.93447626412806, 3.7104269993832428, 4.3335802861769874, 4.3921380450277265],
[-1.3774340986519782, 0.92228142402738822, -2.5941241432249171, -3.3237064911008796, 4.6346105466494585], [1.0666499877056488,
-0.89076238705677468, 4.5302531678878104, 4.5060336794272828, 2.0384873019197407]]))
res=sinh(arg)
ref=Data(numpy.array([[-61.637901615656197, 0.86506368613794438, -27.026038642285407, 17.617581367615905,
4.0985943037096586], [-6.093415206821609, 4.5527324006135492, -5.3965433394599067, -23.280520435716621, -60.584890612216782],
[-12.166901315420557, -38.983816886070187, -9.9702753219255733, 4.6810906645722357, -17.850249501178922], [-1.7839401011571518,
37.098846158833695, -43.321126637656903, -0.17744497495638895, -14.238482567850086]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-1.1912317449878338, -2.0342457843344053, 8.8875657756020434, 5.0844955627700941,
4.5979066282350889], [1.2036641503921339, 3.3879593528934069, 20.423393864250595, 38.101777842909016, 40.400321013863852],
[-1.8562452941244159, 1.0587053970421214, -6.6550738613616067, -13.863522278520332, 51.489045675912898], [1.2807348782797063,
-1.0133220385263433, 46.385634798441771, 45.275432746886217, 3.7743794349039121]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank3(self):
arg=Data(numpy.array([[[-1.6699752087038435, -0.2455134886023238], [-2.1687245044555992, -1.5692632008931549]],
[[4.5726976745572561, -2.7279776977250503], [-2.2847357293147286, 2.6373859093927754]], [[-0.73793065293643068,
0.38302358868877739], [-3.6068322119333249, -2.7337954547102572]], [[-0.45348176104555638, -2.6490340460279951],
[-4.3886113896165462, 1.8804425676698937]], [[-2.8375230103816538, 1.111346967580932], [4.1139208151096955,
-0.49572589111884202]], [[-2.3299326190666081, -0.62551236912490715], [1.3632704432597551,
-2.2917130664840233]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-2.0562165096337282, 0.35936888959956814], [-3.6372329621674284,
-2.4786029292995906]], [[0.65206578693234096, 4.2616217865668187], [1.5056871818097299, -4.3212094251499877]],
[[4.4704679225004966, -4.3597404599188021], [-0.87623360805733697, -4.4308201122339383]], [[-0.9558721806683721,
0.017485009222809822], [-0.5985209090958632, -3.281153953325652]], [[2.6733102681464143, 0.58838863251234308],
[-3.4875728007909155, -2.8625086273204059]], [[-1.538276838516841, -4.4373864115838035], [-3.3984080066326641,
-2.2757928405839403]]]))
res=sinh(arg)
ref=Data(numpy.array([[[-2.5618921853515806, -0.24798739704939532], [-4.3163983334752745, -2.2974547188559029]],
[[48.39728795997064, -7.6182796518722613], [-4.8606444823509278, 6.9525358015487786]], [[-0.80675026447437204,
0.39245790554232302], [-18.410999897933262, -7.6631103274115127]], [[-0.4691851238549285, -7.0348268024411853],
[-40.258050303704749, 3.2019416421082258]], [[-8.5074259309283864, 1.3546664880209558], [30.584901942778352,
-0.51628045436067105]], [[-5.0899733579461612, -0.66710809345606392], [1.8265668432775661,
-4.8953877347643511]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-3.8442018204801496, 0.36715416750958968], [-18.98012788845724,
-5.9203664863683336]], [[0.69926678187260138, 35.455407944451679], [2.1426925432268189, -37.633167256872468]],
[[43.693083396997459, -39.11202224549578], [-0.99274414975503578, -41.99418680140694]], [[-1.1082307083479541,
0.017485900172098688], [-0.63490086709424287, -13.284436370815889]], [[7.2094126025166956, 0.62293131907508703],
[-16.337945591940521, -8.7241305581397839]], [[-2.2209042584132921, -42.27091867492458], [-14.941504453786477,
-4.816459660325112]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sinh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-3.261368733617156, -1.7097421056887061, 2.4770674176220533, 2.6184686144257991],
[4.2685570141508649, 0.26471561412312994, 1.8677744659878321, 1.2974082786792565], [-3.1644599226939727, -0.98342184660922349,
-1.5818484011953915, 1.7453818193010404]], [[-3.1409423348480905, -1.503135136465966, -4.1317701664086295,
-2.6487331291018479], [-2.9344044174187998, -3.9794616897516244, -1.0636935004548986, 2.4681705131671574], [1.7248384820876259,
-0.61623840444111444, -3.182398118986538, 1.8412600667009382]]], [[[-1.2967881315097731, -3.2351451577555101,
4.2939167700693446, 4.8638089405008476], [2.9825316536154167, 2.5020908411933496, -4.341334514919474, -4.8611901904936659],
[2.5682832345839106, 3.1020556388946119, 4.0616444500999389, 0.58287106482411755]], [[-0.92122010658250453, 3.6178485248091157,
4.1247030396785007, -1.7719429245102813], [-4.2954747345288151, -2.1268789753154813, -2.0943498166080774, -3.5016915119012504],
[4.0796282018855194, -0.70150021612442082, 3.9968340640276256, -2.8002455053291451]]], [[[2.7768169172372295,
3.5354672331727777, -4.9833519421308301, -2.7281177342220486], [-3.6388062639788132, -0.24005900416176473, -3.9369129849748861,
-2.7620876149297002], [3.7327362822865435, 1.1752019535122198, -4.968172850646587, 0.45185628083386664]],
[[0.78465593645643406, -4.8933212754874553, -3.887154034060468, -2.5322616232125039], [-1.0742963974782302, 3.3289574703031288,
1.1899668301054973, -2.9336209382110057], [-2.4653189902719017, 1.4461862760456876, 2.7436930808359143,
4.5571819794183686]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[2.1332145838014913, 1.2177971547397419, -1.9450668430632225, -3.8915509651360161],
[0.34984554597693851, -3.5724187380684516, -0.36417739374160707, -1.4056767503527468], [3.6886009926451795,
-2.3524969636932003, -4.5102454679585238, 4.3770785828462468]], [[2.4748540247021777, -3.1040806961176317, 4.3318809570689503,
4.7424354863417442], [4.4482502719559776, -3.5813275761400054, 1.6790719518347696, 3.3826258086123921], [0.41668209832613634,
3.5649415125773416, 1.6284709294696516, 0.46697670447318185]]], [[[2.4215260841968869, 4.4326478509045142, -4.4189346955673434,
-3.159517367887986], [0.77984444249549512, 0.52106214096843129, 2.2959783381073375, 2.8620825221995148], [-0.83928142017985685,
2.3961087684562008, -1.5912632794131518, -2.6327788717179725]], [[3.9275031010752279, 0.31923128079452301, -2.2762341712323044,
2.3868873445961158], [4.1180987252889008, 0.58883415948539231, -3.5287883781020488, 2.7441939267749778], [4.0184160311880355,
-4.6753857407642982, -0.90603684893730918, 1.3999807565920435]]], [[[3.9197722873955865, 0.8568499678692918,
3.7086105728287162, -0.21167328998860668], [-1.8846910022439625, 4.0109184386500658, -3.7641561030274531,
-0.47027664746013542], [-1.5388852245439333, 2.3534855378811139, 4.2231499372068146, -3.2804922592029619]],
[[-0.05624724006019477, -2.8226259374957232, 2.7214574181168203, -1.3648584776772541], [1.458452045981061, 4.3818228672288857,
-1.779611092507988, -4.0543356587838], [-1.3735137487486937, 4.7330680902546227, 2.3550068942781071, -3.9075745779532935]]]]))
res=sinh(arg)
ref=Data(numpy.array([[[[-13.023440270699259, -2.6733116654343072, 5.9111539019062862, 6.8208952782641425],
[35.7022516817728, 0.26781809379022642, 3.1597027400448123, 1.6932800751791159], [-11.816858393542429, -1.1497800913986027,
-2.329171645463961, 2.7767549168418548]], [[-11.541203334322933, -2.1366650567825722, -31.136015622546896,
-7.0326889417647314], [-9.3785671704044926, -26.734768755711737, -1.2759364890266949, 5.858048933103599], [2.7167063739913448,
-0.65598843191103351, -12.031501841254393, 3.072929944227814]]], [[[-1.6920608694436352, -12.685352743257289,
36.619585226109336, 64.754431865133057], [9.8435290579275119, 6.0630393584281839, -38.398477943694964, -64.585057821227878],
[6.4833728384402045, 11.099336246706669, 29.026252346926242, 0.61644023180207441]], [[-1.0571603791351021, 18.615241481692724,
30.916635699137355, -2.856134354291088], [-36.67670297233353, -4.1347179890900154, -3.998504686318193, -16.570684097163284],
[29.553285005086391, -0.7604675809797572, 27.20359770161199, -8.1939446899377977]]], [[[8.0027790235537211, 17.140948545266145,
-72.977985228256998, -7.6193557153388518], [-19.010054278242894, -0.24237135670329799, -25.620303855164558,
-7.8848508059247102], [20.884691434092481, 1.4650201726547696, -71.878506579845961, 0.46739024222723913]],
[[0.86768804011917233, -66.69419356195327, -24.375691040284792, -6.2512252509030963], [-1.2931970487315634, 13.936699944900679,
1.4913704109166104, -9.3711804841265494], [-5.841126728603502, 2.0057102769093333, 7.7399766017332157,
47.652005601622172]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[4.161752367783798, 1.541926632612777, -3.4255608402399025, -24.483195400361897],
[0.3570257223960186, -17.787257314136472, -0.37228079161732119, -1.9165423642645187], [19.981928064707756, -5.2083266003894222,
-45.466571941208194, 39.796285782641952]], [[5.8978987635397626, -11.121926851445268, 38.037063069187369, 57.352263445220188],
[42.732776058437857, -17.946679307528704, 2.5870158831439984, 14.707018154821265], [0.42884487385080838, 17.654543955312313,
2.4499235521097709, 0.48413477435416613]]], [[[5.5871242630376541, 42.071033145542899, -41.497881296562845,
-11.758388262639029], [0.86132781108094103, 0.54496286536137339, 4.9167436772901087, 8.7203896119493791],
[-0.94134111336199311, 5.4446471750107008, -2.3531397442848156, -6.9202492513096638]], [[25.380166933590768,
0.32468104508626694, -4.8186311121891992, 5.3938307354249373], [30.71301849893301, 0.62345627965547068, -17.026653430917108,
7.7438863291025068], [27.797482713465271, -53.632308557373527, -1.0351868092899348, 1.9042601111874786]]],
[[[25.184561827402003, 0.96561560781209588, 20.386285498772896, -0.21325752577179305], [-3.2162218030768943,
27.589713049238945, -21.552054166525547, -0.48780375093616124], [-2.2223864880448345, 5.21357200785015, 34.116735473841551,
-13.275624175244463]], [[-0.05627690347203064, -8.3807569501370018, 7.5683418170849297, -1.8298760541851917],
[2.0333516065142763, 39.985598858890903, -2.8794234783627348, -28.814753045485492], [-1.8479935964675063, 56.817448854328696,
5.2216543312111838, -24.87899171972731]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
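   # cosh() on tagged Data, ranks 0-4. Hyperbolic cosine is even, so the
   # references are always >= 1 and independent of the sign of the argument.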
def test_cosh_taggedData_rank0(self):
arg=Data(-3.33964228765,self.functionspace)
arg.setTaggedValue(1,4.45561542511)
res=cosh(arg)
ref=Data(14.1222419084,self.functionspace)
ref.setTaggedValue(1,43.0603707159)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank1(self):
arg=Data(numpy.array([-0.13877438581352841, -0.98259382848166865]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-4.3574558223327253, -1.677224337266833]))
res=cosh(arg)
ref=Data(numpy.array([1.0096446284726606, 1.5228576115054628]),self.functionspace)
ref.setTaggedValue(1,numpy.array([39.035549183605326, 2.7687878265704762]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank2(self):
arg=Data(numpy.array([[3.6339325575992536, 3.6003092610162302, 1.9584137399530954, 1.0694664510664698,
-0.37917195236293022], [2.8785993252032664, -3.0900484176532297, 3.1752833068489892, -3.5111549864703031, 4.255322840622437],
[2.0391630012792916, -4.3327016874450539, -2.6982102068084535, -3.0441546421582411, 4.214997939810214], [4.805038197601009,
-3.2837195225291618, -2.531898818520947, -2.9915602053560839, 0.95310199737158996]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.23915076781097699, 3.7715468041843607, -4.3893406000801383, 3.9409596096136763,
-4.4591106172531809], [-0.79570459044341924, -4.9855183758363273, -2.1377195897080261, -1.2985129868699143,
2.5687675010839595], [-0.72076603643646031, -1.2171966834491519, -0.70239230129778285, -4.1634325077059406,
0.14043642198635808], [-2.1962284423656131, -0.68365662414911732, -1.1633302866935447, 4.5948451769437177,
-0.4700634110282973]]))
res=cosh(arg)
ref=Data(numpy.array([[18.943914275341193, 18.318434937398699, 3.6145783248028835, 1.6285079976630699,
1.0727510814321364], [8.9227760436425374, 11.011820915712233, 11.987690862915786, 16.758392401505439, 35.246877846320565],
[3.9071562261311463, 38.081436864226781, 7.460224902554307, 10.5199571391222, 33.854399380223668], [61.066172085048954,
13.356147253406316, 6.3284367588194961, 9.983470747464299, 1.4896430948431345]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0287330987029193, 21.735116828944143, 40.299835565663813, 25.743698163829791,
43.211097722447775], [1.3336324938705348, 73.143109653419216, 4.2990004786149294, 1.9683908914628332, 6.5631803398985999],
[1.2711935565261079, 1.8368823113377528, 1.2569873598703747, 32.153690276380928, 1.0098774121609404], [4.5511305484919777,
1.2429382704244676, 1.756509148044366, 49.491457436632956, 1.1125291456069766]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank3(self):
arg=Data(numpy.array([[[2.2594094214565343, -3.3290383685707656], [4.5122363492198989, 2.3616061614668551]],
[[-0.78315440920111534, -4.9445078035481647], [-2.5870583922278079, 0.54336796085071803]], [[-2.2564960296248815,
-1.0922215776887101], [1.7220877741473926, -1.2680128955602719]], [[-4.7968660202901106, 1.8820956661735808],
[-0.55071124481399369, -2.392255525132474]], [[-1.9561751623976353, 3.6639866209171164], [1.6308692498233102,
1.3156981063393305]], [[0.93858824856890788, -0.37594139126048809], [3.0401317577773099,
3.3062633851984806]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[4.5082410257474734, 2.7799840957038571], [2.9998965498752392, 0.8376503607327983]],
[[-3.0742571047141567, -2.6501688991516961], [4.8047153237939639, 3.6547840727268941]], [[1.4892709697169861,
3.1091178911469477], [0.84905211092721178, -4.0113192232287229]], [[0.57916069108996293, -3.8200016353254016],
[-4.3902497028351064, 1.1074094810753987]], [[1.0867920869099219, -4.9751281118846391], [4.6498844446615237,
-4.6057390252989627]], [[4.2083455246221426, -3.299687693396459], [-4.8051197517458437, -3.7372411259947755]]]))
res=cosh(arg)
ref=Data(numpy.array([[[4.8409217004581384, 13.973657885406432], [45.568176435498991, 5.3511222948482491]],
[[1.3226633378307002, 70.204425514379224], [6.6829296515419099, 1.1512924649961667]], [[4.8271429171721767,
1.6581796951312628], [2.8874462830250218, 1.9175870101400594]], [[60.569229009714817, 3.3597619421386007], [1.1555129136535931,
5.5147803140711771]], [[3.606811684222702, 19.521103835073294], [2.6520362629520506, 1.9978195651943991]],
[[1.4737748024949511, 1.0715021754303207], [10.477913216116429, 13.659820812115026]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[45.386523884584705, 8.090402041223733], [10.066625699236537, 1.371828409565327]],
[[10.84001412211161, 7.1135343667655935], [61.046461244057511, 19.342519863210843]], [[2.3296994698881761, 11.222957817551869],
[1.3826252214777885, 27.61888954682524]], [[1.1724542565995428, 22.813105331832602], [40.336477634532706, 1.6784609004147599]],
[[1.6510224629867831, 72.387139007404855], [52.291231814415738, 50.03344721314393]], [[33.630031580706699, 13.57053373731201],
[61.071151819778152, 21.002913453619939]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_cosh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-4.2974496482443385, -4.5141752496351151, 1.279224746016018, 3.6686464638653025],
[1.5642465306660078, -3.6550169225724973, -4.783271042866363, 2.3639071234236022], [-4.8837760852087531, -0.92880494563806426,
1.1661108769933435, -2.869160403619524]], [[-3.4776906208283864, -0.40395614262695556, -1.5446529839825116,
-2.7579904120989451], [1.8349405477146661, -3.8378201393977585, 1.3433045012858305, 4.3472141505175408], [0.7181640331745669,
-0.41398944472294108, 4.6769407318346552, -4.8778258871553088]]], [[[-2.4849883440153242, 2.1345431108307942,
-0.09067629606352412, -4.3150106990500738], [4.3118061338163951, 2.4975944032177999, 4.175128675721389, -0.7758441537477232],
[-3.9713262603505806, -3.3159913471560118, -0.18233452053653032, -0.66293502941204352]], [[4.9788464217729853,
0.65529004311657868, 3.3675428303822663, -0.079349711831890879], [-1.8723533081093855, -3.0300430916460952,
0.51874134723845078, 4.5086381057403386], [2.9026067240258424, -3.6918617768589703, -4.0661575918193105,
-3.5278224263153781]]], [[[0.0015231207730606044, -3.4671950612278115, -4.7151737408852421, 1.3355453672089359],
[-3.611921752531714, 1.3179876222783404, 2.4802830245059315, 3.4904987636760065], [1.6663642593810604, 3.9457517334304164,
0.61643625583120443, 2.7089869134481699]], [[1.2885016388929635, -1.4828486104919558, 3.4244574147497708, -2.1274761147070054],
[-4.9722256226904884, 2.1224822895404829, -3.3802787629574036, 4.6747109104337454], [3.8750910639279113, 4.8191435251217936,
2.4467943595773542, -0.015571286302071385]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.43479655428703889, -4.453019927481642, -0.57836799850371357, -2.1495952675388672],
[-4.5343815245793015, 1.5310770099215967, 2.8984213237035359, 3.0780399132153011], [4.5122783435803022, 4.4843493252518059,
1.3586522171022919, -2.3081057748737344]], [[-1.1824987809769425, 2.4884096785953993, -4.5317815425489592,
0.67363953752730943], [-1.9843232485967732, -2.0085682378296066, -4.1607211508996613, -1.0505768606305352],
[-4.4766448951046929, -1.0969734828880795, 2.5602234150431045, 1.6978621896143666]]], [[[4.3644036146205014,
1.5620129800850844, 4.5439913548426514, 2.3331518079278046], [-3.8887166881366895, 0.0069936039297040509, -0.20591644090240813,
4.4383248956821379], [-0.87172659086694004, 0.82903722202787833, 4.1864529892369333, -2.8022144333005317]],
[[4.5115179717289795, -3.6483711729962587, 0.81876939622194733, 1.689971164217746], [-0.33458354116486344, 3.5336789816595413,
-3.3644747866075297, -4.6902428060034644], [4.2804200141624467, -0.56490089533574483, 3.3489598815617807,
-0.009823071629792679]]], [[[4.5748546036038853, -4.5647170547892832, -4.4595799485564624, -3.8442023386090449],
[-2.5666480472200193, -3.9269313339727696, 2.6734901478379864, 0.8905586703061914], [3.0050076872803686, 4.9517454070040685,
-4.3279279631812981, 0.42460718079153104]], [[4.3245850003831627, 4.445343421639647, 3.892492263981266, -2.5689296346773061],
[2.0304529199152448, -0.93601555140498416, 4.4870935497851416, 1.9197064347913448], [-0.7708198383595688, 3.9685457595406746,
-3.4935088288480665, 3.1260055792475629]]]]))
res=cosh(arg)
ref=Data(numpy.array([[[[36.762837994027265, 45.65659302281535, 1.9360527151951246, 19.612161951845756],
[2.494159154100938, 19.347018267235629, 59.751475973278325, 5.3632322912433219], [66.0681102505865, 1.4632538079216522,
1.7605312973779623, 8.8394814942999496]], [[16.207862700025089, 1.0827058308402404, 2.4498655073561295, 7.9157713718384866],
[3.2121923357012863, 23.222853177989286, 2.0463331206435584, 38.637931391546402], [1.2691557688218085, 1.0869245433467289,
53.72509313313838, 65.676204192935117]]], [[[6.0421534482658936, 4.2857410419310309, 1.0041139129568435, 37.413895144702927],
[37.294234611722473, 6.1177528566639348, 32.53179123057128, 1.3163701272671944], [26.536848580189528, 13.79299490344593,
1.0166690434382355, 1.2279079665662052]], [[72.656772164263117, 1.2224961982676807, 14.520819069300805, 1.0031498405789798],
[3.328672363997796, 10.37321902129923, 1.1375906033763266, 45.404545168785155], [9.1382310638755566, 20.072198262374123,
29.174768845073917, 17.039554903078329]]], [[[1.0000011599486689, 16.038965793559022, 55.818581090736359, 2.032541800778572],
[18.532080317128734, 2.001784597279733, 6.0141820470909675, 16.416394943974005], [2.7409109645024752, 25.867268111398666,
1.1960900211712384, 7.5403307581926216]], [[1.9515154764366831, 2.3162337362313838, 15.369273332817901, 4.2563965180112602],
[72.17736078389747, 4.2357887581890381, 14.706498851241967, 53.605449988802455], [24.103919322318855, 61.933518473582737,
5.8189143906114449, 1.0001212349281225]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.0960225692535404, 42.948782682946323, 1.171969429063197, 4.3489579207932341],
[46.588305110529134, 2.4197279528705185, 9.1002935322558294, 10.880922619136969], [45.570089621262504, 44.315276977830948,
2.07397627472012, 5.0774044710830113]], [[1.7845141072786201, 6.0625759919859421, 46.467361591383025, 1.2356061898862878],
[3.7057982822633435, 3.7934099749489825, 32.066670403577341, 1.6045180010307729], [43.975251828592121, 1.6644838293860444,
6.5079975829609102, 2.8226660571902742]]], [[[39.307615406934822, 2.4890619012612207, 47.038065761129225, 5.2036881533245731],
[24.434315230816242, 1.0000244553476394, 1.021275808528771, 42.322434694633777], [1.4046322236615734, 1.363790576586579,
32.902111174866675, 8.2708891230076151]], [[45.535460921925825, 19.219040991386912, 1.3543407276740012, 2.8019246386266583],
[1.0564971895349591, 17.139468791631344, 14.476442601019187, 54.444398838889946], [36.142312618675867, 1.1638449492852287,
14.254111851189526, 1.0000482467560752]]], [[[48.512120216524281, 48.022915117799073, 43.23137737168971, 23.371402546329659],
[6.5494471264698602, 25.385352322226726, 7.2797329080762765, 1.4234584308282261], [10.117954823599094, 70.714328957337898,
37.900142492657835, 1.0915081670348838]], [[37.773699375110674, 42.620437709879383, 24.526665923553246, 6.5642321138931639],
[3.8744058806768513, 1.4709944873492109, 44.437024098405821, 3.4828031871171841], [1.3120856335155002, 26.463217744961234,
16.465792043660247, 11.413343128307044]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
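# tanh tests: same tagged-data pattern as above. For a quick sanity check on the
# rank-0 case, tanh(x) = sinh(x)/cosh(x), so tanh(-2.6745098131) ~= -0.99054,
# in line with the stored reference.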
def test_tanh_taggedData_rank0(self):
arg=Data(-2.6745098131,self.functionspace)
arg.setTaggedValue(1,2.24999429988)
res=tanh(arg)
ref=Data(-0.990539348174,self.functionspace)
ref.setTaggedValue(1,0.978025866982)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank1(self):
arg=Data(numpy.array([4.0429210388498369, -4.553079746235924]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-4.0380504188786954, 4.5601616689616158]))
res=tanh(arg)
ref=Data(numpy.array([0.99938445513124963, -0.99977806439502981]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.99937843153347994, 0.99978118535812588]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank2(self):
arg=Data(numpy.array([[0.94929715636828504, -1.1252080104850992, 3.0775450372379431, 0.39392537807042416,
4.6611413956493752], [-2.2781170875126575, 0.39864122049943695, 1.4807319239075429, 0.31907060217879479, 2.7340061400084306],
[4.8281201559380023, 0.33278204427698199, -0.03019536420573754, 0.85159999608142201, -2.8485402708762573],
[-4.0655536970027297, -2.3543586836588961, -3.0951504374238881, 3.9262504055167966, 4.7776251671833698]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-2.7905907538531682, -1.9238988637436236, 2.4967949227313904, -0.98038029629172208,
-0.53276288893683077], [3.1990612919316312, 4.2720182948609757, -3.544755273211091, -0.63684518647695842, -4.212099692504844],
[4.4910646440702422, 3.6890101388015637, -4.5732677642340178, -3.0827917856459921, -4.8189743425755394], [1.5007530732396921,
-1.3393262317293884, 3.1997937399572649, -0.11051581408317634, 0.6210324333144257]]))
res=tanh(arg)
ref=Data(numpy.array([[0.73946469369799694, -0.80937282834822533, 0.99576369263661357, 0.37473932010645611,
0.99982119683898318], [-0.97921520913008431, 0.37878573799455306, 0.90160502733343728, 0.30866631305972148,
0.99159623814157904], [0.99987195863139711, 0.32101834788909706, -0.030186190576096662, 0.69190442064171198,
-0.99331095049135554], [-0.99941168874938979, -0.98212846610382587, -0.9959099611394906, 0.99922274519998722,
0.99985835320805616]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.99249206904458975, -0.95823735370164276, 0.98652879793628578, -0.75323048485240829,
-0.48749022343925108], [0.9966761738303791, 0.99961067033016027, -0.99833377855485317, -0.56274764873125716,
-0.99956111435622053], [0.99974876146582037, 0.99875110702314973, -0.99978684584692601, -0.99580782119916667,
-0.99986959514911722], [0.90528424625476578, -0.87151032140220963, 0.99668103125140428, -0.11006806410778014,
0.55184645934107424]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank3(self):
arg=Data(numpy.array([[[3.9319419321492326, -4.6773949429983581], [-1.6957074074546696, -0.3229077445540991]],
[[-1.8266060427348019, -1.730201856159189], [1.5347509785248175, 1.5800451406727483]], [[-3.9746856156268384,
-3.378147662479086], [-4.700768864980577, -2.8259083525191455]], [[-4.7178549411356228, 2.3147937121451543],
[-2.5287575330841805, -4.9759550410080289]], [[-1.9889442091701359, 2.0763858673555893], [-0.89324314818143424,
4.8435662338741032]], [[1.0855719509476556, 1.3980856479384425], [-3.3411855727218343,
1.1884422150953995]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.18007476897031971, 2.6390160533098843], [0.66061662665767606, 3.6870086302202214]],
[[1.7676633929476484, -3.1629355019247907], [-2.2255792588360457, 2.7793800480694166]], [[3.7616232526774755,
-3.5469102256069442], [3.3179169097905596, 3.7942072824756714]], [[-3.781196309444411, -1.8379556904559688],
[-3.6304445504488481, 0.19089611625277758]], [[1.6588646948326788, -0.82644991727768691], [-1.3992808319709527,
1.1558232933098402]], [[-0.16027203890771524, -1.1043264438901468], [-4.4435817783439955, 0.75121698416917582]]]))
res=tanh(arg)
ref=Data(numpy.array([[[0.99923153918711738, -0.99982691525909118], [-0.93487029410812394, -0.31213374827028922]],
[[-0.94949297086745355, -0.9390797830900246], [0.91123410731563181, 0.91860894220900358]], [[-0.99929448101174156,
-0.99767564105310336], [-0.99983481973131216, -0.99300230526074806]], [[-0.99984036849946445, 0.98067105796147169],
[-0.98735772705782232, -0.99990473142376457]], [[-0.96323809712209596, 0.96904507128497563], [-0.71299189138499997,
0.99987585339128593]], [[0.79525625932489785, 0.88493715428778319], [-0.99749752737187658,
0.83009511356467591]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.17815326499048401, 0.98984688185870251], [0.5787736291230885,
0.99874610080415804]], [[0.943352763565379, -0.99642757853354436], [-0.97693891859499782, 0.99232248276775581]],
[[0.99891983292877162, -0.9983409384133336], [0.99737847288418935, 0.99898794619179809]], [[-0.99896127879588104,
-0.95059853090699908], [-0.99859601991351088, 0.1886105895864382]], [[0.93006410027832387, -0.67856517259541438],
[-0.88519609925054854, 0.81967404386980236]], [[-0.15891368471195128, -0.80204771283378307], [-0.99972373624583699,
0.63587442674619299]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_tanh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-4.3904786729676273, -1.3677399701000903, 1.2492768693436567, 1.6695091701042077],
[-1.1627795517358663, 0.35660855309170714, -0.46129260277138862, -1.7611848739159894], [-3.7810577291663838,
-2.0467897156040902, 1.1706919709615651, 3.7103119132055848]], [[-1.2077170353024047, -3.0220575428815022, 0.54424950738834799,
-4.0974212221222857], [1.0870071905494978, -3.9509226719566701, 4.9383274385239844, -3.462350798012598], [1.106586239998987,
-0.73756837828936561, -1.1154183434553619, -1.010742353319559]]], [[[4.2742091858943052, -4.4776532415095307,
0.78417239720070508, -2.9515811360954225], [0.26505756362717126, 0.20435778865898513, 4.0715597053665036, -0.5982678836059776],
[2.3077118446636131, -0.87190481734862324, -3.4819139618408279, 3.2206093747921081]], [[-2.1956521025267159,
2.8685733283260415, -0.16719147893383024, 2.4042635129004015], [0.87519664955802856, -3.7014687212979061, 2.638378779605917,
-0.83596449534645867], [2.3208311754025477, 3.8824298417061609, 2.4604804345754143, 3.5948332657865727]]],
[[[-0.26749703618618348, -1.2826804689293789, -3.264731588047407, -1.7231794009474299], [1.3682684482114569,
-2.0116005895625753, -3.1642117538144801, -3.5928937676969785], [-4.1224336582508689, 3.7918402890951022, 2.1360683781019247,
-4.5145324695032318]], [[3.676700537089971, -1.3877142255602957, 2.3563702753938749, 4.6093296257172032],
[-0.079417586476702162, -1.4360928554366206, 2.4988989289519861, -3.5116673714054611], [3.720863735327919, 0.38544618147401621,
1.7572959880964749, 3.4491478912053424]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-2.7769145726094999, 0.11945281953911202, -1.4067552037790296, 1.5589683509896632],
[-2.7510965901641535, -1.0370837248525575, -0.28874003212379407, -0.98700887192113651], [3.1057519393572068,
-1.4003943846167175, 0.70059038875906499, -0.36200211282703698]], [[4.6955874639656408, -0.66345419856759413,
1.7641256430203081, -1.9073904288166155], [4.4353606592905734, -0.36559209725971975, -4.9392722083184717, 4.9226038002143397],
[3.836648384845299, 2.6929475146669537, 2.4329110513061476, 2.8445249965550436]]], [[[-0.89043276203920829, 4.3113456509537027,
1.778020452151754, 0.12369555701304868], [4.7844077059707608, 1.4489307402634841, -3.6503174739201869, -1.99283222133817],
[-4.7433953583709343, 4.6096860898229437, 0.18591799454264546, 3.3160695299285372]], [[-4.624734823596115, 2.2410297338758411,
-0.83950933296629859, -2.4824381853162203], [2.0752791842153231, -3.8967447836233173, 3.083827109819147, -2.2637401638383436],
[0.55648965417526242, 4.7482343482895963, 3.3904464820695814, -2.9209716988816012]]], [[[-1.9149730632634174,
-4.2971583535961386, -0.26052672348999018, -0.71626825945409145], [-1.1898289287067843, 4.7073960353737476,
-1.1931947279614228, -1.5559762703348001], [2.1894866324076894, 1.391085552226893, 1.9956929272477284, -1.7691043597388703]],
[[-3.0697918472745589, -3.5894138807767617, 0.38379736482434179, -0.72520622611928953], [-2.2696826972861492,
-2.7804417698429775, -0.16059887652981075, 1.1741874888349084], [4.9025685475518319, 0.76347964182875927, 3.5862084201560478,
-0.34438423874314417]]]]))
res=tanh(arg)
ref=Data(numpy.array([[[[-0.99969278532403616, -0.87817610848912808, 0.84808073894312919, 0.93148676159885946],
[-0.8219436543648142, 0.3422232651605423, -0.4311371323644399, -0.94263519675608121], [-0.99896099101301683,
-0.96718843060907411, 0.8244938727134159, 0.99880316564106586]], [[-0.83599336463688867, -0.99526766658497667,
0.49619794188256428, -0.99944800485077057], [0.79578320492270638, -0.9992601537435607, 0.99989728568405345,
-0.99803553852335436], [0.80285236632034096, -0.6276737849707098, -0.80596936061049773, -0.7660688874136774]]],
[[[0.99961237222589605, -0.99974193223298102, 0.65509503516962486, -0.99455328260935616], [0.25901992634275139,
0.20155971686626986, 0.99941871123847936, -0.53581588511280043], [0.98039804226660943, -0.70234062733546598,
-0.99811084524004323, 0.99681615203384977]], [[-0.97553386909203443, 0.99357281253397534, -0.16565086856274386,
0.98381234887456548], [0.70400480322945558, -0.99878182267865834, 0.98983399879723288, -0.68366567252371069],
[0.98090083462602384, 0.99915158173096452, 0.98552133873443648, 0.99849245594704916]]], [[[-0.26129428984398761,
-0.85719744491289385, -0.99708467717848859, -0.93824473321643498], [0.87829697173019017, -0.96483807098709573,
-0.99643666929946806, -0.99848660126662192], [-0.99947493196034642, 0.99898314622584328, 0.97248007751598686,
-0.99976027967480008]], [[0.99871999858964711, -0.88266698117705134, 0.98219958347946346, 0.99980167654271335],
[-0.07925104034615385, -0.892908449932727, 0.98658498639005543, -0.99821988625575575], [0.99882814391514707,
0.36742779628281924, 0.94220023134945963, 0.99798302738464895]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.99228467821875421, 0.11888788724678255, -0.88680309929422041,
0.91525316568888726], [-0.99187748567038492, -0.77673384310464333, -0.28097472652808358, -0.7560840082512007],
[0.99599559753350175, -0.88543686564725355, 0.6047423865273871, -0.3469763293343231]], [[0.99983309924459796,
-0.58065757519364325, 0.9429620059078242, -0.95686570094178047], [0.99971915694545332, -0.35013016421316723,
-0.99989747957363229, 0.99989400444715759], [0.99907026835111246, 0.99088029778882336, 0.98470685939018232,
0.99325719931769052]]], [[[-0.71160741539570893, 0.99964011430124544, 0.94448185426893583, 0.12306852263067239],
[0.99986026155085483, 0.89548124193836365, -0.99865069097308279, -0.96351765578941984], [-0.99984831724864787,
0.99980181786870692, 0.18380508703408843, 0.99736878182221311]], [[-0.99980769321539753, 0.97763278263585041,
-0.685549085155853, -0.98613910571125796], [0.96897754473209763, -0.9991755175591378, 0.99581647460748857,
-0.97861537415463118], [0.5053682585729965, 0.99984977804083497, 0.99773205331379355, -0.99421041813147026]]],
[[[-0.95750111372509072, -0.9996297583526631, -0.25478812887481112, -0.61459245431351639], [-0.83052580516350172,
0.99983699446428242, -0.83156705197266667, -0.91476618401113408], [0.97523406991146411, 0.88340949152279369,
0.9637220149745247, -0.94351117775814486]], [[-0.99569763330906691, -0.99847603966904497, 0.36600071186854327,
-0.62012380533737677], [-0.97886535761949167, -0.99233870586149786, -0.15923225196341351, 0.82560995086101829],
[0.99988967114712812, 0.64312197086910849, 0.99846624581148857, -0.33138582492983976]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
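# asinh tests: asinh(x) = log(x + sqrt(x**2 + 1)). For the rank-0 case,
# asinh(46.3645811357) ~= log(92.74) ~= 4.5298, matching the stored reference.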
def test_asinh_taggedData_rank0(self):
arg=Data(46.3645811357,self.functionspace)
arg.setTaggedValue(1,98.4380067047)
res=asinh(arg)
ref=Data(4.52979928711,self.functionspace)
ref.setTaggedValue(1,5.28259995573)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank1(self):
arg=Data(numpy.array([-50.957589662824198, 43.941100766909756]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-31.858501623280745, 39.107585495989866]))
res=asinh(arg)
ref=Data(numpy.array([-4.6242371551287169, 4.4761267522983275]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-4.1546976770753421, 4.3596270535740214]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank2(self):
arg=Data(numpy.array([[-20.169170810618326, -4.4530711308543118, -5.3065110218440452, -8.4088220772265316,
-56.444316808490115], [-33.229801569473778, -44.603828873814734, 39.260385275691903, -60.813530866399979, -67.011560484373405],
[63.34900773972393, 13.17996875841969, -84.621298599133738, -27.161422270695113, 78.248898320973581], [-98.098038498193404,
95.682616010306447, -58.113208847615525, -79.134026237356125, -29.391569621781727]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[17.836298714213882, -77.588227218715232, -61.26367562584587, 19.375142389965802,
89.334409995076243], [2.9065687663115227, 51.893692489828197, 11.895367000745495, -8.1024096735480953, 71.448735058484459],
[-50.921060735037948, 40.334991542461438, -11.902046289316189, 56.33007303532878, -27.166995246623955], [-82.821608578095123,
-91.599639663887103, 86.585921151704355, 48.186701674446084, -3.9531724905915979]]))
res=asinh(arg)
ref=Data(numpy.array([[-3.6979164153723203, -2.1991164930555258, -2.3708439269598305, -2.8259456306618453,
-4.7264802495600335], [-4.1968206188493351, -4.4910925210264905, 4.3635253359028381, -4.8010270839542413, -4.898067997438317],
[4.8418687142690242, 3.2732814095310392, -5.1313680826244967, -3.9952835475913395, 5.0530827588070446], [-5.2791405298139438,
5.2542111175109474, -4.7556141841594481, -5.0643300499069621, -4.0741443379636699]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[3.5751677238153401, -5.0446044112554631, -4.8084008830850919, 3.6578034038105751,
5.1855652488628774], [1.7884793917224828, 4.642437253245169, 3.1710583006908988, -2.7890952467104695, 4.9621763496592148],
[-4.6235201866096052, 4.3905201792510207, -3.17161767446549, 4.724454508591605, -3.9954885674968175], [-5.109872625312927,
-5.2106043127238761, 5.1543177540685887, 4.5683379150373344, -2.0832921903606496]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank3(self):
arg=Data(numpy.array([[[-53.045516481019646, -34.703398617100873], [58.744179081028165, 45.73939652168292]],
[[88.640179862797964, -15.929739850377061], [20.336500323486419, -26.009231077461465]], [[67.483452353018436,
-83.415215077694313], [-43.73819066557256, 0.34649147770160482]], [[94.466567030758256, 57.78821000816265],
[-93.07193931131404, -65.081452487206121]], [[-54.611456218841695, 17.51214150630156], [5.6853926345566492,
38.237862836031212]], [[1.5782708895186488, -79.609362925181571], [47.883885039412519,
99.778654373519828]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-81.045113203624751, 0.65914527304526871], [32.93544022845623, 58.747988317145939]],
[[-12.311575835767854, -70.143366604591705], [-1.6980341384122681, -27.212534038212041]], [[55.458512265543362,
-94.003044543229095], [-62.792580806533628, -27.521709794921676]], [[41.596851570120577, 44.487697223450283],
[2.7831853943152538, -67.591557346139922]], [[47.14957401263112, -88.752613111599388], [91.038711972236257,
18.784281872602193]], [[66.890360146771712, -3.1392983005148949], [-98.753784215323947, -58.363920786326858]]]))
res=asinh(arg)
ref=Data(numpy.array([[[-4.6643863622180026, -4.2401923259759915], [4.7664116866516526, 4.5162266515773712]],
[[5.17776424960676, -3.4623187168170242], [3.7061684380230009, -3.9519680523364986]], [[4.9050844901356578,
-5.1170138356918358], [-4.4714994864581099, 0.33990819970999336]], [[5.24142117780635, 4.7500068099460622],
[-5.2265487748261839, -4.868845799503112]], [[-4.6934746764153612, 3.5568558212420287], [2.4386934192694749,
4.3371443181086304]], [[1.2374103926768709, -5.0703183356413444], [4.5620352151292289,
5.2961265670636051]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-5.0881911926886616, 0.61887608041426068], [4.1879268588038681, 4.7664765196432715]],
[[-3.2053324091778697, -4.9437392306615724], [-1.2998232670923699, -3.997162286806859]], [[4.7088636688405723,
-5.2365026413753837], [-4.8330475059782119, -4.008452214765553]], [[4.4213161140266797, 4.4884861572133286],
[1.7475584270504236, -4.9066849811247062]], [[4.5465845923807562, -5.1790317883473698], [5.2044621654621297,
3.6268753961564348]], [[4.8962579135667461, -1.8615996019862997], [-5.2858025387415069, -4.7599184690473226]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_asinh_taggedData_rank4(self):
arg=Data(numpy.array([[[[32.142378157394234, -7.8682084560468724, -32.972614582663724, 50.856847074540553],
[72.329877464044415, 6.0619145811457571, 71.261710531993657, 70.2065904388474], [61.147646057937493, -26.137436099401938,
48.323697144662191, 29.857105568663485]], [[81.14862167131389, -28.070075464932472, 54.484029947945999, 53.274297598689998],
[51.817829777738496, 55.524654561168546, 31.893469267783274, 98.108247444728335], [25.185957882420567, 56.589702849849886,
29.257428051768414, -49.316002216427599]]], [[[91.093502909783012, 30.593790782804035, -52.906781759597266,
37.807168034506248], [91.33559788100942, 46.479645801342286, 45.285940387630603, 17.009006113589351], [98.990499666054916,
20.732810397625983, -52.586859007443024, -97.39008994479434]], [[60.855541035297279, 43.563415593268758, -10.416755000859922,
19.761378421237396], [45.545393669751689, 34.038254695973365, 61.458790464133983, -93.805588539667809], [70.373745615324566,
-69.821983987919253, -17.526059272214738, 99.463265178516878]]], [[[42.375759778528959, -71.513498720101126,
43.403494376930126, 11.702516371887256], [-68.02507709473943, -82.804863052600837, 17.935644233624799, -1.5560052642727271],
[1.3086438337024902, 19.0159623777798, -43.415467037283427, -1.6840694232704436]], [[-76.523723879344232, 36.460220047753864,
74.414529475659975, -40.585507061813097], [61.18925351487826, 60.973990669294437, -56.486512227103702, -91.992194442103738],
[-50.821095523487195, -8.7683370172323407, 99.212906160042508, -49.787947715823513]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[5.2104342432591579, -30.037610349220742, 89.76520642245714, 84.346276912645891],
[-55.935949781763505, 3.6554505577462351, -69.732922792584205, -85.696618441581251], [34.087801962805088, -57.540358433913227,
-66.932756076465267, -61.739307728871459]], [[-38.745454217109639, 47.2458765604907, -48.024451295756876, 98.938828051951276],
[-18.113719915986181, 30.600562603733465, 62.13859672089356, 79.646004829972981], [62.93949402434626, 85.362178604516401,
-79.088554588305286, -30.549957095115914]]], [[[-21.024971919379709, -46.9921546943443, -77.839828653838069,
30.940535083915421], [70.790958255553022, -44.895476702573319, -36.648852352895432, 12.929335827946304], [-6.9478133681051872,
-62.232340492245108, -42.755472485742985, -56.420558326951522]], [[-32.044278205615356, 79.157959500980951,
-76.393704765628769, -52.443645665174962], [16.265823630600167, -55.034754577520985, -47.645861374723552, -89.04121484500331],
[94.688526939952055, -16.61551229436607, -99.980912127854069, -47.709640655711503]]], [[[2.1087843309750127,
-46.754097185308829, -43.01720776980433, 85.276796349298849], [-4.6606838290411474, -81.34895135365592, -85.417222857880887,
-96.332056825957508], [-79.83581002747087, 21.866282224322433, 68.064610754277766, -47.003477247839534]],
[[-62.743770898030562, 72.147582177197421, 69.296613493157508, 28.171166780459345], [75.529397553659948, -35.058371858520204,
-28.47809790522318, -75.017021702145499], [-37.177757115795629, 38.676084888663922, -63.72524923587919, 1.7494417076027844]]]]))
res=asinh(arg)
ref=Data(numpy.array([[[[4.1635644265624778, -2.7599915358235867, -4.1890544070007278, 4.6222585911216543],
[4.9744322502971672, 2.5019077838324715, 4.9595555706500258, 4.9446400848058811], [4.806505402870445, -3.9568815433193461,
4.5711762859685958, 4.0898503596713338]], [[5.0894674517671321, -4.0281683959008951, 4.6911390175212615, 4.6686892479006827],
[4.6409745686395487, 4.7100554093212761, 4.1557941279942145, 5.2792445878073861], [3.9198276751840293, 4.7290522782152147,
4.0695726016881411, -4.5914985748389192]]], [[[5.2050637903432921, 4.1142112460980922, -4.6617680124018888,
4.3258207506496307], [5.2077177590877843, 4.5322773741285998, 4.5062656785445956, 3.5277531738452392], [5.2881965746419173,
3.7254457616153438, -4.6557038316413522, -5.2718979969231512]], [[4.8017175551692963, 4.4674965942390115, -3.0388587872128419,
3.6775161997905594], [4.5119771682426055, 4.2208479181377072, 4.811580237341091, -5.234400023643297], [4.9470179186672389,
-4.9391473745425918, -3.5576489640531608, 5.2929608319555426]]], [[[4.4398628679964025, -4.96308228583203, 4.4638198136319165,
3.1547715938291749], [-4.9130776200696218, -5.1096704317305806, 3.5807134591217551, -1.225433176641918], [1.0837103112235609,
3.6391165749315824, -4.4640955479804072, -1.2927150793063693]], [[-5.0307906787819405, 4.2895569942507352, 5.0028435354728717,
-4.3967099542129784], [4.8071855230902134, 4.8036618081252804, -4.7272274114351864, -5.214880451554639], [-4.6215554958906369,
-2.8675302639627946, 5.2904406860829596, -4.6010209599440053]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[2.3528942953432184, -4.0958744228371096, 5.1903756488916493, 5.1281129886870076],
[-4.7174343552973603, 2.0075716195155695, -4.937871144778371, -5.1439945873654223], [4.22230186808053, -4.7457092677460411,
-4.8968914542943383, -4.8161335672436891]], [[-4.3503271096734606, 4.5486245418147186, -4.5649658426227173,
5.2876744794258697], [-3.5905779156353059, 4.1144324497476124, 4.8225792420650633, 5.0707784647637046], [4.8353841364967165,
5.1400846170845353, -5.0637553150269641, -4.112778220940899]]], [[[-3.7394231192664891, -4.5432410388930284,
-5.0478416752898454, 4.1254753641580617], [4.9529283477765169, -4.4976082378834796, -4.2947153739876791, 3.2541381655755188],
[-2.6367133653925436, -4.8240865342451933, -4.448781110277845, -4.726059308101533]], [[-4.1605091983900415, 5.0646324196519528,
-5.0290903098032347, -4.6529772433272774], [3.4831569454011433, -4.7011945976992884, -4.5570530599693662, -5.1822780622147535],
[5.2437679039808112, -3.5043882377854798, -5.2981514782154209, -4.5583904818282788]]], [[[1.4912530895747718,
-4.5381634201485017, -4.4548824695960123, 5.1390839500525125], [-2.2436246899234331, -5.0919328992003212, -5.1407291969411935,
-5.2609752679328521], [-5.0731585525994474, 3.7786154625242512, 4.9136585515533389, -4.5434819009655412]],
[[-4.8322699827017939, 4.9719089789160158, 4.9315952755712287, 4.0317610464510798], [5.0177129533122331, -4.2503649608354834,
-4.0425905960208626, -5.0109066456916977], [-4.3090386599181487, 4.3485357163877962, -4.8477895988422537,
1.3256207429919689]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
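# acosh tests: acosh(x) = log(x + sqrt(x**2 - 1)), defined for x >= 1, which is
# why all sample points in this group are greater than 1. For the rank-0 case,
# acosh(49.9810509193) ~= log(99.95) ~= 4.6047, matching the stored reference.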
def test_acosh_taggedData_rank0(self):
arg=Data(49.9810509193,self.functionspace)
arg.setTaggedValue(1,71.3408711101)
res=acosh(arg)
ref=Data(4.60469104168,self.functionspace)
ref.setTaggedValue(1,4.96056744693)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank1(self):
arg=Data(numpy.array([75.872128581964489, 31.270745005346555]),self.functionspace)
arg.setTaggedValue(1,numpy.array([91.194940269901991, 60.292904573535402]))
res=acosh(arg)
ref=Data(numpy.array([5.0221531537701187, 4.1355744181179075]),self.functionspace)
ref.setTaggedValue(1,numpy.array([5.2061165345882037, 4.7922928301529595]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank2(self):
arg=Data(numpy.array([[13.716727126294922, 18.582048298979366, 7.5020529608606203, 37.240476559713919,
47.923636526032062], [23.137297999502238, 93.601586495900719, 44.214564115710346, 36.167402243946711, 46.702642863490553],
[23.270622841679405, 9.2774257115223389, 59.291871515770787, 33.506154158989204, 38.271499005024928], [46.757553911983621,
6.8257457794847447, 22.981256925823288, 86.170385026518829, 23.420848755718815]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[15.948822919888016, 2.6658485927005215, 60.224282793904251, 44.876404405068655,
34.120337847111642], [62.222746267715351, 21.245738679003445, 45.817023654907636, 40.859047475717304, 58.128988691848726],
[10.190092458920921, 48.417808389183413, 42.896938034834868, 70.93329041076818, 8.3231821063895897], [26.439411367064803,
15.072763430534389, 72.467415365655967, 32.34764058755561, 40.90238765596505]]))
res=acosh(arg)
ref=Data(numpy.array([[3.3104318336497132, 3.6146183386321131, 2.7038519866369914, 4.3103631168831464,
4.5626471481294111], [3.834125802828078, 5.2321659777026861, 4.4820735137429342, 4.2811142287291988, 4.5368332971619001],
[3.839876941498396, 2.9178139584744245, 4.7755482825351914, 4.2046535493993629, 4.3376819221817646], [4.5380086345560136,
2.6084392106579743, 3.8273524505590331, 5.1494400678077143, 3.8463177083491402]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[3.4615479170038235, 1.6364755314613311, 4.7911538849173159, 4.4969351619322433,
4.2228259954588951], [4.8238032312056154, 3.7487492070394848, 4.5176837838527817, 4.4031256752655068, 4.7557376697168952],
[3.0121467628386225, 4.5729082091092721, 4.4518117430828532, 4.9548373538850878, 2.8085633470967837], [3.9676451082613573,
3.4049343192673835, 4.9762365895508731, 4.1694492579956304, 4.4041861546123844]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank3(self):
arg=Data(numpy.array([[[96.020179154808503, 91.79778167929949], [95.326949143229541, 45.421316747623791]],
[[30.65219771657458, 74.770295168847696], [77.989358990586055, 11.574100860239977]], [[92.626717442077236, 3.1700861207519435],
[81.107542243865836, 58.693576539606504]], [[19.827981381026582, 98.929766771654783], [93.210281085417222,
17.872534507474096]], [[15.212656462365901, 45.839114797078196], [67.995696601337741, 21.57180672061461]],
[[88.431893439575802, 86.459272754032739], [93.400261681763538, 3.5041690372595453]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[25.302663405742905, 90.965425641453351], [31.884491697764958, 35.880019812296034]],
[[87.641297339838275, 20.377144107642813], [80.276026842238238, 90.841319587541577]], [[14.097491687234964,
99.790641727293078], [14.181879052710332, 69.957347613100836]], [[81.947655870784715, 62.082411711815226], [8.6837333697858377,
15.244370873759896]], [[61.74742196011465, 29.437516030577598], [54.649929929545088, 40.35589353447758]], [[94.022187035702345,
83.335572962817793], [87.379860935581533, 36.951175898939482]]]))
res=acosh(arg)
ref=Data(numpy.array([[[5.2576784330296311, 5.212705644723707], [5.2504322211627388, 4.5090075081620471]],
[[4.1155853549672061, 5.0075231423119817], [5.0496784696098871, 3.1400456206904903]], [[5.2216956660972746,
1.8210457157023874], [5.0888851315296018, 4.7654048945882801]], [[3.6796048228214242, 5.2875318075733615], [5.2279764321968374,
3.5756287593263258]], [[3.4141929052963755, 4.5181659425846679], [4.9125375214849907, 3.7639967265035321]],
[[5.1753479006258116, 5.1527872028336335], [5.2300126684264212, 1.9260894616398991]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[3.9236661269309159, 5.2035964629627127], [4.1550209149744246, 4.2731335198543867]],
[[5.1663669487865587, 3.7069584430952047], [5.0785794154217756, 5.202231127665808]], [[3.3378837578237039, 5.2961964832588198],
[3.3438668793761139, 4.9409818305305739]], [[5.0991906511154443, 4.8215450339501285], [2.8512667866370323,
3.4162799784592983]], [[4.8161338295407301, 4.0751284773769649], [4.694011395692308, 4.3907310833319526]],
[[5.2366496860798568, 5.11598668493777], [5.1633792680707424, 4.3025615032480333]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_acosh_taggedData_rank4(self):
arg=Data(numpy.array([[[[46.496494307641363, 74.917094330056727, 39.893774938569727, 23.744943878587605],
[1.9737426076200388, 56.13429325455833, 31.478338229941585, 76.686853948479268], [43.543067819658987, 81.289887895435285,
32.113423511300105, 5.213549323262523]], [[26.577313488763004, 82.755886663842674, 6.4828955638004224, 81.780421145322038],
[84.79256558820957, 69.233222959376874, 73.836164807553629, 87.692408248293873], [37.136000517418708, 90.288377224446137,
62.614392713419683, 88.339987656018039]]], [[[61.202863958945962, 31.566286842895735, 7.1708278242804298, 98.950695215124099],
[87.222678883207024, 86.95839324301987, 17.656917302211554, 54.991339984079993], [92.159416624775972, 31.425747720223157,
47.207404840689208, 79.815101091507159]], [[13.75432234393317, 36.005105956151937, 80.930354510392675, 17.903169928485063],
[37.209969721856766, 68.392829385096988, 68.225744945843331, 25.131306602144075], [57.726340455843392, 45.183440336464102,
96.487976002311996, 74.482543907471182]]], [[[97.032639801911586, 59.169720141290711, 65.544382023430359, 27.350556781956005],
[85.48226011720655, 8.7268878117714603, 49.450968175354753, 75.078362059466997], [47.954002472987767, 16.036826907987312,
99.975563170888265, 78.829796914932373]], [[39.21420494818117, 42.262998162260104, 73.751675519611155, 51.828252577302301],
[60.148666432515796, 37.443825584849876, 97.665835616597235, 78.975812123743339], [6.9121385596705096, 34.189572613115473,
27.703168010672275, 50.045255814521546]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[76.45018571903698, 24.717910838106601, 78.98873320779046, 62.765160850734503],
[61.239518935391644, 70.008902728343131, 78.300714796360708, 34.932147232071522], [37.022585726034904, 58.180738950315579,
27.287564890515544, 54.517546763840656]], [[15.093623698826033, 30.443962452205266, 89.802715985190773, 77.992879086297037],
[37.816659621995385, 64.854538050781173, 81.326022233556586, 1.9993032471653205], [38.637332121131173, 32.158640232053635,
71.342867154253426, 5.2704550021018708]]], [[[60.437096462714948, 49.090407043277665, 78.184244621340568, 60.917754368590664],
[42.949146499752899, 31.471629405983144, 36.886647249542328, 40.010907031786985], [9.581053748614563, 32.843241151168968,
75.216103702188008, 68.09522545374368]], [[82.504776175599545, 57.609847555036787, 95.669336674553534, 78.017033779006482],
[40.298523228110923, 14.938993210257649, 31.561252137958434, 28.44443377692734], [24.326622031518038, 61.769365476509179,
50.466775790330708, 40.289781067050903]]], [[[13.88323115651615, 6.714972583508235, 97.860470433016005, 75.032728358835342],
[11.04088136407165, 77.052563320907453, 97.427382444573666, 33.006120873883368], [1.7554298156787875, 51.058303561715107,
29.46416973203182, 94.334872484467382]], [[3.5895347426782043, 40.561254020265949, 67.84874109154778, 93.690445556218563],
[25.256475539837954, 56.511124744109935, 3.5800990775641948, 63.00192152079822], [42.748122023741885, 80.763225726336117,
74.43049456512324, 31.553184442256732]]]]))
res=acosh(arg)
ref=Data(numpy.array([[[[4.5324084412759778, 5.0094847279060319, 4.3792103554802608, 3.8600731017873722],
[1.3016637290797095, 4.7208147464351278, 4.1421944195445226, 5.0328349651237305], [4.4667658109698571, 5.0911309744336259,
4.1621787939300265, 2.3350809370517589]], [[3.9728510376447228, 5.109005824071593, 2.5563122244559442, 5.0971476629476182],
[5.1333202761679582, 4.9305758676636238, 4.9949499704360436, 5.1669499996633848], [4.3075527147709547, 5.196125251025741,
4.8300785753238706, 5.1743080104110604]]], [[[4.8072744174301363, 4.1449858695233406, 2.658270592315148, 5.2877433445020525],
[5.1615786939465957, 5.1585438835194486, 3.5634719544667166, 4.7002402176468125], [5.2166376113963704, 4.1405214877183933,
4.5475857430671622, 5.0728206578448036]], [[3.3131761972681315, 4.2766150387385435, 5.0866979727721846, 3.5773440805534538],
[4.3095433003567933, 4.9183617154061148, 4.9159154531733078, 3.916865458190002], [4.7487857259228701, 4.50375535715325,
5.2625387259978202, 5.0036669013457349]]], [[[5.2681680417753087, 4.7734856928164495, 4.8758164840313665, 4.0015496982806926],
[5.1414218364571465, 2.8562570863627581, 4.5940265687942565, 5.01163522143636], [4.5632807172725283, 3.4670614367497063,
5.29804795523276, 5.0603980061109874]], [[4.3620236184007943, 4.4369191414800992, 4.9938049295476628, 4.6409895153027874],
[4.7898973432892191, 4.3158106467268684, 5.2746727815426553, 5.0622487258630153], [2.6211521330965311, 4.2248539437647477,
4.014368049013294, 4.6059750587364245]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[5.0297437651741488, 3.9002658632157274, 5.0624123337133149, 4.8324838701438377],
[4.8078732275406084, 4.9417185850198733, 5.0536631335958884, 4.2463497717069103], [4.3044928897720416, 4.756627671189591,
3.9992423655604172, 4.6915856649381364]], [[3.4063203831311251, 4.1087650332057839, 5.1907313988379462, 5.0497236078916279],
[4.325722057994339, 4.8652346239002213, 5.0915754213748823, 1.3165555330279051], [4.3471986353008933, 4.1635865170827602,
4.9605954282607705, 2.3461398613499078]]], [[[4.7946818261908994, 4.5867070641055383, 5.0521744319245858, 4.8026044713453375],
[4.4530284006885319, 4.1419811640723054, 4.3008130131764508, 4.3821430718817362], [2.9502001551652342, 4.184661309810318,
5.0134683412122536, 4.9140003614485606]], [[5.1059666365457144, 4.7467653633473841, 5.2540177010556652, 5.0500332892131663],
[4.3893080251791545, 3.3959998775424971, 4.1448262804214515, 4.0407905386517378], [3.8842957690757829, 4.8164891884239784,
4.6143642220081729, 4.3890909997782792]]], [[[3.3225293134567675, 2.5918958799586354, 5.276663767488472, 5.0110271683900773],
[3.0926948705710546, 5.0375928992743964, 5.2722281468793524, 4.1896106435456426], [1.1625825203718756, 4.6260194561242551,
4.0760340229896617, 5.2399700114788725]], [[1.9511766454658923, 4.3958084665836674, 4.910373700880009, 5.2331149143207876],
[3.9218376124513488, 4.7275064044247292, 1.9484357218467823, 4.8362494165490313], [4.4483356113795223, 5.0846305854377469,
5.0029677813756557, 4.1445704990230352]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
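# atanh tests: atanh(x) = 0.5*log((1+x)/(1-x)), defined for |x| < 1, which is
# why all sample points in this group lie strictly inside (-1, 1). For the
# rank-0 case, atanh(-0.320619038958) ~= -0.33234, matching the stored reference.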
def test_atanh_taggedData_rank0(self):
arg=Data(-0.320619038958,self.functionspace)
arg.setTaggedValue(1,0.869122682798)
res=atanh(arg)
ref=Data(-0.332336921208,self.functionspace)
ref.setTaggedValue(1,1.32948203584)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank1(self):
arg=Data(numpy.array([-0.49724785679895611, 0.76485832136382981]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.71695223330373481, 0.98907589120670503]))
res=atanh(arg)
ref=Data(numpy.array([-0.5456433240595332, 1.0078187373348622]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.90134518516976136, 2.6022266354573262]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank2(self):
arg=Data(numpy.array([[-0.41875373709407426, -0.031282543674564844, -0.37120980277072957, -0.33787277754808165,
0.9056835178923357], [0.10920509246927712, -0.9517935928864919, -0.38928920166887748, 0.51987390317679982,
-0.38673372014824514], [0.84666821394639546, 0.70139465198953088, 0.65524269199234908, -0.76892126906681368,
0.53641715611532659], [0.8319590120911895, 0.54197223487670665, 0.96505599773867456, 0.18013767879594189,
-0.23629819004673036]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.36214053363992749, -0.664498666560283, -0.18821662111337301, -0.16146935463873657,
0.6826053287306455], [0.94446504653387175, -0.33438894541106456, -0.024588916748005452, -0.85997299749442313,
0.7590303783132617], [-0.17183976558739666, -0.58358085652249014, 0.31083502908173499, 0.85373153758284226,
-0.75382778617691071], [0.02157269345526025, -0.2087677756939843, -0.3645241397483423, 0.076955395055613884,
0.49258045667332828]]))
res=atanh(arg)
ref=Data(numpy.array([[-0.44617979391481238, -0.031292754010403323, -0.38982552275887766, -0.35168921785961199,
1.5029700335665168], [0.10964234311011919, -1.8505060400721478, -0.41096200383131098, 0.57616694042294059,
-0.40795359483226379], [1.2442671095703073, 0.8700403910046729, 0.78443110215353462, -1.017683354133686, 0.59911167917750008],
[1.1944666231886989, 0.60694387161398944, 2.0147645883194851, 0.18212498120425324, -0.24084972556636608]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.37934733814448002, -0.80082666251032752, -0.19048764725210868, -0.1628950195664004,
0.8339763214069672], [1.7778647815902611, -0.34776162471495142, -0.024593874154403211, -1.2932409850373054,
0.99392357017656985], [-0.17356179326165472, -0.66787580856210826, 0.32146948112818524, 1.2697561085057214,
-0.98176231871677033], [0.021576040897969627, -0.21188262231678223, -0.38209346340171296, 0.077107850497316832,
0.53946179405081751]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank3(self):
arg=Data(numpy.array([[[0.10445331614917319, 0.2730814888903883], [-0.60232593544217883, 0.96715501656915182]],
[[-0.17016809723013615, -0.089807528529218916], [0.23654377024927897, 0.83272135685004955]], [[0.016551420278897,
-0.38236850351537788], [-2.0657074242591555e-05, -0.40819212706994223]], [[-0.3729914622085253, 0.62722527860088206],
[0.82747007179222232, 0.25145176276119274]], [[-0.73980019966402311, 0.96693217416513644], [0.90586640577652378,
0.21899534641151908]], [[0.19566248084568705, 0.47149584732702499], [-0.48621869468657664,
-0.79464808240093432]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.12685419941171083, 0.86382397828941637], [0.55687203880422764,
-0.43398285832464556]], [[-0.063925399703885222, 0.92085617372342865], [0.55098040379713153, 0.39890555903637726]],
[[0.58521949389478389, -0.47732531221219043], [-0.52649872740584502, -0.05400171295475209]], [[-0.20264962628344207,
0.89825210951105694], [0.42220448045958414, -0.56965978536278339]], [[-0.1625462217423036, -0.45516235003933736],
[-0.81533961227730445, -0.34509219866705487]], [[-0.13641943291083147, -0.020985590801264165], [0.89028940365120146,
-0.60385493016714553]]]))
res=atanh(arg)
ref=Data(numpy.array([[[0.10483570163371808, 0.2801906138917637], [-0.69678941288133878, 2.0462723010157]],
[[-0.1718397684110442, -0.090050148021531648], [0.24110984512183212, 1.1969481388179688]], [[0.01655293194521994,
-0.40283080913531905], [-2.0657074245538708e-05, -0.43343996902678245]], [[-0.39189351498418051, 0.73682864354852906],
[1.1800585483131474, 0.25696195979910064]], [[-0.95003787955311769, 2.0428347777140869], [1.5039885009533656,
0.22260060180307562]], [[0.19821835350061251, 0.51199204142601529], [-0.5310963428117158,
-1.0839195243854924]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.12754129113092447, 1.3082186161867242], [0.628287702104393,
-0.46479335197476535]], [[-0.064012689955305346, 1.5946297532263998], [0.61978799882113045, 0.42234670222024745]],
[[0.67036433965710951, -0.51951462581386376], [-0.58528868912562182, -0.054054297989210139]], [[-0.20549411284441793,
1.4630953537864373], [0.45037168207904948, -0.64701904247591646]], [[-0.16400091263674657, -0.49119244692959763],
[-1.1427547236990911, -0.35986168723884399]], [[-0.13727527703665104, -0.020988672265444559], [1.4233196317293295,
-0.69919242983311702]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_atanh_taggedData_rank4(self):
arg=Data(numpy.array([[[[-0.47883527804935155, 0.31661906413420882, -0.5872020411014347, 0.43673454125507827],
[0.88136900358406756, 0.94422263261031114, 0.2891277683997282, 0.33076405922761665], [-0.21998818991754054,
0.18048183712899557, -0.25041587475362848, -0.25214869329219225]], [[-0.84430089168638789, 0.76091050674122229,
-0.020508996055177531, 0.29404167593808239], [0.68934167553083725, -0.95776231509069376, -0.88145154914219881,
0.77744088285490931], [0.78737460506857015, -0.4719906631589994, -0.76786773432549871, 0.56997395187593303]]],
[[[-0.78365368219581333, 0.94733757009719777, 0.34808028723294027, -0.036308308933188926], [0.44280850580686337,
-0.46404387448339723, -0.14428462443261891, 0.46397654196715088], [-0.72398981623771386, -0.71386141901887123,
0.11403708557516423, -0.53026185484339128]], [[0.68236920590850803, -0.24036690933114169, 0.24096702828664163,
0.98040177083805702], [0.094472694677199653, 0.6400010790928965, -0.047874513368018401, 0.52630540675289095],
[-0.22171125448934548, 0.70527518354726904, -0.47501571993423464, -0.72243237090033174]]], [[[0.0082387877639482632,
0.68679756821273141, -0.25873096009920205, 0.20529892260052329], [-0.19243878258846681, 0.05455697263932402,
0.16135364518328221, -0.24061386219544423], [-0.34484162854240008, -0.67668499260554094, 0.40134472749143524,
0.50389744806479242]], [[0.20426635215593136, 0.3870127342101326, -0.75330067354803076, 0.71517180319260576],
[-0.59636722535596642, 0.16902679259625208, 0.60854487911843269, 0.6080127077236932], [0.21358767561056036,
0.43831246429727311, 0.87978778079261621, 0.86710279990576811]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.55014926999716374, -0.59690189470907706, -0.67176566257807679,
0.60736951817123375], [0.8190301281645469, -0.55001881887120574, -0.6964571059396939, -0.16068048124821255],
[0.23872416972055821, -0.44608459772892295, 0.59084321623347447, 0.42861986752160219]], [[0.80353157195060887,
0.012109217448764475, -0.59731241459393281, 0.93750011998591587], [0.3427576485459467, -0.72154456903205721,
-0.81933208907246291, 0.80205913114622973], [0.035634874594440369, -0.97323376998325939, 0.67274012944959649,
-0.34467983303558469]]], [[[-0.10479839282841275, -0.64423003763584374, -0.85763880950837013, 0.55412818085007887],
[0.24284448011249649, -0.67346626588488845, -0.5046529367889272, -0.36718085565034209], [0.055975396650420395,
0.47512512598591927, -0.090812732728584811, -0.94658819925651927]], [[0.28524615600658376, -0.29676570986200579,
-0.51954918876546308, 0.48518566577271982], [-0.86630407866681014, 0.5790670951452479, 0.42401975127206515,
-0.92355038400324141], [-0.61578149445748631, -0.15303976680596276, 0.18150698656005404, -0.84959247418575401]]],
[[[0.64224812282493415, 0.16779589620113322, -0.32146937769322403, -0.2795229445390589], [0.41598563468316607,
0.3894916483189903, -0.44123474668060192, 0.6000180351672102], [0.38205455307082259, -0.54806836352622523,
0.0088431283841204911, 0.88616403016408252]], [[0.064301128460620083, 0.2913061121724092, -0.17972507421612294,
-0.49258468089632418], [0.77347160470842447, -0.032497425496647359, -0.58649359459086758, 0.23987952531047596],
[-0.33256196883461886, 0.77464344490024484, -0.64400203692561186, -0.10111214352067044]]]]))
res=atanh(arg)
ref=Data(numpy.array([[[[-0.52147196294207276, 0.32788498573535435, -0.67338487643976419, 0.46818855662601327],
[1.3818685896095428, 1.7756246583188797, 0.29761420443632619, 0.34368593107252376], [-0.22364369829230918, 0.1824807046839074,
-0.25585646084994412, -0.25770606941434526]], [[-1.2359651061495662, 0.99837417850627852, -0.020511872271671897,
0.30298473846351426], [0.84670025891519385, -1.9181222970821421, -1.3822385567527231, 1.0388685705919063], [1.0644856064871606,
-0.51262849467258542, -1.0151110548081639, 0.64748426157864258]]], [[[-1.0547693615321863, 1.8051580368562086,
0.36325771777197419, -0.036324276565210908], [0.47571893391859066, -0.50245268032031365, -0.1452985614521865,
0.50236687302329996], [-0.91597939071280587, -0.89501400057946223, 0.11453530898741597, -0.59050937260928005]],
[[0.83353431857678084, -0.24516348409085417, 0.24580049937095722, 2.3078079021972875], [0.094755268528087466,
0.75817557241805289, -0.047911139382633972, 0.58502126565893153], [-0.22545511450504421, 0.87771983963299549,
-0.51652780344410243, -0.91271406398924038]]], [[[0.0082389741813193099, 0.84186867590027514, -0.26474784084557118,
0.20825842151635696], [-0.19486851141497685, 0.054611198454615352, 0.16277621481062421, -0.24542559638535763],
[-0.35957727095671943, -0.82297340064389157, 0.42525082339072229, 0.55451632638526749]], [[0.20718065376792391,
0.40828171962949339, -0.98054254816788611, 0.89769117554517319], [-0.68749017583866245, 0.17066466375457767,
0.70660718620863483, 0.70576246545920529], [0.21692751606572119, 0.47014004829152839, 1.3748277463307201,
1.3212836529188918]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.61859534593457599, -0.68832037278553204, -0.81395391660559058,
0.70474268120208172], [1.1538640889775169, -0.61840829443535583, -0.86038718410040294, -0.16208513353159817],
[0.24342074184310508, -0.47980146702787174, 0.67896053116777877, 0.45820470516013301]], [[1.1085001296138581,
0.012109809371730973, -0.68895835800676963, 1.7169945930949277], [0.35721394930077227, -0.9108595789817534,
-1.1547820644444977, 1.104358453933548], [0.035649969677798599, -2.1501439822914454, 0.8157318952041559,
-0.35939365030578813]]], [[[-0.10518459752972166, -0.76537157126945188, -1.2843470399300845, 0.62431925114105657],
[0.24779464619772806, -0.81705954941654779, -0.55552944455890785, -0.38516072985708483], [0.056033968346053203,
0.51666909868942745, -0.091063618089528731, -1.7979008842415258]], [[0.29338366539773703, -0.30596920988869425,
-0.57572206515026991, 0.52974437563961629], [-1.3180736740413761, 0.66105802771914668, 0.45258279622299263,
-1.6126480031695198], [-0.71818130025412152, -0.15425163308315157, 0.18354057447044783, -1.2546860803299611]]],
[[[0.76199086156420348, 0.16939783695662922, -0.33328497538680557, -0.28716450911655816], [0.44282777426673942,
0.41120063253722738, -0.47376302178526142, 0.69317536098519239], [0.40246314878330663, -0.61561614700266343,
0.0088433589085275318, 1.4037709403877687]], [[0.064389969528247493, 0.2999928969476674, -0.18169857147454585,
-0.53946737159589209], [1.0289119803083038, -0.032508872740294195, -0.67230428425667288, 0.24464627841275868],
[-0.34570605594566106, 1.0318354841886035, -0.76498190274334987, -0.10145885170923308]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
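# exp() tests, ranks 0 through 4: apply exp to tagged Data and check the result
# type, shape, and that Lsup(res-ref) stays within RES_TOL*Lsup(ref).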
def test_exp_taggedData_rank0(self):
arg=Data(-0.749952155816,self.functionspace)
arg.setTaggedValue(1,1.87435313957)
res=exp(arg)
ref=Data(0.472389153274,self.functionspace)
ref.setTaggedValue(1,6.51660242443)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank1(self):
arg=Data(numpy.array([3.7583213879690298, -4.0248042968760078]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-4.0326117793437213, 2.0857030228564621]))
res=exp(arg)
ref=Data(numpy.array([42.876392709074644, 0.017866920423330365]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.017727967895924832, 8.050249001830192]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank2(self):
arg=Data(numpy.array([[3.766157828602319, -4.5823946696429996, 0.66617764913528088, 3.1886036213038711,
3.2584512608104532], [-2.7859216252555252, -1.4135218651939963, 1.3112977143029916, 4.7018302511834644, 1.7097977950670256],
[1.4508523744480133, 0.55253078019373714, -2.6877047949953683, -2.6846750320431956, 0.10904970548395898], [-1.8038527766057699,
0.13601142120047616, -3.0528315745434984, -2.9504614920251693, 4.9405838296608291]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.77557025230999699, -3.8554442789337271, -0.91546431588342969, -4.5959540113452011,
-3.2190686796674086], [1.4523968377221514, 3.5863184621410138, -0.095093254500025814, -1.1206737181718895,
-0.035904973775130244], [1.2975960427889675, -0.79138326811261184, 4.8737751190258649, 2.4737950526565182, 1.4934451722788324],
[2.1259309043066335, 2.3309264909898095, 4.874579950755157, 3.0897776496287364, -3.5758573980747324]]))
res=exp(arg)
ref=Data(numpy.array([[43.213710976177346, 0.010230368587152355, 1.9467817978129194, 24.254535269112523,
26.009224428474024], [0.061672224151586852, 0.24328495576601736, 3.7109863889101957, 110.14858765860721, 5.5278436073622528],
[4.2667498341569088, 1.737645055572572, 0.068036918977638014, 0.068243367300953484, 1.1152177814033668], [0.16466325379708055,
1.1456949787321744, 0.047225013744888564, 0.052315557164201748, 139.85187540287308]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[2.1718302653500525, 0.021164198413568151, 0.40033070302660334, 0.010092587743486195,
0.039992286551843337], [4.2733447640782849, 36.10092405638504, 0.90928813530390384, 0.32606004802823046, 0.96473196396405092],
[3.6604864293964274, 0.45321744024910665, 130.81382367479199, 11.867398903728049, 4.4524084401639898], [8.3806954834885605,
10.287468365848504, 130.91914916968017, 21.972191907545891, 0.027991415696668204]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank3(self):
arg=Data(numpy.array([[[-3.6030801562417594, -4.5294423627776723], [-1.557160197615659, -0.64085226193734268]],
[[3.1534384970888407, -1.4188623741518338], [-0.32228628176517304, 0.50951392267301809]], [[-1.3476041299826225,
-3.32599590280795], [2.0029626527776827, 0.17501479898352912]], [[0.027034969552835797, 3.5645457550243353],
[4.1592609451144007, -2.4301462095345872]], [[-1.4609157602736733, -2.6511851511926929], [2.600171679270459,
-0.70192657249718238]], [[-1.7778158632064134, -2.1404423164026731], [3.9519788369582631,
-4.5989900094571379]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.13931485388617304, -0.92433138388086356], [1.2828540748799355e-05,
-4.8469702777681896]], [[-4.4080333086228478, -1.1631495098987896], [-2.2966600443214649, 0.064924432335386584]],
[[2.916822038268009, 0.87499562050241675], [2.7965207298154517, 1.7350460169549091]], [[-1.4466065967927477,
-2.5710156312892343], [-4.0840019104915717, 3.2422451755687174]], [[-4.3762443959870501, 1.2266604944000514],
[-4.8494607331215622, -4.418190947910178]], [[-0.10058151287573036, 1.8710638004256079], [-3.2551884826265995,
-2.0233995710584862]]]))
res=exp(arg)
ref=Data(numpy.array([[[0.027239690595102155, 0.010786689455490304], [0.21073366423825682, 0.5268432242253307]],
[[23.416443683529156, 0.24198915348209477], [0.72449075214705883, 1.6644819313514767]], [[0.25986211125282049,
0.035936711454248524], [7.4109797665781239, 1.1912638459759448]], [[1.0274037299843257, 35.32340428052094],
[64.024187721822358, 0.088023961699000022]], [[0.232023699324167, 0.070567530289168845], [13.466049678150117,
0.49562951719433684]], [[0.16900687858407906, 0.11760281386238031], [52.038240209413715,
0.010061993132243331]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[0.86995407691125259, 0.39679663502748053], [1.000012828623035,
0.0078521313242256845]], [[0.012179107337097306, 0.31250040622116754], [0.10059426364925389, 1.0670783847158629]],
[[18.482457486951546, 2.3988647881215508], [16.387530820420302, 5.6691886766023503]], [[0.235367631750873,
0.076457852989352454], [0.0168399386970619, 25.59111383914971]], [[0.012572487351205171, 3.4098233771610715],
[0.0078326002724350228, 0.012056022541700125]], [[0.90431139638593339, 6.4952023243694939], [0.038573549534221326,
0.13220525908750566]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_exp_taggedData_rank4(self):
arg=Data(numpy.array([[[[-2.5238222687744738, -4.7456197569138041, 2.4673965431652034, -1.244247551702399],
[3.9651636170231548, 4.0776910888432969, 0.33317305369197836, 3.6969964378853799], [3.9522407667631043, -1.0872449164824829,
1.6801184274785852, -2.8666709250314359]], [[4.2594562258901689, -4.889578615758472, -3.1308615369041681, 2.1392519037970645],
[0.17641976501634815, -1.5746448211534414, -0.84524587352898095, 2.9243780446190479], [1.2842983675630162, 2.7926593166299227,
-0.070666242007418667, -1.4855591873635543]]], [[[1.5763070240757084, -4.7399252089965671, -4.4330538755629174,
3.681196791537392], [-2.8156600221094874, -1.2778689800443024, 1.4019765504207253, 0.73514942949937634], [-1.1656020759259276,
-3.7433522348482926, 1.4569591575424701, -0.53394250890947514]], [[2.3786113981284966, -0.87615865560504158,
-0.41129136755905193, -4.7966371337088347], [-1.5388053274330717, 4.1842415750395165, -0.82823766333376536,
2.0074764920300883], [3.5655062419593779, -1.0807913303813055, 0.14992361631707851, 1.5118919445282142]]],
[[[-3.3648564449762963, -3.078810214450729, 2.1446259920400266, 3.0442472934728944], [2.4303560614080606, 4.5022657497612339,
-3.6084630275613758, -3.8599028815954508], [0.39985165592526428, -1.0962344443252938, -4.0342766535351613,
3.1574594571937133]], [[2.9382085600344032, -2.1075141636809769, -4.4790587859448125, 3.8831338833937394],
[-0.88934483455337077, 3.2530676768122309, 1.1912487104888179, 1.5913330617778207], [2.5683829199460373, -4.8954076890133447,
4.5839373708486839, 3.8595928573139471]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[4.2648314236496532, 2.8364333726771429, -3.3447905621878982, -2.4414228825719086],
[-4.0502262415385548, 4.1458941943236809, -2.6143141305910502, 4.2118330275753131], [1.6496044974526356, -2.7777414297140846,
-3.9663660028469661, 0.045758605819602671]], [[0.58717311654154702, -2.7605066566946723, 4.603529134451751,
-3.9524884858165077], [-4.6202718129902269, -0.85933524206832246, 4.3247077827126894, 0.33279730693200626],
[-3.2409615827119742, -3.8063217097324467, 0.39137752345011467, -3.5665802016338057]]], [[[4.8607266697872795,
-4.969367327992023, -0.98962871483763237, -1.7804206030760605], [1.3076035022144179, 3.1626720342784047, -2.1039601570953614,
-2.4314893070462995], [4.668111404954228, -4.7790518839956428, 4.6299993091851555, -1.8861309215463971]], [[-1.960385423872796,
-2.2347760247542316, 0.41911304951970063, -1.8633640886195613], [-0.73179718793669579, 2.9290999688245485, -1.9420212454055177,
-0.38085093142676385], [-4.7675775804253231, -2.0447338138388274, -0.49968774313595965, -1.1945768914876465]]],
[[[-3.29590409812511, -3.0251482714096891, -3.3217228493760187, -3.7250915734222811], [2.257355212448271, 4.8692559789410161,
2.8160941264843631, 0.47498556914010859], [-4.8594977762105271, -3.6563196957128827, -1.0925704610547982,
0.88273087888402202]], [[2.9052807354938324, -0.41588815824257974, -4.0638914834502842, -4.1298152285808962],
[2.188339245387195, 1.1100247395792611, -0.48625551670779821, 3.2114719610297282], [4.7983149598282502, -0.5953446380306362,
4.6452284950547842, 2.2952676397007332]]]]))
res=exp(arg)
ref=Data(numpy.array([[[[0.080152655505830658, 0.0086896748505372011, 11.791707653037269, 0.28815765031031876],
[52.728896028898035, 59.009065747752359, 1.3953887547525055, 40.326000631888107], [52.051872360771483, 0.33714407542267766,
5.3661914380144706, 0.056887996095747358]], [[70.771489285571263, 0.0075245925517613936, 0.043680148977066881,
8.4930816096222337], [1.1929387075019384, 0.2070810904436986, 0.4294517544102977, 18.622639996115122], [3.6121326784506551,
16.324373808965724, 0.93177282686446505, 0.22637571883571311]]], [[[4.8370596417137266, 0.0087392997820314143,
0.011878159805614893, 39.693870945779999], [0.059865193346532772, 0.27863043525547398, 4.0632232005962257, 2.0857936483198323],
[0.31173491743100945, 0.023674607118095742, 4.2928856716344859, 0.58628895778987566]], [[10.789909567861674,
0.41637929986988848, 0.66279378685620638, 0.0082574691747315451], [0.21463736969557601, 65.643696214852767,
0.43681842951266009, 7.4445073500822074], [35.357348247592512, 0.3393268996880992, 1.1617455009391346, 4.5353032245954843]]],
[[[0.034566978025192006, 0.04601397085108954, 8.5388470432411587, 20.994222765561766], [11.362927259672304, 90.221318825839518,
0.027093456777850244, 0.021070045712901889], [1.4916034107005398, 0.33412689121892897, 0.017698477615323543,
23.510789823294036]], [[18.881990043978675, 0.12153971907203348, 0.011344085344041271, 48.576208955970465],
[0.41092488836655877, 25.869577820936623, 3.2911883839679281, 4.910290287640346], [13.044713030863051, 0.007480858738198164,
97.899101410590205, 47.446030131719859]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[71.152924262038169, 17.054828714195565, 0.035267600753315483, 0.087036919996353851],
[0.01741843342050033, 63.174086562400987, 0.073217989457584703, 67.480119432792065], [5.2049208605842301, 0.062178784059445436,
0.018942143909195415, 1.0468216837923499]], [[1.7988959516791232, 0.063259709283642204, 99.836029425246196,
0.019206846291659703], [0.0098501183071684206, 0.4234434761873957, 75.543434536535472, 1.3948645404410878],
[0.039126253875001821, 0.022229796402444147, 1.4790167715431195, 0.028252305852203917]]], [[[129.11799417067959,
0.0069475421600283986, 0.37171467754869364, 0.16856723245965202], [3.697302509270763, 23.633661588632254, 0.12197244052600395,
0.087905816293231603], [106.49642376925129, 0.0084039631233603397, 102.51399329228067, 0.15165744973159559]],
[[0.14080414118396287, 0.10701609612551928, 1.520612249558166, 0.15514981373760095], [0.48104368685579674, 18.71078262832426,
0.14341378217821199, 0.68327973756414706], [0.0085009481008868799, 0.12941463377750104, 0.60672008264714627,
0.30283205790085627]]], [[[0.037034547042194353, 0.048550622067129998, 0.036090599494215864, 0.024110892408788401],
[9.5577774186815603, 130.22399145161907, 16.711450221512699, 1.6079909926256291], [0.0077543773461377754, 0.025827390686055354,
0.33535337208917088, 2.417492579588719]], [[18.270372074237216, 0.65975405441116064, 0.017182025280169905,
0.016085850753487244], [8.9203862336155364, 3.0344334641142736, 0.6149246637123601, 24.815586931080908], [121.30583999301004,
0.55137250918361824, 104.08714646666205, 9.9270925408500208]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
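# sqrt() tests: all arguments are non-negative, so the real square root is
# defined for both the default and tag-1 components.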
def test_sqrt_taggedData_rank0(self):
arg=Data(66.1187077838,self.functionspace)
arg.setTaggedValue(1,79.3562796516)
res=sqrt(arg)
ref=Data(8.1313410815,self.functionspace)
ref.setTaggedValue(1,8.90821416736)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank1(self):
arg=Data(numpy.array([26.950944903716938, 22.036031901881039]),self.functionspace)
arg.setTaggedValue(1,numpy.array([79.659180592587674, 98.693397510457103]))
res=sqrt(arg)
ref=Data(numpy.array([5.191429947877265, 4.6942552020401527]),self.functionspace)
ref.setTaggedValue(1,numpy.array([8.9251991906392583, 9.9344550686213839]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank2(self):
arg=Data(numpy.array([[83.203409742087729, 20.459876166893444, 41.094599447534264, 43.257432617853716,
84.805361343651796], [80.986686844056138, 49.344401691041469, 57.262902954007956, 32.781817377127261, 20.98870933921323],
[41.864732471259813, 72.898239703170674, 97.169858294017487, 72.127581542658106, 69.84393540957619], [7.2057070639609844,
12.014479889224537, 12.730936911149628, 79.860562402939749, 72.136801812195543]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[81.711392673450959, 87.088569596996351, 67.659203023768697, 62.902795439904644,
36.572517618275612], [54.411575073623894, 80.455555320083377, 9.8079978674290373, 54.140158144982514, 36.141067802738704],
[32.839293835627899, 69.388569353812997, 63.803860822764101, 51.68690733801651, 54.337516161355843], [82.133256868736865,
57.914763460609464, 6.2550368291751957, 50.321505083945027, 50.092735399229653]]))
res=sqrt(arg)
ref=Data(numpy.array([[9.1215903077307594, 4.5232594626987126, 6.4105069571395257, 6.5770382861781878,
9.2089826443343785], [8.9992603498318751, 7.0245570458955964, 7.5672255783746767, 5.7255407934209375, 4.5813436172386401],
[6.470296165652683, 8.5380465976223547, 9.8574772783921496, 8.4927958613555585, 8.3572684179447165], [2.6843448109289136,
3.4661909770271655, 3.568043849387172, 8.9364737118697857, 8.4933386728774423]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[9.0394354178483383, 9.3321256740892835, 8.2255214438823696, 7.9311282576884761,
6.0475216095749182], [7.3764202072295131, 8.969702075324653, 3.1317723204966605, 7.3579996021325327, 6.0117441564606446],
[5.7305578991602468, 8.3299801532664528, 7.9877318947723888, 7.1893607044031747, 7.3713985214039166], [9.0627400309584552,
7.6101749953998734, 2.5010071629595938, 7.0937652261648063, 7.0776221571393352]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank3(self):
arg=Data(numpy.array([[[24.58711525153694, 26.513450628905794], [25.894046225897309, 76.099882908832683]],
[[96.86666740650108, 98.675082396336464], [53.846636874764542, 14.27238078898271]], [[97.63850940329813, 90.151928905789063],
[71.648695201571115, 74.209156956430576]], [[14.632460270663838, 46.13289266526926], [49.330643833957971, 72.03527701414572]],
[[47.999222087494871, 33.838367468886382], [75.127786968398865, 4.3599320763477758]], [[46.943202068363867,
80.275429008214473], [82.397086218544985, 62.859283550169593]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[28.930635932458532, 33.388573112772498], [16.88000592657939, 68.038470757620274]],
[[54.548353448446811, 81.081327110376535], [1.6631949481682415, 75.743741576440513]], [[28.384086636568604,
37.299784516494626], [4.5145476012025636, 68.176274025525743]], [[41.635212589459222, 64.391054127502827], [30.992011628576464,
19.448152032617305]], [[3.2357282019044789, 10.803298318703028], [99.439413034365714, 70.801579823071009]],
[[38.608472990519523, 38.359870069009858], [89.941281924017275, 47.089103130495459]]]))
res=sqrt(arg)
ref=Data(numpy.array([[[4.9585396289166574, 5.1491213453273552], [5.0886192848254339, 8.7235246837979812]],
[[9.8420865372389947, 9.9335332282293418], [7.338026769831556, 3.7778804625057569]], [[9.8812200361745877, 9.494836960463779],
[8.4645552276283897, 8.614473690042276]], [[3.8252398971389803, 6.7921198947949426], [7.0235777089712599, 8.487359837673063]],
[[6.9281470890487649, 5.8170755082675676], [8.667628681963647, 2.0880450369538908]], [[6.8515109332441311, 8.9596556300013273],
[9.0772840772196268, 7.9283846747095712]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[5.3787206594559764, 5.7782846168021615], [4.1085284380881912, 8.2485435537202729]],
[[7.3856857128127791, 9.0045170392629359], [1.2896491570067581, 8.7030880482987474]], [[5.3276717838628729,
6.1073549525547168], [2.1247464792776016, 8.2568925162875733]], [[6.4525353613489962, 8.0244036617995995], [5.5670469396778453,
4.4100058993857711]], [[1.7988129980363381, 3.2868371299325174], [9.9719312590072402, 8.4143674642287287]],
[[6.2135716774267218, 6.1935345376456779], [9.4837377612425193, 6.8621500370143069]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sqrt_taggedData_rank4(self):
arg=Data(numpy.array([[[[42.370128850225534, 64.183097608646705, 84.366078659801104, 16.515641051464272],
[73.785291697946164, 87.312657572269515, 69.428277252879781, 92.656721478953287], [69.698296458864249, 23.393048019403704,
88.109689395116419, 42.032468891247021]], [[2.6375163201099578, 59.041525354643206, 20.356627995608768, 11.897333150828965],
[37.925080098983869, 59.075116440382075, 56.144969467546872, 64.519772619227496], [20.071418547844651, 38.634724148514344,
80.718254953798279, 50.41857305264454]]], [[[59.576738600481768, 23.930830924167143, 18.360753569890573, 20.231150076534181],
[98.25922486474947, 68.178149570093638, 13.776804530518866, 51.945871290407467], [35.14832429151236, 67.415812532502684,
12.757339586205628, 44.063833075360989]], [[6.7443440383699587, 84.841224148387312, 29.790278716866581, 78.659203162433926],
[62.669492083963888, 49.131163942783786, 57.031266775264292, 23.536235325724508], [66.812957309291249, 93.23023188694566,
72.378130120625073, 76.741950163200173]]], [[[54.340441189118657, 38.923007006981855, 8.4805132822780038, 81.769308743472948],
[10.431711506617603, 89.283700349417444, 11.054894136992893, 69.501130141011274], [59.878072146242665, 16.353174235971739,
33.911661927785339, 43.933788186658099]], [[95.155663331834987, 40.374769085669357, 76.504062733443291, 24.269622789956216],
[19.066641354097424, 16.216531435944937, 2.9090938643160769, 36.193423650217412], [85.035802964632353, 33.758549347144886,
22.607096658902456, 59.29059546236266]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[45.461195758665305, 48.232713963030115, 20.574896465244645, 77.042123378126419],
[29.408402377656021, 89.644199991043934, 37.626983926343691, 54.665500847799954], [55.279852765647405, 9.8770030737031362,
84.692626914039323, 74.569785022600129]], [[86.480614059580262, 39.44670516230191, 33.913192480299656, 91.064507717285494],
[82.514056419443719, 79.033061042943729, 21.205161548235484, 63.447931595442171], [70.771892050057431, 85.179849589202817,
2.2976273736104913, 54.100290092830974]]], [[[44.920675505591753, 79.571083257406727, 70.036069047171736, 89.378823845420271],
[36.90881183703241, 11.022173065677999, 70.62518800399917, 26.439709298221533], [94.630299128155968, 22.253654455466215,
73.180687011483016, 62.69738947991646]], [[25.809044438683465, 63.915976605926396, 7.8198717354648863, 86.721732417437238],
[75.044351249076755, 84.229123933895082, 21.901578729424983, 6.8349416985481879], [37.271119471339006, 64.583431723199155,
27.275670855550583, 49.995981801746105]]], [[[0.17259469145601952, 9.252449088483786, 21.995295729482201, 34.634327605762955],
[36.564437853430213, 36.464875685420154, 23.468662561443722, 63.709619389652808], [77.299728799249792, 35.131812814366654,
52.447111388935163, 59.193503430356337]], [[28.172021648921582, 6.9193925888337438, 33.521086135909869, 32.466778283077545],
[80.781854588010745, 60.326859582936031, 60.965229409096004, 2.4499363952717865], [91.552398032123989, 20.523937743647728,
27.819367237334685, 53.113877776584353]]]]))
res=sqrt(arg)
ref=Data(numpy.array([[[[6.5092341216325549, 8.011435427477819, 9.1851009063483406, 4.063944026615558],
[8.5898365349956549, 9.3441242271423981, 8.3323632453752143, 9.6258361444060174], [8.3485505603586212, 4.8366360230436714,
9.3866761633240774, 6.4832452437993595]], [[1.624043201429678, 7.6838483427670026, 4.5118319999318199, 3.4492510999967756],
[6.1583341983838347, 7.6860338563125046, 7.4929946928812692, 8.0324200972824809], [4.4801136757725972, 6.2156837233336084,
8.9843338625519849, 7.1006037104350881]]], [[[7.7185969321167285, 4.8919148525058302, 4.284944990299242, 4.4979050764254884],
[9.9125791227485021, 8.25700608998768, 3.7117118059621581, 7.2073484229921521], [5.9286022207188402, 8.2107132779377139,
3.5717418140461423, 6.638059435961762]], [[2.5969874929175072, 9.2109296028352805, 5.4580471523124992, 8.8690023769550272],
[7.9164065133091723, 7.009362591761378, 7.5519048441611272, 4.8514158063110306], [8.1739193358688862, 9.6555803495670656,
8.5075337272693243, 8.7602482934674963]]], [[[7.3715969225886635, 6.2388305800832464, 2.912132085307602, 9.0426383729237418],
[3.2298160174563511, 9.4490052571377827, 3.3248900939719634, 8.3367337813445417], [7.7380922808042722, 4.0439058144288849,
5.8233720409901117, 6.6282567984846592]], [[9.7547764367941809, 6.3541143431377876, 8.7466600901969027, 4.9264208904595446],
[4.3665365398788802, 4.026975469995433, 1.7056065971718322, 6.0160970446143409], [9.2214859412478827, 5.8102107833662009,
4.754692067726622, 7.7000386662901024]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[6.7424918063476582, 6.944977607093497, 4.5359559593590237, 8.777364261446964],
[5.4229514452607832, 9.4680621032523824, 6.1340837886634452, 7.3936121650922395], [7.4350422168033052, 3.1427699683087109,
9.2028597139171548, 8.6353798424041628]], [[9.2994953658561634, 6.280661204228573, 5.8235034541330579, 9.5427725382765729],
[9.0837248097596905, 8.8900540517447766, 4.6049062474968459, 7.9654209929822395], [8.4126031672757176, 9.2292930167593461,
1.5157926552172269, 7.3552899394130602]]], [[[6.7022888258856579, 8.9202625105658591, 8.368755525594695, 9.4540374362184689],
[6.0752622854517488, 3.3199658229683626, 8.4038793425417033, 5.1419557853234732], [9.7278106030162803, 4.7173779216283078,
8.5545711179160246, 7.9181683159627552]], [[5.0802602727304693, 7.9947468131221262, 2.7964033570758149, 9.3124503981195641],
[8.6628142799598766, 9.17764261310578, 4.6799122565946663, 2.6143721423217827], [6.1050077372054989, 8.0363817556907513,
5.2226114976657589, 7.0707836766334538]]], [[[0.41544517262331926, 3.0417838661686312, 4.6899142560906375, 5.8850936785885537],
[6.0468535498579934, 6.0386153781657725, 4.8444465691597554, 7.9818305788617696], [8.79202643303862, 5.9272095301555394,
7.2420377925646839, 7.6937314373687578]], [[5.3077322510580336, 2.6304738335200644, 5.7897397295482866, 5.6979626431802401],
[8.9878726397302025, 7.7670367311437394, 7.8080233996252861, 1.5652272663328437], [9.5683017318709176, 4.5303352793858123,
5.2744068137881328, 7.2879268503864907]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
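# log() tests: natural logarithm of strictly positive tagged Data, with the same
# type, shape and relative-tolerance checks as the groups above.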
def test_log_taggedData_rank0(self):
arg=Data(36.4809356241,self.functionspace)
arg.setTaggedValue(1,80.0302228229)
res=log(arg)
ref=Data(3.59678981247,self.functionspace)
ref.setTaggedValue(1,4.38240434862)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank1(self):
arg=Data(numpy.array([46.59425457123556, 68.172569815688405]),self.functionspace)
arg.setTaggedValue(1,numpy.array([65.427006436370618, 86.065200529495556]))
res=log(arg)
ref=Data(numpy.array([3.8414772410677034, 4.2220422818284451]),self.functionspace)
ref.setTaggedValue(1,numpy.array([4.1809351156780332, 4.455105154698046]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank2(self):
arg=Data(numpy.array([[39.075877450675698, 97.653545876008806, 47.988417825860637, 53.932052577242985,
60.380431949040442], [93.350133391903867, 38.347348519622287, 60.437021565597611, 2.0425923742169343, 77.266159584750397],
[47.678248212978616, 91.262336639629311, 11.671719403634887, 49.71988614105117, 77.648453231227109], [49.229327831457574,
82.102378053363054, 49.729354379527422, 35.684271737364277, 43.531040542575127]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[53.427459984069628, 13.93063057392779, 1.342075620281171, 69.536438982499547,
11.21650951883837], [1.9134484258021096, 36.756806175549656, 52.373018075302362, 61.699751656220478, 12.632621993377159],
[45.322161925956458, 91.126460370251877, 6.6306875352383692, 83.584252908685002, 78.274345299988568], [46.494546030822839,
91.712856654908848, 31.115030158406128, 82.946055505457963, 98.493589852718884]]))
res=log(arg)
ref=Data(numpy.array([[3.6655053316069397, 4.5814259687577241, 3.8709596864969273, 3.9877249686516278,
4.1006650780961058], [4.5363572989305121, 3.6466853864301547, 4.101601856997652, 0.71421977278415627, 4.3472560794647173],
[3.8644752815825361, 4.5137381793751707, 2.4571687708042789, 3.9064049766368951, 4.352191629643908], [3.8964895400400414,
4.4079669813660951, 3.9065953901273409, 3.5747100241263499, 3.7734742593119375]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[3.9783248457056453, 2.6340900540986794, 0.29421738590469837, 4.2418509170789935,
2.4173867571169083], [0.64890707271250014, 3.6043234105668351, 3.9583915364819653, 4.1222799058825039, 2.5362825152421582],
[3.8137961385956465, 4.5122482162114013, 1.8917084994605029, 4.4258551400119392, 4.3602199030764179], [3.8393350160546169,
4.5186625728689567, 3.4376909872768944, 4.4181904628289717, 4.5899914684230305]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank3(self):
arg=Data(numpy.array([[[21.699512716247472, 1.5282092621897898], [70.128490130107167, 99.241953232244768]],
[[78.501878729278019, 4.9314024598766171], [28.820316178767477, 1.3050055568589718]], [[77.79872786306737, 49.006304087499473],
[6.0837158518704957, 61.062626053957885]], [[54.00367327313819, 52.290800103947532], [28.218888543563132, 55.69049695930471]],
[[70.540276579110611, 10.438534795340397], [41.668397105605507, 16.209971253412206]], [[47.267330103680038,
54.403296082231499], [38.961522648812213, 76.555371480099396]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[51.938496576912861, 83.925735699153535], [1.6169120969879085, 6.9041394256440052]],
[[88.150381953863842, 98.403414431124375], [61.500668372426269, 88.227022389639899]], [[97.467337646036611,
69.782208819109613], [29.536218217059314, 56.802080857103853]], [[61.217789432766921, 4.925020459063683], [6.0472249368221469,
12.935582213769482]], [[62.146044807251613, 55.471641138480706], [81.292189922751476, 63.742035945518467]],
[[4.0955522013947023, 30.661458256155598], [17.597603579662788, 9.30923552928299]]]))
res=log(arg)
ref=Data(numpy.array([[[3.0772898048218194, 0.42409663306219014], [4.2503291327306743, 4.5975608405348396]],
[[4.3631225573590209, 1.5956234222193697], [3.3610805613220047, 0.26620729889507122]], [[4.354125079676205,
3.8919489446822295], [1.8056156691430751, 4.1118999941440908]], [[3.9890520678274091, 3.9568204493632884], [3.3399915602331363,
4.0198095212339595]], [[4.2561838456835561, 2.345504227331265], [3.7297429783063549, 2.7856265623599672]],
[[3.8558193613754197, 3.996424741773315], [3.6625745603723487, 4.3380142892056002]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[3.9500602604347153, 4.4299323089778522], [0.48051821732876043, 1.9321211484850926]],
[[4.4790442418335141, 4.5890755029594903], [4.1190480425977469, 4.4799132923888827]], [[4.5795173233770798, 4.245379089300493],
[3.3856172464655256, 4.039572959864234]], [[4.1144378242858997, 1.5943284287400294], [1.7995994786846001, 2.5599818253532565]],
[[4.1294871764666654, 4.0158719196846624], [4.3980499470284302, 4.1548442498897815]], [[1.4099015559293309, 3.423006434483387],
[2.8677627325270256, 2.2310069750585604]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_log_taggedData_rank4(self):
arg=Data(numpy.array([[[[31.101305125560753, 98.170407256844626, 89.495246884407379, 99.98908850739052],
[2.4049273930600767, 36.880499975611961, 16.353617842398464, 58.781248047924151], [72.016717419675047, 88.390811768491815,
58.246083798359486, 66.583340970007129]], [[20.465803140117011, 24.0036287870054, 88.198587567358629, 48.083853917268023],
[32.183002238023384, 52.361813109969468, 91.165656746391832, 24.096203766223894], [34.219537794626028, 27.966364691635807,
48.391457442108091, 21.127135955159684]]], [[[92.173901880553899, 84.090306966759115, 68.663513749469857, 28.930103696589871],
[76.193472611006214, 23.122185422458525, 52.911657222861116, 25.431379093778077], [48.1123157662987, 63.084679424916168,
88.227262797374976, 25.223940757757774]], [[77.883594744394102, 4.766542541594764, 67.914391833582812, 44.354444036844214],
[43.449846921835778, 24.389274109879604, 52.005422364971146, 90.067558807201308], [16.219814551493748, 93.953208312531657,
89.304393662856739, 57.450106876298889]]], [[[83.17121819234076, 3.0876023794315675, 13.178062484577275, 32.720549483090331],
[28.923086723062763, 48.413131454470019, 19.329222518853427, 49.531895870836308], [61.750663719317927, 25.293283597003178,
14.112470421243229, 93.044089674063756]], [[64.150504828677711, 10.388481675167892, 60.095156109859765, 94.542246846329334],
[14.28716150453554, 22.491472839959545, 37.072742949787475, 56.544755150434312], [47.953455399965009, 6.8295119322974971,
81.092486719769227, 88.957569057433133]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[3.3654654280794065, 31.448597851056814, 25.912141884195076, 97.838879435572466],
[61.609361018170169, 23.411945067093868, 77.425261409500308, 49.641325811406979], [1.5957777077323345, 94.738957291578956,
26.540677293555149, 14.644250966443629]], [[94.393421059841273, 44.913143788307792, 48.37457688577048, 24.936916903319691],
[27.135697963907795, 7.1178102866629755, 82.53709971518164, 27.34969678954003], [84.534447098035358, 70.271477764969092,
91.498492500534539, 98.035238969369871]]], [[[23.350304037279159, 73.165148854851068, 20.805692503306975, 70.60969981889366],
[76.636921854602633, 99.536056139752134, 36.05432002833745, 68.437860465731404], [97.395060152216843, 83.220325374523995,
84.216569453631237, 41.096858608280385]], [[7.3882123001998607, 0.26399485183249632, 58.100940249092623, 11.539242598340968],
[83.194333821216958, 65.589029104330493, 18.19752451210422, 29.394627656416741], [53.733171964816421, 57.534725114847042,
65.718206728755177, 8.3980661515725163]]], [[[22.27274452496971, 27.206143810199485, 94.515538865049535, 5.403807179524474],
[69.179637950013159, 84.701966892623375, 79.155529928216239, 94.704415753621575], [11.066488989252838, 7.6878341916531605,
69.879561161866874, 42.170520250872158]], [[69.119995601366483, 97.630776657501471, 97.832335448609641, 60.475862682733805],
[43.772437615973402, 26.522975367447277, 67.711453522387615, 83.932152243212755], [72.510806999393893, 10.554199212907934,
45.076886402670027, 87.39127569037376]]]]))
res=log(arg)
ref=Data(numpy.array([[[[3.4372497837567777, 4.58670481818156, 4.4941855164337712, 4.6050610651085302],
[0.87751971310618737, 3.6076829554189502, 2.7944491476300444, 4.073822893292145], [4.2768982784493446, 4.4817680249710827,
4.0646768592562585, 4.1984544106717419]], [[3.0187553537439253, 3.1782050183770179, 4.4795909489089949, 3.8729464433637721],
[3.4714384323655101, 3.9581775682108784, 4.5126782553964633, 3.1820543080153221], [3.5327967614945712, 3.3310025270698476,
3.8793232990147835, 3.0505582784353522]]], [[[4.5236770306297025, 4.4318913042931216, 4.2292179627974305, 3.3648827033780897],
[4.3332757977505798, 3.1407925645297952, 3.9686236779689277, 3.2359838089418784], [3.8735381893751741, 4.1444779417295763,
4.4799160172616155, 3.227793573594179]], [[4.3552153369168716, 1.5616212080338248, 4.2182479684170557, 3.7922129074975159],
[3.771607328617411, 3.1941434499923838, 3.9513479893944825, 4.5005600421442526], [2.7862336152984626, 4.5427968744252256,
4.4920506878239275, 4.0509168646289595]]], [[[4.4209013527895706, 1.1273948607343995, 2.5785535140631213, 3.4880033050865178],
[3.3646401247575981, 3.8797710879671699, 2.9616180709345326, 3.9026168230902107], [4.1231047239413003, 3.2305388900157679,
2.6470588335475065, 4.5330734633284591]], [[4.1612319605346206, 2.340697661147535, 4.0959292411193209, 4.5490467911745931],
[2.6593613371192286, 3.1131362524867643, 3.6128820082150246, 4.0350324511420927], [3.8702308612975078, 1.9212532116292804,
4.3955903146568103, 4.4881595038860205]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[1.2135662679688048, 3.4483543987203631, 3.2547116574713217, 4.5833220382836135],
[4.1208138235747116, 3.1532463650693905, 4.3493131021467581, 3.9048236685092923], [0.46736120845767137, 4.5511252914352394,
3.2786785483054603, 2.6840478329453119]], [[4.5474713785601448, 3.8047304866159513, 3.8789744047643144, 3.2163493120536915],
[3.3008501290901413, 1.9626001341145185, 4.4132478857761619, 3.3087054425008704], [4.4371591092581104, 4.2523659947961194,
4.5163224967417603, 4.5853269953568034]]], [[[3.1506100048986219, 4.2927191990513212, 3.0352266273078725, 4.2571675262643494],
[4.3390789691634684, 4.6005199517870539, 3.5850266909019495, 4.2259261870761291], [4.578775492239302, 4.4214916133587838,
4.4333916887485154, 3.7159316856924915]], [[1.9998857977389439, -1.3318256766604444, 4.062181847015947, 2.445753626199946],
[4.4211792423976677, 4.1834084427007436, 2.9012855690133099, 3.3808119248720274], [3.9840305382637378, 4.0523886805608198,
4.1853760062592782, 2.1280014592936731]]], [[[3.1033637123365332, 3.303442823170931, 4.5487642534165529, 1.6871037383963188],
[4.2367065700465449, 4.4391388232611524, 4.3714146503200348, 4.5507606279710613], [2.4039215319374505, 2.0396391042727164,
4.2467732053644385, 3.7417214046724601]], [[4.2358440608581605, 4.5811927783170869, 4.5832551507041686, 4.1022443215077606],
[3.779004341129665, 3.278011352367117, 4.2152553461462992, 4.4300087610783878], [4.2837356128125892, 2.3565238103888038,
3.8083696185225007, 4.4703954572028213]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
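# sign() tests: sign() maps each nonzero entry to +/-1.0, so the reference
# arrays contain only +/-1.0 values.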
def test_sign_taggedData_rank0(self):
arg=Data(-27.6238811345,self.functionspace)
arg.setTaggedValue(1,-26.6188411821)
res=sign(arg)
ref=Data(-1.0,self.functionspace)
ref.setTaggedValue(1,-1.0)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank1(self):
arg=Data(numpy.array([65.988765188781628, 43.633425826032123]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-43.177017978677057, 18.498142369576271]))
res=sign(arg)
ref=Data(numpy.array([1.0, 1.0]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-1.0, 1.0]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank2(self):
arg=Data(numpy.array([[-78.167710802301741, -55.033204362154265, 37.991255141785302, 50.222020893267029,
95.714640431707807], [-81.178579764161256, 21.860275678254681, 91.336894263942668, 37.932598260023099, -87.965842239718057],
[63.15669717506313, 80.395599270502714, 58.958976516236106, -19.250836112072108, -48.102635913480874], [-98.409552362349558,
29.763756955023496, -70.007046431425664, 16.56379790064571, -41.607232959589481]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[74.159820400094588, -19.916385054691645, 15.40425479416308, -45.475971811633698,
-27.911268074280457], [-29.786605106833306, -82.877167864945875, -84.972063657379977, -96.028192775161585,
-91.132164399088751], [58.162335820187224, -36.266848654009443, -72.489996854551606, 7.6308625158186771, 25.612300558077663],
[93.912536630409363, 0.74947614971907228, 11.966503685953754, -88.781531775281678, -95.942997369506429]]))
res=sign(arg)
ref=Data(numpy.array([[-1.0, -1.0, 1.0, 1.0, 1.0], [-1.0, 1.0, 1.0, 1.0, -1.0], [1.0, 1.0, 1.0, -1.0, -1.0], [-1.0, 1.0,
-1.0, 1.0, -1.0]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.0, -1.0, 1.0, -1.0, -1.0], [-1.0, -1.0, -1.0, -1.0, -1.0], [1.0, -1.0, -1.0, 1.0,
1.0], [1.0, 1.0, 1.0, -1.0, -1.0]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank3(self):
arg=Data(numpy.array([[[40.659064171526694, -30.296135196623325], [65.135534658139875, -3.189213180563371]],
[[-16.553351525146027, -61.720967726757102], [76.972713494862035, 99.275215333914559]], [[47.475762989245681,
-97.393738249661268], [29.171397306032645, -63.642498879346746]], [[-65.809714821242551, 25.104527515218038],
[-25.908107285024215, -16.761112108721733]], [[-91.771675890562236, -30.217560827961364], [57.01823721886862,
13.089158046532233]], [[-9.2038411577464814, -51.536713875708799], [24.738016649301201,
-43.097223742291945]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[75.518086628986168, 5.7702731728101782], [-42.11765774528169, -9.9313715009520962]],
[[-33.56415502329628, 12.290219019430509], [-71.076843320533584, -48.936347244622212]], [[-29.458297241970527,
-0.79873463559410141], [56.480807815607648, 77.468899350274796]], [[-15.657215310960197, -97.911217676078493],
[97.233823754667782, 28.179624489186494]], [[-38.154815907369802, -2.8953583985458664], [-94.411611022922287,
-89.520621976287586]], [[20.02986172489021, 45.555499658943972], [-92.298172881010984, 50.848484074958037]]]))
res=sign(arg)
ref=Data(numpy.array([[[1.0, -1.0], [1.0, -1.0]], [[-1.0, -1.0], [1.0, 1.0]], [[1.0, -1.0], [1.0, -1.0]], [[-1.0, 1.0],
[-1.0, -1.0]], [[-1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [1.0, -1.0]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[1.0, 1.0], [-1.0, -1.0]], [[-1.0, 1.0], [-1.0, -1.0]], [[-1.0, -1.0], [1.0, 1.0]],
[[-1.0, -1.0], [1.0, 1.0]], [[-1.0, -1.0], [-1.0, -1.0]], [[1.0, 1.0], [-1.0, 1.0]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_sign_taggedData_rank4(self):
arg=Data(numpy.array([[[[48.404617181522724, 27.642908048648465, -32.063735049783858, -25.287287320564673],
[35.529509315351362, 20.52958828294021, 50.132503108908566, 21.961444483043181], [-30.536862467175467, -73.366944239336476,
-1.9104283260391099, -40.116305423450392]], [[-3.2592728125080583, -66.978916413183839, 51.039430417752982,
34.318446063538232], [-10.778480760564307, 34.381886410487425, -96.662586157020499, 23.49291212424454], [33.936269866253809,
6.0438240456580417, 53.991390973572862, 34.843592016698238]]], [[[-52.275180577618798, 15.674362624980304, 46.042809742277655,
38.412209266363305], [66.461375379834692, 45.821627659544617, 58.528372762759147, -77.609658246727861], [-91.311967332091655,
62.061963370741864, -42.381631148565965, -19.376230129856737]], [[-82.817924353598301, -94.396836339797801,
-80.332788125711602, -53.122903800926544], [58.309151553617909, -63.690512047675661, 12.750432515234706, 88.616992933489428],
[-76.463210395801909, -88.55862414809792, -53.122216991054394, 94.306145635218115]]], [[[31.191484321029691,
33.483202066627882, -68.553556516172563, -30.761725450809905], [39.954033622863392, 31.391308803793095, 7.0924416508365056,
82.108147705338354], [28.677362945828122, 76.875499532889648, -98.899773427430574, 63.640543048776806]], [[48.003219667446018,
32.816178561644875, -47.97394425834738, 64.03620964542236], [22.449712578557794, 72.880134481879196, -66.599797223033192,
-95.855372244240456], [-5.2195963768147777, 53.688991692833952, -4.6935389526849463,
-20.020330663766899]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-63.279749648503802, -91.258116713624531, -34.456755905701655, -68.15939250069377],
[-42.719644685839995, -50.608567888041001, 86.926567471745585, 85.713364201437258], [-28.138127931660478, -48.833678656434088,
92.324131209205206, 46.337966629590767]], [[42.90425842608937, 64.851867297440293, -44.286214357511192, -83.350387738656664],
[81.716073201887639, -81.864272503868293, 92.45506396473931, 57.706464829259602], [-91.437108351660243, 53.053916668830283,
71.118625640502444, -27.194756979681543]]], [[[48.738615887994285, -2.8909177216855966, -26.101711802319798,
12.384670566250364], [-59.707938829568683, 7.1712734206349751, 13.096740235902374, 13.95432941544199], [67.638350545270868,
-69.038896120399571, -52.761020748111505, -34.828120061695998]], [[-40.401312312884819, -58.575266259290814,
-5.6760646716001304, 92.205219596258189], [87.355330242760971, 40.300165196433568, -55.950410136680517, 33.57412513030539],
[-99.413320460986569, 85.272736206140081, -8.649704146529686, -72.352005495304866]]], [[[76.119465279689791,
42.566334567806138, -50.386490732119427, 71.20528114907242], [61.744996594644761, 22.082948637093295, 78.339113397478116,
-49.481789958643674], [-96.910012358949714, 21.340439990309633, 92.448839100352387, -11.980830731257086]],
[[48.862626595701954, 89.576908309497242, -24.930909752705006, -56.400828022332483], [70.708156511024811, -52.976842818709493,
96.644726353542865, 68.041659790587545], [96.085623722167952, 49.460250235353953, 48.149498918497216, 57.854757467958734]]]]))
res=sign(arg)
ref=Data(numpy.array([[[[1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, -1.0]], [[-1.0, -1.0, 1.0, 1.0],
[-1.0, 1.0, -1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]], [[[-1.0, 1.0, 1.0, 1.0], [1.0, 1.0, 1.0, -1.0], [-1.0, 1.0, -1.0, -1.0]],
[[-1.0, -1.0, -1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [-1.0, -1.0, -1.0, 1.0]]], [[[1.0, 1.0, -1.0, -1.0], [1.0, 1.0, 1.0, 1.0],
[1.0, 1.0, -1.0, 1.0]], [[1.0, 1.0, -1.0, 1.0], [1.0, 1.0, -1.0, -1.0], [-1.0, 1.0, -1.0, -1.0]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-1.0, -1.0, -1.0, -1.0], [-1.0, -1.0, 1.0, 1.0], [-1.0, -1.0, 1.0, 1.0]], [[1.0,
1.0, -1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [-1.0, 1.0, 1.0, -1.0]]], [[[1.0, -1.0, -1.0, 1.0], [-1.0, 1.0, 1.0, 1.0], [1.0, -1.0,
-1.0, -1.0]], [[-1.0, -1.0, -1.0, 1.0], [1.0, 1.0, -1.0, 1.0], [-1.0, 1.0, -1.0, -1.0]]], [[[1.0, 1.0, -1.0, 1.0], [1.0, 1.0,
1.0, -1.0], [-1.0, 1.0, 1.0, -1.0]], [[1.0, 1.0, -1.0, -1.0], [1.0, -1.0, 1.0, 1.0], [1.0, 1.0, 1.0, 1.0]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
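# abs() tests: references are the element-wise absolute values of the default
# and tag-1 components.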
def test_abs_taggedData_rank0(self):
arg=Data(-14.3673757927,self.functionspace)
arg.setTaggedValue(1,-91.0616949648)
res=abs(arg)
ref=Data(14.3673757927,self.functionspace)
ref.setTaggedValue(1,91.0616949648)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank1(self):
arg=Data(numpy.array([-81.821732775420642, -68.22226512766818]),self.functionspace)
arg.setTaggedValue(1,numpy.array([21.333617426834195, 10.209481057564346]))
res=abs(arg)
ref=Data(numpy.array([81.821732775420642, 68.22226512766818]),self.functionspace)
ref.setTaggedValue(1,numpy.array([21.333617426834195, 10.209481057564346]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank2(self):
arg=Data(numpy.array([[-35.703961827623615, 94.271132011685381, -77.391701661321079, -48.396751261576078,
-89.628632351273765], [49.30062196572834, -45.716685546575796, -91.97360399287524, -46.086717554689407, 94.50160817876062],
[23.260490557882292, -46.121623208221905, 64.433592032582311, 18.144341652350775, -44.21085548471779], [-61.083601852216219,
85.575046878129143, 52.75009956117529, 97.008285145570085, 56.751065315172809]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-51.972473313741155, -55.497035445328713, 62.267323877673789, 18.670956133573171,
13.711215778429931], [-48.087336536814519, -76.786375607585896, 37.410127192208563, 87.684260266087875, -26.905076717599144],
[16.189496788130981, 87.750429072332139, -36.767204229576599, -71.524650024934132, 81.291275301664541], [7.3770859265969762,
-24.93630589052367, 61.708601944027265, 89.294133020898926, -2.7788897536858315]]))
res=abs(arg)
ref=Data(numpy.array([[35.703961827623615, 94.271132011685381, 77.391701661321079, 48.396751261576078,
89.628632351273765], [49.30062196572834, 45.716685546575796, 91.97360399287524, 46.086717554689407, 94.50160817876062],
[23.260490557882292, 46.121623208221905, 64.433592032582311, 18.144341652350775, 44.21085548471779], [61.083601852216219,
85.575046878129143, 52.75009956117529, 97.008285145570085, 56.751065315172809]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[51.972473313741155, 55.497035445328713, 62.267323877673789, 18.670956133573171,
13.711215778429931], [48.087336536814519, 76.786375607585896, 37.410127192208563, 87.684260266087875, 26.905076717599144],
[16.189496788130981, 87.750429072332139, 36.767204229576599, 71.524650024934132, 81.291275301664541], [7.3770859265969762,
24.93630589052367, 61.708601944027265, 89.294133020898926, 2.7788897536858315]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank3(self):
arg=Data(numpy.array([[[-73.879162728531952, 53.891922757125315], [-30.709972491596574, -49.27453562582631]],
[[99.200427899109769, -0.10455889631015225], [24.929977391825204, -25.196431617614095]], [[99.69470286180362,
49.629118870818502], [-18.286571682827372, -99.882333404908422]], [[94.596602624460871, -48.944752738316531],
[-86.357256849018469, 94.554119229106021]], [[37.481086962966259, 84.979891468391372], [64.015940250013614,
-48.600306234165757]], [[-1.3540803820464049, 43.87503589064076], [24.242456069744136,
86.552268702416399]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[37.413937980923265, -0.28411691206147793], [33.044360612769196, -66.03355173059704]],
[[37.536911848971073, 0.023208526118992268], [-26.527789666862972, -33.2402194708271]], [[55.327425103705878,
-84.395066891225468], [45.013227563401301, -95.875031468356525]], [[64.760193848108571, -73.302359966808424],
[54.095816937340203, 37.527678340113113]], [[-76.71381733348575, -39.352383403035063], [80.080299993848996,
0.010359221408108965]], [[-96.050890564474372, -42.823985894886071], [3.4476034725966258, -36.523928707662435]]]))
res=abs(arg)
ref=Data(numpy.array([[[73.879162728531952, 53.891922757125315], [30.709972491596574, 49.27453562582631]],
[[99.200427899109769, 0.10455889631015225], [24.929977391825204, 25.196431617614095]], [[99.69470286180362,
49.629118870818502], [18.286571682827372, 99.882333404908422]], [[94.596602624460871, 48.944752738316531], [86.357256849018469,
94.554119229106021]], [[37.481086962966259, 84.979891468391372], [64.015940250013614, 48.600306234165757]],
[[1.3540803820464049, 43.87503589064076], [24.242456069744136, 86.552268702416399]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[37.413937980923265, 0.28411691206147793], [33.044360612769196, 66.03355173059704]],
[[37.536911848971073, 0.023208526118992268], [26.527789666862972, 33.2402194708271]], [[55.327425103705878,
84.395066891225468], [45.013227563401301, 95.875031468356525]], [[64.760193848108571, 73.302359966808424], [54.095816937340203,
37.527678340113113]], [[76.71381733348575, 39.352383403035063], [80.080299993848996, 0.010359221408108965]],
[[96.050890564474372, 42.823985894886071], [3.4476034725966258, 36.523928707662435]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_abs_taggedData_rank4(self):
arg=Data(numpy.array([[[[19.834948759065469, -88.423654358742937, -3.7896795469446403, 90.413210798680979],
[-25.385662633962866, 52.183056091414045, -99.49780362285307, -31.235081588621895], [-99.70036777743492, 29.485863942948555,
38.1126429021798, 42.984689715140888]], [[27.006030037219915, -32.962297315637798, 73.148237069388955, 14.390644834253024],
[10.073562289878168, 1.0118517682240196, 68.902436033381321, -49.896632248801367], [75.141298395785128, 83.15384865784992,
62.286701933145707, -70.233955546976915]]], [[[28.90835831201062, -93.402007113192667, 60.022984931751211, 42.105451316412314],
[-5.8776161547639418, -30.767571979578307, 22.107942044796999, 88.562401747987877], [-11.20004264511995, -76.166717134240727,
90.327718641335366, -63.619067436488663]], [[61.760636603356744, 63.532544897685085, 12.695030988835626, -72.470637851208224],
[35.616750250889851, -47.984761590856408, 46.377995043509088, 70.069148102663178], [71.939740735848517, 60.377059082125186,
53.39384866277004, 4.4345554627479515]]], [[[40.870145540859255, 96.113732253205882, -19.523812196208908, -94.457638344656488],
[-39.941605835336325, 29.189824293279798, 27.298137473725333, 95.978114614227195], [-98.911416748736187, 81.302220082165206,
-70.484408590592508, 82.172581716251415]], [[-24.045113821484222, 58.192111786183631, 39.743958607008949, 6.9836272098514627],
[12.807988012918514, -49.209827092167366, -77.845334523657925, -85.486568474094412], [4.895784651511434, 58.888254548139173,
22.796583205570116, 67.681339974157936]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-21.339241360814867, 53.771385770613193, -93.689799860345531, -59.85130407171102],
[-91.676322024879113, -86.725986612907889, 6.8756644612703468, 2.4970468120601055], [86.426318617205339, 30.622935649678567,
38.69730973485099, 52.92337459984978]], [[8.8277708157388872, 80.007316235717923, -40.336819994603054, -24.353422327790653],
[-13.747314026960296, 16.958567973724541, -57.50595682676709, -56.25177925251905], [-31.207714298736988, -22.275561102343119,
16.313479988183616, -76.010284971162221]]], [[[26.998597697380859, -15.622385252752323, -52.52173936476985,
82.819884716042083], [78.907717062539973, -79.272005467613681, -83.47340370656633, 73.867592414028053], [28.703033182348804,
-20.988872679092665, 37.290447061925107, 8.1014432535999816]], [[-45.048580940662731, 19.491468419339085, 64.568062512177647,
11.178618880088734], [34.691570775011911, -69.589063689142193, 61.364777508593363, 77.280249139969868], [-1.1830828331200678,
91.262256646734187, -5.021627081867905, 93.388437572311091]]], [[[21.298620038202813, -13.98893927515401, 49.182789882221499,
57.595487238415643], [-79.702455143171449, 70.925982455775426, 81.897869050808879, -60.930959954287275], [-57.754562218588148,
-29.858113075280372, -14.897533692783952, 6.0864257187503057]], [[-3.5671759547432771, 84.139996576651015, 39.806429474961305,
9.3646747259164727], [45.475947995072914, 10.14946725212269, -3.9530147571287699, 23.62077091218417], [-34.033830893546195,
-8.3157508831654496, -64.196930272577688, 73.499380413212378]]]]))
res=abs(arg)
ref=Data(numpy.array([[[[19.834948759065469, 88.423654358742937, 3.7896795469446403, 90.413210798680979],
[25.385662633962866, 52.183056091414045, 99.49780362285307, 31.235081588621895], [99.70036777743492, 29.485863942948555,
38.1126429021798, 42.984689715140888]], [[27.006030037219915, 32.962297315637798, 73.148237069388955, 14.390644834253024],
[10.073562289878168, 1.0118517682240196, 68.902436033381321, 49.896632248801367], [75.141298395785128, 83.15384865784992,
62.286701933145707, 70.233955546976915]]], [[[28.90835831201062, 93.402007113192667, 60.022984931751211, 42.105451316412314],
[5.8776161547639418, 30.767571979578307, 22.107942044796999, 88.562401747987877], [11.20004264511995, 76.166717134240727,
90.327718641335366, 63.619067436488663]], [[61.760636603356744, 63.532544897685085, 12.695030988835626, 72.470637851208224],
[35.616750250889851, 47.984761590856408, 46.377995043509088, 70.069148102663178], [71.939740735848517, 60.377059082125186,
53.39384866277004, 4.4345554627479515]]], [[[40.870145540859255, 96.113732253205882, 19.523812196208908, 94.457638344656488],
[39.941605835336325, 29.189824293279798, 27.298137473725333, 95.978114614227195], [98.911416748736187, 81.302220082165206,
70.484408590592508, 82.172581716251415]], [[24.045113821484222, 58.192111786183631, 39.743958607008949, 6.9836272098514627],
[12.807988012918514, 49.209827092167366, 77.845334523657925, 85.486568474094412], [4.895784651511434, 58.888254548139173,
22.796583205570116, 67.681339974157936]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[21.339241360814867, 53.771385770613193, 93.689799860345531, 59.85130407171102],
[91.676322024879113, 86.725986612907889, 6.8756644612703468, 2.4970468120601055], [86.426318617205339, 30.622935649678567,
38.69730973485099, 52.92337459984978]], [[8.8277708157388872, 80.007316235717923, 40.336819994603054, 24.353422327790653],
[13.747314026960296, 16.958567973724541, 57.50595682676709, 56.25177925251905], [31.207714298736988, 22.275561102343119,
16.313479988183616, 76.010284971162221]]], [[[26.998597697380859, 15.622385252752323, 52.52173936476985, 82.819884716042083],
[78.907717062539973, 79.272005467613681, 83.47340370656633, 73.867592414028053], [28.703033182348804, 20.988872679092665,
37.290447061925107, 8.1014432535999816]], [[45.048580940662731, 19.491468419339085, 64.568062512177647, 11.178618880088734],
[34.691570775011911, 69.589063689142193, 61.364777508593363, 77.280249139969868], [1.1830828331200678, 91.262256646734187,
5.021627081867905, 93.388437572311091]]], [[[21.298620038202813, 13.98893927515401, 49.182789882221499, 57.595487238415643],
[79.702455143171449, 70.925982455775426, 81.897869050808879, 60.930959954287275], [57.754562218588148, 29.858113075280372,
14.897533692783952, 6.0864257187503057]], [[3.5671759547432771, 84.139996576651015, 39.806429474961305, 9.3646747259164727],
[45.475947995072914, 10.14946725212269, 3.9530147571287699, 23.62077091218417], [34.033830893546195, 8.3157508831654496,
64.196930272577688, 73.499380413212378]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
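# The length tests reduce a Data object of any rank to a scalar. The reference
# values are consistent with length() returning the Euclidean (Frobenius) norm
# over all components, e.g. for the rank-1 case below (a sketch using plain
# numpy, as an assumed equivalence rather than the escript implementation):
#   a = numpy.array([0.39703364688152853, -0.33246454817593807])
#   numpy.sqrt((a**2).sum())   # ~0.517849777976, matching the reference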
def test_length_taggedData_rank0(self):
arg=Data(0.0304173948959,self.functionspace)
arg.setTaggedValue(1,0.218413236568)
res=length(arg)
ref=Data(0.0304173948959,self.functionspace)
ref.setTaggedValue(1,0.218413236568)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank1(self):
arg=Data(numpy.array([0.39703364688152853, -0.33246454817593807]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.53598331151915435, 0.50067334409291053]))
res=length(arg)
ref=Data(0.517849777976,self.functionspace)
ref.setTaggedValue(1,0.73345204868)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank2(self):
arg=Data(numpy.array([[0.6907462872229877, -0.90522553862549726, 0.087785407451554276, 0.30466419540456768,
0.79322552033540972], [0.88191058742529571, 0.99529532885936489, 0.41490962783197238, -0.0016893790093754912,
-0.95814885065677502], [-0.060249764286741447, 0.63991926602596116, -0.086836131633126534, 0.18124915949321885,
0.68271069967418541], [0.64740861624348423, -0.57455334179273243, -0.5571704702710476, 0.2573850096331336,
-0.34168400956685985]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.81018021133002383, 0.99939497604482352, -0.14079723796118393, 0.40272555558488365,
0.18472338544851841], [-0.07594389046701755, 0.63538212017493612, -0.60010668894251618, -0.33610184381106811,
-0.15191875538531718], [-0.24439106568273194, 0.66671313634788354, 0.14904931462513904, -0.58653890475427217,
-0.58062369844301442], [0.098248585440467551, 0.20530555521782179, -0.51610019710067645, 0.16323562948354797,
-0.71041456409833881]]))
res=length(arg)
ref=Data(2.6546513714,self.functionspace)
ref.setTaggedValue(1,2.19865063671)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.90704201804086337, 0.6329509253938721], [0.21915003710942882, -0.77286765743151387]],
[[-0.49454988231884833, -0.52787084998857448], [-0.50038642296401559, 0.25066877240869223]], [[-0.11435301241890539,
-0.43863272457515157], [-0.21789841788237019, 0.67485153176592272]], [[-0.55566679864765667, -0.57930055750016884],
[0.86011645143557036, -0.7526814967676656]], [[0.51094878077660111, 0.77929881123688749], [-0.42495639450230005,
-0.07585333420623952]], [[-0.89054330821722716, -0.35325589691741888], [-0.3768246899267691,
-0.41975230182765833]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.12217784564610956, -0.42220631009895904], [-0.61429599365799681,
0.61618111022446365]], [[-0.97675816669720295, 0.82574624011080133], [0.81295724921140167, 0.25317345312076855]],
[[-0.051786152179434497, 0.7305249935930429], [-0.93380271417452732, 0.50018267655097737]], [[-0.80264399896632499,
0.79509218774376844], [-0.21791667132633941, 0.66634447245200645]], [[-0.55794532541196795, -0.048202617623965605],
[-0.05960274244353414, 0.74611871917265127]], [[0.88304823875965166, 0.42665187568627805], [-0.43824304428388317,
-0.62742457744585889]]]))
res=length(arg)
ref=Data(2.76676324475,self.functionspace)
ref.setTaggedValue(1,3.02637754858)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_length_taggedData_rank4(self):
arg=Data(numpy.array([[[[0.031685672552886901, -0.9267701908635706, -0.056148458903377607, -0.84202454095770185],
[-0.3616646666858836, -0.29520741814627049, 0.024595806152944988, -0.71158936975814591], [0.12440081378701895,
0.72960501658634347, -0.87170545922253151, 0.10882643738812559]], [[0.88800242901141169, 0.48693301422640589,
-0.13887736360495673, -0.743971681222801], [-0.74717247198853753, 0.35260473534815429, 0.43837149883392601,
-0.55027619071689737], [0.66850441314063103, 0.7728717623705943, 0.21470523696142552, -0.71541050236116877]]],
[[[-0.48211303782598347, 0.3644457740018654, 0.68984391713960602, 0.74515540572117134], [0.53899835618675929,
-0.70996632321229947, -0.51515930082178918, -0.36888505048093223], [0.78774470226335747, -0.39544353241612185,
0.32281697817612787, -0.16311128990188162]], [[-0.51374217556516255, -0.45792789001444856, 0.47007708506811818,
-0.43333371235667362], [-0.02632140668309213, 0.93007210792179462, 0.59736202366291802, 0.22152676969085516],
[0.39775547303207204, 0.53313877938239496, 0.77934427730455358, -0.21566366366398793]]], [[[0.91343257162829294,
-0.77320607588319645, -0.85087366672245945, -0.8231988743945351], [0.2844336912954244, -0.91728899258227847,
-0.46154275241222287, -0.93255280333208801], [-0.53369991345904522, 0.12949000049493731, 0.53421477536661266,
-0.63975708880504234]], [[0.058270730436794649, 0.0515918698875375, -0.24523619977036026, 0.29671975332241707],
[-0.95019879958514597, -0.94737283445325193, -0.41748226318386861, -0.048340741857560765], [0.59312485406738369,
-0.30988717510892605, 0.090027828305644153, -0.51722372921834436]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.20169792059747449, 0.13920330493621691, 0.034614492760971061,
-0.82851290218784412], [-0.01651072019815425, -0.78097847045185942, -0.13764015571975197, -0.35461745589441884],
[-0.49540307831103148, 0.2474487427834613, 0.22695961470352821, -0.37681697269105396]], [[0.68611428610099079,
-0.29255652866384385, -0.42345462193199213, 0.82794567130265717], [-0.092325345849896712, -0.15183768034385192,
0.13712248884188671, -0.72118044002659931], [-0.050604578031925973, -0.58555183018848322, -0.92016117326965108,
0.90294256985722066]]], [[[0.37734783987332321, 0.50440698564950592, -0.98813708121482202, 0.35026860039322605],
[0.69439644470699591, -0.065391298373910445, 0.22537555580617075, -0.56010684906819108], [0.8850708627713344,
0.33442383771972017, -0.88133340777125468, 0.79499967022722062]], [[0.84658289102126205, -0.45737265507509539,
0.22891245018035788, 0.66506738603993654], [0.30854215900653492, -0.15997939628404678, 0.60133183458548922,
0.41180859119482771], [-0.82182443995887455, 0.40193978476563985, -0.47097558780935489, -0.78813126661061927]]],
[[[-0.60025729863753186, -0.47916988408835803, -0.66879674780784004, -0.34290183723542933], [0.86889784066785403,
0.32127797136956282, 0.96139056560192393, 0.19777452842099286], [-0.52352911870216756, 0.70260881377974083,
-0.83733962168226328, -0.56735885586741075]], [[-0.94301726877443093, -0.25226331153593828, 0.52038556769907629,
0.53828722724477851], [-0.70767715580900048, -0.57712655180776129, -0.14200458485618395, -0.1111721398291996],
[0.64852743898007059, 0.99188751270956743, 0.55982434354197941, 0.038358717131004916]]]]))
res=length(arg)
ref=Data(4.84097039803,self.functionspace)
ref.setTaggedValue(1,4.824055271)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
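# maxval(arg) reduces to the largest component, applied separately to the
# default and the tagged value, so the result shape is always (). For the
# rank-1 case below this is simply max(0.64744990357..., -0.52792070755...),
# i.e. 0.647449903572 as asserted (an interpretation inferred from the
# reference values, not quoted from the escript documentation).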
def test_maxval_taggedData_rank0(self):
arg=Data(-0.219558082185,self.functionspace)
arg.setTaggedValue(1,0.373894454941)
res=maxval(arg)
ref=Data(-0.219558082185,self.functionspace)
ref.setTaggedValue(1,0.373894454941)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank1(self):
arg=Data(numpy.array([0.64744990357184862, -0.52792070755787024]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.10929307972444979, 0.83862721932489936]))
res=maxval(arg)
ref=Data(0.647449903572,self.functionspace)
ref.setTaggedValue(1,0.838627219325)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank2(self):
arg=Data(numpy.array([[-0.27229065227200344, 0.82294949506313886, -0.17177977432508462, -0.010882074546768816,
0.21768109521645918], [-0.29157181238782481, -0.25380425885757485, 0.027706303762511597, -0.94845012536927964,
0.87176092732644639], [-0.51643332578214518, 0.71998926614777581, 0.40354991809580687, 0.70904315000536799,
0.54655648312080007], [0.32165817766188853, -0.20424131255028888, 0.42895961651274672, -0.99791274480618064,
-0.85669519376242986]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.97262314128809613, 0.48547675148247826, 0.042278165643087728, -0.41624875992248667,
0.030567800083523444], [-0.049791194357233781, -0.79704488987202815, -0.96082903842770118, -0.83554878345036676,
0.60236115537073709], [0.28354667286636603, -0.29929954525932323, 0.022969958455315576, -0.24737146774844909,
0.19469978983867731], [-0.35513081769146426, -0.1046032314241474, 0.49567238233255839, -0.80993625419310633,
-0.9139531605288036]]))
res=maxval(arg)
ref=Data(0.871760927326,self.functionspace)
ref.setTaggedValue(1,0.972623141288)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank3(self):
arg=Data(numpy.array([[[0.61474553298852697, 0.55779486217197505], [-0.31091458562805419, 0.016419889635135521]],
[[0.21004151551334682, 0.027687106765762914], [0.6637113716450791, -0.95040841718825075]], [[-0.9300566761481408,
-0.68906964030797435], [-0.97014359375905679, -0.74418973910997255]], [[0.97835172429442774, -0.46756642182408092],
[-0.42578086461554476, 0.52069167480569556]], [[-0.38782064307268715, 0.49053364163876134], [0.068892813320603263,
-0.053107367737293076]], [[-0.48133213301475331, 0.25593099013174481], [0.44390577068431614,
-0.97257874780052989]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.257603663342449, -0.038214017051409144], [-0.66873613005791666,
-0.035099420794828529]], [[0.91705389480638777, -0.92302237979729274], [0.7106922910822433, -0.94404085701758933]],
[[-0.79556970254884352, -0.25132479916123152], [0.29858220297465121, 0.90707472048112803]], [[-0.29244201831636918,
-0.017346997146175047], [0.12733928111159498, -0.38855138005928658]], [[0.14291175066952921, -0.49761469275017678],
[-0.76189392983334514, 0.84493776228691786]], [[-0.22005917389939156, -0.61656374043989004], [0.99298796284139845,
-0.067815876101644967]]]))
res=maxval(arg)
ref=Data(0.978351724294,self.functionspace)
ref.setTaggedValue(1,0.992987962841)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_maxval_taggedData_rank4(self):
arg=Data(numpy.array([[[[-0.38913213142622194, -0.54148345216537774, 0.41714738540363649, 0.71966537757449256],
[-0.88902480268446227, -0.3777307304947799, -0.13183640051157219, 0.81415347389128234], [-0.74384993993115311,
-0.89127969698262977, -0.55809388984463593, 0.83808549468591687]], [[-0.0072160550394104739, -0.32635219120000691,
0.62522095163012725, -0.84470730211227218], [-0.76620143726977852, -0.49704334323428423, 0.65091921570676603,
0.37557075348586233], [-0.88570985653924961, -0.14885693428091606, -0.1460372910003831, 0.46444747179886625]]],
[[[0.30454098886894498, 0.6867161497858465, 0.72424680264691355, 0.5095615427094411], [0.072474613257559994,
0.43806936539601549, -0.59905605757280056, -0.45990321243729815], [-0.72712992491035378, -0.55689232155025548,
0.36037470124764459, -0.57195607819276018]], [[0.0051060589653528776, -0.47599982553998998, -0.39156196066990367,
-0.71880248868370389], [0.41451955450758748, 0.0028147774045290674, -0.6972003711983854, 0.78507608882318736],
[0.25418862509575768, 0.2284337652701498, 0.61856440627353049, 0.98714160660309891]]], [[[-0.47720293386376555,
-0.65125648891362786, -0.30435692372835654, 0.31977497838442503], [0.72827978446594854, -0.63983256938337552,
0.78982468457827881, 0.22954824117307959], [0.32315333011323544, 0.53527371494472065, -0.4131594330366064,
0.99215992692482535]], [[-0.74789735956161274, -0.62925352602039042, 0.71361119864052269, -0.98014330258009075],
[-0.89800389430130223, -0.37060754911664562, 0.3856639538855593, 0.034422663486305183], [-0.34490780926818876,
0.47458909120499637, 0.94818559671902958, 0.1617906804998257]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.99933153424495091, -0.39534988719092179, -0.16778504692207585,
-0.5906967701363508], [0.43387683071959104, 0.99162615949373745, 0.10547807703791179, -0.8113777964293909],
[0.24291475766513315, -0.71669244345435779, -0.76485897580613305, 0.15564799608712043]], [[-0.75156489257223669,
-0.055450247835175936, 0.3346750287693776, -0.66254424416459123], [0.35374045325725345, -0.051590559912436884,
-0.587757300739292, -0.33917336326606917], [-0.57544619252547657, 0.20907053572412782, 0.68711149771337832,
-0.056393263581338671]]], [[[0.75211852960020509, -0.10030934714915718, 0.33951992771212902, 0.60018880521446327],
[0.78716758837909295, -0.059231168586686644, -0.35866282572045227, 0.85083431016927791], [0.15298677857710419,
0.89780425582787293, -0.20576313384645473, 0.062421360873735843]], [[-0.70974271086498986, -0.45339037418498562,
0.41140062690705359, -0.37665346319424886], [-0.044537762904711675, -0.39079696673697262, 0.089532841376569916,
0.2190192547531522], [0.36139300850043266, -0.44279309647849896, -0.86452061630608856, -0.1231662099055526]]],
[[[-0.58039192544896112, 0.53706765389132238, -0.72356516474408639, 0.6503741573846944], [-0.30912719510660591,
-0.83285543652320859, -0.37306494080273778, 0.6518672264629326], [0.98787250878747979, 0.54733052031198159,
-0.15622032199949798, 0.09467999908286262]], [[0.40533336391796038, 0.73239200515802327, 0.39369121056194256,
0.081340379201521706], [-0.88455610311843214, 0.51118489146623691, -0.19795740083901325, 0.46388740676326989],
[0.54780674501660931, 0.63586854173407947, 0.92134722611145814, -0.39904465723137394]]]]))
res=maxval(arg)
ref=Data(0.992159926925,self.functionspace)
ref.setTaggedValue(1,0.999331534245)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
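# minval(arg) is the counterpart of maxval: it reduces to the smallest
# component of the Data object, e.g. min(-0.47859075..., -0.15003593...) ==
# -0.478590751158 in the rank-1 case below (again inferred from the reference
# values rather than from the escript documentation).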
def test_minval_taggedData_rank0(self):
arg=Data(0.00722788253378,self.functionspace)
arg.setTaggedValue(1,0.691024712935)
res=minval(arg)
ref=Data(0.00722788253378,self.functionspace)
ref.setTaggedValue(1,0.691024712935)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank1(self):
arg=Data(numpy.array([-0.47859075115756422, -0.15003593348682531]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.067933816863879004, -0.74579305994260148]))
res=minval(arg)
ref=Data(-0.478590751158,self.functionspace)
ref.setTaggedValue(1,-0.745793059943)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank2(self):
arg=Data(numpy.array([[-0.36168873882657993, -0.67372921463813573, 0.95569685886688305, 0.15779096197431586,
-0.24898227425545327], [-0.27356968554638628, 0.084426955507445944, -0.87908626632112941, -0.46051995344239027,
-0.42541441304041916], [-0.14074836177854189, 0.75123070420356286, 0.86230982812739998, -0.54837108857321315,
-0.77749802778211086], [-0.022482114313683077, 0.54155540121340873, -0.96328224231771142, 0.14101127782001344,
0.44096380596153772]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-0.4800201278086158, 0.030265479756139024, 0.18506553588051, 0.034952750086585604,
0.31613749260546875], [0.21702894874281076, 0.9905115362133845, 0.12091812867766771, -0.51948993749364369,
0.28399846164050846], [-0.12574413416415542, -0.28875489198619508, -0.98032997474740724, 0.26065946805344775,
-0.79682683032993196], [0.78279712230924381, 0.49596074793599509, 0.61578931696589767, -0.32674782393935087,
0.15592301292387312]]))
res=minval(arg)
ref=Data(-0.963282242318,self.functionspace)
ref.setTaggedValue(1,-0.980329974747)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.8465826019970264, 0.89694911727829285], [0.87594383540616905, 0.94342173268714724]],
[[0.9706521300307176, -0.53661304945739108], [0.81066921409276915, -0.52315847873793642]], [[0.72760204320037447,
0.65094570568679222], [-0.77119150241041834, 0.4512829012153714]], [[0.49454458456031469, 0.58663758011234646],
[-0.77569241585888848, -0.27133491940751875]], [[0.29690990109617243, 0.50502608076647637], [-0.7582923726315618,
0.0096946343625710085]], [[-0.4250267226063793, -0.6090497397361152], [0.098508158636596344,
-0.56684989375571737]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.31462028021419175, 0.59740363549297837], [0.054399091875714456,
0.089606631226896605]], [[0.9748509842440114, -0.39638194564239226], [0.20679313347688, -0.60337302735508302]],
[[-0.8191687096963598, -0.81394151814838511], [0.44868905032346196, -0.28406609436304642]], [[0.12384704256533041,
-0.95904548813036494], [0.61285482385311929, -0.17959569661829544]], [[0.19304181831790745, 0.36508908336882229],
[-0.41743150582026445, -0.29917104704693598]], [[0.16069761697480067, 0.26671853918691113], [-0.5774634268596528,
-0.31004354846465287]]]))
res=minval(arg)
ref=Data(-0.846582601997,self.functionspace)
ref.setTaggedValue(1,-0.95904548813)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_minval_taggedData_rank4(self):
arg=Data(numpy.array([[[[-0.34624369284201362, 0.73107822148429458, -0.36551693557416232, -0.020215028669572366],
[0.1351050067633206, 0.23249118524493095, -0.76011821576308392, 0.45071155860356837], [0.18426006390789262,
-0.77886194275054765, -0.17063189696766501, 0.45075347499478347]], [[0.23551925256442985, -0.53414385378966478,
-0.47679787077175595, -0.9726535726299459], [-0.91914615413530365, -0.17765391232754735, 0.45082787140479064,
-0.968362694999094], [0.69666777807602775, 0.2130684865225696, 0.64760593585671877, 0.64903684670519413]]],
[[[0.18180576019742634, 0.62890796445359309, -0.13877607363038269, -0.10822311814395635], [0.28895838281375896,
-0.36598515521702191, 0.30413929033460807, -0.81140381230705128], [-0.76365479315177298, 0.71136700952304466,
-0.95596671935962552, 0.52118084564552913]], [[-0.43905020629611879, 0.57723600864036473, -0.22582169869491397,
-0.43742926957893391], [-0.46764952226860124, -0.066494182243584721, 0.92972113541559098, 0.044829809294563816],
[-0.49878556156045928, -0.96153559198737559, -0.99767482086608483, 0.74525441641626755]]], [[[-0.59605054963850534,
0.56186148085748022, 0.77287286011247414, 0.035023085983731717], [-0.97342431925030803, 0.17825829308663432,
-0.37794591543941247, 0.089384029569202106], [-0.75706695903965793, -0.31057995469060207, -0.57391135215614786,
-0.56504897076385308]], [[0.42656492210469588, 0.92732907019274857, 0.71470561916432929, 0.96500484536009212],
[0.18751272694170362, -0.95123662745307258, -0.8190703610202914, -0.66133004541039009], [-0.043758306539602554,
0.45325798844504162, -0.26304376860247247, 0.15468324307157122]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.36591388135908987, -0.95486755838163839, -0.0013252788976745311,
0.69567157492793963], [-0.79676713780837027, 0.3214926946703327, -0.75096581427350828, 0.35710657155542735],
[-0.97096953218037885, -0.3319681518796227, -0.57152204281278296, 0.10421746159115108]], [[-0.97119041056769873,
0.47173466396132091, 0.63208593730306029, -0.85656329130504094], [-0.62549849973292804, -0.75698243824438927,
0.46453304587151512, -0.42573348253225807], [-0.78748289295593743, 0.47450581221739219, -0.78643250843903134,
0.30232500379827609]]], [[[0.10369672224908166, 0.37373110705513635, 0.35557253428911517, -0.8134557994263798],
[-0.63922930590149818, -0.34344343708131575, 0.081508957580154862, -0.045641914923246096], [0.045075125491382595,
-0.81357712137145177, 0.19199928764727225, 0.98346733921059637]], [[0.016107659447112344, 0.36822191678862071,
-0.05021241790306008, 0.50015935043378978], [0.011940872302404593, -0.46073951816738523, 0.71275071871696527,
0.55288336323320908], [-0.87646193066608746, -0.80998760193673003, 0.067859757365372753, 0.47872123549665657]]],
[[[0.4683476290440689, 0.69014985356074243, -0.26471526741239182, 0.96932033126419936], [-0.1461901082287993,
-0.76413258812010354, -0.67613738605542029, 0.60089152926266887], [0.41343229663812564, 0.64858241536864947,
-0.84530164516922857, -0.79335799145751662]], [[-0.46974281932781614, -0.12403837218332758, 0.08729063956578309,
0.60621211421529453], [-0.82220451633893021, -0.54597977180396184, 0.58913700000503999, 0.087122789707702708],
[-0.90671128506770948, -0.34903110797882597, 0.21581878455246306, 0.90495837687090042]]]]))
res=minval(arg)
ref=Data(-0.997674820866,self.functionspace)
ref.setTaggedValue(1,-0.971190410568)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
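# clip(arg, lo, hi) limits every component to the interval [lo, hi]: values
# below lo are replaced by lo and values above hi by hi. The bound -0.3 shows
# up in the references as -0.29999999999999999, its binary floating-point
# representation. A numpy sketch of the same behaviour (an assumption used for
# illustration only):
#   numpy.clip(numpy.array([-0.5577, -0.0916]), -0.3, 0.5)   # -> [-0.3, -0.0916]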
def test_clip_taggedData_rank0(self):
arg=Data(-0.586456381539,self.functionspace)
arg.setTaggedValue(1,0.209563153277)
res=clip(arg,-0.3,0.5)
ref=Data(-0.3,self.functionspace)
ref.setTaggedValue(1,0.209563153277)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_clip_taggedData_rank1(self):
arg=Data(numpy.array([-0.55774515991639473, -0.091560877171301236]),self.functionspace)
arg.setTaggedValue(1,numpy.array([0.66746629096576049, 0.29687459606292088]))
res=clip(arg,-0.3,0.5)
ref=Data(numpy.array([-0.29999999999999999, -0.091560877171301236]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.5, 0.29687459606292088]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_clip_taggedData_rank2(self):
arg=Data(numpy.array([[0.92078283012674222, 0.082966604818023537, -0.44955157461118889, 0.20379872665296062,
-0.027334825618857694], [0.2545815386580188, 0.63863229466603832, -0.2636242975469909, 0.31732729387167513,
-0.33927475782076022], [-0.86480384733397719, 0.63899360745381273, -0.14244318620875474, -0.28418136656865478,
0.52805530986754579], [-0.72182500154188611, -0.5520656722291335, -0.53596757119473004, -0.3496219738666011,
0.026421982468125993]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-0.61641779923056816, 0.72414040414314074, 0.74658958249612439, -0.83506986304109487,
-0.27184762551498576], [-0.91880091709204414, 0.13957485854328522, 0.044242311261486256, -0.47633342462252881,
0.14077523635761846], [0.97315993643550813, -0.69880173278152213, 0.34287512312623369, -0.17545687063500126,
0.4117608764449725], [-0.65151651119897513, 0.07746827744971152, 0.76062795648091908, 0.63018904248319307,
-0.37035138194082373]]))
res=clip(arg,-0.3,0.5)
ref=Data(numpy.array([[0.5, 0.082966604818023537, -0.29999999999999999, 0.20379872665296062, -0.027334825618857694],
[0.2545815386580188, 0.5, -0.2636242975469909, 0.31732729387167513, -0.29999999999999999], [-0.29999999999999999, 0.5,
-0.14244318620875474, -0.28418136656865478, 0.5], [-0.29999999999999999, -0.29999999999999999, -0.29999999999999999,
-0.29999999999999999, 0.026421982468125993]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.29999999999999999, 0.5, 0.5, -0.29999999999999999, -0.27184762551498576],
[-0.29999999999999999, 0.13957485854328522, 0.044242311261486256, -0.29999999999999999, 0.14077523635761846], [0.5,
-0.29999999999999999, 0.34287512312623369, -0.17545687063500126, 0.4117608764449725], [-0.29999999999999999,
0.07746827744971152, 0.5, 0.5, -0.29999999999999999]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_clip_taggedData_rank3(self):
arg=Data(numpy.array([[[-0.93595611718046134, 0.15030385020288572], [-0.075765264835379709, 0.11334342086568139]],
[[-0.85963167087054893, 0.31195616074306898], [-0.90301388731995025, -0.43484977485230036]], [[-0.35904775125395316,
-0.26262746461074271], [-0.40250105120701685, -0.89652871806573153]], [[0.43650836668275272, -0.3757431381089118],
[-0.30636747598259539, -0.84093431369407745]], [[0.47390187035348164, -0.10390754716923678], [0.51126763372526352,
0.15832367335744291]], [[0.67025430776305206, 0.82371861770706922], [0.96960307597921713,
0.090296012972978623]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.41422621266137605, 0.66937169235210336], [-0.46879447383017303,
0.52242494387680716]], [[-0.14585330598553226, 0.71730332341970882], [-0.69219830659242065, -0.22634999681297296]],
[[0.6272045132079489, -0.63463675295257649], [-0.084877856284922171, -0.80679549099060122]], [[0.019282801790388548,
0.078272411760595517], [-0.72424186578333605, 0.11824563331903692]], [[0.05228289224929239, -0.5210500081800693],
[-0.52205651032033651, -0.011427130000266184]], [[0.37440287860429255, -0.22324475885669171], [0.44468789932497854,
0.93805307645514624]]]))
res=clip(arg,-0.3,0.5)
ref=Data(numpy.array([[[-0.29999999999999999, 0.15030385020288572], [-0.075765264835379709, 0.11334342086568139]],
[[-0.29999999999999999, 0.31195616074306898], [-0.29999999999999999, -0.29999999999999999]], [[-0.29999999999999999,
-0.26262746461074271], [-0.29999999999999999, -0.29999999999999999]], [[0.43650836668275272, -0.29999999999999999],
[-0.29999999999999999, -0.29999999999999999]], [[0.47390187035348164, -0.10390754716923678], [0.5, 0.15832367335744291]],
[[0.5, 0.5], [0.5, 0.090296012972978623]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.29999999999999999, 0.5], [-0.29999999999999999, 0.5]], [[-0.14585330598553226,
0.5], [-0.29999999999999999, -0.22634999681297296]], [[0.5, -0.29999999999999999], [-0.084877856284922171,
-0.29999999999999999]], [[0.019282801790388548, 0.078272411760595517], [-0.29999999999999999, 0.11824563331903692]],
[[0.05228289224929239, -0.29999999999999999], [-0.29999999999999999, -0.011427130000266184]], [[0.37440287860429255,
-0.22324475885669171], [0.44468789932497854, 0.5]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_clip_taggedData_rank4(self):
arg=Data(numpy.array([[[[-0.97160016541599048, -0.43464196910924269, -0.23690005500198974, -0.54505821229189078],
[0.75723735103604484, 0.47728350383465212, -0.18251619176764877, 0.99025221671542263], [-0.33126525295723441,
-0.55097518327671136, 0.46386393103124957, -0.5028567057894]], [[-0.96428397806042776, -0.32723838098616564,
-0.51713937973626223, 0.085602827397356585], [0.39080321493740078, -0.85653662913195427, 0.16921767079099848,
0.39501968814837984], [-0.094819423058470464, 0.32719847739349617, 0.62624242042874534, 0.034850296976497885]]],
[[[-0.37578612340398809, -0.95145289363650298, -0.85275150976030334, 0.89051350131703133], [-0.10582668740299472,
-0.13140703934766851, 0.58226666842363484, -0.85098889641917208], [-0.73422134882814771, -0.74056143316833989,
-0.85585388778247506, 0.37383179934715027]], [[-0.60621611229601191, 0.83398721692112243, 0.79208200862070988,
-0.76887710240427642], [-0.88684756867392589, -0.73572431861923282, -0.85514288036024833, -0.90625444381469755],
[-0.63116702698441807, -0.62921521603185582, -0.027679572698908084, -0.49500631740870782]]], [[[-0.15914337383288424,
0.095371185176566842, 0.37912475459494721, 0.4024421559252549], [0.17156871625876557, -0.16181202305035125,
-0.54126597465794779, 0.83987886819444846], [0.069057416726155507, -0.49106958817623569, 0.93467757049987465,
0.28287952469825028]], [[0.68942704651852793, 0.81096750450878519, 0.37481094259877334, 0.1471418138825642],
[0.20070329637746753, -0.78818450250007777, -0.152719755269904, 0.92798318613388875], [0.83301083132574871,
0.20846747979704916, -0.32414811917626918, 0.82969746085395513]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[-0.36196197753514481, 0.77440039326442012, -0.87338825838538359,
0.98628288992701973], [-0.86385653721960209, -0.095672296296486437, 0.97906517488663858, 0.019998954995007034],
[0.051167693979698559, 0.28975749676662454, -0.36804371490310661, 0.050635603877595692]], [[-0.6385293149409712,
-0.90985051701987807, -0.69600231371520249, -0.32236913709792292], [0.38715429363993836, 0.52397282032521209,
-0.39983434790593364, -0.56617869533507559], [-0.65564890024253941, -0.49218838180277347, 0.044380484975500512,
-0.03600367808098448]]], [[[-0.90989197666954391, 0.75234794134747851, 0.75416516181827009, -0.44742433054519326],
[0.0043609042230479123, 0.68736358896449423, -0.91760739014087522, -0.051928216578162489], [0.49324932691969869,
-0.73846202351943213, -0.58343447923298308, -0.88740891097177732]], [[-0.57612097838084186, -0.68345113210291419,
0.28725377558437426, -0.70369648221104031], [-0.33883006688905759, 0.23641388111792661, -0.62579793663903915,
0.5329371352222847], [-0.045959609245912469, 0.08851424399081087, 0.10939786381011385, -0.56148257689693759]]],
[[[-0.0193463842052497, 0.91513147036489806, -0.66676371736626283, -0.48232555642646879], [-0.56241036893974305,
0.90529235043557299, -0.16265277717097537, -0.32665027468083485], [-0.35940366396570367, -0.9797279130755483,
-0.31377987720228839, 0.8283948486411099]], [[0.19189387156386983, -0.10747463664656132, 0.18670232305419088,
0.4988825432691899], [-0.71382834702133491, 0.99952010964477589, 0.56177755545088326, -0.21795844922005014],
[-0.60392884386678669, -0.64095031591082718, -0.24641710816927898, -0.64897637790561413]]]]))
res=clip(arg,-0.3,0.5)
ref=Data(numpy.array([[[[-0.29999999999999999, -0.29999999999999999, -0.23690005500198974, -0.29999999999999999], [0.5,
0.47728350383465212, -0.18251619176764877, 0.5], [-0.29999999999999999, -0.29999999999999999, 0.46386393103124957,
-0.29999999999999999]], [[-0.29999999999999999, -0.29999999999999999, -0.29999999999999999, 0.085602827397356585],
[0.39080321493740078, -0.29999999999999999, 0.16921767079099848, 0.39501968814837984], [-0.094819423058470464,
0.32719847739349617, 0.5, 0.034850296976497885]]], [[[-0.29999999999999999, -0.29999999999999999, -0.29999999999999999, 0.5],
[-0.10582668740299472, -0.13140703934766851, 0.5, -0.29999999999999999], [-0.29999999999999999, -0.29999999999999999,
-0.29999999999999999, 0.37383179934715027]], [[-0.29999999999999999, 0.5, 0.5, -0.29999999999999999], [-0.29999999999999999,
-0.29999999999999999, -0.29999999999999999, -0.29999999999999999], [-0.29999999999999999, -0.29999999999999999,
-0.027679572698908084, -0.29999999999999999]]], [[[-0.15914337383288424, 0.095371185176566842, 0.37912475459494721,
0.4024421559252549], [0.17156871625876557, -0.16181202305035125, -0.29999999999999999, 0.5], [0.069057416726155507,
-0.29999999999999999, 0.5, 0.28287952469825028]], [[0.5, 0.5, 0.37481094259877334, 0.1471418138825642], [0.20070329637746753,
-0.29999999999999999, -0.152719755269904, 0.5], [0.5, 0.20846747979704916, -0.29999999999999999, 0.5]]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[[-0.29999999999999999, 0.5, -0.29999999999999999, 0.5], [-0.29999999999999999,
-0.095672296296486437, 0.5, 0.019998954995007034], [0.051167693979698559, 0.28975749676662454, -0.29999999999999999,
0.050635603877595692]], [[-0.29999999999999999, -0.29999999999999999, -0.29999999999999999, -0.29999999999999999],
[0.38715429363993836, 0.5, -0.29999999999999999, -0.29999999999999999], [-0.29999999999999999, -0.29999999999999999,
0.044380484975500512, -0.03600367808098448]]], [[[-0.29999999999999999, 0.5, 0.5, -0.29999999999999999],
[0.0043609042230479123, 0.5, -0.29999999999999999, -0.051928216578162489], [0.49324932691969869, -0.29999999999999999,
-0.29999999999999999, -0.29999999999999999]], [[-0.29999999999999999, -0.29999999999999999, 0.28725377558437426,
-0.29999999999999999], [-0.29999999999999999, 0.23641388111792661, -0.29999999999999999, 0.5], [-0.045959609245912469,
0.08851424399081087, 0.10939786381011385, -0.29999999999999999]]], [[[-0.0193463842052497, 0.5, -0.29999999999999999,
-0.29999999999999999], [-0.29999999999999999, 0.5, -0.16265277717097537, -0.29999999999999999], [-0.29999999999999999,
-0.29999999999999999, -0.29999999999999999, 0.5]], [[0.19189387156386983, -0.10747463664656132, 0.18670232305419088,
0.4988825432691899], [-0.29999999999999999, 0.5, 0.5, -0.21795844922005014], [-0.29999999999999999, -0.29999999999999999,
-0.24641710816927898, -0.29999999999999999]]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2, 3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
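# trace(arg, axis_offset) sums the diagonal over the two consecutive axes
# starting at axis_offset; those two axes must have equal length and are
# removed from the result shape (rank 2 -> scalar, rank 4 -> rank 2, etc.).
# A numpy sketch that reproduces the references below (an assumed equivalence,
# not the escript implementation itself):
#   numpy.trace(a, axis1=axis_offset, axis2=axis_offset + 1)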
def test_trace_taggedData_rank2_offset0(self):
arg=Data(numpy.array([[-0.056722291636611466, -0.58718629093905927, 0.5130520912721297, 0.66092297449797521],
[-0.063919150248518131, -0.39058783497457283, 0.4661647306320098, 0.67021135619437922], [-0.8330832444261691,
-0.85322824239920525, 0.20482436629833045, -0.96129533456242999], [-0.69346857149921193, 0.45558369040100977,
0.84938565880042294, 0.43398982881393078]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.14749416644983149, 0.60083787293366497, -0.57073677566409819, -0.46610458227440743],
[0.57348959548348577, -0.69790943654012771, 0.053443762898546243, -0.57344483020596693], [-0.50199618324500483,
0.45102735241156111, -0.52714587985442107, -0.017548841179002128], [0.76466153523225411, 0.73817066103983109,
-0.01022356510953859, 0.4565914866524452]]))
res=trace(arg,0)
ref=Data(0.19150406850107693,self.functionspace)
ref.setTaggedValue(1,-0.6209696632922721)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_trace_taggedData_rank3_offset0(self):
arg=Data(numpy.array([[[0.27621931670337752, 0.79097590056883305], [0.25951150897721909, 0.31817228937435349],
[-0.30264453843236794, 0.50150595739076276], [-0.98358197877106046, -0.79007659361841576], [0.083365996568159462,
0.20330829941698836], [-0.19557587546996058, -0.98573294081094631]], [[-0.79867287463079251, -0.79833593063209984],
[-0.50599476325253301, 0.28464853963701153], [0.26247038837571446, -0.74722310930311919], [-0.30522375733916096,
-0.22922132310741117], [0.76621304427278858, -0.93443415586735745], [-0.055794160098196155, -0.56342540287272369]],
[[-0.76566607687640786, 0.73746119374128516], [-0.34094386574227986, 0.13237808550409502], [0.55551190660484662,
0.099929562752655299], [0.62416721084404547, -0.74680079679520173], [-0.32298137868118437, -0.92140648111250556],
[-0.46933363170955111, -0.6173242105648995]], [[-0.78927536706600288, -0.17827448265929013], [0.29041067654470787,
0.0090187849852394475], [0.38470494443244418, -0.4697278621834069], [0.97131863616471859, -0.229464269100631],
[0.27251067788871963, -0.1673771147344536], [-0.6104133183668432, -0.12904004274191427]], [[-0.001577990717716915,
-0.98040279977901434], [-0.7740282327068011, -0.86160004795905754], [-0.99294888809825377, 0.13804396580730138],
[-0.42198087116935734, -0.66074706364513558], [-0.13571659404271919, -0.40146697280195087], [0.7288641948187673,
0.63657731594340872]], [[0.80404064506986539, 0.75601472144811699], [0.150056166800417, 0.22187959041457184],
[0.10012067300234517, 0.7651851752515153], [0.8406334361842227, 0.37796088650765558], [-0.92804955176844484,
-0.74147540231797371], [0.82011305402821022, 0.15532709244856013]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[0.13068026864793758, 0.84826211242090954], [0.82294651753010584,
-0.0038648240236784925], [-0.68641664387261003, 0.30189977432863979], [-0.69405145641145394, -0.3344401427328203],
[0.42997704136196258, -0.68883809314630029], [0.32627296229962499, -0.05429336799479767]], [[0.45002800082828198,
0.16571548079195186], [-0.19146515787251306, -0.25944069210245391], [0.42181551373272685, 0.20993972258463467],
[-0.32968166459292703, -0.34521619419565819], [-0.46239959214099868, -0.39421043463289895], [-0.188351920308371,
-0.81136727582354662]], [[0.23154319984003435, -0.24954542875472541], [0.32839612369649074, -0.010181910541722683],
[0.85371015583719845, 0.060824309654751829], [0.60998908541486352, -0.10041314710819926], [-0.21491424918195423,
0.016843890670983797], [-0.81948469054048334, -0.73219493129556623]], [[0.91110914128736176, -0.45029317016500681],
[0.75141649423683887, 0.37493202330235631], [-0.95649911853113556, -0.5628139992624257], [0.32354191029466106,
-0.7987584974525852], [0.99623080387531782, 0.18893689271418501], [0.72418591288460332, -0.55842778836728257]],
[[-0.95128831559556049, -0.22009541767819618], [-0.16638224381960742, 0.7431403220769417], [0.43446696882816038,
-0.37684380902355885], [-0.67103103356015725, 0.16658940700564706], [0.95093577264569817, -0.52598902537807746],
[-0.62575812314747803, -0.41939051724396537]], [[-0.12219921113913323, -0.41583537753036892], [-0.015148535233316052,
-0.51255475686325225], [0.19531511234994969, 0.021398629688998305], [0.16504650528291065, -0.93951736049702372],
[-0.72053369468947892, -0.85570893480481525], [-0.18173169200857453, 0.058687917578594373]]]))
res=trace(arg,0)
ref=Data(numpy.array([1.9814515562059007, 0.69994985350447814]),self.functionspace)
ref.setTaggedValue(1,numpy.array([1.8856712575444079, -0.61641387527886082]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_trace_taggedData_rank3_offset1(self):
arg=Data(numpy.array([[[0.18108653460154356, 0.89592989152069769], [0.39406006245310943, 0.36308832003165215]],
[[0.080199626318692596, 0.08698141710531293], [-0.3802472233632439, -0.44580146494500328]], [[-0.37185454390545525,
-0.64624032154496258], [-0.82542887049556013, 0.49792673962118883]], [[0.0061652005510264285, -0.29039438965093134],
[-0.13325260707029396, 0.60381721769509866]], [[-0.2641881854161634, -0.1041123088263789], [-0.32813370622184435,
-0.45119880870645801]], [[-0.19872503698560839, -0.79949975899995285], [0.37373978335533398,
-0.74402368667542862]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.50480964706631526, -0.96868442725949899], [-0.20796807939835626,
0.80648029637272489]], [[0.057449307908810532, 0.3998916254657261], [-0.39257450539926908, -0.29696732329834341]],
[[0.023852805137722699, -0.17201731179752922], [0.11673292308063665, 0.89769205371407712]], [[0.73063864471010453,
0.30788376908094528], [-0.8512238294696961, 0.92354339078776171]], [[-0.69374180162018262, -0.74056846939003362],
[-0.98232592110935735, -0.89406649693828055]], [[-0.44480336519662456, 0.93254466857149532], [-0.76300109266753591,
0.52582923130116521]]]))
res=trace(arg,1)
ref=Data(numpy.array([0.5441748546331957, -0.36560183862631068, 0.12607219571573358, 0.60998241824612509,
-0.71538699412262141, -0.94274872366103701]),self.functionspace)
ref.setTaggedValue(1,numpy.array([0.30167064930640963, -0.23951801538953288, 0.92154485885179982, 1.6541820354978662,
-1.5878082985584632, 0.081025866104540656]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_trace_taggedData_rank4_offset0(self):
arg=Data(numpy.array([[[[-0.45962016926742089, 0.02865417528062264, -0.29756263033779917, -0.64078111586047348],
[0.92164046954155987, -0.54003599165059346, -0.87875895286816386, -0.3473389494925978], [-0.16291776046801099,
0.3472887520663801, 0.71935937177222153, -0.85307965190501811]], [[0.61119743618802036, 0.3502655762828446,
0.94852304489440153, -0.14189427417899325], [0.54958981135014406, -0.55503130982859883, 0.34955467736809176,
-0.38497957362679047], [-0.74731172737983176, -0.47753952462364269, -0.4648049237210552, -0.70213150390386359]],
[[0.52644165479644611, -0.24525608263185306, 0.62476935396121691, 0.51308479655531358], [0.42821817624865099,
0.80878105218648533, 0.37614678448471972, 0.74153228074219246], [-0.97366155081882622, -0.64213371666188435,
0.39176893655801481, 0.38394330573311874]]], [[[0.94993289577142948, -0.4473345864598901, -0.72529706466837962,
-0.38227114886080593], [0.98620938838075523, -0.79085026677573689, 0.11079037569398587, 0.79410827583319343],
[-0.8710215341123464, 0.98928570276569316, 0.052036661356236413, -0.56869951694252019]], [[0.16791216694921607,
-0.34002163051015888, 0.55394790674046313, -0.95844254164756126], [-0.68136194488326041, -0.70654623063293909,
0.60406700230877153, 0.18419598881245269], [0.59470271617573411, 0.48548947450898305, -0.62593526940970579,
0.13784228435476797]], [[-0.051894336949978692, -0.30141195030075862, -0.54243831204364912, -0.3164835827279473],
[0.46113898893543026, -0.22328338897537137, -0.6262187521411291, -0.19838295564438924], [-0.63045214825138696,
0.42991135589819929, 0.94642647328742724, -0.46927884880828441]]], [[[0.43305217297293153, 0.2536927678947547,
0.10630172818818751, 0.64912963749688624], [-0.50640500723555193, -0.52039373680496936, 0.1096650192795996,
0.61739092724335665], [0.49146409570063665, -0.6172394263210772, -0.89858617462268153, 0.078998202459677591]],
[[0.31989693518174467, 0.53428120315129246, -0.86380321869857379, -0.99456503074107205], [-0.18966772064485382,
0.76550347311246214, 0.30692097150168318, 0.39425500510922684], [-0.60097695899076076, -0.22485950542948485,
-0.90370187909218513, 0.0069427133057222701]], [[-0.15704314633162131, 0.89746601086632749, 0.28888078395150973,
-0.34187992624489594], [-0.32033494156307274, 0.025128653723394123, -0.47866647717699262, 0.35294153182773935],
[-0.98451182873333698, 0.25346634893978104, -0.0048477378610420896, 0.57224910189677436]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.86093381951293702, 0.13996950513602546, 0.24911399113865595,
-0.11687764679711665], [0.38995601569710026, 0.25862337132906621, 0.20778367175756296, 0.99392603502022769],
[-0.32165386535491991, 0.080429152547056404, -0.85984733996427165, -0.15610294226570454]], [[0.12829546514003765,
-0.4326838477532966, -0.61985099234292229, 0.33445914047116521], [-0.51684630714752422, 0.92431298195122413,
-0.017961161723491603, -0.57836466241833606], [0.26372245020051421, 0.60965511465701305, -0.65459345305739824,
-0.71283949608603092]], [[-0.59051578048618047, -0.11976164669216027, 0.63811512110527224, 0.0477865454714963],
[-0.32869214968335014, -0.82123152677972677, -0.63369664321299068, 0.056672898394033666], [0.2250713152368351,
-0.62438720879757748, -0.83237146630406356, -0.78278097359720378]]], [[[0.89748544928773066, 0.5752572822211599,
0.69556833557413533, 0.059484354097367076], [-0.42037686438037203, 0.24414755485271677, 0.7447054096616772,
-0.73338535075846512], [0.84888276034233656, -0.89716332338609628, 0.41048988271148801, 0.77275425123689656]],
[[0.053278067331299361, 0.017090335800584278, -0.019834263331557578, -0.78955636184768418], [-0.19302690568577918,
0.6869966044908038, 0.34430011476446776, 0.26412124200458797], [0.034090275037528928, -0.25940999069091819,
0.70472187490894989, -0.51898070676783026]], [[-0.11532514059713761, 0.7543433470843377, -0.025429440687317673,
0.97222217024465118], [0.100498060597767, -0.1686524972193677, 0.72744816848679061, -0.48660950354642418],
[-0.59357339890728733, -0.1958546411491171, -0.89429512218716023, 0.041528696515233054]]], [[[-0.90076200036976117,
-0.25957462571823475, 0.79256977370563386, -0.65221636673000094], [-0.38479624425568026, -0.98351747514208587,
0.96635050065626227, 0.13207466451884509], [-0.72321532195459648, 0.49324232956389502, -0.77437271037580513,
-0.6970342093120272]], [[-0.5690486476849157, -0.18328781413382722, -0.12642173000883017, 0.72892817142317545],
[-0.45697982321472996, -0.27764089575719852, 0.42027522015782659, 0.69638001933025606], [0.39249120138943683,
0.81646129948630564, 0.38622156091256632, 0.88525915829596502]], [[-0.66147807133014758, 0.92568437253839231,
-0.83581384523615143, -0.89941202473191884], [0.88679461317858332, 0.78806539055185687, -0.90639197675436245,
-0.056691520904474046], [-0.68628218008533293, 0.5435284380104628, -0.53709806924608294, -0.32696579362174982]]]]))
res=trace(arg,0)
ref=Data(numpy.array([[-0.44875114864982613, 0.58609855563679125, 0.54526606035417369, -1.9411035837529307],
[-0.080056416904773275, -1.2214535685601384, -0.75335842773638495, 0.18979857114759424], [-0.55272687302561385,
1.0862445755151442, 0.088576364501473659, -0.14298826565347578]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.2527338155140888, 1.082744213475002, -0.60653411742905305, -1.8058460333767197],
[1.0837237231899044, 1.7336853663717269, -0.35430819023233173, 1.2013557561203416], [-0.97384577040272391, 0.36454759986660101,
-0.69222353430140471, -1.0020494426552846]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_trace_taggedData_rank4_offset1(self):
arg=Data(numpy.array([[[[-0.55798919316930928, -0.12634181074327833, -0.40664724647866635, 0.36708777522359504],
[0.92304655197309193, -0.56406149781700354, -0.75041568029159933, -0.095537386510300415]], [[-0.3636354593950577,
-0.40380370690849166, 0.49253431857420327, 0.10902624704289288], [-0.19597941217654924, -0.95163806574005538,
0.60454417490459766, 0.69513132231601227]]], [[[-0.20308414220377036, -0.53195408451393367, -0.33288857029946994,
0.44484182547390527], [-0.12726872938253497, -0.43447934510411157, -0.39760625791440996, -0.30936298377662186]],
[[0.28774381158004014, 0.61012947608207702, -0.46712034177702555, 0.91092147487322106], [-0.52390628765269009,
0.92927410510820163, -0.95012155523316077, -0.7222711322568518]]], [[[-0.24187336960914019, -0.59309059125950681,
0.53524786224937859, 0.50676945999040157], [-0.57463672422686285, 0.36012557101692289, -0.078463188038392806,
0.27486082740181295]], [[0.31528739403243211, -0.6370158849670815, -0.030539643795422844, -0.97217928007222509],
[-0.47368356817793988, 0.032379440525969638, 0.34563915318137517, 0.19814305077222172]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.75497579989715802, 0.75552023104928256, 0.48432549378389722, 0.20729794017016756],
[0.44405493284670539, -0.23000293723614651, -0.63748352328109958, -0.60237445266324907]], [[-0.27625459086835935,
0.023596305703124765, 0.61504158021241917, 0.2178716901929818], [0.96278475273563724, -0.96720183527676884,
0.45551629628869139, 0.47165988558270189]]], [[[0.90761996660281774, 0.16758328211165674, -0.31356109610772309,
0.087428502708541078], [0.90782881734087795, 0.72403519404879302, -0.24417912061706226, -0.46951868330094637]],
[[0.033861565372617086, 0.28163848857736618, 0.92408557671169467, 0.210632525440964], [0.87073073883033691,
-0.68692992446708412, 0.14252375810828632, 0.8245351604586233]]], [[[-0.45002828026838038, -0.97190522313419714,
0.12925822440936252, -0.533132911900696], [0.011444666627784983, 0.82785488430562437, -0.3958492260594686,
-0.0019680796983392312]], [[-0.17959231647010943, 0.92008786112041019, 0.38078133949239845, -0.36866225074134396],
[-0.036570198363608419, -0.48923887070189065, 0.34751294528039756, 0.53151271350459495]]]]))
res=trace(arg,1)
ref=Data(numpy.array([[-0.75396860534585852, -1.0779798764833337, 0.19789692842593132, 1.0622190975396073],
[-0.72699042985646045, 0.39732002059426796, -1.2830101255326307, -0.27742930678294653], [-0.71555693778708007,
-0.56071115073353717, 0.88088701543075376, 0.70491251076262329]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.7177605526327953, -0.21168160422748628, 0.93984179007258861, 0.67895782575286945],
[1.7783507054331547, -0.51934664235542738, -0.17103733799943677, 0.91196366316716437], [-0.4865984786319888,
-1.4611440938360878, 0.47677116968976008, -0.001620198396101058]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_trace_taggedData_rank4_offset2(self):
arg=Data(numpy.array([[[[0.50772321962003253, 0.81091093905750844, 0.94907993942795321], [0.73379368710043202,
0.1970196579321819, -0.8707801741741179], [-0.98974029208093661, 0.91681549483815017, 0.16956715133806566]],
[[-0.090494805445503701, -0.3653868306105883, -0.87491027134814137], [-0.60474449778279293, 0.67282700235052628,
0.27290190361213451], [-0.03248595072972571, 0.27611081782564129, 0.75418917955906206]]], [[[0.18190784316027542,
0.33954122026553279, 0.13024592156588244], [-0.79420056673733463, 0.12148228545768824, -0.85470994652055587],
[0.4829422648842423, -0.57023081427027478, 0.41433754047871041]], [[-0.58422268302045, -0.48231988981968299,
-0.15004624357161589], [-0.49652481012180316, -0.21011488677963097, 0.20476295735139871], [0.17434244586251535,
0.69659164351993152, 0.8316119029490372]]], [[[0.602226352906577, 0.21989094319305869, 0.82993438476018522],
[-0.093921878199302311, 0.22262225202444008, 0.50725270224144503], [-0.023680938989186773, 0.25862610493500493,
-0.52230124443454318]], [[0.098543228060376409, 0.9775002686847889, -0.47745678787660806], [-0.88792221187768017,
0.35446812257977789, 0.87923110313445352], [-0.87302618083614503, 0.76889289104143943,
-0.095107428145788653]]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[[0.54945295821485707, 0.68929893451274893, -0.88766836204478827],
[0.53024839700414561, 0.84905632473172754, -0.98556214149230015], [0.50718726310865336, -0.83719889242899215,
0.24136937710832362]], [[0.97221946890851418, 0.37857007790244546, 0.75560780727580856], [0.3521058501426968,
0.1507254827737019, 0.83412847473120277], [0.41312223430297745, -0.83067113456000574, 0.79847746537447262]]],
[[[0.71933408626310946, -0.67938598430105768, -0.25704327542401395], [0.96454768916804823, -0.57443380467803262,
-0.90584183288776843], [-0.99082600970368229, 0.92986322447411585, -0.009482479572462621]], [[0.042756189979888282,
0.7239714798641308, -0.54502595603950943], [-0.98616315736428839, -0.0324661868861289, 0.9949779711240978],
[-0.18729522777607888, 0.1730936272931467, -0.011733676370543566]]], [[[0.92150763074621023, 0.041752915946222213,
0.88604844559866125], [0.32871523323767282, -0.69184453250969358, 0.10222640271280437], [0.055288050478184303,
0.3547320790777162, -0.98092888964539116]], [[0.45832990498428905, -0.5973061806598583, 0.56108743526797444],
[0.85757837343863441, 0.24003028775473045, -0.093170295554950711], [-0.29609418582570224, -0.5992914494698971,
0.98416254607460307]]]]))
res=trace(arg,2)
ref=Data(numpy.array([[0.87431002889028009, 1.3365213764640846], [0.71772766909667407, 0.037274333148956229],
[0.3025473604964739, 0.35790392249436564]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[1.6398786600549082, 1.9214224170566887], [0.13541780201261422,
-0.0014436732767841853], [-0.75126579140887451, 1.6825227388136226]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_inverse_taggedData_dim1(self):
arg=Data(numpy.array([[2.9250662348343939]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[3.9569115924392309]]))
res=inverse(arg)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(1, 1),"wrong shape of result.")
self.assertTrue(Lsup(matrix_mult(res,arg)-kronecker(1))<=self.RES_TOL,"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_inverse_taggedData_dim2(self):
arg=Data(numpy.array([[2.1762682607002422, -0.78050782824831111], [-0.83510184055361858,
2.604492189120557]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[3.6893612252828687, 0.18096456487937673], [0.89569960507470925, 3.3056638034950092]]))
res=inverse(arg)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(2, 2),"wrong shape of result.")
self.assertTrue(Lsup(matrix_mult(res,arg)-kronecker(2))<=self.RES_TOL,"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_inverse_taggedData_dim3(self):
arg=Data(numpy.array([[1.0566731035132446, -0.23529223422203982, -0.73657527200271922], [-0.90461086237095145,
2.3942152365412581, -0.0078023115760492701], [-0.32951652966235834, 0.5634604257647613, 1.716379935670141]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[2.047257820397296, -0.099359655049029438, 0.2857664656803518], [0.87552190129350227,
3.9293973128822133, 0.1331903761748936], [-0.63398198479164769, -0.49699750938550524, 2.0757292701325061]]))
res=inverse(arg)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3, 3),"wrong shape of result.")
self.assertTrue(Lsup(matrix_mult(res,arg)-kronecker(3))<=self.RES_TOL,"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_inverse_taggedData_dim4(self):
arg=Data(numpy.array([[1.0566731035132446, -0.23529223422203982, -0.73657527200271922,0], [-0.90461086237095145,
2.3942152365412581, -0.0078023115760492701,0], [-0.32951652966235834, 0.5634604257647613,
1.716379935670141,0],[0,0,0,1]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[2.047257820397296, -0.099359655049029438, 0.2857664656803518,0], [0.87552190129350227,
3.9293973128822133, 0.1331903761748936,0], [-0.63398198479164769, -0.49699750938550524, 2.0757292701325061,0],[0,0,0,1]]))
try:
res=inverse(arg)
except RuntimeError:
self.assertTrue(not haveLapack,'matrix>3x3 should have inverted') #Only builds without lapack are expected to throw
else:
self.assertTrue(haveLapack,'matrix>3x3 should have thrown without lapack') #We should have thrown here if we don't have lapack
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 4),"wrong shape of result.")
self.assertTrue(Lsup(matrix_mult(res,arg)-kronecker(4))<=self.RES_TOL,"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_inverse_taggedData_singularDefaultTag(self):
#In this test the other tag is definitely invertible the error is in the default tag
arg=Data([[0]],self.functionspace, True)
arg.setTaggedValue(1,[[1]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (1x1) did not fail to invert.')
arg=Data([[0,0],[0,1]],self.functionspace, True)
arg.setTaggedValue(1,[[1,0],[0,1]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (2x2) did not fail to invert.')
arg=Data([[0,0,0],[0,1,0],[1,1,1]],self.functionspace, True)
arg.setTaggedValue(1,[[1,0,0],[0,1,0],[0,0,1]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (3x3) did not fail to invert.')
#Unsupported matrix sizes are checked in the _dim4 tests so I won't check it here
if haveLapack:
arg=Data([[0,0,0,0],[1,4,5,8],[1.0007, 4.00005, 19.00001, 34.000],[-1,1,-243,0]], self.functionspace, True)
arg.setTaggedValue(1, [[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (4x4) did not fail to invert.')
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_inverse_taggedData_singularNonDefaultTag(self):
#In this test the default tag is definitely invertible the error is in the other tag
arg=Data([[1]],self.functionspace, True)
arg.setTaggedValue(1,[[0]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (1x1) did not fail to invert.')
arg=Data([[1,0],[0,1]],self.functionspace, True)
arg.setTaggedValue(1,[[0,0],[0,1]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (2x2) did not fail to invert.')
arg=Data([[1,0,0],[0,1,0],[0,0,1]],self.functionspace, True)
arg.setTaggedValue(1,[[0,0,0],[0,1,0],[1,1,1]])
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (3x3) did not fail to invert.')
#Unsupported matrix sizes are checked in the _dim4 tests so I won't check it here
if haveLapack:
arg=Data([[1,0,0,0],[0,1,0,0],[0,0,1,0],[0,0,0,1]], self.functionspace, True)
arg.setTaggedValue(1,[[0,0,0,0],[1,4,5,8],[1.0007, 4.00005, 19.00001, 34.000],[-1,1,-243,0]] )
try:
inverse(arg)
except RuntimeError:
pass
else:
self.fail('Singular matrix (4x4) did not fail to invert.')
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank0_offset0(self):
arg=Data(0.00245568699765,self.functionspace)
arg.setTaggedValue(1,0.962512736617)
res=transpose(arg,0)
ref=Data(0.00245568699765,self.functionspace)
ref.setTaggedValue(1,0.962512736617)
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank1_offset0(self):
arg=Data(numpy.array([-0.87758195100245429, -0.95788037127525083, -0.024038687137474479]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.78441288952160404, 0.45851237807201439, 0.39271640430391974]))
res=transpose(arg,0)
ref=Data(numpy.array([-0.87758195100245429, -0.95788037127525083, -0.024038687137474479]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.78441288952160404, 0.45851237807201439, 0.39271640430391974]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank1_offset1(self):
arg=Data(numpy.array([0.73939623397127963, -0.2901711535835565, 0.63071822660466492]),self.functionspace)
arg.setTaggedValue(1,numpy.array([-0.7534223120479544, 0.94374047089151136, -0.90626579949244124]))
res=transpose(arg,1)
ref=Data(numpy.array([0.73939623397127963, -0.2901711535835565, 0.63071822660466492]),self.functionspace)
ref.setTaggedValue(1,numpy.array([-0.7534223120479544, 0.94374047089151136, -0.90626579949244124]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(3,),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank2_offset0(self):
arg=Data(numpy.array([[0.70915610046599609, 0.043818073067544816, 0.74645618218174037, 0.62553214306715654,
-0.11956300507817685], [-0.50062323179383994, 0.52299230955585774, 0.7142389787381016, 0.25677472851364724,
0.92079877696533008], [0.31008743924906246, 0.47726950332211615, -0.85939158371426205, -0.88392632785062486,
0.72101415960259896], [0.32800126345184677, 0.4509499590511461, 0.23949591894812983, 0.39405505944469121,
0.34926631942547481]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-0.44366593371475127, -0.067589366587520683, 0.020502575806330192,
-0.16330702945536912, 0.9374254393163397], [-0.63988681195560693, -0.35795669025855981, -0.1290339882203877,
-0.37450847109242602, -0.12872898889849527], [-0.79104289067735811, 0.64692009724393396, 0.67769213667122452,
0.53361516526763886, -0.45274178325858982], [-0.51206140966780844, -0.54499033468876146, -0.83771401393503098,
0.38259122836552772, -0.5364522636660829]]))
res=transpose(arg,0)
ref=Data(numpy.array([[0.70915610046599609, 0.043818073067544816, 0.74645618218174037, 0.62553214306715654,
-0.11956300507817685], [-0.50062323179383994, 0.52299230955585774, 0.7142389787381016, 0.25677472851364724,
0.92079877696533008], [0.31008743924906246, 0.47726950332211615, -0.85939158371426205, -0.88392632785062486,
0.72101415960259896], [0.32800126345184677, 0.4509499590511461, 0.23949591894812983, 0.39405505944469121,
0.34926631942547481]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.44366593371475127, -0.067589366587520683, 0.020502575806330192,
-0.16330702945536912, 0.9374254393163397], [-0.63988681195560693, -0.35795669025855981, -0.1290339882203877,
-0.37450847109242602, -0.12872898889849527], [-0.79104289067735811, 0.64692009724393396, 0.67769213667122452,
0.53361516526763886, -0.45274178325858982], [-0.51206140966780844, -0.54499033468876146, -0.83771401393503098,
0.38259122836552772, -0.5364522636660829]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank2_offset1(self):
arg=Data(numpy.array([[-0.46830113109276916, 0.74128794045461999, 0.33285428673822692, -0.1444916409930288,
-0.30300280085147979], [0.39289073163425714, 0.61302899906894481, -0.47533241822737238, 0.044516903477815672,
0.86467385181555056], [-0.18730740823889458, -0.88927473357523001, 0.18893489479808512, -0.075354852812760775,
-0.33760747317832962], [-0.84319625410513832, 0.23777111488637481, 0.84432582876204498, -0.6196361628572773,
-0.41773907171881053]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[0.13273707566402293, -0.2863921187239622, 0.57298828638933963, -0.42990828455474528,
0.90152672750166141], [0.45635571138500608, -0.19547021292521016, 0.53140617579889105, -0.54256946591153898,
0.35431012906603931], [-0.26857845195104102, -0.51818450872103639, -0.15334950623005428, 0.33378473873759895,
0.30042323293203799], [0.061110820746190786, -0.46795109038331351, -0.41556280785700794, 0.13045724537885905,
-0.54960086188564183]]))
res=transpose(arg,1)
ref=Data(numpy.array([[-0.46830113109276916, 0.39289073163425714, -0.18730740823889458, -0.84319625410513832],
[0.74128794045461999, 0.61302899906894481, -0.88927473357523001, 0.23777111488637481], [0.33285428673822692,
-0.47533241822737238, 0.18893489479808512, 0.84432582876204498], [-0.1444916409930288, 0.044516903477815672,
-0.075354852812760775, -0.6196361628572773], [-0.30300280085147979, 0.86467385181555056, -0.33760747317832962,
-0.41773907171881053]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[0.13273707566402293, 0.45635571138500608, -0.26857845195104102, 0.061110820746190786],
[-0.2863921187239622, -0.19547021292521016, -0.51818450872103639, -0.46795109038331351], [0.57298828638933963,
0.53140617579889105, -0.15334950623005428, -0.41556280785700794], [-0.42990828455474528, -0.54256946591153898,
0.33378473873759895, 0.13045724537885905], [0.90152672750166141, 0.35431012906603931, 0.30042323293203799,
-0.54960086188564183]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(5, 4),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank2_offset2(self):
arg=Data(numpy.array([[-0.82240139433020243, -0.95786409988752097, 0.61408041791851109, 0.31562654676313717,
0.75615481530692175], [0.35221871732385979, -0.65220212852640258, -0.50732427046756245, 0.36961517515769793,
0.87770605802467072], [0.91546316386521998, -0.17352806778459984, 0.0028640348608768296, 0.80380753111963088,
0.84687418870632847], [0.87454903965632691, -0.85247090388120039, -0.96710843187079165, -0.65164137261910282,
-0.633009521583765]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[-0.084027811322535673, 0.36316097327855834, 0.76276282502956372, 0.825946226062648,
0.57980486410085952], [-0.73779586982099499, 0.37254441436551367, 0.99795370620464574, 0.56905875549575846,
0.60508644368687392], [0.64896635371895339, 0.87044045233501643, 0.87825962555539472, 0.49125589571124517,
0.73521629566881597], [-0.11021763134355433, -0.75915969386068149, -0.81314829289717072, 0.22800937731296655,
0.64674076289842564]]))
res=transpose(arg,2)
ref=Data(numpy.array([[-0.82240139433020243, -0.95786409988752097, 0.61408041791851109, 0.31562654676313717,
0.75615481530692175], [0.35221871732385979, -0.65220212852640258, -0.50732427046756245, 0.36961517515769793,
0.87770605802467072], [0.91546316386521998, -0.17352806778459984, 0.0028640348608768296, 0.80380753111963088,
0.84687418870632847], [0.87454903965632691, -0.85247090388120039, -0.96710843187079165, -0.65164137261910282,
-0.633009521583765]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[-0.084027811322535673, 0.36316097327855834, 0.76276282502956372, 0.825946226062648,
0.57980486410085952], [-0.73779586982099499, 0.37254441436551367, 0.99795370620464574, 0.56905875549575846,
0.60508644368687392], [0.64896635371895339, 0.87044045233501643, 0.87825962555539472, 0.49125589571124517,
0.73521629566881597], [-0.11021763134355433, -0.75915969386068149, -0.81314829289717072, 0.22800937731296655,
0.64674076289842564]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(4, 5),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank3_offset0(self):
arg=Data(numpy.array([[[-0.47965404874130169, 0.82476560063874316], [-0.29508213416398998, -0.81393703720452937]],
[[-0.39452570551744826, 0.2710813598813071], [0.70191301420221497, 0.92131697529999679]], [[0.91092486057477284,
-0.17742675176658906], [0.93743803341741172, -0.24810658731032409]], [[0.8292404484536704, -0.67966081758338071],
[0.1097041521502371, -0.50250503549174441]], [[-0.95382582593020238, -0.15954466864076822], [-0.82226475873157745,
0.032324703810909572]], [[-0.80576486966156158, -0.70193160021310597], [0.24194008612087514,
-0.45186291580263571]]]),self.functionspace)
arg.setTaggedValue(1,numpy.array([[[-0.95232031657104654, -0.51171389143739954], [0.5021173784011006,
-0.13346048906733543]], [[-0.34000803534780566, 0.27531991852016002], [0.40500892817765788, -0.86936047318254461]],
[[0.31637715793619337, -0.44836632199340198], [-0.18777872723614197, -0.53942822518925126]], [[0.44283821537880441,
-0.49096132745134247], [0.61879880479923943, -0.98913356226678784]], [[-0.65325712666802938, 0.23825410802658231],
[0.023290776898226895, -0.72183082104110285]], [[0.94622363774828044, -0.67849303457969845], [-0.11481306112448086,
0.36914068273649492]]]))
res=transpose(arg,0)
ref=Data(numpy.array([[[-0.47965404874130169, 0.82476560063874316], [-0.29508213416398998, -0.81393703720452937]],
[[-0.39452570551744826, 0.2710813598813071], [0.70191301420221497, 0.92131697529999679]], [[0.91092486057477284,
-0.17742675176658906], [0.93743803341741172, -0.24810658731032409]], [[0.8292404484536704, -0.67966081758338071],
[0.1097041521502371, -0.50250503549174441]], [[-0.95382582593020238, -0.15954466864076822], [-0.82226475873157745,
0.032324703810909572]], [[-0.80576486966156158, -0.70193160021310597], [0.24194008612087514,
-0.45186291580263571]]]),self.functionspace)
ref.setTaggedValue(1,numpy.array([[[-0.95232031657104654, -0.51171389143739954], [0.5021173784011006,
-0.13346048906733543]], [[-0.34000803534780566, 0.27531991852016002], [0.40500892817765788, -0.86936047318254461]],
[[0.31637715793619337, -0.44836632199340198], [-0.18777872723614197, -0.53942822518925126]], [[0.44283821537880441,
-0.49096132745134247], [0.61879880479923943, -0.98913356226678784]], [[-0.65325712666802938, 0.23825410802658231],
[0.023290776898226895, -0.72183082104110285]], [[0.94622363774828044, -0.67849303457969845], [-0.11481306112448086,
0.36914068273649492]]]))
self.assertTrue(isinstance(res,Data),"wrong type of result.")
self.assertEqual(res.getShape(),(6, 2, 2),"wrong shape of result.")
self.assertTrue(Lsup(res-ref)<=self.RES_TOL*Lsup(ref),"wrong result")
#+++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++++
def test_transpose_taggedData_rank3_offset1(self):
arg=Data(numpy.array([[[0.81053286814697678, 0.050191176759508238], [0.42342445008027152, 0.8922440392055786]],
[[-0.72331693394362895, 0.27069578317300502], [-0.45140780490291643, -0.67001507946193128]], [[0.18728261143033365,
0.42505803814105914], [-0.65870737965001647, 0.88986754283565572]], [[0.9052128319800945, 0.71380866302456791],
[-0.62578348903525249, 0.57438376912125078]], [[-0.70322298189245513, 0.5783032228742897], [0.98991210384307404,
0.68455971318831255]], [[-0.23035102351615544, 0.61590799799635754], [0.56157523223742367,
0.51936097775959378]]]),self.functionspace)
# Copyright 2020 The PyMC Developers
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
from collections import OrderedDict
import numpy as np
import theano.tensor as tt
from scipy.special import logsumexp
from scipy.stats import multivariate_normal
from scipy.optimize import approx_fprime
from theano import function as theano_function
import arviz as az
from pymc3.backends.ndarray import NDArray
from pymc3.model import Point, modelcontext
from pymc3.sampling import sample_prior_predictive
from pymc3.theanof import (
floatX,
inputvars,
join_nonshared_inputs,
make_shared_replacements,
gradient,
)
from pymc3.sinf.GIS import GIS
import torch
class NF_SMC:
"""Sequential Monte Carlo with normalizing flow based sampling."""
def __init__(
self,
draws=2000,
start=None,
threshold=0.5,
model=None,
random_seed=-1,
chain=0,
frac_validate=0.1,
iteration=None,
alpha=(0,0),
k_trunc=0.5,
pareto=False,
epsilon=1e-3,
local_thresh=3,
local_step_size=0.1,
local_grad=True,
nf_local_iter=0,
max_line_search=2,
verbose=False,
n_component=None,
interp_nbin=None,
KDE=True,
bw_factor=0.5,
edge_bins=None,
ndata_wT=None,
MSWD_max_iter=None,
NBfirstlayer=True,
logit=False,
Whiten=False,
batchsize=None,
nocuda=False,
patch=False,
shape=[28,28,1],
):
self.draws = draws
self.start = start
self.threshold = threshold
self.model = model
self.random_seed = random_seed
self.chain = chain
self.frac_validate = frac_validate
self.iteration = iteration
self.alpha = alpha
self.k_trunc = k_trunc
self.pareto = pareto
self.epsilon = epsilon
self.local_thresh = local_thresh
self.local_step_size = local_step_size
self.local_grad = local_grad
self.nf_local_iter = nf_local_iter
self.max_line_search = max_line_search
self.verbose = verbose
self.n_component = n_component
self.interp_nbin = interp_nbin
self.KDE = KDE
self.bw_factor = bw_factor
self.edge_bins = edge_bins
self.ndata_wT = ndata_wT
self.MSWD_max_iter = MSWD_max_iter
self.NBfirstlayer = NBfirstlayer
self.logit = logit
self.Whiten = Whiten
self.batchsize = batchsize
self.nocuda = nocuda
self.patch = patch
self.shape = shape
self.model = modelcontext(model)
if self.random_seed != -1:
np.random.seed(self.random_seed)
self.beta = 0
self.variables = inputvars(self.model.vars)
self.weights = np.ones(self.draws) / self.draws
#self.sinf_logq = np.array([])
self.log_marginal_likelihood = 0
def initialize_population(self):
"""Create an initial population from the prior distribution."""
population = []
var_info = OrderedDict()
if self.start is None:
init_rnd = sample_prior_predictive(
self.draws,
var_names=[v.name for v in self.model.unobserved_RVs],
model=self.model,
)
else:
init_rnd = self.start
init = self.model.test_point
for v in self.variables:
var_info[v.name] = (init[v.name].shape, init[v.name].size)
for i in range(self.draws):
point = Point({v.name: init_rnd[v.name][i] for v in self.variables}, model=self.model)
population.append(self.model.dict_to_array(point))
self.nf_samples = np.array(floatX(population))
#self.posterior = np.copy(self.nf_samples)
self.var_info = var_info
def setup_logp(self):
"""Set up the likelihood logp function based on the chosen kernel."""
shared = make_shared_replacements(self.variables, self.model)
self.prior_logp_func = logp_forw([self.model.varlogpt], self.variables, shared)
self.likelihood_logp_func = logp_forw([self.model.datalogpt], self.variables, shared)
self.posterior_logp_func = logp_forw([self.model.logpt], self.variables, shared)
self.posterior_dlogp_func = logp_forw([gradient(self.model.logpt, self.variables)], self.variables, shared)
self.prior_dlogp_func = logp_forw([gradient(self.model.varlogpt, self.variables)], self.variables, shared)
self.likelihood_dlogp_func = logp_forw([gradient(self.model.datalogpt, self.variables)], self.variables, shared)
def get_nf_logp(self):
"""Get the prior, likelihood and tempered posterior log probabilities, for the current NF samples."""
priors = [self.prior_logp_func(sample) for sample in self.nf_samples]
likelihoods = [self.likelihood_logp_func(sample) for sample in self.nf_samples]
self.nf_prior_logp = np.array(priors).squeeze()
self.nf_likelihood_logp = np.array(likelihoods).squeeze()
self.nf_posterior_logp = self.nf_prior_logp + self.nf_likelihood_logp * self.beta
def get_full_logp(self):
"""Get the prior, likelihood and tempered posterior log probabilities, for the full sample set."""
priors = [self.prior_logp_func(sample) for sample in self.posterior]
likelihoods = [self.likelihood_logp_func(sample) for sample in self.posterior]
self.prior_logp = np.array(priors).squeeze()
self.likelihood_logp = np.array(likelihoods).squeeze()
self.posterior_logp = self.prior_logp + self.likelihood_logp * self.beta
def eval_prior_logp(self, param_vals):
"""Evaluates the prior logp for given parameter values."""
prior_logps = [self.prior_logp_func(val) for val in param_vals]
return np.array(prior_logps).squeeze()
def eval_prior_dlogp(self, param_vals):
"""Evaluates the gradient of the prior logp for given parameter values."""
prior_dlogps = [self.prior_dlogp_func(val) for val in param_vals]
return np.array(prior_dlogps).squeeze()
def sinf_logq(self, param_vals):
"""Function for evaluating the SINF gradient."""
sinf_logq = self.nf_model.evaluate_density(torch.from_numpy(param_vals.astype(np.float32))).numpy().astype(np.float64)
return sinf_logq.item()
def target_logp(self, param_vals):
"""Evaluates logp of the target distribution for given parameter values."""
logps = [self.posterior_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def tempered_logp(self, param_vals):
"""Evaluates the tempered logp of the target distribution for given parameter values."""
logps = [self.prior_logp_func(val) + self.beta * self.likelihood_logp_func(val) for val in param_vals]
return np.array(logps).squeeze()
def target_dlogp(self, param_vals):
"""Evaluates the gradient of the target distribution logp for given parameter values."""
dlogps = [self.posterior_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def tempered_dlogp(self, param_vals):
"""Evaluates the gradient of the temepered target distribution for given parameter values."""
dlogps = [self.prior_dlogp_func(val) + self.beta * self.likelihood_dlogp_func(val) for val in param_vals]
return np.array(dlogps).squeeze()
def regularize_weights(self):
"""Either performs Pareto-smoothing of the IW, or applies clipping."""
if self.pareto:
psiw = az.psislw(self.log_sinf_weights)
self.log_sinf_weights = psiw[0]
self.sinf_weights = np.exp(self.log_sinf_weights)
elif not self.pareto:
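# Truncated importance sampling: clip the log-weights so no weight exceeds
# mean(w) * n**k_trunc (a_max below is the log of that bound), then renormalise.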
self.log_sinf_weights = np.clip(self.log_sinf_weights, a_min=None,
a_max=logsumexp(self.log_sinf_weights) + (self.k_trunc - 1) * np.log(len(self.log_sinf_weights)))
self.log_sinf_weights = self.log_sinf_weights - logsumexp(self.log_sinf_weights)
self.sinf_weights = np.exp(self.log_sinf_weights)
def local_exploration(self, logq_func=None, dlogq_func=None):
"""Perform local exploration."""
self.high_iw_idx = np.where(self.log_sinf_weights >= np.log(self.local_thresh) - np.log(self.draws))[0]
self.num_local = len(self.high_iw_idx)
self.high_iw_samples = self.nf_samples[self.high_iw_idx, ...]
self.high_log_weight = self.log_sinf_weights[self.high_iw_idx]
self.high_weights = self.sinf_weights[self.high_iw_idx]
print(f'Number of points we perform additional local exploration around = {self.num_local}')
self.local_samples = np.empty((0, np.shape(self.high_iw_samples)[1]))
self.local_log_weight = np.array([])
self.modified_log_weight = np.array([])
self.local_weights = np.array([])
self.modified_weights = np.array([])
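# For every high-importance-weight sample, take a step along the gradient of
# log(tempered target) - log(proposal) (or its finite-difference estimate), halving
# the step until the proposed point is no worse than the original under the tempered
# logp (at most max_line_search halvings), then split the original weight between
# the old and the proposed point according to their tempered posterior mass.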
for i, sample in enumerate(self.high_iw_samples):
sample = sample.reshape(-1, len(sample))
if self.local_grad:
if dlogq_func is None:
raise Exception('Using gradient-based exploration requires you to supply dlogq_func.')
self.log_weight_grad = self.tempered_dlogp(sample.astype(np.float64)) - dlogq_func(sample.astype(np.float64))
elif not self.local_grad:
if logq_func is None:
raise Exception('Gradient-free exploration approximates gradients with finite differences, which requires you to supply logq_func.')
self.log_weight_grad = approx_fprime(sample, self.tempered_logp, np.finfo(float).eps) - approx_fprime(sample, logq_func, np.finfo(float).eps)
self.log_weight_grad = np.asarray(self.log_weight_grad).astype(np.float64)
delta = 1.0 * self.local_step_size
proposed_step = sample + delta * self.log_weight_grad
line_search_iter = 0
while self.tempered_logp(proposed_step) < self.tempered_logp(sample):
delta = delta / 2.0
proposed_step = sample + delta * self.log_weight_grad
line_search_iter += 1
if line_search_iter >= self.max_line_search:
break
local_log_w = self.high_log_weight[i] + self.tempered_logp(proposed_step) - np.log(np.exp(self.tempered_logp(proposed_step)) + np.exp(self.tempered_logp(sample)))
modif_log_w = self.high_log_weight[i] + self.tempered_logp(sample) - np.log(np.exp(self.tempered_logp(proposed_step)) + np.exp(self.tempered_logp(sample)))
self.local_log_weight = np.append(self.local_log_weight, local_log_w)
self.modified_log_weight = np.append(self.modified_log_weight, modif_log_w)
self.local_weights = np.append(self.local_weights, np.exp(local_log_w))
self.modified_weights = np.append(self.modified_weights, np.exp(modif_log_w))
self.local_samples = np.append(self.local_samples, proposed_step, axis=0)
self.log_sinf_weights[self.high_iw_idx] = self.modified_log_weight
self.sinf_weights[self.high_iw_idx] = self.modified_weights
self.log_sinf_weights = np.append(self.log_sinf_weights, self.local_log_weight)
self.sinf_weights = np.append(self.sinf_weights, self.local_weights)
self.nf_samples = np.append(self.nf_samples, self.local_samples, axis=0)
def update_weights_beta(self):
"""Calculate the next inverse temperature (beta).
Computes importance weights based on the current beta and the tempered likelihood, and
updates the marginal likelihood estimate.
"""
low_beta = old_beta = self.beta
up_beta = 2.0
rN = int(len(self.nf_likelihood_logp) * self.threshold)
# Weights between the current SINF samples, and the target tempered posterior.
if self.beta == 0:
# In this case the samples are exact from the prior.
#self.sinf_logq = np.append(self.sinf_logq, self.nf_prior_logp)
self.logq = np.copy(self.nf_prior_logp)
log_weights_q = np.ones_like(self.nf_prior_logp) / self.draws
else:
log_weights_q = self.nf_prior_logp + self.nf_likelihood_logp * self.beta - self.logq
log_weights_q = np.clip(log_weights_q, a_min=None,
a_max=np.log(np.mean(np.exp(log_weights_q))) + self.k_trunc * np.log(self.draws))
log_weights_q = log_weights_q - logsumexp(log_weights_q)
while up_beta - low_beta > 1e-6:
new_beta = (low_beta + up_beta) / 2.0
log_weights_un = (new_beta - old_beta) * self.nf_likelihood_logp
log_weights = log_weights_un - logsumexp(log_weights_un)
try:
ESS = int(np.exp(-logsumexp(log_weights_q + log_weights * 2)) / self.draws)
except:
ESS = 0
if ESS == rN:
break
elif ESS < rN:
up_beta = new_beta
else:
low_beta = new_beta
if new_beta >= 1:
new_beta = 1
log_weights_un = (new_beta - old_beta) * self.nf_likelihood_logp
#log_weights_un = self.prior_logp + self.likelihood_logp * new_beta - self.logq
log_weights = log_weights_un - logsumexp(log_weights_un)
#self.log_marginal_likelihood += logsumexp(log_weights_un) - np.log(self.draws)
self.beta = new_beta
self.weights = np.exp(log_weights)
# We normalize again to correct for small numerical errors that might build up
self.weights /= self.weights.sum()
#self.log_sinf_weights = self.prior_logp + self.likelihood_logp * self.beta - self.sinf_logq
self.log_sinf_weights = self.nf_prior_logp + self.nf_likelihood_logp * self.beta - self.logq
self.log_marginal_likelihood = logsumexp(self.log_sinf_weights) - np.log(self.draws)
from PIL import Image
import face_recognition
import pickle
import cv2
import numpy as np
class Cargar:
def __init__(self, _filePath) -> None:
self._filePath = _filePath
self._imagen = None
self._rotarCounter = 0
self._imagenTemp = None
def cargarImagen(self):
if self._filePath is not None:
self._imagen = Image.open(self._filePath) #cv2.imread(filePath.name)
imagenShape = [self._imagen.width, self._imagen.height]
if imagenShape[0] > 400:
imagenShape[0] = int(imagenShape[0] * 0.75)
imagenShape[1] = int(imagenShape[1] * 0.75)
self._imagen = self._imagen.resize(imagenShape)
self._imagenTemp = self._imagen
return self._imagen
def _predecir(self, imagen):
self._imagen = imagen #.resize(width=400)
encodingsPath = 'encodings.pickle'
detection_method = "hog"
data = pickle.loads(open(encodingsPath, "rb").read())
# image = cv2.imread(self._imagen)
rgb = cv2.cvtColor(np.array(self._imagen), cv2.COLOR_BGR2RGB)
boxes = face_recognition.face_locations(rgb, model=detection_method)
encodings = face_recognition.face_encodings(rgb, boxes)
names = []
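# For every detected face, compare its encoding against the known encodings;
# the known name with the most matches wins a simple vote, otherwise the face
# keeps the label "Unknown".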
for encoding in encodings:
matches = face_recognition.compare_faces(data["encodings"],encoding)
name = "Unknown"
if True in matches:
matchedIdxs = [i for (i, b) in enumerate(matches) if b]
counts = {}
for i in matchedIdxs:
name = data["names"][i]
counts[name] = counts.get(name, 0) + 1
name = max(counts, key=counts.get)
names.append(name)
for ((top, right, bottom, left), name) in zip(boxes, names):
# print("A: " + str(type(self._imagen)))
self._imagen = cv2.rectangle(np.array(self._imagen), (left, top), (right, bottom), (0, 255, 200), 1)
# print("B: " + str(type(self._imagen)))
y = top - 15 if top - 15 > 15 else top + 15
self._imagen = cv2.putText(self._imagen, name, (left, y), cv2.FONT_HERSHEY_SIMPLEX, 0.5, (0, 255, 0), 1)
return self._imagen, names
def _rotar(self):
self._imagen = cv2.rotate(np.array(self._imagen)
"""Averaged Matthews Correlation Coefficient (averaged over several thresholds, for imbalanced problems). Example how to use Driverless AI's internal scorer."""
import typing
import numpy as np
from h2oaicore.metrics import CustomScorer
from h2oaicore.metrics import MccScorer
from sklearn.preprocessing import LabelEncoder
class MyAverageMCCScorer(CustomScorer):
_description = "Average MCC over several thresholds"
_binary = True
_maximize = True
_perfect_score = 1
_display_name = "AVGMCC"
def score(self,
actual: np.array,
predicted: np.array,
sample_weight: typing.Optional[np.array] = None,
labels: typing.Optional[np.array] = None,
**kwargs) -> float:
"""Reasoning behind using several thresholds
MCC can vary a lot from one threshold to another
and especially may give different results on train and test datasets
Using an average over thresholds close to the prior may lead to a flatter
response and better generalization.
"""
lb = LabelEncoder()
labels = list(lb.fit_transform(labels))
actual = lb.transform(actual)
# Compute thresholds
if sample_weight is None:
sample_weight = np.ones(actual.shape[0])
'''
Implementation of Classifier Training, partly described inside Fanello et al.
'''
import sys
import signal
import errno
import glob
import numpy as np
import class_objects as co
import action_recognition_alg as ara
import cv2
import os.path
import cPickle as pickle
import logging
import yaml
import time
from OptGridSearchCV import optGridSearchCV
# pylint: disable=no-member,R0902,too-many-public-methods,too-many-arguments
# pylint: disable=too-many-locals, too-many-branches, too-many-statements
def timeit(func):
'''
Decorator to time extraction
'''
def wrapper(self, *arg, **kw):
t1 = time.time()
res = func(self, *arg, **kw)
t2 = time.time()
self.time.append(t2 - t1)
del self.time[:-5000]
return res
return wrapper
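# Usage sketch (hypothetical method name): any Classifier method decorated with
# @timeit appends its wall-clock duration to self.time, e.g.
#   @timeit
#   def extract_features(self, frame): ...
# after which np.mean(self.time) gives the running average extraction time.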
class Classifier(object):
'''
Class to hold all Classifier specific methods.
<descriptors>:['pca','ghog','3dhof']
<action_type>: 'Passive', 'Dynamic' or 'All'
<sparsecoding_level>: 'Buffer', 'Features' or None; sparse coding is used when truthy
Classifier parameters, for example <AdaBoost_n_estimators>,
<RDF_n_estimators> or <kernel>, can be given as lists, which will be
reduced using optimized grid search with cross validation.
'''
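# Illustrative instantiation (hypothetical values; defaults come from co.CONST):
#   clf = Classifier(descriptors=['ghog', '3dhof'],
#                    classifiers_used='SVM',
#                    action_type='Dynamic',
#                    svm_c=[0.1, 1.0, 10.0],  # a list is reduced via grid search
#                    kernel='linear')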
def __init__(self, log_lev='INFO',
visualize=False, masks_needed=True,
buffer_size=co.CONST['buffer_size'],
sparse_dim_rat=co.CONST['sparse_dim_rat'],
descriptors='',
ptpca=False,
ptpca_components=None,
action_type='Dynamic',
classifiers_used='SVM', num_of_cores=4, name='',
svm_c=None,
AdaBoost_n_estimators=None,
RDF_n_estimators=None,
add_info=None,
sparsecoding_level=None,
kernel=None,
save_all_steps=False,
post_scores_processing_method=None,
hardcore=False,
for_app=False):
'''
sparsecoding_level = [Buffer, Features, None]
'''
if not os.path.isdir(co.CONST['AppData']):
os.makedirs(co.CONST['AppData'])
self.app_dir = co.CONST['AppData']
self.for_app = for_app
self.time = []
self.classifiers_ids = None
self.test_ind = None
# General configuration
if not isinstance(descriptors, list):
descriptors = [descriptors]
descriptors = sorted(descriptors)
###
features_params = {}
coders_params = {}
for descriptor in descriptors:
features_params[descriptor] = {}
features_params[descriptor]['params'] = {attrib.replace(descriptor, ''):
co.CONST[attrib] for
attrib in co.CONST if
attrib.startswith(descriptor)}
features_params[descriptor]['sparsecoded'] = sparsecoding_level
features_params[descriptor]['action_type'] = action_type
coders_params[descriptor] = {}
if not sparsecoding_level:
features_params[descriptor]['sparse_params'] = None
else:
features_params[descriptor]['sparse_params'] = {
attrib.replace('sparse', ''):
co.CONST[attrib] for
attrib in co.CONST if
attrib.startswith('sparse')}
coders_params[descriptor] = {
attrib.replace('sparse', ''):
co.CONST[attrib] for
attrib in co.CONST if
attrib.startswith('sparse') and
'fss' not in attrib}
self.test_name = None
self.kernel = kernel
self.svm_c = svm_c
self.RDF_n_estimators = RDF_n_estimators
self.AdaBoost_n_estimators = AdaBoost_n_estimators
self.sparse_dim_rat = sparse_dim_rat
if 'SVM' in classifiers_used and kernel is None:
self.kernel = 'linear'
if 'SVM' in classifiers_used:
if svm_c is None:
self.svm_c = co.CONST['SVM_C']
if post_scores_processing_method == 'CProb':
LOG.warning('Invalid post_scores_processing_method for SVM')
if hardcore:
raise Exception
else:
LOG.warning('Changing method to CSTD')
post_scores_processing_method = 'CSTD'
if 'RDF' in classifiers_used or 'AdaBoost' in classifiers_used:
if svm_c is not None:
LOG.warning(
'svm_c is not None for RDF or AdaBoost experimentation')
if hardcore:
raise Exception
if post_scores_processing_method is None:
if 'RDF' in classifiers_used or 'AdaBoost' in classifiers_used:
post_scores_processing_method = 'CProb'
else:
post_scores_processing_method = 'CSTD'
classifier_params = {}
if 'RDF' in classifiers_used and RDF_n_estimators is None:
self.RDF_n_estimators = co.CONST['RDF_trees']
if 'AdaBoost' in classifiers_used and AdaBoost_n_estimators is None:
self.AdaBoost_n_estimators = co.CONST['AdaBoost_Estimators']
if 'SVM' in classifiers_used:
classifier_params['SVM_kernel'] = self.kernel
classifier_params['SVM_C'] = self.svm_c
if 'RDF' in classifiers_used:
classifier_params['RDF_n_estimators'] = self.RDF_n_estimators
if 'AdaBoost' in classifiers_used:
classifier_params['AdaBoost_n_estimators'] = self.AdaBoost_n_estimators
if action_type != 'Passive':
dynamic_params = {'buffer_size': buffer_size,
'buffer_confidence_tol': co.CONST['buffer_confidence_tol'],
'filter_window_size':
co.CONST['STD_big_filt_window']}
else:
dynamic_params = {'buffer_size': 1}
if ptpca and ptpca_components is None:
ptpca_components = co.CONST['PTPCA_components']
ptpca_params = {'PTPCA_components': ptpca_components}
for descriptor in descriptors:
features_params[descriptor]['dynamic_params'] = dynamic_params
if sparsecoding_level:
if not isinstance(sparse_dim_rat, list):
sparse_dim_rat = [sparse_dim_rat] * len(descriptors)
if len(list(sparse_dim_rat)) != len(descriptors):
raise Exception('<sparse_dim_rat> should be either an integer/None or' +
' a list of the same length as <descriptors>')
sparse_params = dict(zip(descriptors, sparse_dim_rat))
sparse_params['fss_max_iter'] = co.CONST['sparse_fss_max_iter']
else:
sparse_params = None
testing_params = {'online': None}
testing_params['post_scores_processing_method'] = \
post_scores_processing_method
fil = os.path.join(co.CONST['rosbag_location'],
'gestures_type.csv')
self.passive_actions = None
self.dynamic_actions = None
if os.path.exists(fil):
with open(fil, 'r') as inp:
for line in inp:
if line.split(':')[0] == 'Passive':
self.passive_actions = line.split(
':')[1].rstrip('\n').split(',')
elif line.split(':')[0] == 'Dynamic':
self.dynamic_actions = line.split(
':')[1].rstrip('\n').split(',')
action_params = {'Passive': self.passive_actions,
'Dynamic': self.dynamic_actions}
LOG.debug('Extracting: ' + str(descriptors))
self.parameters = {'classifier': classifiers_used,
'descriptors': descriptors,
'features_params': features_params,
'coders_params': coders_params,
'dynamic_params': dynamic_params,
'classifier_params': classifier_params,
'sparse_params': sparse_params,
'action_type': action_type,
'sparsecoded': sparsecoding_level,
'testing': False,
'testing_params': testing_params,
'actions_params': action_params,
'PTPCA': ptpca,
'PTPCA_params': ptpca_params}
self.training_parameters = {k: self.parameters[k] for k in
('classifier', 'descriptors',
'features_params',
'dynamic_params',
'classifier_params',
'sparse_params',
'action_type',
'sparsecoded',
'PTPCA',
'PTPCA_params') if k in
self.parameters}
self.descriptors = descriptors
self.add_info = add_info
self.log_lev = log_lev
self.visualize = visualize
self.buffer_size = buffer_size
self.masks_needed = masks_needed
self.action_type = action_type
self.classifiers_used = classifiers_used
self.num_of_cores = num_of_cores
self.name = name
self.ptpca = ptpca
self.action_recog = ara.ActionRecognition(
self.parameters,
log_lev=log_lev)
if not self.for_app:
self.available_tests = sorted(os.listdir(co.CONST['test_save_path']))
else:
self.available_tests = []
self.update_experiment_info()
if 'SVM' in self.classifiers_used:
from sklearn.svm import LinearSVC
self.classifier_type = LinearSVC(
class_weight='balanced', C=self.svm_c,
multi_class='ovr',
dual=False)
elif 'RDF' in self.classifiers_used:
from sklearn.ensemble import RandomForestClassifier
self.classifier_type =\
RandomForestClassifier(self.RDF_n_estimators)
elif 'AdaBoost' in self.classifiers_used:
from sklearn.ensemble import AdaBoostClassifier
self.classifier_type =\
AdaBoostClassifier(n_estimators=self.AdaBoost_n_estimators)
self.unified_classifier = None
if sparsecoding_level:
if not(sparsecoding_level == 'Features' or sparsecoding_level == 'Buffer'):
raise Exception('Invalid sparsecoding_level, its value should be '
+ 'None/False/Buffer/Features')
self.sparsecoded = sparsecoding_level
self.decide = None
# Training variables
self.training_data = None
self.train_ground_truth = None # is loaded from memory after training
self.train_classes = None # is loaded from memory after training
# Testing general variables
self.accuracy = None
self.f1_scores = None
self.confusion_matrix = None
self.scores_savepath = None
self.scores_std = []
self.scores_std_mean = []
self.scores = None
self.scores_filter_shape = None
self.std_big_filter_shape = None
self.std_small_filter_shape = None
self.recognized_classes = []
self.crossings = None
self.testname = ''
self.save_fold = None
self.online = False
# Testing offline variables
self.testdataname = ''
self.test_instances = None
# Testing online variables
self.count_prev = None
self.buffer_exists = None
self.scores_exist = None
self.img_count = -1
self._buffer = []
self.scores_running_mean_vec = []
self.big_std_running_mean_vec = []
self.small_std_running_mean_vec = []
self.saved_buffers_scores = []
self.new_action_starts_count = 0
self.test_ground_truth = None
self.mean_from = -1
self.on_action = False
self.act_inds = []
self.max_filtered_score = 0
self.less_filtered_scores_std = None
self.high_filtered_scores_std = None
self.classifier_folder = None
self.testing_initialized = False
self.classifiers_list = {}
self.classifier_savename = 'trained_'
self.classifier_savename += self.full_info.replace(' ', '_').lower()
try:
[self.unified_classifier,
info] = co.file_oper.load_labeled_data(
['Classifier'] + self.classifier_id)
co.file_oper.save_labeled_data(['Classifier'],
[self.unified_classifier,
self.training_parameters],
name=self.app_dir)
if isinstance(info, tuple):
self.training_params = info[0]
self.additional_params = info[1:]
else:
self.training_params = info
self.loaded_classifier = True
LOG.info('Loaded Classifier')
except TypeError:
if self.for_app:
[self.unified_classifier,
info] = co.file_oper.load_labeled_data(
['Classifier'],
name=self.app_dir)
self.loaded_classifier = True
else:
self.loaded_classifier = False
LOG.info('Classifier not Loaded')
self.load_tests()
try:
self.classifier_folder = str(self.classifiers_list[
self.classifier_savename])
except KeyError:
self.classifier_folder = str(len(self.classifiers_list))
self.coders_to_train = []
# parameters bound variables
self.frames_preproc = ara.FramesPreprocessing(self.parameters)
available_descriptors =\
ara.Actions(self.parameters).available_descriptors
try:
self.features_extractors = [available_descriptors[nam](
self.parameters, self.frames_preproc)
for nam in self.parameters['descriptors']]
self.buffer_operators = [
ara.BufferOperations(self.parameters)
for nam in self.parameters['descriptors']]
if self.sparsecoded:
[self.action_recog.
actions.load_sparse_coder(ind) for ind in range(
len(self.parameters['descriptors']))]
except BaseException: pass
def load_tests(self, reset=True):
if reset:
self.testdata = [None] * len(self.available_tests)
self.fscores = [None] * len(self.available_tests)
self.accuracies = [None] * len(self.available_tests)
self.results = [None] * len(self.available_tests)
self.conf_mats = [None] * len(self.available_tests)
self.test_times = [None] * len(self.available_tests)
for count, test in enumerate(self.available_tests):
if (self.testdata[count] is None or
self.testdata[count]['Accuracy'] is None):
self.testdata[count] = co.file_oper.load_labeled_data(
['Testing'] + self.tests_ids[count])
if (self.testdata[count] is not None and
self.testdata[count]['Accuracy'] is not None):
self.accuracies[count] = self.testdata[count]['Accuracy']
self.fscores[count] = self.testdata[count]['FScores']
self.results[count] = self.testdata[count]['Results']
self.conf_mats[count] = self.testdata[count]['ConfMat']
self.test_times[count] = self.testdata[count]['TestTime']
try:
self.partial_accuracies[count] = self.testdata[count][
'PartialAccuracies']
except BaseException: pass
else:
self.testdata[count] = {}
self.testdata[count]['Accuracy'] = {}
self.testdata[count]['FScores'] = {}
self.testdata[count]['Results'] = {}
self.testdata[count]['ConfMat'] = {}
self.testdata[count]['TestTime'] = {}
self.testdata[count]['Labels'] = {}
try:
self.testdata[count]['PartialAccuracies'] = {}
except BaseException: pass
def update_experiment_info(self):
if self.parameters['action_type'] == 'Passive':
info = 'passive '
else:
info = 'dynamic '
info = info + self.name + ' ' + self.classifiers_used + ' '
info += 'using'
if self.parameters['sparsecoded']:
info += ' sparsecoded'
for feature in self.parameters['descriptors']:
info += ' ' + feature
info += ' descriptors '
if 'SVM' in self.parameters['classifier']:
info += 'with ' + self.parameters[
'classifier_params']['SVM_kernel'] + ' kernel'
elif 'RDF' in self.parameters['classifier']:
info += ('with ' + str(self.parameters['classifier_params'][
'RDF_n_estimators']) + ' estimators')
elif 'AdaBoost' in self.parameters['classifier']:
info += ('with ' + str(self.parameters['classifier_params'][
'AdaBoost_n_estimators']) + ' estimators')
if self.parameters['action_type'] == 'Dynamic':
info += ' with buffer size ' + str(self.buffer_size)
if self.parameters['sparsecoded']:
info += ' with sparsecoding by ratio of ' + \
str(self.sparse_dim_rat)
if self.ptpca:
info += (' with ' +
str(self.parameters['PTPCA_params']['PTPCA_components']) +
' post-time-pca components')
self.full_info = info.title()
if self.add_info:
info += self.add_info
self.classifier_savename = 'trained_'
self.classifier_savename += self.full_info.replace(' ', '_').lower()
self.update_classifier_id()
self.update_tests_ids()
def update_classifier_id(self):
self.features_file_id = []
self.features_id = []
for count in range(len(self.parameters['descriptors'])):
_id, file_id = self.action_recog.actions.retrieve_descriptor_possible_ids(count,
assume_existence=True)
self.features_id.append(_id)
self.features_file_id.append(file_id)
self.classifier_id = [co.dict_oper.create_sorted_dict_view(
{'Classifier': str(self.classifiers_used)}),
co.dict_oper.create_sorted_dict_view(
{'ClassifierParams': str(co.dict_oper.create_sorted_dict_view(
self.parameters['classifier_params']))}),
co.dict_oper.create_sorted_dict_view(
{'ActionsType': str(self.action_type)}),
co.dict_oper.create_sorted_dict_view(
{'FeaturesParams': str(self.features_file_id)})]
def update_tests_ids(self):
self.tests_ids = []
for count, test in enumerate(self.available_tests):
self.tests_ids.append([co.dict_oper.create_sorted_dict_view({'Test': str(test)}),
co.dict_oper.create_sorted_dict_view(
{'TestingParams': str(co.dict_oper.create_sorted_dict_view(
self.parameters['testing_params']))})]
+ [self.classifier_id])
def initialize_classifier(self, classifier):
'''
Store the trained classifier, attach unified decide/predict methods and cache it to disk
'''
self.unified_classifier = classifier
if 'SVM' in self.classifiers_used:
self.unified_classifier.decide = self.unified_classifier.decision_function
self.unified_classifier.predict = self.unified_classifier.predict
elif 'RDF' in self.classifiers_used or 'AdaBoost' in self.classifiers_used:
self.unified_classifier.decide = self.unified_classifier.predict_proba
self.unified_classifier.predict = self.unified_classifier.predict
co.file_oper.save_labeled_data(['Classifier'] + self.classifier_id,
[self.unified_classifier,
self.training_parameters])
co.file_oper.save_labeled_data(['Classifier'],
[self.unified_classifier,
self.training_parameters],
name=self.app_dir)
def reset_offline_test(self):
'''
Reset offline testing variables
'''
# Testing general variables
self.scores_std = []
self.scores_std_mean = []
self.scores = None
self.recognized_classes = []
self.crossings = None
self.save_fold = None
self.testing_initialized = True
# Testing offline variables
def reset_online_test(self):
'''
Reset online testing variables
'''
# Testing general variables
self.scores_std = []
self.scores_std_mean = []
self.scores = []
self.recognized_classes = []
self.crossings = []
self.save_fold = None
# Testing online variables
self.count_prev = None
self.buffer_exists = []
self.scores_exist = []
self.img_count = -1
self._buffer = []
self.scores_running_mean_vec = []
self.big_std_running_mean_vec = []
self.small_std_running_mean_vec = []
self.saved_buffers_scores = []
self.new_action_starts_count = 0
self.test_ground_truth = None
self.mean_from = -1
self.on_action = False
self.act_inds = []
self.max_filtered_score = 0
self.less_filtered_scores_std = None
self.high_filtered_scores_std = None
self.testing_initialized = True
def add_train_classes(self, training_datapath):
'''
Set the training classes of the classifier
'''
try:
self.train_classes = [name for name in os.listdir(training_datapath)
if os.path.isdir(os.path.join(training_datapath, name))][::-1]
except:
if self.for_app:
with open(os.path.join(self.app_dir,
'train_classes'),'r') as inp:
self.train_classes = pickle.load(inp)
else:
raise
self.all_actions = ['Undefined'] + self.train_classes
# Compare actions in memory with actions in file 'gestures_type.csv'
if self.passive_actions is not None:
passive_actions = [clas for clas in
(self.passive_actions) if clas
in self.train_classes]
if self.dynamic_actions is not None:
dynamic_actions = [clas for clas in
(self.dynamic_actions) if clas
in self.train_classes]
if (self.dynamic_actions is not None and
self.passive_actions is not None):
if 'Sync' in self.classifiers_used:
self.train_classes = {'Passive': passive_actions,
'Dynamic': dynamic_actions}
else:
classes = []
if self.action_type == 'Dynamic' or self.action_type == 'All':
classes += dynamic_actions
if self.action_type == 'Passive' or self.action_type == 'All':
classes += passive_actions
self.train_classes = classes
with open(os.path.join(self.app_dir,
'train_classes'),'w') as out:
pickle.dump(self.train_classes, out)
def run_training(self, coders_retrain=False,
classifiers_retrain=False,
training_datapath=None, classifier_savename=None,
num_of_cores=4, classifier_save=True,
max_act_samples=None,
min_dict_iterations=5,
visualize_feat=False, just_sparse=False,
init_sc_traindata_num=200,
train_all=False):
'''
<Arguments>
For coders training:
Do not train coders if a coder already exists or <coders_retrain>
is False. <min_dict_iterations> denotes the minimum number of training
iterations to take place after the whole dataset has been processed by
the coder trainer. <init_sc_traindata_num> denotes how many samples
will be used in the first iteration of the sparse coder training.
For svm training:
Train classifiers using <num_of_cores> cores.
Save them to <classifier_savename> if <classifier_save> is True. Do not train
if a saved classifier already exists and <classifiers_retrain> is False.
'''
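# Illustrative call (hypothetical values; the default datapath is co.CONST['actions_path']):
#   clf.run_training(classifiers_retrain=True,
#                    training_datapath=co.CONST['actions_path'],
#                    max_act_samples=500)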
self.train_all = train_all
self.parameters['testing'] = False
LOG.info(self.full_info + ':')
if classifier_savename is not None:
self.classifier_savename = classifier_savename
if training_datapath is None:
training_datapath = co.CONST['actions_path']
self.add_train_classes(training_datapath)
if self.unified_classifier is None:
LOG.info('Missing trained classifier:' +
self.full_info)
LOG.info('Classifier will be retrained')
classifiers_retrain = True
else:
if not self.sparsecoded:
return
self.prepare_training_data(training_datapath, max_act_samples,
visualize_feat=visualize_feat)
if just_sparse:
return
if self.sparsecoded and self.coders_to_train and classifiers_retrain:
# Enters only if coders were not initially trained or had to be
# retrained. Otherwise, sparse descriptors are computed when
#<Action.add_features> is called
LOG.info('Trained' + str([self.parameters['descriptors'][coder] for coder in
self.coders_to_train]))
LOG.info('Making Sparse Features..')
self.action_recog = ara.ActionRecognition(
self.parameters,
log_lev=self.log_lev,
feat_filename=os.path.join(co.CONST['feat_save_path'],
'saved'))
self.prepare_training_data(training_datapath, max_act_samples,
visualize_feat=visualize_feat)
self.process_training(num_of_cores, classifiers_retrain,
self.classifier_savename, classifier_save)
def prepare_training_data(self, path=None, max_act_samples=None,
visualize_feat=False):
'''
Read actions from the <path> and name them according to their parent
folder name
'''
LOG.info('Adding actions..')
while True:
self.training_data = []
self.training_samples_inds = []
for act_count, action in enumerate(self.train_classes):
LOG.info('Action:' + action)
descriptors, samples_indices, mean_depths, _, trained_coders, _ = self.add_action(name=action,
data=os.path.join(
path, action),
use_dexter=False,
action_type=self.action_type,
max_act_samples=max_act_samples)
if not(self.sparsecoded and None in trained_coders):
descriptors = np.hstack(tuple(descriptors))
fmask = np.prod(np.isfinite(
descriptors), axis=1).astype(bool)
descriptors = descriptors[fmask]
LOG.info('Action \'' + action + '\' has ' +
'descriptors of shape ' + str(descriptors.shape))
self.training_data.append(descriptors)
self.training_samples_inds.append(
np.array(samples_indices)[fmask])
else:
self.training_samples_inds = []
self.training_data = []
self.train_ground_truth = []
if self.training_data:
if self.action_type == 'Dynamic':
self.training_data = co.preproc_oper.equalize_samples(
samples=self.training_data,
utterance_indices=self.training_samples_inds,
mode='random')
self.train_ground_truth = []
for act_count, clas in enumerate(self.training_data):
self.train_ground_truth += clas.shape[0] * [act_count]
self.training_data = np.vstack((self.training_data))
if None in trained_coders and self.sparsecoded:
self.action_recog.actions.train_sparse_dictionary()
else:
break
finite_samples = np.prod(
np.isfinite(
self.training_data),
axis=1).astype(bool)
self.train_ground_truth = np.array(
self.train_ground_truth)[finite_samples]
self.training_data = self.training_data[finite_samples, :]
LOG.info('Total Training Data has shape:'
+ str(self.training_data.shape))
def process_training(self, num_of_cores=4, retrain=False,
savepath=None, save=True):
'''
        Train (or load trained) classifiers with <num_of_cores> cores and buffer size
        <self.buffer_size> (stride is 1). If <retrain> is True, classifiers are
        retrained, even if <savepath> exists.
'''
loaded = 0
if save and savepath is None:
raise Exception('savepath needed')
if retrain or self.unified_classifier is None:
if retrain and self.unified_classifier is not None:
LOG.info('retrain switch is True, so the Classifier ' +
'is retrained')
classifier_params = {elem.replace(self.classifiers_used + '_', ''):
self.parameters['classifier_params'][elem]
for elem in
self.parameters['classifier_params']
if elem.startswith(self.classifiers_used)}
if any([isinstance(classifier_params[elem], list)
for elem in classifier_params]):
grid_search_params = classifier_params.copy()
from sklearn.multiclass import OneVsRestClassifier
if isinstance(self.classifier_type, OneVsRestClassifier):
grid_search_params = {('estimator__' + key): classifier_params[key]
for key in classifier_params}
grid_search_params = {key: (grid_search_params[key] if
isinstance(
grid_search_params[key], list)
else [
grid_search_params[key]]) for key in
classifier_params}
best_params, best_scores, best_estimators = optGridSearchCV(
self.classifier_type, self.training_data,
self.train_ground_truth, grid_search_params, n_jobs=4,
fold_num=3)
best_params = best_params[-1]
best_scores = best_scores[-1]
best_estimator = best_estimators[-1]
if isinstance(self.classifier_type, OneVsRestClassifier):
best_params = {key.replace('estimator__', ''):
classifier_params[
key.replace('estimator__', '')]
for key in best_params}
classifier_params = {self.classifiers_used + '_' + key: best_params[key] for key
in best_params}
self.parameters['classifier_params'].update(classifier_params)
self.training_parameters['classifier_params'].update(
classifier_params)
self.classifier_type = best_estimator
self.update_experiment_info()
savepath = self.classifier_savename
self.initialize_classifier(self.classifier_type.fit(self.training_data,
self.train_ground_truth))
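    # Note on the <classifier_params> naming convention handled above
    # (illustrative values only): with self.classifiers_used == 'SVM', entries
    # such as
    #     {'SVM_C': [0.1, 1, 10], 'SVM_kernel': 'rbf'}
    # are stripped of the 'SVM_' prefix, any list-valued entry triggers the grid
    # search branch, and the winning values are written back with the prefix
    # restored before the final fit.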
def compute_testing_time(self, testname):
testing_time = {}
features_extraction_time = 0
if not self.online:
for count in range(len(self.parameters['descriptors'])):
try:
loaded = co.file_oper.load_labeled_data(
[str(self.features_id[count][-1])] +
self.features_file_id[count] +
[str(testname)])
(_, _, _, feat_times) = loaded
except BaseException:
return None
for key in feat_times:
LOG.info('Time:' + str(key) + ':' +
str(np.mean(feat_times[key])))
features_extraction_time += np.mean(feat_times[key])
try:
testing_time['Classification'] = self.time[
-1] / float(self.scores.shape[0])
except IndexError:
testing_time['Classification'] = (
co.file_oper.load_labeled_data(
['Testing'] + self.tests_ids[
self.available_tests.index(
testname)])['TestTime'][
'Classification'])
else:
testing_time['Classification'] = np.mean(self.time)
testing_time['Features Extraction'] = features_extraction_time
return testing_time
def add_action(self, name=None, data=None, visualize=False, offline_vis=False,
to_visualize=[], exit_after_visualization=False,
use_dexter=False,
action_type=None,
max_act_samples=None):
return self.action_recog.add_action(
name=name,
use_dexter=use_dexter,
action_type=self.action_type,
max_act_samples=max_act_samples,
data=data,
offline_vis=offline_vis,
to_visualize=to_visualize,
exit_after_visualization=exit_after_visualization)
def offline_testdata_processing(self, datapath):
'''
Offline testing data processing, using data in <datapath>.
'''
LOG.info('Processing test data..')
LOG.info('Extracting descriptors..')
(descriptors, _, mean_depths, test_name, _, _) = self.add_action(
name=None, data=datapath)
testdata = np.hstack(tuple(descriptors))
self.parameters['testing_params'][test_name] = test_name
self.parameters['testing_params']['current'] = test_name
return testdata
def save_plot(self, fig, lgd=None, display_all=False, info=None):
'''
<fig>: figure
<lgd>: legend of figure
<display_all>: whether to save as Total plot
Saves plot if the action resides in self.available_tests
'''
filename = None
if display_all:
testname = self.action_type.lower()
filename = os.path.join(*self.save_fold.split(os.sep)[:-1] +
['Total', testname + '.pdf'])
else:
if self.test_name is None:
self.test_name = (self.name + ' ' + self.classifiers_used).title()
if self.test_name in self.available_tests:
if self.save_fold is None:
if not self.online:
fold_name = co.file_oper.load_labeled_data(['Testing'],
just_catalog=True,
include_all_catalog=True)[
str(self.tests_ids[
self.available_tests.
index(self.test_name)])]
else:
fold_name = 'Online'
self.save_fold = os.path.join(
co.CONST['results_fold'], 'Classification', fold_name,
self.test_name)
if self.add_info is not None:
self.save_fold = os.path.join(
self.save_fold, self.add_info.replace(' ', '_').lower())
co.makedir(self.save_fold)
LOG.info('Saving to ' + self.save_fold)
if info is not None:
filename = os.path.join(
self.save_fold, (self.testname + ' ' + info +
'.pdf').replace(' ','_'))
else:
filename = os.path.join(
self.save_fold, self.testname.replace(' ','_') + '.pdf')
else:
LOG.warning('Requested figure to plot belongs to an' +
' action that does not reside in <self.'+
'available_tests> .Skipping..')
filename = None
import matplotlib.pyplot as plt
if filename is not None:
if lgd is None:
plt.savefig(filename)
else:
plt.savefig(filename,
bbox_extra_artists=(lgd,), bbox_inches='tight')
plt.close()
def plot_result(self, data, info=None, save=True, xlabel='Frames', ylabel='',
labels=None, colors=None, linewidths=None, alphas=None,
xticks_names=None, yticks_names=None, xticks_locs=None,
yticks_locs=None, markers=None, markers_sizes=None, zorders=None, ylim=None, xlim=None,
display_all=False, title=False):
'''
<data> is a numpy array dims (n_points, n_plots),
<labels> is a string list of dimension (n_plots)
<colors> ditto
'''
import matplotlib
from matplotlib import pyplot as plt
#matplotlib.rcParams['text.usetex'] = True
#matplotlib.rcParams['text.latex.unicode'] = True
# plt.style.classifiers_used('seaborn-ticks')
if len(data.shape) == 1:
data = np.atleast_2d(data).T
fig, axes = plt.subplots()
if xticks_locs is not None:
axes.set_xticks(xticks_locs, minor=True)
axes.xaxis.grid(True, which='minor')
if yticks_locs is not None:
axes.set_yticks(yticks_locs, minor=True)
axes.yaxis.grid(True, which='minor')
if xticks_names is not None:
plt.xticks(range(len(xticks_names)), xticks_names)
if yticks_names is not None:
plt.yticks(range(len(yticks_names)), yticks_names)
if markers is None:
markers = [','] * data.shape[1]
if markers_sizes is None:
markers_sizes = [10] * data.shape[1]
if colors is None:
colors = ['r', 'g', 'b', 'c', 'm', 'y', 'k']
if alphas is None:
alphas = data.shape[1] * [1]
if zorders is None:
zorders = data.shape[1] * [0]
while len(colors) < data.shape[1]:
colors += [tuple(np.random.random(3))]
if linewidths is None:
linewidths = [1] * data.shape[1]
lgd = None
for count in range(data.shape[1]):
if labels is not None:
axes.plot(data[:, count], label='%s' % labels[count],
color=colors[count],
linewidth=linewidths[count],
marker=markers[count], alpha=alphas[count],
zorder=zorders[count],
markersize=markers_sizes[count])
lgd = co.plot_oper.put_legend_outside_plot(axes,
already_reshaped=True)
else:
axes.plot(data[:, count],
color=colors[count],
linewidth=linewidths[count],
marker=markers[count], alpha=alphas[count],
zorder=zorders[count],
markersize=markers_sizes[count])
if title:
if info is not None:
plt.title(self.testname +
'\n Dataset: ' + self.testdataname +
'\n' + info.title())
else:
plt.title(self.testname +
'\n Dataset ' + self.testdataname)
info = ''
plt.xlabel(xlabel)
plt.ylabel(ylabel)
if ylim is not None:
plt.ylim(ylim)
if xlim is not None:
plt.xlim(xlim)
if save:
self.save_plot(fig, lgd, display_all=display_all, info=info)
return fig, lgd, axes
def init_testing(self, data=None, online=True, save=True, load=True,
testname=None, scores_savepath=None,
scores_filter_shape=5,
std_small_filter_shape=co.CONST['STD_small_filt_window'],
std_big_filter_shape=co.CONST['STD_big_filt_window'],
testdatapath=None, save_results=True):
'''
Initializes paths and names used in testing to save, load and visualize
data.
        Built as a convenience method, in case <self.run_testing> gets overridden.
'''
self.parameters['testing'] = True
self.parameters['testing_params']['online'] = online
if online:
self.reset_online_test()
else:
self.reset_offline_test()
self.scores_filter_shape = scores_filter_shape
self.std_small_filter_shape = std_small_filter_shape
self.std_big_filter_shape = std_big_filter_shape
self.online = online
if testname is not None:
self.testname = testname.title()
else:
self.testname = (self.name + ' ' + self.classifiers_used).title()
if self.add_info is not None:
self.testname += ' ' + self.add_info.title()
self.parameters['testing_params']['current'] = self.testname
if online:
if testdatapath is not None:
self.testdataname = ('online (using '
+ os.path.basename(testdatapath) + ')')
else:
self.testdataname = 'online'
else:
self.testdataname = os.path.basename(data)
if not self.online:
if self.test_ind is not None:
available_tests_ids = co.file_oper.load_labeled_data(['Testing'],
just_catalog=True,
include_all_catalog=True)
if available_tests_ids is None:
fold_name = '0'
else:
curr_test_id = self.tests_ids[self.available_tests.
index(self.test_name)]
if str(curr_test_id) in available_tests_ids:
fold_name = str(available_tests_ids[str(curr_test_id)])
else:
fold_name = str(len(available_tests_ids))
else:
self.test_name = 'Online'
try:
fold_name = os.path.join(*[co.CONST['results_fold'],
'Classification', 'Online'])
except OSError:
fold_name = '0'
if self.test_ind is not None:
self.save_fold = os.path.join(
co.CONST['results_fold'], 'Classification', self.test_name,
fold_name)
co.makedir(self.save_fold)
if save or load:
fold_name = self.classifier_folder
if scores_savepath is None:
self.scores_savepath = self.testdataname + '_scores_for_'
self.scores_savepath += self.full_info.replace(' ',
'_').lower()
self.scores_savepath += '.pkl'
else:
self.scores_savepath = scores_savepath
return True
def run_testing(self, data=None, derot_angle=None, derot_center=None,
online=True,
scores_filter_shape=5,
std_small_filter_shape=co.CONST['STD_small_filt_window'],
std_big_filter_shape=co.CONST['STD_big_filt_window'],
ground_truth_type=None,
img_count=None, save=True, scores_savepath=None,
load=False, testname=None, display_scores=True,
construct_gt=True, just_scores=False, testdatapath=None,
compute_perform=True,
save_results=True):
'''
        Test classifiers using data (.png files) located in <data>. If <online>, the
        testing is online, with <data> being a numpy array, which has been
        first processed by <hand_segmentation_alg>. The scores retrieved from
        testing are filtered using a box filter of shape <scores_filter_shape>.
        The running mean along a buffer
        of the data is computed with running windows of lengths
        <std_small_filter_shape> and <std_big_filter_shape>. The ground truth
        for the testing data is given by
        <ground_truth_type> (for further info about the variable refer to
        <co.gd_oper.construct_ground_truth>). If the testing is online, the count of
        the frame is passed by <img_count>. If <save> is True,
        testing results are saved to <scores_savepath>, or a path constructed
        by the configuration. <testname> overrides the first line of the plots.
        If <load> is True and <scores_savepath> exists, testing is bypassed and all the
        necessary results are loaded from memory. If <just_scores> is True, the
        classification stage is not done and only scores are computed. If
        <testdatapath> is not <None> and <online> is True, then it will be
        assumed that a pseudo-online testing is taking place
'''
loaded = False
if not online:
LOG.info('Testing:' + data)
try:
self.test_ind = self.available_tests.index(data)
self.test_name = data
except BaseException:
if data.split(os.sep)[-1] in self.available_tests:
self.test_ind = (
self.available_tests.index(data.split(os.sep)[-1]))
self.test_name = data.split(os.sep)[-1]
elif data in self.dynamic_actions or data in self.passive_actions:
self.test_ind = None
elif data.split(os.sep)[-1] in self.dynamic_actions or \
data.split(os.sep)[-1] in self.passive_actions:
self.test_ind = None
else:
raise Exception('test data must be inside test_save_path,' +
' check config.yaml')
if construct_gt and ground_truth_type is None:
                ground_truth_type = os.path.join(
co.CONST['ground_truth_fold'],
self.test_name + '.csv')
elif isinstance(data, tuple):
derot_angle = data[1]
derot_center = data[2]
data = data[0]
if not self.testing_initialized or not online:
if not self.init_testing(data=data,
online=online,
save=save,
load=load,
testname=testname,
scores_savepath=scores_savepath,
scores_filter_shape=scores_filter_shape,
std_small_filter_shape=std_small_filter_shape,
std_big_filter_shape=std_big_filter_shape,
testdatapath=testdatapath,
save_results=save_results):
return False
if not online:
if self.test_ind is not None and (
load and self.accuracies[self.available_tests.index(self.test_name)]
is not None):
LOG.info('Tests already performed, loaded data')
try:
self.scores = self.results['Scores']
loaded = True
except:
pass
if not loaded:
if self.test_ind is not None:
testdata = self.offline_testdata_processing(
os.path.join(co.CONST['test_save_path'],
self.test_name))
else:
testdata = self.offline_testdata_processing(
data)
try:
self.test_ind = self.available_tests.index(data)
                except BaseException:
                    self.test_ind = None
LOG.info(self.full_info + ':')
LOG.info('Testing Classifiers using testdata with size: '
+ str(testdata.shape))
fmask = np.prod(np.isfinite(testdata), axis=1).astype(bool)
fin_scores = self.unified_classifier.decide(
testdata[fmask, :])
self.scores = np.zeros(
(testdata.shape[0], fin_scores.shape[1]))
self.scores[:] = None
self.scores[fmask] = fin_scores
if self.test_ind is not None:
self.testdata[self.test_ind]['Results']['Scores'] = self.scores
if construct_gt:
LOG.info('Constructing ground truth vector..')
self.test_ground_truth, self.test_breakpoints = co.gd_oper.construct_ground_truth(
os.path.join(co.CONST['test_save_path'], self.test_name),
classes_namespace=self.train_classes,
length=self.scores.shape[0],
ground_truth_type=ground_truth_type,
ret_breakpoints=True)
utterances_inds = co.gd_oper.merge_utterances_vectors(
co.gd_oper.create_utterances_vectors(
self.test_breakpoints, len(self.test_ground_truth)),
self.train_classes)
if not just_scores:
self.classify_offline(save=save, display=display_scores,
compute_perform=compute_perform,
extraction_method=
self.parameters[
'testing_params']['post_scores_processing_method'])
self.correlate_with_ground_truth(save=save,
display=display_scores,
compute_perform=compute_perform,
utterances_inds=utterances_inds)
self.display_scores_and_time(save=save)
if self.test_ind is not None:
co.file_oper.save_labeled_data(['Testing'] +self.tests_ids[
self.test_ind], self.testdata[self.test_ind])
if not just_scores:
if display_scores:
if self.parameters['testing_params'][
'post_scores_processing_method'] == 'CSTD':
self.plot_result(np.concatenate((
self.less_filtered_scores_std[:, None],
self.high_filtered_scores_std[:, None]), axis=1),
info='Scores Statistics',
xlabel='Frames',
labels=['STD', 'STD Mean'],
colors=['r', 'g'],
save=save)
mean_diff = (np.array(self.high_filtered_scores_std) -
np.array(self.less_filtered_scores_std))
mean_diff = (mean_diff) / float(np.max(np.abs(mean_diff[
np.isfinite(mean_diff)])))
plots = [mean_diff]
labels = ['ScoresSTD - ScoresSTDMean']
if self.test_ground_truth is not None:
plots += [((self.test_ground_truth - np.mean(self.test_ground_truth[
np.isfinite(self.test_ground_truth)])) / float(
np.max(self.test_ground_truth[
np.isfinite(self.test_ground_truth)])))[:, None]]
labels += ['Ground Truth']
self.plot_result(np.concatenate(plots, axis=1), labels=labels,
info='Metric of actions starting and ending ' +
'points', xlabel='Frames', save=save)
if display_scores:
self.plot_result(self.scores,
labels=self.train_classes,
info='Scores',
xlabel='Frames',
save=save,
)
return True, self.scores
else:
'''
            input is processed by hand_segmentation_alg (any data
            processed in such a way that the result matches this processing
            is acceptable, e.g. Dexter).
            There must be a continuous data stream (the method is called in every
            loop), even if the result of the previous algorithm is None
'''
scores_exist, score = self.process_online_data(data, img_count,
derot_angle,
derot_center,
just_scores=just_scores)
return scores_exist, score
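    # Illustrative offline call (sketch; 'validation_set' is a hypothetical test
    # name that must exist in <self.available_tests>):
    #
    #     flag, scores = classifier.run_testing(data='validation_set',
    #                                           online=False, save=True)
    #
    # In the online branch the method instead returns (scores_exist, score) for
    # the single frame passed in <data>.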
def visualize_action(self, action, save=True,
save_name=None, *args, **kwargs):
'''
Visualizes action or a testing dataset using predefined locations in
config.yaml and the method co.draw_oper.plot_utterances
'''
dataset_loc = '/media/vassilis/Thesis/Datasets/PersonalFarm/'
results_loc = '/home/vassilis/Thesis/KinectPainting/Results/DataVisualization'
ground_truth, breakpoints, labels = co.gd_oper.load_ground_truth(action, ret_labs=True,
ret_breakpoints=True)
        testing = True
images_base_loc = os.path.join(dataset_loc, 'actions',
'sets' if not testing else 'whole_result')
images_loc = os.path.join(
images_base_loc, action.replace(
'_', ' ').title())
imgs, masks, sync, angles, centers, samples_indices = co.imfold_oper.load_frames_data(
images_loc, masks_needed=True)
import cv2
masks_centers = []
xdim = 0
ydim = 0
conts = []
tmp = []
for mask, img in zip(masks, imgs):
conts = cv2.findContours(
mask, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_NONE)[1]
conts_areas = [cv2.contourArea(cont) for cont in conts]
tmp.append(np.sum(mask*img >0))
if np.sum(mask*img >0) < 500:
masks_centers.append(None)
else:
cont = conts[np.argmax(conts_areas)]
x, y, w, h = cv2.boundingRect(cont)
if w == 0 or h == 0:
masks_centers.append(None)
else:
                    masks_centers.append([y + h // 2, x + w // 2])
xdim = max(w, xdim)
ydim = max(h, ydim)
cropped_imgs = []
for img, center in zip(imgs, masks_centers):
if center is not None:
                cropped_img = img[max(0, center[0] - ydim // 2)
                                  :min(img.shape[0], center[0] + ydim // 2),
                                  max(0, center[1] - xdim // 2)
                                  :min(img.shape[1], center[1] + xdim // 2)]
                inp_img = np.zeros((ydim, xdim))
#!/usr/bin/env python3
# coding: utf-8
import numpy as np
# there are some methods to create an ndarray (the basic storage unit in numpy)
class Create_ndarray():
    # this method builds and prints an ndarray created from a list or tuple
def from_iterator(self, dataset):
assert isinstance(dataset, list) or isinstance(dataset, tuple), "the dataset is invalid"
print(np.array(dataset))
    # ndarray creation helpers (arange, linspace, logspace)
def from_method(self):
# arange(start, stop, step, dtype)
arr = np.arange(1, 20, 3)
print('arang', arr)
# linspace(start, stop, num=50, endpoint=True, ...)
lis = np.linspace(1, 20, num=5)
print('linspace', lis)
# logspaces(start, stop, num=50, endpoint=True, base=10.0, dtype=None)
lgs = np.logspace(1, 2, num=5)
print('logspace', lgs)
    # read data from a file
def from_file(self):
f = open('file.data', 'r')
# genfromtxt(f, delimiter, autostrip=False, skip_header=0, skip_footer=0, usecols=(m,n)) get data from file
data = np.genfromtxt(f)
print('fileData', data)
    # some useful ndarray attributes
def some_attribute(self):
arr = np.arange(1, 20, 3)
print('dtype', arr.dtype)
print('itemsize', arr.itemsize)
print('ndim', arr.ndim)
print('real.data', arr.real)
print('size', arr.size)
print('shape', arr.shape)
    # some useful numpy methods
def some_method(self):
arr = np.arange(1, 20, 2)
print('sub ndarray', arr[2:])
print("multi bool", arr[(arr > 5) & (arr < 9)])
        # fancy indexing with an explicit list of indices (1-D arrays only here)
        print('specify line', arr[[2, 5]])
print('repeat', arr.repeat(3))
print('tile', np.tile(arr, 4))
        # reshape the ndarray into 2 rows so it can be split evenly below
        arr = arr.reshape(2, -1)
print('reshape', arr)
print('Transpose', arr.transpose())
# or
print('T', arr.T)
# and we need get two matrix, use np.split
arr1, arr2 = np.split(arr, 2)
print('vstack', np.vstack([arr1,arr2]))
# or
print('r_', np.r_[arr1, arr2])
print('hstack', np.hstack([arr1,arr2]))
# or
print('C_', np.c_[arr1, arr2])
print('same, 1 2', np.intersect1d(arr1,arr2))
print('same, 1 1', np.intersect1d(arr1,arr1))
        print('diff, 1 1', np.setdiff1d(arr1, arr1))
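# Minimal usage sketch (illustrative, not part of the original script); the
# from_file() call is skipped because it expects a local 'file.data' file.
if __name__ == '__main__':
    demo = Create_ndarray()
    demo.from_iterator([1, 2, 3])
    demo.from_method()
    demo.some_attribute()
    demo.some_method()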
"""Implements the echo-top-based storm-tracking algorithm.
This algorithm is discussed in Section 3c of Homeyer et al. (2017). The main
advantage of this algorithm (in my experience) over segmotion (Lakshmanan and
Smith 2010) is that it provides more intuitive and longer storm tracks. The
main disadvantage of the echo-top-based algorithm (in my experience) is that it
provides only storm centers, not objects. In other words, the echo-top-based
algorithm does not provide the bounding polygons.
--- REFERENCES ---
<NAME>. and <NAME>, 2018: "A method for identifying midlatitude
mesoscale convective systems in radar mosaics, part II: Tracking". Journal
of Applied Meteorology and Climatology, in press,
doi:10.1175/JAMC-D-17-0294.1.
<NAME>., and <NAME>, and <NAME>, 2017: "On the development of
above-anvil cirrus plumes in extratropical convection". Journal of the
Atmospheric Sciences, 74 (5), 1617-1633.
<NAME>., and <NAME>, 2010: "Evaluating a storm tracking algorithm".
26th Conference on Interactive Information Processing Systems, Atlanta, GA,
American Meteorological Society.
"""
import copy
import os.path
import warnings
from itertools import chain
import numpy
import pandas
from scipy.ndimage.filters import gaussian_filter
from scipy.stats import mode as scipy_mode
from skimage.measure import label as label_image
from gewittergefahr.gg_io import myrorss_and_mrms_io
from gewittergefahr.gg_io import storm_tracking_io as tracking_io
from gewittergefahr.gg_utils import temporal_tracking
from gewittergefahr.gg_utils import track_reanalysis
from gewittergefahr.gg_utils import radar_utils
from gewittergefahr.gg_utils import radar_sparse_to_full as radar_s2f
from gewittergefahr.gg_utils import dilation
from gewittergefahr.gg_utils import grids
from gewittergefahr.gg_utils import projections
from gewittergefahr.gg_utils import polygons
from gewittergefahr.gg_utils import geodetic_utils
from gewittergefahr.gg_utils import time_conversion
from gewittergefahr.gg_utils import storm_tracking_utils as tracking_utils
from gewittergefahr.gg_utils import echo_classification as echo_classifn
from gewittergefahr.gg_utils import error_checking
TOLERANCE = 1e-6
TIME_FORMAT = '%Y-%m-%d-%H%M%S'
SEPARATOR_STRING = '\n\n' + '*' * 50 + '\n\n'
RADIANS_TO_DEGREES = 180. / numpy.pi
DEGREES_LAT_TO_METRES = 60 * 1852
CENTRAL_PROJ_LATITUDE_DEG = 35.
CENTRAL_PROJ_LONGITUDE_DEG = 265.
VALID_RADAR_FIELD_NAMES = [
radar_utils.ECHO_TOP_15DBZ_NAME, radar_utils.ECHO_TOP_18DBZ_NAME,
radar_utils.ECHO_TOP_20DBZ_NAME, radar_utils.ECHO_TOP_25DBZ_NAME,
radar_utils.ECHO_TOP_40DBZ_NAME, radar_utils.ECHO_TOP_50DBZ_NAME,
radar_utils.REFL_COLUMN_MAX_NAME # HACK
]
VALID_RADAR_SOURCE_NAMES = [
radar_utils.MYRORSS_SOURCE_ID, radar_utils.MRMS_SOURCE_ID
]
DEFAULT_MIN_ECHO_TOP_KM = 4.
DEFAULT_SMOOTHING_RADIUS_DEG_LAT = 0.024
DEFAULT_HALF_WIDTH_FOR_MAX_FILTER_DEG_LAT = 0.06
DEFAULT_MIN_INTERMAX_DISTANCE_METRES = 0.1 * DEGREES_LAT_TO_METRES
DEFAULT_MIN_SIZE_PIXELS = 0
DEFAULT_MAX_LINK_TIME_SECONDS = 360
DEFAULT_MAX_VELOCITY_DIFF_M_S01 = 30.
DEFAULT_MAX_LINK_DISTANCE_M_S01 = (
0.125 * DEGREES_LAT_TO_METRES / DEFAULT_MAX_LINK_TIME_SECONDS
)
DEFAULT_MAX_JOIN_TIME_SEC = 720
DEFAULT_MAX_JOIN_ERROR_M_S01 = 30.
DEFAULT_MIN_REANALYZED_DURATION_SEC = 1
DUMMY_TRACKING_SCALE_METRES2 = int(numpy.round(numpy.pi * 1e8)) # 10-km radius
MAX_VALUES_KEY = 'max_values'
def _check_radar_field(radar_field_name):
"""Error-checks radar field.
:param radar_field_name: Field name (string).
:raises: ValueError: if `radar_field_name not in VALID_RADAR_FIELD_NAMES`.
"""
error_checking.assert_is_string(radar_field_name)
if radar_field_name not in VALID_RADAR_FIELD_NAMES:
error_string = (
'\n{0:s}\nValid radar fields (listed above) do not include "{1:s}".'
).format(str(VALID_RADAR_FIELD_NAMES), radar_field_name)
raise ValueError(error_string)
def _check_radar_source(radar_source_name):
"""Error-checks source of radar data.
:param radar_source_name: Data source (string).
:raises: ValueError: if `radar_source_name not in VALID_RADAR_SOURCE_NAMES`.
"""
error_checking.assert_is_string(radar_source_name)
if radar_source_name not in VALID_RADAR_SOURCE_NAMES:
error_string = (
'\n{0:s}\nValid radar sources (listed above) do not include '
'"{1:s}".'
).format(str(VALID_RADAR_SOURCE_NAMES), radar_source_name)
raise ValueError(error_string)
def _gaussian_smooth_radar_field(radar_matrix, e_folding_radius_pixels,
cutoff_radius_pixels=None):
"""Applies Gaussian smoother to radar field. NaN's are treated as zero.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
:param radar_matrix: M-by-N numpy array of data values.
:param e_folding_radius_pixels: e-folding radius.
:param cutoff_radius_pixels: Cutoff radius. If
`cutoff_radius_pixels is None`, will default to
`3 * e_folding_radius_pixels`.
:return: smoothed_radar_matrix: Smoothed version of input.
"""
e_folding_radius_pixels = float(e_folding_radius_pixels)
if cutoff_radius_pixels is None:
cutoff_radius_pixels = 3 * e_folding_radius_pixels
radar_matrix[numpy.isnan(radar_matrix)] = 0.
smoothed_radar_matrix = gaussian_filter(
input=radar_matrix, sigma=e_folding_radius_pixels, order=0,
mode='constant', cval=0.,
truncate=cutoff_radius_pixels / e_folding_radius_pixels)
smoothed_radar_matrix[
numpy.absolute(smoothed_radar_matrix) < TOLERANCE
] = numpy.nan
return smoothed_radar_matrix
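def _example_gaussian_smoothing():
    """Illustrative sketch only (not part of the original module).

    Smooths a toy 5-by-5 echo-top field that is NaN everywhere except one grid
    cell, showing how `_gaussian_smooth_radar_field` treats NaN's as zero.
    """

    toy_matrix_km = numpy.full((5, 5), numpy.nan)
    toy_matrix_km[2, 2] = 10.

    return _gaussian_smooth_radar_field(
        radar_matrix=toy_matrix_km, e_folding_radius_pixels=1.)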
def _find_local_maxima(radar_matrix, radar_metadata_dict,
neigh_half_width_pixels):
"""Finds local maxima in radar field.
M = number of rows (unique grid-point latitudes)
N = number of columns (unique grid-point longitudes)
P = number of local maxima
:param radar_matrix: M-by-N numpy array of data values.
:param radar_metadata_dict: Dictionary created by
`myrorss_and_mrms_io.read_metadata_from_raw_file`.
:param neigh_half_width_pixels: Half-width of neighbourhood for max filter.
:return: local_max_dict_simple: Dictionary with the following keys.
local_max_dict_simple['latitudes_deg']: length-P numpy array with latitudes
(deg N) of local maxima.
local_max_dict_simple['longitudes_deg']: length-P numpy array with
longitudes (deg E) of local maxima.
local_max_dict_simple['max_values']: length-P numpy array with magnitudes of
local maxima.
"""
filtered_radar_matrix = dilation.dilate_2d_matrix(
input_matrix=radar_matrix, percentile_level=100.,
half_width_in_pixels=neigh_half_width_pixels)
max_index_arrays = numpy.where(
numpy.absolute(filtered_radar_matrix - radar_matrix) < TOLERANCE
)
max_row_indices = max_index_arrays[0]
max_column_indices = max_index_arrays[1]
max_latitudes_deg, max_longitudes_deg = radar_utils.rowcol_to_latlng(
grid_rows=max_row_indices, grid_columns=max_column_indices,
nw_grid_point_lat_deg=
radar_metadata_dict[radar_utils.NW_GRID_POINT_LAT_COLUMN],
nw_grid_point_lng_deg=
radar_metadata_dict[radar_utils.NW_GRID_POINT_LNG_COLUMN],
lat_spacing_deg=radar_metadata_dict[radar_utils.LAT_SPACING_COLUMN],
lng_spacing_deg=radar_metadata_dict[radar_utils.LNG_SPACING_COLUMN]
)
max_values = radar_matrix[max_row_indices, max_column_indices]
sort_indices = numpy.argsort(-max_values)
max_values = max_values[sort_indices]
max_latitudes_deg = max_latitudes_deg[sort_indices]
max_longitudes_deg = max_longitudes_deg[sort_indices]
return {
temporal_tracking.LATITUDES_KEY: max_latitudes_deg,
temporal_tracking.LONGITUDES_KEY: max_longitudes_deg,
MAX_VALUES_KEY: max_values
}
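# The comparison in `_find_local_maxima` relies on the fact that grid cells left
# unchanged by the percentile (max) dilation are local maxima of the field.  A
# minimal stand-alone illustration of the same idea (sketch only, using SciPy's
# maximum filter rather than the dilation module):
#
#     from scipy.ndimage import maximum_filter
#     toy_field = numpy.random.rand(10, 10)
#     is_local_max = numpy.isclose(maximum_filter(toy_field, size=3), toy_field)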
def _remove_redundant_local_maxima(local_max_dict, projection_object,
min_intermax_distance_metres):
"""Removes redundant local maxima at one time.
P = number of local maxima retained
:param local_max_dict: Dictionary with at least the following keys.
local_max_dict['latitudes_deg']: See doc for `_find_local_maxima`.
local_max_dict['longitudes_deg']: Same.
local_max_dict['max_values']: Same.
:param projection_object: Instance of `pyproj.Proj` (used to convert local
maxima from lat-long to x-y coordinates).
:param min_intermax_distance_metres: Minimum distance between any pair of
local maxima.
:return: local_max_dict: Same as input, except that no pair of maxima is
within `min_intermax_distance_metres`. Also contains additional columns
listed below.
local_max_dict['x_coords_metres']: length-P numpy array with x-coordinates
of local maxima.
local_max_dict['y_coords_metres']: length-P numpy array with y-coordinates
of local maxima.
"""
x_coords_metres, y_coords_metres = projections.project_latlng_to_xy(
latitudes_deg=local_max_dict[temporal_tracking.LATITUDES_KEY],
longitudes_deg=local_max_dict[temporal_tracking.LONGITUDES_KEY],
projection_object=projection_object,
false_easting_metres=0., false_northing_metres=0.)
local_max_dict.update({
temporal_tracking.X_COORDS_KEY: x_coords_metres,
temporal_tracking.Y_COORDS_KEY: y_coords_metres
})
num_maxima = len(x_coords_metres)
keep_max_flags = numpy.full(num_maxima, True, dtype=bool)
for i in range(num_maxima):
if not keep_max_flags[i]:
continue
these_distances_metres = numpy.sqrt(
(x_coords_metres - x_coords_metres[i]) ** 2 +
(y_coords_metres - y_coords_metres[i]) ** 2
)
these_redundant_indices = numpy.where(numpy.logical_and(
these_distances_metres < min_intermax_distance_metres,
keep_max_flags
))[0]
if len(these_redundant_indices) == 1:
continue
keep_max_flags[these_redundant_indices] = False
this_best_index = numpy.argmax(
local_max_dict[MAX_VALUES_KEY][these_redundant_indices]
)
this_best_index = these_redundant_indices[this_best_index]
keep_max_flags[this_best_index] = True
indices_to_keep = numpy.where(keep_max_flags)[0]
for this_key in local_max_dict:
if isinstance(local_max_dict[this_key], list):
local_max_dict[this_key] = [
local_max_dict[this_key][k] for k in indices_to_keep
]
elif isinstance(local_max_dict[this_key], numpy.ndarray):
local_max_dict[this_key] = local_max_dict[this_key][
indices_to_keep]
# x_coords_metres, y_coords_metres = projections.project_latlng_to_xy(
# latitudes_deg=local_max_dict[temporal_tracking.LATITUDES_KEY],
# longitudes_deg=local_max_dict[temporal_tracking.LONGITUDES_KEY],
# projection_object=projection_object,
# false_easting_metres=0., false_northing_metres=0.)
#
# coord_matrix_metres = numpy.hstack((
# numpy.reshape(x_coords_metres, (x_coords_metres.size, 1)),
# numpy.reshape(y_coords_metres, (y_coords_metres.size, 1))
# ))
#
# distance_matrix_metres = euclidean_distances(
# X=coord_matrix_metres, Y=coord_matrix_metres)
#
# for i in range(len(x_coords_metres)):
# distance_matrix_metres[i, i] = numpy.inf
#
# these_rows, these_columns = numpy.where(
# distance_matrix_metres < min_intermax_distance_metres)
#
# for i in range(len(these_rows)):
# print (
# '{0:d}th max (at {1:.2f} deg N and {2:.2f} deg E) and {3:d}th max '
# '(at {4:.2f} deg N and {5:.2f} deg E) are within {6:.1f} metres'
# ).format(
# these_rows[i],
# local_max_dict[temporal_tracking.LATITUDES_KEY][these_rows[i]],
# local_max_dict[temporal_tracking.LONGITUDES_KEY][these_rows[i]],
# these_columns[i],
# local_max_dict[temporal_tracking.LATITUDES_KEY][these_columns[i]],
# local_max_dict[temporal_tracking.LONGITUDES_KEY][these_columns[i]],
# distance_matrix_metres[these_rows[i], these_columns[i]]
# )
return local_max_dict
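# Pruning rule implemented above, in miniature (sketch with made-up numbers):
# among maxima closer together than `min_intermax_distance_metres`, only the
# strongest one survives.
#
#     these_values = numpy.array([9., 7., 8.])  # maxima in one close cluster
#     keep_index = numpy.argmax(these_values)   # -> 0, i.e. the 9-km maximum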
def _check_time_period(
first_spc_date_string, last_spc_date_string, first_time_unix_sec,
last_time_unix_sec):
"""Error-checks time period.
:param first_spc_date_string: First SPC date in period (format "yyyymmdd").
:param last_spc_date_string: Last SPC date in period.
:param first_time_unix_sec: First time in period. If
`first_time_unix_sec is None`, defaults to first time on first SPC date.
:param last_time_unix_sec: Last time in period. If
`last_time_unix_sec is None`, defaults to last time on last SPC date.
:return: spc_date_strings: 1-D list of SPC dates (format "yyyymmdd").
:return: first_time_unix_sec: Same as input, but may have been replaced with
default.
:return: last_time_unix_sec: Same as input, but may have been replaced with
default.
"""
spc_date_strings = time_conversion.get_spc_dates_in_range(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string)
if first_time_unix_sec is None:
first_time_unix_sec = time_conversion.string_to_unix_sec(
first_spc_date_string, time_conversion.SPC_DATE_FORMAT
) + time_conversion.MIN_SECONDS_INTO_SPC_DATE
if last_time_unix_sec is None:
last_time_unix_sec = time_conversion.string_to_unix_sec(
last_spc_date_string, time_conversion.SPC_DATE_FORMAT
) + time_conversion.MAX_SECONDS_INTO_SPC_DATE
error_checking.assert_is_greater(last_time_unix_sec, first_time_unix_sec)
assert time_conversion.is_time_in_spc_date(
first_time_unix_sec, first_spc_date_string)
assert time_conversion.is_time_in_spc_date(
last_time_unix_sec, last_spc_date_string)
return spc_date_strings, first_time_unix_sec, last_time_unix_sec
def _find_input_radar_files(
top_radar_dir_name, radar_field_name, radar_source_name,
first_spc_date_string, last_spc_date_string, first_time_unix_sec,
last_time_unix_sec):
"""Finds radar files (inputs to `run_tracking` -- basically main method).
T = number of files found
:param top_radar_dir_name: Name of top-level directory with radar files.
Files therein will be found by
`myrorss_and_mrms_io.find_raw_files_one_spc_date`.
:param radar_field_name: Field name (must be accepted by
`_check_radar_field`).
:param radar_source_name: Data source (must be accepted by
`_check_radar_source`).
:param first_spc_date_string: See doc for `_check_time_period`.
:param last_spc_date_string: Same.
:param first_time_unix_sec: Same.
:param last_time_unix_sec: Same.
:return: radar_file_names: length-T list of paths to radar files.
:return: valid_times_unix_sec: length-T numpy array of valid times.
"""
_check_radar_field(radar_field_name)
_check_radar_source(radar_source_name)
spc_date_strings, first_time_unix_sec, last_time_unix_sec = (
_check_time_period(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string,
first_time_unix_sec=first_time_unix_sec,
last_time_unix_sec=last_time_unix_sec)
)
radar_file_names = []
valid_times_unix_sec = numpy.array([], dtype=int)
num_spc_dates = len(spc_date_strings)
for i in range(num_spc_dates):
these_file_names = myrorss_and_mrms_io.find_raw_files_one_spc_date(
spc_date_string=spc_date_strings[i],
field_name=radar_field_name, data_source=radar_source_name,
top_directory_name=top_radar_dir_name, raise_error_if_missing=False)
if len(these_file_names) == 0:
continue
if i == 0:
this_first_time_unix_sec = first_time_unix_sec + 0
else:
this_first_time_unix_sec = time_conversion.get_start_of_spc_date(
spc_date_strings[i])
if i == num_spc_dates - 1:
this_last_time_unix_sec = last_time_unix_sec + 0
else:
this_last_time_unix_sec = time_conversion.get_end_of_spc_date(
spc_date_strings[i])
these_times_unix_sec = numpy.array([
myrorss_and_mrms_io.raw_file_name_to_time(f)
for f in these_file_names
], dtype=int)
good_indices = numpy.where(numpy.logical_and(
these_times_unix_sec >= this_first_time_unix_sec,
these_times_unix_sec <= this_last_time_unix_sec
))[0]
radar_file_names += [these_file_names[k] for k in good_indices]
valid_times_unix_sec = numpy.concatenate((
valid_times_unix_sec, these_times_unix_sec[good_indices]
))
sort_indices = numpy.argsort(valid_times_unix_sec)
valid_times_unix_sec = valid_times_unix_sec[sort_indices]
radar_file_names = [radar_file_names[k] for k in sort_indices]
return radar_file_names, valid_times_unix_sec
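# Illustrative call (sketch; the directory and dates below are hypothetical):
#
#     radar_file_names, valid_times_unix_sec = _find_input_radar_files(
#         top_radar_dir_name='/data/myrorss',
#         radar_field_name=radar_utils.ECHO_TOP_40DBZ_NAME,
#         radar_source_name=radar_utils.MYRORSS_SOURCE_ID,
#         first_spc_date_string='20110401', last_spc_date_string='20110402',
#         first_time_unix_sec=None, last_time_unix_sec=None)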
def _find_input_tracking_files(
top_tracking_dir_name, first_spc_date_string, last_spc_date_string,
first_time_unix_sec, last_time_unix_sec):
"""Finds tracking files (inputs to `reanalyze_across_spc_dates`).
T = number of SPC dates
:param top_tracking_dir_name: Name of top-level directory with tracking
files. Files therein will be found by
`storm_tracking_io.find_files_one_spc_date`.
:param first_spc_date_string: See doc for `_check_time_period`.
:param last_spc_date_string: Same.
:param first_time_unix_sec: Same.
:param last_time_unix_sec: Same.
:return: spc_date_strings: length-T list of SPC dates (format "yyyymmdd").
:return: tracking_file_names_by_date: length-T list, where the [i]th element
is a 1-D list of paths to tracking files for the [i]th date.
:return: valid_times_by_date_unix_sec: length-T list, where the [i]th
element is a 1-D numpy array of valid times for the [i]th date.
"""
spc_date_strings, first_time_unix_sec, last_time_unix_sec = (
_check_time_period(
first_spc_date_string=first_spc_date_string,
last_spc_date_string=last_spc_date_string,
first_time_unix_sec=first_time_unix_sec,
last_time_unix_sec=last_time_unix_sec)
)
num_spc_dates = len(spc_date_strings)
tracking_file_names_by_date = [['']] * num_spc_dates
valid_times_by_date_unix_sec = [numpy.array([], dtype=int)] * num_spc_dates
keep_date_indices = []
for i in range(num_spc_dates):
these_file_names = tracking_io.find_files_one_spc_date(
spc_date_string=spc_date_strings[i],
source_name=tracking_utils.SEGMOTION_NAME,
top_tracking_dir_name=top_tracking_dir_name,
tracking_scale_metres2=DUMMY_TRACKING_SCALE_METRES2,
raise_error_if_missing=False
)[0]
if len(these_file_names) == 0:
tracking_file_names_by_date[i] = []
continue
keep_date_indices.append(i)
if i == 0:
this_first_time_unix_sec = first_time_unix_sec + 0
else:
this_first_time_unix_sec = time_conversion.get_start_of_spc_date(
spc_date_strings[i])
if i == num_spc_dates - 1:
this_last_time_unix_sec = last_time_unix_sec + 0
else:
this_last_time_unix_sec = time_conversion.get_end_of_spc_date(
spc_date_strings[i])
these_times_unix_sec = numpy.array([
tracking_io.file_name_to_time(f) for f in these_file_names
], dtype=int)
sort_indices = numpy.argsort(these_times_unix_sec)
these_file_names = [these_file_names[k] for k in sort_indices]
these_times_unix_sec = these_times_unix_sec[sort_indices]
good_indices = numpy.where(numpy.logical_and(
these_times_unix_sec >= this_first_time_unix_sec,
these_times_unix_sec <= this_last_time_unix_sec
))[0]
tracking_file_names_by_date[i] = [
these_file_names[k] for k in good_indices
]
valid_times_by_date_unix_sec[i] = these_times_unix_sec[good_indices]
spc_date_strings = [spc_date_strings[i] for i in keep_date_indices]
tracking_file_names_by_date = [
tracking_file_names_by_date[i] for i in keep_date_indices
]
valid_times_by_date_unix_sec = [
valid_times_by_date_unix_sec[i] for i in keep_date_indices
]
return (spc_date_strings, tracking_file_names_by_date,
valid_times_by_date_unix_sec)
def _make_regions_contiguous(
region_to_grid_rows, region_to_grid_columns, grid_cell_to_region,
num_grid_rows, num_grid_columns):
"""Makes regions (local maxima) contiguous.
M = number of rows in radar grid
N = number of columns in radar grid
:param region_to_grid_rows: 1-D list, where the [k]th element is a numpy
array with row indices of grid cells in the [k]th region.
:param region_to_grid_columns: Same but for columns.
:param grid_cell_to_region: Double-indexed dictionary. If key [i, j] has
value k, grid cell [i, j] belongs to region k.
:param num_grid_rows: M in the above discussion.
:param num_grid_columns: N in the above discussion.
:return: radar_to_region_matrix: M-by-N numpy array of region indices, where
-1 means "not part of a region".
"""
num_maxima = len(region_to_grid_rows)
radar_to_region_matrix = numpy.full(
(num_grid_rows, num_grid_columns), -1, dtype=int
)
for k in range(num_maxima):
radar_to_region_matrix[
region_to_grid_rows[k], region_to_grid_columns[k]
] = k
if len(region_to_grid_rows[k]) == 1:
continue
isolated_rows = []
isolated_columns = []
for i, j in zip(region_to_grid_rows[k], region_to_grid_columns[k]):
neigh_row_flags = numpy.logical_and(
region_to_grid_rows[k] >= i - 1, region_to_grid_rows[k] <= i + 1
)
neigh_column_flags = numpy.logical_and(
region_to_grid_columns[k] >= j - 1,
region_to_grid_columns[k] <= j + 1
)
num_neighbours = -1 + numpy.sum(numpy.logical_and(
neigh_row_flags, neigh_column_flags
))
if num_neighbours > 0:
continue
isolated_rows.append(i)
isolated_columns.append(j)
isolated_rows = numpy.array(isolated_rows, dtype=int)
isolated_columns = numpy.array(isolated_columns, dtype=int)
for i, j in zip(isolated_rows, isolated_columns):
these_region_indices = []
for i_new in range(i - 1, i + 2):
for j_new in range(j - 1, j + 2):
if (i_new, j_new) in grid_cell_to_region:
these_region_indices.append(
grid_cell_to_region[i_new, j_new]
)
else:
these_region_indices.append(numpy.nan)
these_region_indices = numpy.array(these_region_indices)
these_region_indices[these_region_indices == k] = numpy.nan
this_mode_object = scipy_mode(
these_region_indices, axis=None, nan_policy='omit')
radar_to_region_matrix[i, j] = int(numpy.round(
this_mode_object.mode
))
return radar_to_region_matrix
def _local_maxima_to_regions(
local_max_dict, echo_top_matrix_km, min_echo_top_km,
radar_latitudes_deg, radar_longitudes_deg):
"""Converts local maxima at one time from points to regions.
:param local_max_dict: See doc for `_local_maxima_to_polygons`.
:param echo_top_matrix_km: Same.
:param min_echo_top_km: Same.
:param radar_latitudes_deg: length-M numpy array of grid-point latitudes
(deg N).
:param radar_longitudes_deg: length-N numpy array of grid-point longitudes
(deg E).
:return: radar_to_region_matrix: M-by-N numpy array of integers. If
radar_to_region_matrix[i, j] = k, grid cell [i, j] belongs to the [k]th
local max. If radar_to_region_matrix[i, j] = -1, grid cell [i, j] is
not part of a local max.
"""
orig_region_id_matrix = label_image(
echo_top_matrix_km >= min_echo_top_km, connectivity=2)
rows_in_any_region, columns_in_any_region = numpy.where(
orig_region_id_matrix > 0)
num_maxima = len(local_max_dict[temporal_tracking.LATITUDES_KEY])
region_to_grid_rows = [None] * num_maxima
region_to_grid_columns = [None] * num_maxima
grid_cell_to_regions = {}
for k in range(num_maxima):
this_row = numpy.argmin(numpy.absolute(
local_max_dict[temporal_tracking.LATITUDES_KEY][k] -
radar_latitudes_deg
))
this_column = numpy.argmin(numpy.absolute(
local_max_dict[temporal_tracking.LONGITUDES_KEY][k] -
radar_longitudes_deg
))
this_region_id = orig_region_id_matrix[this_row, this_column]
if this_region_id == 0:
            region_to_grid_rows[k] = numpy.array([this_row], dtype=int)
"""Visualization tools"""
import matplotlib.pyplot as plt
import numpy as np
from skimage import exposure
import os
import pandas as pd
import geopandas as gpd
def normalize(array):
"""Normalizes numpy arrays into scale 0.0 - 1.0"""
array_min, array_max = array.min(), array.max()
return ((array - array_min) / (array_max - array_min))
def plot_prediction(image, label, prediction):
"""Plot an image with labels, optionally create a three band composite
Args:
image: a rgb or multiband image
label: true class
prediction: predicted class
ls_pct: linear stretch of three band
"""
fig = plt.figure()
ax = fig.add_subplot(111)
#check if hyperspec and create three band false color.
if image.shape[2] > 3:
        plot_image = image[:, :, [11, 55, 113]].astype("float")
        for band in np.arange(plot_image.shape[2]):
            plot_image[:, :, band] = normalize(plot_image[:, :, band])
else:
plot_image = image.astype(int)
ax.imshow(plot_image)
ax.set_title("True: {}, Predicted: {} ".format(label, prediction))
return fig
def create_raster(results):
"""Reshape a set of predictions from DeepTreeAttention.predict into a raster image"""
#Create image
rowIDs = results['row']
colIDs = results['col']
predicted_raster = np.zeros((rowIDs.max() + 1, colIDs.max() + 1))
predicted_raster[rowIDs, colIDs] = results["label"]
predicted_raster = predicted_raster.astype("uint16")
return predicted_raster
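# Illustrative input for create_raster (sketch; the column names follow the code
# above, the values are made up):
#
#     results = pd.DataFrame({"row": [0, 0, 1, 1],
#                             "col": [0, 1, 0, 1],
#                             "label": [3, 3, 7, 7]})
#     raster = create_raster(results)  # -> 2 x 2 uint16 array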
def discrete_cmap(N, base_cmap=None):
"""Create an N-bin discrete colormap from the specified input map"""
# Note that if base_cmap is a string or None, you can simply do
# return plt.cm.get_cmap(base_cmap, N)
# The following works for string, None, or a colormap instance:
base = plt.cm.get_cmap(base_cmap)
    color_list = base(np.linspace(0, 1, N))
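    # The original snippet ends here; returning an N-bin colormap built from the
    # sampled colors is an assumption that follows the widely used recipe, not
    # part of the source:
    return base.from_list(base.name + str(N), color_list, N)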
# -*- coding: utf-8 -*-
"""
Created on Wed Jan 30 16:46:12 2019
Last edition on Tue Nov 17 14:43:37 2020
@author: <NAME>
@Aim: Universal objects for the treatment of Stag ouptuts
"""
from pathlib import Path
import numpy as np
import matplotlib.pyplot as plt
from .stagReader import fields, reader_time, reader_rprof, reader_plates_analyse
from .stagError import NoFileError, InputGridGeometryError, GridGeometryError, GridGeometryInDevError, \
MetaCheckFieldUnknownError, MetaFileInappropriateError, FieldTypeInDevError, \
VisuGridGeometryError, StagTypeError, CloudBuildIndexError, SliceAxisError, \
IncoherentSliceAxisError
class MainStagObject:
"""
Main class defining the highest level of inheritance
for StagData derived object
"""
def __init__(self):
"""
Parent builder
"""
# ----- Generic ----- #
self.pName = 'stagData'
self.verbose = True #Condition on the verbose output
self.fieldType = 'Temperature' #Field contained in the current object
self.fieldNature = 'Scalar' #Nature of the field: Scalar or Vectorial
self.path = '' #The path to the stag file
self.fname = '' #File name of the stag file
self.resampling = []#Resampling Parameters
self.header = [] #Raw header of stag file
self.simuAge = 0 #Dimensionless age of the simulation
        self.ti_step = 0 #Inner step of the stag simulation state
self.flds = [] #Raw fields of stag file
self.x_coords = [] #x matrix in the header (modified by the resampling)
self.y_coords = [] #y matrix in the header (modified by the resampling)
self.z_coords = [] #z matrix in the header (modified by the resampling)
self.ntb = 0 #number of blocks, 2 for yinyang or cubed sphere, 1 for others
self.slayers = [] #matrix of selected layers (same size as z_coord)
self.depths = [] #matrix of depths in real Earth for each layers
self.rcmb = 0 #Radius of the Core-Mantle Boundary
self.xind = [] #Mask: List of index keep in x (follows good index)
self.yind = [] #Mask: List of index keep in y (follows good index)
self.zind = [] #Mask: List of index keep in z (follows good index)
self.nx0 = 0 #Number of points in the x direction in the original input file
self.ny0 = 0 #Number of points in the y direction in the original input file
self.nz0 = 0 #Number of points in the z direction in the original input file
self.nx = 0 #Current number of points in the x direction (after resampling)
self.ny = 0 #Current number of points in the y direction (after resampling)
self.nz = 0 #Current number of points in the z direction (after resampling)
# Other
self.BIN = None
self.bin = None
def im(self,textMessage):
"""Print verbose internal message. This function depends on the
argument of self.verbose. If self.verbose == True then the message
will be displayed on the terminal.
<i> : textMessage = str, message to display
"""
if self.verbose == True:
print('>> '+self.pName+'| '+textMessage)
def stagImport(self, directory, fname, beginIndex=-1, endIndex=-1, resampling=[1,1,1]):
""" This function reads a stag data file using the modul stagReader.fields
and fill the appropriated fields of the current StagData object.
<i> : directory = str, path to reach the data file
fname = str, name of the data file
beginIndex = int, deepest index for considered layers. If beginIndex=-1, the
deepest index is 0, the deepest produced by stag.
(Default: beginIndex = -1)
endIndex = int, shallowest index for considered layers. If endIndex=-1, the
shallowest index is the shallowest index produced by stag.
(Default: endIndex = -1)
resampling = list/array/tuple, matrix of dim 3 containing the
resampling parameters (int) on X, Y and Z axis as:
resampling = [resampling_on_X,resampling_on_Y,resampling_on_Z]
(Default: resampling=[1,1,1], means no resampling)
"""
self.im('Reading and resampling: '+fname)
        self.path = Path(directory+fname) #create a Path object
self.fname = fname
self.resampling = resampling
# - First, test the geometry:
if self.geometry not in ('cart2D','cart3D','yy','spherical','annulus'):
raise InputGridGeometryError(self.geometry)
# - Read Stag binary files:
try:
(self.header,self.flds) = fields(self.path)
except:
raise NoFileError(directory,fname)
        #Structure of the 'flds' variable:
# [Var][x-direction][y_direction][z-direction][block_index]
self.x_coords = self.header.get('e1_coord')
self.y_coords = self.header.get('e2_coord')
self.z_coords = self.header.get('e3_coord')
# number of blocks, 2 for yinyang or cubed sphere
self.ntb = self.header.get('ntb')
#Conditioning the 2D/3D geometry problem:
if type(self.x_coords) != np.ndarray:
self.x_coords = np.array([self.x_coords])
self.im(' - 2D data detected: plan yz')
if self.geometry != 'cart2D':
raise GridGeometryError(self.geometry,'cart2D')
elif type(self.y_coords) != np.ndarray:
self.y_coords = np.array([self.y_coords])
self.im(' - 2D data detected: plan xz')
if self.geometry != 'cart2D':
raise GridGeometryError(self.geometry,'cart2D')
elif type(self.z_coords) != np.ndarray:
self.z_coords = np.array([self.z_coords])
self.im(' - 2D data detected: plan xy')
if self.geometry != 'cart2D':
raise GridGeometryError(self.geometry,'cart2D')
else:
self.im(' - 3D data detected')
if self.geometry != 'cart3D' and self.geometry != 'yy' and self.geometry != 'spherical':
raise GridGeometryError(self.geometry,'cart3D, yy or spherical')
if self.ntb == 1:
#Grid geometry compatible with cart3D or spherical
self.im(' -> Grid geometry compatible with cart3D or spherical')
if self.geometry == 'yy':
raise GridGeometryError(self.geometry,'cart3D or spherical')
elif self.ntb == 2:
self.im(' -> YinYang grid detected')
if self.geometry == 'cart3D' or self.geometry == 'spherical':
raise GridGeometryError(self.geometry,'yy')
self.nx0 = len(self.x_coords)
self.ny0 = len(self.y_coords)
self.nz0 = len(self.z_coords)
self.nx = len(self.x_coords)
self.ny = len(self.y_coords)
self.nz = len(self.z_coords)
self.im(" - Original grid geometry:")
self.im(" - Nx = "+str(self.nx0))
self.im(" - Ny = "+str(self.ny0))
self.im(" - Nz = "+str(self.nz0))
Nz = len(self.header.get('e3_coord'))
        #assign default values to the optional layer-range parameters
if beginIndex == -1:
beginIndex = 0
if endIndex == -1:
endIndex = self.nz
self.slayers = np.linspace(1,self.nz,self.nz)
self.rcmb = self.header.get('rcmb')
self.simuAge = self.header.get('ti_ad')
self.ti_step = self.header.get('ti_step')
def resampling_coord(coords,sampling):
"""This function resamples coords matrix according to the sampling rate.
sampling must be an integer. If sampling ==1 the output is the input.
If sampling == 2, the output is twince smaller than the input.
Return the new matrix after the resampling and the matrix of elements
(index) that have been keep (1) and remove (0)
"""
n = 0
new_coords = []
ind = []
while n < len(coords):
new_coords.append(coords[n])
ind.append(n)
n += sampling
            if new_coords[len(new_coords)-1] != coords[len(coords)-1] and len(coords)>1:
                #guarantee to keep the first AND the last value of coords:
                #this conserves the input shape
new_coords.append(coords[len(coords)-1])
ind.append(len(coords)-1)
index = []
for i in range(len(coords)):
if i in ind:
index.append(1)
else:
index.append(0)
return (np.array(new_coords), np.array(index)) #conserve the array-type
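        # Example of the resampling behaviour (sketch): with
        # coords = [0, 1, 2, 3, 4] and sampling = 2, resampling_coord returns
        # new_coords = [0, 2, 4] and index = [1, 0, 1, 0, 1]; the first and last
        # coordinates are always preserved, so the grid extent does not change.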
(self.x_coords, self.xind) = resampling_coord(self.x_coords,resampling[0])
(self.y_coords, self.yind) = resampling_coord(self.y_coords,resampling[1])
(self.z_coords, self.zind) = resampling_coord(self.z_coords,resampling[2])
#(self.slayers, self.BIN) = resampling_coord(self.slayers, resampling[2])
## Re-mapping of the zind matrix according to the range of depth considered
# -1- Work on indexes:
zindtemp = np.zeros(Nz)
for i in range(Nz):
if i>=beginIndex and i <endIndex:
zindtemp[i] = 1
multi = np.multiply(self.zind,zindtemp)
if np.count_nonzero(multi) == 0:
self.zind = zindtemp
else:
self.zind = multi
# -2- Work on coordinates matrix
indexNewZCoord = np.where(self.zind == 1)[0]
ztemp = self.header.get('e3_coord')
new_z_coords = []
new_slayers = []
for ind in indexNewZCoord:
new_z_coords.append(ztemp[ind])
new_slayers.append(self.slayers[ind]) #Follows self.z_coord
self.z_coords = new_z_coords
self.slayers = new_slayers
#Update the geometrical variable defining the grid
self.nx = len(self.x_coords)
self.ny = len(self.y_coords)
self.nz = len(self.z_coords)
self.im(" - New grid geometry:")
self.im(" - Nx = "+str(self.nx))
self.im(" - Ny = "+str(self.ny))
self.im(" - Nz = "+str(self.nz))
#Compute depths:
dCMB = 2890 #depth CMB (km)
self.depths = [(1-self.z_coords[i])*dCMB for i in range(self.nz)]
self.depths = np.array(sorted(self.depths,reverse=True)) #sorted as self.z_coord
#What type of data is reading ?
fname = fname.split('_')[-1]
n = [fname[i] for i in range(len(fname))]
if ''.join(n[0:3]) == 'div':
self.fieldType = 'Divergence'
elif ''.join(n[0:3]) == 'eta':
self.fieldType = 'Viscosity'
elif n[0] == 't':
self.fieldType = 'Temperature'
elif ''.join(n[0:2]) == 'vp':
self.fieldType = 'Velocity'
elif ''.join(n[0:4]) == 'smax':
self.fieldType = 'Sigma max'
elif ''.join(n[0:3]) == 'dam':
self.fieldType = 'Damage'
elif ''.join(n[0:2]) == 'cs':
self.fieldType = 'Topography'
elif ''.join(n[0:3]) == 'rho':
self.fieldType = 'Density'
elif ''.join(n[0:2]) == 'ly':
self.fieldType = 'Lyapunov'
elif ''.join(n[0:3]) == 'div':
self.fieldType = 'Divergence'
elif ''.join(n[0:3]) == 'vor':
self.fieldType = 'Vorticity'
elif ''.join(n[0:3]) == 'str':
self.fieldType = 'Stress'
elif ''.join(n[0:2]) == 'po':
self.fieldType = 'Poloidal'
elif ''.join(n[0:2]) == 'to':
self.fieldType = 'Toroidal'
elif ''.join(n[0:2]) == 'ed':
self.fieldType = 'Strain Rate'
elif ''.join(n[0:1]) == 'c':
self.fieldType = 'Composition'
elif ''.join(n[0:1]) == 'f':
self.fieldType = 'Melt Fraction'
elif ''.join(n[0:2]) == 'vm':
self.fieldType = 'Melt Velocity'
elif ''.join(n[0:3]) == 'age':
self.fieldType = 'Age'
elif ''.join(n[0:1]) == 'w':
self.fieldType = 'Vorticity'
else:
self.fieldType = 'Error: Undetermined'
raise FieldTypeInDevError(fname)
if self.flds.shape[0] == 1:
self.im(' - Scalar field detected')
self.fieldNature = 'Scalar'
else:
self.im(' - Vectorial field detected: '+str(self.flds.shape[0])+' fields')
self.fieldNature ='Vectorial'
self.im(' -> '+self.fieldType)
self.im('Reading and resampling operations done!')
def stag2VTU(self,fname=None,path='./',ASCII=False,verbose=True):
""" Extension of the stagVTK package, directly available on stagData !
This function creat '.vtu' or 'xdmf/h5' file readable with Paraview to efficiently
visualize 3D data contain in a stagData object. This function works directly
on non overlapping stagData object.
Note also that the internal field stagData.slayers of the stagData object
must be filled.
<i> : fname = str, name of the exported file without any extention
path = str, path where you want to export your new .vtu file.
[Default: path='./']
ASCII = bool, if True, the .vtu file will be write in ASCII mode
if not, in binary mode. [Default, ASCII=True]
"""
self.im('Requested: Build VTU from StagData object')
if self.geometry == 'cart2D' or self.geometry == 'annulus':
raise VisuGridGeometryError(self.geometry,'cart3D or yy')
if fname == None:
import time
(y,m,d,h,mins,secs,bin1,bin2,bin3) = time.localtime()
fname = self.fname+'_'+str(d)+'-'+str(m)+'-'+str(y)+'_'+str(h)+'-'+str(mins)+'-'+str(secs)
self.im('Automatic file name attribution: '+fname)
#Importation of the stagVTK package
from .stagVTK import stag2VTU
stag2VTU(fname,self,path,ASCII=ASCII,verbose=verbose)
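# Typical workflow sketch for the geometry classes below (the instance name, file
# name and paths are hypothetical):
#
#     sdat = StagCartesianGeometry(geometry='cart3D')
#     sdat.stagImport('./output/', 'test_t00500', resampling=[2, 2, 1])
#     sdat.stagProcessing()
#     sdat.stag2VTU(fname='test_t00500', path='./vtu/')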
class StagCartesianGeometry(MainStagObject):
"""
Defines the StagCartesianGeometry object, derived from MainStagObject
This object is conditionally inherited in StagData.
"""
def __init__(self,geometry):
super().__init__() # inherit all the methods and properties from MainStagObject
self.geometry = geometry
# ----- Cartesian 2D and 3D geometries ----- #
self.XYZind = [] #Matrix of good index after the mesh operation
self.x = [] #Matrix of X coordinates meshed
self.y = [] #Matrix of Y coordinates meshed
self.z = [] #Matrix of Z coordinates meshed
self.v = [] #Matrix of scalar field (or norm of velocity)
self.vx = [] #Matrix of x-component of the velocity field for Cartesian grids
self.vy = [] #Matrix of y-component of the velocity field for Cartesian grids
self.vz = [] #Matrix of z-component of the velocity field for Cartesian grids
self.P = [] #Matrix of Pressure field for Cartesian grids
def stagProcessing(self):
"""
This function processes stag data according to a Cartesian geometry.
"""
self.im('Processing stag Data:')
self.im(' - Grid Geometry')
if self.geometry == 'cart2D':
self.im(' - 2D cartesian grid geometry')
else:
self.im(' - 3D cartesian grid geometry')
(self.x,self.y,self.z) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords,indexing='ij')
#Same operation but on index matrix:
(Xind,Yind,Zind) = np.meshgrid(self.xind,self.yind,self.zind, indexing='ij')
Xind = Xind.reshape(Xind.shape[0]*Xind.shape[1]*Xind.shape[2])
Yind = Yind.reshape(Yind.shape[0]*Yind.shape[1]*Yind.shape[2])
Zind = Zind.reshape(Zind.shape[0]*Zind.shape[1]*Zind.shape[2])
self.XYZind = np.multiply(np.multiply(Xind,Yind),Zind)
# Application of redFlag on index matrix:
goodIndex = np.array(range(self.nx0*self.ny0*self.nz0))
goodIndex = goodIndex[np.array(self.XYZind,dtype=bool)]
#Processing of the field according to its scalar or vectorial nature:
if self.fieldNature == 'Scalar':
self.im(' - Build data grid for scalar field')
(Nx, Ny, Nz) = self.header.get('nts')
V = self.flds[0,:,:,:,0].reshape(Nx*Ny*Nz)
self.v = V[goodIndex].reshape(self.nx,self.ny,self.nz)
#Creation of empty vectorial fields arrays:
self.vx = np.array(self.vx)
self.vy = np.array(self.vy)
self.vz = np.array(self.vz)
self.P = np.array(self.P)
elif self.fieldNature == 'Vectorial':
self.im(' - Build data grid for vectorial field')
(Nx, Ny, Nz) = self.header.get('nts')
temp_vx = self.flds[0][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
temp_vy = self.flds[1][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
temp_vz = self.flds[2][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
temp_P = self.flds[3][0:Nx,0:Ny,:].reshape(Nx*Ny*Nz)
self.vx = temp_vx[goodIndex].reshape(self.nx,self.ny,self.nz)
self.vy = temp_vy[goodIndex].reshape(self.nx,self.ny,self.nz)
self.vz = temp_vz[goodIndex].reshape(self.nx,self.ny,self.nz)
self.P = temp_P[goodIndex].reshape(self.nx,self.ny,self.nz)
self.v = np.sqrt(self.vx**2+self.vy**2+self.vz**2) #the norm
# == Processing Finish !
self.im('Processing of stag data done!')
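    # Example (sketch): once the binary output has been read into the object (the reader is
    # not shown in this excerpt), the Cartesian pipeline is simply
    #   sdat.stagProcessing()          # meshes x/y/z and fills v (or vx, vy, vz, P)
    #   sdat.stag2VTU('run_00100')     # hypothetical file name; 3D Cartesian or YY only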
class StagYinYangGeometry(MainStagObject):
"""
Secondary geom class
"""
def __init__(self):
super().__init__() # inherit all the methods and properties from MainStagObject
self.geometry = 'yy'
# ----- Yin Yang geometry ----- #
self.X = [] #Matrix of X coordinates meshed
self.Y = [] #Matrix of Y coordinates meshed
self.Z = [] #Matrix of Z coordinates meshed
self.layers = [] #matrix of layer's index meshed
self.XYZind = [] #Matrix of good index after the mesh operation
self.r1 = [] #Matrix of the radius of points for Yin grid
self.r2 = [] #Matrix of the radius of points for Yang grid
self.x1_overlap = []#Yin grid x matrix - overlapping grids:
self.y1_overlap = []#Yin grid y matrix
self.z1_overlap = []#Yin grid z matrix
self.x2_overlap = []#Yang grid x matrix
self.y2_overlap = []#Yang grid y matrix
self.z2_overlap = []#Yang grid z matrix
self.x1 = [] #Yin grid x matrix - non-overlapping grids:
self.y1 = [] #Yin grid y matrix
self.z1 = [] #Yin grid z matrix
self.x2 = [] #Yang grid x matrix
self.y2 = [] #Yang grid y matrix
self.z2 = [] #Yang grid z matrix
        self.r1 = []     #Matrix of spherical coordinates r for the Yin grid
        self.theta1 = [] #Matrix of spherical coordinates theta for the Yin grid
        self.phi1 = []   #Matrix of spherical coordinates phi for the Yin grid
        self.r2 = []     #Matrix of spherical coordinates r for the Yang grid
        self.theta2 = [] #Matrix of spherical coordinates theta for the Yang grid
        self.phi2 = []   #Matrix of spherical coordinates phi for the Yang grid
self.redFlags = [] #Matrix of wrong index in YY (overlaping pbs)
self.x1_redf = [] #Matrix of redflag x-coordinates for Yin grid
self.y1_redf = [] #Matrix of redflag y-coordinates for Yin grid
self.z1_redf = [] #Matrix of redflag z-coordinates for Yin grid
self.x2_redf = [] #Matrix of redflag x-coordinates for Yang grid
self.y2_redf = [] #Matrix of redflag y-coordinates for Yang grid
self.z2_redf = [] #Matrix of redflag z-coordinates for Yang grid
self.redFlags_layers = [] #Matrix of layer's index meshed for redFlags points
#For scalar field only:
self.v1_overlap = []#Complete Yin field, corresponding to over '_overlap' matrices
self.v2_overlap = []#Complete Yang field, corresponding to over '_overlap' matrices
self.v1 = [] #Matrix of scalar field for the Yin grid (or norm of vectorial on Yin)
self.v2 = [] #Matrix of scalar field for the Yang grid (or norm of vectorial on Yang)
#For vectorial field only:
self.vx1_overlap= [] #Complete vx Yin field, corresponding to over '_overlap' matrices
self.vx2_overlap= [] #Complete vx Yang field, corresponding to over '_overlap' matrices
self.vy1_overlap= [] #Complete vy Yin field
self.vy2_overlap= [] #Complete vy Yang field
self.vz1_overlap= [] #Complete vz Yin field
self.vz2_overlap= [] #Complete vz Yang field
self.P1_overlap = [] #Complete P Yin field
self.P2_overlap = [] #Complete P Yang field
self.vx1 = [] #Matrix of x-component of the vectorial field for the Yin grid
self.vx2 = [] #Matrix of x-component of the vectorial field for the Yang grid
self.vy1 = [] #Matrix of y-component of the vectorial field for the Yin grid
self.vy2 = [] #Matrix of y-component of the vectorial field for the Yang grid
self.vz1 = [] #Matrix of z-component of the vectorial field for the Yin grid
self.vz2 = [] #Matrix of z-component of the vectorial field for the Yang grid
self.P1 = [] #Matrix of the Pressure field for the Yin grid
self.P2 = [] #Matrix of the Pressure field for the Yang grid
self.vr1 = [] #Matrix of radial component of the vectorial field for the Yin grid
self.vtheta1 = [] #Matrix of theta component of the vectorial field for the Yin grid
self.vphi1 = [] #Matrix of phi component of the vectorial field for the Yin grid
self.vr2 = [] #Matrix of radial component of the vectorial field for the Yang grid
self.vtheta2 = [] #Matrix of theta component of the vectorial field for the Yang grid
self.vphi2 = [] #Matrix of phi component of the vectorial field for the Yang grid
def stagProcessing(self, build_redflag_point=False, build_overlapping_field=False):
""" This function process stag data according to a YinYang geometry.
If build_redflag_point == True, build coordinates matrices of the
redflag points and fills fields x-y-z_redf
If build_overlapping_field == True, build ghost points on YY corner"""
self.im('Processing stag Data:')
self.im(' - Grid Geometry')
self.im(' - Yin-Yang grid geometry')
self.im(' - Preprocessing of coordinates matrices')
(self.X,self.Y,self.Z) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords, indexing='ij')
self.X = self.X.reshape(self.X.shape[0]*self.X.shape[1]*self.X.shape[2])
self.Y = self.Y.reshape(self.Y.shape[0]*self.Y.shape[1]*self.Y.shape[2])
self.Z = self.Z.reshape(self.Z.shape[0]*self.Z.shape[1]*self.Z.shape[2])
#Same operation but on layers matrix:
(self.bin, self.BIN, self.layers) = np.meshgrid(self.x_coords,self.y_coords,self.slayers, indexing='ij')
self.layers = self.layers.reshape(self.layers.shape[0]*self.layers.shape[1]*self.layers.shape[2])
self.bin = None
self.BIN = None
#Same operation but on index matrix:
(Xind,Yind,Zind) = np.meshgrid(self.xind,self.yind,self.zind, indexing='ij')
Xind = Xind.reshape(Xind.shape[0]*Xind.shape[1]*Xind.shape[2])
Yind = Yind.reshape(Yind.shape[0]*Yind.shape[1]*Yind.shape[2])
Zind = Zind.reshape(Zind.shape[0]*Zind.shape[1]*Zind.shape[2])
self.XYZind = np.multiply(np.multiply(Xind,Yind),Zind)
#self.XYZind is built during the construction of the YY grid and follows
#the good index for the field read here (= readFlag in index)
#Functions for the 3D spherical YY grids
def rectangular2YY(x,y,z,rcmb):
"""Returns the geometry of the two cartesian blocks corresponding
to the overlapping Yin (x1,y1,z1) and Yang (x2,y2,z2) grids
from the single block contained in the StagYY binary outputs.
after bending cartesian boxes"""
#Spherical coordinates:
R = z+rcmb
lat = np.pi/4 - x
lon = y - 3*np.pi/4
#Yin grid
x1 = np.multiply(np.multiply(R,np.cos(lat)),np.cos(lon))
y1 = np.multiply(np.multiply(R,np.cos(lat)),np.sin(lon))
z1 = np.multiply(R,np.sin(lat))
#Yang grid
x2 = -x1
y2 = z1
z2 = y1
return ((x1,y1,z1),(x2,y2,z2))
def cartesian2spherical(x1,y1,z1,x2,y2,z2):
"""Converts cartesian coordinates of YY grid into spherical coordinates"""
#Yin grid
r1 = np.sqrt(x1**2+y1**2+z1**2)
theta1 = np.arctan2(np.sqrt(x1**2+y1**2),z1)
phi1 = np.arctan2(y1,x1)
#Yang grid
r2 = np.sqrt(x2**2+y2**2+z2**2)
theta2 = np.arctan2(np.sqrt(x2**2+y2**2),z2)
phi2 = np.arctan2(y2,x2)
return ((r1,theta1,phi1),(r2,theta2,phi2))
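        # Worked sketch: with R = z + rcmb, a Yin point at x = 0 (lat = pi/4) and
        # y = 3*pi/4 (lon = 0) maps to (x1, y1, z1) = (R*cos(pi/4), 0, R*sin(pi/4));
        # the matching Yang point is simply (-x1, z1, y1). The two grids overlap, and
        # the duplicated corners are flagged and removed below via self.redFlags.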
#Creation of Yin-Yang grids:
self.im(' - Creation of the Yin-Yang grids')
((self.x1_overlap,self.y1_overlap,self.z1_overlap),(self.x2_overlap,self.y2_overlap,self.z2_overlap)) = \
rectangular2YY(self.X,self.Y,self.Z,self.rcmb)
((self.r1,self.theta1,self.phi1),(self.r2,self.theta2,self.phi2)) = \
cartesian2spherical(self.x1_overlap,self.y1_overlap,self.z1_overlap,self.x2_overlap,self.y2_overlap,self.z2_overlap)
        ##Cut off the corners of grid #1 that overlap grid #2:
##Build Redflags on wrong coordinates
theta12 = np.arccos(np.multiply(np.sin(self.theta1),np.sin(self.phi1)))
self.redFlags = np.where(np.logical_or(np.logical_and((theta12>np.pi/4),(self.phi1>np.pi/2)),\
np.logical_and((theta12<3*np.pi/4),(self.phi1<-np.pi/2))))[0]
if build_redflag_point == True:
print(' - Building RedFlags Points...')
((self.x1_redf,self.y1_redf,self.z1_redf),(self.x2_redf,self.y2_redf,self.z2_redf)) = (([],[],[]),([],[],[]))
self.redFlags_layers = []
for ind in self.redFlags:
self.x1_redf.append(self.x1_overlap[ind])
self.y1_redf.append(self.y1_overlap[ind])
self.z1_redf.append(self.z1_overlap[ind])
self.x2_redf.append(self.x2_overlap[ind])
self.y2_redf.append(self.y2_overlap[ind])
self.z2_redf.append(self.z2_overlap[ind])
self.redFlags_layers.append(self.layers[ind])
#Assembly Yin and Yang grids
self.im(' - Assembly Yin and Yang grids')
goodIndex = np.ones(len(self.x1_overlap),dtype=bool)
goodIndex[np.array(self.redFlags)] = False
self.x1 = self.x1_overlap[goodIndex]
self.y1 = self.y1_overlap[goodIndex]
self.z1 = self.z1_overlap[goodIndex]
self.x2 = self.x2_overlap[goodIndex]
self.y2 = self.y2_overlap[goodIndex]
self.z2 = self.z2_overlap[goodIndex]
self.r1 = self.r1[goodIndex]
self.r2 = self.r2[goodIndex]
self.theta1 = self.theta1[goodIndex]
self.theta2 = self.theta2[goodIndex]
self.phi1 = self.phi1[goodIndex]
self.phi2 = self.phi2[goodIndex]
self.layers = self.layers[goodIndex]
        self.layers = self.layers.astype(int)
# Extract the scalar or the vectorial field V: V1 on Yin, V2 on Yang
self.im(' - Construction of the appropriated vectorial field:')
## Application of redFlag on index matrix:
## return good index for the vectorial field (goodIndex):
goodIndex = np.array(range(self.nx0*self.ny0*self.nz0))
goodIndex = goodIndex[np.array(self.XYZind,dtype=bool)]
#Two different types of field: Scalar or Vectorial
if self.fieldNature == 'Scalar':
self.im(' - Build data for the entire grids')
tempField = self.flds[0].reshape(self.flds.shape[1]*self.flds.shape[2]*self.flds.shape[3],2)
V1 = tempField[:,0]
V2 = tempField[:,1]
if build_overlapping_field:
self.im(' - Overlapping field requested')
self.v1_overlap = [] #Yin
self.v2_overlap = [] #Yang
for gid in goodIndex:
self.v1_overlap.append(V1[gid])
self.v2_overlap.append(V2[gid])
#Apply redFlags on goodindex:
self.im(' - Processing of redFlags')
mask = np.ones(len(goodIndex),dtype=bool) # all True
mask[np.array(self.redFlags)] = False
#Creation of non overlapping data matrices for Yin and Yang
goodIndex = goodIndex[mask]
self.v1 = np.array(V1)[goodIndex]
self.v2 = np.array(V2)[goodIndex]
#Creation of empty vectorial fields arrays:
self.vx1 = np.array(self.vx1)
self.vy1 = np.array(self.vy1)
self.vz1 = np.array(self.vz1)
self.P1 = np.array(self.P1)
self.vr1 = np.array(self.vr1)
self.vtheta1 = np.array(self.vtheta1)
self.vphi1 = np.array(self.vphi1)
self.vx2 = np.array(self.vx2)
self.vy2 = np.array(self.vy2)
self.vz2 = np.array(self.vz2)
self.P2 = np.array(self.P2)
self.vr2 = np.array(self.vr2)
self.vtheta2 = np.array(self.vtheta2)
self.vphi2 = np.array(self.vphi2)
elif self.fieldNature == 'Vectorial':
self.im(' - Build data for the entire grids')
(Nx, Ny, Nz) = self.header.get('nts')
tempField_vx = self.flds[0][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
tempField_vy = self.flds[1][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
tempField_vz = self.flds[2][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
tempField_P = self.flds[3][0:Nx,0:Ny,:,:].reshape(Nx*Ny*Nz,2)
VX1 = tempField_vx[:,0]
VX2 = tempField_vx[:,1]
VY1 = tempField_vy[:,0]
VY2 = tempField_vy[:,1]
VZ1 = tempField_vz[:,0]
VZ2 = tempField_vz[:,1]
P1 = tempField_P[:,0]
P2 = tempField_P[:,1]
#Transform velocities from internal Yin or Yang coord -> Cartesian
self.im(' - Merging of velocities: YY -> Cartesian')
tx_coord = self.header.get('e1_coord') #temps, will be immediately deleted after use
ty_coord = self.header.get('e2_coord')
tz_coord = self.header.get('e3_coord')
(tX,tY,tZ) = np.meshgrid(tx_coord,ty_coord,tz_coord, indexing='ij')
tX = tX.reshape(Nx*Ny*Nz)
tY = tY.reshape(Nx*Ny*Nz)
tZ = tZ.reshape(Nx*Ny*Nz)
#R = tZ + self.rcmb
lat = np.pi/4 - tX
lon = tY - 3*np.pi/4
# --- on Yin grid ---
Vtheta = VX1
Vphi = VY1
Vr = VZ1
VX1 = Vtheta*np.sin(lat)*np.cos(lon) - Vphi*np.sin(lon) + Vr*np.cos(lat)*np.cos(lon)
VY1 = Vtheta*np.sin(lat)*np.sin(lon) + Vphi*np.cos(lon) + Vr*np.cos(lat)*np.sin(lon)
VZ1 = -1*Vtheta*np.cos(lat) + Vr*np.sin(lat)
vr1 = Vr
# --- on Yang grid ---
Vtheta = VX2
Vphi = VY2
Vr = VZ2
VX2 = -1*(Vtheta*np.sin(lat)*np.cos(lon) - Vphi*np.sin(lon) + Vr*np.cos(lat)*np.cos(lon))
VZ2 = Vtheta*np.sin(lat)*np.sin(lon) + Vphi*np.cos(lon) + Vr*np.cos(lat)*np.sin(lon)
VY2 = -1*Vtheta*np.cos(lat) + Vr*np.sin(lat)
vr2 = Vr
#Discharge of the memory
(tX, tY, tZ) = (None, None, None)
(Vtheta, Vphi, Vr) = (None, None, None)
if build_overlapping_field:
self.im(' - Overlapping field requested')
#Re-sampling
self.vx1_overlap = [] #Yin
self.vx2_overlap = [] #Yang
self.vy1_overlap = []
self.vy2_overlap = []
self.vz1_overlap = []
self.vz2_overlap = []
self.P1_overlap = []
self.P2_overlap = []
for gid in goodIndex:
self.vx1_overlap.append(VX1[gid])
self.vx2_overlap.append(VX2[gid])
self.vy1_overlap.append(VY1[gid])
self.vy2_overlap.append(VY2[gid])
self.vz1_overlap.append(VZ1[gid])
self.vz2_overlap.append(VZ2[gid])
self.P1_overlap.append(P1[gid])
self.P2_overlap.append(P2[gid])
#Apply redFlags on goodindex:
self.im(' - Processing of redFlags')
mask = np.ones(len(goodIndex),dtype=bool) # all True
mask[np.array(self.redFlags)] = False
goodIndex = goodIndex[mask]
self.vx1 = VX1[goodIndex]
self.vy1 = VY1[goodIndex]
self.vz1 = VZ1[goodIndex]
self.vx2 = VX2[goodIndex]
self.vy2 = VY2[goodIndex]
self.vz2 = VZ2[goodIndex]
self.P1 = P1[goodIndex]
self.P2 = P2[goodIndex]
#Radial velocities
self.vr1 = vr1[goodIndex]
self.vr2 = vr2[goodIndex]
            #Transformation of velocities from cartesian to spherical:
self.im(' - Conversion of Velocities: Cartesian -> Spherical')
lat1 = np.arctan2(np.sqrt(self.x1**2+self.y1**2),self.z1)
lon1 = np.arctan2(self.y1,self.x1)
lat2 = np.arctan2(np.sqrt(self.x2**2+self.y2**2),self.z2)
lon2 = np.arctan2(self.y2,self.x2)
Vlat1 = self.vx1*(np.cos(lon1)*np.cos(lat1)) + self.vy1*(np.sin(lon1)*np.cos(lat1)) - self.vz1*(np.sin(lat1))
Vlon1 = -self.vx1*(np.sin(lon1)) + self.vy1*(np.cos(lon1))
Vlat2 = self.vx2*(np.cos(lon2)*np.cos(lat2)) + self.vy2*(np.sin(lon2)*np.cos(lat2)) - self.vz2*(np.sin(lat2))
Vlon2 = -self.vx2*(np.sin(lon2)) + self.vy2*(np.cos(lon2))
#Conservation of the ndarray-type:
self.vr1 = np.array(self.vr1)
self.vr2 = np.array(self.vr2)
self.vtheta1 = Vlat1
self.vtheta2 = Vlat2
self.vphi1 = Vlon1
self.vphi2 = Vlon2
#fills the .v1 and .v2 by the norm of the velocity
self.v1 = np.sqrt(self.vx1**2+self.vy1**2+self.vz1**2) #the norm
self.v2 = np.sqrt(self.vx2**2+self.vy2**2+self.vz2**2) #the norm
# == Processing Finish !
self.im('Processing of stag data done!')
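    # Example (sketch): a typical Yin-Yang workflow after the binary file has been
    # read into the object (reader not shown in this excerpt):
    #   sdat.stagProcessing(build_overlapping_field=False)
    #   v_yin, v_yang = sdat.v1, sdat.v2                  # non-overlapping fields
    #   r, theta, phi = sdat.r1, sdat.theta1, sdat.phi1   # spherical coordinates (Yin)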
class StagSphericalGeometry(MainStagObject):
"""
Defines the StagSphericalGeometry object, derived from MainStagObject
This object is conditionally inherited in StagData.
"""
def __init__(self,geometry):
super().__init__() # inherit all the methods and properties from MainStagObject
self.geometry = geometry
# ----- Cartesian 2D and 3D geometries ----- #
self.x = [] #Matrix of X coordinates meshed (in spherical shape)
self.y = [] #Matrix of Y coordinates meshed (in spherical shape)
self.z = [] #Matrix of Z coordinates meshed (in spherical shape)
        self.xc = []     #Matrix of cartesian x coordinates (in cartesian shape)
        self.yc = []     #Matrix of cartesian y coordinates (in cartesian shape)
        self.zc = []     #Matrix of cartesian z coordinates (in cartesian shape)
        self.r = []      #Matrix of spherical coordinates r
        self.theta = []  #Matrix of spherical coordinates theta
        self.phi = []    #Matrix of spherical coordinates phi
self.v = [] #Matrix of scalar field (or norm of vectorial)
self.vx = [] #Matrix of x-component of the vectorial field for Cartesian grids
self.vy = [] #Matrix of y-component of the vectorial field for Cartesian grids
self.vz = [] #Matrix of z-component of the vectorial field for Cartesian grids
self.vtheta = [] #Matrix of theta component of the vectorial field
self.vphi = [] #Matrix of phi component of the vectorial field
self.vr = [] #Matrix of radial component of the vectorial field
self.P = [] #Matrix of Pressure field for Cartesian grids
def stagProcessing(self):
"""
This function process stag data and returns the appropriated coords
matrices (1 matrix Yin and 1 matrix for Yqng coords) as well as matrix
of the reading field for Yin and for Yang.
If build_redflag_point == True, build coordinates matrices of the
redflag points and fills fields x-y-z_redf
If build_overlapping_field == True, build ghost points on YY corner
"""
self.im('Processing stag Data:')
self.im(' - Grid Geometry')
self.im(' - 3D cartesian grid geometry')
(self.x,self.y,self.z) = np.meshgrid(self.x_coords,self.y_coords,self.z_coords,indexing='ij')
#save cartesian grid geometry
self.xc = self.x
self.yc = self.y
self.zc = self.z
#Same operation but on index matrix:
(Xind,Yind,Zind) = np.meshgrid(self.xind,self.yind,self.zind, indexing='ij')
Xind = Xind.reshape(Xind.shape[0]*Xind.shape[1]*Xind.shape[2])
Yind = Yind.reshape(Yind.shape[0]*Yind.shape[1]*Yind.shape[2])
Zind = Zind.reshape(Zind.shape[0]*Zind.shape[1]*Zind.shape[2])
self.XYZind = np.multiply( | np.multiply(Xind,Yind) | numpy.multiply |
"""
Example setup and run script for the 3d stimulation and long-term cooling example.
Main differences from the example 1 setup are related to geometry, BCs, wells and
gravity.
"""
import scipy.sparse.linalg as spla
import numpy as np
import porepy as pp
import logging
import time
from typing import Tuple, Dict
from porepy.models.contact_mechanics_biot_model import ContactMechanicsBiot
logging.basicConfig(level=logging.INFO)
logger = logging.getLogger(__name__)
class Model(ContactMechanicsBiot):
"""
This class provides the parameter specification differing from examples 1 and 2.
"""
def __init__(self, params: Dict):
super().__init__(params)
# Set additional case specific fields
self.scalar_scale = 1e7
self.length_scale = 15
self.file_name = self.params["file_name"]
self.folder_name = self.params["folder_name"]
self.export_fields = [
"u_exp",
"p_exp",
"p_minus_ph",
"traction_exp",
"aperture_exp",
"u_global",
"cell_centers",
"well",
"u_exp_0",
"aperture_0",
]
# Initial aperture, a_0
self.initial_aperture = 1e-3 / self.length_scale
# Dilation angle
self._dilation_angle = np.radians(5.0)
self.params = params
self.mesh_args = params.get("mesh_args", None)
def fractures(self):
"""
Define the two fractures.
The first fracture is the one where injection takes place.
"""
n_points = 4
# Size
s = 12
# Major axis rotation
major = np.pi / 4
# Dip
dip_1, dip_2 = np.pi / 4, np.pi / 4
# Strike:
# The values below imply dip about the y and x axis, respectively
strike, strike_2 = np.pi / 2, 0
f_1 = pp.EllipticFracture(
np.array([-10, 0, 0]), s, s, major, strike, dip_1, num_points=n_points
)
f_2 = pp.EllipticFracture(
np.array([10, 0, 0]), s, s, major, strike_2, dip_2, num_points=n_points
)
self.fracs = [f_1, f_2]
def create_grid(self):
"""
Method that creates the GridBucket of a 3D domain with the two fractures
defined by self.fractures().
        The grid bucket represents the mixed-dimensional grid.
"""
self.fractures()
# Define the domain
size = 80
self.box = {
"xmin": -size,
"xmax": size,
"ymin": -size,
"ymax": size,
"zmin": -size,
"zmax": size,
}
# Make a fracture network
self.network = pp.FractureNetwork3d(self.fracs, domain=self.box)
# Generate the mixed-dimensional mesh
# write_fractures_to_csv(self)
gb = self.network.mesh(self.mesh_args)
pp.contact_conditions.set_projections(gb)
self.gb = gb
self.Nd = self.gb.dim_max()
# Tag the wells
self._tag_well_cells()
self.n_frac = len(gb.grids_of_dimension(self.Nd - 1))
self.update_all_apertures(to_iterate=False)
self.update_all_apertures()
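    # Example (sketch; parameter values are illustrative and the runner call is an
    # assumption based on the PorePy API, not part of this script):
    #   params = {"file_name": "cooling", "folder_name": "results",
    #             "mesh_args": {"mesh_size_frac": 10, "mesh_size_min": 2,
    #                           "mesh_size_bound": 25}}
    #   m = Model(params)
    #   pp.run_time_dependent_model(m, params)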
def set_mechanics_parameters(self):
""" Mechanical parameters.
Note that we divide the momentum balance equation by self.scalar_scale.
"""
gb = self.gb
for g, d in gb:
if g.dim == self.Nd:
# Rock parameters
rock = self.rock
lam = rock.LAMBDA * np.ones(g.num_cells) / self.scalar_scale
mu = rock.MU * np.ones(g.num_cells) / self.scalar_scale
C = pp.FourthOrderTensor(mu, lam)
bc = self.bc_type_mechanics(g)
bc_values = self.bc_values_mechanics(g)
sources = self.source_mechanics(g)
pp.initialize_data(
g,
d,
self.mechanics_parameter_key,
{
"bc": bc,
"bc_values": bc_values,
"source": sources,
"fourth_order_tensor": C,
"biot_alpha": self.biot_alpha(g),
"time_step": self.time_step,
},
)
elif g.dim == self.Nd - 1:
pp.initialize_data(
g,
d,
self.mechanics_parameter_key,
{
"friction_coefficient": 0.5,
"contact_mechanics_numerical_parameter": 1e1,
"dilation_angle": self._dilation_angle,
"time": self.time,
},
)
for e, d in gb.edges():
mg = d["mortar_grid"]
# Parameters for the surface diffusion. Not used as of now.
pp.initialize_data(
mg,
d,
self.mechanics_parameter_key,
{"mu": self.rock.MU, "lambda": self.rock.LAMBDA},
)
def set_scalar_parameters(self):
""" Set parameters for the scalar (pressure) equation.
"""
for g, d in self.gb:
a = self.aperture(g)
specific_volumes = self.specific_volumes(g)
# Define boundary conditions for flow
bc = self.bc_type_scalar(g)
# Set boundary condition values
bc_values = self.bc_values_scalar(g)
biot_coefficient = self.biot_alpha(g)
compressibility = self.fluid.COMPRESSIBILITY
mass_weight = compressibility * self.porosity(g)
if g.dim == self.Nd:
mass_weight += (
biot_coefficient - self.porosity(g)
) / self.rock.BULK_MODULUS
mass_weight *= self.scalar_scale * specific_volumes
g_rho = (
-pp.GRAVITY_ACCELERATION
* self.density(g)
/ self.scalar_scale
* self.length_scale
)
gravity = np.zeros((self.Nd, g.num_cells))
gravity[self.Nd - 1, :] = g_rho
pp.initialize_data(
g,
d,
self.scalar_parameter_key,
{
"bc": bc,
"bc_values": bc_values,
"mass_weight": mass_weight,
"biot_alpha": biot_coefficient,
"time_step": self.time_step,
"ambient_dimension": self.Nd,
"source": self.source_scalar(g),
# + self.dVdt_source(g, d, self.scalar_parameter_key),
"vector_source": gravity.ravel("F"),
},
)
for e, data_edge in self.gb.edges():
g_l, g_h = self.gb.nodes_of_edge(e)
params_l = self.gb.node_props(g_l)[pp.PARAMETERS][self.scalar_parameter_key]
mg = data_edge["mortar_grid"]
a = mg.slave_to_mortar_avg() * self.aperture(g_l)
grho = (
mg.slave_to_mortar_avg()
* params_l["vector_source"][self.Nd - 1 :: self.Nd]
)
gravity = np.zeros((self.Nd, mg.num_cells))
gravity[self.Nd - 1, :] = grho * a / 2
data_edge = pp.initialize_data(
e,
data_edge,
self.scalar_parameter_key,
{"vector_source": gravity.ravel("F")},
)
self.set_permeability()
def aperture(self, g, from_iterate=True) -> np.ndarray:
"""
Obtain the aperture of a subdomain. See update_all_apertures.
"""
if from_iterate:
return self.gb.node_props(g)[pp.STATE][pp.ITERATE]["aperture"]
else:
return self.gb.node_props(g)[pp.STATE]["aperture"]
def specific_volumes(self, g, from_iterate=True) -> np.ndarray:
"""
Obtain the specific volume of a subdomain. See update_all_apertures.
"""
if from_iterate:
return self.gb.node_props(g)[pp.STATE][pp.ITERATE]["specific_volume"]
else:
return self.gb.node_props(g)[pp.STATE]["specific_volume"]
def update_all_apertures(self, to_iterate=True):
"""
To better control the aperture computation, it is done for the entire gb by a
single function call. This also allows us to ensure the fracture apertures
are updated before the intersection apertures are inherited.
"""
gb = self.gb
for g, d in gb:
apertures = np.ones(g.num_cells)
if g.dim == (self.Nd - 1):
# Initial aperture
apertures *= self.initial_aperture
# Reconstruct the displacement solution on the fracture
g_h = gb.node_neighbors(g)[0]
data_edge = gb.edge_props((g, g_h))
if pp.STATE in data_edge:
u_mortar_local = self.reconstruct_local_displacement_jump(
data_edge, from_iterate=to_iterate
)
apertures -= u_mortar_local[-1].clip(max=0)
if to_iterate:
pp.set_iterate(
d,
{"aperture": apertures.copy(), "specific_volume": apertures.copy()},
)
else:
state = {
"aperture": apertures.copy(),
"specific_volume": apertures.copy(),
}
pp.set_state(d, state)
for g, d in gb:
parent_apertures = []
num_parent = []
if g.dim < (self.Nd - 1):
for edges in gb.edges_of_node(g):
e = edges[0]
g_h = e[0]
if g_h == g:
g_h = e[1]
if g_h.dim == (self.Nd - 1):
d_h = gb.node_props(g_h)
if to_iterate:
a_h = d_h[pp.STATE][pp.ITERATE]["aperture"]
else:
a_h = d_h[pp.STATE]["aperture"]
a_h_face = np.abs(g_h.cell_faces) * a_h
mg = gb.edge_props(e)["mortar_grid"]
# Assumes g_h is master
a_l = (
mg.mortar_to_slave_avg()
* mg.master_to_mortar_avg()
* a_h_face
)
parent_apertures.append(a_l)
num_parent.append(np.sum(mg.mortar_to_slave_int().A, axis=1))
else:
raise ValueError("Intersection points not implemented in 3d")
parent_apertures = np.array(parent_apertures)
num_parents = np.sum(np.array(num_parent), axis=0)
apertures = np.sum(parent_apertures, axis=0) / num_parents
specific_volumes = np.power(apertures, self.Nd - g.dim)
if to_iterate:
pp.set_iterate(
d,
{
"aperture": apertures.copy(),
"specific_volume": specific_volumes.copy(),
},
)
else:
state = {
"aperture": apertures.copy(),
"specific_volume": specific_volumes.copy(),
}
pp.set_state(d, state)
return apertures
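    # Worked example (illustrative numbers): with length_scale = 15 the initial
    # aperture is a_0 = 1e-3 / 15 ~ 6.7e-5 (scaled). A fracture cell whose normal
    # displacement jump is -2e-4 (opening) then gets
    #   a = a_0 - (-2e-4) ~ 2.7e-4,
    # and an intersection cell (dim = Nd - 2) inheriting that aperture gets a
    # specific volume of a**2.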
def set_permeability(self):
"""
Cubic law in fractures, rock permeability in the matrix.
If "blocking_perm" is present in self.params, this value is used for
Fracture 2.
"""
# Viscosity has units of Pa s, and is consequently divided by the scalar scale.
viscosity = self.fluid.dynamic_viscosity() / self.scalar_scale
gb = self.gb
key = self.scalar_parameter_key
from_iterate = True
blocking_perm = self.params.get("blocking_perm", None)
for g, d in gb:
if g.dim < self.Nd:
# Set fracture permeability
specific_volumes = self.specific_volumes(g, from_iterate)
if d["node_number"] == 1 or blocking_perm is None:
# Use cubic law in fractures. First compute the unscaled
# permeability
apertures = self.aperture(g, from_iterate=from_iterate)
apertures_unscaled = apertures * self.length_scale
k = np.power(apertures_unscaled, 2) / 12 / viscosity
else:
# Blocking and intersection
k = blocking_perm
d[pp.PARAMETERS][key]["perm_nu"] = k
# Multiply with the cross-sectional area
k = k * specific_volumes
# Divide by fluid viscosity and scale back
kxx = k / self.length_scale ** 2
else:
# Use the rock permeability in the matrix
kxx = (
self.rock.PERMEABILITY
/ viscosity
* np.ones(g.num_cells)
/ self.length_scale ** 2
)
K = pp.SecondOrderTensor(kxx)
d[pp.PARAMETERS][key]["second_order_tensor"] = K
# Normal permeability inherited from the neighboring fracture g_l
for e, d in gb.edges():
mg = d["mortar_grid"]
g_l, _ = gb.nodes_of_edge(e)
data_l = gb.node_props(g_l)
a = self.aperture(g_l, from_iterate)
V = self.specific_volumes(g_l, from_iterate)
# We assume isotropic permeability in the fracture, i.e. the normal
# permeability equals the tangential one
k_s = data_l[pp.PARAMETERS][key]["second_order_tensor"].values[0, 0]
# Division through half the aperture represents taking the (normal) gradient
kn = mg.slave_to_mortar_int() * np.divide(k_s, a * V / 2)
pp.initialize_data(mg, d, key, {"normal_diffusivity": kn})
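    # Worked example (illustrative numbers only): an unscaled aperture of 1e-3 m gives
    # a cubic-law permeability of a**2 / 12 ~ 8.3e-8 m^2; in the loop above this is
    # additionally divided by the (scaled) viscosity, multiplied by the specific
    # volume, and rescaled by 1 / length_scale**2 before being stored in K.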
def biot_alpha(self, g) -> float:
if g.dim == self.Nd:
return self.params.get("biot_alpha", 0.7)
else:
# Used for the volume change term in the fracture. See DivU
return 1
def porosity(self, g) -> float:
if g.dim == self.Nd:
return 0.01
else:
return 1.0
def density(self, g, dp=None) -> np.ndarray:
""" Density computed from current pressure solution
taken from the previous iterate.
"""
if dp is None:
p_0 = self.scalar_scale * self.initial_scalar(g)
_, p_k, p_n = self._variable_increment(
g, self.scalar_variable, self.scalar_scale,
)
dp = p_k - p_0
rho_0 = 1e3 * (pp.KILOGRAM / pp.METER ** 3) * np.ones(g.num_cells)
rho = rho_0 * np.exp(dp * self.fluid.COMPRESSIBILITY)
return rho
def faces_to_fix(self, g):
"""
Identify three boundary faces to fix (u=0). This should allow us to assign
Neumann "background stress" conditions on the rest of the boundary faces.
"""
all_bf, *_ = self.domain_boundary_sides(g)
point = np.array(
[
[(self.box["xmin"] + self.box["xmax"]) / 2],
[(self.box["ymin"] + self.box["ymax"]) / 2],
[self.box["zmax"]],
]
)
distances = pp.distances.point_pointset(point, g.face_centers[:, all_bf])
indexes = np.argsort(distances)
faces = all_bf[indexes[: self.Nd]]
return faces
def _tag_well_cells(self):
"""
Tag well cells with unitary values, positive for injection cells and negative
for production cells.
"""
for g, d in self.gb:
tags = np.zeros(g.num_cells)
if g.dim < self.Nd:
point = np.array(
[
[(self.box["xmin"] + self.box["xmax"]) / 2],
[self.box["ymin"]],
[0],
]
)
distances = pp.distances.point_pointset(point, g.cell_centers)
indexes = np.argsort(distances)
if d["node_number"] == 1:
tags[indexes[-1]] = 1 # injection
g.tags["well_cells"] = tags
pp.set_state(d, {"well": tags.copy()})
def source_flow_rates(self) -> Tuple[int, int]:
"""
        The rate is given in l/s (i.e. 1e-3 m^3/s). Length scaling is also needed to
        convert from the scaled length to m.
The values returned depend on the simulation phase.
"""
t = self.time
tol = 1e-10
injection, production = 0, 0
if t > self.phase_limits[1] + tol and t < self.phase_limits[2] + tol:
injection = 60
production = 0
elif t > self.phase_limits[2] + tol:
injection, production = 0, 0
w = pp.MILLI * (pp.METER / self.length_scale) ** self.Nd
return injection * w, production * w
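    # Worked example (illustrative): with length_scale = 15 and Nd = 3,
    #   w = pp.MILLI * (pp.METER / 15) ** 3 ~ 3.0e-7,
    # so an injection rate of 60 l/s becomes ~1.8e-5 in scaled volume units per second.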
def bc_type_mechanics(self, g) -> pp.BoundaryConditionVectorial:
"""
We set Neumann values imitating an anisotropic background stress regime on all
but three faces, which are fixed to ensure a unique solution.
"""
all_bf, *_, bottom = self.domain_boundary_sides(g)
faces = self.faces_to_fix(g)
# write_fixed_faces_to_csv(g, faces, self)
bc = pp.BoundaryConditionVectorial(g, faces, "dir")
frac_face = g.tags["fracture_faces"]
bc.is_neu[:, frac_face] = False
bc.is_dir[:, frac_face] = True
return bc
def bc_type_scalar(self, g) -> pp.BoundaryCondition:
"""
We prescribe the pressure value at all external boundaries.
"""
# Define boundary regions
all_bf, *_ = self.domain_boundary_sides(g)
# pdb.set_trace()
return pp.BoundaryCondition(g, all_bf, "dir")
def bc_values_mechanics(self, g) -> np.ndarray:
"""
Lithostatic mechanical BC values.
"""
bc_values = np.zeros((g.dim, g.num_faces))
if np.isclose(self.time, self.phase_limits[0]):
return bc_values.ravel("F")
# Retrieve the boundaries where values are assigned
all_bf, east, west, north, south, top, bottom = self.domain_boundary_sides(g)
A = g.face_areas
# Domain centred at 1 km below surface
# Gravity acceleration
gravity = (
pp.GRAVITY_ACCELERATION
* self.rock.DENSITY
* self._depth(g.face_centers)
/ self.scalar_scale
)
we, sn, bt = 1.3, 0.6, 1
bc_values[0, west] = (we * gravity[west]) * A[west]
bc_values[0, east] = -(we * gravity[east]) * A[east]
bc_values[1, south] = (sn * gravity[south]) * A[south]
bc_values[1, north] = -(sn * gravity[north]) * A[north]
if self.Nd > 2:
bc_values[2, bottom] = (bt * gravity[bottom]) * A[bottom]
bc_values[2, top] = -(bt * gravity[top]) * A[top]
faces = self.faces_to_fix(g)
bc_values[:, faces] = 0
return bc_values.ravel("F")
def bc_values_scalar(self, g) -> np.ndarray:
"""
Hydrostatic pressure BC values.
"""
# Retrieve the boundaries where values are assigned
all_bf, *_ = self.domain_boundary_sides(g)
bc_values = np.zeros(g.num_faces)
depth = self._depth(g.face_centers[:, all_bf])
bc_values[all_bf] = self.fluid.hydrostatic_pressure(depth) / self.scalar_scale
return bc_values
def source_mechanics(self, g) -> np.ndarray:
"""
Gravity term.
"""
values = np.zeros((self.Nd, g.num_cells))
values[2] = (
pp.GRAVITY_ACCELERATION
* self.rock.DENSITY
* g.cell_volumes
* self.length_scale
/ self.scalar_scale
)
return values.ravel("F")
def source_scalar(self, g) -> np.ndarray:
"""
Source term for the scalar equation.
For slightly compressible flow in the present formulation, this has units of m^3.
Sources are handled by ScalarSource discretizations.
The implicit scheme yields multiplication of the rhs by dt, but
this is not incorporated in ScalarSource, hence we do it here.
"""
injection, production = self.source_flow_rates()
wells = (
injection
* g.tags["well_cells"]
* self.time_step
* g.tags["well_cells"].clip(min=0)
)
wells += (
production
* g.tags["well_cells"]
* self.time_step
* g.tags["well_cells"].clip(max=0)
)
return wells
def _set_time_parameters(self):
"""
Specify time parameters.
"""
# For the initialization run, we use the following
# start time
self.time = -5e2 * pp.YEAR
# and time step
self.time_step = -self.time / 1
# We use
t_1 = 5 * pp.DAY
self.end_time = t_1 + 2 * pp.DAY
self.max_time_step = self.end_time
self.phase_limits = [self.time, 0, t_1, self.end_time]
self.phase_time_steps = [self.time_step, pp.DAY * 1, pp.DAY / 2, 1]
def adjust_time_step(self):
"""
Adjust the time step so that smaller time steps are used when the driving forces
are changed. Also make sure to exactly reach the start and end time for
each phase.
"""
# Default is to just increase the time step somewhat
self.time_step = getattr(self, "time_step_factor", 1.0) * self.time_step
# We also want to make sure that we reach the end of each simulation phase
for dt, lim in zip(self.phase_time_steps, self.phase_limits):
diff = self.time - lim
if diff < 0 and -diff <= self.time_step:
self.time_step = -diff
if np.isclose(self.time, lim):
self.time_step = dt
# And that the time step doesn't grow too large after the equilibration phase
if self.time > 0:
self.time_step = min(self.time_step, self.max_time_step)
def _depth(self, coords) -> np.ndarray:
"""
Unscaled depth. We center the domain at 1 km below the surface.
"""
return 1.0 * pp.KILO * pp.METER - self.length_scale * coords[2]
def set_rock_and_fluid(self):
"""
Set rock and fluid properties to those of granite and water.
The matrix permeability may be adjusted by prescribing a "permeability"
value in the parameters during model construction.
"""
self.rock = Granite()
self.rock.BULK_MODULUS = pp.params.rock.bulk_from_lame(
self.rock.LAMBDA, self.rock.MU
)
self.fluid = Water()
self.rock.PERMEABILITY = self.params.get("permeability", 2.5e-15)
def _variable_increment(self, g, variable, scale=1, x0=None):
""" Extracts the variable solution of the current and previous time step and
computes the increment.
"""
d = self.gb.node_props(g)
if x0 is None:
x0 = d[pp.STATE][variable] * scale
x1 = d[pp.STATE][pp.ITERATE][variable] * scale
dx = x1 - x0
return dx, x1, x0
def initial_condition(self) -> None:
"""
Initial value for the Darcy fluxes. TODO: Add to THM.
"""
for g, d in self.gb:
d[pp.PARAMETERS] = pp.Parameters()
d[pp.PARAMETERS].update_dictionaries(
[self.mechanics_parameter_key, self.scalar_parameter_key,]
)
self.update_all_apertures(to_iterate=False)
self.update_all_apertures()
super().initial_condition()
for g, d in self.gb:
d[pp.STATE]["cell_centers"] = g.cell_centers.copy()
p0 = self.initial_scalar(g)
state = {
self.scalar_variable: p0,
"u_exp_0": np.zeros(g.num_cells),
"aperture_0": self.aperture(g) * self.length_scale,
}
iterate = {
self.scalar_variable: p0,
} # For initial flux
pp.set_state(d, state)
pp.set_iterate(d, iterate)
def initial_scalar(self, g) -> np.ndarray:
depth = self._depth(g.cell_centers)
return self.fluid.hydrostatic_pressure(depth) / self.scalar_scale
def set_exporter(self):
self.exporter = pp.Exporter(
self.gb, self.file_name, folder_name=self.viz_folder_name + "_vtu"
)
self.export_times = []
def export_step(self):
"""
Export the current solution to vtu. The method sets the desired values in d[pp.STATE].
For some fields, it provides zeros in the dimensions where the variable is not defined,
or pads the vector values with zeros so that they have three components, as required
by ParaView.
We use suffix _exp on all exported variables, to separate from scaled versions also
stored in d.
"""
if "exporter" not in self.__dict__:
self.set_exporter()
for g, d in self.gb:
if g.dim == self.Nd:
pad_zeros = np.zeros((3 - g.dim, g.num_cells))
u = d[pp.STATE][self.displacement_variable].reshape(
(self.Nd, -1), order="F"
)
u_exp = | np.vstack((u * self.length_scale, pad_zeros)) | numpy.vstack |
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Created on Sun Aug 22 17:07:52 2021
@author: administrator
"""
"""
Created on Fri Feb 21 16:07:29 2020
Similarity Difference and Uniqness for Explainable AI (SIDU)
This is an supporting code for the SIDU paper (Please refer the paper for more details)
# TO use this code install tensorflow, numpy, matplotlib, skimage, tqdm, PIL modules , os, scipy
@author: satya email: <EMAIL>
"""
from tensorflow.keras.applications.vgg16 import VGG16
from tensorflow.keras.applications.vgg19 import VGG19
from tensorflow.keras.preprocessing import image
from tensorflow.keras.applications.vgg19 import preprocess_input
from tensorflow.keras.models import Model
import numpy as np
import os
from pathlib import Path
from matplotlib import pyplot as plt
from skimage.transform import resize
from tqdm import tqdm
from tensorflow.keras.layers import GlobalAveragePooling2D, Dense, Dropout, Activation, Flatten
from tensorflow.keras import optimizers
from tensorflow.keras.applications.resnet50 import ResNet50, decode_predictions
from tensorflow.python.keras.models import load_model
from keras import backend as K
from mpl_toolkits.axes_grid1 import make_axes_locatable
from scipy.spatial.distance import cdist
from tensorflow.keras import layers,models
from os.path import join, exists
def load_image(img_path, input_size, show=False):
""" to load the image for the own model trained on new dataset """
img = image.load_img(img_path, target_size=input_size)
img_tensor = image.img_to_array(img) # (height, width, channels)
img_tensor = np.expand_dims(img_tensor, axis=0) # (1, height, width, channels), add a dimension because the model expects this shape: (batch_size, height, width, channels)
img_tensor /= 255. # imshow expects values in the range [0, 1]
if show:
plt.imshow(img_tensor[0])
plt.axis('off')
plt.show()
return img, img_tensor
def load_img(path, input_size):
""" to load the image for the pretrained model which are trained from imagenet """
img = image.load_img(path, target_size=input_size)
x = image.img_to_array(img)
x = np.expand_dims(x, axis=0)
x = preprocess_input(x)
return img, x
batch_size = 100
def generate_masks_conv_output(input_size, last_conv_output, s= 8):
""" To generate mask from the last convlutional layers of CNN model """
cell_size = np.ceil(np.array(input_size) / s)
up_size = (s) * cell_size
#grid = np.random.rand(N, s, s) < p1
#grid = grid.astype('float32')
#8, 0.5
grid = np.rollaxis(last_conv_output, 2, 0)
N = len(grid)
masks = np.empty((*input_size, N))
for i in tqdm(range(N), desc='Generating masks'):
# Linear upsampling and cropping
#masks[i, :, :] = resize(grid[i], up_size, order=1, mode='reflect',
# anti_aliasing=False)
#[x:x + input_size[0], y:y + input_size[1]]
"""extracting the each feature maps of last convlution layer """
conv_out = last_conv_output[:,:,i]
"""converting last convlayer to binary mask"""
conv_out = conv_out > 0.5
conv_out = conv_out.astype('float32')
""" upsampling the binary mask using bi-linear interpolation (feature activaions masks) """
final_resize = resize(conv_out, up_size, order=1, mode='reflect',
anti_aliasing=False)
masks[:, :, i] = final_resize
# masks = masks.reshape(-1, *input_size, 1)
return masks, grid, cell_size, up_size
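# Example (sketch): for ResNet50 the last convolutional block outputs a 7x7x2048
# tensor, so generate_masks_conv_output((224, 224), conv_out, s=8) returns
# masks of shape (224, 224, 2048), grid of shape (2048, 7, 7),
# cell_size = [28., 28.] and up_size = [224., 224.].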
def kernel(d, kernel_width):
""" computing the exponential weights for the differences"""
return np.sqrt(np.exp(-(d ** 2) / kernel_width ** 2))
def sim_differences(pred_org, preds):
"""Computing the similarity differences"""
diff = abs(pred_org-preds)
weights= kernel(diff,0.25)
return weights, diff
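# Worked example: a prediction difference of 0.25 receives weight
# sqrt(exp(-(0.25**2) / 0.25**2)) = sqrt(exp(-1)) ~ 0.61, while a difference of 0
# keeps weight 1, so masks whose predictions stay close to the original dominate.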
def normalize(array):
return (array - array.min()) / (array.max() - array.min() + 1e-13)
def uniqness_measure(masks_predictions):
""" computing the uniqness between the feature maps """
#amp_all_cdist =cdist(all_amp_layer_weights, all_amp_layer_weights)
sum_all_cdist =(cdist(masks_predictions, masks_predictions)).sum(axis=1)
sum_all_cdist = normalize(sum_all_cdist)
return sum_all_cdist
def explain_SIDU(model, inp, N, p1, masks, input_size):
""" SIDU explanation """
preds = []
# Make sure multiplication is being done for correct axes
""" generating the feature image mask for the original image using dot product """
masked = inp * masks
""" predicting the score for oringal _input image """
pred_org = model.predict(inp)
""" predicting the scores for all feature image masks """
for i in tqdm(range(0, N, batch_size), desc='Explaining'):
preds.append(model.predict(masked[i:min(i+batch_size, N)]))
preds = np.concatenate(preds)
weights, diff = sim_differences(pred_org, preds)
interactions = uniqness_measure(preds)
new_interactions = interactions.reshape(-1, 1)
diff_interactions = np.multiply(weights, new_interactions)
sal = diff_interactions.T.dot(masks.reshape(N, -1)).reshape(-1, *input_size)
sal = sal / N / p1
return sal, weights, new_interactions, diff_interactions, pred_org
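# Example (sketch, assuming the feature-activation masks have been rolled to shape
# (N, 224, 224, 1), here called masks_r, so that inp * masks broadcasts over the batch):
#   N = masks_r.shape[0]
#   sal, weights, inter, diff_inter, pred_org = explain_SIDU(
#       base_model, x, N, 0.5, masks_r, (224, 224))
#   heatmap = sal[np.argmax(pred_org)]   # saliency map of the top-predicted class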
def _show_explanation(img, sal, prediction, input_path, show=True, save=False, save_folder='', alpha=0.3, figsize=(15, 5),
cmap=plt.cm.seismic):
""" vizualizing the explanation on original image """
plt.figure(figsize=figsize)
plt.subplot(1, 3, 1)
plt.imshow(img)
plt.axis('off')
base_name = os.path.basename(input_path)
title = 'Input ({})'.format(base_name)
plt.title(title)
plt.subplot(1, 3, 2)
h_output = plt.imshow(sal, cmap=cmap)
plt.axis('off')
title = 'Output'
plt.title(title)
ax = plt.gca()
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
plt.colorbar(h_output, cax=cax)
plt.subplot(1, 3, 3)
plt.imshow(img)
plt.imshow(sal, cmap='jet', alpha=alpha)
plt.axis('off')
#plt.title('Mix')
plt.title('Explanation for `{}`'.format(class_name(prediction)))
if save and len(save_folder) > 0:
save_path = Path(save_folder) / (os.path.splitext(base_name)[0] + '.png')
plt.savefig(save_path, bbox_inches='tight')
print('Figure saved to: {}'.format(save_path))
if show:
plt.show()
def class_name(idx):
return decode_predictions(np.eye(1, 1000, idx))[0][0][1]
def fold_dir(folder):
if not os.path.exists(folder):
os.makedirs(folder)
return folder
if __name__ == '__main__':
    ## CHOOSING THE BASE MODEL TO GET EXPLANATIONS
model_eval = 'Resnet50'
### LOADING THE PRE-TRAINED BASE MODEL
### use
if model_eval == 'Resnet50':
base_model = ResNet50()
#### to see the last layer of CNN use base_model.summary()
features_model = Model(inputs=base_model.input, outputs=base_model.get_layer('conv5_block3_out').output)
elif model_eval == 'Vgg19':
base_model = VGG19(weights='imagenet')
features_model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_conv4').output)
elif model_eval == 'Vgg16':
base_model = VGG16(weights='imagenet')
features_model = Model(inputs=base_model.input, outputs=base_model.get_layer('block5_conv3').output)
## reading the image from the folder
read_path = join('test_images', 'water-bird.JPEG')
### load the image
img, x = load_img(read_path, (224,224))
    ### extract the feature activation maps of the last convolution layer of the model
feature_activation_maps = features_model.predict(x)
last_conv_output = np.squeeze(feature_activation_maps)
masks, grid, cell_size, up_size = generate_masks_conv_output((224,224), last_conv_output, s= 8)
## TO DISPLAY THE FEATURE ACTIVATION IMAGE MASKS
mask_ind = masks[:, :, 500]
grid_ind = grid[500,:,:]
new_mask= np.reshape(mask_ind,(224,224))
new_masks = | np.rollaxis(masks, 2, 0) | numpy.rollaxis |
from __future__ import absolute_import, print_function, division
import ast
import logging
import numpy as np
from itertools import repeat
from sklearn.cross_decomposition import PLSRegression
from sklearn.metrics import r2_score, mean_squared_error
from sklearn.model_selection import GridSearchCV, KFold, GroupKFold
from sklearn.linear_model import (
LassoLars, LassoLarsCV, Lars, LogisticRegression)
from superman.distance import pairwise_within, pairwise_dists
__all__ = ['GenericModel', 'REGRESSION_MODELS', 'CLASSIFICATION_MODELS']
class GenericModel(object):
def __init__(self, param, ds_kind, wave):
self.parameter = param
self.ds_kind = ds_kind
self.wave = wave
self.var_names = []
self.var_keys = []
@staticmethod
def load(fh):
class_name = fh.readline().strip().decode('utf8')
cls = globals().get(class_name)
if cls is None or not issubclass(cls, GenericModel):
raise ValueError('Invalid model file: class %r not found' % class_name)
param = _parse_literal(fh)
ds_kind = fh.readline().strip().decode('utf8')
var_names = _parse_literal(fh)
var_keys = _parse_literal(fh)
wave_len = _parse_literal(fh, parser=int)
wave = np.fromstring(fh.read(wave_len * 8))
model = cls(param, ds_kind, wave)
model.var_names = var_names
model.var_keys = var_keys
model._finish_loading(fh)
return model
def save(self, fh):
w = np.array(self.wave, dtype=float, copy=False)
fh.write(b'%s\n%r\n%s\n%r\n%r\n%d\n' % (
self.__class__.__name__.encode('utf8'), self.parameter,
self.ds_kind.encode('utf8'), self.var_names, self.var_keys,
w.shape[0]))
# don't use w.tofile(fh) here, because it requires an actual file object
fh.write(w.tobytes())
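  # Example (sketch): models serialize to a small text header followed by raw
  # coefficient bytes, so a round trip looks like
  #   with open('model.bin', 'wb') as fh:
  #       model.save(fh)
  #   with open('model.bin', 'rb') as fh:
  #       model2 = GenericModel.load(fh)
  # (the file name is illustrative only).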
def info_html(self):
return '%s — %s' % (self, self.ds_kind)
def __str__(self):
return '%s(%g)' % (self.__class__.__name__, self.parameter)
class _Classifier(GenericModel):
def train(self, X, variables):
key, = variables.keys()
y, name = variables[key]
self.var_keys = [key]
self.var_names = [name]
self._train(X, y)
def predict(self, X, variables):
key, = variables.keys()
preds = {key: None}
if key != self.var_keys[0]:
logging.warning('No trained model for variable: %r', key)
else:
preds[key] = self._predict(X)
return preds
class Logistic(_Classifier):
def _train(self, X, y):
self.clf = LogisticRegression(C=self.parameter, fit_intercept=False)
self.clf.fit(X, y)
def _predict(self, X):
return self.clf.predict(X)
@classmethod
def cross_validate(cls, X, variables, Cs=None, num_folds=5, labels=None):
if labels is None:
cv = KFold(n_splits=num_folds)
else:
cv = GroupKFold(n_splits=num_folds)
key, = variables.keys()
y, name = variables[key]
# TODO: use LogisticRegressionCV here instead?
model = GridSearchCV(LogisticRegression(fit_intercept=False), dict(C=Cs),
cv=cv, return_train_score=False, n_jobs=1)
model.fit(X, y, groups=labels)
acc_mean = model.cv_results_['mean_test_score']
acc_stdv = model.cv_results_['std_test_score']
yield name, Cs, acc_mean, acc_stdv
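  # Example (sketch): `variables` maps one key to a (labels, display_name) pair, e.g.
  #   variables = {'mineral': (y, 'Mineral class')}
  #   for name, Cs, acc, acc_std in Logistic.cross_validate(
  #           X, variables, Cs=np.logspace(-3, 3, 7), num_folds=5):
  #       best_C = Cs[np.argmax(acc)]   # C with the best mean CV accuracy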
def _finish_loading(self, fh):
params = _parse_literal(fh)
classes = _parse_literal(fh)
coef_shape = _parse_literal(fh)
self.clf = LogisticRegression().set_params(**params)
self.clf.classes_ = np.array(classes)
self.clf.intercept_ = 0.0
n = int(np.prod(coef_shape)) * 8
self.clf.coef_ = np.fromstring(fh.read(n)).reshape(coef_shape)
def save(self, fh):
GenericModel.save(self, fh)
fh.write(b'%r\n%r\n%r\n' % (self.clf.get_params(),
self.clf.classes_.tolist(),
self.clf.coef_.shape))
fh.write(self.clf.coef_.tobytes())
class KNN(_Classifier):
def _train(self, X, y):
self._orig_library = X
self.library = self._prepare_spectra(X)
self.classes, self.labels = np.unique(y, return_inverse=True)
# TODO: expose these in the UI
self.metric = 'cosine'
self.weighting = 'distance'
def _predict(self, X):
if X is self._orig_library:
dists = pairwise_within(self.library, self.metric, num_procs=5)
ks = slice(1, min(self.parameter+1, dists.shape[0]))
else:
X = self._prepare_spectra(X)
dists = pairwise_dists(self.library, X, self.metric, num_procs=5)
ks = slice(0, min(self.parameter, dists.shape[0]))
top_k = np.argsort(dists, axis=0)[ks] # shape: (k, nX)
num_classes = len(self.classes)
votes = np.zeros((num_classes, len(X)))
idx = np.arange(len(X))
if self.weighting == 'uniform':
for kk in top_k:
labels = self.labels[kk]
votes[labels[None], idx] += 1
else:
for kk in top_k:
labels = self.labels[kk]
votes[labels[None], idx] += 1./(1 + dists[kk, idx])
winner = votes.argmax(axis=0)
return self.classes[winner]
def _prepare_spectra(self, X):
if len(X) > 0 and np.asanyarray(X[0]).ndim == 2:
return [np.array(t, copy=False, order='C', dtype=np.float32) for t in X]
return np.array(X, copy=False, dtype=np.float64)
@classmethod
def cross_validate(cls, X, variables, ks=None, num_folds=5, labels=None):
if labels is None:
cv = KFold(n_splits=num_folds)
else:
cv = GroupKFold(n_splits=num_folds)
key, = variables.keys()
y, name = variables[key]
y = np.array(y)
acc = np.zeros((num_folds, len(ks)))
for i, (train_idx, test_idx) in enumerate(cv.split(y, groups=labels)):
trainY, testY = y[train_idx], y[test_idx]
if hasattr(X, 'shape'):
trainX, testX = X[train_idx], X[test_idx]
else:
trainX = [X[ti] for ti in train_idx]
testX = [X[ti] for ti in test_idx]
for j, k in enumerate(ks):
clf = cls(k, None, None)
clf._train(trainX, trainY)
pred = clf._predict(testX)
acc[i, j] = (pred == testY).mean()
yield name, ks, acc.mean(axis=0), acc.std(axis=0)
def _finish_loading(self, fh):
raise NotImplementedError('Cannot load a KNN model from file.')
def save(self, fh):
raise NotImplementedError('Cannot serialize a KNN model.')
class _RegressionModel(GenericModel):
def predict(self, X, variables):
preds, stats = {}, []
for key, p in self._predict(X, variables):
y, name = variables[key]
preds[key] = p
stats_entry = dict(name=name, key=key, r2=np.nan, rmse=np.nan)
if y is not None:
mask = np.isfinite(y)
nnz = np.count_nonzero(mask)
if nnz != 0:
if nnz < len(y):
y, p = y[mask], p[mask]
stats_entry['r2'] = r2_score(y, p)
stats_entry['rmse'] = np.sqrt(mean_squared_error(y, p))
stats.append(stats_entry)
stats.sort(key=lambda s: s['key'])
return preds, stats
class _UnivariateRegression(_RegressionModel):
def train(self, X, variables):
self.models = {}
for key in variables:
y, name = variables[key]
m = self._construct()
_try_to_fit(m, X, y)
self.models[key] = m
self.var_keys.append(key)
self.var_names.append(name)
def _predict(self, X, variables):
for key in variables:
if key not in self.models:
logging.warning('No trained model for variable: %r', key)
continue
clf = self.models[key]
yield key, clf.predict(X)
@classmethod
def _run_cv(cls, X, variables, grid, num_folds, labels=None):
if labels is None:
cv = KFold(n_splits=num_folds)
else:
cv = GroupKFold(n_splits=num_folds)
for key in sorted(variables):
y, name = variables[key]
model = GridSearchCV(cls._cv_construct(), grid, cv=cv,
scoring='neg_mean_squared_error',
return_train_score=False, n_jobs=1)
_try_to_fit(model, X, y, groups=labels)
mse_mean = -model.cv_results_['mean_test_score']
mse_stdv = model.cv_results_['std_test_score']
yield name, mse_mean, mse_stdv
def _finish_loading(self, fh):
self.models = {key: self._load_model(fh) for key in self.var_keys}
def save(self, fh):
GenericModel.save(self, fh)
for key in self.var_keys:
self._save_model(self.models[key], fh)
class _MultivariateRegression(_RegressionModel):
def train(self, X, variables):
self.clf = self._construct()
self.var_keys = list(variables.keys())
y_cols = []
for key in self.var_keys:
y, name = variables[key]
y_cols.append(y)
self.var_names.append(name)
_try_to_fit(self.clf, X, np.column_stack(y_cols))
def _predict(self, X, variables):
P = self.clf.predict(X)
for i, key in enumerate(self.var_keys):
if key not in variables:
logging.warning('No input variable for predicted: %r', key)
continue
yield key, P[:,i]
@classmethod
def _run_cv(cls, X, variables, grid, num_folds, labels=None):
if labels is None:
cv = KFold(n_splits=num_folds)
else:
cv = GroupKFold(n_splits=num_folds)
pls = GridSearchCV(cls._cv_construct(), grid, cv=cv,
scoring='neg_mean_squared_error',
return_train_score=False, n_jobs=1)
Y, names = zip(*variables.values())
_try_to_fit(pls, X, np.column_stack(Y), groups=labels)
mse_mean = -pls.cv_results_['mean_test_score']
mse_stdv = pls.cv_results_['std_test_score']
return '/'.join(names), mse_mean, mse_stdv
def _finish_loading(self, fh):
self.clf = self._load_model(fh)
def save(self, fh):
GenericModel.save(self, fh)
self._save_model(self.clf, fh)
class _PLS(object):
def _construct(self):
return PLSRegression(scale=False, n_components=self.parameter)
@classmethod
def _cv_construct(cls):
return PLSRegression(scale=False)
@classmethod
def _save_model(cls, pls, fh):
fh.write(b'%r\n%r\n' % (pls.get_params(), pls.coef_.shape))
fh.write(pls.x_mean_.tobytes())
fh.write(pls.y_mean_.tobytes())
fh.write(pls.coef_.tobytes())
@classmethod
def _load_model(cls, fh):
params = _parse_literal(fh)
coef_shape = _parse_literal(fh)
pls = PLSRegression().set_params(**params)
pls.x_mean_ = np.fromstring(fh.read(coef_shape[0] * 8))
pls.y_mean_ = np.fromstring(fh.read(coef_shape[1] * 8))
pls.x_std_ = np.ones(coef_shape[0])
pls.y_std_ = np.ones(coef_shape[1])
n = coef_shape[0] * coef_shape[1] * 8
pls.coef_ = np.fromstring(fh.read(n)).reshape(coef_shape)
return pls
class _Lasso(object):
def _construct(self):
return LassoLars(alpha=self.parameter, fit_intercept=False)
@classmethod
def _save_model(cls, m, fh):
fh.write(b'%r\n%r\n%r\n' % (m.get_params(), m.active_, m.coef_.shape))
fh.write(m.coef_.tobytes())
@classmethod
def _load_model(cls, fh):
params = _parse_literal(fh)
active = _parse_literal(fh)
coef_shape = _parse_literal(fh)
m = LassoLars().set_params(**params)
m.intercept_ = 0.0
n = int(np.prod(coef_shape)) * 8
m.coef_ = np.fromstring(fh.read(n)).reshape(coef_shape)
m.active_ = active
return m
class _Lars(object):
def _construct(self):
return Lars(n_nonzero_coefs=self.parameter, fit_intercept=False)
@classmethod
def _cv_construct(cls):
return Lars(fit_intercept=False, fit_path=False)
@classmethod
def _save_model(cls, m, fh):
fh.write(b'%r\n%r\n%r\n' % (m.get_params(), m.active_, m.coef_.shape))
fh.write(m.coef_.tobytes())
@classmethod
def _load_model(cls, fh):
params = _parse_literal(fh)
active = _parse_literal(fh)
coef_shape = _parse_literal(fh)
m = Lars().set_params(**params)
m.intercept_ = 0.0
n = int(np.prod(coef_shape)) * 8
m.coef_ = np.fromstring(fh.read(n)).reshape(coef_shape)
m.active_ = active
return m
class _LassoOrLars1(_UnivariateRegression):
def train(self, X, variables):
_UnivariateRegression.train(self, X, variables)
for clf in self.models.values():
# XXX: work around a bug in sklearn
# see https://github.com/scikit-learn/scikit-learn/pull/8160
clf.coef_ = np.array(clf.coef_)
def coefficients(self):
all_bands = []
all_coefs = []
for key in self.var_keys:
clf = self.models[key]
all_bands.append(self.wave[clf.active_])
all_coefs.append(clf.coef_[clf.active_])
return all_bands, all_coefs
class _LassoOrLars2(_MultivariateRegression):
def train(self, X, variables):
_MultivariateRegression.train(self, X, variables)
# XXX: work around a bug in sklearn
# see https://github.com/scikit-learn/scikit-learn/pull/8160
self.clf.coef_ = | np.array(self.clf.coef_) | numpy.array |
"""A Class of Strategy"""
"""
from pyGPGO.covfunc import squaredExponential
from pyGPGO.acquisition import Acquisition
from pyGPGO.surrogates.GaussianProcess import GaussianProcess
from pyGPGO.GPGO import GPGO
"""
import numpy as np
from concrete_factories.bayesian_folder.bayesian_interface import InterfaceBayesian
from sklearn.covariance import EmpiricalCovariance
#from qpsolvers import solve_qp
class BayesianWUBC(InterfaceBayesian):
def __init__(self, name='Weighted Upper Bound Constraint, WUBC', lamb=1, delta=1, upper_bound=1, lower_bound=0, validation_windows=36, cv_windows=12):
self.name = name
self.lamb = lamb
self.delta = delta
self.upper_bound = upper_bound
self.lower_bound = lower_bound
self.validation_windows = validation_windows
self.cv_windows = cv_windows
self.data = []
self.intermediate_data = []
self.weights = []
self.returns = []
self.optim_param = {}
def solve_optimization_problem(self):
# Returns the optimized portfolio weights
# Compute number of data points and assets
#lambdaValue=self.optim_param.get('lambda_value')
(numElements, N) = self.intermediate_data.shape
# mean and covariance
assert np.count_nonzero(np.isnan(self.intermediate_data)) == 0
if len(self.intermediate_data)==0:
pass
try:
Sigma = EmpiricalCovariance().fit(self.intermediate_data).covariance_ * 12 # I use 12 for annualizing the covmatrix
except:
pass
Vars = np.diag(Sigma) # variances of the stocks
mu = self.intermediate_data.mean(axis=0).H * 12 # mean log returns
lambdaValue = self.lamb
# lambdaValue = 0.886
upperBoundValue = self.upper_bound
# lowerBoundValue = 0
H = 2 * (lambdaValue * Sigma)
f = - mu.H  # TODO: transpose missing
Aeq = np.ones((1, N))
beq = 1
LB = np.ones((1, N))
UB = np.ones((1, N)) * upperBoundValue
P = H
q = np.asarray(f).reshape((6,))
G = np.zeros((6, 6))
h = np.zeros(6)
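# --- Hedged sketch, not part of the original (which is truncated here) -----
# The method above assembles standard QP ingredients (P, q, Aeq, beq, LB, UB;
# the hard-coded dimension 6 presumably equals N for the dataset at hand).
# Assuming the commented-out `qpsolvers` import near the top of this file was
# the intended backend, the weights would typically be obtained like this:
#
#     from qpsolvers import solve_qp
#     w = solve_qp(P, q,
#                  A=Aeq, b=np.array([float(beq)]),
#                  lb=LB.ravel(), ub=UB.ravel(),
#                  solver="quadprog")
#
# `w` would then hold the optimised weights (length N, summing to one).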
"""
pyrad.proc.process_echoclass
===============================
Functions for echo classification and filtering
.. autosummary::
:toctree: generated/
process_echo_id
process_birds_id
process_clt_to_echo_id
process_hydro_mf_to_hydro
process_echo_filter
process_cdf
process_filter_snr
process_filter_vel_diff
process_filter_visibility
process_outlier_filter
process_hydroclass
process_melting_layer
process_zdr_column
"""
from copy import deepcopy
from warnings import warn
import numpy as np
import pyart
from ..io.io_aux import get_datatype_fields, get_fieldname_pyart
def process_echo_id(procstatus, dscfg, radar_list=None):
"""
identifies echoes as 0: No data, 1: Noise, 2: Clutter,
3: Precipitation
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'dBuZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'ZDR':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'ZDRu':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'RhoHV':
rhv_field = get_fieldname_pyart(datatype)
if datatype == 'uPhiDP':
phi_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields) or
(phi_field not in radar.fields)):
warn('Unable to create radar_echo_id dataset. Missing data')
return None, None
echo_id = np.ma.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
# look for clutter
gatefilter = pyart.filters.moment_and_texture_based_gate_filter(
radar, zdr_field=zdr_field, rhv_field=rhv_field, phi_field=phi_field,
refl_field=refl_field, textzdr_field=None, textrhv_field=None,
textphi_field=None, textrefl_field=None, wind_size=7,
max_textphi=20., max_textrhv=0.3, max_textzdr=2.85,
max_textrefl=8., min_rhv=0.6)
is_clutter = gatefilter.gate_excluded == 1
echo_id[is_clutter] = 2
# look for noise
is_noise = radar.fields[refl_field]['data'].data == (
pyart.config.get_fillvalue())
echo_id[is_noise] = 1
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
id_field.update({'_FillValue': 0})
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
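# Hedged readability note (not part of pyrad): the echo ID codes produced by
# process_echo_id, collected in one place.  process_birds_id reuses code 3 for
# birds instead of precipitation.
#
#     ECHO_ID = {0: 'no data', 1: 'noise', 2: 'clutter', 3: 'precipitation'}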
def process_birds_id(procstatus, dscfg, radar_list=None):
"""
identifies echoes as 0: No data, 1: Noise, 2: Clutter,
3: Birds
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'dBuZ':
refl_field = get_fieldname_pyart(datatype)
if datatype == 'ZDR':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'ZDRu':
zdr_field = get_fieldname_pyart(datatype)
if datatype == 'RhoHV':
rhv_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to create radar_echo_id dataset. Missing data')
return None, None
# user defined parameters
max_zdr = dscfg.get('max_zdr', 3.)
max_rhv = dscfg.get('max_rhv', 0.9)
max_refl = dscfg.get('max_refl', 20.)
rmin = dscfg.get('rmin', 2000.)
rmax = dscfg.get('rmax', 25000.)
elmin = dscfg.get('elmin', 1.5)
elmax = dscfg.get('elmax', 85.)
echo_id = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
# look for clutter
gatefilter = pyart.filters.birds_gate_filter(
radar, zdr_field=zdr_field, rhv_field=rhv_field,
refl_field=refl_field, max_zdr=max_zdr, max_rhv=max_rhv,
max_refl=max_refl, rmin=rmin, rmax=rmax, elmin=elmin, elmax=elmax)
is_clutter = gatefilter.gate_excluded == 1
echo_id[is_clutter] = 2
# look for noise
is_noise = radar.fields[refl_field]['data'].data == (
pyart.config.get_fillvalue())
echo_id[is_noise] = 1
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
def process_clt_to_echo_id(procstatus, dscfg, radar_list=None):
"""
Converts clutter exit code from rad4alp into pyrad echo ID
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'CLT':
clt_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if clt_field not in radar.fields:
warn('rad4alp clutter exit code not present. Unable to obtain echoID')
return None, None
echo_id = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)+3
clt = radar.fields[clt_field]['data']
echo_id[clt == 1] = 1
echo_id[clt >= 100] = 2
id_field = pyart.config.get_metadata('radar_echo_id')
id_field['data'] = echo_id
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('radar_echo_id', id_field)
return new_dataset, ind_rad
def process_hydro_mf_to_hydro(procstatus, dscfg, radar_list=None):
"""
Converts the hydrometeor classification from Météo France to
that of MeteoSwiss
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'hydroMF':
field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field not in radar.fields:
warn('hydroMF not present. Unable to obtain hydro')
return None, None
hydro = np.zeros((radar.nrays, radar.ngates), dtype=np.uint8)
hydroMF = radar.fields[field]['data']
# BRUIT, ZH_MQT, SOL, INSECTES, OISEAUX, MER_CHAFF, PARASITES,
# ROND_CENTRAL, TYPE_INCONNU, SIMPLE_POLAR are classified as NC
hydro[hydroMF<8] = 1
hydro[hydroMF==30] = 1
hydro[hydroMF==31] = 1
# PRECIP_INDIFFERENCIEE, PLUIE, PRECIP are classified as RN
hydro[hydroMF==8] = 6
hydro[hydroMF==9] = 6
hydro[hydroMF==32] = 6
hydro[hydroMF==10] = 8 # NEIGE_MOUILLEE is WS
hydro[hydroMF==11] = 2 # NEIGE_SECHE is AG
hydro[hydroMF==12] = 3 # GLACE is CR
hydro[hydroMF==13] = 5 # PETITE_GRELE is RP
# MOYENNE_GRELE, GROSSE_GRELE is IH/HDG
hydro[hydroMF==14] = 10
hydro[hydroMF==15] = 10
# Light rain (LR), vertically oriented ice (VI) and melting hail (MH) have
# no equivalent in the Météo France classification
hydro_field = pyart.config.get_metadata('radar_echo_classification')
hydro_field['data'] = hydro
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'radar_echo_classification', hydro_field)
return new_dataset, ind_rad
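# Hedged alternative sketch (not used by pyrad): the same Météo-France to
# MeteoSwiss mapping as above can be expressed as a single lookup table
# indexed by the MF code, assuming MF codes lie in 0..34.  Usage would be
# something like: hydro = mf_to_ms[hydroMF.filled(0).astype(int)]
def _hydro_mf_lookup_sketch():
    """Return a lookup table equivalent to the masking logic above."""
    mf_to_ms = np.zeros(35, dtype=np.uint8)   # default: 0 (no data)
    mf_to_ms[0:8] = 1                         # noise/clutter/birds/etc. -> NC
    mf_to_ms[[30, 31]] = 1                    # unknown/simple polar -> NC
    mf_to_ms[[8, 9, 32]] = 6                  # rain classes -> RN
    mf_to_ms[10] = 8                          # wet snow -> WS
    mf_to_ms[11] = 2                          # dry snow -> AG
    mf_to_ms[12] = 3                          # ice -> CR
    mf_to_ms[13] = 5                          # small hail -> RP
    mf_to_ms[[14, 15]] = 10                   # medium/large hail -> IH/HDG
    return mf_to_ms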
def process_echo_filter(procstatus, dscfg, radar_list=None):
"""
Masks all echo types that are not of the class specified in
keyword echo_type
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
echo_type : int or list of ints
The type of echoes to keep: 1 noise, 2 clutter, 3 precipitation.
Default 3
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
echoid_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
echoid_field = get_fieldname_pyart(datatype)
break
if echoid_field is None:
warn('echoID field required to filter data')
return None, None
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if echoid_field not in radar.fields:
warn('Unable to filter data. Missing echo ID field')
return None, None
echo_type = dscfg.get('echo_type', 3)
mask = np.ma.isin(
radar.fields[echoid_field]['data'], echo_type, invert=True)
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name+' according to echo ID. ' +
'No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(
mask, radar_field['data'])
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
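# Tiny hedged illustration of the masking rule used in process_echo_filter:
# keep only gates whose echo ID is in `echo_type` (3, precipitation, by
# default) and mask everything else.
#
#     >>> echoid = np.array([[1, 2, 3], [3, 2, 1]])
#     >>> data = np.array([[10., 20., 30.], [40., 50., 60.]])
#     >>> mask = np.ma.isin(echoid, 3, invert=True)
#     >>> np.ma.masked_where(mask, data)
#     masked_array(data=[[--, --, 30.0], [40.0, --, --]], ...)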
def process_cdf(procstatus, dscfg, radar_list=None):
"""
Collects the fields necessary to compute the Cumulative Distribution
Function
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
echoid_field = None
hydro_field = None
vis_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'echoID':
echoid_field = get_fieldname_pyart(datatype)
elif datatype == 'hydro':
hydro_field = get_fieldname_pyart(datatype)
elif datatype == 'VIS':
vis_field = get_fieldname_pyart(datatype)
else:
field_name = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if field_name not in radar.fields:
warn('Unable to compute CDF. Missing field')
return None, None
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(field_name, radar.fields[field_name])
if echoid_field is not None:
if echoid_field not in radar.fields:
warn('Missing echo ID field. Clutter can not be filtered')
else:
new_dataset['radar_out'].add_field(
echoid_field, radar.fields[echoid_field])
if hydro_field is not None:
if hydro_field not in radar.fields:
warn('Missing hydrometeor type field. ' +
'Filtration according to hydrometeor type not possible')
else:
new_dataset['radar_out'].add_field(
hydro_field, radar.fields[hydro_field])
if vis_field is not None:
if vis_field not in radar.fields:
warn('Missing visibility field. Blocked gates can not be filtered')
else:
new_dataset['radar_out'].add_field(
vis_field, radar.fields[vis_field])
return new_dataset, ind_rad
def process_filter_snr(procstatus, dscfg, radar_list=None):
"""
filters out low SNR echoes
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
SNRmin : float. Dataset keyword
The minimum SNR to keep the data.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('SNRh', 'SNRv'):
snr_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
if snr_field not in radar.fields:
warn('Unable to filter dataset according to SNR. Missing SNR field')
return None, None
gatefilter = pyart.filters.snr_based_gate_filter(
radar, snr_field=snr_field, min_snr=dscfg['SNRmin'])
is_low_snr = gatefilter.gate_excluded == 1
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype in ('SNRh', 'SNRv'):
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name +
' according to SNR. '+'No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(
is_low_snr, radar_field['data'])
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_filter_vel_diff(procstatus, dscfg, radar_list=None):
"""
filters out range gates that could not be used for Doppler velocity
estimation
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'diffV':
vel_diff_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
if vel_diff_field not in radar.fields:
warn('Unable to filter dataset according to valid velocity. ' +
'Missing velocity differences field')
return None, None
mask = np.ma.getmaskarray(radar.fields[vel_diff_field]['data'])
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'diffV':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name +
' according to velocity difference. No valid input fields')
continue
radar_field = deepcopy(radar.fields[field_name])
radar_field['data'] = np.ma.masked_where(mask, radar_field['data'])
if field_name.find('corrected_') != -1:
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_filter_visibility(procstatus, dscfg, radar_list=None):
"""
filters out rays gates with low visibility and corrects the reflectivity
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
VISmin : float. Dataset keyword
The minimum visibility to keep the data.
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'VIS':
vis_field = get_fieldname_pyart(datatype)
break
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
if vis_field not in radar.fields:
warn('Unable to filter dataset according to visibility. ' +
'Missing visibility field')
return None, None
gatefilter = pyart.filters.visibility_based_gate_filter(
radar, vis_field=vis_field, min_vis=dscfg['VISmin'])
is_lowVIS = gatefilter.gate_excluded == 1
for datatypedescr in dscfg['datatype']:
_, _, datatype, _, _ = get_datatype_fields(
datatypedescr)
if datatype == 'VIS':
continue
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to filter '+field_name +
' according to visibility. No valid input fields')
continue
radar_aux = deepcopy(radar)
radar_aux.fields[field_name]['data'] = np.ma.masked_where(
is_lowVIS, radar_aux.fields[field_name]['data'])
if datatype in ('dBZ', 'dBZc', 'dBuZ', 'dBZv', 'dBZvc', 'dBuZv'):
radar_field = pyart.correct.correct_visibility(
radar_aux, vis_field=vis_field, field_name=field_name)
else:
radar_field = radar_aux.fields[field_name]
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset['radar_out'].add_field(new_field_name, radar_field)
if not new_dataset['radar_out'].fields:
return None, None
return new_dataset, ind_rad
def process_outlier_filter(procstatus, dscfg, radar_list=None):
"""
filters out gates which are outliers respect to the surrounding
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
threshold : float. Dataset keyword
The distance between the value of the examined range gate and the
median of the surrounding gates to consider the gate an outlier
nb : int. Dataset keyword
The number of neighbours (to one side) to analyse. i.e. 2 would
correspond to 24 gates
nb_min : int. Dataset keyword
Minimum number of neighbouring gates to consider the examined gate
valid
percentile_min, percentile_max : float. Dataset keyword
gates below (above) these percentiles (computed over the sweep) are
considered potential outliers and further examined
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
radarnr, _, datatype, _, _ = get_datatype_fields(
dscfg['datatype'][0])
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
field_name = get_fieldname_pyart(datatype)
if field_name not in radar.fields:
warn('Unable to perform outlier removal. No valid data')
return None, None
threshold = dscfg.get('threshold', 10.)
nb = dscfg.get('nb', 2)
nb_min = dscfg.get('nb_min', 3)
percentile_min = dscfg.get('percentile_min', 5.)
percentile_max = dscfg.get('percentile_max', 95.)
field = radar.fields[field_name]
field_out = deepcopy(field)
for sweep in range(radar.nsweeps):
# find gates suspected to be outliers
sweep_start = radar.sweep_start_ray_index['data'][sweep]
sweep_end = radar.sweep_end_ray_index['data'][sweep]
nrays_sweep = radar.rays_per_sweep['data'][sweep]
data_sweep = field['data'][sweep_start:sweep_end+1, :]
# check if all elements in array are masked
if np.all(np.ma.getmaskarray(data_sweep)):
continue
percent_vals = np.nanpercentile(
data_sweep.filled(fill_value=np.nan),
(percentile_min, percentile_max))
ind_rays, ind_rngs = np.ma.where(
np.ma.logical_or(
data_sweep < percent_vals[0], data_sweep > percent_vals[1]))
for i, ind_ray in enumerate(ind_rays):
ind_rng = ind_rngs[i]
# find neighbours of suspected outlier gate
data_cube = []
for ray_nb in range(-nb, nb+1):
for rng_nb in range(-nb, nb+1):
if ray_nb == 0 and rng_nb == 0:
continue
if ((ind_ray+ray_nb >= 0) and
(ind_ray+ray_nb < nrays_sweep) and
(ind_rng+rng_nb >= 0) and
(ind_rng+rng_nb < radar.ngates)):
if (data_sweep[ind_ray+ray_nb, ind_rng+rng_nb] is not
np.ma.masked):
data_cube.append(
data_sweep[ind_ray+ray_nb, ind_rng+rng_nb])
# remove data far from median of neighbours or with not enough
# valid neighbours
if len(data_cube) < nb_min:
field_out['data'][
sweep_start+ind_ray, ind_rng] = np.ma.masked
elif (abs(np.ma.median(data_cube) -
data_sweep[ind_ray, ind_rng]) > threshold):
field_out['data'][sweep_start+ind_ray, ind_rng] = np.ma.masked
if field_name.startswith('corrected_'):
new_field_name = field_name
elif field_name.startswith('uncorrected_'):
new_field_name = field_name.replace(
'uncorrected_', 'corrected_', 1)
else:
new_field_name = 'corrected_'+field_name
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(new_field_name, field_out)
return new_dataset, ind_rad
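# Hedged illustration (not used by pyrad) of the core test applied above: a
# gate is flagged when it deviates from the median of its (2*nb+1)^2
# neighbourhood by more than `threshold`.  The real routine additionally
# pre-selects candidates by percentile, honours the data mask, excludes the
# examined gate from the neighbourhood and requires `nb_min` valid neighbours.
def _outlier_flag_sketch(data, nb=2, threshold=10.):
    from scipy import ndimage
    med = ndimage.median_filter(data, size=2*nb+1, mode='nearest')
    return np.abs(data - med) > threshold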
def process_hydroclass(procstatus, dscfg, radar_list=None):
"""
Classifies precipitation echoes
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
HYDRO_METHOD : string. Dataset keyword
The hydrometeor classification method. One of the following:
SEMISUPERVISED
RADARCENTROIDS : string. Dataset keyword
Used with HYDRO_METHOD SEMISUPERVISED. The name of the radar whose
derived centroids will be used. One of the following:
A (Albis), L, D, P, W, DX50
compute_entropy : bool. Dataset keyword
If true the entropy is computed and the field hydroclass_entropy
is output
output_distances : bool. Dataset keyword
If true the de-mixing algorithm based on the distances to the
centroids is computed and the field proportions of each
hydrometeor in the radar range gate is output
vectorize : bool. Dataset keyword
If true a vectorized version of the algorithm is used
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
if 'HYDRO_METHOD' not in dscfg:
raise Exception(
"ERROR: Undefined parameter 'HYDRO_METHOD' for dataset '%s'"
% dscfg['dsname'])
if dscfg['HYDRO_METHOD'] == 'SEMISUPERVISED':
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'RhoHV':
rhv_field = 'cross_correlation_ratio'
if datatype == 'uRhoHV':
rhv_field = 'uncorrected_cross_correlation_ratio'
if datatype == 'RhoHVc':
rhv_field = 'corrected_cross_correlation_ratio'
if datatype == 'KDP':
kdp_field = 'specific_differential_phase'
if datatype == 'KDPc':
kdp_field = 'corrected_specific_differential_phase'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if temp_field is None and iso0_field is None:
warn('iso0 or temperature fields needed to create hydrometeor ' +
'classification field')
return None, None
if temp_field is not None and (temp_field not in radar.fields):
warn('Unable to create hydrometeor classification field. ' +
'Missing temperature field')
return None, None
if iso0_field is not None and (iso0_field not in radar.fields):
warn('Unable to create hydrometeor classification field. ' +
'Missing height over iso0 field')
return None, None
temp_ref = 'temperature'
if iso0_field is not None:
temp_ref = 'height_over_iso0'
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields) or
(kdp_field not in radar.fields)):
warn('Unable to create hydrometeor classification field. ' +
'Missing data')
return None, None
mass_centers = np.zeros((9, 5))
if dscfg['RADARCENTROIDS'] == 'A':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
13.5829, 0.4063, 0.0497, 0.9868, 1330.3] # AG
mass_centers[1, :] = [
02.8453, 0.2457, 0.0000, 0.9798, 0653.8] # CR
mass_centers[2, :] = [
07.6597, 0.2180, 0.0019, 0.9799, -1426.5] # LR
mass_centers[3, :] = [
31.6815, 0.3926, 0.0828, 0.9978, 0535.3] # RP
mass_centers[4, :] = [
39.4703, 1.0734, 0.4919, 0.9876, -1036.3] # RN
mass_centers[5, :] = [
04.8267, -0.5690, 0.0000, 0.9691, 0869.8] # VI
mass_centers[6, :] = [
30.8613, 0.9819, 0.1998, 0.9845, -0066.1] # WS
mass_centers[7, :] = [
52.3969, 2.1094, 2.4675, 0.9730, -1550.2] # MH
mass_centers[8, :] = [
50.6186, -0.0649, 0.0946, 0.9904, 1179.9] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'L':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
13.8231, 0.2514, 0.0644, 0.9861, 1380.6] # AG
mass_centers[1, :] = [
03.0239, 0.1971, 0.0000, 0.9661, 1464.1] # CR
mass_centers[2, :] = [
04.9447, 0.1142, 0.0000, 0.9787, -0974.7] # LR
mass_centers[3, :] = [
34.2450, 0.5540, 0.1459, 0.9937, 0945.3] # RP
mass_centers[4, :] = [
40.9432, 1.0110, 0.5141, 0.9928, -0993.5] # RN
mass_centers[5, :] = [
03.5202, -0.3498, 0.0000, 0.9746, 0843.2] # VI
mass_centers[6, :] = [
32.5287, 0.9751, 0.2640, 0.9804, -0055.5] # WS
mass_centers[7, :] = [
52.6547, 2.7054, 2.5101, 0.9765, -1114.6] # MH
mass_centers[8, :] = [
46.4998, 0.1978, 0.6431, 0.9845, 1010.1] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'D':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
12.567, 0.18934, 0.041193, 0.97693, 1328.1] # AG
mass_centers[1, :] = [
3.2115, 0.13379, 0.0000, 0.96918, 1406.3] # CR
mass_centers[2, :] = [
10.669, 0.18119, 0.0000, 0.97337, -1171.9] # LR
mass_centers[3, :] = [
34.941, 0.13301, 0.090056, 0.9979, 898.44] # RP
mass_centers[4, :] = [
39.653, 1.1432, 0.35013, 0.98501, -859.38] # RN
mass_centers[5, :] = [
2.8874, -0.46363, 0.0000, 0.95653, 1015.6] # VI
mass_centers[6, :] = [
34.122, 0.87987, 0.2281, 0.98003, -234.37] # WS
mass_centers[7, :] = [
53.134, 2.0888, 2.0055, 0.96927, -1054.7] # MH
mass_centers[8, :] = [
46.715, 0.030477, 0.16994, 0.9969, 976.56] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'P':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
13.9882, 0.2470, 0.0690, 0.9939, 1418.1] # AG
mass_centers[1, :] = [
00.9834, 0.4830, 0.0043, 0.9834, 0950.6] # CR
mass_centers[2, :] = [
05.3962, 0.2689, 0.0000, 0.9831, -0479.5] # LR
mass_centers[3, :] = [
35.3411, 0.1502, 0.0940, 0.9974, 0920.9] # RP
mass_centers[4, :] = [
35.0114, 0.9681, 0.1106, 0.9785, -0374.0] # RN
mass_centers[5, :] = [
02.5897, -0.3879, 0.0282, 0.9876, 0985.5] # VI
mass_centers[6, :] = [
32.2914, 0.7789, 0.1443, 0.9075, -0153.5] # WS
mass_centers[7, :] = [
53.2413, 1.8723, 0.3857, 0.9454, -0470.8] # MH
mass_centers[8, :] = [
44.7896, 0.0015, 0.1349, 0.9968, 1116.7] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'W':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
16.7650, 0.3754, 0.0442, 0.9866, 1409.0] # AG
mass_centers[1, :] = [
01.4418, 0.3786, 0.0000, 0.9490, 1415.8] # CR
mass_centers[2, :] = [
16.0987, 0.3238, 0.0000, 0.9871, -0818.7] # LR
mass_centers[3, :] = [
36.5465, 0.2041, 0.0731, 0.9952, 0745.4] # RP
mass_centers[4, :] = [
43.4011, 0.6658, 0.3241, 0.9894, -0778.5] # RN
mass_centers[5, :] = [
00.9077, -0.4793, 0.0000, 0.9502, 1488.6] # VI
mass_centers[6, :] = [
36.8091, 0.7266, 0.1284, 0.9924, -0071.1] # WS
mass_centers[7, :] = [
53.8402, 0.8922, 0.5306, 0.9890, -1017.6] # MH
mass_centers[8, :] = [
45.9686, 0.0845, 0.0963, 0.9940, 0867.4] # IH/HDG
elif dscfg['RADARCENTROIDS'] == 'DX50':
# Zh ZDR kdp RhoHV delta_Z
mass_centers[0, :] = [
19.0770, 0.4139, 0.0099, 0.9841, 1061.7] # AG
mass_centers[1, :] = [
03.9877, 0.5040, 0.0000, 0.9642, 0856.6] # CR
mass_centers[2, :] = [
20.7982, 0.3177, 0.0004, 0.9858, -1375.1] # LR
mass_centers[3, :] = [
34.7124, -0.3748, 0.0988, 0.9828, 1224.2] # RP
mass_centers[4, :] = [
33.0134, 0.6614, 0.0819, 0.9802, -1169.8] # RN
mass_centers[5, :] = [
08.2610, -0.4681, 0.0000, 0.9722, 1100.7] # VI
mass_centers[6, :] = [
35.1801, 1.2830, 0.1322, 0.9162, -0159.8] # WS
mass_centers[7, :] = [
52.4539, 2.3714, 1.1120, 0.9382, -1618.5] # MH
mass_centers[8, :] = [
44.2216, -0.3419, 0.0687, 0.9683, 1272.7] # IH/HDG
else:
warn(
' Unknown radar. ' +
'Default centroids will be used in classification.')
mass_centers = None
compute_entropy = dscfg.get('compute_entropy', False)
output_distances = dscfg.get('output_distances', False)
vectorize = dscfg.get('vectorize', False)
fields_dict = pyart.retrieve.hydroclass_semisupervised(
radar, mass_centers=mass_centers,
weights=np.array([1., 1., 1., 0.75, 0.5]), refl_field=refl_field,
zdr_field=zdr_field, rhv_field=rhv_field, kdp_field=kdp_field,
temp_field=temp_field, iso0_field=iso0_field, hydro_field=None,
entropy_field=None, temp_ref=temp_ref,
compute_entropy=compute_entropy,
output_distances=output_distances, vectorize=vectorize)
else:
raise Exception(
"ERROR: Unknown hydrometeor classification method " +
dscfg['HYDRO_METHOD'])
# prepare for exit
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field(
'radar_echo_classification', fields_dict['hydro'])
if compute_entropy:
new_dataset['radar_out'].add_field(
'hydroclass_entropy', fields_dict['entropy'])
if output_distances:
new_dataset['radar_out'].add_field(
'proportion_AG', fields_dict['prop_AG'])
new_dataset['radar_out'].add_field(
'proportion_CR', fields_dict['prop_CR'])
new_dataset['radar_out'].add_field(
'proportion_LR', fields_dict['prop_LR'])
new_dataset['radar_out'].add_field(
'proportion_RP', fields_dict['prop_RP'])
new_dataset['radar_out'].add_field(
'proportion_RN', fields_dict['prop_RN'])
new_dataset['radar_out'].add_field(
'proportion_VI', fields_dict['prop_VI'])
new_dataset['radar_out'].add_field(
'proportion_WS', fields_dict['prop_WS'])
new_dataset['radar_out'].add_field(
'proportion_MH', fields_dict['prop_MH'])
new_dataset['radar_out'].add_field(
'proportion_IH', fields_dict['prop_IH'])
return new_dataset, ind_rad
def process_melting_layer(procstatus, dscfg, radar_list=None):
"""
Detects the melting layer
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
if 'ML_METHOD' not in dscfg:
raise Exception(
"ERROR: Undefined parameter 'ML_METHOD' for dataset '%s'"
% dscfg['dsname'])
if dscfg['ML_METHOD'] == 'GIANGRANDE':
temp_ref = None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'RhoHV':
rhv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhv_field = 'corrected_cross_correlation_ratio'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
# Check which should be the reference field for temperature
if iso0_field is not None:
if iso0_field not in radar.fields:
warn('Unable to detect melting layer. ' +
'Missing height over iso0 field')
return None, None
temp_ref = 'height_over_iso0'
if temp_field is not None:
if temp_field not in radar.fields:
warn('Unable to detect melting layer. ' +
'Missing temperature field')
return None, None
temp_ref = 'temperature'
iso0_field = 'height_over_iso0'
if temp_ref is None:
iso0_field = 'height_over_iso0'
if ((refl_field not in radar.fields) or
(zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to detect melting layer. Missing data')
return None, None
# User defined variables
nVol = dscfg.get('nVol', 3)
maxh = dscfg.get('maxh', 6000.)
hres = dscfg.get('hres', 50.)
rmin = dscfg.get('rmin', 1000.)
elmin = dscfg.get('elmin', 4.)
elmax = dscfg.get('elmax', 10.)
rhomin = dscfg.get('rhomin', 0.75)
rhomax = dscfg.get('rhomax', 0.94)
zhmin = dscfg.get('zhmin', 20.)
hwindow = dscfg.get('hwindow', 500.)
mlzhmin = dscfg.get('mlzhmin', 30.)
mlzhmax = dscfg.get('mlzhmax', 50.)
mlzdrmin = dscfg.get('mlzdrmin', 1.)
mlzdrmax = dscfg.get('mlzdrmax', 5.)
htol = dscfg.get('htol', 500.)
ml_bottom_diff_max = dscfg.get('ml_bottom_diff_max', 1000.)
time_accu_max = dscfg.get('time_accu_max', 1800.)
nml_points_min = dscfg.get('nml_points_min', None)
wlength = dscfg.get('wlength', 20.)
percentile_bottom = dscfg.get('percentile_bottom', 0.3)
percentile_top = dscfg.get('percentile_top', 0.9)
interpol = dscfg.get('interpol', True)
time_nodata_allowed = dscfg.get('time_nodata_allowed', 3600.)
get_iso0 = dscfg.get('get_iso0', True)
if not dscfg['initialized']:
# initialize dataset
ml_obj, ml_dict, iso0_dict, ml_global = (
pyart.retrieve.melting_layer_giangrande(
radar, nVol=nVol, maxh=maxh, hres=hres, rmin=rmin,
elmin=elmin, elmax=elmax, rhomin=rhomin, rhomax=rhomax,
zhmin=zhmin, hwindow=hwindow, mlzhmin=mlzhmin,
mlzhmax=mlzhmax, mlzdrmin=mlzdrmin, mlzdrmax=mlzdrmax,
htol=htol, ml_bottom_diff_max=ml_bottom_diff_max,
time_accu_max=time_accu_max, nml_points_min=nml_points_min,
wlength=wlength, percentile_bottom=percentile_bottom,
percentile_top=percentile_top, interpol=interpol,
time_nodata_allowed=time_nodata_allowed,
refl_field=refl_field, zdr_field=zdr_field,
rhv_field=rhv_field, temp_field=temp_field,
iso0_field=iso0_field, ml_field='melting_layer',
ml_pos_field='melting_layer_height',
temp_ref=temp_ref, get_iso0=get_iso0, ml_global=None))
dscfg['initialized'] = True
else:
# use previous detection
ml_obj, ml_dict, iso0_dict, ml_global = (
pyart.retrieve.melting_layer_giangrande(
radar, nVol=nVol, maxh=maxh, hres=hres, rmin=rmin,
elmin=elmin, elmax=elmax, rhomin=rhomin, rhomax=rhomax,
zhmin=zhmin, hwindow=hwindow, mlzhmin=mlzhmin,
mlzhmax=mlzhmax, mlzdrmin=mlzdrmin, mlzdrmax=mlzdrmax,
htol=htol, ml_bottom_diff_max=ml_bottom_diff_max,
time_accu_max=time_accu_max, nml_points_min=nml_points_min,
wlength=wlength, percentile_bottom=percentile_bottom,
percentile_top=percentile_top, interpol=interpol,
time_nodata_allowed=time_nodata_allowed,
refl_field=refl_field, zdr_field=zdr_field,
rhv_field=rhv_field, temp_field=temp_field,
iso0_field=iso0_field, ml_field='melting_layer',
ml_pos_field='melting_layer_height',
temp_ref=temp_ref, get_iso0=get_iso0,
ml_global=dscfg['global_data']))
# update global stack
dscfg['global_data'] = ml_global
elif dscfg['ML_METHOD'] == 'WOLFENSBERGER':
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'dBZ':
refl_field = 'reflectivity'
if datatype == 'dBZc':
refl_field = 'corrected_reflectivity'
if datatype == 'RhoHV':
rhohv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhohv_field = 'corrected_cross_correlation_ratio'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if ((refl_field not in radar.fields) or
(rhohv_field not in radar.fields)):
warn('Unable to detect melting layer. Missing data')
return None, None
# User defined parameters
max_range = dscfg.get('max_range', 20000.)
detect_threshold = dscfg.get('detect_threshold', 0.02)
interp_holes = dscfg.get('interp_holes', False)
max_length_holes = dscfg.get('max_length_holes', 250)
check_min_length = dscfg.get('check_min_length', True)
get_iso0 = dscfg.get('get_iso0', True)
ml_obj, ml_dict, iso0_dict, _ = pyart.retrieve.detect_ml(
radar, refl_field=refl_field, rhohv_field=rhohv_field,
ml_field='melting_layer', ml_pos_field='melting_layer_height',
iso0_field='height_over_iso0', max_range=max_range,
detect_threshold=detect_threshold, interp_holes=interp_holes,
max_length_holes=max_length_holes,
check_min_length=check_min_length, get_iso0=get_iso0)
elif dscfg['ML_METHOD'] == 'FROM_HYDROCLASS':
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'hydro':
hydro_field = get_fieldname_pyart(datatype)
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
if hydro_field not in radar.fields:
warn('Unable to detect melting layer. Missing data')
return None, None
# User defined parameters
force_continuity = dscfg.get('force_continuity', True)
dist_max = dscfg.get('dist_max', 350.)
get_iso0 = dscfg.get('get_iso0', False)
ml_obj, ml_dict, iso0_dict = pyart.retrieve.melting_layer_hydroclass(
radar, hydro_field=hydro_field, ml_field='melting_layer',
ml_pos_field='melting_layer_height',
iso0_field='height_over_iso0', force_continuity=force_continuity,
dist_max=dist_max, get_iso0=get_iso0)
else:
raise Exception(
"ERROR: Unknown melting layer retrieval method " +
dscfg['ML_METHOD'])
# prepare for exit
if ml_dict is None:
return None, None
new_dataset = {'radar_out': deepcopy(radar)}
new_dataset['radar_out'].fields = dict()
new_dataset['radar_out'].add_field('melting_layer', ml_dict)
if iso0_dict is not None:
new_dataset['radar_out'].add_field('height_over_iso0', iso0_dict)
new_dataset.update({'ml_obj': ml_obj})
return new_dataset, ind_rad
def process_zdr_column(procstatus, dscfg, radar_list=None):
"""
Detects ZDR columns
Parameters
----------
procstatus : int
Processing status: 0 initializing, 1 processing volume,
2 post-processing
dscfg : dictionary of dictionaries
data set configuration. Accepted Configuration Keywords::
datatype : list of string. Dataset keyword
The input data types
radar_list : list of Radar objects
Optional. list of radar objects
Returns
-------
new_dataset : dict
dictionary containing the output
ind_rad : int
radar index
"""
if procstatus != 1:
return None, None
temp_field = None
iso0_field = None
for datatypedescr in dscfg['datatype']:
radarnr, _, datatype, _, _ = get_datatype_fields(datatypedescr)
if datatype == 'ZDR':
zdr_field = 'differential_reflectivity'
if datatype == 'ZDRc':
zdr_field = 'corrected_differential_reflectivity'
if datatype == 'RhoHV':
rhv_field = 'cross_correlation_ratio'
if datatype == 'RhoHVc':
rhv_field = 'corrected_cross_correlation_ratio'
if datatype == 'TEMP':
temp_field = 'temperature'
if datatype == 'H_ISO0':
iso0_field = 'height_over_iso0'
ind_rad = int(radarnr[5:8])-1
if radar_list[ind_rad] is None:
warn('No valid radar')
return None, None
radar = radar_list[ind_rad]
# Check which should be the reference field for temperature
if iso0_field is not None and (iso0_field not in radar.fields):
warn('Unable to detect ZDR columns. ' +
'Missing height over iso0 field')
return None, None
temp_ref = 'height_over_iso0'
if temp_field is not None and (temp_field not in radar.fields):
warn('Unable to detect ZDR columns. Missing temperature field')
return None, None
temp_ref = 'temperature'
iso0_field = 'height_over_iso0'
if ((zdr_field not in radar.fields) or
(rhv_field not in radar.fields)):
warn('Unable to detect ZDR columns. Missing data')
return None, None
rhohv_min = dscfg.get('rhohv_min', 0.8)
zdr_min = dscfg.get('zdr_min', 1.)
smooth_window = dscfg.get('smooth_window', 0.)
latlon_tol = dscfg.get('latlon_tol', 0.025) # approx 3x2 km
if smooth_window == 0:
smooth_window_len = 0
else:
smooth_window_len = int(
smooth_window/(radar.range['data'][1]-radar.range['data'][0]))
zdr_dict = deepcopy(radar.fields[zdr_field])
if smooth_window_len > 0:
zdr_dict['data'] = pyart.correct.smooth_masked(
zdr_dict['data'], wind_len=smooth_window_len, min_valid=1,
wind_type='mean')
zdr_dict['data'][
radar.fields[rhv_field]['data'] < rhohv_min] = np.ma.masked
zdr_dict['data'][zdr_dict['data'] < zdr_min] = np.ma.masked
zdr_dict['data'][radar.fields[temp_field]['data'] > 0.] = np.ma.masked
zdr_valid = np.logical_not(np.ma.getmaskarray(zdr_dict['data']))
hlowerleft, hupperright = pyart.retrieve._get_res_vol_sides(radar)
ind_ang_sorted = np.argsort(radar.fixed_angle['data'])
# get number of suspected ZDR columns
lat_cols = np.array([], dtype=int)
lon_cols = np.array([], dtype=int)
zdr_cols = np.array([], dtype=int)
g_lat = radar.gate_latitude['data']
g_lon = radar.gate_longitude['data']
for ind_ray in range(radar.nrays):
# Get bins with negative temperatures
ind_rngs = np.where(
radar.fields[temp_field]['data'][ind_ray, :] < 0.)[0]
if ind_rngs.size == 0:
continue
# Segment negative temperatures and get start of each segment
cons_list = np.split(ind_rngs, np.where(np.diff(ind_rngs) != 1)[0]+1)
for ind_rngs_cell in cons_list:
if not zdr_valid[ind_ray, ind_rngs_cell[0]]:
continue
ind_ray_col = ind_ray
ind_rng_col = ind_rngs_cell[0]
# extract data around point:
ind_rays, ind_rngs = np.where(np.logical_and.reduce((
np.logical_and(
g_lat >= g_lat[ind_ray_col, ind_rng_col]-latlon_tol,
g_lat <= g_lat[ind_ray_col, ind_rng_col]+latlon_tol),
np.logical_and(
g_lon >= g_lon[ind_ray_col, ind_rng_col]-latlon_tol,
g_lon <= g_lon[ind_ray_col, ind_rng_col]+latlon_tol),
zdr_valid)))
# get ZDR column height for each radar sweep
h_low = np.ma.masked_all(radar.nsweeps)
h_high = np.ma.masked_all(radar.nsweeps)
for sweep in range(radar.nsweeps):
ind = np.where(np.logical_and(
ind_rays >= radar.sweep_start_ray_index['data'][sweep],
ind_rays <= radar.sweep_end_ray_index['data'][sweep]))[0]
if ind.size == 0:
continue
h_low[sweep] = np.min(
hlowerleft[ind_rays[ind], ind_rngs[ind]])
h_high[sweep] = np.max(
hupperright[ind_rays[ind], ind_rngs[ind]])
# order data by elevation angle
h_low = h_low[ind_ang_sorted]
h_high = h_high[ind_ang_sorted]
# get the first segment of continuous ZDR valid values
ind_valid = np.where(np.ma.getmaskarray(h_low) == 0)[0]
ind_valid = np.split(
ind_valid, np.where(np.diff(ind_valid) != 1)[0]+1)[0]
# compute ZDR column
zdr_col = h_high[ind_valid[-1]]-h_low[ind_valid[0]]
# put data in output array
lat_cols = np.append(
lat_cols,
radar.gate_latitude['data'][ind_ray_col, ind_rng_col])
lon_cols = np.append(
lon_cols,
radar.gate_longitude['data'][ind_ray_col, ind_rng_col])
zdr_cols = np.append(zdr_cols, zdr_col)
zdr_col_dict = pyart.config.get_metadata(
'differential_reflectivity_column_height')
zdr_col_dict['data'] = zdr_cols/1000.
new_dataset = {
'field_limits': [
np.min(radar.gate_longitude['data'])
#!/usr/bin/python
# -*- coding: utf-8 -*-
""" Asssess the performance of the storage management for SEAREV smoothing
based on P_grid statistics
<NAME> — July 2013
"""
from __future__ import division, print_function, unicode_literals
import numpy as np
import matplotlib.pyplot as plt
from searev_data import load, searev_power, dt, power_max
import stodynprog
from storage_simulation import storage_sim, P_sto_law_lin
def cost(P_grid):
'''penalty on the power injected to the grid
penal = (P_grid/power_max)**2
'''
penal = (P_grid/power_max)**2
return penal
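# For reference: cost(power_max) == 1 and cost(0.5*power_max) == 0.25, so the
# quadratic penalty weights large grid-power excursions much more heavily than
# small ones.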
### Compare with optimized policy:
# Load optimized trajectory:
import pickle
with open('P_sto_law.dat', 'rb') as f:
P_sto_law_opt = pickle.load(f)
P_sto_law = P_sto_law_opt
## Enable saturation:
#sat = lambda A,l : (A if A>-l else -l) if A<l else l
#a_sat = 0.5
#print('accel saturation at {:.3f}'.format(a_sat))
#P_sto_law = lambda E,S,A : P_sto_law_opt(E,S,sat(A, a_sat))
datafiles = ['Em_1.txt', 'Em_2.txt', 'Em_3.txt']
std_list = []
mse_list = []
for fname in datafiles:
t, elev, angle, speed, torque, accel = load(fname)
P_prod = speed*torque/1e6 # [MW]
# Run two simulations:
P_sto_lin, P_grid_lin, E_sto_lin = storage_sim(speed, accel, P_prod, P_sto_law_lin)
P_sto_opt, P_grid_opt, E_sto_opt = storage_sim(speed, accel, P_prod, P_sto_law, check_bounds=False)
# Save simulation data:
print('saving simulation data to "data_output"...')
csv_opt = dict(fmt = str('%.12f'), delimiter=',',
header='P_sto,P_grid_lin,E_sto', comments='')
data = np.vstack((P_sto_lin, P_grid_lin, E_sto_lin[:-1])).T
np.savetxt('data_output/lin_smooth_'+fname, data, **csv_opt)
data = np.vstack((P_sto_opt, P_grid_opt, E_sto_opt[:-1])).T
np.savetxt('data_output/opt_smooth_'+fname, data, **csv_opt)
std_nosto = P_prod.std()
mse_nosto = cost(P_prod).mean()
std_lin = P_grid_lin.std()
mse_lin = cost(P_grid_lin).mean()
print('linear control: {:.3f} std, {:.4f} mse'.format(std_lin, mse_lin))
std_opt = P_grid_opt.std()
mse_opt = cost(P_grid_opt).mean()
print('optimized control: {:.3f} std, {:.4f} mse'.format(std_opt, mse_opt))
# Improvement:
std_change = (std_opt - std_lin)/std_lin
mse_change = (mse_opt - mse_lin)/mse_lin
print('criterion reduction: {:.0%} std, {:.0%} mse'.format(std_change, mse_change))
std_list.append((std_nosto, std_lin, std_opt))
mse_list.append((mse_nosto, mse_lin, mse_opt))
print()
# Convert to arrays:
s = np.array(std_list)
import numpy as np
from integrator import *
from qoi import *
import parallel as par
def simSetUp(inpt):
Sim = {}
Ndof = int(inpt['Ndof'])
Timestep = float(inpt['Timestep'])
Tf = float(inpt['Tf'])
NSim = int(inpt['NSim'])
Sim['Simulation name'] = inpt['Simulation name']
Sim['Ndof'] = Ndof
Sim['Timestep'] = Timestep
Sim['Tf'] = Tf
Sim['NSim'] = NSim
Sim['Record solution'] = (inpt['Record solution']=="True")
# MPI parallelization
nSim_, startSim_ = par.partitionSim(NSim)
Sim['nSim_'] = nSim_
Sim['startSim_'] = startSim_
if par.nProc > 1:
Sim['reconstruct Sol'] = (inpt['reconstruct Sol']=="True")
Sim['reconstruct QOI'] = (inpt['reconstruct QOI']=="True")
# Post proc
Sim['Plot'] = (inpt['Plot']=="True")
Sim['Build CDF'] = (inpt['Build CDF']=="True")
if Sim['Build CDF']:
Sim['Plot CDF'] = (inpt['Plot CDF']=="True")
Sim['Build rare paths'] = (inpt['Build rare paths']=="True")
if Sim['Build rare paths']:
Sim['Levels'] = [float(lev) for lev in inpt['Levels'].split()]
Sim['Plot rare paths'] = (inpt['Plot rare paths']=="True")
if inpt['Simulation name'] == 'KS':
# scalars for ETDRK4
h = Timestep
Sim['Lx/pi'] = float(inpt['Lx/pi'])
k = np.transpose(np.conj(np.concatenate((np.arange(0, Ndof/2.0), np.array([0]), np.arange(-Ndof/2.0+1.0, 0))))) / (float(inpt['Lx/pi'])/2.0)
ksorted = list(abs(k))
ksorted.sort()
kalias = ksorted[int(len(ksorted)*2/3)]
indexAlias = np.argwhere(abs(k)>kalias)
L = k**2 - k**4
E = np.exp(h*L)
E_2 = np.exp(h*L/2)
M = 16
r = np.exp(1j*np.pi*(np.arange(1, M+1)-0.5) / M)
LR = h*np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], Ndof, axis=0)
Q = h*np.real(np.mean((np.exp(LR/2)-1)/LR, axis=1))
f1 = h*np.real(np.mean((-4-LR+np.exp(LR)*(4-3*LR+LR**2))/LR**3, axis=1))
f2 = h*np.real(np.mean((2+LR+np.exp(LR)*(-2+LR))/LR**3, axis=1))
f3 = h*np.real(np.mean((-4-3*LR-LR**2+np.exp(LR)*(4-LR))/LR**3, axis=1))
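# E and E_2 are the exponential integrating factors exp(h*L) and exp(h*L/2);
# Q, f1, f2, f3 are the ETDRK4 phi-function weights, evaluated through the
# complex contour mean above to avoid cancellation for small h*L (the
# Kassam & Trefethen 2005 recipe).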
tmax = Tf
nmax = round(tmax/h)
g = -0.5j*k
# Necessary data for simulations
Sim['x'] = float(inpt['Lx/pi'])*np.pi*np.linspace(1,Ndof,Ndof)/Ndof
Sim['E'] = np.reshape(E,(Ndof,1))
Sim['E_2'] = np.reshape(E_2,(Ndof,1))
Sim['Q'] = np.reshape(Q,(Ndof,1))
Sim['f1'] = np.reshape(f1,(Ndof,1))
Sim['f2'] = np.reshape(f2,(Ndof,1))
Sim['f3'] = np.reshape(f3,(Ndof,1))
Sim['nmax'] = nmax
Sim['nplt'] = 1
Sim['g'] = np.reshape(g,(Ndof,1))
Sim['k'] = np.reshape(k,(Ndof,1))
Sim['indexAlias'] = indexAlias
Sim['epsilon_init'] = float(inpt['epsilon_init'])
# forward step and qoi
Sim['stepFunc'] = ksStepETDRK4
Sim['qoiFunc'] = ksqoi
# Initial conditions
ICType = inpt['ICType']
if ICType=='file':
fileNameIC = inpt['fileNameIC']
Sim['u0'] = np.load(fileNameIC)
elif ICType=='default':
x = Sim['x']
Sim['u0'] = np.cos(x/16)*(1+np.sin(x/16))
else :
print('IC type not recognized')
if inpt['Simulation name'] == 'KSFrontBack':
# scalars for ETDRK4
h = Timestep
Sim['Lx/pi'] = float(inpt['Lx/pi'])
k = np.transpose(np.conj(np.concatenate((np.arange(0, Ndof/2.0), np.array([0]), np.arange(-Ndof/2.0+1.0, 0))))) / (float(inpt['Lx/pi'])/2.0)
ksorted = list(abs(k))
ksorted.sort()
kalias = ksorted[int(len(ksorted)*2/3)]
indexAlias = np.argwhere(abs(k)>kalias)
L = k**2 - k**4
E = np.exp(h*L)
E_2 = np.exp(h*L/2)
M = 16
r = np.exp(1j*np.pi*(np.arange(1, M+1)-0.5) / M)
LR = h*np.transpose(np.repeat([L], M, axis=0)) + np.repeat([r], Ndof, axis=0)
Q = h*np.real(np.mean((np.exp(LR/2)-1)/LR, axis=1))
f1 = h*np.real(np.mean((-4-LR+np.exp(LR)*(4-3*LR+LR**2))/LR**3, axis=1))
f2 = h*np.real(np.mean((2+LR+np.exp(LR)*(-2+LR))/LR**3, axis=1))
f3 = h*np.real(np.mean((-4-3*LR-LR**2+np.exp(LR)*(4-LR))/LR**3, axis=1))
tmax = Tf
nmax = round(tmax/h)
g = -0.5j*k
Sim['x'] = float(inpt['Lx/pi'])*np.pi*np.linspace(1,Ndof,Ndof)/Ndof
Sim['E'] = np.reshape(E,(Ndof,1))
Sim['E_2'] = np.reshape(E_2,(Ndof,1))
Sim['Q'] = np.reshape(Q,(Ndof,1))
Sim['f1'] = np.reshape(f1,(Ndof,1))
Sim['f2'] = np.reshape(f2,(Ndof,1))
Sim['f3'] = np.reshape(f3,(Ndof,1))
Sim['nmax'] = nmax
Sim['nplt'] = 1
Sim['g'] = np.reshape(g,(Ndof,1))
Sim['k'] = np.reshape(k,(Ndof,1))
Sim['indexAlias'] = indexAlias
# Necessary data for simulations
beta = float(inpt['beta'])
Lback = (k**2 - k**4)/(1+beta*k**4)
Eback = np.exp(-h*Lback)
E_2back = np.exp(-h*Lback/2)
LRback = -h*np.transpose(np.repeat([Lback], M, axis=0)) + np.repeat([r], Ndof, axis=0)
Qback = -h*np.real(np.mean((np.exp(LRback/2)-1)/LRback, axis=1))
f1back = -h*np.real(np.mean((-4-LRback+np.exp(LRback)*(4-3*LRback+LRback**2))/LRback**3, axis=1)/(1+beta*k**4))
f2back = -h*np.real(np.mean((2+LRback+np.exp(LRback)*(-2+LRback))/LRback**3, axis=1)/(1+beta*k**4))
f3back = -h*np.real(np.mean((-4-3*LRback-LRback**2+np.exp(LRback)*(4-LRback))/LRback**3, axis=1)/(1+beta*k**4))
g = -0.5j*k
Sim['Eback'] = np.reshape(Eback,(Ndof,1))
Sim['E_2back'] = np.reshape(E_2back,(Ndof,1))
Sim['Qback'] = np.reshape(Qback,(Ndof,1))
Sim['f1back'] = np.reshape(f1back,(Ndof,1))
Sim['f2back'] = np.reshape(f2back,(Ndof,1))
Sim['f3back'] = np.reshape(f3back,(Ndof,1))
Sim['beta'] = float(inpt['beta'])
# forward step and qoi
Sim['forwardStepFunc'] = ksStepETDRK4
Sim['backwardStepFunc'] = ksStepBackRegularizedETDRK4
Sim['qoiFunc'] = ksqoi
# Initial conditions
Sim['epsilon_init'] = float(inpt['epsilon_init'])
ICType = inpt['ICType']
if ICType=='file':
fileNameIC = inpt['fileNameIC']
Sim['u0'] = np.load(fileNameIC)
elif ICType=='default':
Sim['u0'] = np.cos(x/16)*(1+np.sin(x/16))
else :
print('IC type not recognized')
# Initial conditions
ICType = inpt['ICType']
if ICType=='file':
fileNameIC = inpt['fileNameIC']
Sim['u0'] = np.load(fileNameIC)
elif ICType=='default':
x = Sim['x']
Sim['u0'] = np.cos(x/16)*(1+np.sin(x/16))
else :
print('IC type not recognized')
if inpt['Simulation name'] == 'L96':
tmax = Tf
nmax = round(tmax/Timestep)
# Initial condition and grid setup
epsilon_init = float(inpt['epsilon_init'])
R = float(inpt['R L96'])
im = np.zeros(Ndof)
# general libraries
import warnings
import numpy as np
# image processing libraries
from scipy import ndimage, interpolate, fft, signal
from skimage.transform import radon
from skimage.measure import ransac
from sklearn.cluster import KMeans
from ..generic.filtering_statistical import make_2D_Gaussian, mad_filtering
from ..generic.handler_im import get_grad_filters
# frequency preparation
def perdecomp(img):
"""calculate the periodic and smooth components of an image
Parameters
----------
img : np.array, size=(m,n)
array with intensities
Returns
-------
per : np.array, size=(m,n)
periodic component
cor : np.array, size=(m,n)
smooth component
References
----------
.. [1] <NAME>. "Periodic plus smooth image decomposition", Journal of
mathematical imaging and vision vol. 39.2 pp. 161-179, 2011.
Example
-------
>>> import numpy as np
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,_,_,_,_ = create_sample_image_pair(d=2**7, max_range=1)
>>> per,cor = perdecomp(im1)
>>> spec1 = np.fft.fft2(per)
"""
assert type(img)==np.ndarray, ("please provide an array")
img = img.astype(float)
if img.ndim==2:
(m, n) = img.shape
per = np.zeros((m, n), dtype=float)
per[+0,:] = +img[0,:] -img[-1,:]
per[-1,:] = -per[0,:]
per[:,+0] = per[:,+0] +img[:,+0] -img[:,-1]
per[:,-1] = per[:,-1] -img[:,+0] +img[:,-1]
elif img.ndim==3:
(m, n, b) = img.shape
per = np.zeros((m, n, b), dtype=float)
per[+0,:,:] = +img[0,:,:] -img[-1,:,:]
per[-1,:,:] = -per[0,:,:]
per[:,+0,:] = per[:,+0,:] +img[:,+0,:] -img[:,-1,:]
per[:,-1,:] = per[:,-1,:] -img[:,+0,:] +img[:,-1,:]
fy = np.cos( 2*np.pi*( np.arange(0,m) )/m )
fx = np.cos( 2*np.pi*( np.arange(0,n) )/n )
Fx = np.repeat(fx[np.newaxis,:],m,axis=0)
Fy = np.repeat(fy[:,np.newaxis],n,axis=1)
Fx[0,0] = 0
if img.ndim==3:
Fx = np.repeat(Fx[:,:,np.newaxis], b, axis=2)
Fy = np.repeat(Fy[:,:,np.newaxis], b, axis=2)
cor = np.real( np.fft.ifftn( np.fft.fft2(per) *.5/ (2-Fx-Fy)))
else:
cor = np.real( np.fft.ifft2( np.fft.fft2(per) *.5/ (2-Fx-Fy)))
per = img-cor
return (per, cor)
def normalize_power_spectrum(Q):
"""transform spectrum to complex vectors with unit length
Parameters
----------
Q : np.array, size=(m,n), dtype=complex
cross-spectrum
Returns
-------
Qn : np.array, size=(m,n), dtype=complex
normalized cross-spectrum, that is elements with unit length
Example
-------
>>> import numpy as np
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,im2,_,_,_ = create_sample_image_pair(d=2**4, max_range=1)
>>> spec1,spec2 = np.fft.fft2(im1), np.fft.fft2(im2)
>>> Q = spec1 * np.conjugate(spec2) # fourier based image matching
    >>> Qn = normalize_power_spectrum(Q)
"""
assert type(Q)==np.ndarray, ("please provide an array")
Qn = np.divide(Q, abs(Q), out=np.zeros_like(Q), where=Q!=0)
return Qn
def make_fourier_grid(Q, indexing='ij', system='radians'):
"""
The four quadrants of the coordinate system of the discrete Fourier
transform are flipped. This function gives its coordinate system as it
would be in a map (xy) or pixel based (ij) system.
Parameters
----------
Q : np.array, size=(m,n), dtype=complex
Fourier based (cross-)spectrum.
indexing : {‘xy’, ‘ij’}
* "xy" : using map coordinates
* "ij" : using local image coordinates
    system : {‘radians’, ‘unit’, ‘normalized’, ‘pixel’}
the extent of the cross-spectrum can span from
* "radians" : -pi..+pi (default)
* "unit" : -1...+1
* "normalized" : -0.5...+0.5
* "pixel" : -m/2...+m/2
Returns
-------
    F_1 : np.array, size=(m,n), dtype=float
        first coordinate index of the Fourier spectrum in a map system.
    F_2 : np.array, size=(m,n), dtype=float
        second coordinate index of the Fourier spectrum in a map system.
Notes
-----
.. code-block:: text
metric system: Fourier-based flip
y +------><------+
^ | |
| | |
| v v
<------+-------> x
| ^ ^
| | |
v +------><------+
It is important to know what type of coordinate systems exist, hence:
.. code-block:: text
coordinate | coordinate ^ y
system 'ij'| system 'xy' |
| |
| j | x
--------+--------> --------+-------->
| |
| |
| i |
v |
"""
assert type(Q)==np.ndarray, ("please provide an array")
(m,n) = Q.shape
if indexing=='ij':
(I_grd,J_grd) = np.meshgrid(np.arange(0,n)-(n//2),
np.arange(0,m)-(m//2), \
indexing='ij')
F_1,F_2 = I_grd/n, J_grd/m
else:
fy = np.flip((np.arange(0,m)-(m/2)) /m)
fx = (np.arange(0,n)-(n/2)) /n
F_1 = np.repeat(fx[np.newaxis,:],m,axis=0)
F_2 = np.repeat(fy[:,np.newaxis],n,axis=1)
if system=='radians': # what is the range of the axis
F_1 *= 2*np.pi
F_2 *= 2*np.pi
    elif system=='pixel':
        F_1 *= n
        F_2 *= m
elif system=='unit':
F_1 *= 2
F_2 *= 2
F_1 = np.fft.fftshift(F_1)
F_2 = np.fft.fftshift(F_2)
return F_1, F_2
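# Minimal usage sketch (illustrative addition; variable names are assumptions): the
# grids share the shape of the input spectrum and, with system='radians', stay
# within -pi..+pi.
#
# >>> Q_demo = np.fft.fft2(np.random.rand(8, 8))
# >>> F_1, F_2 = make_fourier_grid(Q_demo, indexing='xy', system='radians')
# >>> F_1.shape == Q_demo.shape and float(np.abs(F_1).max()) <= np.pi
# True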
# frequency matching filters
def raised_cosine(I, beta=0.35):
""" raised cosine filter
Parameters
----------
I : np.array, size=(m,n)
array with intensities
beta : float, default=0.35
roll-off factor
Returns
-------
W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
tpss
References
----------
.. [1] Stone et al. "A fast direct Fourier-based algorithm for subpixel
registration of images." IEEE Transactions on geoscience and remote
sensing. vol. 39(10) pp. 2235-2243, 2001.
.. [2] Leprince, et.al. "Automatic and precise orthorectification,
coregistration, and subpixel correlation of satellite images,
application to ground deformation measurements", IEEE Transactions on
geoscience and remote sensing vol. 45.6 pp. 1529-1558, 2007.
Example
-------
>>> import numpy as np
>>> from ..generic.test_tools import create_sample_image_pair
>>> im1,im2,_,_,_ = create_sample_image_pair(d=2**4, max_range=1)
>>> spec1,spec2 = np.fft.fft2(im1), np.fft.fft2(im2)
>>> rc1 = raised_cosine(spec1, beta=0.35)
>>> rc2 = raised_cosine(spec2, beta=0.50)
>>> Q = (rc1*spec1) * np.conjugate((rc2*spec2)) # Fourier based image matching
    >>> Qn = normalize_power_spectrum(Q)
"""
assert type(I)==np.ndarray, ("please provide an array")
(m, n) = I.shape
Fx,Fy = make_fourier_grid(I, indexing='xy', system='normalized')
R = np.sqrt(Fx**2 + Fy**2) # radius
# filter formulation
Hamm = np.cos( (np.pi/(2*beta)) * (R - (.5-beta)))**2
selec = np.logical_and((.5 - beta) <= R , R<=.5)
# compose filter
W = np.zeros((m,n))
W[(.5 - beta) > R] = 1
W[selec] = Hamm[selec]
return W
def hamming_window(I):
""" create hanning filter
Parameters
----------
I : np.array, size=(m,n)
array with intensities
Returns
-------
    W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
    raised_cosine, cosine_bell, high_pass_circle, blackman_window,
    hanning_window
"""
assert type(I)==np.ndarray, ("please provide an array")
(m, n) = I.shape
W = np.sqrt(np.outer(np.hamming(m), np.hamming(n)))
W = np.fft.fftshift(W)
return W
def hanning_window(I):
""" create hanning filter
Parameters
----------
I : np.array, size=(m,n)
array with intensities
Returns
-------
    W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
raised_cosine, cosine_bell, high_pass_circle, blackman_window,
hamming_window
"""
assert type(I)==np.ndarray, ("please provide an array")
(m, n) = I.shape
W = np.sqrt(np.outer(np.hanning(m), np.hanning(n)))
W = np.fft.fftshift(W)
return W
def blackman_window(I):
""" create blackman filter
Parameters
----------
I : np.array, size=(m,n)
array with intensities
Returns
-------
    W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
raised_cosine, cosine_bell, high_pass_circle, hamming_window,
hanning_window
"""
assert type(I)==np.ndarray, ("please provide an array")
(m, n) = I.shape
W = np.sqrt(np.outer(np.blackman(m), np.blackman(n)))
W = np.fft.fftshift(W)
return W
def kaiser_window(I, beta=14.):
""" create kaiser filter
Parameters
----------
I : np.array, size=(m,n)
array with intensities
beta: float
0.0 - rectangular window
5.0 - similar to Hamming window
6.0 - similar to Hanning window
8.6 - similar to Blackman window
Returns
-------
    W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
raised_cosine, cosine_bell, high_pass_circle, hamming_window,
hanning_window
"""
assert type(I)==np.ndarray, ("please provide an array")
(m, n) = I.shape
W = np.sqrt(np.outer(np.kaiser(m, beta), np.kaiser(n, beta)))
W = np.fft.fftshift(W)
return W
def low_pass_rectancle(I, r=0.50):
""" create hard low-pass filter
Parameters
----------
I : np.array, size=(m,n)
array with intensities
r : float, default=0.5
radius of the rectangle, r=.5 is same as its width
Returns
-------
W : np.array, size=(m,n), dtype=bool
weighting mask
See Also
--------
low_pass_circle, low_pass_pyramid, low_pass_bell
References
----------
.. [1] Takita et al. "High-accuracy subpixel image registration based on
phase-only correlation" IEICE transactions on fundamentals of
electronics, communications and computer sciences, vol.86(8)
pp.1925-1934, 2003.
"""
assert type(I)==np.ndarray, ("please provide an array")
Fx,Fy = make_fourier_grid(I, indexing='xy', system='normalized')
# filter formulation
W = np.logical_and(np.abs(Fx)<=r, np.abs(Fy)<=r)
return W
def low_pass_pyramid(I, r=0.50):
""" create low-pass filter with pyramid shape
Parameters
----------
I : np.array, size=(m,n)
array with intensities
r : float, default=0.5
radius of the mother rectangle, r=.5 is same as its width
Returns
-------
    W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
low_pass_rectancle, low_pass_circle, low_pass_bell
References
----------
.. [1] Takita et al. "High-accuracy subpixel image registration based on
phase-only correlation" IEICE transactions on fundamentals of
electronics, communications and computer sciences, vol.86(8)
pp.1925-1934, 2003.
"""
assert type(I)==np.ndarray, ("please provide an array")
R = low_pass_rectancle(I, r)
W = signal.convolve2d(R.astype(float), R.astype(float), \
mode='same', boundary='wrap')
W = np.fft.fftshift(W/np.max(W))
return W
def low_pass_bell(I, r=0.50):
""" create low-pass filter with a bell shape
Parameters
----------
I : np.array, size=(m,n)
array with intensities
r : float, default=0.5
radius of the mother rectangle, r=.5 is same as its width
Returns
-------
    W : np.array, size=(m,n), dtype=float
weighting mask
See Also
--------
low_pass_rectancle, low_pass_circle, low_pass_pyramid
References
----------
.. [1] Takita et al. "High-accuracy subpixel image registration based on
phase-only correlation" IEICE transactions on fundamentals of
electronics, communications and computer sciences, vol.86(8)
pp.1925-1934, 2003.
"""
assert type(I)==np.ndarray, ("please provide an array")
R1 = low_pass_rectancle(I, r)
R2 = low_pass_pyramid(I, r)
W = signal.convolve2d(R1.astype(float), R2.astype(float), \
mode='same', boundary='wrap')
W = np.fft.fftshift(W/ | np.max(W) | numpy.max |
from datetime import datetime
from PIL import Image
import numpy as np
# import matplotlib.pyplot as plt
# plt.switch_backend('agg')
import io
from torchvision import transforms as trans
# from data.data_pipe import de_preprocess
import torch
from backbone.model import l2_norm
import pdb
import cv2
from pathlib import Path
from tqdm import tqdm
def separate_bn_paras(modules):
if not isinstance(modules, list):
modules = [*modules.modules()]
paras_only_bn = []
paras_wo_bn = []
for layer in modules:
if 'model' in str(layer.__class__):
continue
if 'container' in str(layer.__class__):
continue
else:
if 'batchnorm' in str(layer.__class__):
paras_only_bn.extend([*layer.parameters()])
else:
paras_wo_bn.extend([*layer.parameters()])
return paras_only_bn, paras_wo_bn
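# Illustrative usage sketch (not part of the original file): the split above lets an
# optimizer apply weight decay only to non-BN parameters; `model` and the
# hyper-parameters below are assumptions.
#
# paras_only_bn, paras_wo_bn = separate_bn_paras(model)
# optimizer = torch.optim.SGD([
#     {'params': paras_wo_bn, 'weight_decay': 5e-4},
#     {'params': paras_only_bn}   # no weight decay on batch-norm parameters
# ], lr=0.1, momentum=0.9)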
def prepare_facebank(conf, model, mtcnn, tta = True):
model.eval()
embeddings = []
names = []
for path in tqdm(Path(conf.facebank_path).iterdir()):
if path.is_file():
continue
else:
embs = []
for file in path.iterdir():
if not file.is_file():
continue
else:
try:
img = Image.open(file)
image = np.array(img)
if image.shape[2] >3:
img = Image.fromarray(image[...,:3])
except:
continue
if img.size != (112, 112):
img = mtcnn.align(img)
if img is None:
continue
with torch.no_grad():
if tta:
mirror = trans.functional.hflip(img)
emb = model(conf.test_transform(img).to(conf.device).unsqueeze(0))
emb_mirror = model(conf.test_transform(mirror).to(conf.device).unsqueeze(0))
embs.append(l2_norm(emb + emb_mirror))
else:
embs.append(model(conf.test_transform(img).to(conf.device).unsqueeze(0)))
if len(embs) == 0:
continue
embedding = torch.cat(embs).mean(0,keepdim=True)
embeddings.append(embedding)
names.append(path.name)
names.append('Unknown')
embeddings = torch.cat(embeddings)
names = | np.array(names) | numpy.array |
import numpy as np
import matplotlib.pyplot as plt
import scipy
import scipy.linalg as la
from copy import copy
import matplotlib.cm as cm
import matplotlib
from scipy.linalg import lu as lu1
from mpl_toolkits.mplot3d import Axes3D
def e_generator(i,j,size):
length = (size-1)**2
e = np.zeros(length)
e[(i-1)*(size-1)+j-1] = 1
return e
def A_generator(N):
Q = np.zeros([(N-1)**2, (N-1)**2])
for i in range((N-1)**2):
Q[i][i] = 4.0
if i % (N-1) != 0:
Q[i][i-1] = -1.0;
if i % (N-1) != N-2:
Q[i][i+1] = -1.0;
if i // (N-1) != 0.0:
if(i == 1):
print(i / (N-1))
Q[i][(i-(N-1))] = -1.0;
if i // (N-1) < N-2:
Q[i][(i+(N-1))] = -1.0;
return Q
'''notice that Q is the matrix. Q graph is for plt.
Black: value 4
Gray: value -1
White: value 0
'''
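# Quick sanity check (illustrative addition): for N=3 the interior grid has
# (N-1)**2 = 4 unknowns, and A_generator(3) reproduces the classic 4x4 discrete
# Laplacian:
#   [[ 4., -1., -1.,  0.],
#    [-1.,  4.,  0., -1.],
#    [-1.,  0.,  4., -1.],
#    [ 0., -1., -1.,  4.]]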
def lu(A):
'''actually this is p^-1'''
n = A.shape[0]
U = A.copy()
L = np.eye(n, dtype=np.double)
P = | np.eye(n, dtype=np.double) | numpy.eye |
'''
import user's own dataset using low level API when the original data is float
this demo is modified based on exp11
'''
import tensorflow as tf
import numpy as np
import glob
import cv2
import os
import h5py
os.environ["CUDA_DEVICE_ORDER"] = "PCI_BUS_ID"
os.environ["CUDA_VISIBLE_DEVICES"] = "0"
def _int64_feature(value):
return tf.train.Feature(int64_list=tf.train.Int64List(value=[value]))
##############################################################################
# this part is changed
def _float_feature(value):
return tf.train.Feature(float_list=tf.train.FloatList(value=value))
# end change
##############################################################################
##############################################################################
# this part is changed
def read_tfrecord(tf_filename, size):
queue = tf.train.string_input_producer([tf_filename])
reader = tf.TFRecordReader()
__, serialized_example = reader.read(queue)
feature = {
'image_raw': tf.FixedLenFeature([size[0]*size[1]*size[2]], tf.float32),
'height': tf.FixedLenFeature([], tf.int64),
'width': tf.FixedLenFeature([], tf.int64)
}
features = tf.parse_single_example(serialized_example, features=feature)
image = features['image_raw']
image = tf.reshape(image, size)
return image
# end change
##############################################################################
if __name__ == "__main__":
dataset_name = 'dataset.tfrecords'
## save my own images to dataset (100 images of size 180 x 180)
print("saving data ...\n")
# build tfrecord file
writer = tf.python_io.TFRecordWriter(dataset_name)
# reader for original data
h5f = h5py.File('my_data.h5', 'r')
keys = list(h5f.keys())
# save my images to dataset
##############################################################################
# this part is changed
for key in keys:
img = np.array(h5f[key]).astype(dtype=np.float32)
height = img.shape[0]
width = img.shape[1]
feature = {
'image_raw': _float_feature(img.reshape( (height*width) )),
'height': _int64_feature(height),
'width': _int64_feature(width)
}
example = tf.train.Example(features=tf.train.Features(feature=feature))
writer.write(example.SerializeToString())
h5f.close()
writer.close()
# end change
##############################################################################
## load the dataset and display them with tensorboard
print("loading data ...\n")
# define batch
batch_size = 10
Image = read_tfrecord(dataset_name, size=[180,180,1])
data_batch = tf.train.shuffle_batch([Image],
batch_size = batch_size,
capacity = 1000 + 3 * batch_size,
num_threads = 2,
min_after_dequeue = 1000)
##############################################################################
# this part is changed
img_batch = tf.placeholder(tf.float32, [None, 180, 180, 1])
# end change
##############################################################################
# summary
tf.summary.image(name='display', tensor=img_batch, max_outputs=4)
# begin loading
epoch = 0
with tf.Session() as sess:
coord = tf.train.Coordinator()
threads = tf.train.start_queue_runners(sess=sess, coord=coord)
summary = tf.summary.merge_all()
summ_writer = tf.summary.FileWriter('logs', sess.graph)
while epoch < 10:
print("epoch %d" % epoch)
batch = sess.run(data_batch)
print([ | np.min(batch) | numpy.min |
# coding: utf-8
# # Advanced Lane Finding Using OpenCV
# **In this project, I used OpenCV to write a software pipeline to identify the lane boundaries in a video from a front-facing camera on a car.**
# ## Pipeline architecture:
# - **Compute Camera Calibration.**
# - **Apply Distortion Correction**.
# - **Apply a Perspective Transform.**
# - **Create a Thresholded Binary Image.**
# - **Define the Image Processing Pipeline.**
# - **Detect Lane Lines.**
# - **Determine the Curvature of the Lane and Vehicle Position.**
# - **Visual display of the Lane Boundaries and Numerical Estimation of Lane Curvature and Vehicle Position.**
# - **Process Project Videos.**
#
# I'll explain each step in details below.
# #### Environement:
# - Ubuntu 16.04
# - Anaconda 5.0.1
# - Python 3.6.2
# - OpenCV 3.1.0
# In[1]:
# Importing Python libraries
import numpy as np
import cv2
import pickle
import glob
import matplotlib.pyplot as plt
import matplotlib.image as mpimg
import os
from ipywidgets import interact, interactive, fixed
from moviepy.editor import VideoFileClip
from IPython.display import HTML
# In[2]:
def display(img1, img2, lbl1, lbl2, x, y, img3=[], lbl3=[], cmap=None, n = 2):
"""
Diplay the input images side-by-side.
Parameters:
img1: Input image #1.
img2: Input image #2.
lbl1: Label for input image #1.
lbl2: Label for input image #2.
x, y: Figure size.
        cmap (Default = None): Used to display gray images.
        img3, lbl3 (Default = []): Optional third image and its label.
        n (Default = 2): Number of images shown side-by-side.
"""
plt.figure(figsize=(x, y))
plt.subplot(1, n, 1)
plt.imshow(img1, cmap = cmap)
plt.xlabel(lbl1, fontsize=15)
plt.xticks([])
plt.yticks([])
plt.subplot(1, n, 2)
plt.imshow(img2, cmap = cmap)
plt.xlabel(lbl2, fontsize=15)
plt.xticks([])
plt.yticks([])
if n == 3:
plt.subplot(1, n, 3)
plt.imshow(img3, cmap = cmap)
plt.xlabel(lbl3, fontsize=15)
plt.xticks([])
plt.yticks([])
plt.show()
# ---
# ## Step 1: Compute Camera Calibration
# The OpenCV functions `cv2.findChessboardCorners()` and `cv2.drawChessboardCorners()` are used for image calibration. We have 20 images of a chessboard, located in `./camera_cal`, taken from different angles with the same camera, and we'll use them as input for camera calibration routine.
#
# `cv2.findChessboardCorners()` attempts to determine whether the input image is a view of the chessboard pattern and locate the internal chessboard corners, and then `cv2.drawChessboardCorners()` draws individual chessboard corners detected.
#
# Arrays of object points, corresponding to the location of internal corners of a chessboard, and image points, the pixel locations of the internal chessboard corners determined by `cv2.findChessboardCorners()`, are fed to `cv2.drawChessboardCorners()` which returns camera calibration and distortion coefficients.
#
#
# These will then be used by the OpenCV `cv2.calibrateCamera()` to find the camera intrinsic and extrinsic parameters from several views of a calibration pattern. These parameters will be fed to `cv2.undistort` function to correct for distortion on any image produced by the same camera.
# In[5]:
cal_images = glob.glob('camera_cal/*.jpg')
test_images = glob.glob('test_images/*.jpg')
nx, ny = 9, 6
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)
# In[6]:
def calibrate_camera(cal_images, nx, ny):
"""
Compute camera calibration and return the camera intrinsic and extrinsic parameters.
Parameters:
cal_images: A list of the chessboard calibration images.
nx, ny: Chessboard dimensions.
"""
objpoints = [] # 3D points
imgpoints = [] # 2D points
objp = np.zeros((nx*ny,3), np.float32)
objp[:,:2] = np.mgrid[0:nx,0:ny].T.reshape(-1, 2)
for file in cal_images:
img = cv2.imread(file)
gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
ret, corners = cv2.findChessboardCorners(gray, (nx, ny), None)
if ret == True:
objpoints.append(objp)
imgpoints.append(corners)
ret, mtx, dist, rvecs, tvecs = cv2.calibrateCamera(objpoints, imgpoints, gray.shape[::-1],None,None)
return mtx, dist
# In[7]:
mtx, dist = calibrate_camera(cal_images, nx, ny)
# ---
# ## Step 2: Apply Distortion Correction
# OpenCV provides `cv2.undistort` function, which transforms an image to compensate for radial and tangential lens distortion.
# In[8]:
def undistort(img, mtx, dist):
"""
Use the camera calibration parameters to correct the input image for distortion.
Parameters:
img: Input image.
mtx: Output floating-point camera matrix.
dist: Output vector of distortion coefficients.
"""
undist = cv2.undistort(img, mtx, dist, None, mtx)
return undist
# In[9]:
# Testing distortion correction on cal_images
img = cv2.imread(cal_images[0])
undist = undistort(img, mtx, dist)
display(img, undist, 'Original image', 'Distortion corrected image', 14, 7)
# In[10]:
# Testing distortion correction on test_images
img = cv2.cvtColor(cv2.imread(test_images[6]), cv2.COLOR_BGR2RGB)
undist_img_ex = undistort(img, mtx, dist)
display(img, undist_img_ex, 'Original image', 'Distortion corrected image', 14, 7)
# The effect of `undistort` is particularly noticeable in the changed shape of the car hood at the bottom corners of the image.
# ---
# ## Step 3: Apply a Perspective Transform
# A common task in autonomous driving is to convert the vehicle’s camera view of the scene into a top-down “bird’s-eye” view. We'll use OpenCV's `cv2.getPerspectiveTransform()` and `cv2.warpPerspective()` to do this task.
# In[11]:
image_shape = undist_img_ex.shape
print("Image shape:", image_shape)
plt.imshow(undist_img_ex)
plt.show()
# In[12]:
# Define the region of interest
src = np.float32([[190, 700], [1110, 700], [720, 470], [570, 470]])
bottom_left = src[0][0]+100, src[0][1]
bottom_right = src[1][0]-200, src[1][1]
top_left = src[3][0]-250, 1
top_right = src[2][0]+200, 1
dst = np.float32([bottom_left, bottom_right, top_right, top_left])
# In[13]:
def perspective_transform(img, src, dst):
"""
Convert the vehicle’s camera view of the scene into a top-down “bird’s-eye” view.
Parameters:
img: Input image.
src: Source points.
dst: Destination points.
"""
image_shape = img.shape
img_size = (image_shape[1], image_shape[0])
# Given src and dst points, calculate the perspective transform matrix
M = cv2.getPerspectiveTransform(src, dst)
Minv = cv2.getPerspectiveTransform(dst, src)
# Warp the image using OpenCV warpPerspective()
warped = cv2.warpPerspective(img, M, img_size)
# Return the resulting image and matrix
return warped, M, Minv
# In[14]:
# Applying perspective transform to several test_images
display(undistort(cv2.cvtColor(cv2.imread(test_images[1]), cv2.COLOR_BGR2RGB), mtx, dist),
perspective_transform(undistort(cv2.cvtColor(cv2.imread(test_images[1]), cv2.COLOR_BGR2RGB),
mtx, dist), src, dst)[0],
'Original image', 'Warped image', 14, 7)
display(undistort(cv2.cvtColor(cv2.imread(test_images[7]), cv2.COLOR_BGR2RGB), mtx, dist),
perspective_transform(undistort(cv2.cvtColor(cv2.imread(test_images[7]), cv2.COLOR_BGR2RGB),
mtx, dist), src, dst)[0],
'Original image', 'Warped image', 14, 7)
display(undistort(cv2.cvtColor(cv2.imread(test_images[6]), cv2.COLOR_BGR2RGB), mtx, dist),
perspective_transform(undistort(cv2.cvtColor(cv2.imread(test_images[6]), cv2.COLOR_BGR2RGB),
mtx, dist), src, dst)[0],
'Original image', 'Warped image', 14, 7)
# In[15]:
undist_example_warped = perspective_transform(undist_img_ex, src, dst)[0]
# ---
# ## Step 4: Create a Thresholded Binary Image
# Now, we will use color transform and Sobel differentiation to detect the lane lines in the image.
# ### Exploring different color spaces
# #### RGB color space:
# In[16]:
undist_example_RGB = undist_example_warped
undist_example_R = undist_example_RGB[:,:,0]
undist_example_G = undist_example_RGB[:,:,1]
undist_example_B = undist_example_RGB[:,:,2]
display(undist_example_RGB, undist_example_R, 'Original RGB image', 'RGB R-Channel', 14, 7)
display(undist_example_G, undist_example_B, 'RGB G-Channel', 'RGB B-Channel', 14, 7)
# #### HSV color space:
# This type of color model closely emulates models of human color perception. While in other color models, such as RGB, an image is treated as an additive result of three base colors, the three channels of HSV represent hue (H gives a measure of the spectral composition of a color), saturation (S gives the proportion of pure light of the dominant wavelength, which indicates how far a color is from a gray of equal brightness), and value (V gives the brightness relative to
# the brightness of a similarly illuminated white color) corresponding to the intuitive appeal of tint, shade, and tone.
# In[17]:
undist_example_HSV = cv2.cvtColor(undist_example_RGB, cv2.COLOR_RGB2HSV)
undist_example_HSV_H = undist_example_HSV[:,:,0]
undist_example_HSV_S = undist_example_HSV[:,:,1]
undist_example_HSV_V = undist_example_HSV[:,:,2]
display(undist_example_HSV, undist_example_HSV_H, 'Original HSV image', 'HSV H-Channel', 14, 7)
display(undist_example_HSV_S, undist_example_HSV_V, 'HSV S-Channel', 'HSV V-Channel', 14, 7)
# #### LAB color space:
# The Lab color space describes mathematically all perceivable colors in the three dimensions L for lightness and a and b for the color opponents green–red and blue–yellow.
# In[18]:
undist_example_LAB = cv2.cvtColor(undist_example_RGB, cv2.COLOR_RGB2Lab)
undist_example_LAB_L = undist_example_LAB[:,:,0]
undist_example_LAB_A = undist_example_LAB[:,:,1]
undist_example_LAB_B = undist_example_LAB[:,:,2]
display(undist_example_LAB, undist_example_LAB_L, 'Original LAB image', 'LAB L-Channel', 14, 7)
display(undist_example_LAB_A, undist_example_LAB_B, 'LAB A-Channel', 'LAB B-Channel', 14, 7)
# #### HLS color space:
# This model was developed to specify the values of hue, lightness, and saturation of a color in each channel. The difference with respect to the HSV color model is that the lightness of a pure color defined by HLS is equal to the lightness of a medium gray, while the brightness of a pure color defined by HSV is equal to the brightness of white.
# In[19]:
undist_example_HLS = cv2.cvtColor(undist_example_RGB, cv2.COLOR_RGB2HLS)
undist_example_HLS_H = undist_example_HLS[:,:,0]
undist_example_HLS_L = undist_example_HLS[:,:,1]
undist_example_HLS_S = undist_example_HLS[:,:,2]
display(undist_example_HLS, undist_example_HLS_H, 'Original HLS image', 'HLS H-Channel', 14, 7)
display(undist_example_HLS_L, undist_example_HLS_S, 'HLS L-Channel', 'HLS S-Channel', 14, 7)
# ### Color Space Thresholding
# As you may observe, the white lane lines are clearly highlighted in the L-channel of the HLS color space, and the yellow lines stand out in the B-channel of the LAB color space. We'll apply an HLS L-threshold and a LAB B-threshold to the image to highlight the lane lines.
# In[20]:
def hls_l_thresh(img, thresh=(220, 255)):
"""
Threshold the input image to the L-channel of the HLS color space.
Parameters:
img: HLS image.
thresh: Minimum and Maximum color intensity.
"""
img = img[:,:,1]
img = img*(255/np.max(img))
binary_output = np.zeros_like(img)
binary_output[(img > thresh[0]) & (img <= thresh[1])] = 1
return binary_output
# In[21]:
thresh_HLS = hls_l_thresh(undist_example_HLS)
display(undist_example_HLS, thresh_HLS, 'HLS image', 'L-thresholded HLS image', 14, 7, cmap = 'gray')
# In[22]:
def lab_b_thresh(img, thresh=(190, 255)):
"""
Threshold the input image to the B-channel of the LAB color space.
Parameters:
img: LAB image.
thresh: Minimum and Maximum color intensity.
"""
img = img[:,:,2]
if np.max(img) > 175:
img = img*(255/np.max(img))
binary_output = np.zeros_like(img)
binary_output[(img > thresh[0]) & (img <= thresh[1])] = 1
return binary_output
# In[23]:
thresh_LAB = lab_b_thresh(undist_example_LAB)
display(undist_example_LAB, thresh_LAB, 'LAB image', 'B-thresholded LAB image', 14, 7, cmap = 'gray')
# In[24]:
def threshold_color_space(img):
"""
Threshold the input image to the L-channel of the HLS color space and the B-channel of the LAB color space.
Parameters:
img: Input image.
"""
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_LAB = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
img_thresh_HLS = hls_l_thresh(img_HLS)
img_thresh_LAB = lab_b_thresh(img_LAB)
combined_img = np.zeros_like(img_thresh_HLS)
combined_img[((img_thresh_HLS == 1) | (img_thresh_LAB == 1))] = 1
return combined_img
# In[25]:
threshold_color_img = threshold_color_space(undist_example_warped)
display(undist_example_warped, threshold_color_img, 'RGB image', 'Thresholded image', 14, 7, cmap = 'gray')
# ### Sobel Differentiation
# Now, we'll explore different Sobel differentiation techniques, and try to come up with a combination that produces a better output than color space thresholding.
# In[26]:
def abs_sobel(img, orient='x', sobel_kernel=3, thresh=(25, 255)):
"""
    Apply absolute Sobel differentiation to the input image.
Parameters:
img: Input image.
orient (Default = x): Gradients direction.
sobel_kernel (Default = 3): Size of the extended Sobel kernel.
thresh (Default = (25, 255)): Minimum and Maximum gradient strength.
"""
sobel = cv2.Sobel(img, cv2.CV_64F, orient=='x', orient=='y', ksize= sobel_kernel)
abs_sobel = np.absolute(sobel)
scaled_sobel = np.uint8(255*abs_sobel/np.max(abs_sobel))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return sxbinary
# In[27]:
abs_sobel_example_LAB_B = abs_sobel(undist_example_LAB_B)
display(undist_example_LAB_B, abs_sobel_example_LAB_B, 'LAB B-Channel', 'After absolute Sobel', 14, 7, cmap='gray')
# In[28]:
abs_sobel_example_HLS_L = abs_sobel(undist_example_HLS_L)
display(undist_example_HLS_L, abs_sobel_example_HLS_L, 'HLS L-Channel', 'After absolute Sobel', 14, 7, cmap='gray')
# In[29]:
def mag_sobel(img, sobel_kernel=15, thresh=(25, 255)):
"""
    Apply magnitude Sobel differentiation to the input image.
Parameters:
img: Input image.
sobel_kernel (Default = 15): Size of the extended Sobel kernel.
thresh (Default = (25, 255)): Minimum and Maximum gradient strength.
"""
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize = sobel_kernel)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize = sobel_kernel)
mag_sobel = np.sqrt(np.square(sobelx) + np.square(sobely))
scaled_sobel = np.uint8(255*mag_sobel/np.max(mag_sobel))
sxbinary = np.zeros_like(scaled_sobel)
sxbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return sxbinary
# In[30]:
mag_sobel_example_LAB_B = mag_sobel(undist_example_LAB_B)
display(undist_example_LAB_B, mag_sobel_example_LAB_B, 'LAB B-Channel', 'After magnitude Sobel', 14, 7, cmap='gray')
# In[31]:
mag_sobel_example_HLS_L = mag_sobel(undist_example_HLS_L)
display(undist_example_HLS_L, mag_sobel_example_HLS_L, 'HLS L-Channel', 'After magnitude Sobel', 14, 7, cmap='gray')
# In[32]:
def dir_sobel(img, sobel_kernel=25, thresh=(0, 0.09)):
"""
    Apply direction Sobel differentiation to the input image.
Parameters:
img: Input image.
sobel_kernel (Default = 25): Size of the extended Sobel kernel.
thresh (Default = (0, 0.09)): Minimum and Maximum gradient strength.
"""
sobelx = cv2.Sobel(img, cv2.CV_64F, 1, 0, ksize=sobel_kernel)
sobely = cv2.Sobel(img, cv2.CV_64F, 0, 1, ksize=sobel_kernel)
abs_sobelx = np.absolute(sobelx)
abs_sobely = np.absolute(sobely)
grad_dir = np.arctan2(abs_sobely, abs_sobelx)
binary_output = np.zeros_like(grad_dir)
binary_output[(grad_dir >= thresh[0]) & (grad_dir <= thresh[1])] = 1
return binary_output
# In[33]:
dir_sobel_example_LAB_B = dir_sobel(undist_example_LAB_B)
display(undist_example_LAB_B, dir_sobel_example_LAB_B, 'LAB B-Channel', 'After direction Sobel', 14, 7, cmap='gray')
# In[34]:
dir_sobel_example_HLS_L = dir_sobel(undist_example_HLS_L)
display(undist_example_HLS_L, dir_sobel_example_HLS_L, 'HLS L-Channel', 'After direction Sobel', 14, 7, cmap='gray')
# In[35]:
def combined_sobel(img, mag_kernel_size, mag_thresh):
"""
    Apply both absolute and magnitude Sobel differentiation to the input image.
Parameters:
img: Input image.
mag_kernel_size: Size of the extended Sobel kernel.
mag_thresh: Minimum and Maximum gradient strength.
"""
img_abs = abs_sobel(img)
img_mag = mag_sobel(img, mag_kernel_size, mag_thresh)
combined_img = np.zeros_like(img_mag)
combined_img[((img_abs == 1) & (img_mag == 1))] = 1
return combined_img
# In[36]:
combined_HLS_L = combined_sobel(undist_example_HLS_L, 15, (25, 225))
display(undist_example_HLS_L, combined_HLS_L, 'HLS L-Channel', 'After absolute+magnitude Sobel', 14, 7, cmap='gray')
# In[37]:
combined_LAB_B = combined_sobel(undist_example_LAB_B, 15, (25, 225))
display(undist_example_LAB_B, combined_LAB_B, 'LAB B-Channel', 'After absolute+magnitude Sobel', 14, 7, cmap='gray')
# Now, we'll combine the absolute+magnitude Sobel outputs of both HLS and LAB.
# In[38]:
def combined_sobel_colors(img, mag_kernel_size, mag_thresh):
"""
    Combine Sobel differentiation results from applying it to different color spaces.
Parameters:
img: Input image.
mag_kernel_size: Size of the extended Sobel kernel.
mag_thresh: Minimum and Maximum gradient strength.
"""
img_HLS = cv2.cvtColor(img, cv2.COLOR_RGB2HLS)
img_LAB = cv2.cvtColor(img, cv2.COLOR_RGB2Lab)
combined_HLS_L = combined_sobel(img_HLS[:,:,1], mag_kernel_size, mag_thresh)
combined_LAB_B = combined_sobel(img_LAB[:,:,2], mag_kernel_size, mag_thresh)
combined_sobel_colors_img = np.zeros_like(combined_HLS_L)
combined_sobel_colors_img[((combined_HLS_L == 1) | (combined_LAB_B == 1))] = 1
return combined_sobel_colors_img
# In[39]:
combined_sobel_colors_img = combined_sobel_colors(undist_example_RGB, 15, (25, 225))
display(undist_example_RGB, combined_sobel_colors_img, 'RGB image', 'After absolute+magnitude Sobel', 14, 7, cmap='gray')
# ### Comparison between Color Thresholding and Sobel Differentiation
# We'll apply both color thresholding and Sobel differentiation to all the test images to explore which of the two techniques is better suited to the task.
# In[40]:
color_threshold = []
sobel_diff = []
test_images_warped = []
for file in test_images:
image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)
image = undistort(image, mtx, dist)
image = perspective_transform(image, src, dst)[0]
test_images_warped.append(image)
color_threshold.append(threshold_color_space(image))
sobel_diff.append(combined_sobel_colors(image, 15, (25, 225)))
# In[41]:
for original, color, sobel in zip(test_images_warped, color_threshold, sobel_diff):
display(original,
color,
'Original image',
'Color thresholded image',
14,
7,
sobel,
            'Sobel differentiated image',
cmap='gray',
n = 3)
# As you can see, although Sobel differentiation was able to capture the lane lines correctly, it also captured some noise around them. On the other hand, color thresholding was able to produce a clean output highlighting the lane lines.
# ---
# ## Step 5: Define the Image Processing Pipeline
# Now, we'll define the complete image processing function to read the raw image and apply the following steps:
# 1. Distortion Correction.
# 2. Perspective Transform.
# 3. Color Thresholding.
# In[42]:
def image_process(img):
"""
Apply undistortion, perspective transform, and color space thresholding to the input image.
Parameters:
img: Input image.
"""
# Undistort
img = undistort(img, mtx, dist)
# Perspective Transform
img, M, Minv = perspective_transform(img, src, dst)
# Create a thresholded binary image
img = threshold_color_space(img)
return img, Minv
# In[43]:
test_images_imgs = []
test_images_processed = []
for file in test_images:
image = cv2.cvtColor(cv2.imread(file), cv2.COLOR_BGR2RGB)
test_images_imgs.append(image)
image, Minv = image_process(image)
test_images_processed.append(image)
# In[44]:
for original, processed in zip(test_images_imgs, test_images_processed):
display(original,
processed,
'Original test image',
'Processed test image',
14,
7,
cmap='gray')
# ---
# ## Step 6: Detect the Lane Lines
# After applying calibration, thresholding, and a perspective transform to a road image, we should have a binary image where the lane lines stand out clearly. However, we still need to decide explicitly which pixels are part of the lines and which belong to the left line and which belong to the right line.
# ### Sliding Window Search
#
# We'll compute a histogram of the bottom half of the image and find the base of the left and right lane lines. Originally these locations were identified from the local maxima of the left and right halves of the histogram, but in the final implementation we used quarters of the histogram just left and right of the midpoint. This helped to reject lines from adjacent lanes. The function identifies 50 windows from which to identify lane pixels, each one centered on the midpoint of the pixels from the window below. This effectively "follows" the lane lines up to the top of the binary image, and speeds processing by only searching for activated pixels over a small portion of the image.
# In[45]:
def sliding_window(img):
"""
Fit a polynomial to the input binary image.
Parameters:
img: Input image.
"""
# Take a histogram of the bottom half of the image
histogram = np.sum(img[img.shape[0]//2:,:], axis=0)
# Find the peak of the left and right halves of the histogram
# These will be the starting point for the left and right lines
midpoint = np.int(histogram.shape[0]//2)
quarter_point = np.int(midpoint//2)
# Previously the left/right base was the max of the left/right half of the histogram
# this changes it so that only a quarter of the histogram (directly to the left/right) is considered
leftx_base = np.argmax(histogram[quarter_point:midpoint]) + quarter_point
rightx_base = np.argmax(histogram[midpoint:(midpoint+quarter_point)]) + midpoint
# Choose the number of sliding windows
nwindows = 50
# Set height of windows
window_height = np.int(img.shape[0]/nwindows)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
# Current positions to be updated for each window
leftx_current = leftx_base
rightx_current = rightx_base
# Set the width of the windows +/- margin
margin = 80
# Set minimum number of pixels found to recenter window
minpix = 40
# Create empty lists to receive left and right lane pixel indices
left_lane_inds = []
right_lane_inds = []
# Rectangle data for visualization
rectangle_data = []
# Step through the windows one by one
for window in range(nwindows):
# Identify window boundaries in x and y (and right and left)
win_y_low = img.shape[0] - (window+1)*window_height
win_y_high = img.shape[0] - window*window_height
win_xleft_low = leftx_current - margin
win_xleft_high = leftx_current + margin
win_xright_low = rightx_current - margin
win_xright_high = rightx_current + margin
rectangle_data.append((win_y_low, win_y_high, win_xleft_low, win_xleft_high, win_xright_low, win_xright_high))
# Identify the nonzero pixels in x and y within the window
good_left_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xleft_low) &
(nonzerox < win_xleft_high)).nonzero()[0]
good_right_inds = ((nonzeroy >= win_y_low) & (nonzeroy < win_y_high) & (nonzerox >= win_xright_low) &
(nonzerox < win_xright_high)).nonzero()[0]
# Append these indices to the lists
left_lane_inds.append(good_left_inds)
right_lane_inds.append(good_right_inds)
# If you found > minpix pixels, recenter next window on their mean position
if len(good_left_inds) > minpix:
leftx_current = np.int(np.mean(nonzerox[good_left_inds]))
if len(good_right_inds) > minpix:
rightx_current = np.int(np.mean(nonzerox[good_right_inds]))
# Concatenate the arrays of indices
left_lane_inds = np.concatenate(left_lane_inds)
right_lane_inds = np.concatenate(right_lane_inds)
# Extract left and right line pixel positions
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fit, right_fit = (None, None)
# Fit a second order polynomial to each
if len(leftx) != 0:
left_fit = np.polyfit(lefty, leftx, 2)
if len(rightx) != 0:
right_fit = np.polyfit(righty, rightx, 2)
visualization_data = (rectangle_data, histogram)
return left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data
# In[46]:
# Visualize the sliding windows over an example test image.
img = test_images_processed[0]
left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window(img)
h = img.shape[0]
left_fit_x_int = left_fit[0]*h**2 + left_fit[1]*h + left_fit[2]
right_fit_x_int = right_fit[0]*h**2 + right_fit[1]*h + right_fit[2]
rectangles = visualization_data[0]
histogram = visualization_data[1]
# Create an output image to draw on and visualize the result
out_img = np.uint8(np.dstack((img, img, img))*255)
# Generate x and y values for plotting
ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
for rect in rectangles:
# Draw the windows on the visualization image
cv2.rectangle(out_img,(rect[2],rect[0]),(rect[3],rect[1]),(0,255,0), 2)
cv2.rectangle(out_img,(rect[4],rect[0]),(rect[5],rect[1]),(0,255,0), 2)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [100, 200, 255]
plt.figure(figsize=(14, 7))
plt.subplot(1, 2, 1)
plt.imshow(original)
plt.xlabel('Original image', fontsize=15)
plt.xticks([])
plt.yticks([])
plt.subplot(1, 2, 2)
plt.imshow(out_img)
plt.xlabel('Sliding window', fontsize=15)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.xticks([])
plt.yticks([])
plt.show()
# In[48]:
# Visualize the sliding windows over the test images.
for original, processed in zip(test_images_imgs, test_images_processed):
img = processed
left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window(img)
h = img.shape[0]
left_fit_x_int = left_fit[0]*h**2 + left_fit[1]*h + left_fit[2]
right_fit_x_int = right_fit[0]*h**2 + right_fit[1]*h + right_fit[2]
rectangles = visualization_data[0]
histogram = visualization_data[1]
# Create an output image to draw on and visualize the result
out_img = np.uint8(np.dstack((img, img, img))*255)
# Generate x and y values for plotting
ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
for rect in rectangles:
# Draw the windows on the visualization image
cv2.rectangle(out_img,(rect[2],rect[0]),(rect[3],rect[1]),(0,255,0), 2)
cv2.rectangle(out_img,(rect[4],rect[0]),(rect[5],rect[1]),(0,255,0), 2)
# Identify the x and y positions of all nonzero pixels in the image
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
out_img[nonzeroy[left_lane_inds], nonzerox[left_lane_inds]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds], nonzerox[right_lane_inds]] = [100, 200, 255]
plt.figure(figsize=(14, 7))
plt.subplot(1, 2, 1)
plt.imshow(original)
plt.xlabel('Original image', fontsize=15)
plt.xticks([])
plt.yticks([])
plt.subplot(1, 2, 2)
plt.imshow(out_img)
plt.xlabel('Sliding window', fontsize=15)
plt.plot(left_fitx, ploty, color='yellow')
plt.plot(right_fitx, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.xticks([])
plt.yticks([])
plt.show()
# ### Polyfit Using Fit from Previous Frame
#
# The Polyfit Using Fit from Previous Frame approach performs essentially the same task, but greatly simplifies the search by leveraging a previous fit (from a previous video frame, for example) and only searching for lane pixels within a certain margin around that fit.
# In[49]:
def polyfit_prev_fit(img, left_fit_prev, right_fit_prev):
"""
Fit a polynomial to the input binary image based upon a previous fit.
This assumes that the fit will not change significantly from one video frame to the next.
Parameters:
img: Input image.
        left_fit_prev: Coefficients of the left-lane polynomial fitted on the previous frame.
        right_fit_prev: Coefficients of the right-lane polynomial fitted on the previous frame.
"""
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
margin = 80
left_lane_inds = ((nonzerox > (left_fit_prev[0]*(nonzeroy**2) + left_fit_prev[1]*nonzeroy + left_fit_prev[2] - margin))
& (nonzerox < (left_fit_prev[0]*(nonzeroy**2) + left_fit_prev[1]*nonzeroy + left_fit_prev[2] + margin)))
right_lane_inds = ((nonzerox > (right_fit_prev[0]*(nonzeroy**2) + right_fit_prev[1]*nonzeroy + right_fit_prev[2] - margin))
& (nonzerox < (right_fit_prev[0]*(nonzeroy**2) + right_fit_prev[1]*nonzeroy + right_fit_prev[2] + margin)))
leftx = nonzerox[left_lane_inds]
lefty = nonzeroy[left_lane_inds]
rightx = nonzerox[right_lane_inds]
righty = nonzeroy[right_lane_inds]
left_fit_new, right_fit_new = (None, None)
if len(leftx) != 0:
left_fit_new = np.polyfit(lefty, leftx, 2)
if len(rightx) != 0:
right_fit_new = np.polyfit(righty, rightx, 2)
return left_fit_new, right_fit_new, left_lane_inds, right_lane_inds
# In[50]:
# Visualize the polyfit_prev_fit over the an example image.
margin = 50
left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window(img)
left_fit2, right_fit2, left_lane_inds2, right_lane_inds2 = polyfit_prev_fit(img, left_fit, right_fit)
ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
left_fitx2 = left_fit2[0]*ploty**2 + left_fit2[1]*ploty + left_fit2[2]
right_fitx2 = right_fit2[0]*ploty**2 + right_fit2[1]*ploty + right_fit2[2]
out_img = np.uint8(np.dstack((img, img, img))*255)
window_img = np.zeros_like(out_img)
nonzero = img.nonzero()
nonzeroy = np.array(nonzero[0])
nonzerox = np.array(nonzero[1])
out_img[nonzeroy[left_lane_inds2], nonzerox[left_lane_inds2]] = [255, 0, 0]
out_img[nonzeroy[right_lane_inds2], nonzerox[right_lane_inds2]] = [0, 0, 255]
left_line_window1 = np.array([np.transpose(np.vstack([left_fitx-margin, ploty]))])
left_line_window2 = np.array([np.flipud(np.transpose(np.vstack([left_fitx+margin, ploty])))])
left_line_pts = np.hstack((left_line_window1, left_line_window2))
right_line_window1 = np.array([np.transpose(np.vstack([right_fitx-margin, ploty]))])
right_line_window2 = np.array([np.flipud(np.transpose(np.vstack([right_fitx+margin, ploty])))])
right_line_pts = np.hstack((right_line_window1, right_line_window2))
cv2.fillPoly(window_img, np.int_([left_line_pts]), (0,255, 0))
cv2.fillPoly(window_img, np.int_([right_line_pts]), (0,255, 0))
result = cv2.addWeighted(out_img, 1, window_img, 0.3, 0)
plt.figure(figsize=(14, 7))
plt.subplot(1, 2, 1)
plt.imshow(original)
plt.xlabel('Original image', fontsize=15)
plt.xticks([])
plt.yticks([])
plt.subplot(1, 2, 2)
plt.imshow(result)
plt.xlabel('Polyfit using previous fit', fontsize=15)
plt.plot(left_fitx2, ploty, color='yellow')
plt.plot(right_fitx2, ploty, color='yellow')
plt.xlim(0, 1280)
plt.ylim(720, 0)
plt.xticks([])
plt.yticks([])
plt.show()
# In[52]:
# visualizing polyfit_prev_fit over the test images.
for original, processed in zip(test_images_imgs, test_images_processed):
img = processed
margin = 50
left_fit, right_fit, left_lane_inds, right_lane_inds, visualization_data = sliding_window(img)
left_fit2, right_fit2, left_lane_inds2, right_lane_inds2 = polyfit_prev_fit(img, left_fit, right_fit)
ploty = np.linspace(0, img.shape[0]-1, img.shape[0] )
left_fitx = left_fit[0]*ploty**2 + left_fit[1]*ploty + left_fit[2]
right_fitx = right_fit[0]*ploty**2 + right_fit[1]*ploty + right_fit[2]
left_fitx2 = left_fit2[0]*ploty**2 + left_fit2[1]*ploty + left_fit2[2]
right_fitx2 = right_fit2[0]*ploty**2 + right_fit2[1]*ploty + right_fit2[2]
out_img = np.uint8(np.dstack((img, img, img))*255)
window_img = | np.zeros_like(out_img) | numpy.zeros_like |
# !/usr/bin/env python
# Created by "Thieu" at 10:08, 02/03/2021 ----------%
# Email: <EMAIL> %
# Github: https://github.com/thieu1995 %
# --------------------------------------------------%
import numpy as np
from mealpy.optimizer import Optimizer
class OriginalHC(Optimizer):
"""
The original version of: Hill Climbing (HC)
Notes
~~~~~
    + The number of neighbour solutions is defined by the user
+ The step size to calculate neighbour is randomized
Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:
+ neighbour_size (int): [pop_size/2, pop_size], fixed parameter, sensitive exploitation parameter, Default: 50
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.math_based.HC import OriginalHC
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> neighbour_size = 50
>>> model = OriginalHC(problem_dict1, epoch, pop_size, neighbour_size)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
References
~~~~~~~~~~
[1] <NAME>., <NAME>. and <NAME>., 1993. When will a genetic algorithm
outperform hill climbing. Advances in neural information processing systems, 6.
"""
def __init__(self, problem, epoch=10000, pop_size=100, neighbour_size=50, **kwargs):
"""
Args:
problem (dict): The problem dictionary
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
neighbour_size (int): fixed parameter, sensitive exploitation parameter, Default: 50
"""
super().__init__(problem, kwargs)
self.epoch = self.validator.check_int("epoch", epoch, [1, 100000])
self.pop_size = self.validator.check_int("pop_size", pop_size, [10, 10000])
self.neighbour_size = self.validator.check_int("neighbour_size", neighbour_size, [2, self.pop_size])
self.nfe_per_epoch = self.pop_size
self.sort_flag = False
def evolve(self, epoch):
"""
The main operations (equations) of algorithm. Inherit from Optimizer class
Args:
epoch (int): The current iteration
"""
self.nfe_per_epoch = self.neighbour_size
step_size = np.mean(self.problem.ub - self.problem.lb) * np.exp(-2 * (epoch + 1) / self.epoch)
pop_neighbours = []
for i in range(0, self.neighbour_size):
pos_new = self.g_best[self.ID_POS] + np.random.normal(0, 1, self.problem.n_dims) * step_size
pos_new = self.amend_position(pos_new, self.problem.lb, self.problem.ub)
pop_neighbours.append([pos_new, None])
self.pop = self.update_target_wrapper_population(pop_neighbours)
class BaseHC(OriginalHC):
"""
My changed version of: Swarm-based Hill Climbing (S-HC)
Notes
~~~~~
    + Based on the idea of a swarm of people trying to climb a mountain
    + The number of neighbour solutions is equal to the population size
    + The step size used to generate a neighbour is randomized and based on the rank of the solution.
        + Solutions near the top of the mountain move more slowly than those near the bottom.
        + Intuition: exploration when far from the global best, exploitation when near the global best
    + Whoever reaches the top of the mountain first is the winner (the global optimum).
Hyper-parameters should fine tuned in approximate range to get faster convergence toward the global optimum:
+ neighbour_size (int): [pop_size/2, pop_size], fixed parameter, sensitive exploitation parameter, Default: 50
Examples
~~~~~~~~
>>> import numpy as np
>>> from mealpy.math_based.HC import BaseHC
>>>
>>> def fitness_function(solution):
>>> return np.sum(solution**2)
>>>
>>> problem_dict1 = {
>>> "fit_func": fitness_function,
>>> "lb": [-10, -15, -4, -2, -8],
>>> "ub": [10, 15, 12, 8, 20],
>>> "minmax": "min",
>>> }
>>>
>>> epoch = 1000
>>> pop_size = 50
>>> neighbour_size = 50
>>> model = BaseHC(problem_dict1, epoch, pop_size, neighbour_size)
>>> best_position, best_fitness = model.solve()
>>> print(f"Solution: {best_position}, Fitness: {best_fitness}")
"""
def __init__(self, problem, epoch=10000, pop_size=100, neighbour_size=50, **kwargs):
"""
Args:
epoch (int): maximum number of iterations, default = 10000
pop_size (int): number of population size, default = 100
neighbour_size (int): fixed parameter, sensitive exploitation parameter, Default: 50
"""
super().__init__(problem, epoch, pop_size, neighbour_size, **kwargs)
self.nfe_per_epoch = self.pop_size
self.sort_flag = True
def evolve(self, epoch):
"""
Args:
epoch (int): The current iteration
"""
ranks = np.array(list(range(1, self.pop_size + 1)))
ranks = ranks / sum(ranks)
step_size = np.mean(self.problem.ub - self.problem.lb) * | np.exp(-2 * (epoch + 1) / self.epoch) | numpy.exp |
"""
* Copyright (c) 2021, NVIDIA CORPORATION.
*
* Licensed under the Apache License, Version 2.0 (the "License");
* you may not use this file except in compliance with the License.
* You may obtain a copy of the License at
*
* http://www.apache.org/licenses/LICENSE-2.0
*
* Unless required by applicable law or agreed to in writing, software
* distributed under the License is distributed on an "AS IS" BASIS,
* WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
* See the License for the specific language governing permissions and
* limitations under the License.
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import sys
sys.path.append("./python")
sys.path.append("./performance_profile")
import txt2tfrecord as utils
import tensorflow as tf
import hugectr_tf_ops
from model import OriginalEmbedding
from read_data import create_dataset, CreateDataset
import argparse
import logging
import time
import numpy as np
tf.debugging.set_log_device_placement(False)
devices = tf.config.list_physical_devices("GPU")
for dev in devices:
tf.config.experimental.set_memory_growth(dev, True)
cols = [utils.idx2key(idx, False) for idx in range(0, utils.NUM_TOTAL_COLUMNS)]
feature_desc = dict()
for col in cols:
if col == 'label' or col.startswith("I"):
feature_desc[col] = tf.io.FixedLenFeature([], tf.int64) # scaler
else:
feature_desc[col] = tf.io.FixedLenFeature([1], tf.int64) # [slot_num, nnz]
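# Illustrative sketch (an assumption, not part of the original test): with the feature
# description above, one serialized record parses into a scalar label, the dense
# integer columns (those starting with "I") and the remaining categorical slots,
# each holding a single key of shape [1].
#
# parsed = tf.io.parse_single_example(serialized_record, feature_desc)
# label = parsed['label']
# dense = [parsed[c] for c in cols if c.startswith("I")]
# cate = [parsed[c] for c in cols if c != 'label' and not c.startswith("I")]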
def Convert_to_csr_test(batch_size, gpu_count, embedding_type, iterations=10):
def _plugin_CPU_op_VS_tf_ops():
"""
Compare the result of converting to CSR between plugin CPU ops and tf ops.
"""
print("[INFO]: converting to CSR, plugin CPU vs tf ops..")
dataset_names = ['./performance_profile/train.tfrecord']
dataset_cpu = create_dataset(dataset_names=dataset_names,
feature_desc=feature_desc,
batch_size=batch_size,
n_epochs=1,
distribute_keys=True,
gpu_count=gpu_count,
embedding_type=embedding_type,
use_which_device='cpu')
dataset_tf = CreateDataset(dataset_names=dataset_names,
feature_desc=feature_desc,
batch_size=batch_size,
n_epochs=1,
slot_num=26,
max_nnz=1,
convert_to_csr=True,
gpu_count=gpu_count,
embedding_type=embedding_type)()
dataset_cpu = iter(dataset_cpu)
dataset_tf = iter(dataset_tf)
for iter_i in range(iterations):
row_offsets_cpu, value_tensor_cpu, nnz_array_cpu = next(dataset_cpu)[2:5]
row_offsets_tf, value_tensor_tf, nnz_array_tf = next(dataset_tf)[2:5]
try:
tf.debugging.assert_equal(row_offsets_cpu[:, 0:row_offsets_tf.shape[1]], row_offsets_tf)
tf.debugging.assert_equal(value_tensor_cpu[:, 0:value_tensor_tf.shape[1]], value_tensor_tf)
tf.debugging.assert_equal(nnz_array_cpu, nnz_array_tf)
except tf.errors.InvalidArgumentError as error:
raise RuntimeError("Error in %s, gpu_count %d, batch_size %d." %(embedding_type, gpu_count, batch_size),
error.message)
print("[INFO]: For %s and gpu_count: %d, batch_size: %d, iteration: %d results is the same."
%(embedding_type, gpu_count, batch_size, iter_i))
def _plugin_GPU_op_VS_tf_ops():
"""
Compare the result of converting to CSR between plugin GPU ops and tf ops.
"""
print("[INFO]: converting to CSR, plugin GPU vs tf ops..")
dataset_names = ['./performance_profile/train.tfrecord']
dataset_gpu = create_dataset(dataset_names=dataset_names,
feature_desc=feature_desc,
batch_size=batch_size,
n_epochs=1,
distribute_keys=True,
gpu_count=gpu_count,
embedding_type=embedding_type,
use_which_device='gpu')
dataset_tf = CreateDataset(dataset_names=dataset_names,
feature_desc=feature_desc,
batch_size=batch_size,
n_epochs=1,
slot_num=26,
max_nnz=1,
convert_to_csr=True,
gpu_count=gpu_count,
embedding_type=embedding_type)()
dataset_gpu = iter(dataset_gpu)
dataset_tf = iter(dataset_tf)
for iter_i in range(iterations):
row_indices, values, nnz_array_gpu = next(dataset_gpu)[2:5]
row_offsets_gpu, value_tensor_gpu, nnz_array_gpu = hugectr_tf_ops.distribute_keys_gpu(row_indices=row_indices,
values=values,
embedding_name='hugectr_embedding',
embedding_type=embedding_type,
batch_size=batch_size,
slot_num=26,
gpu_count=gpu_count,
max_nnz=1)
row_offsets_tf, value_tensor_tf, nnz_array_tf = next(dataset_tf)[2:5]
try:
tf.debugging.assert_equal(row_offsets_gpu[:, 0:row_offsets_tf.shape[1]], row_offsets_tf)
tf.debugging.assert_equal(value_tensor_gpu[:, 0:value_tensor_tf.shape[1]], value_tensor_tf)
tf.debugging.assert_equal(nnz_array_gpu, nnz_array_tf)
except tf.errors.InvalidArgumentError as error:
raise RuntimeError("Error in %s, gpu_count %d, batch_size %d." %(embedding_type, gpu_count, batch_size),
error.message)
print("[INFO]: For %s and gpu_count: %d, batch_size: %d, iteration: %d results is the same."
%(embedding_type, gpu_count, batch_size, iter_i))
hugectr_tf_ops.reset()
_plugin_CPU_op_VS_tf_ops()
_plugin_GPU_op_VS_tf_ops()
def Embedding_ops_test(vocabulary_size, slot_num, max_nnz, embedding_vec_size, batch_size, gpus, embedding_type):
"""
    Test the forward propagation result against a tf embedding layer,
    then run backward propagation and check the forward propagation again.
"""
tf.keras.backend.clear_session()
def _fprop_VS_tf():
print("[INFO]: Testing fprop vs tf...")
if vocabulary_size < slot_num:
raise RuntimeError("vocabulary_size must > slot.")
with tf.GradientTape(persistent=True) as tape:
# initial embedding table
init_value = np.float32(np.random.normal(loc=0, scale=1, size=(vocabulary_size, embedding_vec_size)))
# input keys
# TODO: Keys in different slots should be unique.
input_keys = np.ones(shape=(batch_size, slot_num, max_nnz), dtype=np.int64) * -1
each_slot = vocabulary_size // slot_num
nnz_0_num = 0
for batch_id in range(batch_size):
for slot_id in range(slot_num):
nnz = np.random.randint(low=nnz_0_num, high=max_nnz+1, size=1)[0] # how many keys in this slot
if nnz == 0:
nnz_0_num = 1
if (embedding_type == 'distributed'):
keys = np.random.randint(low=slot_id * each_slot, high=(slot_id + 1) * each_slot, size=nnz)
elif (embedding_type == "localized"):
# TODO: key should belong to that slot.
keys = []
while len(keys) < nnz:
key = np.random.randint(low=slot_id * each_slot, high=(slot_id + 1) * each_slot, size=1)
if key % slot_num == slot_id:
keys.append(key)
input_keys[batch_id, slot_id, 0:nnz] = keys
# hugectr ops
hugectr_tf_ops.init(visiable_gpus=gpus, key_type='int64', value_type='float',
batch_size=batch_size, batch_size_eval=len(gpus))
embedding_name = hugectr_tf_ops.create_embedding(init_value=init_value, opt_hparams=[0.1, 0.9, 0.99, 1e-5],
name_='hugectr_embedding', max_vocabulary_size_per_gpu= (vocabulary_size // len(gpus))* 2 + 1,
slot_num=slot_num, embedding_vec_size=embedding_vec_size,
max_feature_num=slot_num*max_nnz, embedding_type=embedding_type,
max_nnz=max_nnz, update_type='Global')
indices = tf.where(input_keys != -1)
values = tf.gather_nd(input_keys, indices)
bp_trigger = tf.Variable(initial_value=1.0, trainable=True, dtype=tf.float32)
hugectr_forward = hugectr_tf_ops.fprop(embedding_name=embedding_name, sparse_indices=indices, values=values,
dense_shape=input_keys.shape, output_type=tf.float32, is_training=True,
bp_trigger=bp_trigger)
# print("hugectr_results=\n", hugectr_forward)
# tf ops
reshape_input_keys = np.reshape(input_keys, [-1, max_nnz])
tf_indices = tf.where(reshape_input_keys != -1)
tf_values = tf.gather_nd(reshape_input_keys, tf_indices)
sparse_tensor = tf.sparse.SparseTensor(tf_indices, tf_values, reshape_input_keys.shape)
# FIXME: if there are too many nnz=0 slots, tf.nn.embedding_lookup_sparse may get wrong results?
tf_embedding_layer = OriginalEmbedding(vocabulary_size=vocabulary_size,
embedding_vec_size=embedding_vec_size,
initializer=init_value,
combiner='sum',
gpus=gpus)
tf_forward = tf_embedding_layer(sparse_tensor, output_shape=[batch_size, slot_num, embedding_vec_size])
# print("tf_results=\n", tf_forward)
# compare first forward result
try:
tf.debugging.assert_near(hugectr_forward, tf_forward)
except tf.errors.InvalidArgumentError as error:
raise error
print("[INFO]: The results from HugeCTR and tf in the first forward propagation are the same.")
# backward
hugectr_grads = tape.gradient(hugectr_forward, bp_trigger)
tf_opt = tf.keras.optimizers.Adam(learning_rate=0.1, beta_1=0.9, beta_2=0.99, epsilon=1e-5)
tf_grads = tape.gradient(tf_forward, tf_embedding_layer.trainable_weights)
tf_opt.apply_gradients(zip(tf_grads, tf_embedding_layer.trainable_weights))
# compare second forward result
hugectr_forward_2 = hugectr_tf_ops.fprop(embedding_name=embedding_name, sparse_indices=indices, values=values,
dense_shape=input_keys.shape, output_type=tf.float32, is_training=True,
bp_trigger=bp_trigger)
tf_forward_2 = tf_embedding_layer(sparse_tensor, output_shape=[batch_size, slot_num, embedding_vec_size])
# print("hugectr 2:\n", hugectr_forward_2)
# print("tf 2:\n", tf_forward_2)
try:
tf.debugging.assert_near(hugectr_forward_2, tf_forward_2, rtol=1e-4, atol=1e-5)
except tf.errors.InvalidArgumentError as error:
raise error
print("[INFO]: The results from HugeCTR and tf in the second forward propagation are the same.")
hugectr_tf_ops.reset()
def _fprop_v3_VS_tf():
print("[INFO]: Testing fprop_v3 vs tf...")
if vocabulary_size < slot_num:
raise RuntimeError("vocabulary_size must > slot.")
with tf.GradientTape(persistent=True) as tape:
# initial embedding table
init_value = np.float32(np.random.normal(loc=0, scale=1, size=(vocabulary_size, embedding_vec_size)))
# input keys
# TODO: Keys in different slots should be unique.
input_keys = np.ones(shape=(batch_size, slot_num, max_nnz), dtype=np.int64) * -1
each_slot = vocabulary_size // slot_num
nnz_0_num = 0
for batch_id in range(batch_size):
for slot_id in range(slot_num):
nnz = np.random.randint(low=nnz_0_num, high=max_nnz+1, size=1)[0] # how many keys in this slot
if nnz == 0:
nnz_0_num = 1
if (embedding_type == 'distributed'):
keys = np.random.randint(low=slot_id * each_slot, high=(slot_id + 1) * each_slot, size=nnz)
elif (embedding_type == "localized"):
keys = []
while len(keys) < nnz:
key = np.random.randint(low=slot_id * each_slot, high=(slot_id + 1) * each_slot, size=1)
if key % slot_num == slot_id:
keys.append(key)
input_keys[batch_id, slot_id, 0:nnz] = keys
# hugectr ops
hugectr_tf_ops.init(visiable_gpus=gpus, key_type='int64', value_type='float',
batch_size=batch_size, batch_size_eval=len(gpus))
embedding_name = hugectr_tf_ops.create_embedding(init_value=init_value, opt_hparams=[0.1, 0.9, 0.99, 1e-5],
name_='hugectr_embedding', max_vocabulary_size_per_gpu= (vocabulary_size // len(gpus))* 2 + 1,
slot_num=slot_num, embedding_vec_size=embedding_vec_size,
max_feature_num=slot_num*max_nnz, embedding_type=embedding_type,
max_nnz=max_nnz, update_type='Global')
# use CreateDataset to do preprocessing
dataset_utils = CreateDataset(dataset_names=None,
feature_desc=None,
batch_size=batch_size,
n_epochs=1,
slot_num=slot_num,
max_nnz=max_nnz,
convert_to_csr=None,
gpu_count=len(gpus),
embedding_type=embedding_type,
get_row_indices=None)
if ("distributed" == embedding_type):
row_offsets, value_tensor, nnz_array = dataset_utils._distribute_keys_for_distributed(input_keys)
elif ("localized" == embedding_type):
row_offsets, value_tensor, nnz_array = dataset_utils._distribute_keys_for_localized(input_keys)
else:
raise RuntimeError("Not supported embedding_type %s" %embedding_type)
bp_trigger = tf.Variable(initial_value=1.0, trainable=True, dtype=tf.float32)
hugectr_forward = hugectr_tf_ops.fprop_v3(embedding_name=embedding_name, row_offsets=row_offsets,
value_tensors=value_tensor, nnz_array=nnz_array,
bp_trigger=bp_trigger, is_training=True,
output_shape=[batch_size, slot_num, max_nnz])
# print("hugectr_results=\n", hugectr_forward)
# tf ops
reshape_input_keys = | np.reshape(input_keys, [-1, max_nnz]) | numpy.reshape |
import pandas as pd
import numpy as np
import os
def interpolation_fun(dir_path):
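"""
Denoise and interpolate tracked-point (e.g. ball) trajectories.
For every per-rally CSV named '<dir_path><set>_<scoreA>_<scoreB>.csv'
(columns 'X', 'Y', 'Visibility'), outlier coordinates are removed based on
frame-to-frame displacement and quadratic-fit residuals, and short gaps are
then filled back in by polynomial and linear interpolation.
A sketch of the expected call (the path prefix below is a placeholder)::
    interpolation_fun('./predictions/match1_')
"""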
set_num = 1
while set_num <= 3:
score_A = 0
score_B = 0
while score_A < 22:
interpolate_table = []
if score_B == 21:
score_A = score_A + 1
score_B = 0
else:
score_B = score_B + 1
path = dir_path + str(set_num)+"_"+(str(score_A)).zfill(2)+"_"+(str(score_B)).zfill(2)+".csv"
if not os.path.isfile(path):
continue
df = pd.read_csv(path)
df = df.fillna(0)
x = df['X'].tolist()
y = df['Y'].tolist()
vis = df['Visibility'].tolist()
# Define distance
pre_dif = []
for i in range(0,len(x)):
if i ==0:
pre_dif.append(0)
else:
pre_dif.append(((x[i]-x[i-1])**2+(y[i]-y[i-1])**2)**(1/2))
abnormal = [0]*len(pre_dif)
X_abn = x
y_abn = y
dif_error = 2
for i in range(len(pre_dif)):
if i==len(pre_dif):
abnormal[i]=0
elif i==len(pre_dif)-1:
abnormal[i]=0
elif i==len(pre_dif)-2:
abnormal[i]=0
elif i==len(pre_dif)-3:
abnormal[i]=0
elif pre_dif[i] >= 100 and pre_dif[i+1] >= 100:
if vis[i:i+2] == [1,1]:# and series[i:i+2] == [1,1]:
abnormal[i] ='bias1'
X_abn[i] = 0
y_abn[i] = 0
elif pre_dif[i] >= 100 and pre_dif[i+2] >= 100:
if pre_dif[i+1]<dif_error:
if vis[i:i+3] == [1,1,1]:# and series[i:i+3] == [1,1,1]:
abnormal[i:i+2]=['bias2','bias2']
X_abn[i:i+2] = [0,0]
y_abn[i:i+2] = [0,0]
elif pre_dif[i] >= 100 and pre_dif[i+3] >= 100:
if pre_dif[i+1]<dif_error and pre_dif[i+2]<dif_error:
if vis[i:i+4] == [1,1,1,1]:# and series[i:i+4] == [1,1,1,1]:
abnormal[i:i+3]=['bias3','bias3','bias3']
X_abn[i:i+3] = [0,0,0]
y_abn[i:i+3] = [0,0,0]
elif( i+4 > len(pre_dif)):
if pre_dif[i] >= 100 and pre_dif[i+4] >= 100:
if pre_dif[i+1]<dif_error and pre_dif[i+2]<dif_error and pre_dif[i+3]<dif_error:
if vis[i:i+5] == [1,1,1,1,1]:# and series[i:i+5] == [1,1,1,1,1]:
abnormal[i:i+4]=['bias4','bias4','bias4','bias4']
X_abn[i:i+4] = [0,0,0,0]
y_abn[i:i+4] = [0,0,0,0]
# # II. Poly line check
x_test = X_abn
y_test = y_abn
vis2 = [1] * len(df)
for i in range(len(df)):
if x_test[i] ==0 and y_test[i] ==0:
vis2[i] = 0
fuc2 = [0]*len(df)
fuc1 = [0]*len(df)
fuc0 = [0]*len(df)
x_ck_bf = [0]*len(df)
y_ck_bf = [0]*len(df)
bf_dis = [0]*len(df)
x_ck_af = [0]*len(df)
y_ck_af = [0]*len(df)
af_dis = [0]*len(df)
for i in range(1,len(df)-7):
if sum(vis2[i:i+7])>=3:
vis_window = np.array(vis2[i:i+7])
loc = np.where(vis_window==1)
for k in loc:
x_ar = np.array(x_test)[i+k]
y_ar = np.array(y_test)[i+k]
f1 = np.polyfit(x_ar, y_ar, 2)
p1 = np.poly1d(f1)
fuc2[i]=f1[0]
fuc1[i]=f1[1]
fuc0[i]=f1[2]
if vis[i+7]==1:
y_check_af=p1(x_test[i+7])
x_ck_af[i+7]=x_test[i+7]
y_ck_af[i+7]=y_check_af
af_dis[i+7]=abs(y_check_af-y_test[i+7])
elif vis[i+7]==0:
x_ck_af[i+7]='NA'
y_ck_af[i+7]='NA'
if vis[i-1]==1:
y_check_bf=p1(x_test[i-1])
x_ck_bf[i-1]=x_test[i-1]
y_ck_bf[i-1]=y_check_bf
bf_dis[i-1]=abs(y_check_bf-y_test[i-1])
elif vis[i-1]==0:
x_ck_bf[i-1]='NA'
y_ck_bf[i-1]='NA'
# # III. 2nd Denoise
x_test_2nd = X_abn
y_test_2nd = y_abn
abnormal2 = abnormal
for i in range(len(df)):
if af_dis[i]>100 and vis2[i]==1:
if bf_dis[i]>100 and vis2[i]==1:
x_test_2nd[i]=0
y_test_2nd[i]=0
abnormal2[i]='2bias1'
elif i+1<len(bf_dis) and bf_dis[i+1]>100 and vis2[i+1]==1:
if af_dis[i+1]<100:
x_test_2nd[i:i+2]=[0,0]
y_test_2nd[i:i+2]=[0,0]
abnormal2[i:i+2]=['2bias2','2bias2']
elif i+2<len(bf_dis) and bf_dis[i+2]>100 and vis2[i+1:i+3]==[1,1]:
if af_dis[i+1]<100 and af_dis[i+2]<100:
x_test_2nd[i:i+3]=[0,0,0]
y_test_2nd[i:i+3]=[0,0,0]
abnormal2[i:i+3]=['2bias3','2bias3','2bias3']
elif i+3<len(bf_dis) and bf_dis[i+3]>100 and vis2[i+1:i+4]==[1,1,1]:
if af_dis[i+1]<100 and af_dis[i+2]<100 and af_dis[i+3]<100:
x_test_2nd[i:i+4]=[0,0,0,0]
y_test_2nd[i:i+4]=[0,0,0,0]
abnormal2[i:i+4]=['2bias4','2bias4','2bias4','2bias4']
elif i+4<len(bf_dis) and bf_dis[i+4]>100 and vis2[i+1:i+5]==[1,1,1,1]:
if af_dis[i+1]<100 and af_dis[i+2]<100 and af_dis[i+3]<100 and af_dis[i+4]<100:
x_test_2nd[i:i+5]=[0,0,0,0,0]
y_test_2nd[i:i+5]=[0,0,0,0,0]
abnormal2[i:i+5]=['2bias5','2bias5','2bias5','2bias5','2bias5']
elif i+5<len(bf_dis) and bf_dis[i+5]>100 and vis2[i+1:i+6]==[1,1,1,1,1]:
if af_dis[i+1]<100 and af_dis[i+2]<100 and af_dis[i+3]<100 and af_dis[i+4]<100 and af_dis[i+5]<100:
x_test_2nd[i:i+6]=[0,0,0,0,0,0]
y_test_2nd[i:i+6]=[0,0,0,0,0,0]
abnormal2[i:i+6]=['2bias6','2bias6','2bias6','2bias6','2bias6','2bias6']
elif af_dis[i]>1000 and vis2[i]==1:
x_test_2nd[i]=0
y_test_2nd[i]=0
abnormal2[i]='2bias1'
elif bf_dis[i]>1000 and vis2[i]==1:
x_test_2nd[i]=0
y_test_2nd[i]=0
abnormal2[i]='2bias1'
# # IV. Compensate
vis3 = [1] * len(df)
for i in range(len(df)):
if x_test_2nd[i] ==0 and y_test_2nd[i] ==0:
vis3[i] = 0
f2 = fuc2
f1 = fuc1
f0 = fuc0
x_sm = x_test_2nd
y_sm = y_test_2nd
comp_ft = [0] * len(df)
comp_bk = [0] * len(df)
for i in range(len(vis3)):
if af_dis[i]!=0 and bf_dis[i]!=0 and af_dis[i]<5 and bf_dis[i]<5:
if sum(vis3[i-7:i])!=7: # front side compensate
#print(vis3[i-7:i])
for k in range(5):
if vis3[i-7+k:i-4+k]==[1,0,1]:
x_ev = (x_sm[i-7+k]+x_sm[i-5+k])/2
y_ev = f2[i-7]*x_ev*x_ev + f1[i-7]*x_ev + f0[i-7]
x_sm[i-6+k]=x_ev
y_sm[i-6+k]=y_ev
vis3[i-7+k:i-4+k]=[1,1,1]
for k in range(4):
if vis3[i-7+k:i-3+k]==[1,0,0,1]:
for j in range(1,3):
x_ev = ((x_sm[i-4+k]-x_sm[i-7+k])/3)*j+x_sm[i-7+k]
y_ev = f2[i-7]*x_ev*x_ev + f1[i-7]*x_ev + f0[i-7]
x_sm[i-7+k+j]=x_ev
y_sm[i-7+k+j]=y_ev
vis3[i-7+k:i-3+k]=[1,1,1,1]
for k in range(3):
if vis3[i-7+k:i-2+k]==[1,0,0,0,1]:
for j in range(1,4):
x_ev = ((x_sm[i-3+k]-x_sm[i-7+k])/4)*j+x_sm[i-7+k]
y_ev = f2[i-7]*x_ev*x_ev + f1[i-7]*x_ev + f0[i-7]
x_sm[i-7+k+j]=x_ev
y_sm[i-7+k+j]=y_ev
vis3[i-7+k:i-2+k]=[1,1,1,1,1]
for k in range(2):
if vis3[i-7+k:i-1+k]==[1,0,0,0,0,1]:
for j in range(1,5):
x_ev = ((x_sm[i-2+k]-x_sm[i-7+k])/5)*j+x_sm[i-7+k]
y_ev = f2[i-7]*x_ev*x_ev + f1[i-7]*x_ev + f0[i-7]
x_sm[i-7+k+j]=x_ev
y_sm[i-7+k+j]=y_ev
vis3[i-7+k:i-1+k]=[1,1,1,1,1,1]
for k in range(1):
if vis3[i-7+k:i+k]==[1,0,0,0,0,0,1]:
for j in range(1,6):
x_ev = ((x_sm[i-1+k]-x_sm[i-7+k])/6)*j+x_sm[i-7+k]
y_ev = f2[i-7]*x_ev*x_ev + f1[i-7]*x_ev + f0[i-7]
x_sm[i-7+k+j]=x_ev
y_sm[i-7+k+j]=y_ev
vis3[i-7+k:i+k]=[1,1,1,1,1,1,1]
if sum(vis3[i+1:i+8])!=7: # back side compensate
#print(vis3[i+1:i+8])
for k in range(5):
if vis3[i+1+k:i+4+k]==[1,0,1]:
x_ev = (x_sm[i+1+k]+x_sm[i+3+k])/2
y_ev = f2[i+1]*x_ev*x_ev+f1[i+1]*x_ev+f0[i+1]
x_sm[i+2+k]=x_ev
y_sm[i+2+k]=y_ev
vis3[i+1+k:i+4+k]=[1,1,1]
for k in range(4):
if vis3[i+1+k:i+5+k]==[1,0,0,1]:
for j in range(1,3):
x_ev = ((x_sm[i+4+k]-x_sm[i+1+k])/3)*j+x_sm[i+1+k]
y_ev = f2[i+1]*x_ev*x_ev+f1[i+1]*x_ev+f0[i+1]
x_sm[i+1+k+j]=x_ev
y_sm[i+1+k+j]=y_ev
vis3[i+1+k:i+5+k]=[1,1,1,1]
for k in range(3):
if vis3[i+1+k:i+6+k]==[1,0,0,0,1]:
for j in range(1,4):
x_ev = ((x_sm[i+5+k]-x_sm[i+1+k])/4)*j+x_sm[i+1+k]
y_ev = f2[i+1]*x_ev*x_ev+f1[i+1]*x_ev+f0[i+1]
x_sm[i+1+k+j]=x_ev
y_sm[i+1+k+j]=y_ev
vis3[i+1+k:i+6+k]=[1,1,1,1,1]
for k in range(2):
if vis3[i+1+k:i+7+k]==[1,0,0,0,0,1]:
for j in range(1,5):
x_ev = ((x_sm[i+6+k]-x_sm[i+1+k])/5)*j+x_sm[i+1+k]
y_ev = f2[i+1]*x_ev*x_ev+f1[i+1]*x_ev+f0[i+1]
x_sm[i+1+k+j]=x_ev
y_sm[i+1+k+j]=y_ev
vis3[i+1+k:i+7+k]=[1,1,1,1,1,1]
for k in range(1):
if vis3[i+1+k:i+8+k]==[1,0,0,0,0,0,1]:
for j in range(1,6):
x_ev = ((x_sm[i+7+k]-x_sm[i+1+k])/6)*j+x_sm[i+1+k]
y_ev = f2[i+1]*x_ev*x_ev+f1[i+1]*x_ev+f0[i+1]
x_sm[i+1+k+j]=x_ev
y_sm[i+1+k+j]=y_ev
vis3[i+1+k:i+8+k]=[1,1,1,1,1,1,1]
# # V. 2nd Compensate
vis4 = [1] * len(df)
for i in range(len(df)):
if x_sm[i] ==0 and y_sm[i] ==0:
vis4[i] = 0
mis1 = []
mis2 = []
mis3 = []
mis4 = []
mis5 = []
for i in range(len(vis4)):
if i == 0:
mis1.append(0)
elif vis4[i-1:i+2] == [1,0,1]:
mis1.append(1)
elif i == len(vis4):
mis1.append(0)
else:
mis1.append(0)
for i in range(len(vis4)):
if i == 0:
mis2.append(0)
elif vis4[i-1:i+3] == [1,0,0,1]:
mis2.append(1)
elif i == len(vis4)-1:
mis2.append(0)
elif i == len(vis4):
mis2.append(0)
else:
mis2.append(0)
for i in range(len(vis4)):
if i == 0:
mis3.append(0)
elif vis4[i-1:i+4] == [1,0,0,0,1]:
mis3.append(1)
elif i == len(vis4)-2:
mis3.append(0)
elif i == len(vis4)-1:
mis3.append(0)
elif i == len(vis4):
mis3.append(0)
else:
mis3.append(0)
for i in range(len(vis4)):
if i == 0:
mis4.append(0)
elif vis4[i-1:i+5] == [1,0,0,0,0,1]:
mis4.append(1)
elif i == len(vis4)-3:
mis4.append(0)
elif i == len(vis4)-2:
mis4.append(0)
elif i == len(vis4)-1:
mis4.append(0)
elif i == len(vis4):
mis4.append(0)
else:
mis4.append(0)
for i in range(len(vis4)):
if i == 0:
mis5.append(0)
elif vis4[i-1:i+6] == [1,0,0,0,0,0,1]:
mis5.append(1)
elif i == len(vis4)-4:
mis5.append(0)
elif i == len(vis4)-3:
mis5.append(0)
elif i == len(vis4)-2:
mis5.append(0)
elif i == len(vis4)-1:
mis5.append(0)
elif i == len(vis4):
mis5.append(0)
else:
mis5.append(0)
x_sm2 = x_sm
y_sm2 = y_sm
mis1_X = []
mis1_y = []
for i in range(len(mis1)):
if i == 0 or i == 1 or i ==2 or i ==len(mis1) or i ==len(mis1)-1 or i ==len(mis1)-2:
mis1_X.append(x_sm2[i])
mis1_y.append(y_sm2[i])
elif mis1[i] == 0:
mis1_X.append(x_sm2[i])
mis1_y.append(y_sm2[i])
elif mis1[i] ==1:
miss_point = i
num_X = [x_sm2[miss_point-1],x_sm2[miss_point+1]]
num_y = [y_sm2[miss_point-1],y_sm2[miss_point+1]]
x_mis1 = np.array(num_X)
y_mis1 = np.array(num_y)
f1 = | np.polyfit(x_mis1, y_mis1, 1) | numpy.polyfit |
#!/usr/bin/python
"""
pytacs - The Python wrapper for the TACS solver
This python interface is designed to provide a easier interface to the
c-layer of TACS. It combines all the functionality of the old pyTACS
and pyTACS_Mesh. User-supplied hooks allow for nearly complete
customization of any or all parts of the problem setup. There are two
main parts of this module: The first deals with setting up the TACS
problem including reading the mesh, setting design variables,
functions, constraints etc (Functionality in the former
pyTACS_Mesh). The second part deals with solution of the structural
analysis and gradient computations.
Copyright (c) 2013 by Dr. <NAME>
All rights reserved. Not to be used for commercial purposes.
Developers:
-----------
- Dr. <NAME> (GKK)
History
-------
v. 1.0 - pyTACS initial implementation
"""
# =============================================================================
# Imports
# =============================================================================
from __future__ import print_function
import copy
import os
import numbers
import numpy
import time
import numpy as np
from mpi4py import MPI
import warnings
import tacs.TACS, tacs.constitutive, tacs.elements, tacs.functions, tacs.problems.static
from tacs.pymeshloader import pyMeshLoader
DEG2RAD = np.pi / 180.0
warnings.simplefilter('default')
try:
from collections import OrderedDict
except ImportError:
try:
from ordereddict import OrderedDict
except ImportError:
print("Could not find any OrderedDict class. "
"For python 2.6 and earlier, use:"
"\n pip install ordereddict")
class pyTACS(object):
def __init__(self, fileName, comm=None, dvNum=0,
scaleList=None, **kwargs):
"""
The class for working with a TACS structure
Parameters
----------
fileName : str
The filename of the BDF file to load.
comm : MPI Intracomm
The comm object on which to create the pyTACS object.
dvNum : int
A user-supplied offset to the design variable
numbering. This is typically used with tacs+tripan when
geometric variables have already been added and assigned
global tacs numberings.
scaleList: list
When dvNum is nonzero, the scaleList must be the same size
as the number of design variables already added. i.e.
len(scaleList) = dvNum
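For example (a minimal sketch; 'wingbox.bdf' is a placeholder file name)::
    # create the pyTACS object on the world communicator
    FEAAssembler = pyTACS('wingbox.bdf', comm=MPI.COMM_WORLD)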
"""
startTime = time.time()
# Default Option List
defOpts = {
'probname': [str, 'defaultName'],
'outputdir': [str, './'],
# Solution Options
'solutionType': [str, 'linear'],
'KSMSolver': [str, 'GMRES'],
'orderingType': [str, 'ND'],
'PCFillLevel': [int, 1000],
'PCFillRatio': [float, 20.0],
'subSpaceSize': [int, 10],
'nRestarts': [int, 15],
'flexible': [int, 1],
'L2Convergence': [float, 1e-12],
'L2ConvergenceRel': [float, 1e-12],
'useMonitor': [bool, False],
'monitorFrequency': [int, 10],
'resNormUB': [float, 1e20],
# selectCompID Options
'projectVector': [list, [0.0, 1.0, 0.0]],
# Output Options
'outputElement': [int, None],
'writeBDF': [bool, False],
'writeSolution': [bool, True],
'writeConnectivity': [bool, True],
'writeNodes': [bool, True],
'writeDisplacements': [bool, True],
'writeStrains': [bool, True],
'writeStresses': [bool, True],
'writeExtras': [bool, True],
'writeCoordinateFrame': [bool, False],
'familySeparator': [str, '/'],
'numberSolutions': [bool, True],
'printTiming': [bool, False],
'printIterations': [bool, True],
'printDebug': [bool, False],
}
# Data type (real or complex)
self.dtype = tacs.TACS.dtype
# Set the communicator and rank -- defaults to MPI_COMM_WORLD
if comm is None:
comm = MPI.COMM_WORLD
self.comm = comm
self.rank = comm.rank
# Process the default options which are added to self.options
# under the 'defaults' key. Make sure the key are lower case
self.options = {}
def_keys = defOpts.keys()
self.options['defaults'] = {}
for key in def_keys:
self.options['defaults'][key.lower()] = defOpts[key]
self.options[key.lower()] = defOpts[key]
# Process the user-supplied options
koptions = kwargs.pop('options', {})
kopt_keys = koptions.keys()
for key in kopt_keys:
self.setOption(key, koptions[key])
importTime = time.time()
# Create and load mesh loader object.
debugFlag = self.getOption('printDebug')
self.meshLoader = pyMeshLoader(self.comm, self.dtype, debugFlag)
self.meshLoader.scanBdfFile(fileName)
self.bdfName = fileName
# Save pynastran bdf object
self.bdfInfo = self.meshLoader.getBDFInfo()
meshLoadTime = time.time()
# Retrieve the number of components. This is the maximum
# number of unique constitutive objects possible in this model.
self.nComp = self.meshLoader.getNumComponents()
# Load all the component descriptions
self.compDescripts = self.meshLoader.getComponentDescripts()
self.elemDescripts = self.meshLoader.getElementDescripts()
# Set the starting dvNum and scaleList
self.dvNum = dvNum
self.scaleList = scaleList
if scaleList is None:
self.scaleList = []
DVPreprocTime = time.time()
# List of DV groups
self.globalDVs = {}
self.compIDBounds = {}
self.addedCompIDs = set()
self.varName = 'struct'
self.coordName = 'Xpts'
self.curSP = None
self.doDamp = False
self._factorOnNext = True
self._PCfactorOnNext = False
# List of functions
self.functionList = OrderedDict()
self.adjointList = OrderedDict()
self.dIduList = OrderedDict()
self.dvSensList = OrderedDict()
self.xptSensList = OrderedDict()
# List of initial coordinates
self.coords0 = None
# Variables per node for model
self.varsPerNode = None
# Norms
self.initNorm = 0.0
self.startNorm = 0.0
self.finalNorm = 0.0
# Flag for mat/vector creation
self._variablesCreated = False
# TACS assembler object
self.assembler = None
initFinishTime = time.time()
if self.getOption('printTiming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Init Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Module Time', importTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Meshload Time', meshLoadTime - importTime))
self.pp('| %-30s: %10.3f sec' % ('TACS DV Processing Time', DVPreprocTime - meshLoadTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Finalize Initialization Time', initFinishTime - DVPreprocTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Total Initialization Time', initFinishTime - startTime))
self.pp('+--------------------------------------------------+')
def addGlobalDV(self, descript, value,
lower=None, upper=None, scale=1.0):
"""
This function allows adding design variables that are not
cleanly associated with a particular constitutive object. One
example is the pitch of the stiffeners for blade stiffened
panels; it is often the same for many different constitutive
objects. By calling this function, the internal dvNum counter
is incremented and the user doesn\'t have to worry about
it.
Parameters
----------
descript : str
A user supplied string that can be used to retrieve the
variable number and value elemCallBackFunction.
value : float
Initial value for variable.
lower : float
Lower bound. May be None for unbounded
upper : float
Upper bound. May be None for unbounded
scale : float
Scale factor for variable
Returns
-------
None, but the information is provided to the user in the
elemCallBack function
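For example (illustrative values; 'stiffener_pitch' is a placeholder
description, and FEAAssembler denotes a pyTACS instance)::
    # add one global DV shared by several constitutive objects
    FEAAssembler.addGlobalDV('stiffener_pitch', value=0.15,
                             lower=0.05, upper=0.5, scale=10.0)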
"""
self.globalDVs[descript] = {'num': self.dvNum,
'value': value,
'lowerBound': lower,
'upperBound': upper}
self.dvNum += 1
self.scaleList.append(scale)
def selectCompIDs(self, include=None, exclude=None,
includeBounds=None, nGroup=1, includeOp='or',
excludeOp='or', projectVector=None, **kwargs):
"""
This is the most important function of the entire setup
process. The basic idea is as follows: we have a list of nComp
component descriptions. What we need is a way of
generating subgroups of these for the purposes of adding
design variables, constitutive objects, KS domains and mass
domains. All of these operations boil down to selecting a
subset of the compIDs.
This function attempts to support as many ways as possible to
select parts of the structure. Easy and efficient selection of
parts is critical to the end user.
Methods of selection:
1. include, integer, string, list of integers and/or strings: The
simplest and most direct way of selecting a component. The
user supplies the index of the componentID, a name or partial
name, or a list of a combination of both.
For example::
# Select the 11th component
selectCompIDs(include=10)
# Select the first and fifth component
selectCompIDs(include=[0, 4])
# Select any component containing 'rib.00'
selectCompIDs(include='rib.00')
# Select any components containing 'rib.00' and 'rib.10'
selectCompIDs(include=['rib.00', 'rib.10'])
# Select any component containing 'rib.00', the 11th
# component and any component containing 'spar'
# (This is probably not advisable!)
selectCompIDs(include=['rib.00', 10, 'spar'])
2. Exclude, operates similarly to 'include'. The behaviour
of exclude is identical to include above, except that
component ID's that are found using 'exclude' are
'subtracted' from those found using include. A special
case is treated if 'include' is NOT given: if only an
exclude list is given, this implies the selection of all
compID's EXCEPT those in exclude.
For example::
# This will return [0, 1, 2, 3, 5, ..., nComp-1]
selectCompIDs(exclude = 4)
# This will return [0, 1, 4, 5, ..., nComp-1]
selectCompIDs(exclude = [2, 3])
# This will return components that have 'ribs' in the
# component ID, but not those that have 'le_ribs' in the
# component ID.
selectCompIDs(include='ribs', exclude='le_ribs')
3. includeBounds, list of components defining a region inside
of which 'include' components will be selected. This
functionality uses a geometric approach to select the compIDs.
All components within the project 2D convex hull are included.
Therefore it is essential to split up concave include regions
into smaller convex regions. Use multiple calls to selectCompIDs to
accumulate multiple regions.
For example::
# This will select upper skin components between the
# leading and trailing edge spars and between ribs 1 and 4.
selectCompIDs(include='U_SKIN', includeBound=
['LE_SPAR', 'TE_SPAR', 'RIB.01', 'RIB.04'])
4. nGroup: The number of groups to divide the found components
into. Generally this will be 1. However, in certain cases, it
is convenient to create multiple groups in one pass.
For example::
# This will 'evenly' create 10 groups on all components
# containing LE_SPAR. Note that once the components are
# selected, they are sorted **alphabetically** and assigned
# sequentially.
selectCompIDs(include='LE_SPAR', nGroup=10)
nGroup can also be negative. If it is negative, then a single
design variable group is added to each of the found
components.
For example::
# will select all components and assign a design variable
# group to each one.
selectCompIDs(nGroup=-1)
includeOp, str: 'and' or 'or'. Selects the logical operation
used for items in the 'include' option. For example:
selectCompIDs(include=['LE_SPAR', 'TE_SPAR'],
includeOp='or') will select the LE_SPAR and TE_SPAR
components (default behaviour).
selectCompIDs(include=['RIB', 'SEG.01'], includeOp='and')
will select any component with 'RIB' in the description AND
'SEG.01' in the description.
"""
# Defaults
includeIDs = numpy.arange(self.nComp)
excludeIDs = []
includeBoundIDs = None
if include is not None:
includeIDs = self._getCompIDs(includeOp, include)
if exclude is not None:
excludeIDs = self._getCompIDs(excludeOp, exclude)
iSet = set(includeIDs)
eSet = set(excludeIDs)
# First take the intersection of iSet and ibSet
if includeBoundIDs is not None:
tmp = iSet.intersection(set(includeBoundIDs))
else:
tmp = iSet
# Next take the difference between tmp and eSet
compIDs = tmp.difference(eSet)
# Convert back to a list:
compIDs = list(compIDs)
# If we only want a single group, we're done, otherwise, we
# have a bit more work to do...
if nGroup > 1:
# The user wants to have nGroups returned from compIDs.
# First check that nGroup <= len(compIDs), print warning
# and clip if not
if nGroup > len(compIDs):
TACSWarning('nGroup=%d is larger than the number of\
selected components=%d. nGroup will be clipped to %d' %
(nGroup, len(compIDs), len(compIDs)), self.comm)
nGroup = len(compIDs)
# Pluck out the component descriptions again and we will
# sort them
compDescript = []
for i in range(len(compIDs)):
compDescript.append(self.compDescripts[compIDs[i]])
# define a general argsort
def argsort(seq):
return sorted(range(len(seq)), key=seq.__getitem__)
# ind is the index that would result in a sorted list.
ind = argsort(compDescript)
# Now simply divide 'ind' into 'nGroups' as evenly as
# possible, in the integer sense.
def split_list(alist, wanted_parts=1):
length = len(alist)
return [alist[i * length // wanted_parts:
(i + 1) * length // wanted_parts]
for i in range(wanted_parts)]
ind = split_list(ind, nGroup)
# Finally assemble the nested list of component IDs
tmp = []
for i in range(len(ind)):
tmp.append([])
for j in range(len(ind[i])):
tmp[-1].append(compIDs[ind[i][j]])
compIDs = tmp
elif nGroup < 0:
# Negative number signifies 'add one dv to each component'
tmp = []
for comp in compIDs:
tmp.append([comp])
compIDs = tmp
else:
# Otherwise, just put the current list of compIDs in a
# list of length 1.
compIDs = [compIDs]
return compIDs
def addFunction(self, funcName, funcHandle, include=None, exclude=None,
includeBound=None, compIDs=None, **kwargs):
"""
Generic function to add a function for TACS. It is intended to
be reasonably generic since the user supplies the actual
function handle to use. The following functions can be used:
KSFailure, KSBuckling, MaxBuckling, AverageKSFailure,
MaxFailure, AverageMaxFailure, AverageKSBuckling,
StructuralMass, Compliance, AggregateDisplacement.
Parameters
----------
funcName : str
The user-supplied name for the function. This will
typically be a string that is meaningful to the user
funcHandle : tacs.functions
The function handle to use for creation. This must come
from the functions module in tacs.
include : varies
Argument passed to selectCompIDs. See this function for
more information
exclude : varies
Argument passed to selectCompIDs. See this function for
more information
compIDs: list
List of compIDs to select. Alternative to selectCompIDs
arguments.
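For example (a sketch; the function names and the ksWeight value are
illustrative, and FEAAssembler denotes a pyTACS instance)::
    FEAAssembler.addFunction('mass', tacs.functions.StructuralMass)
    # assumed KSFailure keyword; 'SKIN' is a hypothetical component name
    FEAAssembler.addFunction('ks_failure', tacs.functions.KSFailure,
                             ksWeight=100.0, include='SKIN')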
"""
# First we will get the required domain, but only if both
# include is None and exclude is None. If so, just use the
# entire domain:
# Note nGroup is one since we only want exactly one domain
if compIDs is None:
compIDs = self.selectCompIDs(include, exclude, includeBound,
nGroup=1)[0]
# Flatten and get element numbers on each proc corresponding to specified compIDs
compIDs = self._flatten(compIDs)
elemIDs = self.meshLoader.getLocalElementIDsForComps(compIDs)
# We try to set up the function; if it fails, it may not be implemented:
try:
# pass assembler and function-specific kwargs straight to the tacs function
self.functionList[funcName] = funcHandle(self.assembler, **kwargs)
except:
TACSWarning("Function type %s is not currently supported "
"in pyTACS. Skipping function." % funcHandle, self.comm)
return
# Finally set the domain information
self.functionList[funcName].setDomain(elemIDs)
# Create additional tacs BVecs to hold adjoint and sens info
self.adjointList[funcName] = self.assembler.createVec()
self.dIduList[funcName] = self.assembler.createVec()
self.dvSensList[funcName] = self.assembler.createDesignVec()
self.xptSensList[funcName] = self.assembler.createNodeVec()
return compIDs
def getCompNames(self, compIDs):
"""
Return a list of component descriptions for the given component
IDs. compIDs should come from a call to selectCompIDs
Parameters
----------
compIDs : list
List of integers of the compIDs numbers
Returns
-------
compDescript : list
List of strings of the names of the corresponding compIDs
"""
compIDs = self._flatten(compIDs)
compDescripts = []
for i in range(len(compIDs)):
compDescripts.append(self.compDescripts[compIDs[i]])
return compDescripts
def getFunctionKeys(self):
"""Return a list of the current function key names"""
return list(self.functionList.keys())
def setStructProblem(self, structProblem):
"""Set the structProblem. This function can be called by the
user but typically will be called automatically by functions
that accept a structProblem object.
Parameters
----------
structProblem : instance of pyStruct_problem
Description of the structural problem to solve
"""
if structProblem is self.curSP:
return
if self.comm.rank == 0:
print('+' + '-' * 70 + '+')
print('| Switching to Struct Problem: %-39s|' % structProblem.name)
print('+' + '-' * 70 + '+')
try:
structProblem.tacsData
except AttributeError:
structProblem.tacsData = TACSLoadCase()
structProblem.tacsData.F = self.assembler.createVec()
structProblem.tacsData.u = self.assembler.createVec()
structProblem.tacsData.auxElems = tacs.TACS.AuxElements()
# We are now ready to associate self.curSP with the supplied SP
self.curSP = structProblem
self.curSP.adjointRHS = None
# Force and displacement vectors for problem
self.F = self.curSP.tacsData.F
self.u = self.curSP.tacsData.u
# Set auxiliary elements for adding tractions/pressures
self.auxElems = self.curSP.tacsData.auxElems
self.assembler.setAuxElements(self.auxElems)
# Create numpy array representation for easier access to vector values
vpn = self.varsPerNode
self.F_array = self.F.getArray()
self.u_array = self.u.getArray()
self.F_array = self.F_array.reshape(len(self.F_array) // vpn, vpn)
self.u_array = self.u_array.reshape(len(self.u_array) // vpn, vpn)
# Set current state variables in assembler
self.assembler.setVariables(self.u)
# Reset the Aitken acceleration for multidisciplinary analyses
self.doDamp = False
def createTACSAssembler(self, elemCallBack=None):
"""
This is the 'last' function to be called during the setup. The
user should have already added all the design variables,
domains etc. before this function is called. This function
finalizes the problem initialization and cannot be changed at a
later time. If an elemCallBack function is not provided by the user,
we will use pyNastran to generate one automatically from element
properties provided in the BDF file.
Parameters
----------
elemCallBack : python function handle
The calling sequence for elemCallBack **must** be as
follows::
def elemCallBack(dvNum, compID, compDescript, elemDescripts,
globalDVs, **kwargs):
The dvNum is the current counter which must be used by the
user when creating constitutive objects with design
variables.
compID is the ID number used by tacs to reference this property group.
Use kwargs['propID'] to get the corresponding Nastran property ID that
is read in from the BDF.
compDescript is the component descriptions read in from the BDF file
elemDescripts are the name of the elements belonging to this group
(e.g. CQUAD4, CTRIA3, CTETRA, etc). This value will be a list since
one component may contain multiple compatible element types.
Example: ['CQUAD4', 'CTRIA3']
globalDVs is a dictionary containing information about any
global DVs that have been added.
elemCallBack must return a list containing as many TACS element
objects as there are element types in elemDescripts (one for each).
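A minimal sketch of such a callback for an isotropic shell model, following
the same return convention as the BDF-generated callback in this module
(the material values, thickness and element type are illustrative
placeholders)::
    def elemCallBack(dvNum, compID, compDescript, elemDescripts,
                     globalDVs, **kwargs):
        # aluminum-like material and a thickness design variable
        prop = tacs.constitutive.MaterialProperties(rho=2780.0, E=73.1e9,
                                                    nu=0.33, ys=324.0e6)
        con = tacs.constitutive.IsoShellConstitutive(prop, t=0.002, tNum=dvNum)
        elemList = []
        for descript in elemDescripts:
            # assumes every component in this hypothetical model is a CQUAD4 shell
            elemList.append(tacs.elements.Quad4Shell(None, con))
        scaleList = [100.0]
        return elemList, scaleList
    FEAAssembler.createTACSAssembler(elemCallBack)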
"""
if elemCallBack is None:
elemCallBack = self._elemCallBackFromBDF()
self._createOutputGroups()
self._createElements(elemCallBack)
self.assembler = self.meshLoader.createTACSAssembler(self.varsPerNode)
self._createVariables()
self._createOutputViewer()
# Initial set of nodes for geometry manipulation if necessary
self.coords0 = self.getCoordinates()
def _elemCallBackFromBDF(self):
"""
Automatically setup elemCallBack using information contained in BDF file.
This function assumes all material properties are specified in the BDF.
"""
# Check if any properties are in the BDF
if self.bdfInfo.missing_properties:
raise Error("BDF file '%s' has missing properties cards. "
"Set 'debugPrint' option to True for more information."
"User must define own elemCallBack function." % (self.bdfName))
# Make sure cross-referencing is turned on in pynastran
if self.bdfInfo.is_xrefed is False:
self.bdfInfo.cross_reference()
self.bdfInfo.is_xrefed = True
# Create a dictionary to sort all elements by property number
elemDict = {}
for elementID in self.bdfInfo.elements:
element = self.bdfInfo.elements[elementID]
propertyID = element.pid
if propertyID not in elemDict:
elemDict[propertyID] = {}
elemDict[propertyID]['elements'] = []
elemDict[propertyID]['dvs'] = {}
elemDict[propertyID]['elements'].append(element)
# Create a dictionary to sort all design variables
for dv in self.bdfInfo.dvprels:
propertyID = self.bdfInfo.dvprels[dv].pid
dvName = self.bdfInfo.dvprels[dv].pname_fid
self.dvNum = max(self.dvNum, self.bdfInfo.dvprels[dv].dvids[0])
elemDict[propertyID]['dvs'][dvName] = self.bdfInfo.dvprels[dv]
# Create option for user to specify scale values in BDF
self.scaleList = [1.0] * self.dvNum
# Callback function to return appropriate tacs MaterialProperties object
# For a pynastran mat card
def matCallBack(matInfo):
# First we define the material property object
if matInfo.type == 'MAT1':
mat = tacs.constitutive.MaterialProperties(rho=matInfo.rho, E=matInfo.e,
nu=matInfo.nu, ys=matInfo.St,
alpha=matInfo.a)
elif matInfo.type == 'MAT8':
E1 = matInfo.e11
E2 = matInfo.e22
nu12 = matInfo.nu12
G12 = matInfo.g12
G13 = matInfo.g1z
G23 = matInfo.g2z
# If out-of-plane shear values are 0, Nastran defaults them to the in-plane
if G13 == 0.0:
G13 = G12
if G23 == 0.0:
G23 = G12
rho = matInfo.rho
Xt = matInfo.Xt
Xc = matInfo.Xc
Yt = matInfo.Yt
Yc = matInfo.Yc
S12 = matInfo.S
# TODO: add alpha
mat = tacs.constitutive.MaterialProperties(rho=rho, E1=E1, E2=E2, nu12=nu12, G12=G12, G13=G13, G23=G23,
Xt=Xt, Xc=Xc, Yt=Yt, Yc=Yc, S12=S12)
else:
raise Error("Unsupported material type '%s' for material number %d. " % (matInfo.type, matInfo.mid))
return mat
def elemCallBack(dvNum, compID, compDescript, elemDescripts, globalDVs, **kwargs):
# Initialize scale list for design variables we will add
scaleList = []
# Get the Nastran property ID
propertyID = kwargs['propID']
propInfo = self.bdfInfo.properties[propertyID]
elemInfo = elemDict[propertyID]['elements'][0]
# First we define the material object
# This property only references one material
if hasattr(propInfo, 'mid_ref'):
matInfo = propInfo.mid_ref
mat = matCallBack(matInfo)
# This property references multiple materials (maybe a laminate)
elif hasattr(propInfo, 'mids_ref'):
mat = []
for matInfo in propInfo.mids_ref:
mat.append(matCallBack(matInfo))
# Next we define the constitutive object
if propInfo.type == 'PSHELL': # Nastran isotropic shell
kcorr = propInfo.tst
if 'T' in elemDict[propertyID]['dvs']:
thickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xinit
tNum = elemDict[propertyID]['dvs']['T'].dvids[0] - 1
minThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xlb
maxThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xub
name = elemDict[propertyID]['dvs']['T'].dvids_ref[0].label
self.scaleList[tNum - 1] = elemDict[propertyID]['dvs']['T'].coeffs[0]
else:
thickness = propInfo.t
tNum = -1
minThickness = 0.0
maxThickness = 1e20
con = tacs.constitutive.IsoShellConstitutive(mat, t=thickness,
tlb=minThickness, tub=maxThickness, tNum=tNum)
elif propInfo.type == 'PCOMP': # Nastran composite shell
numPlies = propInfo.nplies
plyThicknesses = []
plyAngles = []
plyMats = []
# if the laminate is symmetric, mirror the ply indices
if propInfo.lam == 'SYM':
plyIndices = list(range(numPlies // 2))
plyIndices.extend(plyIndices[::-1])
else:
plyIndices = range(numPlies)
# Loop through plies and setup each entry in layup
for ply_i in plyIndices:
plyThicknesses.append(propInfo.thicknesses[ply_i])
plyMat = tacs.constitutive.OrthotropicPly(plyThicknesses[ply_i], mat[ply_i])
plyMats.append(plyMat)
plyAngles.append(propInfo.thetas[ply_i] * DEG2RAD)
# Convert thickness/angles to appropriate numpy array
plyThicknesses = np.array(plyThicknesses, dtype=self.dtype)
plyAngles = np.array(plyAngles, dtype=self.dtype)
if propInfo.lam is None or propInfo.lam in ['SYM', 'MEM']:
# Discrete laminate class (not for optimization)
con = tacs.constitutive.CompositeShellConstitutive(plyMats, plyThicknesses, plyAngles)
# Need to add functionality to consider only membrane in TACS for type = MEM
else:
raise Error("Unrecognized LAM type '%s' for PCOMP number %d." % (propInfo.lam, propertyID))
elif propInfo.type == 'PSOLID': # Nastran solid property
if 'T' in elemDict[propertyID]['dvs']:
thickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xinit
tNum = elemDict[propertyID]['dvs']['T'].dvids[0] - 1
minThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xlb
maxThickness = elemDict[propertyID]['dvs']['T'].dvids_ref[0].xub
name = elemDict[propertyID]['dvs']['T'].dvids_ref[0].label
self.scaleList[tNum - 1] = elemDict[propertyID]['dvs']['T'].coeffs[0]
else:
thickness = 1.0
tNum = -1
minThickness = 0.0
maxThickness = 10.0
con = tacs.constitutive.SolidConstitutive(mat, t=thickness,
tlb=minThickness, tub=maxThickness, tNum=tNum)
else:
raise Error("Unsupported property type '%s' for property number %d. " % (propInfo.type, propertyID))
# Set up transform object which may be required for certain elements
transform = None
if hasattr(elemInfo, 'theta_mcid_ref'):
mcid = elemDict[propertyID]['elements'][0].theta_mcid_ref
if mcid:
if mcid.type == 'CORD2R':
refAxis = mcid.i
transform = tacs.elements.ShellRefAxisTransform(refAxis)
else: # Don't support spherical/cylindrical yet
raise Error("Unsupported material coordinate system type "
"'%s' for property number %d." % (mcid.type, propertyID))
# Finally set up the element objects belonging to this component
elemList = []
for descript in elemDescripts:
if descript in ['CQUAD4', 'CQUADR']:
elem = tacs.elements.Quad4Shell(transform, con)
elif descript in ['CQUAD9', 'CQUAD']:
elem = tacs.elements.Quad9Shell(transform, con)
elif descript in ['CTRIA3', 'CTRIAR']:
elem = tacs.elements.Tri3Shell(transform, con)
elif 'CTETRA' in descript:
# May have variable number of nodes in card
nnodes = len(elemInfo.nodes)
if nnodes == 4:
basis = tacs.elements.LinearTetrahedralBasis()
elif nnodes == 10:
basis = tacs.elements.QuadraticTetrahedralBasis()
else:
raise Error("TACS does not currently support CTETRA elements with %d nodes." % nnodes)
model = tacs.elements.LinearElasticity3D(con)
elem = tacs.elements.Element3D(model, basis)
elif descript in ['CHEXA8', 'CHEXA']:
basis = tacs.elements.LinearHexaBasis()
model = tacs.elements.LinearElasticity3D(con)
elem = tacs.elements.Element3D(model, basis)
else:
raise Error("Unsupported element type "
"'%s' specified for property number %d." % (descript, propertyID))
elemList.append(elem)
return elemList, scaleList
return elemCallBack
####### Static load methods ########
def addLoadToComponents(self, structProblem, compIDs, F, averageLoad=False):
""""
The function is used to add a *FIXED TOTAL LOAD* on one or more
components, defined by COMPIDs. The purpose of this routine is
to add loads that remain fixed throughout an optimization. An example
would be an engine load. This routine determines all the unique nodes
in the FE model that are part of the requested components, then
takes the total 'force' given by F and divides it by the number of nodes.
This average load is then applied to the nodes.
NOTE: The units of the entries of the 'force' vector F are not
necessarily physical forces and their interpretation depends
on the physics problem being solved and the dofs included
in the model.
A couple of examples of force vector components for common problems are listed below:
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Q] # forces + heat
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Q] # forces + moments + heat
Parameters
----------
compIDs : The components with added loads. Use selectCompIDs()
to determine this.
F : Numpy array length varsPerNode
Vector of 'force' components
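For example (a sketch with varsPerNode = 6; the component name and load
values are placeholders, FEAAssembler denotes a pyTACS instance and sp a
previously created StaticProblem)::
    compIDs = FEAAssembler.selectCompIDs(include='ENGINE_MOUNT')
    FEAAssembler.addLoadToComponents(sp, compIDs,
                                     F=[0.0, 0.0, -80.0e3, 0.0, 0.0, 0.0])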
"""
# Make sure CompIDs are flat
compIDs = self._flatten([compIDs])
# Apply a unique force vector to each component
if not averageLoad:
F = numpy.atleast_2d(F)
# If the user only specified one force vector,
# we assume the force should be the same for each component
if F.shape[0] == 1:
F = np.repeat(F, [len(compIDs)], axis=0)
# If the dimensions still don't match, raise an error
elif F.shape[0] != len(compIDs):
raise Error("Number of forces must match number of compIDs,"
" {} forces were specified for {} compIDs".format(F.shape[0], len(compIDs)))
# Call addLoadToComponents again, once for each compID
for i, compID in enumerate(compIDs):
self.addLoadToComponents(structProblem, compID, F[i], averageLoad=True)
# Average one force vector over all components
else:
F = np.atleast_1d(F)
self.setStructProblem(structProblem)
# First determine the actual physical nodal location in the
# original BDF ordering of the nodes we want to add forces
# to. Only the root rank need do this:
uniqueNodes = None
if self.comm.rank == 0:
allNodes = []
compIDs = set(compIDs)
for cID in compIDs:
tmp = self.meshLoader.getConnectivityForComp(cID, nastranOrdering=True)
allNodes.extend(self._flatten(tmp))
# Now just unique all the nodes:
uniqueNodes = numpy.unique(allNodes)
uniqueNodes = self.comm.bcast(uniqueNodes, root=0)
# Now generate the final average force vector
Favg = F / len(uniqueNodes)
self.addLoadToNodes(structProblem, uniqueNodes, Favg, nastranOrdering=True)
# Write out a message of what we did:
self._info("Added a fixed load of %s to %d components, "
"distributed over %d nodes." % (
repr(F), len(compIDs), len(uniqueNodes)),
maxLen=80, box=True)
def addLoadToPoints(self, structProblem, points, F):
""""
The function is used to add a fixed point load of F to the
selected physical locations, points. A closest point search is
used to determine the FE nodes that are the closest to the
requested nodes. It is most efficient if many point loads are
necessary that points and F, contain many entries.
NOTE: The units of the entries of the 'force' vector F are not
necessarily physical forces and their interpretation depends
on the physics problem being solved and the dofs included
in the model.
A couple of examples of force vector components for common problems are listed below:
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Q] # forces + heat
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Q] # forces + moments + heat
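For example (a sketch with varsPerNode = 6; the coordinates and forces
below are placeholders, and sp denotes a previously created StaticProblem)::
    points = [[1.0, 0.0, 0.5], [2.0, 0.0, 0.5]]
    F = [[0.0, 0.0, 1.0e3, 0.0, 0.0, 0.0],
         [0.0, 0.0, 2.0e3, 0.0, 0.0, 0.0]]
    FEAAssembler.addLoadToPoints(sp, points, F)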
"""
try:
from scipy.spatial import cKDTree
except:
raise Error("scipy.spatial "
"must be available to use addLoadToPoints")
points = numpy.atleast_2d(points)
F = numpy.atleast_2d(F)
# If the user only specified one force vector,
# we assume the force should be the same for each node
if F.shape[0] == 1:
F = np.repeat(F, [len(points)], axis=0)
# If the dimensions still don't match, raise an error
elif F.shape[0] != len(points):
raise Error("Number of forces must match number of points,"
" {} forces were specified for {} points".format(F.shape[0], len(points)))
vpn = self.varsPerNode
if len(F[0]) != vpn:
raise Error("Length of force vector must match varsPerNode specified "
"for problem, which is {}, "
"but length of vector provided was {}".format(vpn, len(F[0])))
self.setStructProblem(structProblem)
# Pull out the local nodes on the proc and search "points" in the tree
self.assembler.getNodes(self.Xpts)
localNodes = np.real(self.Xpts.getArray())
nNodes = len(localNodes) // 3
xNodes = localNodes.reshape((nNodes, 3)).copy()
tree = cKDTree(xNodes)
d, index = tree.query(points, k=1)
# Now figure out which proc has the best distance for this
for i in range(len(points)):
proc = self.comm.allreduce((d[i], self.comm.rank), op=MPI.MINLOC)
print((i, self.comm.rank, proc, d[i], index[i], F[i]))
if proc[1] == self.comm.rank:
# Add contribution to global force array
self.F_array[index[i], :] += F[i]
def addLoadToNodes(self, structProblem, nodeIDs, F, nastranOrdering=False):
"""
The function is used to add a fixed point load of F to the
selected node IDs. This is similar to the addLoadToPoints method,
except we select the load points based on node ID rather than
physical location.
NOTE: This should be the preferred method (over addLoadToPoints) for adding forces to
specific nodes for the following reasons:
1. This method is more efficient, as it does not require a
closest point search to locate the node.
2. In the case where the mesh features coincident nodes
it is impossible to uniquely specify which node gets the load
through x,y,z location; however, the points can be specified uniquely by node ID.
A couple of examples of force vector components for common problems are listed below:
In Elasticity with varsPerNode = 3,
F = [fx, fy, fz] # forces
In Elasticity with varsPerNode = 6,
F = [fx, fy, fz, mx, my, mz] # forces + moments
In Thermoelasticity with varsPerNode = 4,
F = [fx, fy, fz, Q] # forces + heat
In Thermoelasticity with varsPerNode = 7,
F = [fx, fy, fz, mx, my, mz, Q] # forces + moments + heat
Parameters
----------
nodeIDs : list[int]
The nodes with added loads.
F : Numpy 1d or 2d array length (varsPerNodes) or (numNodeIDs, varsPerNodes)
Array of force vectors, one for each node. If only one force vector is provided,
force will be copied uniformly across all nodes.
nastranOrdering : bool
Flag signaling whether nodeIDs are in TACS (default)
or NASTRAN (grid IDs in bdf file) ordering
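For example (a sketch; the grid IDs and force vector are placeholders, and
sp denotes a previously created StaticProblem)::
    # apply the same 1 kN z-force to two BDF grid points (varsPerNode = 6)
    FEAAssembler.addLoadToNodes(sp, [101, 105],
                                F=[0.0, 0.0, 1.0e3, 0.0, 0.0, 0.0],
                                nastranOrdering=True)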
"""
# Make sure the inputs are the correct shape
nodeIDs = numpy.atleast_1d(nodeIDs)
F = numpy.atleast_2d(F)
numNodes = len(nodeIDs)
# If the user only specified one force vector,
# we assume the force should be the same for each node
if F.shape[0] == 1:
F = np.repeat(F, [numNodes], axis=0)
# If the dimensions still don't match, raise an error
elif F.shape[0] != numNodes:
raise Error("Number of forces must match number of nodes,"
" {} forces were specified for {} node IDs".format(F.shape[0], numNodes))
vpn = self.varsPerNode
if len(F[0]) != vpn:
raise Error("Length of force vector must match varsPerNode specified "
"for problem, which is {}, "
"but length of vector provided was {}".format(vpn, len(F[0])))
# First find the corresponding local node ID on each processor
localNodeIDs = self.meshLoader.getLocalNodeIDsFromGlobal(nodeIDs, nastranOrdering)
# Set the structural problem
self.setStructProblem(structProblem)
# Flag to make sure we find all user-specified nodes
nodeFound = np.zeros(numNodes, dtype=int)
# Loop through every node and if it's owned by this processor, add the load
for i, nodeID in enumerate(localNodeIDs):
# The node was found on this proc
if nodeID >= 0:
# Add contribution to global force array
self.F_array[nodeID, :] += F[i]
nodeFound[i] = 1
# Reduce the node flag and make sure that every node was found on exactly 1 proc
nodeFound = self.comm.allreduce(nodeFound, op=MPI.SUM)
# Warn the user if any nodes weren't found
if nastranOrdering:
orderString = 'Nastran'
else:
orderString = 'TACS'
for i in range(numNodes):
if not nodeFound[i]:
TACSWarning("Can't add load to node ID {} ({} ordering), node not found in model. "
"Double check BDF file.".format(nodeIDs[i], orderString), self.comm)
def addTractionToComponents(self, structProblem, compIDs, tractions,
faceIndex=0):
"""
The function is used to add a *FIXED TOTAL TRACTION* on one or more
components, defined by COMPIDs. The purpose of this routine is
to add loads that remain fixed throughout an optimization.
Parameters
----------
compIDs : The components with added loads. Use selectCompIDs()
to determine this.
tractions : Numpy array length 1 or compIDs
Array of traction vectors for each component
faceIndex : int
Indicates which face (side) of element to apply traction to.
Note: not required for certain elements (i.e. shells)
"""
# Make sure compIDs is flat and unique
compIDs = set(self._flatten(compIDs))
tractions = np.atleast_1d(tractions)
# Get global element IDs for the elements we're applying tractions to
elemIDs = self.meshLoader.getGlobalElementIDsForComps(compIDs, nastranOrdering=False)
# Add tractions element by element
self.addTractionToElements(structProblem, elemIDs, tractions, faceIndex, nastranOrdering=False)
# Write out a message of what we did:
self._info("Added a fixed traction of %s to %d components, "
"distributed over %d elements." % (
repr(tractions), len(compIDs), len(elemIDs)),
maxLen=80, box=True)
def addTractionToElements(self, structProblem, elemIDs, tractions,
faceIndex=0, nastranOrdering=False):
"""
The function is used to add a fixed traction to the
selected element IDs. Tractions can be specified on an
element by element basis (if tractions is a 2d array) or
set to a uniform value (if tractions is a 1d array)
Parameters
----------
elemIDs : List
The global element ID numbers for which to apply the traction.
tractions : Numpy 1d or 2d array length varsPerNodes or (elemIDs, varsPerNodes)
Array of traction vectors for each element
faceIndex : int
Indicates which face (side) of element to apply traction to.
Note: not required for certain elements (i.e. shells)
nastranOrdering : bool
Flag signaling whether elemIDs are in TACS (default)
or NASTRAN ordering
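For example (a sketch for shell elements, so faceIndex is left at its
default; the element IDs and traction components are placeholders, and sp
denotes a previously created StaticProblem)::
    # uniform traction [tx, ty, tz] applied to three elements
    FEAAssembler.addTractionToElements(sp, [10, 11, 12],
                                       tractions=[0.0, 0.0, 5.0e3],
                                       nastranOrdering=True)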
"""
# Make sure the inputs are the correct shape
elemIDs = numpy.atleast_1d(elemIDs)
tractions = numpy.atleast_2d(tractions).astype(dtype=self.dtype)
numElems = len(elemIDs)
# If the user only specified one traction vector,
# we assume the traction should be the same for each element
if tractions.shape[0] == 1:
tractions = np.repeat(tractions, [numElems], axis=0)
# If the dimensions still don't match, raise an error
elif tractions.shape[0] != numElems:
raise Error("Number of tractions must match number of elements,"
" {} tractions were specified for {} element IDs".format(tractions.shape[0], numElems))
# First find the corresponding local element ID on each processor
localElemIDs = self.meshLoader.getLocalElementIDsFromGlobal(elemIDs, nastranOrdering=nastranOrdering)
# Set the structural problem
self.setStructProblem(structProblem)
# Flag to make sure we find all user-specified elements
elemFound = np.zeros(numElems, dtype=int)
# Loop through every element and if it's owned by this processor, add the traction
for i, elemID in enumerate(localElemIDs):
# The element was found on this proc
if elemID >= 0:
# Mark element as found
elemFound[i] = 1
# Get the pointer for the tacs element object for this element
elemObj = self.meshLoader.getElementObjectForElemID(elemIDs[i], nastranOrdering=nastranOrdering)
# Create appropriate traction object for this element type
tracObj = elemObj.createElementTraction(faceIndex, tractions[i])
# Traction not implemented for element
if tracObj is None:
TACSWarning("TACS element of type {} does not hav a traction implimentation. "
"Skipping element in addTractionToElement procedure.".format(elemObj.getObjectName()),
self.comm)
# Traction implemented
else:
# Add new traction to auxiliary element object
self.auxElems.addElement(elemID, tracObj)
# Reduce the element flag and make sure that every element was found on exactly 1 proc
elemFound = self.comm.allreduce(elemFound, op=MPI.SUM)
# Warn the user if any elements weren't found
if nastranOrdering:
orderString = 'Nastran'
else:
orderString = 'TACS'
for i in range(numElems):
if not elemFound[i]:
TACSWarning("Can't add traction to element ID {} ({} ordering), element not found in model. "
"Double check BDF file.".format(elemIDs[i], orderString), self.comm)
def addPressureToComponents(self, structProblem, compIDs, pressures,
faceIndex=0):
"""
The function is used to add a *FIXED TOTAL PRESSURE* on one or more
components, defined by COMPIDs. The purpose of this routine is
to add loads that remain fixed throughout an optimization. An example
would be a fuel load.
Parameters
----------
compIDs : The components with added loads. Use selectCompIDs()
to determine this.
pressures : Numpy array length 1 or compIDs
Array of pressure values for each component
faceIndex : int
Indicates which face (side) of element to apply pressure to.
Note: not required for certain elements (i.e. shells)
"""
# Make sure compIDs is flat and unique
compIDs = set(self._flatten(compIDs))
pressures = np.atleast_1d(pressures)
# Get global element IDs for the elements we're applying pressure to
elemIDs = self.meshLoader.getGlobalElementIDsForComps(compIDs, nastranOrdering=False)
# Add pressure element by element
self.addPressureToElements(structProblem, elemIDs, pressures, faceIndex, nastranOrdering=False)
# Write out a message of what we did:
self._info("Added a fixed pressure of %s to %d components, "
"distributed over %d elements." % (
repr(pressures), len(compIDs), len(elemIDs)),
maxLen=80, box=True)
def addPressureToElements(self, structProblem, elemIDs, pressures,
faceIndex=0, nastranOrdering=False):
"""
The function is used to add a fixed pressure to the
selected element IDs. Pressures can be specified on an
element by element basis (if pressures is an array) or
set to a uniform value (if pressures is a scalar)
Parameters
----------
elemIDs : List
The global element ID numbers for which to apply the pressure.
pressures : Numpy array length 1 or elemIDs
Array of pressure values for each element
faceIndex : int
Indicates which face (side) of element to apply pressure to.
Note: not required for certain elements (i.e. shells)
nastranOrdering : bool
Flag signaling whether elemIDs are in TACS (default)
or NASTRAN ordering
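For example (a sketch; the element IDs and pressure magnitude are
placeholders, and sp denotes a previously created StaticProblem)::
    # uniform 10 kPa pressure on two shell elements
    FEAAssembler.addPressureToElements(sp, [21, 22], pressures=1.0e4,
                                       nastranOrdering=True)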
"""
# Make sure the inputs are the correct shape
elemIDs = numpy.atleast_1d(elemIDs)
pressures = numpy.atleast_1d(pressures)
numElems = len(elemIDs)
# If the user only specified one pressure,
# we assume the pressure should be the same for each element
if pressures.shape[0] == 1:
pressures = np.repeat(pressures, [numElems], axis=0)
# If the dimensions still don't match, raise an error
elif pressures.shape[0] != numElems:
raise Error("Number of pressures must match number of elements,"
" {} pressures were specified for {} element IDs".format(pressures.shape[0], numElems))
# First find the corresponding local element ID on each processor
localElemIDs = self.meshLoader.getLocalElementIDsFromGlobal(elemIDs, nastranOrdering=nastranOrdering)
# Set the structural problem
self.setStructProblem(structProblem)
# Flag to make sure we find all user-specified elements
elemFound = np.zeros(numElems, dtype=int)
# Loop through every element and if it's owned by this processor, add the pressure
for i, elemID in enumerate(localElemIDs):
# The element was found on this proc
if elemID >= 0:
elemFound[i] = 1
# Get the pointer for the tacs element object for this element
elemObj = self.meshLoader.getElementObjectForElemID(elemIDs[i], nastranOrdering=nastranOrdering)
# Create appropriate pressure object for this element type
pressObj = elemObj.createElementPressure(faceIndex, pressures[i])
# Pressure not implemented for element
if pressObj is None:
TACSWarning("TACS element of type {} does not hav a pressure implimentation. "
"Skipping element in addPressureToElement procedure.".format(elemObj.getObjectName()),
self.comm)
# Pressure implemented
else:
# Add new pressure to auxiliary element object
self.auxElems.addElement(elemID, pressObj)
# Reduce the element flag and make sure that every element was found on exactly 1 proc
elemFound = self.comm.allreduce(elemFound, op=MPI.SUM)
# Warn the user if any elements weren't found
if nastranOrdering:
orderString = 'Nastran'
else:
orderString = 'TACS'
for i in range(numElems):
if not elemFound[i]:
TACSWarning("Can't add pressure to element ID {} ({} ordering), element not found in model. "
"Double check BDF file.".format(elemIDs[i], orderString), self.comm)
def createTACSProbsFromBDF(self):
"""
Automatically define tacs problem class using information contained in BDF file.
This function assumes all loads are specified in the BDF and allows users to
skip setting loads in Python.
NOTE: Currently only supports LOAD, FORCE, MOMENT, PLOAD2, and PLOAD4 cards.
NOTE: Currently only supports staticProblem (SOL 101)
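Examples
--------
A hedged sketch of the intended workflow (``FEAsolver`` is assumed to be an
initialized pyTACS object whose assembler has already been created):
>>> structProblems = FEAsolver.createTACSProbsFromBDF()
>>> for scID, sp in structProblems.items():
...     FEAsolver(sp)  # solve each subcase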
"""
if self.assembler is None:
raise Error("TACS assembler has not been created. "
"Assembler must created first by running 'createTACSAssembler' method.")
# Make sure cross-referencing is turned on in pynastran
if self.bdfInfo.is_xrefed is False:
self.bdfInfo.cross_reference()
self.bdfInfo.is_xrefed = True
vpn = self.varsPerNode
loads = self.bdfInfo.loads
nloads = len(loads)
# Check if any loads are in the BDF
if nloads == 0:
raise Error("BDF file '%s' has no loads included in it. " % (self.bdfName))
structProblems = {}
# If subcases have been added in Nastran, then subCase 0 should not be run
if len(self.bdfInfo.subcases) > 1:
skipCaseZero = True
else:
skipCaseZero = False
# Loop through every load set and create a corresponding structural problem
for subCase in self.bdfInfo.subcases.values():
if skipCaseZero and subCase.id == 0:
continue
if 'SUBTITLE' in subCase.params:
name = subCase.params['SUBTITLE'][0]
else:
name = 'load_set_%.3d' % (subCase.id)
sp = tacs.problems.static.StaticProblem(name=name)
if 'LOAD' in subCase.params:
loadsID = subCase.params['LOAD'][0]
# Get loads and scalers for this load case ID
loadSet, loadScale, _ = self.bdfInfo.get_reduced_loads(loadsID)
# Loop through every load in set and add it to problem
for loadInfo, scale in zip(loadSet, loadScale):
# Add any point force or moment cards
if loadInfo.type == 'FORCE' or loadInfo.type == 'MOMENT':
nodeID = loadInfo.node_ref.nid
loadArray = numpy.zeros(vpn)
if loadInfo.type == 'FORCE' and vpn >= 3:
loadArray[:3] += scale * loadInfo.scaled_vector
elif loadInfo.type == 'MOMENT' and vpn >= 6:
loadArray[3:6] += scale * loadInfo.scaled_vector
self.addLoadToNodes(sp, nodeID, loadArray, nastranOrdering=True)
# Add any pressure loads
# Pressure load card specific to shell elements
elif loadInfo.type == 'PLOAD2':
elemIDs = loadInfo.eids
pressure = scale * loadInfo.pressure
self.addPressureToElements(sp, elemIDs, pressure, nastranOrdering=True)
# Alternate more general pressure load type
elif loadInfo.type == 'PLOAD4':
self._addPressureFromPLOAD4(sp, loadInfo, scale)
else:
TACSWarning("Unsupported load type "
" '%s' specified for load set number %d, skipping load" %(loadInfo.type, loadInfo.sid),
self.comm)
# append to list of structural problems
structProblems[subCase.id] = sp
return structProblems
def _addPressureFromPLOAD4(self, staticProb, loadInfo, scale=1.0):
"""
Add pressure to tacs static problem from pynastran PLOAD4 card.
Should only be called by createTACSProbsFromBDF and not directly by user.
"""
# Dictionary mapping nastran element face indices to TACS equivalent numbering
nastranToTACSFaceIDDict = {'CTETRA4': {1: 1, 2: 3, 3: 2, 4: 0},
'CTETRA': {2: 1, 4: 3, 3: 2, 1: 0},
'CHEXA': {1: 4, 2: 2, 3: 0, 4: 3, 5: 0, 6: 5}}
# We don't support pressure variation across elements, for now just average it
pressure = scale * np.mean(loadInfo.pressures)
for elemInfo in loadInfo.eids_ref:
elemID = elemInfo.eid
# Get the correct face index number based on element type
if 'CTETRA' in elemInfo.type:
for faceIndex in elemInfo.faces:
if loadInfo.g1 in elemInfo.faces[faceIndex] and \
loadInfo.g34 not in elemInfo.faces[faceIndex]:
# For some reason CTETRA4 is the only element that doesn't
# use ANSYS face numbering convention by default
if len(elemInfo.nodes) == 4:
faceIndex = nastranToTACSFaceIDDict['CTETRA4'][faceIndex]
else:
faceIndex = nastranToTACSFaceIDDict['CTETRA'][faceIndex]
# Positive pressure is inward for solid elements, flip pressure if necessary
# We don't flip it for face 0, because the normal for that face points inward by convention
# while the rest point outward
if faceIndex != 0:
pressure *= -1.0
break
elif 'CHEXA' in elemInfo.type:
for faceIndex in elemInfo.faces:
if loadInfo.g1 in elemInfo.faces[faceIndex] and \
loadInfo.g34 in elemInfo.faces[faceIndex]:
faceIndex = nastranToTACSFaceIDDict['CHEXA'][faceIndex]
# Pressure orientation is flipped for solid elements per Nastran convention
pressure *= -1.0
break
elif 'CQUAD' in elemInfo.type or 'CTRIA' in elemInfo.type:
# Face index doesn't matter for shells, just use 0
faceIndex = 0
else:
raise Error("Unsupported element type "
"'%s' specified for PLOAD4 load set number %d." % (elemInfo.type, loadInfo.sid))
# Figure out whether or not this is a traction based on if a vector is defined
if np.linalg.norm(loadInfo.nvector) == 0.0:
self.addPressureToElements(staticProb, elemID, pressure, faceIndex,
nastranOrdering=True)
else:
trac = pressure * loadInfo.nvector
self.addTractionToElements(staticProb, elemID, trac, faceIndex,
nastranOrdering=True)
####### Static solver methods ########
def reset(self, SP):
""" Reset each of the solution to last converged value."""
self.setStructProblem(SP)
self.u.copyValues(self.u_old)
def _initializeSolve(self):
"""
Initialize the solution of the structural system for the
loadCase. The stiffness matrix is assembled and factored.
"""
if self._factorOnNext:
self.assembler.assembleJacobian(self.alpha, self.beta, self.gamma, self.res, self.K)
self.PC.factor()
self.old_update.zeroEntries()
self._factorOnNext = False
self._PCfactorOnNext = False
def __call__(self, structProblem, damp=1.0, useAitkenAcceleration=False,
dampLB=0.2, loadScale=1.0):
"""
Solution of the structural system for loadCase. The
forces must already be set.
Parameters
----------
structProblem
Optional Arguments:
damp, float: Value to use to damp the solution update. Default is 1.0
useAitkenAcceleration, boolean: Flag to use
aitkenAcceleration. Only applicable for aerostructural
problems. Default is False.
loadScale, float: value to scale external loads by. Only useful for
load step approach on nonlinear problems.
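Examples
--------
A hedged usage sketch (``FEAsolver`` and ``sp`` are assumed to exist and the
loads to have been set beforehand; values are illustrative):
>>> damp = FEAsolver(sp)                  # plain solve
>>> damp = FEAsolver(sp, loadScale=0.5)   # half-load step of a load-stepping scheme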
"""
startTime = time.time()
self.setStructProblem(structProblem)
self.curSP.tacsData.callCounter += 1
# Set loadScale attributes, during load incrementation, self.loadScale is the current loadScale
# while self.maxLoadScale is the target/final load scale.
# For now, maxLoadScale is set equal to self.loadScale to make _updateResidual
# and _getForces work, this will be addressed in future when new NL solver is merged
self.loadScale = loadScale
self.maxLoadScale = loadScale
setupProblemTime = time.time()
# Check if we need to initialize
self._initializeSolve()
initSolveTime = time.time()
# Compute the RHS
# TODO: Auxiliary forces still need to be load scaled
# self.structure.setLoadFactor(self.curSP.tacsData.lcnum,loadScale)
self.assembler.assembleRes(self.res)
# Zero out bc terms in F
self.assembler.applyBCs(self.F)
# Add the -F
self.res.axpy(-loadScale, self.F)
# Set initnorm as the norm of F
self.initNorm = numpy.real(self.F.norm()) * loadScale
# Starting Norm for this computation
self.startNorm = numpy.real(self.res.norm())
initNormTime = time.time()
# Solve Linear System for the update
self.KSM.solve(self.res, self.update)
self.update.scale(-1.0)
solveTime = time.time()
# Apply Aitken Acceleration if necessary:
if useAitkenAcceleration:
if self.doDamp:
# Compute: temp0 = update - old_update
self.temp0.zeroEntries()
self.temp0.axpy(1.0, self.update)
self.temp0.axpy(-1.0, self.old_update)
dnom = self.temp0.dot(self.temp0)
damp = damp * (1.0 - self.temp0.dot(self.update) / dnom)
# Clip to a reasonable range
damp = numpy.clip(damp, dampLB, 1.0)
self.doDamp = True
# Update State Variables
self.assembler.getVariables(self.u)
self.u.axpy(damp, self.update)
self.assembler.setVariables(self.u)
# Set the old update
self.old_update.copyValues(self.update)
stateUpdateTime = time.time()
# Compute final FEA Norm
self.assembler.assembleRes(self.res)
self.res.axpy(-loadScale, self.F) # Add the -F
self.finalNorm = numpy.real(self.res.norm())
finalNormTime = time.time()
# If timing was requested, print it. If the solution is nonlinear,
# print this information automatically if printIterations was requested.
if (self.getOption('printTiming') or (self.getOption('printIterations')
and self.getOption('solutionType').lower() != 'linear')):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Solve Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Setup Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Solve Init Time', initSolveTime - setupProblemTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Init Norm Time', initNormTime - initSolveTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Solve Time', solveTime - initNormTime))
self.pp('| %-30s: %10.3f sec' % ('TACS State Update Time', stateUpdateTime - solveTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Final Norm Time', finalNormTime - stateUpdateTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Total Solution Time', finalNormTime - startTime))
self.pp('+--------------------------------------------------+')
return damp
####### Function eval/sensitivity methods ########
def evalFunctions(self, structProblem, funcs, evalFuncs=None,
ignoreMissing=False):
"""
This is the main routine for returning useful information from
pytacs. The functions corresponding to the strings in
EVAL_FUNCS are evaluated and updated into the provided
dictionary.
Parameters
----------
structProblem : pyStructProblem class
Structural problem to get the solution for
funcs : dict
Dictionary into which the functions are saved.
evalFuncs : iterable object containing strings.
If not none, use these functions to evaluate.
ignoreMissing : bool
Flag to suppress checking for a valid function. Please use
this option with caution.
Examples
--------
>>> funcs = {}
>>> FEAsolver(sp)
>>> FEAsolver.evalFunctions(sp, funcs, ['mass'])
>>> funcs
>>> # Result will look like (if structProblem, sp, has name of 'c1'):
>>> # {'c1_mass': 12354.10}
"""
startTime = time.time()
# Set the structural problem
self.setStructProblem(structProblem)
if evalFuncs is None:
evalFuncs = sorted(list(self.curSP.evalFuncs))
else:
evalFuncs = sorted(list(evalFuncs))
if not ignoreMissing:
for f in evalFuncs:
if not f in self.functionList:
raise Error("Supplied function '%s' has not been added "
"using addFunction()." % f)
setupProblemTime = time.time()
# Fast parallel function evaluation of structural funcs:
handles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
funcVals = self.assembler.evalFunctions(handles)
functionEvalTime = time.time()
# Assign function values to appropriate dictionary
i = 0
for f in evalFuncs:
if f in self.functionList:
key = self.curSP.name + '_%s' % f
self.curSP.funcNames[f] = key
funcs[key] = funcVals[i]
i += 1
dictAssignTime = time.time()
if self.getOption('printTiming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Function Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Function Setup Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Function Eval Time', functionEvalTime - setupProblemTime))
self.pp('| %-30s: %10.3f sec' % ('TACS Dict Time', dictAssignTime - functionEvalTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Function Time', dictAssignTime - startTime))
self.pp('+--------------------------------------------------+')
def evalFunctionsSens(self, structProblem, funcsSens, evalFuncs=None):
"""
This is the main routine for returning useful (sensitivity)
information from pytacs. The derivatives of the functions
corresponding to the strings in EVAL_FUNCS are evaluated and
updated into the provided dictionary.
Parameters
----------
structProblem : pyStructProblem class
Structural problem to get the solution for
funcsSens : dict
Dictionary into which the derivatives are saved.
evalFuncs : iterable object containing strings
The functions the user wants returned
Examples
--------
>>> funcsSens = {}
>>> FEAsolver.evalFunctionsSens(sp, funcsSens, ['mass'])
>>> funcsSens
>>> # Result will look like (if structProblem, sp, has name of 'c1'):
>>> # {'c1_mass': {'struct': [1.234, ..., 7.89]}}
"""
startTime = time.time()
# Set the structural problem
self.setStructProblem(structProblem)
if evalFuncs is None:
evalFuncs = sorted(list(self.curSP.evalFuncs))
else:
evalFuncs = sorted(list(evalFuncs))
# Check that the functions are all ok.
# and prepare tacs vecs for adjoint procedure
dvSenses = []
xptSenses = []
dIdus = []
adjoints = []
for f in evalFuncs:
if f not in self.functionList:
raise Error("Supplied function has not beed added "
"using addFunction()")
else:
# Populate the lists with the tacs bvecs
# we'll need for each adjoint/sens calculation
dvSens = self.dvSensList[f]
dvSens.zeroEntries()
dvSenses.append(dvSens)
xptSens = self.xptSensList[f]
xptSens.zeroEntries()
xptSenses.append(xptSens)
dIdu = self.dIduList[f]
dIdu.zeroEntries()
dIdus.append(dIdu)
adjoint = self.adjointList[f]
adjoint.zeroEntries()
adjoints.append(adjoint)
setupProblemTime = time.time()
adjointStartTime = {}
adjointEndTime = {}
# Next we will solve all the adjoints
# Set adjoint rhs
self.addSVSens(evalFuncs, dIdus)
adjointRHSTime = time.time()
for i, f in enumerate(evalFuncs):
adjointStartTime[f] = time.time()
self.solveAdjoint(dIdus[i], adjoints[i])
adjointEndTime[f] = time.time()
adjointFinishedTime = time.time()
# Evaluate all the adjoint residual products at the same time for
# efficiency:
self.addDVSens(evalFuncs, dvSenses)
self.addAdjointResProducts(adjoints, dvSenses)
self.addXptSens(evalFuncs, xptSenses)
self.addAdjointResXptSensProducts(adjoints, xptSenses)
# Recast sensitivities into dict for user
for i, f in enumerate(evalFuncs):
key = self.curSP.name + '_%s' % f
# Finalize sensitivity arrays across all procs
dvSenses[i].beginSetValues()
dvSenses[i].endSetValues()
xptSenses[i].beginSetValues()
xptSenses[i].endSetValues()
# Return sensitivities as array in sens dict
funcsSens[key] = {self.varName: dvSenses[i].getArray().copy(),
self.coordName: xptSenses[i].getArray().copy()}
totalSensitivityTime = time.time()
if self.getOption('printTiming'):
self.pp('+--------------------------------------------------+')
self.pp('|')
self.pp('| TACS Adjoint Times:')
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('TACS Sens Setup Problem Time', setupProblemTime - startTime))
self.pp('| %-30s: %10.3f sec' % (
'TACS Adjoint RHS Time', adjointRHSTime - setupProblemTime))
for f in evalFuncs:
self.pp('| %-30s: %10.3f sec' % (
'TACS Adjoint Solve Time - %s' % (f), adjointEndTime[f] - adjointStartTime[f]))
self.pp('| %-30s: %10.3f sec' % ('Total Sensitivity Time', totalSensitivityTime - adjointFinishedTime))
self.pp('|')
self.pp('| %-30s: %10.3f sec' % ('Complete Sensitivity Time', totalSensitivityTime - startTime))
self.pp('+--------------------------------------------------+')
####### Design variable methods ########
def setVarName(self, varName):
"""
Set a name for the structural variables in pyOpt. Only needs
to be changed if more than 1 pytacs object is used in an
optimization
Parameters
----------
varName : str
Name of the structural variable used in addVarGroup().
"""
self.varName = varName
def setDesignVars(self, x):
"""
Update the design variables used by tacs.
Parameters
----------
x : ndarray
The variables (typically from the optimizer) to set. It
looks for variable in the ``self.varName`` attribute.
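Examples
--------
Illustrative only; the dict key must match ``self.varName`` (``'struct'``
below is an assumption, not a requirement):
>>> FEAsolver.setDesignVars(np.ones(FEAsolver.getNumDesignVars()))
>>> FEAsolver.setDesignVars({'struct': np.ones(FEAsolver.getNumDesignVars())})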
"""
# Check if the design variables are being handed in a dict
if isinstance(x, dict):
if self.varName in x:
self.x.getArray()[:] = x[self.varName]
# or array
elif isinstance(x, np.ndarray):
self.x.getArray()[:] = x
else:
raise ValueError("setDesignVars must be called with either a numpy array or dict as input.")
# Set the variables in tacs, and the constraint objects
self.assembler.setDesignVars(self.x)
self._factorOnNext = True
def getDesignVars(self):
"""
get the design variables that were specified with
addVariablesPyOpt.
Returns
----------
x : array
The current design variable vector set in tacs.
Notes
-----
This routine **can** also accept a list or vector of
variables. This is used internally in pytacs, but is not
recommended to be used externally.
"""
# Get the current design variable values from the TACS assembler
self.assembler.getDesignVars(self.x)
return self.x.getArray().copy()
def getNumDesignVars(self):
"""
Return the number of design variables on this processor.
"""
return self.x.getSize()
def getTotalNumDesignVars(self):
"""
Return the number of design variables across all processors.
"""
return self.dvNum
def getCoordinates(self):
"""
Return the mesh coordinates of the structure.
Returns
-------
coords : array
Structural coordinate in array of size (N, 3) where N is
the number of structural nodes on this processor.
"""
Xpts = self.assembler.createNodeVec()
self.assembler.getNodes(Xpts)
coords = Xpts.getArray()
return coords
def setCoordinates(self, coords):
"""
Set the mesh coordinates of the structure.
Parameters
----------
coords : array
Structural coordinate in array of size (N, 3) where N is
the number of structural nodes on this processor.
"""
XptsArray = self.Xpts.getArray()
# Make sure input is raveled (1D) in case user changed shape
XptsArray[:] = numpy.ravel(coords)
self.assembler.setNodes(self.Xpts)
self._factorOnNext = True
def getNumCoordinates(self):
"""
Return the number of mesh coordinates on this processor.
"""
return self.Xpts.getSize()
####### Post processing methods ########
def getVariablesAtPoints(self, structProblem, points):
'''The function is used to get the state variable DOFs at the
selected physical locations, points. A closest point search is
used to determine the FE nodes that are the closest to the
requested nodes.
NOTE: The number and units of the entries of the state vector
depends on the physics problem being solved and the dofs included
in the model.
A couple of examples of state vector components for common problem are listed below:
In Elasticity with varsPerNode = 3,
q = [u, v, w] # displacements
In Elasticity with varsPerNode = 6,
q = [u, v, w, tx, ty, tz] # displacements + rotations
In Thermoelasticity with varsPerNode = 4,
q = [u, v, w, T] # displacements + temperature
In Thermoelasticity with varsPerNode = 7,
q = [u, v, w, tx, ty, tz, T] # displacements + rotations + temperature
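Examples
--------
A hypothetical probe of two points (coordinates are illustrative only):
>>> pts = [[0.0, 0.0, 0.0], [1.0, 0.5, 0.0]]
>>> q = FEAsolver.getVariablesAtPoints(sp, pts)  # shape (2, varsPerNode)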
'''
try:
from scipy.spatial import cKDTree
except ImportError:
raise Error("scipy.spatial "
"must be available to use getVariablesAtPoints")
points = numpy.atleast_2d(points)
self.setStructProblem(structProblem)
# Pull out the local nodes on the proc and search "points" in the tree
vpn = self.varsPerNode
Xpts = self.assembler.createNodeVec()
self.assembler.getNodes(Xpts)
localNodes = np.real(Xpts.getArray())
nNodes = len(localNodes) // 3  # the node vector holds 3 coordinates per node
xNodes = localNodes[0:nNodes * 3].reshape((nNodes, 3)).copy()
tree = cKDTree(xNodes)
d, index = tree.query(points, k=1)
# Now figure out which proc has the best distance for this
localu = np.real(structProblem.tacsData.u.getArray())
uNodes = localu[0:nNodes * vpn].reshape((nNodes, vpn)).copy()
u_req = numpy.zeros([len(points), vpn])
for i in range(len(points)):
proc = self.comm.allreduce((d[i], self.comm.rank), op=MPI.MINLOC)
u_req[i, :] = uNodes[index[i], :]
u_req[i, :] = self.comm.bcast(uNodes[index[i], :], root=proc[1])
return u_req
def writeDVVisualization(self, fileName, n=17):
"""
This function writes a standard f5 output file, but with
design variables defined by x = mod(arange(nDVs), n), where n is an
integer supplied by the user. The idea is to use contouring in
a post processing program to visualize the structural design
variables.
Parameters
----------
fileName : str
Filename to use. Since it is an f5 file, it should have a .f5 extension
n : int
Modulus value. 17 is the default which tends to work well.
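Examples
--------
A hedged sketch (the output path is illustrative only):
>>> FEAsolver.writeDVVisualization('dv_check.f5', n=17)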
"""
nDVs = self.getNumDesignVars()
# Save the current variables
xSave = self.getDesignVars()
# Generate and set the 'mod' variables
x = numpy.mod(numpy.arange(nDVs), n)
self.setDesignVars(x)
# Normal solution write
self.writeOutputFile(fileName)
# Reset the saved variables
self.setDesignVars(xSave)
def writeOutputFile(self, fileName):
"""Low-level command to write the current loadcase to a file
Parameters
----------
fileName : str
Filename for output. Should have .f5 extension.
"""
self.outputViewer.writeToFile(fileName)
def writeSolution(self, outputDir=None, baseName=None, number=None):
"""This is a generic shell function that writes the output
file(s). The intent is that the user or calling program can
call this function and pyTACS writes all the files that the
user has defined. It is recommended that this function is used
along with the associated logical flags in the options to
determine the desired writing procedure
Parameters
----------
outputDir : str or None
Use the supplied output directory
baseName : str or None
Use this supplied string for the base filename. Typically
only used from an external solver.
number : int or None
Use the user-supplied number to index the solution. Again, only
typically used from an external solver
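Examples
--------
Illustrative calls only; the directory and base names are assumptions:
>>> FEAsolver.writeSolution()  # use the 'outputDir' option and internal counter
>>> FEAsolver.writeSolution(outputDir='./results', baseName='wing', number=3)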
"""
# Check input
if outputDir is None:
outputDir = self.getOption('outputDir')
if baseName is None:
baseName = self.curSP.name
# If we are numbering solutions, i.e. saving the sequence of
# calls, add the call number
if number is not None:
# Use the number provided by the caller:
baseName = baseName + '_%3.3d' % number
else:
# if number is none, i.e. standalone, but we need to
# number solutions, use internal counter
if self.getOption('numberSolutions'):
baseName = baseName + '_%3.3d' % self.curSP.tacsData.callCounter
# Unless the writeSolution option is off write actual file:
if self.getOption('writeSolution'):
base = os.path.join(outputDir, baseName) + '.f5'
self.outputViewer.writeToFile(base)
if self.getOption('writeBDF'):
base = os.path.join(outputDir, baseName) + '.bdf'
self.writeBDF(base)
# =========================================================================
# The remainder of the routines should not be needed by a user
# using this class directly. However, many of the functions are
# still public since they are used by a solver that uses this
# class, i.e. an Aerostructural solver.
# =========================================================================
def getNumComponents(self):
"""
Return number of components (property) groups found in bdf.
"""
return self.nComp
def solveAdjoint(self, rhs, phi, damp=1.0):
"""
Solve the structural adjoint.
Parameters
----------
rhs : TACS BVec
right hand side vector for adjoint solve
phi : TACS BVec
BVec into which the adjoint is saved
damp : float
A damping variable for adjoint update. Typically only used
in multidisciplinary analysis
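Examples
--------
A hedged sketch; ``dIdu`` and ``phi`` are assumed to be TACS BVecs created
beforehand (for instance with the assembler's vector constructor):
>>> FEAsolver.solveAdjoint(dIdu, phi)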
"""
# First compute the residual
self.K.mult(phi, self.res)
self.res.axpy(-1.0, rhs) # Add the -RHS
# Starting Norm for this computation
self.startNorm = numpy.real(self.res.norm())
# Solve Linear System
zeroGuess = 0
self.update.zeroEntries()
self.KSM.solve(self.res, self.update, zeroGuess)
# Update the adjoint vector with the (damped) update
phi.axpy(-damp, self.update)
# Compute actual final FEA Norm
self.K.mult(phi, self.res)
self.res.axpy(-1.0, rhs) # Add the -RHS
self.finalNorm = numpy.real(self.res.norm())
def getNumVariables(self):
"""Return the number of degrees of freedom (states) that are
on this processor
Returns
-------
nstate : int
number of states.
"""
return self.u.getSize()
def getVariables(self, structProblem, states=None):
"""Return the current state values for the current
structProblem"""
self.setStructProblem(structProblem)
if states is None:
states = self.u.getArray().copy()
else:
states[:] = self.u.getArray()
return states
def setVariables(self, structProblem, states):
""" Set the structural states for current load case. Typically
only used for aerostructural analysis
Parameters
----------
states : array
Values to set. Must be the size of getNumVariables()
"""
self.setStructProblem(structProblem)
self.u.setValues(states)
self.assembler.setVariables(self.u)
def getVarsPerNodes(self):
"""
Get the number of variables per node for the model.
"""
if self.assembler is not None:
return self.varsPerNode
else:
raise Error("Assembler must be finalized before getVarsPerNodes can be called.")
def addSVSens(self, evalFuncs, dIduList):
""" Add the state variable sensitivity to the ADjoint RHS for given evalFuncs"""
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
self.assembler.addSVSens(funcHandles, dIduList, self.alpha, self.beta, self.gamma)
def addDVSens(self, evalFuncs, dvSensList, scale=1.0):
""" Add pratial sensitivity contribution due to design vars for evalFuncs"""
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
self.assembler.addDVSens(funcHandles, dvSensList, scale)
def addAdjointResProducts(self, adjointlist, dvSensList, scale=-1.0):
""" Add the adjoint product contribution to the design variable sensitivity arrays"""
self.assembler.addAdjointResProducts(adjointlist, dvSensList, scale)
def addXptSens(self, evalFuncs, xptSensList, scale=1.0):
""" Add pratial sensitivity contribution due to nodal coordinates for evalFuncs"""
funcHandles = [self.functionList[f] for f in evalFuncs if
f in self.functionList]
self.assembler.addXptSens(funcHandles, xptSensList, scale)
def addAdjointResXptSensProducts(self, adjointlist, xptSensList, scale=-1.0):
""" Add the adjoint product contribution to the nodal coordinates sensitivity arrays"""
self.assembler.addAdjointResXptSensProducts(adjointlist, xptSensList, scale)
def getResidual(self, structProblem, res=None, Fext=None):
"""
This routine is used to evaluate directly the structural
residual. Only typically used with aerostructural analysis.
Parameters
----------
structProblem : pyStructProblem class
Structural problem to use
res : numpy array
If res is not None, place the residuals into this array.
Returns
-------
res : array
The same array if res was provided, (otherwise a new
array) with evaluated residuals
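Examples
--------
Illustrative only; ``Fext`` is an optional external load array whose length
matches ``getNumVariables()``:
>>> res = FEAsolver.getResidual(sp)
>>> res = FEAsolver.getResidual(sp, Fext=np.zeros(FEAsolver.getNumVariables()))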
"""
self.setStructProblem(structProblem)
self.assembler.assembleRes(self.res)
self.res.axpy(1.0, self.curSP.tacsData.F) # Add the -F
if Fext is not None:
resArray = self.res.getArray()
resArray[:] -= Fext[:]
if res is None:
res = self.res.getArray().copy()
else:
res[:] = self.res.getArray()
return res
def getResNorms(self):
"""Return the initial, starting and final Res Norms. Note that
the same norms are used for both solution and adjoint
computations"""
return (numpy.real(self.initNorm), numpy.real(self.startNorm),
numpy.real(self.finalNorm))
import torch
import numpy as np
from utils.data_loader import get_relative_pos, get_trigger_mask
TRI_LEN = 5
ARG_LEN_DICT = {
'collateral': 14,
'proportion': 37,
'obj-org': 34,
'number': 18,
'date': 27,
'sub-org': 35,
'target-company': 59,
'sub': 38,
'obj': 36,
'share-org': 19,
'money': 28,
'title': 8,
'sub-per': 15,
'obj-per': 18,
'share-per': 20,
'institution': 22,
'way': 8,
'amount': 19
}
def extract_all_items_without_oracle(model, device, idx, content: str, token, seg, mask, seq_len, threshold_0, threshold_1, threshold_2, threshold_3, threshold_4, id_type: dict, id_args: dict, ty_args_id: dict):
assert token.size(0) == 1
content = content[0]
result = {'id': idx, 'content': content}
text_emb = model.plm(token, seg, mask)
args_id = {id_args[k]: k for k in id_args}
args_len_dict = {args_id[k]: ARG_LEN_DICT[k] for k in ARG_LEN_DICT}
p_type, type_emb = model.predict_type(text_emb, mask)
type_pred = np.array(p_type > threshold_0, dtype=bool)
type_pred = [i for i, t in enumerate(type_pred) if t]
events_pred = []
for type_pred_one in type_pred:
type_rep = type_emb[type_pred_one, :]
type_rep = type_rep.unsqueeze(0)
p_s, p_e, text_rep_type = model.predict_trigger(type_rep, text_emb, mask)
trigger_s = np.where(p_s > threshold_1)[0]
trigger_e = np.where(p_e > threshold_2)[0]
trigger_spans = []
for i in trigger_s:
es = trigger_e[trigger_e >= i]
if len(es) > 0:
e = es[0]
if e - i + 1 <= TRI_LEN:
trigger_spans.append((i, e))
for k, span in enumerate(trigger_spans):
rp = get_relative_pos(span[0], span[1], seq_len)
rp = [p + seq_len for p in rp]
tm = get_trigger_mask(span[0], span[1], seq_len)
rp = torch.LongTensor(rp).to(device)
tm = torch.LongTensor(tm).to(device)
rp = rp.unsqueeze(0)
tm = tm.unsqueeze(0)
p_s, p_e, type_soft_constrain = model.predict_args(text_rep_type, rp, tm, mask, type_rep)
p_s = np.transpose(p_s)
p_e = np.transpose(p_e)
type_name = id_type[type_pred_one]
pred_event_one = {'type': type_name}
pred_trigger = {'span': [int(span[0]), int(span[1]) + 1], 'word': content[int(span[0]):int(span[1]) + 1]}
pred_event_one['trigger'] = pred_trigger
pred_args = {}
args_candidates = ty_args_id[type_pred_one]
for i in args_candidates:
pred_args[id_args[i]] = []
args_s = np.where(p_s[i] > threshold_3)[0]
args_e = np.where(p_e[i] > threshold_4)[0]
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# ---
# jupyter:
# jupytext:
# text_representation:
# extension: .py
# format_name: light
# format_version: '1.4'
# jupytext_version: 1.1.4
# kernelspec:
# display_name: Python 3
# language: python
# name: python3
# ---
# # s_risk_attrib_variance [<img src="https://www.arpm.co/lab/icons/icon_permalink.png" width=30 height=30 style="display: inline;">](https://www.arpm.co/lab/redirect.php?code=s_risk_attrib_variance&codeLang=Python)
# For details, see [here](https://www.arpm.co/lab/redirect.php?permalink=ExBetsPCAandTors).
# +
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
from arpym.portfolio import effective_num_bets, minimum_torsion
from arpym.tools import solve_riccati, add_logo
# -
# ## [Step 0](https://www.arpm.co/lab/redirect.php?permalink=s_risk_attrib_variance-implementation-step00): Load data
# +
# (generated by script s_risk_attribution_norm)
path = '../../../databases/temporary-databases/'
db = pd.read_csv(path + 'db_risk_attribution_normal.csv')
k_ = int(np.array(db['k_'].iloc[0]))
beta = np.array(db['beta_new'].iloc[:k_+1]).reshape(-1)
mu_z = np.array(db['mu'].iloc[:k_+1]).reshape(-1)
sig2_z = np.array(db['sigma2_z'].iloc[:(k_+1)*(k_+1)]).\
reshape((k_+1, k_+1))
# -
# ## [Step 1](https://www.arpm.co/lab/redirect.php?permalink=s_risk_attrib_variance-implementation-step01): Principal components decomposition of the covariance matrix
lam, e = np.linalg.eig(sig2_z)  # eigenvalues and eigenvectors of factors' covariance matrix
# fix the sign convention: flip eigenvectors whose third component is negative
flip = (e[2] < 0)
e[:, flip] = -e[:, flip]
index = np.argsort(lam)[::-1]
e = e[:, index]
lambda_sort = np.sort(lam)[::-1]  # eigenvalues in decreasing order, matching the reordered eigenvectors
#!/usr/bin/python
from __future__ import print_function
import sys
import csv
import os
from metrics import precision, recall, f1score, ndcg, average_precision, nnt1, nnt2
import numpy
import matplotlib.pyplot as plt
def read_dataset(filename):
dataset = {}
with open(filename, 'rb') as fin:
reader = csv.reader(fin)
for row in reader:
fullid = row[0]
category = row[1]
subcategory = row[2]
dataset[fullid] = (category, subcategory)
return dataset
def load_result(path, filename, queries, targets):
fullpath = os.path.join(path, filename)
cutoff = 1000
r = []
q = queries[filename]
with open(fullpath, 'rb') as fin:
for line in fin.readlines()[:cutoff]:
if line.strip() and not line.startswith('#'): # line is not empty
retrieved, distance = line.split()
try:
r.append(targets[retrieved])
except KeyError:
continue
return q, r
def load_results(path, queries, targets):
results = []
for filename in os.listdir(path):
try:
q = queries[filename]
except KeyError:
continue
q, r = load_result(path, filename, queries, targets)
results.append((q, r))
return results
def freq_count(dataset):
freqs = {}
for k, v in dataset.items():
if v[0] in freqs:
freqs[v[0]] += 1
else:
freqs[v[0]] = 1
return freqs
def categories_to_rel(queried, retrieved):
x = []
for r in retrieved:
if queried[0] == r[0]:
x.append(1.0)
else:
x.append(0.0)
return x
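# A small hedged example of what categories_to_rel produces (values are illustrative):
#   queried   = ('chair', 'armchair')
#   retrieved = [('chair', 'stool'), ('table', 'desk'), ('chair', 'armchair')]
#   categories_to_rel(queried, retrieved) -> [1.0, 0.0, 1.0]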
def evaluate(path):
queries = read_dataset('queries.csv')
targets = read_dataset('targets.csv')
freqs = freq_count(targets)
results = load_results(path, queries, targets)
cutoff = 1000
precisions = []
recalls = []
f1scores = []
aps = []
gains = []
nnt1s = []
nnt2s = []
for (queried, retrieved) in results:
x = categories_to_rel(queried, retrieved)[:cutoff]
p = precision(x)
r = recall(x, freqs[queried[0]])
f = f1score(x, freqs[queried[0]])
g = ndcg(x)
ap = average_precision(x, freqs[queried[0]])
t1 = nnt1(x, freqs[queried[0]])
t2 = nnt2(x, freqs[queried[0]])
precisions.append(p)
recalls.append(r)
f1scores.append(f)
gains.append(g)
aps.append(ap)
nnt1s.append(t1)
nnt2s.append(t2)
print('mean precision:', numpy.mean(precisions))
print('mean recall:', numpy.mean(recalls))
print('mean F1 score:', numpy.mean(f1scores))
print('mAP:', numpy.mean(aps))
print('mean NDCG:', numpy.mean(gains))
print('mean nearest neighbor:', numpy.mean(nnt1s))
'''
Addition problem. Code reused from https://github.com/batzner/indrnn/blob/master/examples/addition_rnn.py
'''
from __future__ import print_function
import numpy as np
import os
from keras.models import Sequential
from keras.layers import Dense
from keras.optimizers import Adam
from keras import initializers
from keras.callbacks import ModelCheckpoint
from janet import JANET
if not os.path.exists('weights'):
os.makedirs('weights/')
# Parameters taken from https://arxiv.org/abs/1804.04849
TIME_STEPS = 100
NUM_UNITS = 128
LEARNING_RATE = 0.001
STEPS_PER_EPOCH = 100
NUM_EPOCHS = 80
BATCH_SIZE = 50
# Code reused from https://github.com/batzner/indrnn/blob/master/examples/addition_rnn.py
def batch_generator():
while True:
"""Generate the adding problem dataset"""
# Build the first sequence
add_values = np.random.uniform(0., 1., (BATCH_SIZE, TIME_STEPS))
# Build the second sequence with one 1 in each half and 0s otherwise
add_indices = np.zeros_like(add_values, dtype='float32')
half = int(TIME_STEPS / 2)
for i in range(BATCH_SIZE):
first_half = np.random.randint(half)
###
# Introspective Autoencoder Main training Function
# <NAME>, 2016
import argparse
import imp
import time
import logging
# import sys
# sys.path.insert(0, 'C:\Users\Andy\Generative-and-Discriminative-Voxel-Modeling')
import numpy as np
from path import Path
import theano
import theano.tensor as T
import lasagne
from utils import checkpoints, npytar, metrics_logging
from collections import OrderedDict
import matplotlib
matplotlib.use('Agg') # Turn this off if you want to display plots on your own computer or have X11 forwarding set up.
import matplotlib.pyplot as plt
#####################
# Training Functions#
#####################
#
# This function compiles all theano functions and returns
# two dicts containing the functions and theano variables.
#
def make_training_functions(cfg,model):
# Input Array
X = T.TensorType('float32', [False]*5)('X')
# Class Vector, for classification or augmenting the latent space vector
y = T.TensorType('float32', [False]*2)('y')
# Shared variable for input array
X_shared = lasagne.utils.shared_empty(5, dtype='float32')
# Shared variable for class vector
y_shared = lasagne.utils.shared_empty(2, dtype='float32')
# Input layer
l_in = model['l_in']
# Output layer
l_out = model['l_out']
# Latent Layer
l_latents = model['l_latents']
# Latent Means
l_mu = model['l_mu']
# Log-sigmas
l_ls = model['l_ls']
# Classifier
l_classifier = model['l_classifier']
# Class-conditional latents
l_cc = model['l_cc']
# Decoder Layers, including final output layer
l_decoder = lasagne.layers.get_all_layers(l_out)[len(lasagne.layers.get_all_layers(l_latents)):]
# Batch Parameters
batch_index = T.iscalar('batch_index')
batch_slice = slice(batch_index*cfg['batch_size'], (batch_index+1)*cfg['batch_size'])
#####################################
# Step 1: Compute full forward pass #
#####################################
#
# Note that calling get_output() builds a new graph each time.
# Get outputs
outputs = lasagne.layers.get_output([l_out]+[l_mu]+[l_ls]+[l_classifier]+lasagne.layers.get_all_layers(l_classifier),
{l_in:X, model['l_cc']:y}) # Consider swapping l_classifier in for l_latents
# Get the reconstruction
X_hat = outputs[0]
# Get latent means
Z_mu = outputs[1]
# Get latent logsigmas
Z_ls = outputs[2]
# Get classification guesses
y_hat = outputs[3]
# Get the outputs of the encoder layers, given the training input
g_X = outputs[5:]
# Get the outputs of the feature layers of the encoder given the reconstruction
g_X_hat = lasagne.layers.get_output(lasagne.layers.get_all_layers(l_classifier)[1:],lasagne.nonlinearities.tanh(X_hat))
# Get testing outputs
[X_hat_deterministic,latent_values,y_hat_deterministic] = lasagne.layers.get_output([l_out,l_latents,l_classifier],
{l_in:X, model['l_cc']:y},deterministic=True)
# Latent values at a given
# latent_values = lasagne.layers.get_output(l_latents,deterministic=True)
# For classification
# class_prediction = softmax_out = T.nnet.softmax(g_X[-1])
#################################
# Step 2: Define loss functions #
#################################
# L2 normalization for all params
l2_all = lasagne.regularization.regularize_network_params(l_out,
lasagne.regularization.l2)
# Weighted binary cross-entropy for use in voxel loss. Allows weighting of false positives relative to false negatives.
# Nominally set to strongly penalize false negatives
def weighted_binary_crossentropy(output,target):
return -(98.0*target * T.log(output) + 2.0*(1.0 - target) * T.log(1.0 - output))/100.0
# Voxel-Wise Reconstruction Loss
# Note that the output values are clipped to prevent the BCE from evaluating log(0).
voxel_loss = T.cast(T.mean(weighted_binary_crossentropy(T.clip(lasagne.nonlinearities.sigmoid( X_hat ), 1e-7, 1.0 - 1e-7), X)),'float32')
# KL Divergence from isotropic gaussian prior
kl_div = -0.5 * T.mean(1 + 2*Z_ls - T.sqr(Z_mu) - T.exp(2 * Z_ls))
# Compute classification loss if augmenting with a classification objective
if cfg['discriminative']:
print('discriminating')
classifier_loss = T.cast(T.mean(T.nnet.categorical_crossentropy(T.nnet.softmax(y_hat), y)), 'float32')
classifier_error_rate = T.cast( T.mean( T.neq(T.argmax(y_hat,axis=1), T.argmax(y,axis=1)) ), 'float32' )
classifier_test_error_rate = T.cast( T.mean( T.neq(T.argmax(y_hat_deterministic,axis=1), T.argmax(y,axis=1))), 'float32' )
# Sum the reconstruction loss, the regularization term, the KL divergence over the prior, and the classifier loss.
# Optionally ignore the kl divergence term.
reg_voxel_loss = voxel_loss + cfg['reg']*l2_all +classifier_loss+kl_div if cfg['kl_div'] else voxel_loss + cfg['reg']*l2_all +classifier_loss
# If not, ignore classifier
else:
classifier_loss = None
classifier_error_rate = None
classifier_test_error_rate = None
# Sum the reconstruction loss, the regularization term, and the KL divergence over the prior.
# Optionally ignore the kl divergence term.
reg_voxel_loss = voxel_loss + cfg['reg']*l2_all+kl_div if cfg['kl_div'] else voxel_loss + cfg['reg']*l2_all
##########################
# Step 3: Define Updates #
##########################
# Define learning rate in case of annealing or decay.
if isinstance(cfg['learning_rate'], dict):
learning_rate = theano.shared(np.float32(cfg['learning_rate'][0]))
else:
learning_rate = theano.shared(np.float32(cfg['learning_rate']))
# All network params
params = lasagne.layers.get_all_params(l_out,trainable=True)
# Decoder params
decoder_params = lasagne.layers.get_all_params(l_out,trainable=True)[len(lasagne.layers.get_all_params(l_latents,trainable=True)):]
# Update dict
updates = OrderedDict()
# Reconstruction and Regularization SGD terms
# Note that momentum (or a variant such as Adam) is added further down.
voxel_grads = lasagne.updates.get_or_compute_grads(reg_voxel_loss,params)
for param,grad in zip(params,voxel_grads):
updates[param] = param - learning_rate * grad
# Feature SGD Terms (AKA Introspective SGD Terms)
# Note that momentum (or a variant such as Adam) is added further down.
# Optionally add scale term to weight deeper layers more heavily.
if cfg['introspect']:
# To scale weights differently, add /sum(xrange(1,len(g_X_hat)-1))
# Also (i+1) to scale weights
feature_loss = T.cast(T.mean([T.mean(lasagne.objectives.squared_error(g_X[i],g_X_hat[i])) for i in xrange(0,len(g_X_hat)-2)]),'float32')
feature_grads = lasagne.updates.get_or_compute_grads(feature_loss,decoder_params)
for param,grad in zip(decoder_params,feature_grads):
updates[param] += - learning_rate * grad
else:
feature_loss = None
# Apply nesterov momentum to all updates.
updates = lasagne.updates.apply_nesterov_momentum(updates,momentum=cfg['momentum'])
# Reconstruction Accuracy Term
error_rate = T.cast( T.mean( T.neq(T.ge(X_hat,0), T.ge(X,0))), 'float32' )
# Test Reconstruction Accuracy
test_error_rate = T.cast( T.mean( T.neq(T.ge(X_hat_deterministic,0), T.ge(X,0))), 'float32' )
# Test Reconstruction True Positives
true_positives = T.cast(T.mean(T.eq(T.ge(X_hat_deterministic,0), T.ge(X,0.5))*T.ge(X,0.5))/T.mean(T.ge(X,0.5)),'float32')
# Test Reconstruction True Negatives
true_negatives = T.cast(T.mean(T.eq(T.ge(X_hat_deterministic,0), T.ge(X,0.5))*T.lt(X,0.5))/T.mean(T.lt(X,0.5)),'float32')
# List comprehension to define which outputs are available during training
update_outs = [x for x in [voxel_loss,
feature_loss,
classifier_loss,
kl_div,
classifier_error_rate,
error_rate] if x is not None]
# Training function
update_iter = theano.function([batch_index],update_outs,
updates=updates, givens={
X: X_shared[batch_slice],
y: y_shared[batch_slice]
},on_unused_input='warn' )
# List comprehension to define which outputs are available during testing
test_outs = [x for x in [test_error_rate,
classifier_test_error_rate,
latent_values,true_positives,true_negatives] if x is not None]
# Test function
test_error_fn = theano.function([batch_index],
test_outs, givens={
X: X_shared[batch_slice],
y: y_shared[batch_slice]
},on_unused_input='warn' )
# Dictionary of theano functions
tfuncs = {'update_iter':update_iter,
'test_function':test_error_fn,
}
# Dictionary of theano variables
tvars = {'X' : X,
'y' : y,
'X_shared' : X_shared,
'y_shared' : y_shared,
'batch_slice' : batch_slice,
'batch_index' : batch_index,
'learning_rate' : learning_rate,
}
return tfuncs, tvars
## Data augmentation function from Voxnet, which randomly translates
## and/or horizontally flips a chunk of data.
def jitter_chunk(src, cfg):
dst = src.copy()
if np.random.binomial(1, .2):
dst[:, :, ::-1, :, :] = dst
if np.random.binomial(1, .2):
dst[:, :, :, ::-1, :] = dst
max_ij = cfg['max_jitter_ij']
max_k = cfg['max_jitter_k']
shift_ijk = [np.random.random_integers(-max_ij, max_ij),
np.random.random_integers(-max_ij, max_ij),
np.random.random_integers(-max_k, max_k)]
for axis, shift in enumerate(shift_ijk):
if shift != 0:
# beware wraparound
dst = np.roll(dst, shift, axis+2)
return dst
## Data loading function, originally from VoxNet.
def data_loader(cfg, fname):
dims = cfg['dims']
chunk_size = cfg['batch_size']*cfg['batches_per_chunk']//2
xc = np.zeros((chunk_size, cfg['n_channels'],)+dims, dtype=np.float32)
reader = npytar.NpyTarReader(fname)
yc = np.zeros((chunk_size,cfg['n_classes']),dtype = np.float32)
counter = []
for ix, (x, name) in enumerate(reader):
cix = ix % chunk_size
xc[cix] = x.astype(np.float32)
yc[cix,(int(name.split('.')[0])-1)] = 1
counter.append(int(name.split('.')[0])-1)
if len(counter) == chunk_size:
indices = np.random.permutation(2*len(xc))
yield (3.0 * np.append(xc,jitter_chunk(xc, cfg),axis=0)[indices] - 1.0, np.append(yc,yc,axis=0)[indices])
counter = []
yc.fill(0)
xc.fill(0)
if len(counter) > 0:
# pad to nearest multiple of batch_size
if len(counter)%cfg['batch_size'] != 0:
new_size = int(np.ceil(len(counter)/float(cfg['batch_size'])))*cfg['batch_size']
xc = xc[:new_size]
xc[len(counter):] = xc[:(new_size-len(counter))]
yc = yc[:new_size]
yc[len(counter):] = yc[:(new_size-len(counter))]
counter = counter + counter[:(new_size-len(counter))]
indices = np.random.permutation(2*len(xc))
yield (3.0 * np.append(xc,jitter_chunk(xc, cfg),axis=0)[indices] - 1.0, np.append(yc,yc,axis=0)[indices])
# Test data loading function, originally from VoxNet
def test_data_loader(cfg,fname):
dims = cfg['dims']
chunk_size = cfg['batch_size']*cfg['batches_per_chunk']
xc = np.zeros((chunk_size, cfg['n_channels'],)+dims, dtype=np.float32)
reader = npytar.NpyTarReader(fname)
yc = np.zeros((chunk_size,cfg['n_classes']),dtype = np.float32)
counter = []
for ix, (x, name) in enumerate(reader):
cix = ix % chunk_size
xc[cix] = x.astype(np.float32)
yc[cix,(int(name.split('.')[0])-1)] = 1
counter.append(int(name.split('.')[0])-1)
if len(counter) == chunk_size:
yield (3.0*xc-1.0, yc)
counter = []
yc.fill(0)
xc.fill(0)
if len(counter) > 0:
# pad to nearest multiple of batch_size
if len(counter)%cfg['batch_size'] != 0:
new_size = int(np.ceil(len(counter)/float(cfg['batch_size'])))*cfg['batch_size']
xc = xc[:new_size]
xc[len(counter):] = xc[:(new_size-len(counter))]
yc = yc[:new_size]
yc[len(counter):] = yc[:(new_size-len(counter))]
counter = counter + counter[:(new_size-len(counter))]
yield (3.0*xc-1.0, yc)
# Main Function
def main(args):
# Load config file
config_module = imp.load_source('config', args.config_path)
cfg = config_module.cfg
# Define weights file name
weights_fname = str(args.config_path)[:-3]+'.npz'
# Define training metrics filename
metrics_fname = weights_fname[:-4]+'METRICS.jsonl'
# Prepare Logs
logging.basicConfig(level=logging.INFO, format='%(asctime)s %(levelname)s| %(message)s')
logging.info('Metrics will be saved to {}'.format(metrics_fname))
mlog = metrics_logging.MetricsLogger(metrics_fname, reinitialize=True)
# Get model and compile theano functions
model = config_module.get_model()
logging.info('Compiling theano functions...')
tfuncs, tvars = make_training_functions(cfg,model)
logging.info('Training...')
# Iteration Counter. One iteration corresponds to one minibatch.
itr = 0
# Best true-positive rate
best_tp = 0
for epoch in xrange(cfg['max_epochs']):
# Prepare data loader
loader = (data_loader(cfg,args.train_file))
# Update Learning Rate. Note that this version of the function does not support a decay rate;
# See other training files in the discriminative section for this.
if isinstance(cfg['learning_rate'], dict) and epoch > 0:
if any(x==epoch for x in cfg['learning_rate'].keys()):
lr = np.float32(tvars['learning_rate'].get_value())
new_lr = cfg['learning_rate'][epoch]
logging.info('Changing learning rate from {} to {}'.format(lr, new_lr))
tvars['learning_rate'].set_value(np.float32(new_lr))
# Initialize epoch-wise chunk counter
iter_counter = 0;
# Initialize Epoch-wise metrics
vloss_e, floss_e, closs_e, d_kl_e, c_acc_e, acc_e = 0, 0, 0, 0, 0, 0
# Train!
for x_shared, y_shared in loader: # Loop across chunks
# Increment chunk counter
iter_counter+=1
# Determine number of batches in this chunk; this should only vary from
# cfg['batches_per_chunk'] if we're at the end of the dataset.
num_batches = len(x_shared)//cfg['batch_size']
# Load chunk into memory
tvars['X_shared'].set_value(x_shared, borrow=True)
tvars['y_shared'].set_value(y_shared, borrow=True)
# Initialize Chunk-wise metrics
voxel_lvs,feature_lvs,class_lvs,kl_divs,class_accs,accs = [],[],[],[],[],[]
for bi in xrange(num_batches): # Loop across batches within chunk
# Update!
results = tfuncs['update_iter'](bi)
# Assign results
# This could definitely be done more cleanly with a list comprehension.
voxel_loss = results[0]
feature_loss = results[1] if cfg['introspect'] else 0
classifier_loss = results[1+cfg['introspect']] if cfg['discriminative'] else 0
kl_div = results[1+cfg['introspect']+cfg['discriminative']]
class_acc = results[2+cfg['introspect']+cfg['discriminative']] if cfg['discriminative'] else 0
acc = results[2+cfg['introspect']+2*cfg['discriminative']]
# Append results to chunk-wise result list; these will be averaged later.
voxel_lvs.append(voxel_loss)
feature_lvs.append(feature_loss)
class_lvs.append(classifier_loss)
kl_divs.append(kl_div)
class_accs.append(class_acc)
accs.append(acc)
# Increment batch counter
itr += 1
# Average metrics across chunk
[vloss, floss,closs, d_kl,c_acc,acc] = [float(np.mean(voxel_lvs)), float(np.mean(feature_lvs)),
float(np.mean(class_lvs)), float(np.mean(kl_divs)),
1.0-float(np.mean(class_accs)), 1.0-float(np.mean(accs))]
# Update epoch-wise metrics
vloss_e, floss_e, closs_e, d_kl_e, c_acc_e, acc_e = [vloss_e+vloss, floss_e+floss, closs_e+closs, d_kl_e+d_kl, c_acc_e+c_acc, acc_e+acc]
# Report and Log chunk-wise metrics
logging.info('epoch: {}, itr: {}, v_loss: {}, f_loss: {}, c_loss: {}, D_kl: {}, class_acc: {}, acc: {}'.format(epoch, itr, vloss, floss,
closs, d_kl, c_acc, acc))
mlog.log(epoch=epoch, itr=itr, vloss=vloss,floss=floss, acc=acc,d_kl=d_kl,c_acc=c_acc)
# Average metrics across epoch
vloss_e, floss_e, closs_e, d_kl_e, c_acc_e, acc_e = [vloss_e/iter_counter, floss_e/iter_counter,
closs_e/iter_counter, d_kl_e/iter_counter,
c_acc_e/iter_counter, acc_e/iter_counter]
# Report and log epoch-wise metrics
logging.info('Training metrics, Epoch {}, v_loss: {}, f_loss: {}, c_loss: {}, D_kl: {}, class_acc: {}, acc: {}'.format(epoch, vloss_e, floss_e,closs_e,d_kl_e,c_acc_e,acc_e))
mlog.log(epoch=epoch, vloss_e=vloss_e, floss_e=floss_e, closs_e=closs_e, d_kl_e=d_kl_e, c_acc_e=c_acc_e, acc_e=acc_e)
# Every Nth epoch, save weights
if not (epoch%cfg['checkpoint_every_nth']):
checkpoints.save_weights(weights_fname, model['l_out'],
{'itr': itr, 'ts': time.time()})
# When training is complete, check test performance
test_loader = test_data_loader(cfg,'shapenet10_test_nr.tar')
logging.info('Examining performance on test set')
# Initialize test metrics
test_error,test_class_error,latent_values,tp,tn = [],[],[],[],[]
# Initialize true class array for 2D manifold plots
true_class = np.array([],dtype=np.int)
for x_shared,y_shared in test_loader: # Loop across test chunks
# Calculate number of batches
num_batches = len(x_shared)//cfg['batch_size']
# Load test chunk into memory
tvars['X_shared'].set_value(x_shared, borrow=True)
tvars['y_shared'].set_value(y_shared, borrow=True)
# Update true class array for 2D Manifold Plots
true_class = np.append(true_class,np.argmax(y_shared,axis=1))
for bi in xrange(num_batches): # Loop across minibatches
# Get test results
test_results = tfuncs['test_function'](bi)
# Assign test results
# This could be done more cleanly with a list comprehension
batch_test_error=test_results[0]
batch_test_class_error = test_results[1] if cfg['discriminative'] else 0
latents = test_results[1+cfg['discriminative']]
batch_tp = test_results[2+cfg['discriminative']]
batch_tn = test_results[3+cfg['discriminative']]
test_error.append(batch_test_error)
test_class_error.append(batch_test_class_error)
latent_values.append(latents)
tp.append(batch_tp)
tn.append(batch_tn)
# Average results
t_error = 1-float(np.mean(test_error))
true_positives = float(np.mean(tp))
true_negatives = float(np.mean(tn))
from __future__ import print_function
from builtins import zip
from builtins import range
from builtins import object
from cosmosis.gaussian_likelihood import GaussianLikelihood
from cosmosis.datablock import names
from twopoint_cosmosis import theory_names, type_table
from astropy.io import fits
from scipy.interpolate import interp1d
import numpy as np
import twopoint
import gaussian_covariance
import os
default_array = np.repeat(-1.0, 99)
def is_default(x):
return len(x) == len(default_array) and (x == default_array).all()
def convert_nz_steradian(n):
return n * (41253.0 * 60. * 60.) / (4 * np.pi)
class SpectrumInterp(object):
def __init__(self, angle, spec, bounds_error=True):
if np.all(spec > 0):
"""
defines:
- clear_out_solids(bdf_filename, bdf_filename_out=None,
equivalence=True, renumber=True, equivalence_tol=0.01)
- nastran_to_surf(bdf_filename, pid_to_element_flags, surf_filename,
renumber_pids=None, line_map=None,
scale=1.0, tol=1e-10,
xref=True)
"""
from collections import defaultdict
from numpy import array, allclose, unique, zeros
from pyNastran.bdf.bdf import read_bdf
from pyNastran.bdf.mesh_utils.bdf_equivalence import bdf_equivalence_nodes
from pyNastran.bdf.mesh_utils.bdf_renumber import bdf_renumber
from pyNastran.bdf.mesh_utils.remove_unused import remove_unused
def remove_unassociated_nodes(bdf_filename, unused_bdf_filename_out, renumber=False):
"""dummy function"""
assert renumber is False, renumber
remove_unused(bdf_filename, remove_nids=True, remove_cids=False,
remove_pids=False, remove_mids=False)
def clear_out_solids(bdf_filename, bdf_filename_out=None,
equivalence=True, renumber=True, equivalence_tol=0.01):
"""removes solid elements"""
if bdf_filename_out is None:
if renumber or equivalence:
msg = ('bdf_filename_out=%s must be specified if renumber=%s '
'or equivalence=%s are True' % (
bdf_filename_out, renumber, equivalence))
raise RuntimeError(msg)
if isinstance(bdf_filename, str):
print('clearing out solids from %s' % bdf_filename)
model = read_bdf(bdf_filename, xref=False)
else:
model = bdf_filename
#nodes2 = {nid, node for nid, node in model.nodes.items()}
#elements2 = {eid, element for eid, element in model.elements.items()
#if element.type in ['CTRIA3', 'CQUAD4']}
out_dict = model.get_card_ids_by_card_types(card_types=['CTRIA3', 'CQUAD4'])
save_eids = set(out_dict['CTRIA3'] + out_dict['CQUAD4'])
all_eids = set(model.element_ids)
#print('all_eids =', all_eids)
#print('save_eids =', save_eids)
remove_eids = all_eids - save_eids
#print('remove_eids =', remove_eids)
for eid in remove_eids:
#print('eid =', eid)
del model.elements[eid]
# TODO: seems like we could be more efficient...
#nids = unique(hstack([model.elements[eid].node_ids for eid in save_eids]))
# get nodes that are remaining in the model
nids = set()
unused_elements2 = {}
#print(model.elements)
for eid, element in model.elements.items():
#if element.type not in ['CTRIA3', 'CQUAD4']:
#continue
#elements2[eid] = element
nids.update(element.node_ids)
nids = list(nids)
nids.sort()
# filter out old nodes & properties
nodes2 = {nid : node for nid, node in model.nodes.items() if nid in nids}
properties2 = {pid : prop for pid, prop in model.properties.items() if prop.type == 'PSHELL'}
model.nodes = nodes2
#model.elements = elements2
model.properties = properties2
# already equivalenced?
#remove_unassociated_nodes(bdf_filename, bdf_filename_out, renumber=False)
#bdf_filename_out = 'equivalence.bdf'
starting_id_dict = {
'cid' : 1,
'nid' : 1,
'eid' : 1,
'pid' : 1,
'mid' : 1,
}
if equivalence:
if renumber:
bdf_equivalenced_filename = 'equivalence.bdf'
else:
bdf_equivalenced_filename = bdf_filename_out
model.write_bdf('remove_unused_nodes.bdf')
bdf_equivalence_nodes(model, bdf_equivalenced_filename, equivalence_tol,
renumber_nodes=False, neq_max=4, xref=True)
if renumber:
bdf_renumber(bdf_equivalenced_filename, bdf_filename_out, size=8, is_double=False,
starting_id_dict=starting_id_dict)
elif renumber:
model.cross_reference()
bdf_renumber(model, bdf_filename_out, size=8, is_double=False,
starting_id_dict=starting_id_dict)
return model
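# Hedged usage sketch for clear_out_solids (file names are illustrative only):
#   model = clear_out_solids('model_with_solids.bdf', 'shells_only.bdf',
#                            equivalence=True, renumber=True, equivalence_tol=0.01)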
def nastran_to_surf(bdf_filename, pid_to_element_flags, surf_filename,
renumber_pids=None, line_map=None,
scale=1.0, tol=1e-10,
xref=True):
"""
Converts a BDF to an AFLR3 surf file
Parameters
----------
bdf_filename : str/BDF
str : the input BDF filename
BDF : a BDF model that has been cross-referenced
surf_filename : str
the output SURF filename
pid_to_element_flags : dict[key] = value
key=PSHELL value=[layer0 thickness, BL_thickness, grid_bc]
renumber_pids : bool; default=None
a mapping of pid to surface ID
None = no remapping
line_map : dict[key] = value
same as pid_to_element_flags, but for the specific intersections
where there are BC differences
NOTE: we only check [thickness, BL_thickness] because we're
averaging this data for the nodes
scale : float; default=1.0
scales the mesh by scale for unit conversion
tol : float; default=1e-10
I hate 1e-16 values in my model
xref : bool; default=True
does the model need to be cross-referenced to calculate the
node positions?
# these pids correspond to the BDF
pid_to_element_flags = {
1 : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc], # top_wall
2 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # right_wall
3 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # left_wall
4 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # outlet
5 : [far_field_initial_normal_spacing, far_field_bl_thickness, grid_bc], # bottom_wall
6 : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc], # bay
11 : [far_field_initial_normal_spacing, far_field_bl_thickness, grid_bc], # inlet_btm
12 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # inlet_front
13 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # inlet_left
14 : [side_wall_initial_normal_spacing, side_wall_bl_thickness, grid_bc], # inlet_right
15 : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc], # inlet_visc
}
# these pids correspond to the BDF
# the pid_to_element_flag at the intersection between pids
line_map = {
(1, 2) : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc],
(1, 3) : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc],
(1, 4) : [wall_initial_normal_spacing, wall_bl_thickness, grid_bc],
}
# these are done at the last step to make the output "nice"
renumber_pids = {
11 : 7,
12 : 8,
13 : 9,
14 : 10,
15 : 11,
}
scale = 0.0254 # inches to meters;
"""
if renumber_pids is None:
renumber_pids = {}
if line_map is None:
line_map = {}
if isinstance(bdf_filename, str):
model = read_bdf(bdf_filename, xref=xref)
else:
model = bdf_filename
unused_nnodes = len(model.nodes)
nodes = []
quads = []
tris = []
maxnode, nodes, node_flags, node_flags_temp = _get_nodes(model, scale, xref)
node_remaps = {}
#if 0:
#xyz_array = array(nodes, dtype='float64')
#for nid, xyz in enumerate(xyz_array):
#for nidi, xyz2 in enumerate(xyz_array[nid+1:, :]):
#nid2 = nid + nidi + 1
#if not allclose(nid + 1, nid2 + 1):
#msg = 'nid=%s nid2=%s xyz=%s' % (nid+1, nid2+1, xyz)
#raise RuntimeError(msg)
#if allclose(xyz, xyz2):
##print(nid, nid2, nidi)
##if nid + 1 in node_remaps:
#node_remaps[nid2 + 1] = nid + 1
#print('nid=%s nid2=%s xyz=%s xyz2=%s' % (nid+1, nid2+1, xyz, xyz2))
#assert not(allclose(xyz, xyz2)), 'nid=%s nid2=%s xyz=%s' % (nid+1, nid2+1, xyz)
#del xyz_array
pid0 = 1
for pid, prop in sorted(model.properties.items()):
if pid != pid0:
msg = 'properties must go from 1 to N, no gaps; pid=%s expected=%s' % (
pid, pid0)
raise RuntimeError(msg)
#assert pid in pid_to_element_flags, pid
if prop.type in ['PSOLID']:
continue
if prop.type not in ['PSHELL', 'PCOMP', 'PCOMPG']:
raise NotImplementedError(prop)
pid0 += 1
nid_to_eid_map = get_nid_to_eid_map(
model,
node_flags_temp, pid_to_element_flags, node_remaps,
tris, quads)
initial_normal_spacing0 = 0
bl_thickness0 = 0
for nid, node_flagsi in node_flags_temp.items():
nodes_flags_array = array(node_flagsi) # (N, 2)
nflags = nodes_flags_array.shape[0]
if nflags == 0:
#node_flags[nid] = [initial_normal_spacing0, bl_thickness0]
continue
try:
avg_node_flagsi = nodes_flags_array.mean(axis=0)
max_node_flagsi = nodes_flags_array.max(axis=0)
except ValueError:
print('nid=%s node_flagsi=%s' % (nid, node_flagsi))
raise RuntimeError('node %i is duplicated (equivalence your nodes)'
' or you have unused nodes' % nid)
        if not allclose(avg_node_flagsi, max_node_flagsi):
from warnings import warn
import numpy as np
import uarray as ua
import skimage.filters as _skimage_filters
from skimage._shared.utils import convert_to_float, warn
from skimage._backend import scalar_or_array
try:
import diplib as dip
have_diplib = True
ndi_mode_translation_dict = dict(
constant='add zeros',
nearest='zero order',
mirror='mirror',
wrap='periodic')
except ImportError:
have_diplib = False
ndi_mode_translation_dict = {}
numpy_mode_translation_dict = {}
_implemented = {}
def _to_diplib_mode(mode, cval=0):
"""Convert from skimage mode name to the corresponding ndimage mode."""
if mode not in ndi_mode_translation_dict:
# warnings.warn(f"diplib does not support mode {mode}")
return NotImplemented
if mode == 'constant' and cval != 0.:
# warnings.warn(f"diplib backend only supports cval=0 for 'constant' mode")
return NotImplemented
return ndi_mode_translation_dict[mode]
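# For example (per the translation table above):
#   _to_diplib_mode('nearest')            -> 'zero order'
#   _to_diplib_mode('constant', cval=0.)  -> 'add zeros'
#   _to_diplib_mode('constant', cval=1.)  -> NotImplemented (nonzero cval unsupported)
#   _to_diplib_mode('reflect')            -> NotImplemented (mode not in the table)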
def _implements(skimage_func):
"""Decorator adds function to the dictionary of implemented functions"""
def inner(func):
_implemented[skimage_func] = func
return func
return inner
@_implements(_skimage_filters.gaussian)
def gaussian(image, sigma=1, output=None, mode='nearest', cval=0,
preserve_range=False, truncate=4.0, *, channel_axis=None):
diplib_mode = _to_diplib_mode(mode, cval)
if diplib_mode == NotImplemented:
return NotImplemented
if not have_diplib:
raise ImportError("PyDIP (DIPlib) is unavailable.")
ndim_spatial = image.ndim if channel_axis is None else image.ndim - 1
if np.isscalar(sigma):
sigma = (sigma, ) * ndim_spatial
elif len(sigma) != ndim_spatial:
raise ValueError(
"sigma must be a scalar or a sequence equal to the image.ndim "
"(or image.ndim - 1 in the case of multichannel input)"
)
truncate = np.unique(truncate)
if not len(truncate) == 1:
# raise NotImplementedError("only scalar truncate is supported")
return NotImplemented
truncate = truncate[0]
if channel_axis is not None:
if channel_axis < 0:
channel_axis += image.ndim
if channel_axis != image.ndim - 1:
image = np.moveaxis(image, source=channel_axis, destination=-1)
sigma = sigma[:ndim_spatial]
sigma = list(sigma)
# special handling copied from skimage.filters.Gaussian
if image.ndim == 3 and image.shape[-1] == 3 and channel_axis is None:
msg = ("Images with dimensions (M, N, 3) are interpreted as 2D+RGB "
"by default. Use `channel_axis=None` to interpret as "
"3D image with last dimension of length 3.")
warn(RuntimeWarning(msg))
channel_axis = -1
if any(s < 0 for s in sigma):
raise ValueError("Sigma values less than zero are not valid")
image = convert_to_float(image, preserve_range)
# TODO: try removing this
if output is None:
output = np.empty_like(image)
elif output.dtype not in [np.float32, np.float64]:
raise ValueError(
"Provided output data type must be np.float32 or np.float64."
)
# TODO: debug why multichannel case doesn't work currently (2d grayscale was okay)
# TODO: debug why channel_axis input to this multimethod gets ignored?
output[...] = dip.Gauss(
image,
sigmas=sigma[::-1], # reversed?
method='FIR',
# derivativeOrder=[0] * ndim_spatial,
boundaryCondition=[diplib_mode] * ndim_spatial,
truncation=truncate)
if channel_axis is not None and channel_axis != image.ndim - 1:
        output = np.moveaxis(output, source=-1, destination=channel_axis)
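# Minimal usage sketch (assumes PyDIP is installed and that the truncated function above
# ends by returning `output`; the input array below is illustrative):
#
#     import numpy as np
#     img = np.random.rand(64, 64).astype(np.float32)
#     smoothed = gaussian(img, sigma=2.0, mode='nearest', preserve_range=True)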
import re
from dataclasses import replace
import jax.numpy as jnp
import numpy as np
import pytest
from pgmax.fg import graph, groups
def test_factor_graph():
variable_group = groups.VariableDict(15, (0,))
fg = graph.FactorGraph(variable_group)
fg.add_factor([0], np.arange(15)[:, None], name="test")
with pytest.raises(
ValueError,
match="A factor group with the name test already exists. Please choose a different name",
):
fg.add_factor([0], np.arange(15)[:, None], name="test")
with pytest.raises(
ValueError,
match=re.escape(
f"A factor involving variables {frozenset([0])} already exists."
),
):
fg.add_factor([0], np.arange(10)[:, None])
def test_bp_state():
variable_group = groups.VariableDict(15, (0,))
fg0 = graph.FactorGraph(variable_group)
fg0.add_factor([0], np.arange(10)[:, None], name="test")
fg1 = graph.FactorGraph(variable_group)
    fg1.add_factor([0], np.arange(15)
import pandas as pd
import numpy as np
import riskparityportfolio as rp
import cvxpy as cp
from typing import Union
from sklearn.cluster import KMeans
from pypfopt.efficient_frontier import EfficientFrontier
from pypfopt import risk_models
from dl_portfolio.logger import LOGGER
from dl_portfolio.cluster import get_cluster_labels
from dl_portfolio.constant import PORTFOLIOS
def portfolio_weights(returns, shrink_cov=None, budget=None, embedding=None, loading=None,
portfolio=['markowitz', 'shrink_markowitz', 'ivp', 'aerp', 'rp', 'aeerc'],
**kwargs):
assert all([p in PORTFOLIOS for p in portfolio]), [p for p in portfolio if p not in PORTFOLIOS]
port_w = {}
mu = returns.mean()
S = returns.cov()
if 'markowitz' in portfolio:
LOGGER.info('Computing Markowitz weights...')
port_w['markowitz'] = markowitz_weights(mu, S)
if 'shrink_markowitz' in portfolio:
assert shrink_cov is not None
LOGGER.info('Computing shrinked Markowitz weights...')
port_w['shrink_markowitz'] = markowitz_weights(mu, shrink_cov)
if 'ivp' in portfolio:
LOGGER.info('Computing IVP weights...')
port_w['ivp'] = ivp_weights(S)
if 'rp' in portfolio:
LOGGER.info('Computing Riskparity weights...')
assert budget is not None
port_w['rp'] = riskparity_weights(S, budget=budget['rc'].values)
if 'kmaa' in portfolio:
LOGGER.info('Computing KMeans Asset Allocation weights...')
assert embedding is not None
port_w['kmaa'] = kmaa_weights(returns, n_clusters=embedding.shape[-1])
if 'aerp' in portfolio:
LOGGER.info('Computing AE Risk Parity weights...')
assert embedding is not None
port_w['aerp'] = ae_ivp_weights(returns, embedding)
if 'aeerc' in portfolio:
LOGGER.info('Computing AE Risk Contribution weights...')
assert budget is not None
assert embedding is not None
port_w['aeerc'] = ae_riskparity_weights(returns, embedding, loading, budget, risk_parity='budget')
if 'ae_rp_c' in portfolio:
LOGGER.info('Computing AE Risk Contribution Cluster weights...')
assert budget is not None
assert embedding is not None
port_w['ae_rp_c'] = ae_riskparity_weights(returns, embedding, loading, budget, risk_parity='cluster')
if 'aeaa' in portfolio:
LOGGER.info('Computing AE Asset Allocation weights...')
port_w['aeaa'] = aeaa_weights(returns, embedding)
return port_w
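# Example call (hypothetical inputs; `returns` is a DataFrame of asset returns, `budget`
# has an 'rc' column, and `embedding`/`loading` come from the trained autoencoder):
#
#     weights = portfolio_weights(returns, budget=market_budget, embedding=embedding,
#                                 loading=loading, portfolio=['ivp', 'rp', 'aeerc'])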
def get_cluster_var(cov, cluster_items, weights=None):
"""
Compute the variance per cluster
:param cov: covariance matrix
:type cov: np.ndarray
:param cluster_items: tickers in the cluster
:type cluster_items: list
:param weights: portfolio weights. If None we will compute inverse variance weights
:return:
"""
cov_slice = cov.loc[cluster_items, cluster_items]
    if weights is None:
        weights = ivp_weights(cov_slice)
return np.linalg.multi_dot((weights, cov_slice, weights))
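# The cluster variance is the quadratic form w' @ Sigma_cluster @ w. Illustrative call
# (hypothetical 2x2 diagonal covariance; with weights=None the IVP weights are used):
#
#     cov = pd.DataFrame(np.diag([0.04, 0.09]), index=['A', 'B'], columns=['A', 'B'])
#     get_cluster_var(cov, ['A', 'B'])   # ~0.0277 with IVP weights ~[0.692, 0.308]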
def get_inner_cluster_weights(cov, loading, clusters, market_budget=None):
weights = {}
n_clusters = len(clusters)
for c in clusters:
cluster_items = clusters[c]
if cluster_items:
if market_budget is not None:
budget = market_budget.loc[cluster_items, 'rc']
else:
budget = loading.loc[cluster_items, c] ** 2 / np.sum(loading.loc[cluster_items, c] ** 2)
cov_slice = cov.loc[cluster_items, cluster_items]
weights[c] = pd.Series(
rp.RiskParityPortfolio(covariance=cov_slice, budget=budget.values).weights,
index=cluster_items
)
    # re-key the clusters as consecutive integers starting at 0
    weights = {i: weights[c] for i, c in enumerate(list(weights.keys()))}
return weights
def markowitz_weights(mu: Union[pd.Series, np.ndarray], S: pd.DataFrame, fix_cov: bool = False,
risk_free_rate: float = 0.) -> pd.Series:
if fix_cov:
S = risk_models.fix_nonpositive_semidefinite(S, fix_method='spectral')
weights = None
try:
LOGGER.info(f"Trying Markowitz with default 'ECOS' solver")
ef = EfficientFrontier(mu, S, verbose=False)
# ef.add_objective(objective_functions.L2_reg, gamma=0)
weights = ef.max_sharpe(risk_free_rate=risk_free_rate)
weights = pd.Series(weights, index=weights.keys())
LOGGER.info("Success")
except Exception as _exc:
LOGGER.info(f'Error with max sharpe: {_exc}')
try:
LOGGER.info(f"Trying Markowitz with 'SCS' solver")
ef = EfficientFrontier(mu, S, verbose=True, solver='SCS')
# ef.add_objective(objective_functions.L2_reg, gamma=0)
weights = ef.max_sharpe(risk_free_rate=risk_free_rate)
weights = pd.Series(weights, index=weights.keys())
LOGGER.info("Success")
except Exception as _exc:
LOGGER.info(f'Error with max sharpe: {_exc}')
try:
LOGGER.info(f"Trying Markowitz with 'OSQP' solver")
ef = EfficientFrontier(mu, S, verbose=True, solver='OSQP')
# ef.add_objective(objective_functions.L2_reg, gamma=0)
weights = ef.max_sharpe(risk_free_rate=risk_free_rate)
weights = pd.Series(weights, index=weights.keys())
LOGGER.info("Success")
except Exception as _exc:
LOGGER.info(f'Error with max sharpe: {_exc}')
try:
LOGGER.info(f"Trying Markowitz with 'CVXOPT' solver")
# ef = EfficientFrontier(mu, S, verbose=True, solver=cp.CVXOPT, solver_options={'feastol': 1e-4})
ef = EfficientFrontier(mu, S, verbose=True, solver=cp.SCS)
weights = ef.max_sharpe(risk_free_rate=risk_free_rate)
weights = pd.Series(weights, index=weights.keys())
LOGGER.info("Success")
except Exception as _exc:
LOGGER.info(f'Error with max sharpe: {_exc}')
if weights is None:
raise _exc
return weights
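# Example (hypothetical returns DataFrame). The function tries a cascade of solvers and
# raises the last exception if every attempt fails:
#
#     mu, S = returns.mean(), returns.cov()
#     w = markowitz_weights(mu, S, risk_free_rate=0.0)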
def ivp_weights(S: Union[pd.DataFrame, np.ndarray]) -> pd.Series:
# Compute the inverse-variance portfolio
ivp = 1. / np.diag(S.values)
weights = ivp / ivp.sum()
if isinstance(S, pd.DataFrame):
weights = pd.Series(weights, index=S.index)
else:
weights = pd.Series(weights)
return weights
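# Inverse-variance weights are w_i = sigma_i^-2 / sum_j sigma_j^-2 (only the diagonal of
# the covariance is used), so lower-variance assets receive larger weights.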
def riskparity_weights(S: pd.DataFrame(), budget: np.ndarray) -> pd.Series:
weights = rp.RiskParityPortfolio(covariance=S, budget=budget).weights
weights = pd.Series(weights, index=S.index)
return weights
def ae_riskparity_weights(returns, embedding, loading, market_budget, risk_parity='budget'):
"""
:param returns:
:param embedding: To get cluster assignment
:param loading: To get inner cluster weights
:param market_budget:
:param risk_parity: if 'budget' then use budget for risk allocation, if 'cluster' use relative asset cluster
importance from the embedding matrix
:return:
"""
assert risk_parity in ['budget', 'cluster']
# Rename columns in case of previous renaming
loading.columns = list(range(len(loading.columns)))
embedding.columns = list(range(len(embedding.columns)))
max_cluster = embedding.shape[-1] - 1
# First get cluster allocation to forget about small contribution
clusters, _ = get_cluster_labels(embedding)
clusters = {c: clusters[c] for c in clusters if c <= max_cluster}
# Now get weights of assets inside each cluster
if risk_parity == 'budget':
inner_cluster_weights = get_inner_cluster_weights(returns.cov(),
loading,
clusters,
market_budget=market_budget)
elif risk_parity == 'cluster':
inner_cluster_weights = get_inner_cluster_weights(returns.cov(),
loading,
clusters)
else:
raise NotImplementedError(risk_parity)
# Now compute return of each cluster
cluster_returns = pd.DataFrame()
for c in inner_cluster_weights:
cret = (returns[inner_cluster_weights[c].index] * inner_cluster_weights[c]).sum(1)
        cluster_returns = pd.concat([cluster_returns, cret], axis=1)
cluster_returns.columns = list(inner_cluster_weights.keys())
# Now get risk contribution of each cluster defined by user
cluster_rc = {c: (inner_cluster_weights[c]).idxmax() for c in inner_cluster_weights}
cluster_rc = {c: market_budget.loc[cluster_rc[c], 'rc'] for c in cluster_rc}
# Compute cluster weights with risk parity portfolio
cov = cluster_returns.cov()
budget = np.array(list(cluster_rc.values()))
budget = budget / np.sum(budget)
cluster_weight = rp.RiskParityPortfolio(covariance=cov, budget=budget).weights
# Compute asset weight inside global portfolio
weights = pd.Series(dtype='float32')
for c in inner_cluster_weights:
weights = pd.concat([weights, inner_cluster_weights[c] * cluster_weight[c]])
weights = weights.reindex(returns.columns) # rerorder
weights.fillna(0., inplace=True)
return weights
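# Example call (all inputs are hypothetical pandas objects sharing the same asset index;
# `market_budget` needs an 'rc' column as described in the docstring):
#
#     w = ae_riskparity_weights(returns, embedding, loading, market_budget,
#                               risk_parity='cluster')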
def kmaa_weights(returns: pd.DataFrame, n_clusters: int) -> pd.Series:
kmeans = KMeans(n_clusters=n_clusters, random_state=0)
kmeans.fit(returns.T)
labels = pd.DataFrame(kmeans.labels_.reshape(1, -1), columns=returns.columns).T
labels.columns = ['label']
clusters = {i: list(labels[labels['label'] == i].index) for i in range(n_clusters)}
# Now get weights of assets inside each cluster
cluster_weights = {c: pd.Series([1 / len(clusters[c])] * len(clusters[c]), index=clusters[c]) for c in clusters}
# {asset: 1 / n_items for asset in clusters[c]}}
# Compute asset weight inside global portfolio
weights = pd.Series(dtype='float32')
for c in cluster_weights:
weights = pd.concat([weights, cluster_weights[c]])
weights = weights / n_clusters # Rescale each weight
weights = weights.reindex(returns.columns) # rerorder
weights.fillna(0., inplace=True)
return weights
def aeaa_weights(returns: Union[np.ndarray, pd.DataFrame], embedding: Union[np.ndarray, pd.DataFrame]) -> pd.Series:
max_cluster = embedding.shape[-1] - 1
# First get cluster allocation to forget about small contribution
# Rename columns in case of previous renaming
embedding.columns = list(range(len(embedding.columns)))
clusters, _ = get_cluster_labels(embedding)
clusters = {c: clusters[c] for c in clusters if c <= max_cluster}
n_clusters = embedding.shape[-1]
# Now get weights of assets inside each cluster
cluster_weights = {c: pd.Series([1 / len(clusters[c])] * len(clusters[c]), index=clusters[c]) for c in clusters}
# {asset: 1 / n_items for asset in clusters[c]}}
# Compute asset weight inside global portfolio
weights = pd.Series(dtype='float32')
for c in cluster_weights:
weights = pd.concat([weights, cluster_weights[c]])
weights = weights / n_clusters # Rescale each weight
weights = weights.reindex(returns.columns) # rerorder
weights.fillna(0., inplace=True)
return weights
def equal_class_weights(market_budget: pd.DataFrame):
market_class = np.unique(market_budget['market'].values, return_counts=True)
inner_class_weight = {c: 1 / market_class[1][i] for i, c in enumerate(market_class[0])}
weights = pd.Series(index=market_budget.index)
for c in inner_class_weight:
assets = market_budget.index[market_budget['market'] == c]
weights.loc[assets] = inner_class_weight[c]
    weights /= np.sum(weights)
import numpy as np
import cv2
import augmentation
from skimage.util import img_as_float
def _compute_scale_and_crop(image_size, crop_size, padding, random_crop):
padded_size = crop_size[0] + padding[0], crop_size[1] + padding[1]
# Compute size ratio from the padded region size to the image_size
scale_y = float(image_size[0]) / float(padded_size[0])
scale_x = float(image_size[1]) / float(padded_size[1])
# Take the minimum as this is the factor by which we must scale to take a `padded_size` sized chunk
scale_factor = min(scale_y, scale_x)
# Compute the size of the region that we must extract from the image
region_height = int(float(crop_size[0]) * scale_factor + 0.5)
region_width = int(float(crop_size[1]) * scale_factor + 0.5)
# Compute the additional space available
if scale_x > scale_y:
# Crop in X
extra_x = image_size[1] - region_width
extra_y = padding[0]
else:
# Crop in Y
extra_y = image_size[0] - region_height
extra_x = padding[1]
# Either choose the centre piece or choose a random piece
if random_crop:
pos_y = np.random.randint(0, extra_y + 1, size=(1,))[0]
pos_x = np.random.randint(0, extra_x + 1, size=(1,))[0]
else:
pos_y = extra_y // 2
pos_x = extra_x // 2
return (pos_y, pos_x), (region_height, region_width)
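# Worked example (centre crop, illustrative numbers): image_size=(480, 640),
# crop_size=(224, 224), padding=(32, 32) -> padded_size=(256, 256),
# scale_factor=min(480/256, 640/256)=1.875, region=(420, 420); since scale_x > scale_y the
# crop happens in X: extra_x=640-420=220, extra_y=32, so pos=(16, 110) with random_crop=False.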
def _compute_scales_and_crops(image_sizes, crop_size, padding, random_crop):
padded_size = crop_size[0] + padding[0], crop_size[1] + padding[1]
# Compute size ratio from the padded region size to the image_size
image_sizes = image_sizes.astype(float)
scale_ys = image_sizes[:, 0] / float(padded_size[0])
scale_xs = image_sizes[:, 1] / float(padded_size[1])
# Take the minimum as this is the factor by which we must scale to take a `padded_size` sized chunk
scale_factors = np.minimum(scale_ys, scale_xs)
# Compute the size of the region that we must extract from the image
region_sizes = (np.array(crop_size)[None, :] * scale_factors[:, None] + 0.5).astype(int)
# Compute the additional space available
extra_space = np.repeat(np.array(padding, dtype=int)[None, :], image_sizes.shape[0], axis=0)
# Crop in X
crop_in_x = scale_xs > scale_ys
extra_space[crop_in_x, 1] = image_sizes[crop_in_x, 1] - region_sizes[crop_in_x, 1]
# Crop in Y
crop_in_y = ~crop_in_x
extra_space[crop_in_y, 0] = image_sizes[crop_in_y, 0] - region_sizes[crop_in_y, 0]
# Either choose the centre piece or choose a random piece
if random_crop:
t = np.random.uniform(0.0, 1.0, size=image_sizes.shape)
pos = (t * (extra_space + 1.0)).astype(int)
else:
pos = extra_space // 2
return pos, region_sizes
def _compute_scales_and_crops_pairs(image_sizes, crop_size, padding, random_crop, pair_offset_size):
padded_size = crop_size[0] + padding[0], crop_size[1] + padding[1]
# Compute size ratio from the padded region size to the image_size
image_sizes = image_sizes.astype(float)
scale_ys = image_sizes[:, 0] / float(padded_size[0])
scale_xs = image_sizes[:, 1] / float(padded_size[1])
# Take the minimum as this is the factor by which we must scale to take a `padded_size` sized chunk
    scale_factors = np.minimum(scale_ys, scale_xs)