| prompt | completion | api |
|---|---|---|
| stringlengths 19-879k | stringlengths 3-53.8k | stringlengths 8-59 |
"""
Optimal binning algorithm for continuous target.
"""
# <NAME> <<EMAIL>>
# Copyright (C) 2019
import numbers
import time
from sklearn.utils import check_array
import numpy as np
from ..information import solver_statistics
from ..logging import Logger
from .auto_monotonic import auto_monotonic_continuous
from .auto_monotonic import peak_valley_trend_change_heuristic
from .binning import OptimalBinning
from .binning_statistics import continuous_bin_info
from .binning_statistics import ContinuousBinningTable
from .binning_statistics import target_info_special_continuous
from .continuous_cp import ContinuousBinningCP
from .preprocessing import preprocessing_user_splits_categorical
from .preprocessing import split_data
from .transformations import transform_continuous_target
logger = Logger(__name__).logger
def _check_parameters(name, dtype, prebinning_method, max_n_prebins,
min_prebin_size, min_n_bins, max_n_bins, min_bin_size,
max_bin_size, monotonic_trend, min_mean_diff, max_pvalue,
max_pvalue_policy, outlier_detector, outlier_params,
cat_cutoff, user_splits, user_splits_fixed,
special_codes, split_digits, time_limit, verbose):
if not isinstance(name, str):
raise TypeError("name must be a string.")
if dtype not in ("categorical", "numerical"):
raise ValueError('Invalid value for dtype. Allowed string '
'values are "categorical" and "numerical".')
if prebinning_method not in ("cart", "quantile", "uniform"):
raise ValueError('Invalid value for prebinning_method. Allowed string '
'values are "cart", "quantile" and "uniform".')
if not isinstance(max_n_prebins, numbers.Integral) or max_n_prebins <= 1:
raise ValueError("max_prebins must be an integer greater than 1; "
"got {}.".format(max_n_prebins))
if not 0. < min_prebin_size <= 0.5:
raise ValueError("min_prebin_size must be in (0, 0.5]; got {}."
.format(min_prebin_size))
if min_n_bins is not None:
if not isinstance(min_n_bins, numbers.Integral) or min_n_bins <= 0:
raise ValueError("min_n_bins must be a positive integer; got {}."
.format(min_n_bins))
if max_n_bins is not None:
if not isinstance(max_n_bins, numbers.Integral) or max_n_bins <= 0:
raise ValueError("max_n_bins must be a positive integer; got {}."
.format(max_n_bins))
if min_n_bins is not None and max_n_bins is not None:
if min_n_bins > max_n_bins:
raise ValueError("min_n_bins must be <= max_n_bins; got {} <= {}."
.format(min_n_bins, max_n_bins))
if min_bin_size is not None:
if (not isinstance(min_bin_size, numbers.Number) or
not 0. < min_bin_size <= 0.5):
raise ValueError("min_bin_size must be in (0, 0.5]; got {}."
.format(min_bin_size))
if max_bin_size is not None:
if (not isinstance(max_bin_size, numbers.Number) or
not 0. < max_bin_size <= 1.0):
raise ValueError("max_bin_size must be in (0, 1.0]; got {}."
.format(max_bin_size))
if min_bin_size is not None and max_bin_size is not None:
if min_bin_size > max_bin_size:
raise ValueError("min_bin_size must be <= max_bin_size; "
"got {} <= {}.".format(min_bin_size,
max_bin_size))
if monotonic_trend is not None:
if monotonic_trend not in ("auto", "auto_heuristic", "auto_asc_desc",
"ascending", "descending", "convex",
"concave", "peak", "valley",
"peak_heuristic", "valley_heuristic"):
raise ValueError('Invalid value for monotonic trend. Allowed '
'string values are "auto", "auto_heuristic", '
'"auto_asc_desc", "ascending", "descending", '
'"concave", "convex", "peak", "valley", '
'"peak_heuristic" and "valley_heuristic".')
if (not isinstance(min_mean_diff, numbers.Number) or min_mean_diff < 0):
raise ValueError("min_mean_diff must be >= 0; got {}."
.format(min_mean_diff))
if max_pvalue is not None:
if (not isinstance(max_pvalue, numbers.Number) or
not 0. < max_pvalue <= 1.0):
raise ValueError("max_pvalue must be in (0, 1.0]; got {}."
.format(max_pvalue))
if max_pvalue_policy not in ("all", "consecutive"):
raise ValueError('Invalid value for max_pvalue_policy. Allowed string '
'values are "all" and "consecutive".')
if outlier_detector is not None:
if outlier_detector not in ("range", "zscore"):
raise ValueError('Invalid value for outlier_detector. Allowed '
'string values are "range" and "zscore".')
if outlier_params is not None:
if not isinstance(outlier_params, dict):
raise TypeError("outlier_params must be a dict or None; "
"got {}.".format(outlier_params))
if cat_cutoff is not None:
if (not isinstance(cat_cutoff, numbers.Number) or
not 0. < cat_cutoff <= 1.0):
raise ValueError("cat_cutoff must be in (0, 1.0]; got {}."
.format(cat_cutoff))
if user_splits is not None:
if not isinstance(user_splits, (np.ndarray, list)):
raise TypeError("user_splits must be a list or numpy.ndarray.")
if user_splits_fixed is not None:
if user_splits is None:
raise ValueError("user_splits must be provided.")
else:
if not isinstance(user_splits_fixed, (np.ndarray, list)):
raise TypeError("user_splits_fixed must be a list or "
"numpy.ndarray.")
elif not all(isinstance(s, bool) for s in user_splits_fixed):
raise ValueError("user_splits_fixed must be list of boolean.")
elif len(user_splits) != len(user_splits_fixed):
raise ValueError("Inconsistent length of user_splits and "
"user_splits_fixed: {} != {}. Lengths must "
"be equal".format(len(user_splits),
len(user_splits_fixed)))
if special_codes is not None:
if not isinstance(special_codes, (np.ndarray, list, dict)):
raise TypeError("special_codes must be a dit, list or "
"numpy.ndarray.")
if isinstance(special_codes, dict) and not len(special_codes):
raise ValueError("special_codes empty. special_codes dict must "
"contain at least one special.")
if split_digits is not None:
if (not isinstance(split_digits, numbers.Integral) or
not 0 <= split_digits <= 8):
raise ValueError("split_digist must be an integer in [0, 8]; "
"got {}.".format(split_digits))
if not isinstance(time_limit, numbers.Number) or time_limit < 0:
raise ValueError("time_limit must be a positive value in seconds; "
"got {}.".format(time_limit))
if not isinstance(verbose, bool):
raise TypeError("verbose must be a boolean; got {}.".format(verbose))
class ContinuousOptimalBinning(OptimalBinning):
"""Optimal binning of a numerical or categorical variable with respect to a
continuous target.
Parameters
----------
name : str, optional (default="")
The variable name.
dtype : str, optional (default="numerical")
The variable data type. Supported data types are "numerical" for
continuous and ordinal variables and "categorical" for categorical
and nominal variables.
prebinning_method : str, optional (default="cart")
The pre-binning method. Supported methods are "cart" for a CART
decision tree, "quantile" to generate prebins with approximately same
frequency and "uniform" to generate prebins with equal width. Method
"cart" uses `sklearn.tree.DecisionTreeRegressor
<https://scikit-learn.org/stable/modules/generated/sklearn.tree.
DecisionTreeRegressor.html>`_.
max_n_prebins : int (default=20)
The maximum number of bins after pre-binning (prebins).
min_prebin_size : float (default=0.05)
The fraction of minimum number of records for each prebin.
min_n_bins : int or None, optional (default=None)
The minimum number of bins. If None, then ``min_n_bins`` is
a value in ``[0, max_n_prebins]``.
max_n_bins : int or None, optional (default=None)
The maximum number of bins. If None, then ``max_n_bins`` is
a value in ``[0, max_n_prebins]``.
min_bin_size : float or None, optional (default=None)
The fraction of minimum number of records for each bin. If None,
``min_bin_size = min_prebin_size``.
max_bin_size : float or None, optional (default=None)
The fraction of maximum number of records for each bin. If None,
``max_bin_size = 1.0``.
monotonic_trend : str or None, optional (default="auto")
The **mean** monotonic trend. Supported trends are "auto",
"auto_heuristic" and "auto_asc_desc" to automatically determine the
trend minimizing the L1-norm using a machine learning classifier,
"ascending", "descending", "concave", "convex", "peak" and
"peak_heuristic" to allow a peak change point, and "valley" and
"valley_heuristic" to allow a valley change point. Trends
"auto_heuristic", "peak_heuristic" and "valley_heuristic" use a
heuristic to determine the change point, and are significantly faster
for large size instances (``max_n_prebins > 20``). Trend "auto_asc_desc"
is used to automatically select the best monotonic trend between
"ascending" and "descending". If None, then the monotonic constraint
is disabled.
min_mean_diff : float, optional (default=0)
The minimum mean difference between consecutive bins. This
option currently only applies when ``monotonic_trend`` is "ascending"
or "descending".
max_pvalue : float or None, optional (default=0.05)
The maximum p-value among bins. The T-test is used to detect bins
not satisfying the p-value constraint.
max_pvalue_policy : str, optional (default="consecutive")
The method to determine bins not satisfying the p-value constraint.
Supported methods are "consecutive" to compare consecutive bins and
"all" to compare all bins.
outlier_detector : str or None, optional (default=None)
The outlier detection method. Supported methods are "range" to use
the interquartile range based method or "zscore" to use the modified
Z-score method.
outlier_params : dict or None, optional (default=None)
Dictionary of parameters to pass to the outlier detection method.
cat_cutoff : float or None, optional (default=None)
Generate bin others with categories in which the fraction of
occurrences is below the ``cat_cutoff`` value. This option is
available when ``dtype`` is "categorical".
user_splits : array-like or None, optional (default=None)
The list of pre-binning split points when ``dtype`` is "numerical" or
the list of prebins when ``dtype`` is "categorical".
user_splits_fixed : array-like or None (default=None)
The list of pre-binning split points that must be fixed.
special_codes : array-like, dict or None, optional (default=None)
List of special codes. Use special codes to specify the data values
that must be treated separately.
split_digits : int or None, optional (default=None)
The significant digits of the split points. If ``split_digits`` is set
to 0, the split points are integers. If None, then all significant
digits in the split points are considered.
time_limit : int (default=100)
The maximum time in seconds to run the optimization solver.
verbose : bool (default=False)
Enable verbose output.
**prebinning_kwargs : keyword arguments
The pre-binning keyword arguments.
.. versionadded:: 0.6.1
Notes
-----
The parameter values ``max_n_prebins`` and ``min_prebin_size`` control
complexity and memory usage. The default values generally produce quality
results, however, some improvement can be achieved by increasing
``max_n_prebins`` and/or decreasing ``min_prebin_size``.
The T-test uses an estimate of the standard deviation of the contingency
table to speed up the model generation and reduce memory usage. Therefore,
it is not guaranteed to obtain bins satisfying the p-value constraint,
although it may work reasonably well in most cases. To avoid bins with
similar means, the parameter ``min_mean_diff`` is recommended.
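Examples
--------
A minimal usage sketch, assuming the class is exposed as
``optbinning.ContinuousOptimalBinning``; adjust the import if the
package layout differs. The data below is synthetic.

>>> import numpy as np
>>> from optbinning import ContinuousOptimalBinning
>>> x = np.random.uniform(0, 10, size=1000)
>>> y = 2.0 * x + np.random.normal(scale=1.0, size=1000)
>>> optb = ContinuousOptimalBinning(name="x", dtype="numerical")
>>> optb.fit(x, y)
>>> x_mean = optb.transform(x, metric="mean")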
"""
def __init__(self, name="", dtype="numerical", prebinning_method="cart",
max_n_prebins=20, min_prebin_size=0.05, min_n_bins=None,
max_n_bins=None, min_bin_size=None, max_bin_size=None,
monotonic_trend="auto", min_mean_diff=0, max_pvalue=None,
max_pvalue_policy="consecutive", outlier_detector=None,
outlier_params=None, cat_cutoff=None, user_splits=None,
user_splits_fixed=None, special_codes=None, split_digits=None,
time_limit=100, verbose=False, **prebinning_kwargs):
self.name = name
self.dtype = dtype
self.prebinning_method = prebinning_method
self.solver = "cp"
self.max_n_prebins = max_n_prebins
self.min_prebin_size = min_prebin_size
self.min_n_bins = min_n_bins
self.max_n_bins = max_n_bins
self.min_bin_size = min_bin_size
self.max_bin_size = max_bin_size
self.monotonic_trend = monotonic_trend
self.min_mean_diff = min_mean_diff
self.max_pvalue = max_pvalue
self.max_pvalue_policy = max_pvalue_policy
self.outlier_detector = outlier_detector
self.outlier_params = outlier_params
self.cat_cutoff = cat_cutoff
self.user_splits = user_splits
self.user_splits_fixed = user_splits_fixed
self.special_codes = special_codes
self.split_digits = split_digits
self.time_limit = time_limit
self.verbose = verbose
self.prebinning_kwargs = prebinning_kwargs
# auxiliary
self._categories = None
self._cat_others = None
self._n_records = None
self._sums = None
self._stds = None
self._min_target = None
self._max_target = None
self._n_zeros = None
self._n_records_cat_others = None
self._n_records_missing = None
self._n_records_special = None
self._sum_cat_others = None
self._sum_special = None
self._sum_missing = None
self._std_cat_others = None
self._std_special = None
self._std_missing = None
self._min_target_missing = None
self._min_target_special = None
self._min_target_others = None
self._max_target_missing = None
self._max_target_special = None
self._max_target_others = None
self._n_zeros_missing = None
self._n_zeros_special = None
self._n_zeros_others = None
self._problem_type = "regression"
# info
self._binning_table = None
self._n_prebins = None
self._n_refinements = 0
self._n_samples = None
self._optimizer = None
self._splits_optimal = None
self._status = None
# timing
self._time_total = None
self._time_preprocessing = None
self._time_prebinning = None
self._time_solver = None
self._time_optimizer = None
self._time_postprocessing = None
self._is_fitted = False
def fit(self, x, y, check_input=False):
"""Fit the optimal binning according to the given training data.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
self : ContinuousOptimalBinning
Fitted optimal binning.
"""
return self._fit(x, y, check_input)
def fit_transform(self, x, y, metric="mean", metric_special=0,
metric_missing=0, show_digits=2, check_input=False):
"""Fit the optimal binning according to the given training data, then
transform it.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
y : array-like, shape = (n_samples,)
Target vector relative to x.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
"""
return self.fit(x, y, check_input).transform(
x, metric, metric_special, metric_missing, show_digits,
check_input)
def transform(self, x, metric="mean", metric_special=0, metric_missing=0,
show_digits=2, check_input=False):
"""Transform given data to mean using bins from the fitted
optimal binning.
Parameters
----------
x : array-like, shape = (n_samples,)
Training vector, where n_samples is the number of samples.
metric : str (default="mean"):
The metric used to transform the input vector. Supported metrics
are "mean" to choose the mean, "indices" to assign the
corresponding indices of the bins and "bins" to assign the
corresponding bin interval.
metric_special : float or str (default=0)
The metric value to transform special codes in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
metric_missing : float or str (default=0)
The metric value to transform missing values in the input vector.
Supported metrics are "empirical" to use the empirical mean, and
any numerical value.
show_digits : int, optional (default=2)
The number of significant digits of the bin column. Applies when
``metric="bins"``.
check_input : bool (default=False)
Whether to check input arrays.
Returns
-------
x_new : numpy array, shape = (n_samples,)
Transformed array.
Notes
-----
Transformation of data including categories not present during training
returns zero mean.
"""
self._check_is_fitted()
return transform_continuous_target(self._splits_optimal, self.dtype,
x, self._n_records, self._sums,
self.special_codes,
self._categories, self._cat_others,
metric, metric_special,
metric_missing, self.user_splits,
show_digits, check_input)
def _fit(self, x, y, check_input):
time_init = time.perf_counter()
if self.verbose:
logger.info("Optimal binning started.")
logger.info("Options: check parameters.")
_check_parameters(**self.get_params())
# Pre-processing
if self.verbose:
logger.info("Pre-processing started.")
self._n_samples = len(x)
if self.verbose:
logger.info("Pre-processing: number of samples: {}"
.format(self._n_samples))
time_preprocessing = time.perf_counter()
[x_clean, y_clean, x_missing, y_missing, x_special, y_special,
y_others, categories, cat_others, _, _, _, _] = split_data(
self.dtype, x, y, self.special_codes, self.cat_cutoff,
self.user_splits, check_input, self.outlier_detector,
self.outlier_params)
self._time_preprocessing = time.perf_counter() - time_preprocessing
if self.verbose:
n_clean = len(x_clean)
n_missing = len(x_missing)
n_special = len(x_special)
logger.info("Pre-processing: number of clean samples: {}"
.format(n_clean))
logger.info("Pre-processing: number of missing samples: {}"
.format(n_missing))
logger.info("Pre-processing: number of special samples: {}"
.format(n_special))
if self.outlier_detector is not None:
n_outlier = self._n_samples-(n_clean + n_missing + n_special)
logger.info("Pre-processing: number of outlier samples: {}"
.format(n_outlier))
if self.dtype == "categorical":
n_categories = len(categories)
n_categories_others = len(cat_others)
n_others = len(y_others)
logger.info("Pre-processing: number of others samples: {}"
.format(n_others))
logger.info("Pre-processing: number of categories: {}"
.format(n_categories))
logger.info("Pre-processing: number of categories others: {}"
.format(n_categories_others))
logger.info("Pre-processing terminated. Time: {:.4f}s"
.format(self._time_preprocessing))
# Pre-binning
if self.verbose:
logger.info("Pre-binning started.")
time_prebinning = time.perf_counter()
if self.user_splits is not None:
n_splits = len(self.user_splits)
if self.verbose:
logger.info("Pre-binning: user splits supplied: {}"
.format(n_splits))
if not n_splits:
splits = self.user_splits
n_records = np.array([])
sums = np.array([])
stds = np.array([])
else:
if self.dtype == "numerical":
user_splits = check_array(
self.user_splits, ensure_2d=False, dtype=None,
force_all_finite=True)
if len(set(user_splits)) != len(user_splits):
raise ValueError("User splits are not unique.")
sorted_idx = np.argsort(user_splits)
import os
import numpy as np
import pandas as pd
from pathlib import Path
from tqdm import tqdm
import json
# import sys
# sys.path.insert(0, './data')
# sys.path.insert(0, './utils')
# sys.path.insert(0, './common')
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from utils.visualization import *
from utils.skeleton import Skeleton
from common.mmm import parse_motions
from common.transforms3dbatch import *
from common.quaternion import *
from renderUtils import quat2xyz
from model.model import Integrator
import torch
import pickle as pkl
import scipy.ndimage.filters as filters
import pdb
## permute joints to make it a DAG
def permute(parents, root=0, new_parent=-1, new_joints=[], new_parents=[]):
new_joints.append(root)
new_parents.append(new_parent)
new_parent = len(new_joints) - 1
for idx, p in enumerate(parents):
if p == root:
permute(parents, root=idx, new_parent=new_parent, new_joints=new_joints, new_parents=new_parents)
return new_joints, new_parents
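# Illustrative example: for parents = [-1, 2, 0, 2] (joint 0 is the root,
# joint 2 is its child, joints 1 and 3 hang off joint 2), the call
#   permute([-1, 2, 0, 2], root=0, new_joints=[], new_parents=[])
# returns ([0, 2, 1, 3], [-1, 0, 1, 1]): joints reordered depth-first with
# parents re-indexed, so every joint appears after its parent (a DAG order).
# Note the mutable default arguments: pass fresh lists on each call, as above.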
def softmax(x, **kw):
softness = kw.pop('softness', 1.0)
maxi, mini = np.max(x, **kw), np.min(x, **kw)
return maxi + np.log(softness + np.exp(mini - maxi))
def softmin(x, **kw):
return -softmax(-x, **kw)
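# softmax/softmin are smooth approximations of np.max/np.min; the `softness`
# keyword controls how closely they track the hard extrema. They are used
# below in fke2rifke to estimate a robust floor height from per-frame foot
# heights, so a single noisy frame does not dictate the floor level.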
class RawData():
def __init__(self):
pass
def _get_f(self):
raise NotImplementedError
def _get_df(self):
raise NotImplementedError
def preProcess(self):
raise NotImplementedError
def get_skeletonNpermutation(self):
raise NotImplementedError
@property
def quat_columns(self):
## quaternion columns
quat_columns = ['root_tx', 'root_ty', 'root_tz']
for joint in self.skel.joints:
quat_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['rw', 'rx', 'ry', 'rz']]
return quat_columns
@property
def fke_columns(self):
## forward kinematics columns
fke_columns = []
for joint in self.skel.joints:
fke_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['tx', 'ty', 'tz']]
return fke_columns
@property
def pose_columns(self):
pose_columns = []
for joint in self.skel.joints:
pose_columns += ['{}_{}'.format(joint, col_suffix) for col_suffix in ['rx', 'ry', 'rz']]
return pose_columns
@property
def rifke_columns(self):
## Save Rotation invariant fke (rifke)
rifke_columns = self.fke_columns + ['root_Vx', 'root_Vz', 'root_Ry', 'feet_l1', 'feet_l2', 'feet_r1', 'feet_r2']
return rifke_columns
@property
def rifke_dict(self):
raise NotImplementedError
def output_columns(self, feats_kind):
if feats_kind in {'euler'}:
return self.pose_columns
elif feats_kind in {'quaternion'}:
return self.quat_columns
elif feats_kind in {'fke'}:
return self.fke_columns
elif feats_kind in {'rifke'}:
return self.rifke_columns
def mat2csv(self, data, filename, columns):
pd.DataFrame(data=data, columns=columns).to_csv(filename)
def quat2fke(self, df_quat, filename_fke, filename_rifke):
'''Save Forward Kinematics'''
df_fke = pd.DataFrame(data=np.zeros((df_quat.shape[0], len(self.fke_columns))), columns=self.fke_columns)
## copying translation as is
df_fke[['root_tx', 'root_ty', 'root_tz']] = df_quat.loc[:, ['root_tx', 'root_ty', 'root_tz']].copy()
xyz_data = quat2xyz(df_quat, self.skel)
df_fke.loc[:, self.fke_columns] = xyz_data.reshape(-1, np.prod(xyz_data.shape[1:]))
#filename_fke = dir_name / Path(row[feats_kind]).relative_to(Path(path2data)/'subjects').with_suffix('.fke')
os.makedirs(filename_fke.parent, exist_ok=True)
df_fke.to_csv(filename_fke.as_posix())
'''Save Rotation Invariant Forward Kinematics'''
df_rifke = pd.DataFrame(data=np.zeros((df_quat.shape[0]-1, len(self.rifke_columns))), columns=self.rifke_columns)
rifke_data = self.fke2rifke(xyz_data.copy())
df_rifke[self.rifke_columns] = rifke_data[..., 3:]
#filename_rifke = dir_name / Path(row[feats_kind]).relative_to(Path(path2data)/'subjects').with_suffix('.rifke')
os.makedirs(filename_rifke.parent, exist_ok=True)
df_rifke.to_csv(filename_rifke.as_posix())
''' Convert rifke to fke to get comparable ground truths '''
new_df_fke = pd.DataFrame(data=self.rifke2fke(df_rifke[self.rifke_columns].values, filename_rifke).reshape(-1, len(self.fke_columns)),
columns=self.fke_columns)
new_fke_dir = filename_fke.parent/'new_fke'
os.makedirs(new_fke_dir, exist_ok=True)
new_df_fke.to_csv((new_fke_dir/filename_fke.name).as_posix())
return xyz_data
## fke to rotation-invariant fke (Holden et al.)
def fke2rifke(self, positions):
""" Put on Floor """
#fid_l, fid_r = np.array([5,6]), np.array([10,11])
fid_l, fid_r = self.rifke_dict['fid_l'], self.rifke_dict['fid_r']
foot_heights = np.minimum(positions[:,fid_l,1], positions[:,fid_r,1]).min(axis=1)
floor_height = softmin(foot_heights, softness=0.5, axis=0)
positions[:,:,1] -= floor_height
""" Add Reference Joint """
trajectory_filterwidth = 3
reference = positions[:,0] * np.array([1, 0, 1])
#Library of tire models
import numpy as np
# Fiala model. Calculates the nonlinear tire curve using the Fiala brush model
# with no longitudinal force. Inputs: C = cornering stiffness per axle (N/rad),
# muP and muS = peak and sliding friction coefficients, alpha = slip angle (rad),
# Fz = normal load on that axle (N).
def fiala(C, muP, muS, alpha, Fz):
alphaSlide = np.abs(np.arctan( 3*muP*Fz / C ))
Fy = np.zeros(alpha.size)
for i in range(alpha.size):
# Use the 3rd-order brush-model polynomial below the sliding slip angle
if np.abs(alpha[i]) < alphaSlide:
Fy[i] = -C * np.tan(alpha[i]) + C**2 / (3 * muP * Fz) * (2 - muS / muP) * np.tan(alpha[i]) * np.abs(np.tan(alpha[i])) - C**3/(9*muP**2*Fz**2)* np.tan(alpha[i])
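# Example (illustrative), assuming the loop above also fills Fy in the sliding
# regime and the function ends with `return Fy`:
#   alpha = np.linspace(-0.15, 0.15, 7)   # slip angles, rad
#   Fy = fiala(C=80000.0, muP=0.9, muS=0.8, alpha=alpha, Fz=5000.0)
# Below alphaSlide the lateral force follows the cubic brush-model polynomial;
# beyond it the tire saturates near the sliding friction level muS * Fz.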
import random
import gym
import numpy as np
M = 5.0
T = 1.0
GOAL = 0.001
class WeightEnv(gym.Env):
metadata = {'render.modes': ['human']}
def __init__(self):
super(WeightEnv, self).__init__()
self.reward_range = (-float('inf'), 0.0)
self.state = np.array([0, 0, 0]) # position, velocity, acceleration
# action: force[-10, 10]
self.action_space = gym.spaces.Box(low=-10, high=10, shape=(1,), dtype=np.float32)
# observation: position[-10,10], velocity[-10,10], acceleration[-10,10], jerk[-10,10]
self.observation_space = gym.spaces.Box(np.array([-10, -10, -10, -10]), np.array([10, 10, 10, 10], dtype=np.float32))
import warnings
from inspect import isclass
import numpy as np
from UQpy.RunModel import RunModel
from UQpy.SampleMethods import *
########################################################################################################################
########################################################################################################################
# Subset Simulation
########################################################################################################################
class SubsetSimulation:
"""
Perform Subset Simulation to estimate probability of failure.
This class estimates probability of failure for a user-defined model using Subset Simulation. The class can
use one of several MCMC algorithms to draw conditional samples.
**Input:**
* **runmodel_object** (``RunModel`` object):
The computational model. It should be of type `RunModel` (see ``RunModel`` class).
* **mcmc_class** (Class of type ``SampleMethods.MCMC``)
Specifies the MCMC algorithm.
Must be a child class of the ``SampleMethods.MCMC`` parent class. Note: This is `not` and object of the class.
This input specifies the class itself.
* **samples_init** (`ndarray`)
A set of samples from the specified probability distribution. These are the samples from the original
distribution. They are not conditional samples. The samples must be an array of size
`nsamples_per_ss x dimension`.
If `samples_init` is not specified, the ``SubsetSimulation`` class will use the `mcmc_class` to draw the initial
samples.
* **p_cond** (`float`):
Conditional probability for each conditional level.
* **nsamples_per_ss** (`int`)
Number of samples to draw in each conditional level.
* **max_level** (`int`)
Maximum number of allowable conditional levels.
* **verbose** (Boolean):
A boolean declaring whether to write text to the terminal.
* **mcmc_kwargs** (`dict`)
Any additional keyword arguments needed for the specific ``MCMC`` class.
**Attributes:**
* **samples** (`list` of `ndarrays`)
A list of arrays containing the samples in each conditional level.
* **g** (`list` of `ndarrays`)
A list of arrays containing the evaluation of the performance function at each sample in each conditional level.
* **g_level** (`list`)
Threshold value of the performance function for each conditional level
* **pf** (`float`)
Probability of failure estimate
* **cov1** (`float`)
Coefficient of variation of the probability of failure estimate assuming independent chains
* **cov2** (`float`)
Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_
**Methods:**
"""
def __init__(self, runmodel_object, mcmc_class=MMH, samples_init=None, p_cond=0.1, nsamples_per_ss=1000,
max_level=10, verbose=False, **mcmc_kwargs):
# Store the MCMC object to create a new object of this type for each subset
self.mcmc_kwargs = mcmc_kwargs
self.mcmc_class = mcmc_class
# Initialize other attributes
self.runmodel_object = runmodel_object
self.samples_init = samples_init
self.p_cond = p_cond
self.nsamples_per_ss = nsamples_per_ss
self.max_level = max_level
self.verbose = verbose
# Check that a RunModel object is being passed in.
if not isinstance(self.runmodel_object, RunModel):
raise AttributeError(
'UQpy: Subset simulation requires the user to pass a RunModel object')
if 'random_state' in self.mcmc_kwargs:
self.random_state = self.mcmc_kwargs['random_state']
if isinstance(self.random_state, int):
self.random_state = np.random.RandomState(self.random_state)
elif not isinstance(self.random_state, (type(None), np.random.RandomState)):
raise TypeError('UQpy: random_state must be None, an int or an np.random.RandomState object.')
else:
self.random_state = None
# Perform initial error checks
self._init_sus()
# Initialize the mcmc_object from the specified class.
mcmc_object = self.mcmc_class(**self.mcmc_kwargs)
self.mcmc_objects = [mcmc_object]
# Initialize new attributes/variables
self.samples = list()
self.g = list()
self.g_level = list()
if self.verbose:
print('UQpy: Running Subset Simulation with MCMC of type: ' + str(type(mcmc_object)))
[self.pf, self.cov1, self.cov2] = self.run()
if self.verbose:
print('UQpy: Subset Simulation Complete!')
# -----------------------------------------------------------------------------------------------------------------------
# The run function executes the chosen subset simulation algorithm
def run(self):
"""
Execute subset simulation
This is an instance method that runs subset simulation. It is automatically called when the SubsetSimulation
class is instantiated.
**Output/Returns:**
* **pf** (`float`)
Probability of failure estimate
* **cov1** (`float`)
Coefficient of variation of the probability of failure estimate assuming independent chains
* **cov2** (`float`)
Coefficient of variation of the probability of failure estimate with dependent chains. From [4]_
"""
step = 0
n_keep = int(self.p_cond * self.nsamples_per_ss)
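# e.g. with p_cond = 0.1 and nsamples_per_ss = 1000, n_keep = 100 seed
# samples are carried over from one conditional level to the next.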
d12 = list()
d22 = list()
# Generate the initial samples - Level 0
# Here we need to make sure that we have good initial samples from the target joint density.
if self.samples_init is None:
warnings.warn('UQpy: You have not provided initial samples.\n Subset simulation is highly sensitive to the '
'initial sample set. It is recommended that the user either:\n'
'- Provide an initial set of samples (samples_init) known to follow the distribution; or\n'
'- Provide a robust MCMC object that will draw independent initial samples from the '
'distribution.')
self.mcmc_objects[0].run(nsamples=self.nsamples_per_ss)
self.samples.append(self.mcmc_objects[0].samples)
else:
self.samples.append(self.samples_init)
# Run the model for the initial samples, sort them by their performance function, and identify the
# conditional level
self.runmodel_object.run(samples=np.atleast_2d(self.samples[step]))
self.g.append(np.squeeze(self.runmodel_object.qoi_list))
g_ind = np.argsort(self.g[step])
self.g_level.append(self.g[step][g_ind[n_keep - 1]])
# Estimate coefficient of variation of conditional probability of first level
d1, d2 = self._cov_sus(step)
d12.append(d1 ** 2)
d22.append(d2 ** 2)
if self.verbose:
print('UQpy: Subset Simulation, conditional level 0 complete.')
while self.g_level[step] > 0 and step < self.max_level:
# Increment the conditional level
step = step + 1
# Initialize the samples and the performance function at the next conditional level
self.samples.append(np.zeros_like(self.samples[step - 1]))
self.samples[step][:n_keep] = self.samples[step - 1][g_ind[0:n_keep], :]
self.g.append(np.zeros_like(self.g[step - 1]))
self.g[step][:n_keep] = self.g[step - 1][g_ind[:n_keep]]
# Unpack the attributes
# Initialize a new MCMC object for each conditional level
self.mcmc_kwargs['seed'] = np.atleast_2d(self.samples[step][:n_keep, :])
self.mcmc_kwargs['random_state'] = self.random_state
new_mcmc_object = self.mcmc_class(**self.mcmc_kwargs)
self.mcmc_objects.append(new_mcmc_object)
# Set the number of samples to propagate each chain (n_prop) in the conditional level
n_prop_test = self.nsamples_per_ss / self.mcmc_objects[step].nchains
if n_prop_test.is_integer():
n_prop = self.nsamples_per_ss // self.mcmc_objects[step].nchains
else:
raise AttributeError(
'UQpy: The number of samples per subset (nsamples_per_ss) must be an integer multiple of '
'the number of MCMC chains.')
# Propagate each chain n_prop times and evaluate the model to accept or reject.
for i in range(n_prop - 1):
# Propagate each chain
if i == 0:
self.mcmc_objects[step].run(nsamples=2 * self.mcmc_objects[step].nchains)
else:
self.mcmc_objects[step].run(nsamples=self.mcmc_objects[step].nchains)
# Decide whether a new simulation is needed for each proposed state
a = self.mcmc_objects[step].samples[i * n_keep:(i + 1) * n_keep, :]
b = self.mcmc_objects[step].samples[(i + 1) * n_keep:(i + 2) * n_keep, :]
test1 = np.equal(a, b)
test = np.logical_and(test1[:, 0], test1[:, 1])
# Pull out the indices of the false values in the test list
ind_false = [i for i, val in enumerate(test) if not val]
# Pull out the indices of the true values in the test list
ind_true = [i for i, val in enumerate(test) if val]
# Do not run the model for those samples where the MCMC state remains unchanged.
self.samples[step][[x + (i + 1) * n_keep for x in ind_true], :] = \
self.mcmc_objects[step].samples[ind_true, :]
self.g[step][[x + (i + 1) * n_keep for x in ind_true]] = self.g[step][ind_true]
# Run the model at each of the new sample points
x_run = self.mcmc_objects[step].samples[[x + (i + 1) * n_keep for x in ind_false], :]
if x_run.size != 0:
self.runmodel_object.run(samples=x_run)
# Temporarily save the latest model runs
g_temp = np.asarray(self.runmodel_object.qoi_list[-len(x_run):])
# Accept the states with g <= g_level
ind_accept = np.where(g_temp <= self.g_level[step - 1])[0]
for ii in ind_accept:
self.samples[step][(i + 1) * n_keep + ind_false[ii]] = x_run[ii]
self.g[step][(i + 1) * n_keep + ind_false[ii]] = g_temp[ii]
# Reject the states with g > g_level
ind_reject = np.where(g_temp > self.g_level[step - 1])[0]
for ii in ind_reject:
self.samples[step][(i + 1) * n_keep + ind_false[ii]] = \
self.samples[step][i * n_keep + ind_false[ii]]
self.g[step][(i + 1) * n_keep + ind_false[ii]] = self.g[step][i * n_keep + ind_false[ii]]
g_ind = np.argsort(self.g[step])
self.g_level.append(self.g[step][g_ind[n_keep]])
# Estimate coefficient of variation of conditional probability of first level
d1, d2 = self._cov_sus(step)
d12.append(d1 ** 2)
d22.append(d2 ** 2)
if self.verbose:
print('UQpy: Subset Simulation, conditional level ' + str(step) + ' complete.')
n_fail = len([value for value in self.g[step] if value < 0])
pf = self.p_cond ** step * n_fail / self.nsamples_per_ss
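# e.g. after three completed conditional levels (step = 3) with p_cond = 0.1,
# 50 failing samples out of nsamples_per_ss = 1000 give
# pf = 0.1**3 * 50 / 1000 = 5e-5.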
cov1 = np.sqrt(np.sum(d12))
cov2 = np.sqrt(np.sum(d22))
return pf, cov1, cov2
# -----------------------------------------------------------------------------------------------------------------------
# Support functions for subset simulation
def _init_sus(self):
"""
Check for errors in the SubsetSimulation class input
This is an instance method that checks for errors in the input to the SubsetSimulation class. It is
automatically called when the SubsetSimulation class is instantiated.
No inputs or returns.
"""
# Check that an MCMC class is being passed in.
if not isclass(self.mcmc_class):
raise ValueError('UQpy: mcmc_class must be a child class of MCMC. Note it is not an instance of the class.')
if not issubclass(self.mcmc_class, MCMC):
raise ValueError('UQpy: mcmc_class must be a child class of MCMC.')
# Check that a RunModel object is being passed in.
if not isinstance(self.runmodel_object, RunModel):
raise AttributeError(
'UQpy: Subset simulation requires the user to pass a RunModel object')
# Check that a valid conditional probability is specified.
if type(self.p_cond).__name__ != 'float':
raise AttributeError('UQpy: Invalid conditional probability. p_cond must be of float type.')
elif self.p_cond <= 0. or self.p_cond >= 1.:
raise AttributeError('UQpy: Invalid conditional probability. p_cond must be in (0, 1).')
# Check that the number of samples per subset is properly defined.
if type(self.nsamples_per_ss).__name__ != 'int':
raise AttributeError('UQpy: Number of samples per subset (nsamples_per_ss) must be integer valued.')
# Check that max_level is an integer
if type(self.max_level).__name__ != 'int':
raise AttributeError('UQpy: The maximum subset level (max_level) must be integer valued.')
def _cov_sus(self, step):
"""
Compute the coefficient of variation of the samples in a conditional level
This is an instance method that is called after each conditional level is complete to compute the coefficient
of variation of the conditional probability in that level.
**Input:**
:param step: Specifies the conditional level
:type step: int
**Output/Returns:**
:param d1: Coefficient of variation in conditional level assuming independent chains
:type d1: float
:param d2: Coefficient of variation in conditional level with dependent chains
:type d2: float
"""
# Here, we assume that the initial samples are drawn to be uncorrelated such that the correction factors do not
# need to be computed.
if step == 0:
d1 = np.sqrt((1 - self.p_cond) / (self.p_cond * self.nsamples_per_ss))
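# e.g. p_cond = 0.1 and nsamples_per_ss = 1000 give, for this first
# (crude Monte Carlo) level, d1 = sqrt(0.9 / 100) ~= 0.095; subsequent
# levels additionally account for correlation within the MCMC chains.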
"""Unit tests for convexified belief propagation"""
import unittest
from mrftools import *
import numpy as np
import matplotlib.pyplot as plt
class TestConvexBP(unittest.TestCase):
"""
Unit test class for convexified belief propagation
"""
def create_q_model(self):
"""Create loop model with one variable hanging off the loop (forming a Q shape)."""
mn = MarkovNet()
np.random.seed(1)
k = [4, 3, 6, 2, 5]
mn.set_unary_factor(0, np.random.randn(k[0]))
mn.set_unary_factor(1, np.random.randn(k[1]))
mn.set_unary_factor(2, np.random.randn(k[2]))
mn.set_unary_factor(3, np.random.randn(k[3]))
mn.set_unary_factor(4, np.random.randn(k[4]))
mn.set_edge_factor((0, 1), np.random.randn(k[0], k[1]))
mn.set_edge_factor((1, 2), np.random.randn(k[1], k[2]))
mn.set_edge_factor((2, 3), np.random.randn(k[2], k[3]))
mn.set_edge_factor((0, 3), np.random.randn(k[0], k[3]))
mn.set_edge_factor((0, 4), np.random.randn(k[0], k[4]))
mn.create_matrices()
return mn
def test_comparison_to_trbp(self):
"""
Test that convex BP and tree-reweighted BP produce the same results when the convex BP counting numbers are
set to the TRBP counting numbers.
"""
mn = self.create_q_model()
probs = {(0, 1): 0.75, (1, 2): 0.75, (2, 3): 0.75, (0, 3): 0.75, (0, 4): 1.0}
trbp_mat = MatrixTRBeliefPropagator(mn, probs)
trbp_mat.infer(display='full')
trbp_mat.load_beliefs()
counting_numbers = probs.copy()
counting_numbers[0] = 1.0 - 2.5
counting_numbers[1] = 1.0 - 1.5
counting_numbers[2] = 1.0 - 1.5
counting_numbers[3] = 1.0 - 1.5
counting_numbers[4] = 1.0 - 1.0
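# These node counting numbers follow the TRBP convention c_i = 1 - sum of the
# edge appearance probabilities over edges incident to node i: node 0 touches
# edges of weight 0.75 + 0.75 + 1.0 = 2.5, nodes 1-3 each touch 0.75 + 0.75 = 1.5,
# and node 4 touches the single edge of weight 1.0. With these values the
# convexified objective coincides with the TRBP free energy, which is what the
# assertions below verify.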
cbp = ConvexBeliefPropagator(mn, counting_numbers)
cbp.infer(display='full')
cbp.load_beliefs()
for i in mn.variables:
print("Convex unary marginal of %d: %s" % (i, repr(np.exp(cbp.var_beliefs[i]))))
print("Matrix TRBP unary marginal of %d: %s" % (i, repr(np.exp(trbp_mat.var_beliefs[i]))))
assert np.allclose(np.exp(cbp.var_beliefs[i]), np.exp(trbp_mat.var_beliefs[i])), "unary beliefs don't match"
print("Convex pairwise marginal: " + repr(np.exp(cbp.pair_beliefs[(0, 1)])))
print("Matrix TRBP pairwise marginal: " + repr(np.exp(trbp_mat.pair_beliefs[(0, 1)])))
print("Pairwise marginal error %f" %
np.sum(np.abs(np.exp(cbp.pair_beliefs[(0, 1)]) - np.exp(trbp_mat.pair_beliefs[(0, 1)]))))
# plt.subplot(211)
# plt.imshow(cbp.pair_beliefs[(0, 1)], interpolation='nearest')
# plt.xlabel('CBP')
# plt.subplot(212)
# plt.imshow(trbp_mat.pair_beliefs[(0, 1)], interpolation='nearest')
# plt.xlabel('TRBP')
# plt.show()
assert np.allclose(cbp.pair_beliefs[(0, 1)], trbp_mat.pair_beliefs[(0, 1)]), "Pair beliefs don't match: " + \
"\nCBP:" + repr(
np.exp(cbp.pair_beliefs[(0, 1)])) + "\nMatTRBP:" + repr(np.exp(trbp_mat.pair_beliefs[(0, 1)]))
print("TRBP matrix energy functional: %f" % trbp_mat.compute_energy_functional())
print("Convex energy functional: %f" % cbp.compute_energy_functional())
assert np.allclose(trbp_mat.compute_energy_functional(), cbp.compute_energy_functional()), \
"Energy functional is not exact. Convex: %f, Matrix TRBP: %f" % (cbp.compute_energy_functional(),
trbp_mat.compute_energy_functional())
def test_comparison_to_bethe(self):
"""
Test that loopy belief propagation and convexified belief propagation output the same inferred marginals
when the counting numbers are set to the Bethe counting numbers (which make convex BP no longer convex).
:return: None
"""
mn = self.create_q_model()
bp = MatrixBeliefPropagator(mn)
bp.infer(display='final')
bp.load_beliefs()
counting_numbers = {(0, 1): 1.0,
(1, 2): 1.0,
(2, 3): 1.0,
(0, 3): 1.0,
(0, 4): 1.0,
0: 1.0 - 3.0,
1: 1.0 - 2.0,
2: 1.0 - 2.0,
3: 1.0 - 2.0,
4: 1.0 - 1.0}
cbp = ConvexBeliefPropagator(mn, counting_numbers)
cbp.infer(display='full')
cbp.load_beliefs()
for i in mn.variables:
print("Convex unary marginal of %d: %s" % (i, repr(np.exp(cbp.var_beliefs[i]))))
print("Matrix BP unary marginal of %d: %s" % (i, repr(np.exp(bp.var_beliefs[i]))))
assert np.allclose(np.exp(cbp.var_beliefs[i]), np.exp(bp.var_beliefs[i])), "unary beliefs don't match"
print("Convex pairwise marginal: " + repr(np.exp(cbp.pair_beliefs[(0, 1)])))
print("Matrix BP pairwise marginal: " + repr(np.exp(bp.pair_beliefs[(0, 1)])))
assert np.allclose(cbp.pair_beliefs[(0, 1)], bp.pair_beliefs[(0, 1)]), "Pair beliefs don't match: " + \
"\nCBP:" + repr(
np.exp(cbp.pair_beliefs[(0, 1)])) + "\nMatBP:" + repr(np.exp(bp.pair_beliefs[(0, 1)]))
print("Bethe matrix energy functional: %f" % bp.compute_energy_functional())
print("Convex energy functional: %f" % cbp.compute_energy_functional())
assert np.allclose(bp.compute_energy_functional(), cbp.compute_energy_functional()), \
"Energy functional is not exact. Convex: %f, BP: %f" % (cbp.compute_energy_functional(),
bp.compute_energy_functional())
def test_convexity(self):
"""Test that the convex BP objective is within numerical precision of being truly convex."""
mn = self.create_q_model()
edge_count = 0.1
node_count = 0.1
counting_numbers = {(0, 1): edge_count,
(1, 2): edge_count,
(2, 3): edge_count,
(0, 3): edge_count,
(0, 4): edge_count,
0: node_count,
1: node_count,
2: node_count,
3: node_count,
4: node_count}
bp = ConvexBeliefPropagator(mn, counting_numbers)
bp.infer(display="full")
messages = bp.message_mat.copy()
noise = 0.1 * np.random.randn(messages.shape[0], messages.shape[1])
# Copyright (c) OpenMMLab. All rights reserved.
import numpy as np
import torch
from mmocr.models.textdet.dense_heads import DRRGHead
def test_drrg_head():
in_channels = 10
drrg_head = DRRGHead(in_channels)
assert drrg_head.in_channels == in_channels
assert drrg_head.k_at_hops == (8, 4)
assert drrg_head.num_adjacent_linkages == 3
assert drrg_head.node_geo_feat_len == 120
assert np.allclose(drrg_head.pooling_scale, 1.0)
assert drrg_head.pooling_output_size == (4, 3)
assert np.allclose(drrg_head.nms_thr, 0.3)
assert np.allclose(drrg_head.min_width, 8.0)
assert np.allclose(drrg_head.max_width, 24.0)
assert np.allclose(drrg_head.comp_shrink_ratio, 1.03)
assert np.allclose(drrg_head.comp_ratio, 0.4)
assert np.allclose(drrg_head.comp_score_thr, 0.3)
assert np.allclose(drrg_head.text_region_thr, 0.2)
assert np.allclose(drrg_head.center_region_thr, 0.2)
assert drrg_head.center_region_area_thr == 50
assert np.allclose(drrg_head.local_graph_thr, 0.7)
# test forward train
num_rois = 16
feature_maps = torch.randn((2, 10, 128, 128), dtype=torch.float)
x = np.random.randint(4, 124, (num_rois, 1))
y = np.random.randint(4, 124, (num_rois, 1))
h = 4 * np.ones((num_rois, 1))
w = 4 * np.ones((num_rois, 1))
angle = (np.random.random_sample((num_rois, 1)) * 2 - 1) * np.pi / 2
cos, sin = np.cos(angle), np.sin(angle)
comp_labels = np.random.randint(1, 3, (num_rois, 1))
num_rois = num_rois * np.ones((num_rois, 1))
comp_attribs = np.hstack([num_rois, x, y, h, w, cos, sin, comp_labels])
comp_attribs = comp_attribs.astype(np.float32)
comp_attribs_ = comp_attribs.copy()
comp_attribs = np.stack([comp_attribs, comp_attribs_])
pred_maps, gcn_data = drrg_head(feature_maps, comp_attribs)
pred_labels, gt_labels = gcn_data
assert pred_maps.size() == (2, 6, 128, 128)
assert pred_labels.ndim == gt_labels.ndim == 2
assert gt_labels.size()[0] * gt_labels.size()[1] == pred_labels.size()[0]
assert pred_labels.size()[1] == 2
# test forward test
with torch.no_grad():
feat_maps = torch.zeros((1, 10, 128, 128))
drrg_head.out_conv.bias.data.fill_(-10)
preds = drrg_head.single_test(feat_maps)
assert all([pred is None for pred in preds])
# test get_boundary
edges = np.stack([np.arange(0, 10), np.arange(1, 11)]).transpose()
edges = np.vstack([edges, np.array([1, 0])])
scores = np.ones(11, dtype=np.float32) * 0.9
x1 = np.arange(2, 22, 2)
x2 = x1 + 2
y1 = np.ones(10) * 2
y2 = y1 + 2
comp_scores = np.ones(10, dtype=np.float32) * 0.9
text_comps = np.stack([x1, y1, x2, y1, x2, y2, x1, y2,
comp_scores]).transpose()
outlier = np.array([50, 50, 52, 50, 52, 52, 50, 52, 0.9])
text_comps = np.vstack([text_comps, outlier])
"""
========================================
Getting the observer location from a Map
========================================
How to access the observer location from a `~sunpy.map.Map` and interpret it.
"""
import matplotlib.pyplot as plt
import numpy as np
from astropy.constants import R_earth
import sunpy.map
from sunpy.coordinates import get_body_heliographic_stonyhurst
from sunpy.data.sample import AIA_171_IMAGE
###############################################################################
# We use the sunpy sample data.
aiamap = sunpy.map.Map(AIA_171_IMAGE)
###############################################################################
# You can access the observer coordinate with:
print(aiamap.observer_coordinate)
###############################################################################
# This provides the location of the SDO as defined in the header and is
# necessary to fully define the helioprojective coordinate system which
# depends on where the observer is. Let's see where this is with respect
# to Earth. SDO is in a geosynchronous orbit with a semi-major axis of
# 42,164.71 km and an inclination of 28.05 deg.
# We will convert it to Geocentric Celestial Reference System (GCRS)
# whose center is at the Earth's center-of-mass.
sdo_gcrs = aiamap.observer_coordinate.gcrs
sun = get_body_heliographic_stonyhurst('sun', aiamap.date)
##############################################################################
# Let's plot the results. The green circle represents the Earth.
# This looks like the Earth is in the way of SDO's
# field of view but remember that it is also above the plane of this plot
# by its declination.
fig = plt.figure()
ax = fig.add_subplot(projection='polar')
circle = plt.Circle((0.0, 0.0), 1.0, transform=ax.transProjectionAffine + ax.transAxes, color="green",
alpha=0.4, label="Earth")
ax.add_artist(circle)
ax.text(0.48, 0.5, "Earth", transform=ax.transAxes)
ax.plot(sdo_gcrs.ra.to('rad'), sdo_gcrs.distance / R_earth, 'o', label=f'SDO {sdo_gcrs.dec:.2f}')
ax.plot(sun.lon.to('rad').value * np.ones(2))
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
"""
Join Hypocenter-Velocity Inversion on Tetrahedral meshes (JHVIT).
6 functions can be called and run in this package:
1- jntHypoVel_T : Joint hypocenter-velocity inversion of P wave data,
parametrized via the velocity model.
2- jntHyposlow_T : Joint hypocenter-velocity inversion of P wave data,
parametrized via the slowness model.
3- jntHypoVelPS_T : Joint hypocenter-velocity inversion of P- and S-wave data,
parametrized via the velocity models.
4- jntHyposlowPS_T : Joint hypocenter-velocity inversion of P- and S-wave data,
parametrized via the slowness models.
5-jointHypoVel_T : Joint hypocenter-velocity inversion of P wave data.
Input data and inversion parameters are downloaded automatically
from external text files.
6-jointHypoVelPS_T : Joint hypocenter-velocity inversion of P- and S-wave data.
Input data and inversion parameters are downloaded automatically
from external text files.
Notes:
- The package ttcrpy must be installed in order to perform the raytracing step.
This package can be downloaded from: https://ttcrpy.readthedocs.io/en/latest/
- To prevent bugs, it is recommended to use Python 3.7
Created on Sat Sep 14 2019
@author: <NAME>
"""
import numpy as np
import scipy.sparse as sp
import scipy.sparse.linalg as spl
import scipy.stats as scps
import re
import sys
import copy
from mesh import MSHReader
from ttcrpy import tmesh
from multiprocessing import Pool, cpu_count, current_process, Manager
import multiprocessing as mp
from collections import OrderedDict
try:
import vtk
from vtk.util.numpy_support import numpy_to_vtk
except BaseException:
print('VTK module not found, saving velocity model in vtk form is disabled')
def msh2vtk(nodes, cells, velocity, outputFilename, fieldname="Velocity"):
"""
Generate a vtk file to store the velocity model.
Parameters
----------
nodes : np.ndarray, shape (nnodes, 3)
Node coordinates.
cells : np.ndarray of int, shape (number of cells, 4)
Indices of nodes forming each cell.
velocity : np.ndarray, shape (nnodes, 1)
Velocity model.
outputFilename : string
The output vtk filename.
fieldname : string, optional
The saved field title. The default is "Velocity".
Returns
-------
float
return 0.0 if no bugs occur.
"""
ugrid = vtk.vtkUnstructuredGrid()
tPts = vtk.vtkPoints()
tPts.SetNumberOfPoints(nodes.shape[0])
for n in range(nodes.shape[0]):
tPts.InsertPoint(n, nodes[n, 0], nodes[n, 1], nodes[n, 2])
ugrid.SetPoints(tPts)
VtkVelocity = numpy_to_vtk(velocity, deep=0, array_type=vtk.VTK_DOUBLE)
VtkVelocity.SetName(fieldname)
ugrid.GetPointData().SetScalars(VtkVelocity)
Tetra = vtk.vtkTetra()
for n in np.arange(cells.shape[0]):
Tetra.GetPointIds().SetId(0, cells[n, 0])
Tetra.GetPointIds().SetId(1, cells[n, 1])
Tetra.GetPointIds().SetId(2, cells[n, 2])
Tetra.GetPointIds().SetId(3, cells[n, 3])
ugrid.InsertNextCell(Tetra.GetCellType(), Tetra.GetPointIds())
gWriter = vtk.vtkUnstructuredGridWriter()
gWriter.SetFileName(outputFilename)
gWriter.SetInputData(ugrid)
gWriter.SetFileTypeToBinary()
gWriter.Update()
return 0.0
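# Illustrative usage (the arrays below are placeholders, not a valid mesh):
#   nodes = np.random.rand(5, 3)                     # 5 node coordinates
#   cells = np.array([[0, 1, 2, 3], [1, 2, 3, 4]])   # 2 tetrahedra
#   velocity = 3.0 + 0.5 * np.random.rand(5, 1)      # one value per node
#   msh2vtk(nodes, cells, velocity, "velocity_model.vtk")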
def check_hypo_indomain(Hypo_new, P_Dimension, Mesh=None):
"""
Check if the new hypocenter is still inside the domain and
project it onto the domain surface otherwise.
Parameters
----------
Hypo_new : np.ndarray, shape (3, ) or (3,1)
The updated hypocenter coordinates.
P_Dimension : np.ndarray, shape (6, )
Domain borders: the maximum and minimum of its 3 dimensions.
Mesh : instance of the class tmesh, optional
The domain discretization. The default is None.
Returns
-------
Hypo_new : np.ndarray, shape (3, )
The input Hypo_new or its projections on the domain surface.
outside : boolean
True if Hypo_new was outside the domain.
"""
outside = False
Hypo_new = Hypo_new.reshape([1, -1])
if Hypo_new[0, 0] < P_Dimension[0]:
Hypo_new[0, 0] = P_Dimension[0]
outside = True
if Hypo_new[0, 0] > P_Dimension[1]:
Hypo_new[0, 0] = P_Dimension[1]
outside = True
if Hypo_new[0, 1] < P_Dimension[2]:
Hypo_new[0, 1] = P_Dimension[2]
outside = True
if Hypo_new[0, 1] > P_Dimension[3]:
Hypo_new[0, 1] = P_Dimension[3]
outside = True
if Hypo_new[0, 2] < P_Dimension[4]:
Hypo_new[0, 2] = P_Dimension[4]
outside = True
if Hypo_new[0, 2] > P_Dimension[5]:
Hypo_new[0, 2] = P_Dimension[5]
outside = True
if Mesh:
if Mesh.is_outside(Hypo_new):
outside = True
Hypout = copy.copy(Hypo_new)
Hypin = np.array([[Hypo_new[0, 0], Hypo_new[0, 1], P_Dimension[4]]])
distance = np.sqrt(np.sum((Hypin - Hypout)**2))
while distance > 1.e-5:
Hmiddle = 0.5 * Hypout + 0.5 * Hypin
if Mesh.is_outside(Hmiddle):
Hypout = Hmiddle
else:
Hypin = Hmiddle
distance = np.sqrt(np.sum((Hypout - Hypin)**2))
Hypo_new = Hypin
return Hypo_new.reshape([-1, ]), outside
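# Illustrative example: a trial hypocenter outside the bounding box is clipped
# back onto it (with Mesh=None the tetrahedral containment test is skipped):
#   dims = np.array([0., 10., 0., 10., 0., 5.])  # xmin xmax ymin ymax zmin zmax
#   h, out = check_hypo_indomain(np.array([4., 12., -1.]), dims)
#   # h -> array([ 4., 10.,  0.]), out -> True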
class Parameters:
def __init__(self, maxit, maxit_hypo, conv_hypo, Vlim, VpVslim, dmax,
lagrangians, max_sc, invert_vel=True, invert_VsVp=False,
hypo_2step=False, use_sc=True, save_vel=False, uncrtants=False,
confdce_lev=0.95, verbose=False):
"""
Parameters
----------
maxit : int
Maximum number of iterations.
maxit_hypo : int
Maximum number of iterations to update hypocenter coordinates.
conv_hypo : float
Convergence criterion.
Vlim : tuple of 3 or 6 floats
Vlim holds the minimum and maximum values of the P- and S-wave
velocity models and the slopes of the penalty functions,
example Vlim = (Vpmin, Vpmax, PAp, Vsmin, Vsmax, PAs).
VpVslim : tuple of 3 floats
Upper and lower limits of Vp/Vs ratio and
the slope of the corresponding Vp/Vs penalty function.
dmax : tuple of four floats
It holds the maximum admissible corrections for the velocity models
(dVp_max and dVs_max), the origin time (dt_max) and
the hypocenter coordinates (dx_max).
lagrangians : tuple of 6 floats
Penalty and constraint weights: λ (smoothing constraint weight),
γ (penalty constraint weight), α (weight of the velocity data point
constraint), wzK (vertical smoothing weight), γ_vpvs (penalty constraint
weight of Vp/Vs ratio), stig (weight of the constraint used to impose
statistical moments on Vp/Vs model).
invert_vel : boolean, optional
Perform velocity inversion if True. The default is True.
invert_VsVp : boolean, optional
Find Vp/Vs ratio model rather than S wave model. The default is False.
hypo_2step : boolean, optional
Relocate hypocenter events in 2 steps. The default is False.
use_sc : boolean, optional
Use static corrections. The default is 'True'.
save_vel : string, optional
Save intermediate velocity models or the final model.
The default is False.
uncrtants : boolean, optional
Calculate the uncertainty of the hypocenter parameters.
The default is False.
confdce_lev : float, optional
The confidence coefficient to calculate the uncertainty.
The default is 0.95.
verbose : boolean, optional
Print information messages about inversion progression.
The default is False.
Returns
-------
None.
"""
self.maxit = maxit
self.maxit_hypo = maxit_hypo
self.conv_hypo = conv_hypo
self.Vpmin = Vlim[0]
self.Vpmax = Vlim[1]
self.PAp = Vlim[2]
if len(Vlim) > 3:
self.Vsmin = Vlim[3]
self.Vsmax = Vlim[4]
self.PAs = Vlim[5]
self.VpVsmin = VpVslim[0]
self.VpVsmax = VpVslim[1]
self.Pvpvs = VpVslim[2]
self.dVp_max = dmax[0]
self.dx_max = dmax[1]
self.dt_max = dmax[2]
if len(dmax) > 3:
self.dVs_max = dmax[3]
self.λ = lagrangians[0]
self.γ = lagrangians[1]
self.γ_vpvs = lagrangians[2]
self.α = lagrangians[3]
self.stig = lagrangians[4]
self.wzK = lagrangians[5]
self.invert_vel = invert_vel
self.invert_VpVs = invert_VsVp
self.hypo_2step = hypo_2step
self.use_sc = use_sc
self.max_sc = max_sc
self.p = confdce_lev
self.uncertainty = uncrtants
self.verbose = verbose
self.saveVel = save_vel
def __str__(self):
"""
Encapsulate the attributes of the class Parameters in a string.
Returns
-------
output : string
Attributes of the class Parameters written in string.
"""
output = "-------------------------\n"
output += "\nParameters of Inversion :\n"
output += "\n-------------------------\n"
output += "\nMaximum number of iterations : {0:d}\n".format(self.maxit)
output += "\nMaximum number of iterations to get hypocenters"
output += ": {0:d}\n".format(self.maxit_hypo)
output += "\nVp minimum : {0:4.2f} km/s\n".format(self.Vpmin)
output += "\nVp maximum : {0:4.2f} km/s\n".format(self.Vpmax)
if self.Vsmin:
output += "\nVs minimum : {0:4.2f} km/s\n".format(self.Vsmin)
if self.Vsmax:
output += "\nVs maximum : {0:4.2f} km/s\n".format(self.Vsmax)
if self.VpVsmin:
output += "\nVpVs minimum : {0:4.2f} km/s\n".format(self.VpVsmin)
if self.VpVsmax:
output += "\nVpVs maximum : {0:4.2f} km/s\n".format(self.VpVsmax)
output += "\nSlope of the penalty function (P wave) : {0:3f}\n".format(
self.PAp)
if self.PAs:
output += "\nSlope of the penalty function (S wave) : {0:3f}\n".format(
self.PAs)
if self.Pvpvs:
output += "\nSlope of the penalty function"
output += "(VpVs ratio wave) : {0:3f}\n".format(self.Pvpvs)
output += "\nMaximum time perturbation by step : {0:4.3f} s\n".format(
self.dt_max)
output += "\nMaximum distance perturbation by step : {0:4.3f} km\n".format(
self.dx_max)
output += "\nMaximum P wave velocity correction by step"
output += " : {0:4.3f} km/s\n".format(self.dVp_max)
if self.dVs_max:
output += "\nMaximum S wave velocity correction by step"
output += " : {0:4.3f} km/s\n".format(self.dVs_max)
output += "\nLagrangians parameters : λ = {0:1.1e}\n".format(self.λ)
output += " : γ = {0:1.1e}\n".format(self.γ)
if self.γ_vpvs:
output += " : γ VpVs ratio = {0:1.1e}\n".format(
self.γ_vpvs)
output += " : α = {0:1.1e}\n".format(self.α)
output += " : wzK factor = {0:4.2f}\n".format(
self.wzK)
if self.stig:
output += " : stats. moment. penalty"
output += "coef. = {0:1.1e}\n".format(self.stig)
output += "\nOther parameters : Inverse Velocity = {0}\n".format(
self.invert_vel)
        output += "\n                 : Use Vp/Vs instead of Vs = {0}\n".format(
self.invert_VpVs)
output += "\n : Use static correction = {0}\n".format(
self.use_sc)
output += "\n : Hyp. parameter Uncertainty estimation = "
output += "{0}\n".format(self.uncertainty)
if self.uncertainty:
output += "\n with a confidence level of"
output += " {0:3.2f}\n".format(self.p)
if self.saveVel == 'last':
output += "\n : Save intermediate velocity models = "
output += "last iteration only\n"
elif self.saveVel == 'all':
output += "\n : Save intermediate velocity models = "
output += "all iterations\n"
else:
output += "\n : Save intermediate velocity models = "
output += "False\n"
        output += "\n                 : Relocate hypocenters using 2 steps = "
output += "{0}\n".format(self.hypo_2step)
output += "\n : convergence criterion = {0:3.4f}\n".format(
self.conv_hypo)
if self.use_sc:
output += "\n : Maximum static correction = "
output += "{0:3.2f}\n".format(self.max_sc)
return output
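# Illustrative sketch (not part of the original API): building a Parameters
# instance directly instead of reading it from a parameter file. Every numeric
# value below is an arbitrary placeholder chosen for demonstration, not a
# recommended default; the keyword names follow the assignments made in
# Parameters.__init__.
def _example_build_parameters():
    return Parameters(maxit=10, maxit_hypo=20, conv_hypo=1.e-3,
                      Vlim=(3.0, 7.0, 1., 1.5, 4.5, 1.),
                      VpVslim=(1.5, 2.1, 1.),
                      dmax=(0.2, 0.5, 0.05, 0.1),
                      lagrangians=(1., 1., 1., 1., 0., 1.),
                      max_sc=0.1, invert_vel=True, invert_VsVp=False,
                      hypo_2step=False, use_sc=True, save_vel=False,
                      uncrtants=False, confdce_lev=0.95, verbose=True)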
class fileReader:
def __init__(self, filename):
"""
Parameters
----------
filename : string
List of data files and other inversion parameters.
Returns
-------
None.
"""
try:
open(filename, 'r')
except IOError:
print("Could not read file:", filename)
sys.exit()
self.filename = filename
assert(self.readParameter('base name')), 'invalid base name'
assert(self.readParameter('mesh file')), 'invalid mesh file'
assert(self.readParameter('rcvfile')), 'invalid rcv file'
assert(self.readParameter('Velocity')), 'invalid Velocity file'
assert(self.readParameter('Time calibration')
), 'invalid calibration data file'
def readParameter(self, parameter, dtype=None):
"""
Read the data filename or the inversion parameter value specified by
the argument parameter.
Parameters
----------
parameter : string
Filename or inversion parameter to read.
dtype : data type, optional
Explicit data type of the filename or the parameter read.
The default is None.
Returns
-------
param : string/int/float
File or inversion parameter.
"""
try:
f = open(self.filename, 'r')
for line in f:
if line.startswith(parameter):
position = line.find(':')
param = line[position + 1:]
param = param.rstrip("\n\r")
if dtype is None:
break
if dtype == int:
param = int(param)
elif dtype == float:
param = float(param)
elif dtype == bool:
if param == 'true' or param == 'True' or param == '1':
param = True
elif param == 'false' or param == 'False' or param == '0':
param = False
else:
print(" non recognized format")
break
return param
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float for " + parameter + "\n")
except NameError as NErr:
print(
parameter +
" is not indicated or has bad value:{0}".format(NErr))
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
finally:
f.close()
def saveVel(self):
"""
Method to read the specified option for saving the velocity model(s).
Returns
-------
bool/string
Save or not the velocity model(s) and for which iteration.
"""
try:
f = open(self.filename, 'r')
for line in f:
if line.startswith('Save Velocity'):
position = line.find(':')
if position > 0:
sv = line[position + 1:].strip()
break
f.close()
if sv == 'last' or sv == 'Last':
return 'last'
elif sv == 'all' or sv == 'All':
return 'all'
elif sv == 'false' or sv == 'False' or sv == '0':
return False
else:
print('bad option to save velocity: default value will be used')
return False
except OSError as err:
print("OS error: {0}".format(err))
except NameError as NErr:
print("save velocity is not indicated :{0}".format(NErr))
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def getIversionParam(self):
"""
Read the inversion parameters and
store them in an object of the class Parameters.
Returns
-------
Params : instance of the class Parameters
Inversion parameters and options.
"""
maxit = self.readParameter('number of iterations', int)
maxit_hypo = self.readParameter('num. iters. to get hypo.', int)
conv_hypo = self.readParameter('convergence Criterion', float)
Vpmin = self.readParameter('Vpmin', float)
Vpmax = self.readParameter('Vpmax', float)
PAp = self.readParameter('PAp', float)
if PAp is None or PAp < 0:
print('PAp : default value will be considered\n')
PAp = 1. # default value
Vsmin = self.readParameter('Vsmin', float)
Vsmax = self.readParameter('Vsmax', float)
PAs = self.readParameter('PAs', float)
if PAs is None or PAs < 0:
print('PAs : default value will be considered\n')
PAs = 1. # default value
VpVsmax = self.readParameter('VpVs_max', float)
if VpVsmax is None or VpVsmax < 0:
print('default value will be considered (5)\n')
VpVsmax = 5. # default value
VpVsmin = self.readParameter('VpVs_min', float)
if VpVsmin is None or VpVsmin < 0:
print('default value will be considered (1.5)\n')
VpVsmin = 1.5 # default value
Pvpvs = self.readParameter('Pvpvs', float)
if Pvpvs is None or Pvpvs < 0:
print('default value will be considered\n')
Pvpvs = 1. # default value
dVp_max = self.readParameter('dVp max', float)
dVs_max = self.readParameter('dVs max', float)
dx_max = self.readParameter('dx max', float)
dt_max = self.readParameter('dt max', float)
Alpha = self.readParameter('alpha', float)
Lambda = self.readParameter('lambda', float)
Gamma = self.readParameter('Gamma', float)
Gamma_ps = self.readParameter('Gamma_vpvs', float)
stigma = self.readParameter('stigma', float)
if stigma is None or stigma < 0:
stigma = 0. # default value
VerSmooth = self.readParameter('vertical smoothing', float)
InverVel = self.readParameter('inverse velocity', bool)
InverseRatio = self.readParameter('inverse Vs/Vp', bool)
Hyp2stp = self.readParameter('reloc.hypo.in 2 steps', bool)
Sc = self.readParameter('use static corrections', bool)
if Sc:
Sc_max = self.readParameter('maximum stat. correction', float)
else:
Sc_max = 0.
uncrtants = self.readParameter('uncertainty estm.', bool)
if uncrtants:
confdce_lev = self.readParameter('confidence level', float)
else:
            confdce_lev = np.nan
Verb = self.readParameter('Verbose ', bool)
saveVel = self.saveVel()
Params = Parameters(maxit, maxit_hypo, conv_hypo,
(Vpmin, Vpmax, PAp, Vsmin, Vsmax, PAs),
(VpVsmin, VpVsmax, Pvpvs),
(dVp_max, dx_max, dt_max, dVs_max),
(Lambda, Gamma, Gamma_ps, Alpha, stigma,
VerSmooth), Sc_max, InverVel, InverseRatio,
Hyp2stp, Sc, saveVel, uncrtants, confdce_lev, Verb)
return Params
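# Illustrative sketch (the filename 'inversion.par' is hypothetical): reading
# all inversion options from a parameter file that follows the 'key : value'
# layout expected by readParameter, then printing the resulting Parameters.
def _example_read_inversion_parameters():
    reader = fileReader('inversion.par')
    par = reader.getIversionParam()
    print(par)
    return par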
class RCVReader:
def __init__(self, p_rcvfile):
"""
Parameters
----------
p_rcvfile : string
File holding receiver coordinates.
Returns
-------
None.
"""
self.rcv_file = p_rcvfile
assert(self.__ChekFormat()), 'invalid format for rcv file'
def getNumberOfStation(self):
"""
Return the number of receivers.
Returns
-------
Nstations : int
Receiver number.
"""
try:
fin = open(self.rcv_file, 'r')
Nstations = int(fin.readline())
fin.close()
return Nstations
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to an integer for the station number.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def getStation(self):
"""
Return coordinates of receivers.
Returns
-------
coordonates : np.ndarray, shape(receiver number,3)
Receiver coordinates.
"""
try:
fin = open(self.rcv_file, 'r')
Nsta = int(fin.readline())
coordonates = np.zeros([Nsta, 3])
for n in range(Nsta):
line = fin.readline()
Coord = re.split(r' ', line)
coordonates[n, 0] = float(Coord[0])
coordonates[n, 1] = float(Coord[2])
coordonates[n, 2] = float(Coord[4])
fin.close()
return coordonates
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in rcvfile.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def __ChekFormat(self):
try:
fin = open(self.rcv_file)
n = 0
for line in fin:
if n == 0:
Nsta = int(line)
num_lines = sum(1 for line in fin)
if(num_lines != Nsta):
fin.close()
return False
if n > 0:
Coord = re.split(r' ', line)
if len(Coord) != 5:
fin.close()
return False
n += 1
fin.close()
return True
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in rcvfile.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
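# Illustrative sketch: the receiver file is expected to start with the station
# count, followed by one line per station. Given the parsing above (fields 0,
# 2 and 4 after splitting on single spaces), the x, y and z coordinates appear
# to be separated by two spaces. 'stations.rcv' is a hypothetical filename.
def _example_read_receivers():
    rdr = RCVReader('stations.rcv')
    print(rdr.getNumberOfStation())
    return rdr.getStation()  # np.ndarray of shape (station number, 3)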
def readEventsFiles(time_file, waveType=False):
"""
Read a list of seismic events and corresponding data from a text file.
Parameters
----------
time_file : string
Event data filename.
waveType : bool
True if the seismic phase of each event is identified.
The default is False.
Returns
-------
    data : np.ndarray or tuple of two np.ndarrays
        Event arrival time data.
"""
if (time_file == ""):
if not waveType:
return (np.array([]))
elif waveType:
return (np.array([]), np.array([]))
try:
fin = open(time_file, 'r')
lstart = 0
for line in fin:
lstart += 1
if line.startswith('Ev_idn'):
break
if not waveType:
data = np.loadtxt(time_file, skiprows=lstart, ndmin=2)
elif waveType:
data = np.loadtxt(fname=time_file, skiprows=2,
dtype='S15', ndmin=2)
ind = np.where(data[:, -1] == b'P')[0]
dataP = data[ind, :-1].astype(float)
ind = np.where(data[:, -1] == b'S')[0]
dataS = data[ind, :-1].astype(float)
data = (dataP, dataS)
return data
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in " + time_file + " file.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
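# Illustrative sketch: reading P- and S-wave arrival times from a single file
# whose last column identifies the phase ('P' or 'S'). 'arrivals.dat' is a
# hypothetical filename; the header line is expected to start with 'Ev_idn'.
def _example_read_arrivals():
    dataP, dataS = readEventsFiles('arrivals.dat', waveType=True)
    return dataP, dataS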
def readVelpoints(vlpfile):
"""
Read known velocity points from a text file.
Parameters
----------
vlpfile : string
Name of the file containing the known velocity points.
Returns
-------
data : np.ndarray, shape (number of points , 3)
Data corresponding to the known velocity points.
"""
if (vlpfile == ""):
return (np.array([]))
try:
fin = open(vlpfile, 'r')
lstart = 0
for line in fin:
lstart += 1
if line.startswith('Pt_id'):
break
data = np.loadtxt(vlpfile, skiprows=lstart)
return data
except OSError as err:
print("OS error: {0}".format(err))
except ValueError:
print("Could not convert data to a float in " + vlpfile + " file.")
except BaseException:
print("Unexpected error:", sys.exc_info()[0])
raise
def _hypo_relocation(ev, evID, hypo, data, rcv, sc, convergence, par):
"""
Location of a single hypocenter event using P arrival time data.
Parameters
----------
ev : int
Event index in the array evID.
evID : np.ndarray, shape (number of events ,)
Event indices.
hypo : np.ndarray, shape (number of events ,5)
Current hypocenter coordinates and origin time for each event.
data : np.ndarray, shape (arrival times number,3)
Arrival times for all events.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
sc : np.ndarray, shape (receiver number or 0 ,1)
Static correction values.
convergence : boolean list, shape (event number)
Convergence state of each event.
par : instance of the class Parameters
The inversion parameters.
Returns
-------
Hypocenter : np.ndarray, shape (5,)
Updated origin time and coordinates of event evID[ev].
"""
indh = np.where(hypo[:, 0] == evID[ev])[0]
if par.verbose:
        print("\nEvent N {0:d} is relocated in the ".format(
int(hypo[ev, 0])) + current_process().name + '\n')
sys.stdout.flush()
indr = np.where(data[:, 0] == evID[ev])[0]
rcv_ev = rcv[data[indr, 2].astype(int) - 1, :]
if par.use_sc:
sc_ev = sc[data[indr, 2].astype(int) - 1]
else:
sc_ev = 0.
nst = indr.size
Hypocenter = hypo[indh[0]].copy()
if par.hypo_2step:
        print("\nEvent N {0:d}: Update longitude and latitude\n".format(
int(hypo[ev, 0])))
sys.stdout.flush()
T0 = np.kron(hypo[indh, 1], np.ones([nst, 1]))
for It in range(par.maxit_hypo):
Tx = np.kron(Hypocenter[2:], np.ones([nst, 1]))
src = np.hstack((ev*np.ones([nst, 1]), T0 + sc_ev, Tx))
tcal, rays = Mesh3D.raytrace(source=src, rcv=rcv_ev, slowness=None,
aggregate_src=False, compute_L=False,
return_rays=True)
slow_0 = Mesh3D.get_s0(src)
Hi = np.ones((nst, 2))
for nr in range(nst):
rayi = rays[nr]
if rayi.shape[0] == 1:
print('\033[43m' + '\nWarning: raypath failed to converge'
                      ' for event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and '
'receiver N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[indr[nr], 0]),
Tx[nr, 0], Tx[nr, 1], Tx[nr, 2],
int(data[indr[nr], 2]), rcv_ev[nr, 0],
rcv_ev[nr, 1], rcv_ev[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slow_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 0] = -dx * slw0 / ds
Hi[nr, 1] = -dy * slw0 / ds
convrays = np.where(tcal != 0)[0]
res = data[indr, 1] - tcal
if convrays.size < nst:
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(2))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
print('\nEvent could not be relocated (iteration no ' +
str(It) + '), skipping')
sys.stdout.flush()
break
indH = np.abs(deltaH) > par.dx_max
deltaH[indH] = par.dx_max * np.sign(deltaH[indH])
updatedHypo = np.hstack((Hypocenter[2:4] + deltaH, Hypocenter[-1]))
updatedHypo, _ = check_hypo_indomain(updatedHypo, Dimensions,
Mesh3D)
Hypocenter[2:] = updatedHypo
            if np.all(np.abs(deltaH) < par.conv_hypo):
break
if par.verbose:
        print("\nEvent N {0:d}: Update all parameters\n".format(int(hypo[ev, 0])))
sys.stdout.flush()
for It in range(par.maxit_hypo):
Tx = np.kron(Hypocenter[2:], np.ones([nst, 1]))
T0 = np.kron(Hypocenter[1], np.ones([nst, 1]))
src = np.hstack((ev*np.ones([nst, 1]), T0 + sc_ev, Tx))
tcal, rays = Mesh3D.raytrace(source=src, rcv=rcv_ev, slowness=None,
aggregate_src=False, compute_L=False,
return_rays=True)
slow_0 = Mesh3D.get_s0(src)
Hi = np.ones([nst, 4])
for nr in range(nst):
rayi = rays[nr]
if rayi.shape[0] == 1:
print('\033[43m' + '\nWarning: raypath failed to converge '
                  'for event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and '
'receiver N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[indr[nr], 0]), Tx[nr, 0],
Tx[nr, 1], Tx[nr, 2], int(data[indr[nr], 2]),
rcv_ev[nr, 0], rcv_ev[nr, 1], rcv_ev[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slow_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
convrays = np.where(tcal != 0)[0]
res = data[indr, 1] - tcal
if convrays.size < nst:
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(4))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
print('\nEvent cannot be relocated (iteration no ' +
str(It) + '), skipping')
sys.stdout.flush()
break
if np.abs(deltaH[0]) > par.dt_max:
deltaH[0] = par.dt_max * np.sign(deltaH[0])
if np.linalg.norm(deltaH[1:]) > par.dx_max:
deltaH[1:] *= par.dx_max / np.linalg.norm(deltaH[1:])
updatedHypo = Hypocenter[2:] + deltaH[1:]
updatedHypo, outside = check_hypo_indomain(updatedHypo,
Dimensions, Mesh3D)
Hypocenter[1:] = np.hstack((Hypocenter[1] + deltaH[0], updatedHypo))
if outside and It == par.maxit_hypo - 1:
print('\nEvent N {0:d} cannot be relocated inside the domain\n'.format(
int(hypo[ev, 0])))
convergence[ev] = 'out'
return Hypocenter
if np.all(np.abs(deltaH[1:]) < par.conv_hypo):
convergence[ev] = True
if par.verbose:
                print('\033[42m' + '\nEvent N {0:d} has converged at {1:d}'
' iteration(s)\n'.format(int(hypo[ev, 0]), It + 1) + '\n'
+ '\033[0m')
sys.stdout.flush()
break
else:
if par.verbose:
            print('\nEvent N {0:d} : maximum number of iterations'
' was reached\n'.format(int(hypo[ev, 0])) + '\n')
sys.stdout.flush()
return Hypocenter
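# Note on the Jacobians assembled in _hypo_relocation and _hypo_relocationPS:
# for each converged ray, the travel-time derivative with respect to a source
# coordinate is approximated from the first ray segment as dt/dx ≈ -s0 * dx / ds
# (and similarly for y and z), where s0 is the slowness at the source and
# (dx, dy, dz) / ds is the unit vector pointing from the hypocenter toward the
# second point of the ray; the derivative with respect to the origin time is 1,
# which is why Hi is initialized with a column of ones.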
def _hypo_relocationPS(ev, evID, hypo, data, rcv, sc, convergence, slow, par):
"""
Relocate a single hypocenter event using P- and S-wave arrival times.
Parameters
----------
ev : int
Event index in the array evID.
evID : np.ndarray, shape (event number ,)
Event indices.
hypo : np.ndarray, shape (event number ,5)
Current hypocenter coordinates and origin times for each event.
data : tuple of two np.ndarrays
Arrival times of P- and S-waves.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
sc : tuple of two np.ndarrays (shape(receiver number or 0,1))
Static correction values of P- and S-waves.
convergence : boolean list, shape (event number)
The convergence state of each event.
slow : tuple of two np.ndarrays (shape(nnodes,1))
P and S slowness models.
par : instance of the class Parameters
The inversion parameters.
Returns
-------
Hypocenter : np.ndarray, shape (5,)
Updated origin time and coordinates of event evID[ev].
"""
(slowP, slowS) = slow
(scp, scs) = sc
(dataP, dataS) = data
indh = np.where(hypo[:, 0] == evID[ev])[0]
if par.verbose:
        print("Event N {0:d} is relocated in the ".format(
int(hypo[ev, 0])) + current_process().name + '\n')
sys.stdout.flush()
indrp = np.where(dataP[:, 0] == evID[ev])[0]
rcv_evP = rcv[dataP[indrp, 2].astype(int) - 1, :]
nstP = indrp.size
indrs = np.where(dataS[:, 0] == evID[ev])[0]
rcv_evS = rcv[dataS[indrs, 2].astype(int) - 1, :]
nstS = indrs.size
Hypocenter = hypo[indh[0]].copy()
if par.use_sc:
scp_ev = scp[dataP[indrp, 2].astype(int) - 1]
scs_ev = scs[dataS[indrs, 2].astype(int) - 1]
else:
scp_ev = np.zeros([nstP, 1])
scs_ev = np.zeros([nstS, 1])
if par.hypo_2step:
if par.verbose:
            print("\nEvent N {0:d}: Update longitude and latitude\n".format(
int(hypo[ev, 0])))
sys.stdout.flush()
for It in range(par.maxit_hypo):
Txp = np.kron(Hypocenter[1:], np.ones([nstP, 1]))
Txp[:, 0] += scp_ev[:, 0]
srcP = np.hstack((ev*np.ones([nstP, 1]), Txp))
tcalp, raysP = Mesh3D.raytrace(source=srcP, rcv=rcv_evP, slowness=slowP,
aggregate_src=False, compute_L=False,
return_rays=True)
slowP_0 = Mesh3D.get_s0(srcP)
Txs = np.kron(Hypocenter[1:], np.ones([nstS, 1]))
Txs[:, 0] += scs_ev[:, 0]
srcS = np.hstack((ev*np.ones([nstS, 1]), Txs))
tcals, raysS = Mesh3D.raytrace(source=srcS, rcv=rcv_evS, slowness=slowS,
aggregate_src=False, compute_L=False,
return_rays=True)
slowS_0 = Mesh3D.get_s0(srcS)
Hi = np.ones((nstP + nstS, 2))
for nr in range(nstP):
rayi = raysP[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' +
                          '\nWarning: raypath failed to converge for event '
'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f})and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataP[indrp[nr], 0]),
Txp[nr, 1], Txp[nr, 2], Txp[nr, 3],
int(dataP[indrp[nr], 2]),
rcv_evP[nr, 0], rcv_evP[nr, 1],
rcv_evP[nr, 2]) +
'\033[0m')
sys.stdout.flush()
continue
slw0 = slowP_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 0] = -dx * slw0 / ds
Hi[nr, 1] = -dy * slw0 / ds
for nr in range(nstS):
rayi = raysS[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' +
                          '\nWarning: raypath failed to converge for event '
'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataS[indrs[nr], 0]),
Txs[nr, 1], Txs[nr, 2],
Txs[nr, 3], int(dataS[indrs[nr], 2]),
rcv_evS[nr, 0], rcv_evS[nr, 1],
rcv_evS[nr, 2]) +
'\033[0m')
sys.stdout.flush()
continue
slw0 = slowS_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr + nstP, 0] = -dx * slw0 / ds
Hi[nr + nstP, 1] = -dy * slw0 / ds
tcal = np.hstack((tcalp, tcals))
res = np.hstack((dataP[indrp, 1], dataS[indrs, 1])) - tcal
convrays = np.where(tcal != 0)[0]
if convrays.size < (nstP + nstS):
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(2))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
if par.verbose:
print('\nEvent could not be relocated (iteration no ' +
str(It) + '), skipping')
sys.stdout.flush()
break
indH = np.abs(deltaH) > par.dx_max
deltaH[indH] = par.dx_max * np.sign(deltaH[indH])
updatedHypo = np.hstack((Hypocenter[2:4] + deltaH, Hypocenter[-1]))
updatedHypo, _ = check_hypo_indomain(updatedHypo, Dimensions,
Mesh3D)
Hypocenter[2:] = updatedHypo
if np.all(np.abs(deltaH) < par.conv_hypo):
break
if par.verbose:
        print("\nEvent N {0:d}: Update all parameters\n".format(int(hypo[ev, 0])))
sys.stdout.flush()
for It in range(par.maxit_hypo):
Txp = np.kron(Hypocenter[1:], np.ones([nstP, 1]))
Txp[:, 0] += scp_ev[:, 0]
srcP = np.hstack((ev*np.ones([nstP, 1]), Txp))
tcalp, raysP = Mesh3D.raytrace(source=srcP, rcv=rcv_evP, slowness=slowP,
aggregate_src=False, compute_L=False,
return_rays=True)
slowP_0 = Mesh3D.get_s0(srcP)
Txs = np.kron(Hypocenter[1:], np.ones([nstS, 1]))
Txs[:, 0] += scs_ev[:, 0]
srcS = np.hstack((ev*np.ones([nstS, 1]), Txs))
tcals, raysS = Mesh3D.raytrace(source=srcS, rcv=rcv_evS, slowness=slowS,
aggregate_src=False, compute_L=False,
return_rays=True)
slowS_0 = Mesh3D.get_s0(srcS)
Hi = np.ones((nstP + nstS, 4))
for nr in range(nstP):
rayi = raysP[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' + '\nWarning: raypath failed to converge for '
                      'event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataP[indrp[nr], 0]), Txp[nr, 1],
Txp[nr, 2], Txp[nr, 3], int(dataP[indrp[nr], 2]),
rcv_evP[nr, 0], rcv_evP[nr, 1],
rcv_evP[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slowP_0[nr]
dx = rayi[2, 0] - Hypocenter[2]
dy = rayi[2, 1] - Hypocenter[3]
dz = rayi[2, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
for nr in range(nstS):
rayi = raysS[nr]
if rayi.shape[0] == 1:
if par.verbose:
print('\033[43m' + '\nWarning: raypath failed to converge for '
                      'event N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(dataS[indrs[nr], 0]), Txs[nr, 1],
Txs[nr, 2], Txs[nr, 3], int(dataS[indrs[nr], 2]),
rcv_evS[nr, 0], rcv_evS[nr, 1],
rcv_evS[nr, 2]) + '\033[0m')
sys.stdout.flush()
continue
slw0 = slowS_0[nr]
dx = rayi[1, 0] - Hypocenter[2]
dy = rayi[1, 1] - Hypocenter[3]
dz = rayi[1, 2] - Hypocenter[4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr + nstP, 1] = -dx * slw0 / ds
Hi[nr + nstP, 2] = -dy * slw0 / ds
Hi[nr + nstP, 3] = -dz * slw0 / ds
tcal = np.hstack((tcalp, tcals))
res = np.hstack((dataP[indrp, 1], dataS[indrs, 1])) - tcal
convrays = np.where(tcal != 0)[0]
if convrays.size < (nstP + nstS):
res = res[convrays]
Hi = Hi[convrays, :]
deltaH = np.linalg.lstsq(Hi, res, rcond=1.e-6)[0]
if not np.all(np.isfinite(deltaH)):
try:
U, S, VVh = np.linalg.svd(Hi.T.dot(Hi) + 1e-9 * np.eye(4))
VV = VVh.T
deltaH = np.dot(VV, np.dot(U.T, Hi.T.dot(res)) / S)
except np.linalg.linalg.LinAlgError:
if par.verbose:
print('\nEvent could not be relocated (iteration no ' +
str(It) + '), skipping\n')
sys.stdout.flush()
break
if np.abs(deltaH[0]) > par.dt_max:
deltaH[0] = par.dt_max * np.sign(deltaH[0])
if np.linalg.norm(deltaH[1:]) > par.dx_max:
deltaH[1:] *= par.dx_max / np.linalg.norm(deltaH[1:])
updatedHypo = Hypocenter[2:] + deltaH[1:]
updatedHypo, outside = check_hypo_indomain(updatedHypo,
Dimensions, Mesh3D)
Hypocenter[1:] = np.hstack((Hypocenter[1] + deltaH[0], updatedHypo))
if outside and It == par.maxit_hypo - 1:
if par.verbose:
print('\nEvent N {0:d} could not be relocated inside '
'the domain\n'.format(int(hypo[ev, 0])))
sys.stdout.flush()
convergence[ev] = 'out'
return Hypocenter
if np.all(np.abs(deltaH[1:]) < par.conv_hypo):
convergence[ev] = True
if par.verbose:
                print('\033[42m' + '\nEvent N {0:d} has converged at'
                      ' iteration {1:d}\n'.format(int(hypo[ev, 0]), It + 1) +
'\n' + '\033[0m')
sys.stdout.flush()
break
else:
if par.verbose:
            print('\nEvent N {0:d} : maximum number of iterations was'
' reached'.format(int(hypo[ev, 0])) + '\n')
sys.stdout.flush()
return Hypocenter
def _uncertaintyEstimat(ev, evID, hypo, data, rcv, sc, slow, par, varData=None):
"""
Estimate origin time uncertainty and confidence ellipsoid.
Parameters
----------
ev : int
Event index in the array evID.
evID : np.ndarray, shape (event number,)
Event indices.
hypo : np.ndarray, shape (event number,5)
Estimated hypocenter coordinates and origin time.
data : np.ndarray, shape (arrival time number,3) or
tuple if both P and S waves are used.
Arrival times of seismic events.
rcv : np.ndarray, shape (receiver number ,3)
coordinates of receivers.
sc : np.ndarray, shape (receiver number or 0 ,1) or
tuple if both P and S waves are used.
Static correction values.
slow : np.ndarray or tuple, shape(nnodes,1)
P or P and S slowness models.
par : instance of the class Parameters
The inversion parameters.
varData : list of two lists
Number of arrival times and the sum of residuals needed to
compute the noise variance. See Block's Thesis, 1991 (P. 63)
The default is None.
Returns
-------
to_confInterv : float
Origin time uncertainty interval.
axis1 : np.ndarray, shape(3,)
Coordinates of the 1st confidence ellipsoid axis (vector).
axis2 : np.ndarray, shape(3,)
Coordinates of the 2nd confidence ellipsoid axis (vector).
axis3 : np.ndarray, shape(3,)
Coordinates of the 3rd confidence ellipsoid axis (vector).
"""
if par.verbose:
        print("Uncertainty estimation for event N {0:d}".format(
int(hypo[ev, 0])) + '\n')
sys.stdout.flush()
indh = np.where(hypo[:, 0] == evID[ev])[0]
if len(slow) == 2:
(slowP, slowS) = slow
(dataP, dataS) = data
(scp, scs) = sc
indrp = np.where(dataP[:, 0] == evID[ev])[0]
rcv_evP = rcv[dataP[indrp, 2].astype(int) - 1, :]
nstP = indrp.size
T0p = np.kron(hypo[indh, 1], np.ones([nstP, 1]))
indrs = np.where(dataS[:, 0] == evID[ev])[0]
rcv_evS = rcv[dataS[indrs, 2].astype(int) - 1, :]
nstS = indrs.size
T0s = np.kron(hypo[indh, 1], np.ones([nstS, 1]))
Txp = np.kron(hypo[indh, 2:], np.ones([nstP, 1]))
Txs = np.kron(hypo[indh, 2:], np.ones([nstS, 1]))
if par.use_sc:
scp_ev = scp[dataP[indrp, 2].astype(int) - 1, :]
scs_ev = scs[dataS[indrs, 2].astype(int) - 1, :]
else:
scp_ev = np.zeros([nstP, 1])
scs_ev = np.zeros([nstS, 1])
srcp = np.hstack((ev*np.ones([nstP, 1]), T0p + scp_ev, Txp))
srcs = np.hstack((ev*np.ones([nstS, 1]), T0s + scs_ev, Txs))
tcalp, raysP = Mesh3D.raytrace(source=srcp, rcv=rcv_evP, slowness=slowP,
aggregate_src=False, compute_L=False,
return_rays=True)
tcals, raysS = Mesh3D.raytrace(source=srcs, rcv=rcv_evS, slowness=slowS,
aggregate_src=False, compute_L=False,
return_rays=True)
slowP_0 = Mesh3D.get_s0(srcp)
slowS_0 = Mesh3D.get_s0(srcs)
Hi = np.ones((nstP + nstS, 4))
for nr in range(nstP):
rayi = raysP[nr]
if rayi.shape[0] == 1:
continue
slw0 = slowP_0[nr]
dx = rayi[1, 0] - hypo[indh, 2]
dy = rayi[1, 1] - hypo[indh, 3]
dz = rayi[1, 2] - hypo[indh, 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
for nr in range(nstS):
rayi = raysS[nr]
if rayi.shape[0] == 1:
continue
slw0 = slowS_0[nr]
dx = rayi[1, 0] - hypo[indh, 2]
dy = rayi[1, 1] - hypo[indh, 3]
dz = rayi[1, 2] - hypo[indh, 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
            Hi[nr + nstP, 1] = -dx * slw0 / ds
            Hi[nr + nstP, 2] = -dy * slw0 / ds
            Hi[nr + nstP, 3] = -dz * slw0 / ds
tcal = np.hstack((tcalp, tcals))
res = np.hstack((dataP[indrp, 1], dataS[indrs, 1])) - tcal
convrays = np.where(tcal != 0)[0]
if convrays.size < (nstP + nstS):
res = res[convrays]
Hi = Hi[convrays, :]
elif len(slow) == 1:
indr = np.where(data[0][:, 0] == evID[ev])[0]
rcv_ev = rcv[data[0][indr, 2].astype(int) - 1, :]
if par.use_sc:
sc_ev = sc[data[0][indr, 2].astype(int) - 1]
else:
sc_ev = 0.
nst = indr.size
T0 = np.kron(hypo[indh, 1], np.ones([nst, 1]))
Tx = np.kron(hypo[indh, 2:], np.ones([nst, 1]))
src = np.hstack((ev*np.ones([nst, 1]), T0+sc_ev, Tx))
tcal, rays = Mesh3D.raytrace(source=src, rcv=rcv_ev, slowness=slow[0],
aggregate_src=False, compute_L=False,
return_rays=True)
slow_0 = Mesh3D.get_s0(src)
Hi = np.ones([nst, 4])
for nr in range(nst):
rayi = rays[nr]
if rayi.shape[0] == 1: # unconverged ray
continue
slw0 = slow_0[nr]
dx = rayi[1, 0] - hypo[indh, 2]
dy = rayi[1, 1] - hypo[indh, 3]
dz = rayi[1, 2] - hypo[indh, 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx * slw0 / ds
Hi[nr, 2] = -dy * slw0 / ds
Hi[nr, 3] = -dz * slw0 / ds
convrays = np.where(tcal != 0)[0]
res = data[0][indr, 1] - tcal
if convrays.size < nst:
res = res[convrays]
Hi = Hi[convrays, :]
N = res.shape[0]
try:
Q = np.linalg.inv(Hi.T @ Hi)
except np.linalg.linalg.LinAlgError:
if par.verbose:
print("ill-conditioned Jacobian matrix")
sys.stdout.flush()
U, S, V = np.linalg.svd(Hi.T @ Hi)
Q = V.T @ np.diag(1./(S + 1.e-9)) @ U.T
    eigenVals, eigenVec = np.linalg.eig(Q[1:, 1:])
ind = np.argsort(eigenVals)
if varData:
s2 = 1
varData[0] += [np.sum(res**2)]
varData[1] += [N]
else:
s2 = np.sum(res**2) / (N - 4)
alpha = 1 - par.p
coef = scps.t.ppf(1 - alpha / 2., N - 4)
axis1 = np.sqrt(eigenVals[ind[2]] * s2) * coef * eigenVec[:, ind[2]]
axis2 = np.sqrt(eigenVals[ind[1]] * s2) * coef * eigenVec[:, ind[1]]
axis3 = np.sqrt(eigenVals[ind[0]] * s2) * coef * eigenVec[:, ind[0]]
    to_confInterv = np.sqrt(Q[0, 0] * s2) * coef
return to_confInterv, axis1, axis2, axis3
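# Illustrative sketch (not part of the original module): given a posterior
# covariance matrix Q ordered as (t0, x, y, z) — the column order in which Hi
# is assembled above — the confidence intervals follow Block (1991): the
# spatial 3x3 block gives the ellipsoid semi-axes and the origin-time variance
# gives the interval half-width. The inputs here are synthetic placeholders;
# scps refers to scipy.stats as imported elsewhere in this module.
def _example_confidence_axes(Q, res, N, confidence=0.95):
    s2 = np.sum(res**2) / (N - 4)  # noise variance estimate
    coef = scps.t.ppf(1 - (1 - confidence) / 2., N - 4)
    eigenVals, eigenVec = np.linalg.eig(Q[1:, 1:])  # spatial block (x, y, z)
    ind = np.argsort(eigenVals)
    # semi-axes sorted from largest to smallest
    axes = [np.sqrt(eigenVals[i] * s2) * coef * eigenVec[:, i] for i in ind[::-1]]
    to_conf = np.sqrt(Q[0, 0] * s2) * coef  # origin-time half-width
    return to_conf, axes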
def jntHypoVel_T(data, caldata, Vinit, cells, nodes, rcv, Hypo0,
par, threads=1, vPoints=np.array([]), basename='Vel'):
"""
    Joint hypocenter-velocity inversion from P-wave arrival time data,
    parametrized using the velocity model.
Parameters
----------
data : np.ndarray, shape(arrival time number, 3)
        Arrival times and corresponding receivers for each event.
caldata : np.ndarray, shape(number of calibration shots, 3)
Calibration shot data.
Vinit : np.ndarray, shape(nnodes,1) or (1,1)
Initial velocity model.
cells : np.ndarray of int, shape (cell number, 4)
Indices of nodes forming the cells.
nodes : np.ndarray, shape (nnodes, 3)
Node coordinates.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
Hypo0 : np.ndarray, shape(event number, 5)
        First guesses of the hypocenter coordinates (must all be different).
par : instance of the class Parameters
The inversion parameters.
threads : int, optional
Thread number. The default is 1.
vPoints : np.ndarray, shape(point number,4), optional
Known velocity points. The default is np.array([]).
basename : string, optional
The filename used to save the output file. The default is 'Vel'.
Returns
-------
output : python dictionary
It contains the estimated hypocenter coordinates and their origin times,
static correction values, velocity model, convergence states,
parameter uncertainty and residual norm in each iteration.
"""
if par.verbose:
print(par)
print('inversion involves the velocity model\n')
sys.stdout.flush()
if par.use_sc:
nstation = rcv.shape[0]
else:
nstation = 0
Static_Corr = np.zeros([nstation, 1])
nnodes = nodes.shape[0]
# observed traveltimes
if data.shape[0] > 0:
evID = np.unique(data[:, 0]).astype(int)
tObserved = data[:, 1]
numberOfEvents = evID.size
else:
tObserved = np.array([])
numberOfEvents = 0
rcvData = np.zeros([data.shape[0], 3])
for ev in range(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
rcvData[indr] = rcv[data[indr, 2].astype(int) - 1, :]
# calibration data
if caldata.shape[0] > 0:
calID = np.unique(caldata[:, 0])
ncal = calID.size
time_calibration = caldata[:, 1]
TxCalib = np.zeros((caldata.shape[0], 5))
TxCalib[:, 2:] = caldata[:, 3:]
TxCalib[:, 0] = caldata[:, 0]
rcvCalib = np.zeros([caldata.shape[0], 3])
if par.use_sc:
Msc_cal = []
for nc in range(ncal):
indr = np.where(caldata[:, 0] == calID[nc])[0]
rcvCalib[indr] = rcv[caldata[indr, 2].astype(int) - 1, :]
if par.use_sc:
Msc_cal.append(sp.csr_matrix(
(np.ones([indr.size, ]),
(range(indr.size), caldata[indr, 2]-1)),
shape=(indr.size, nstation)))
else:
ncal = 0
time_calibration = np.array([])
# initial velocity model
if Vinit.size == 1:
Velocity = Vinit * np.ones([nnodes, 1])
Slowness = 1. / Velocity
elif Vinit.size == nnodes:
Velocity = Vinit
Slowness = 1. / Velocity
else:
print("invalid Velocity Model\n")
sys.stdout.flush()
return 0
# used threads
nThreadsSystem = cpu_count()
nThreads = np.min((threads, nThreadsSystem))
global Mesh3D, Dimensions
Mesh3D = tmesh.Mesh3d(nodes, tetra=cells, method='DSPM', cell_slowness=0,
n_threads=nThreads, n_secondary=2, n_tertiary=1,
process_vel=1, radius_factor_tertiary=2,
translate_grid=1)
Mesh3D.set_slowness(Slowness)
Dimensions = np.empty(6)
Dimensions[0] = min(nodes[:, 0])
Dimensions[1] = max(nodes[:, 0])
Dimensions[2] = min(nodes[:, 1])
Dimensions[3] = max(nodes[:, 1])
Dimensions[4] = min(nodes[:, 2])
Dimensions[5] = max(nodes[:, 2])
# Hypocenter
if numberOfEvents > 0 and Hypo0.shape[0] != numberOfEvents:
print("invalid Hypocenters0 format\n")
sys.stdout.flush()
return 0
else:
Hypocenters = Hypo0.copy()
ResidueNorm = np.zeros([par.maxit])
if par.invert_vel:
if par.use_sc:
U = sp.bsr_matrix(
np.vstack((np.zeros([nnodes, 1]), np.ones([nstation, 1]))))
nbre_param = nnodes + nstation
if par.max_sc > 0. and par.max_sc < 1.:
N = sp.bsr_matrix(
np.hstack((np.zeros([nstation, nnodes]), np.eye(nstation))))
NtN = (1. / par.max_sc**2) * N.T.dot(N)
else:
U = sp.csr_matrix(np.zeros([nnodes, 1]))
nbre_param = nnodes
# build matrix D
if vPoints.size > 0:
if par.verbose:
print('\nBuilding velocity data point matrix D\n')
sys.stdout.flush()
D = Mesh3D.compute_D(vPoints[:, 2:])
D = sp.hstack((D, sp.csr_matrix((D.shape[0], nstation)))).tocsr()
DtD = D.T @ D
nD = spl.norm(DtD)
# Build regularization matrix
if par.verbose:
print('\n...Building regularization matrix K\n')
sys.stdout.flush()
kx, ky, kz = Mesh3D.compute_K(order=2, taylor_order=2,
weighting=1, squared=0,
s0inside=0, additional_points=3)
KX = sp.hstack((kx, sp.csr_matrix((nnodes, nstation))))
KX_Square = KX.transpose().dot(KX)
KY = sp.hstack((ky, sp.csr_matrix((nnodes, nstation))))
KY_Square = KY.transpose().dot(KY)
KZ = sp.hstack((kz, sp.csr_matrix((nnodes, nstation))))
KZ_Square = KZ.transpose().dot(KZ)
KtK = KX_Square + KY_Square + par.wzK * KZ_Square
nK = spl.norm(KtK)
if nThreads == 1:
hypo_convergence = list(np.zeros(numberOfEvents, dtype=bool))
else:
manager = Manager()
hypo_convergence = manager.list(np.zeros(numberOfEvents, dtype=bool))
for i in range(par.maxit):
if par.verbose:
print("Iteration N : {0:d}\n".format(i + 1))
sys.stdout.flush()
if par.invert_vel:
if par.verbose:
print('Iteration {0:d} - Updating velocity model\n'.format(i + 1))
print("Updating penalty vector\n")
sys.stdout.flush()
# Build vector C
cx = kx.dot(Velocity)
cy = ky.dot(Velocity)
cz = kz.dot(Velocity)
# build matrix P and dP
indVmin = np.where(Velocity < par.Vpmin)[0]
indVmax = np.where(Velocity > par.Vpmax)[0]
indPinality = np.hstack([indVmin, indVmax])
dPinality_V = np.hstack(
[-par.PAp * np.ones(indVmin.size), par.PAp * np.ones(indVmax.size)])
pinality_V = np.vstack(
[par.PAp * (par.Vpmin - Velocity[indVmin]), par.PAp *
(Velocity[indVmax] - par.Vpmax)])
d_Pinality = sp.csr_matrix(
(dPinality_V, (indPinality, indPinality)), shape=(
nnodes, nbre_param))
Pinality = sp.csr_matrix(
(pinality_V.reshape([-1, ]),
(indPinality, np.zeros([indPinality.shape[0]]))),
shape=(nnodes, 1))
if par.verbose:
print('Penalties applied at {0:d} nodes\n'.format(
dPinality_V.size))
print('...Start Raytracing\n')
sys.stdout.flush()
if numberOfEvents > 0:
sources = np.empty((data.shape[0], 5))
if par.use_sc:
sc_data = np.empty((data.shape[0], ))
for ev in np.arange(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
indh = np.where(Hypocenters[:, 0] == evID[ev])[0]
sources[indr, :] = Hypocenters[indh, :]
if par.use_sc:
sc_data[indr] = Static_Corr[data[indr, 2].astype(int)
- 1, 0]
if par.use_sc:
sources[:, 1] += sc_data
tt, rays, M0 = Mesh3D.raytrace(source=sources,
rcv=rcvData, slowness=None,
aggregate_src=False,
compute_L=True, return_rays=True)
else:
tt, rays, M0 = Mesh3D.raytrace(source=sources,
rcv=rcvData, slowness=None,
aggregate_src=False,
compute_L=True, return_rays=True)
v0 = 1. / Mesh3D.get_s0(sources)
if par.verbose:
inconverged = np.where(tt == 0)[0]
for icr in inconverged:
print('\033[43m' +
                              '\nWarning: raypath failed to converge for event '
'N {0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver '
'N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(data[icr, 0]), sources[icr, 2],
sources[icr, 3], sources[icr, 4],
int(data[icr, 2]), rcvData[icr, 0],
rcvData[icr, 1], rcvData[icr, 2])
+ '\033[0m')
                        print('\033[43m' + 'ray will be temporarily removed' +
'\033[0m')
sys.stdout.flush()
else:
tt = np.array([])
if ncal > 0:
if par.use_sc:
TxCalib[:, 1] = Static_Corr[caldata[:, 2].astype(int) - 1, 0]
tt_Calib, Mcalib = Mesh3D.raytrace(
source=TxCalib, rcv=rcvCalib, slowness=None,
aggregate_src=False, compute_L=True, return_rays=False)
else:
tt_Calib, Mcalib = Mesh3D.raytrace(
source=TxCalib, rcv=rcvCalib, slowness=None,
aggregate_src=False, compute_L=True, return_rays=False)
if par.verbose:
inconverged = np.where(tt_Calib == 0)[0]
for icr in inconverged:
print('\033[43m' +
'\nWarning: raypath failed to converge '
'for calibration shot N '
'{0:d} :({1:5.4f},{2:5.4f},{3:5.4f}) and receiver'
' N {4:d} :({5:5.4f},{6:5.4f},{7:5.4f})\n'.format(
int(caldata[icr, 0]), TxCalib[icr, 2],
TxCalib[icr, 3], TxCalib[icr, 4],
int(caldata[icr, 2]), rcvCalib[icr, 0],
rcvCalib[icr, 1], rcvCalib[icr, 2])
+ '\033[0m')
                        print('\033[43m' + 'ray will be temporarily removed' +
'\033[0m')
sys.stdout.flush()
else:
tt_Calib = np.array([])
Resid = tObserved - tt
convrayData = np.where(tt != 0)[0]
convrayClib = np.where(tt_Calib != 0)[0]
if Resid.size == 0:
Residue = time_calibration[convrayClib] - tt_Calib[convrayClib]
else:
Residue = np.hstack(
(np.zeros([np.count_nonzero(tt) - 4 * numberOfEvents]),
time_calibration[convrayClib] - tt_Calib[convrayClib]))
ResidueNorm[i] = np.linalg.norm(np.hstack(
(Resid[convrayData], time_calibration[convrayClib] -
tt_Calib[convrayClib])))
if par.verbose:
print('...Building matrix M\n')
sys.stdout.flush()
M = sp.csr_matrix((0, nbre_param))
ir = 0
for even in range(numberOfEvents):
indh = np.where(Hypocenters[:, 0] == evID[even])[0]
indr = np.where(data[:, 0] == evID[even])[0]
Mi = M0[even]
nst_ev = Mi.shape[0]
Hi = np.ones([indr.size, 4])
for nr in range(indr.size):
rayi = rays[indr[nr]]
if rayi.shape[0] == 1:
continue
vel0 = v0[indr[nr]]
dx = rayi[1, 0] - Hypocenters[indh[0], 2]
dy = rayi[1, 1] - Hypocenters[indh[0], 3]
dz = rayi[1, 2] - Hypocenters[indh[0], 4]
ds = np.sqrt(dx * dx + dy * dy + dz * dz)
Hi[nr, 1] = -dx / (vel0 * ds)
Hi[nr, 2] = -dy / (vel0 * ds)
Hi[nr, 3] = -dz / (vel0 * ds)
convrays = np.where(tt[indr] != 0)[0]
if convrays.shape[0] < nst_ev:
Hi = Hi[convrays, :]
nst_ev = convrays.size
Q, _ = np.linalg.qr(Hi, mode='complete')
Ti = sp.csr_matrix(Q[:, 4:])
Ti = Ti.T
if par.use_sc:
Lsc = sp.csr_matrix((np.ones(nst_ev,),
(range(nst_ev),
data[indr[convrays], 2] - 1)),
shape=(nst_ev, nstation))
Mi = sp.hstack((Mi, Lsc))
Mi = sp.csr_matrix(Ti @ Mi)
M = sp.vstack([M, Mi])
Residue[ir:ir + (nst_ev - 4)] = Ti.dot(Resid[indr[convrays]])
ir += nst_ev - 4
for evCal in range(len(Mcalib)):
Mi = Mcalib[evCal]
if par.use_sc:
indrCal = np.where(caldata[:, 0] == calID[evCal])[0]
convraysCal = np.where(tt_Calib[indrCal] != 0)[0]
Mi = sp.hstack((Mi, Msc_cal[evCal][convraysCal]))
M = sp.vstack([M, Mi])
if par.verbose:
print('Assembling matrices and solving system\n')
sys.stdout.flush()
S = np.sum(Static_Corr)
term1 = (M.T).dot(M)
nM = spl.norm(term1[:nnodes, :nnodes])
term2 = (d_Pinality.T).dot(d_Pinality)
nP = spl.norm(term2)
term3 = U.dot(U.T)
λ = par.λ * nM / nK
if nP != 0:
γ = par.γ * nM / nP
else:
γ = par.γ
A = term1 + λ * KtK + γ * term2 + term3
if par.use_sc and par.max_sc > 0. and par.max_sc < 1.:
A += NtN
term1 = (M.T).dot(Residue)
term1 = term1.reshape([-1, 1])
term2 = (KX.T).dot(cx) + (KY.T).dot(cy) + par.wzK * (KZ.T).dot(cz)
term3 = (d_Pinality.T).dot(Pinality)
term4 = U.dot(S)
b = term1 - λ * term2 - γ * term3 - term4
if vPoints.size > 0:
α = par.α * nM / nD
A += α * DtD
b += α * D.T @ (vPoints[:, 1].reshape(-1, 1) -
D[:, :nnodes] @ Velocity)
x = spl.minres(A, b, tol=1.e-8)
deltam = x[0].reshape(-1, 1)
# update velocity vector and static correction
dVmax = np.max(abs(deltam[:nnodes]))
if dVmax > par.dVp_max:
deltam[:nnodes] *= par.dVp_max / dVmax
if par.use_sc and par.max_sc > 0. and par.max_sc < 1.:
sc_mean = np.mean(abs(deltam[nnodes:]))
if sc_mean > par.max_sc * np.mean(abs(Residue)):
deltam[nnodes:] *= par.max_sc * np.mean(abs(Residue)) / sc_mean
Velocity += np.matrix(deltam[:nnodes])
Slowness = 1. / Velocity
Static_Corr += deltam[nnodes:]
if par.saveVel == 'all':
if par.verbose:
print('...Saving Velocity models\n')
sys.stdout.flush()
try:
msh2vtk(nodes, cells, Velocity, basename +
'it{0}.vtk'.format(i + 1))
except ImportError:
print('vtk module is not installed\n')
sys.stdout.flush()
elif par.saveVel == 'last' and i == par.maxit - 1:
try:
msh2vtk(nodes, cells, Velocity, basename + '.vtk')
except ImportError:
print('vtk module is not installed\n')
sys.stdout.flush()
#######################################
# relocate Hypocenters
#######################################
Mesh3D.set_slowness(Slowness)
if numberOfEvents > 0:
print("\nIteration N {0:d} : Relocation of events\n".format(i + 1))
sys.stdout.flush()
if nThreads == 1:
for ev in range(numberOfEvents):
Hypocenters[ev, :] = _hypo_relocation(
ev, evID, Hypocenters, data, rcv,
Static_Corr, hypo_convergence, par)
else:
p = mp.get_context("fork").Pool(processes=nThreads)
updatedHypo = p.starmap(_hypo_relocation,
[(int(ev), evID, Hypocenters, data,
rcv, Static_Corr, hypo_convergence,
par)for ev in range(numberOfEvents)])
p.close() # pool won't take any new tasks
p.join()
Hypocenters = np.array([updatedHypo])[0]
# Calculate the hypocenter parameter uncertainty
uncertnty = []
if par.uncertainty and numberOfEvents > 0:
print("\nUncertainty evaluation\n")
sys.stdout.flush()
# estimate data variance
if nThreads == 1:
varData = [[], []]
for ev in range(numberOfEvents):
uncertnty.append(
_uncertaintyEstimat(ev, evID, Hypocenters, (data,), rcv,
Static_Corr, (Slowness,), par, varData))
else:
varData = manager.list([[], []])
with Pool(processes=nThreads) as p:
uncertnty = p.starmap(
_uncertaintyEstimat,
[(int(ev), evID, Hypocenters, (data, ),
rcv, Static_Corr, (Slowness, ), par,
varData) for ev in range(numberOfEvents)])
p.close() # pool won't take any new tasks
p.join()
sgmData = np.sqrt(np.sum(varData[0]) /
(np.sum(varData[1]) - 4 *
numberOfEvents -
Static_Corr.size))
for ic in range(numberOfEvents):
uncertnty[ic] = tuple([sgmData * x for x in uncertnty[ic]])
output = OrderedDict()
output['Hypocenters'] = Hypocenters
output['Convergence'] = list(hypo_convergence)
output['Uncertainties'] = uncertnty
output['Velocity'] = Velocity
output['Sts_Corrections'] = Static_Corr
output['Residual_norm'] = ResidueNorm
return output
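# Illustrative sketch (all file names and values below are hypothetical): a
# minimal driver tying the readers above to the joint inversion. The mesh
# nodes and cells are assumed to be available as np.ndarrays, e.g. loaded
# from the mesh file listed in the parameter file.
def _example_run_joint_inversion(nodes, cells):
    par = fileReader('inversion.par').getIversionParam()
    rcv = RCVReader('stations.rcv').getStation()
    data = readEventsFiles('arrivals.dat')
    caldata = readEventsFiles('calibration.dat')
    Hypo0 = np.loadtxt('hypo0.dat', ndmin=2)  # initial hypocenter guesses
    Vinit = np.array([[5.0]])  # homogeneous starting velocity model (km/s)
    return jntHypoVel_T(data, caldata, Vinit, cells, nodes, rcv, Hypo0,
                        par, threads=4)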
def jntHyposlow_T(data, caldata, Vinit, cells, nodes, rcv, Hypo0,
par, threads=1, vPoints=np.array([]), basename='Slowness'):
"""
    Joint hypocenter-velocity inversion from P-wave arrival time data,
    parametrized using the slowness model.
Parameters
----------
data : np.ndarray, shape(arrival time number, 3)
Arrival times and corresponding receivers for each event.
caldata : np.ndarray, shape(number of calibration shots, 6)
Calibration shot data.
Vinit : np.ndarray, shape(nnodes,1) or (1,1)
Initial velocity model.
    cells : np.ndarray of int, shape (cell number, 4)
Indices of nodes forming the cells.
nodes : np.ndarray, shape (nnodes, 3)
Node coordinates.
rcv : np.ndarray, shape (receiver number,3)
Coordinates of receivers.
Hypo0 : np.ndarray, shape(event number, 5)
        First guesses of the hypocenter coordinates (must all be different).
par : instance of the class Parameters
The inversion parameters.
threads : int, optional
Thread number. The default is 1.
vPoints : np.ndarray, shape(point number,4), optional
Known velocity points. The default is np.array([]).
basename : string, optional
The filename used to save the output files. The default is 'Slowness'.
Returns
-------
output : python dictionary
It contains the estimated hypocenter coordinates and their origin times,
static correction values, velocity model, convergence states,
parameter uncertainty and residual norm in each iteration.
"""
if par.verbose:
print(par)
print('inversion involves the slowness model\n')
sys.stdout.flush()
if par.use_sc:
nstation = rcv.shape[0]
else:
nstation = 0
Static_Corr = np.zeros([nstation, 1])
nnodes = nodes.shape[0]
# observed traveltimes
if data.shape[0] > 0:
evID = np.unique(data[:, 0]).astype(int)
tObserved = data[:, 1]
numberOfEvents = evID.size
else:
tObserved = np.array([])
numberOfEvents = 0
rcvData = np.zeros([data.shape[0], 3])
for ev in range(numberOfEvents):
indr = np.where(data[:, 0] == evID[ev])[0]
rcvData[indr] = rcv[data[indr, 2].astype(int) - 1, :]
# get calibration data
if caldata.shape[0] > 0:
calID = np.unique(caldata[:, 0])
ncal = calID.size
time_calibration = caldata[:, 1]
TxCalib = np.zeros((caldata.shape[0], 5))
TxCalib[:, 2:] = caldata[:, 3:]
TxCalib[:, 0] = caldata[:, 0]
rcvCalib = np.zeros([caldata.shape[0], 3])
if par.use_sc:
Msc_cal = []
for nc in range(ncal):
indr = np.where(caldata[:, 0] == calID[nc])[0]
rcvCalib[indr] = rcv[caldata[indr, 2].astype(int) - 1, :]
if par.use_sc:
Msc_cal.append(sp.csr_matrix(
(np.ones([indr.size, ]),
(range(indr.size), caldata[indr, 2]-1)),
shape=(indr.size, nstation)))
else:
ncal = 0
time_calibration = np.array([])
# initial velocity model
if Vinit.size == 1:
Slowness = 1. / (Vinit * np.ones([nnodes, 1]))
elif Vinit.size == nnodes:
Slowness = 1. / Vinit
else:
print("invalid Velocity Model")
sys.stdout.flush()
return 0
# Hypocenter
if numberOfEvents > 0 and Hypo0.shape[0] != numberOfEvents:
print("invalid Hypocenters0 format\n")
sys.stdout.flush()
return 0
else:
Hypocenters = Hypo0.copy()
# number of threads
nThreadsSystem = cpu_count()
nThreads = np.min((threads, nThreadsSystem))
global Mesh3D, Dimensions
# build mesh object
Mesh3D = tmesh.Mesh3d(nodes, tetra=cells, method='DSPM', cell_slowness=0,
n_threads=nThreads, n_secondary=2, n_tertiary=1,
radius_factor_tertiary=2, translate_grid=1)
Mesh3D.set_slowness(Slowness)
Dimensions = np.empty(6)
Dimensions[0] = min(nodes[:, 0])
Dimensions[1] = max(nodes[:, 0])
Dimensions[2] = min(nodes[:, 1])
Dimensions[3] = max(nodes[:, 1])
Dimensions[4] = min(nodes[:, 2])
Dimensions[5] = max(nodes[:, 2])
ResidueNorm = np.zeros([par.maxit])
if par.invert_vel:
if par.use_sc:
U = sp.bsr_matrix(np.vstack((np.zeros([nnodes, 1]),
np.ones([nstation, 1]))))
nbre_param = nnodes + nstation
if par.max_sc > 0. and par.max_sc < 1.:
N = sp.bsr_matrix(
np.hstack((np.zeros([nstation, nnodes]), np.eye(nstation))))
NtN = (1. / par.max_sc**2) * N.T.dot(N)
else:
U = sp.csr_matrix(np.zeros([nnodes, 1]))
nbre_param = nnodes
# build matrix D
if vPoints.size > 0:
if par.verbose:
print('\nBuilding velocity data point matrix D\n')
sys.stdout.flush()
D = Mesh3D.compute_D(vPoints[:, 2:])
D = sp.hstack((D, sp.csr_matrix((D.shape[0], nstation)))).tocsr()
DtD = D.T @ D
nD = spl.norm(DtD)
# Build regularization matrix
if par.verbose:
print('\n...Building regularization matrix K\n')
sys.stdout.flush()
kx, ky, kz = Mesh3D.compute_K(order=2, taylor_order=2,
weighting=1, squared=0,
s0inside=0, additional_points=3)
KX = sp.hstack((kx, sp.csr_matrix((nnodes, nstation))))
KX_Square = KX.transpose().dot(KX)
KY = sp.hstack((ky, sp.csr_matrix((nnodes, nstation))))
KY_Square = KY.transpose().dot(KY)
KZ = sp.hstack((kz, sp.csr_matrix((nnodes, nstation))))
KZ_Square = KZ.transpose().dot(KZ)
KtK = KX_Square + KY_Square + par.wzK * KZ_Square
nK = spl.norm(KtK)
if nThreads == 1:
hypo_convergence = list(np.zeros(numberOfEvents, dtype=bool))
else:
manager = Manager()
        hypo_convergence = manager.list(np.zeros(numberOfEvents, dtype=bool))
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
for i in range(400):
r = requests.get(check_complete)
bar.update(1)
if r.status_code == requests.codes.ok:
bar.n = 400
bar.last_print_n = 400
bar.refresh()
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
break
else:
time.sleep(3)
elapsed = (i * 3) / 60
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
    The matching files are returned as a list for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of NetCDF files in the THREDDS catalog that match the regex tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
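# Illustrative sketch (the dataset name, dates, regex tag and variable list
# are placeholders): a typical request/download cycle using the helpers above.
# The variables argument is assumed to be a pre-populated structtype of var
# objects whose names match the NetCDF variables to extract.
def _example_fetch_ooi_data(uframe_dataset_name, variables,
                            start_date='2019-01-01T00:00:00.000Z',
                            end_date='2019-01-31T23:59:59.999Z'):
    data = M2M_Call(uframe_dataset_name, start_date, end_date)
    nclist = M2M_Files(data, tag='.*METBK.*\\.nc$')
    variables, time_converted = M2M_Data(nclist, variables)
    return variables, time_converted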
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m^2'
var_list[10].units = 'W/m^2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m^2'
var_list[13].units = 'W/m^2'
var_list[14].units = 'W/m^2'
var_list[15].units = 'W/m^2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
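# FLORT: three-channel fluorometer. The flort_sample streams below parse scattering, chlorophyll-a,
# CDOM and optical backscatter; the CE09OSPM profiler entry additionally includes int_ctd_pressure.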
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
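# FDCHP: direct-covariance flux package; only the time coordinate of the telemetered stream is requested here.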
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
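# DOSTA: dissolved oxygen optode; note that the oxygen variable names differ between the CTD-hosted
# and DCL-hosted streams requested below.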
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
import numpy as np
import transformations
import math
import os
import logging
from scipy.spatial import KDTree
from utils import vec_angel_diff, dist_in_range
# TODO this should be specified in a configuration file
LAST_FINGER_JOINT = 'finger_2_joint_1'
# TODO this should be defined in a super module
class InvalidTriangleException(Exception):
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
class RobotiqHand:
def __init__(self, env, hand_cache_file, hand_file):
self._or_env = env
self._or_env.Load(hand_file)
self._or_hand = self._or_env.GetRobots()[0]
self._plot_handler = []
# self._hand_mani = RobotiqHandVirtualManifold(self._or_hand)
self._hand_mani = RobotiqHandKDTreeManifold(self, hand_cache_file)
def __getattr__(self, attr): # composition
return getattr(self._or_hand, attr)
def get_hand_manifold(self):
return self._hand_mani
def plot_fingertip_contacts(self):
self._plot_handler = []
colors = [np.array((1, 0, 0)), np.array((0, 1, 0)), np.array((0, 0, 1))]
tip_link_ids = self.get_fingertip_links()
point_size = 0.005
for i in range(len(tip_link_ids)):
link = self._or_hand.GetLink(tip_link_ids[i])
T = link.GetGlobalMassFrame()
local_frame_rot = transformations.rotation_matrix(math.pi / 6., [0, 0, 1])[:3, :3]
T[:3, :3] = T[:3, :3].dot(local_frame_rot)
offset = T[0:3,0:3].dot(self.get_tip_offsets())
T[0:3,3] = T[0:3,3] + offset
position = T[:3, -1]
self._plot_handler.append(self._or_env.plot3(points=position, pointsize=point_size, colors=colors[i], drawstyle=1))
for j in range(3):
normal = T[:3, j]
self._plot_handler.append(self._or_env.drawarrow(p1=position, p2=position + 0.05 * normal, linewidth=0.001, color=colors[j]))
def get_tip_offsets(self):
return np.array([0.025, 0.006, 0.0])
def get_tip_transforms(self):
tip_link_ids = self.get_fingertip_links()
ret = []
for i in range(len(tip_link_ids)):
link = self._or_hand.GetLink(tip_link_ids[i])
T = link.GetGlobalMassFrame()
local_frame_rot = transformations.rotation_matrix(math.pi / 6., [0, 0, 1])[:3, :3]
T[:3, :3] = T[:3, :3].dot(local_frame_rot)
offset = T[0:3,0:3].dot(self.get_tip_offsets())
T[0:3,3] = T[0:3,3] + offset
ret.append(T)
return ret
def get_fingertip_links(self):
return ['finger_1_link_3', 'finger_2_link_3', 'finger_middle_link_3']
def get_non_fingertip_links(self):
return ['palm', 'finger_1_link_0', 'finger_1_link_2',
'finger_2_link_0', 'finger_2_link_1', 'finger_2_link_2',
'finger_middle_link_0', 'finger_middle_link_1', 'finger_middle_link_2']
def get_tip_pn(self):
ret = []
tfs = self.get_tip_transforms()
for t in tfs:
ret.append(np.concatenate((t[:3, 3], t[:3, 1])))
return np.asarray(ret)
def get_ori_tip_pn(self, hand_conf):
self._or_hand.SetTransform(np.identity(4))
self._or_hand.SetDOFValues(hand_conf)
return self.get_tip_pn()
def set_random_conf(self):
self._lower_limits, self._upper_limits = self._or_hand.GetDOFLimits()
self._upper_limits[1] = 0.93124747
self_collision = True
while self_collision:
ret = []
for i in range(2):
ret.append(np.random.uniform(self._lower_limits[i], self._upper_limits[i]))
self.SetDOFValues(ret)
self_collision = self._or_hand.CheckSelfCollision()
def get_contact_number(self):
return 3
def hand_obj_transform(self, hand_points, obj_points):
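"""Return the 4x4 transform that aligns the triangle of hand contact points with the
triangle of object contact points: the rotation comes from matching grasp-center frames,
the translation from the two triangle centroids."""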
# We align the hand with the object by matching a frame at the grasp center
frame_hand = self.get_tri_frame(hand_points) # [x; y; z] of this frame in the hand frame
frame_obj = self.get_tri_frame(obj_points) # [x; y; z] of this frame in the object frame
# Let's build a transformation matrix from this
T = transformations.identity_matrix()
# frame_hand is a rotation matrix that rotates the hand frame to our helper frame at the grasp center
T[0:3, 0:3] = np.dot(frame_obj, np.transpose(frame_hand)) # transpose == inverse for rotation matrices
# rotate the hand points to a frame that is aligned with the object frame, but located at the grasp center
# we call this frame rotated hand frame
new_hand_points = np.transpose(np.dot(T[0:3, 0:3], np.transpose(hand_points)))
# use this to compute the translation from object to hand frame
obj_c = np.sum(obj_points, axis=0) / 3. # the position of the grasp center in object frame
new_hand_c = np.sum(new_hand_points, axis=0) / 3. # the position of the grasp center in the rotated hand frame
# Finally, the translation is from origin to obj_c and then from there in the opposite direction of new_hand_c
T[:3, -1] = np.transpose(obj_c - new_hand_c)
return T
def get_tri_frame(self, points):
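"""Build a right-handed orthonormal frame from three points: origin at the centroid,
x towards the first point, z along the triangle normal and y = z x x."""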
ori = np.sum(points, axis=0) / 3.
x = (points[0, :] - ori) / np.linalg.norm(points[0, :] - ori)
e01 = points[1, :] - points[0, :]
e02 = points[2, :] - points[0, :]
e12 = points[2, :] - points[1, :]
if np.linalg.norm(e01) == 0.0 or np.linalg.norm(e02) == 0.0 or np.linalg.norm(e12) == 0.0:
raise InvalidTriangleException('Two points are identical')
z = (np.cross(e02, e01)) / np.linalg.norm(np.cross(e02, e01))
y = np.cross(z, x)
frame = np.transpose([x, y, z])
return np.asarray(frame)
def comply_fingertips(self, n_step=100):
"""
Opens and closes the hand until all and only fingertips are in contact.
:param n_step: maximal number of iterations
:return:
"""
joint_index = self.GetJoint(LAST_FINGER_JOINT).GetDOFIndex()
limit_value = self.GetDOFLimits()[1][joint_index]
n_step //= 2  # integer division, n_step is used as a loop count below
open_success = self.avoid_collision_at_fingers(n_step)
if not open_success:
return False, False
curr_conf = self.GetDOFValues()
step = (limit_value - curr_conf[joint_index]) / n_step
for i in range(n_step):
curr_conf[joint_index] += step
self.SetDOFValues(curr_conf)
if self.are_fingertips_in_contact():
return open_success, True
return open_success, False
def are_fingertips_in_contact(self):
links = self.get_fingertip_links()
for link in links:
if not self._or_env.CheckCollision(self.GetLink(link)):
return False
return True
def avoid_collision_at_fingers(self, n_step):
"""
Opens the hand until there is no collision anymore.
:param n_step: maximum number of sampling steps
:return: True if successful, False otherwise
"""
if n_step <= 0:
n_step = 1
finger_joint_idx = self.GetJoint(LAST_FINGER_JOINT).GetDOFIndex()
start_value = self.GetDOFValues()[finger_joint_idx] # Last joint value opens the fingers
step = (self.GetDOFLimits()[0][finger_joint_idx] - start_value) / n_step
for i in range(n_step):
if not self._or_env.CheckCollision(self._or_hand):
return True
self.SetDOFValues([start_value + i * step], [finger_joint_idx])
return False
# TODO should be generalized to any type of hand
class RobotiqHandKDTreeManifold:
"""
KD tree based hand manifold for the Robotiq hand
"""
CODE_DIMENSION = 6
NUM_SAMPLES = 10000
def __init__(self, or_robot, cache_file_name):
self._or_robot = or_robot
self._cache_file_name = cache_file_name
self._codes = None
self._hand_configurations = None
self._kd_tree = None
self._code_position_scale = 10.0
self._com_center_weight = 1.0
def set_parameters(self, com_center_weight=None):
if com_center_weight is not None:
self._com_center_weight = com_center_weight
def load(self):
if os.path.exists(self._cache_file_name):
logging.info('[RobotiqHandKDTreeManifold::load] Loading sample data set from disk.')
data = np.load(self._cache_file_name)
self._codes = data[:, :self.CODE_DIMENSION]
self._hand_configurations = data[:, self.CODE_DIMENSION:]
else:
logging.info('[RobotiqHandKDTreeManifold::load] No data set available. Generating new...')
self._sample_configuration_space()
data = np.concatenate((self._codes, self._hand_configurations), axis=1)
np.save(self._cache_file_name, data)
self._kd_tree = KDTree(self._codes)
# self.test_manifold()
def _sample_configuration_space(self):
lower_limits, upper_limits = self._or_robot.GetDOFLimits()
# TODO: can this be done in a nicer way? Closing the hand all the way does not make sense,
# TODO: hence this upper limit instead.
upper_limits[1] = 0.93124747
joint_ranges = np.array(upper_limits) - np.array(lower_limits)
interpolation_steps = int(math.sqrt(self.NUM_SAMPLES))
step_sizes = joint_ranges / interpolation_steps
config = np.array(lower_limits)
self._hand_configurations = np.zeros((self.NUM_SAMPLES, self._or_robot.GetDOF()))
self._codes = np.zeros((self.NUM_SAMPLES, self.CODE_DIMENSION))
#!/usr/bin/env python
"""
Homogeneous Transformation Matrices
"""
import math
import numpy as np
# Local modules
import trifinger_mujoco.utils as tfu
def are_equal(T1, T2, rtol=1e-5, atol=1e-8):
"""
Returns True if two homogeneous transformation are equal within a tolerance.
Parameters
----------
T1: array_like
First input homogeneous transformation
T2: array_like
Second input homogeneous transformation
rtol: float
The relative tolerance parameter.
atol: float
The absolute tolerance parameter.
Returns
-------
equal : bool
True if `T1` and `T2` are `almost` equal, False otherwise
See Also
--------
numpy.allclose: Contains the details about the tolerance parameters
"""
M1 = np.array(T1, dtype=np.float64, copy=True)
M1 /= M1[3, 3]
M2 = np.array(T2, dtype=np.float64, copy=True)
M2 /= M2[3, 3]
return np.allclose(M1, M2, rtol, atol)
def between_axes(axis_a, axis_b):
"""
Compute the transformation that aligns two vectors/axes.
Parameters
----------
axis_a: array_like
The initial axis
axis_b: array_like
The goal axis
Returns
-------
transform: array_like
The transformation that transforms `axis_a` into `axis_b`
"""
a_unit = tfu.vector.unit(axis_a)
b_unit = tfu.vector.unit(axis_b)
c = np.dot(a_unit, b_unit)
angle = np.arccos(c)
if np.isclose(c, -1.0) or np.allclose(a_unit, b_unit):
axis = tfu.vector.perpendicular(b_unit)
else:
axis = tfu.vector.unit(np.cross(a_unit, b_unit))
transform = tfu.axis_angle.to_transform(axis, angle)
return transform
def inverse(transform):
"""
Compute the inverse of an homogeneous transformation.
.. note:: This function is more efficient than :obj:`numpy.linalg.inv` given
the special properties of homogeneous transformations.
Parameters
----------
transform: array_like
The input homogeneous transformation
Returns
-------
inv: array_like
The inverse of the input homogeneous transformation
"""
R = transform[:3, :3].T
p = transform[:3, 3]
inv = np.eye(4)
inv[:3, :3] = R
inv[:3, 3] = np.dot(-R, p)
return inv
def random(max_position=1.0):
"""
Generate a random homogeneous transformation.
Parameters
----------
max_position: float, optional
Maximum value for the position components of the transformation
Returns
-------
T: array_like
The random homogeneous transformation
Examples
--------
>>> import numpy as np
>>> import trifinger_mujoco.utils as tfu
>>> T = tfu.transform.random()
>>> Tinv = tfu.transform.inverse(T)
>>> np.allclose(np.dot(T, Tinv), np.eye(4))
True
"""
quat = tfu.quaternion.random()
T = tfu.quaternion.to_transform(quat)
T[:3, 3] = np.random.rand(3) * max_position
return T
def to_axis_angle(transform):
"""
Return rotation angle and axis from rotation matrix.
Parameters
----------
transform: array_like
The input homogeneous transformation
Returns
-------
axis: array_like
axis around which rotation occurs
angle: float
angle of rotation
point: array_like
point around which the rotation is performed
Examples
--------
>>> import numpy as np
>>> import trifinger_mujoco.utils as tfu
>>> axis = np.random.sample(3) - 0.5
>>> angle = (np.random.sample(1) - 0.5) * (2*np.pi)
>>> point = np.random.sample(3) - 0.5
>>> T0 = tfu.axis_angle.to_transform(axis, angle, point)
>>> axis, angle, point = tfu.transform.to_axis_angle(T0)
>>> T1 = tfu.axis_angle.to_transform(axis, angle, point)
>>> tfu.transform.are_equal(T0, T1)
True
"""
R = np.array(transform, dtype=np.float64, copy=False)
R33 = R[:3, :3]
# direction: unit eigenvector of R33 corresponding to eigenvalue of 1
w, W = np.linalg.eig(R33.T)
i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
if not len(i):
raise ValueError("no unit eigenvector corresponding to eigenvalue 1")
axis = np.real(W[:, i[-1]]).squeeze()
# point: unit eigenvector of R corresponding to eigenvalue of 1
w, Q = np.linalg.eig(R)
i = np.where(abs(np.real(w) - 1.0) < 1e-8)[0]
from __future__ import division, print_function, absolute_import
import numpy as np
from numpy.testing import assert_array_equal, assert_equal
from scipy.optimize._constraints import (NonlinearConstraint, Bounds,
PreparedConstraint)
from scipy.optimize._trustregion_constr.canonical_constraint \
import CanonicalConstraint, initial_constraints_as_canonical
def create_quadratic_function(n, m, rng):
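"""Return (fun, jac, hess) for a random vector-valued quadratic map with m outputs and
n inputs, f_i(x) = a_i + A_i x + 0.5 * x^T H_i x; hess(x, v) returns sum_i v_i * H_i."""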
a = rng.rand(m)
A = rng.rand(m, n)
H = rng.rand(m, n, n)
HT = np.transpose(H, (1, 2, 0))
def fun(x):
return a + A.dot(x) + 0.5 * H.dot(x).dot(x)
def jac(x):
return A + H.dot(x)
def hess(x, v):
return HT.dot(v)
return fun, jac, hess
def test_bounds_cases():
# Test 1: no constraints.
user_constraint = Bounds(-np.inf, np.inf)
x0 = np.array([-1, 2])
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 0)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, np.empty((0, 2)))
assert_array_equal(J_ineq, np.empty((0, 2)))
assert_array_equal(c.keep_feasible, [])
# Test 2: infinite lower bound.
user_constraint = Bounds(-np.inf, [0, np.inf, 1], [False, True, True])
x0 = np.array([-1, -2, -3], dtype=float)
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 2)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [-1, -4])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq, np.array([[1, 0, 0], [0, 0, 1]]))
assert_array_equal(c.keep_feasible, [False, True])
# Test 3: infinite upper bound.
user_constraint = Bounds([0, 1, -np.inf], np.inf, [True, False, True])
x0 = np.array([1, 2, 3], dtype=float)
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 2)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [-1, -1])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq, np.array([[-1, 0, 0], [0, -1, 0]]))
assert_array_equal(c.keep_feasible, [True, False])
# Test 4: interval constraint.
user_constraint = Bounds([-1, -np.inf, 2, 3], [1, np.inf, 10, 3],
[False, True, True, True])
x0 = np.array([0, 10, 8, 5])
prepared_constraint = PreparedConstraint(user_constraint, x0, False)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_equal(c.n_eq, 1)
assert_equal(c.n_ineq, 4)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [2])
assert_array_equal(c_ineq, [-1, -2, -1, -6])
J_eq, J_ineq = c.jac(x0)
assert_array_equal(J_eq, [[0, 0, 0, 1]])
assert_array_equal(J_ineq, [[1, 0, 0, 0],
[0, 0, 1, 0],
[-1, 0, 0, 0],
[0, 0, -1, 0]])
assert_array_equal(c.keep_feasible, [False, True, False, True])
def test_nonlinear_constraint():
n = 3
m = 5
rng = np.random.RandomState(0)
x0 = rng.rand(n)
fun, jac, hess = create_quadratic_function(n, m, rng)
f = fun(x0)
J = jac(x0)
lb = [-10, 3, -np.inf, -np.inf, -5]
ub = [10, 3, np.inf, 3, np.inf]
user_constraint = NonlinearConstraint(
fun, lb, ub, jac, hess, [True, False, False, True, False])
for sparse_jacobian in [False, True]:
prepared_constraint = PreparedConstraint(user_constraint, x0,
sparse_jacobian)
c = CanonicalConstraint.from_PreparedConstraint(prepared_constraint)
assert_array_equal(c.n_eq, 1)
assert_array_equal(c.n_ineq, 4)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [f[1] - lb[1]])
assert_array_equal(c_ineq, [f[3] - ub[3], lb[4] - f[4],
f[0] - ub[0], lb[0] - f[0]])
J_eq, J_ineq = c.jac(x0)
if sparse_jacobian:
J_eq = J_eq.toarray()
J_ineq = J_ineq.toarray()
assert_array_equal(J_eq, J[1, None])
assert_array_equal(J_ineq, np.vstack((J[3], -J[4], J[0], -J[0])))
v_eq = rng.rand(c.n_eq)
v_ineq = rng.rand(c.n_ineq)
v = np.zeros(m)
v[1] = v_eq[0]
v[3] = v_ineq[0]
v[4] = -v_ineq[1]
v[0] = v_ineq[2] - v_ineq[3]
assert_array_equal(c.hess(x0, v_eq, v_ineq), hess(x0, v))
assert_array_equal(c.keep_feasible, [True, False, True, True])
def test_concatenation():
rng = np.random.RandomState(0)
n = 4
x0 = rng.rand(n)
f1 = x0
J1 = np.eye(n)
lb1 = [-1, -np.inf, -2, 3]
ub1 = [1, np.inf, np.inf, 3]
bounds = Bounds(lb1, ub1, [False, False, True, False])
fun, jac, hess = create_quadratic_function(n, 5, rng)
f2 = fun(x0)
J2 = jac(x0)
lb2 = [-10, 3, -np.inf, -np.inf, -5]
ub2 = [10, 3, np.inf, 5, np.inf]
nonlinear = NonlinearConstraint(
fun, lb2, ub2, jac, hess, [True, False, False, True, False])
for sparse_jacobian in [False, True]:
bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)
c1 = CanonicalConstraint.from_PreparedConstraint(bounds_prepared)
c2 = CanonicalConstraint.from_PreparedConstraint(nonlinear_prepared)
c = CanonicalConstraint.concatenate([c1, c2], sparse_jacobian)
assert_equal(c.n_eq, 2)
assert_equal(c.n_ineq, 7)
c_eq, c_ineq = c.fun(x0)
assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
assert_array_equal(c_ineq, [lb1[2] - f1[2], f1[0] - ub1[0],
lb1[0] - f1[0], f2[3] - ub2[3],
lb2[4] - f2[4], f2[0] - ub2[0],
lb2[0] - f2[0]])
J_eq, J_ineq = c.jac(x0)
if sparse_jacobian:
J_eq = J_eq.toarray()
J_ineq = J_ineq.toarray()
assert_array_equal(J_eq, np.vstack((J1[3], J2[1])))
assert_array_equal(J_ineq, np.vstack((-J1[2], J1[0], -J1[0], J2[3],
-J2[4], J2[0], -J2[0])))
v_eq = rng.rand(c.n_eq)
v_ineq = rng.rand(c.n_ineq)
v = np.zeros(5)
v[1] = v_eq[1]
v[3] = v_ineq[3]
v[4] = -v_ineq[4]
v[0] = v_ineq[5] - v_ineq[6]
H = c.hess(x0, v_eq, v_ineq).dot(np.eye(n))
assert_array_equal(H, hess(x0, v))
assert_array_equal(c.keep_feasible,
[True, False, False, True, False, True, True])
def test_empty():
x = np.array([1, 2, 3])
c = CanonicalConstraint.empty(3)
assert_equal(c.n_eq, 0)
assert_equal(c.n_ineq, 0)
c_eq, c_ineq = c.fun(x)
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, [])
J_eq, J_ineq = c.jac(x)
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq, np.empty((0, 3)))
H = c.hess(x, None, None).toarray()
assert_array_equal(H, np.zeros((3, 3)))
def test_initial_constraints_as_canonical():
# rng is only used to generate the coefficients of the quadratic
# function that is used by the nonlinear constraint.
rng = np.random.RandomState(0)
x0 = np.array([0.5, 0.4, 0.3, 0.2])
n = len(x0)
lb1 = [-1, -np.inf, -2, 3]
ub1 = [1, np.inf, np.inf, 3]
bounds = Bounds(lb1, ub1, [False, False, True, False])
fun, jac, hess = create_quadratic_function(n, 5, rng)
lb2 = [-10, 3, -np.inf, -np.inf, -5]
ub2 = [10, 3, np.inf, 5, np.inf]
nonlinear = NonlinearConstraint(
fun, lb2, ub2, jac, hess, [True, False, False, True, False])
for sparse_jacobian in [False, True]:
bounds_prepared = PreparedConstraint(bounds, x0, sparse_jacobian)
nonlinear_prepared = PreparedConstraint(nonlinear, x0, sparse_jacobian)
f1 = bounds_prepared.fun.f
J1 = bounds_prepared.fun.J
f2 = nonlinear_prepared.fun.f
J2 = nonlinear_prepared.fun.J
c_eq, c_ineq, J_eq, J_ineq = initial_constraints_as_canonical(
n, [bounds_prepared, nonlinear_prepared], sparse_jacobian)
assert_array_equal(c_eq, [f1[3] - lb1[3], f2[1] - lb2[1]])
#!/usr/bin/env python
# encoding: utf-8
# General utility methods.
#
# https://github.com/stefanvanberkum/CD-ABSC
#
# Adapted from Trusca, Wassenberg, Frasincar and Dekker (2020).
# https://github.com/mtrusca/HAABSA_PLUS_PLUS
#
# <NAME>., <NAME>., <NAME>., <NAME>. (2020) A Hybrid Approach for Aspect-Based Sentiment Analysis Using
# Deep Contextual Word Embeddings and Hierarchical Attention. In: <NAME>., <NAME>., <NAME>. (eds) Web
# Engineering. ICWE 2020. Lecture Notes in Computer Science, vol 12128. Springer, Cham.
# https://doi.org/10.1007/978-3-030-50578-3_25
import numpy as np
from config import *
def batch_index(length, batch_size, n_iter=100, is_shuffle=True):
"""
Method obtained from Trusca et al. (2020), no original docstring provided.
:param length: total number of samples to index
:param batch_size: number of samples per batch
:param n_iter: number of passes over the data
:param is_shuffle: whether to shuffle the index order before each pass
:return: generator yielding a list of sample indices for each batch
"""
index = list(range(length))
for j in range(n_iter):
if is_shuffle:
np.random.shuffle(index)
for i in range(int(length / batch_size) + (1 if length % batch_size else 0)):
yield index[i * batch_size:(i + 1) * batch_size]
def load_word_id_mapping(word_id_file, encoding='utf8'):
"""
Method obtained from Trusca et al. (2020), original docstring below.
:param word_id_file: word-id mapping file path
:param encoding: file's encoding, for changing to unicode
:return: word-id mapping, like hello=5
"""
word_to_id = dict()
for line in open(word_id_file, encoding=encoding, errors='ignore'):
line = line.lower().split()
word_to_id[line[0]] = int(line[1])
print('\nload word-id mapping done!\n')
return word_to_id
def load_w2v(w2v_file, embedding_dim, is_skip=False):
"""
Method obtained from Trusca et al. (2020), no original docstring provided.
:param w2v_file: path to the word embedding file, one word and its vector per line
:param embedding_dim: dimensionality of the word embeddings
:param is_skip: whether to skip the first (header) line of the file
:return: word-to-index mapping and the embedding matrix
"""
fp = open(w2v_file)
if is_skip:
fp.readline()
w2v = []
word_dict = dict()
# [0,0,...,0] represent absent words.
w2v.append([0.] * embedding_dim)
cnt = 0
for line in fp:
cnt += 1
line = line.split()
if len(line) != embedding_dim + 1:
print('a bad word embedding: {}'.format(line[0]))
continue
w2v.append([float(v) for v in line[1:]])
word_dict[line[0]] = cnt
w2v = np.asarray(w2v, dtype=np.float32)
w2v_sum = np.sum(w2v, axis=0, dtype=np.float32)
div = np.divide(w2v_sum, cnt, dtype=np.float32)
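# Append the mean of all word vectors as an extra row; it is used for the target placeholder '$t$'.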
w2v = np.row_stack((w2v, div))
word_dict['$t$'] = (cnt + 1)
return word_dict, w2v
def change_y_to_onehot(y, pos_neu_neg=True):
"""
Method adapted from Trusca et al. (2020), no original docstring provided.
:param y: vector of polarities
:param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
:return:
"""
from collections import Counter
count = Counter(y)
if FLAGS.writable == 1:
with open(FLAGS.results_file, "a") as results:
results.write("Positive: " + str(count['1']) + ", Neutral: " + str(
count['0']) + ", Negative: " + str(count['-1']) + ", Total: " + str(sum(count.values())) + "\n")
print("Polarity count:", count)
if pos_neu_neg:
class_set = {'1', '0', '-1'}
else:
class_set = set(y)
n_class = len(class_set)
y_onehot_mapping = dict(zip(class_set, range(n_class)))
print("Polarity mapping:", y_onehot_mapping)
onehot = []
for label in y:
tmp = [0] * n_class
tmp[y_onehot_mapping[label]] = 1
onehot.append(tmp)
return np.asarray(onehot, dtype=np.int32), y_onehot_mapping
def change_y_to_onehot_keep(y, y_onehot_mapping, pos_neu_neg=True):
"""
Method adapted from Trusca et al. (2020), no original docstring provided.
:param y: vector of polarities
:param y_onehot_mapping: one-hot mapping to keep
:param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
:return:
"""
from collections import Counter
count = Counter(y)
if FLAGS.writable == 1:
with open(FLAGS.results_file, "a") as results:
results.write("Positive: " + str(count['1']) + ", Neutral: " + str(
count['0']) + ", Negative: " + str(count['-1']) + ", Total: " + str(sum(count.values())) + "\n")
print("Polarity count:", count)
if pos_neu_neg:
class_set = {'1', '0', '-1'}
else:
class_set = set(y)
n_class = len(class_set)
print("Polarity mapping:", y_onehot_mapping)
onehot = []
for label in y:
tmp = [0] * n_class
tmp[y_onehot_mapping[label]] = 1
onehot.append(tmp)
return np.asarray(onehot, dtype=np.int32), y_onehot_mapping
def load_inputs_twitter(input_file, word_id_file, sentence_len, type_='', is_r=True, target_len=10, encoding='utf8',
pos_neu_neg=True):
"""
Method adapted from Trusca et al. (2020), no original docstring provided.
:param input_file:
:param word_id_file:
:param sentence_len:
:param type_:
:param is_r:
:param target_len:
:param encoding:
:param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('Load word-to-id done!')
x, y, sen_len = [], [], []
x_r, sen_len_r = [], []
target_words = []
tar_len = []
all_target, all_sent, all_y = [], [], []
lines = open(input_file).readlines()
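# Each example spans three consecutive lines: the sentence with the target replaced by '$t$',
# the target phrase, and the polarity label.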
for i in range(0, len(lines), 3):
# Targets.
words = lines[i + 1].lower().split()
target = words
target_word = []
for w in words:
if w in word_to_id:
target_word.append(word_to_id[w])
length = min(len(target_word), target_len)
tar_len.append(length)
target_words.append(target_word[:length] + [0] * (target_len - length))
# Sentiment.
y.append(lines[i + 2].strip().split()[0])
# Left and right context.
words = lines[i].lower().split()
sent = words
words_l, words_r = [], []
flag = True
for word in words:
if word == '$t$':
flag = False
continue
if flag:
if word in word_to_id:
words_l.append(word_to_id[word])
else:
if word in word_to_id:
words_r.append(word_to_id[word])
if type_ == 'TD' or type_ == 'TC':
words_l = words_l[:sentence_len]
words_r = words_r[:sentence_len]
sen_len.append(len(words_l))
x.append(words_l + [0] * (sentence_len - len(words_l)))
tmp = words_r
if is_r:
tmp.reverse()
sen_len_r.append(len(tmp))
x_r.append(tmp + [0] * (sentence_len - len(tmp)))
all_sent.append(sent)
all_target.append(target)
else:
words = words_l + target_word + words_r
words = words[:sentence_len]
sen_len.append(len(words))
x.append(words + [0] * (sentence_len - len(words)))
all_y = y
y, y_onehot_mapping = change_y_to_onehot(y, pos_neu_neg=pos_neu_neg)
if type_ == 'TD':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
np.asarray(sen_len_r), np.asarray(y)
elif type_ == 'TC':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), np.asarray(sen_len_r), \
np.asarray(y), np.asarray(target_words), np.asarray(tar_len), np.asarray(all_sent), np.asarray(
all_target), np.asarray(all_y), y_onehot_mapping
elif type_ == 'IAN':
return np.asarray(x), np.asarray(sen_len), np.asarray(target_words), \
np.asarray(tar_len), np.asarray(y)
else:
return np.asarray(x), np.asarray(sen_len), np.asarray(y)
def load_inputs_twitter_keep(input_file, y_onehot_mapping, word_id_file, sentence_len, type_='', is_r=True,
target_len=10, encoding='utf8', pos_neu_neg=True):
"""
Method adapted from Trusca et al. (2020), no original docstring provided.
:param input_file:
:param y_onehot_mapping: one-hot mapping to keep
:param word_id_file:
:param sentence_len:
:param type_:
:param is_r:
:param target_len:
:param encoding:
:param pos_neu_neg: True if three possible polarities (positive, neutral and negative)
:return:
"""
if type(word_id_file) is str:
word_to_id = load_word_id_mapping(word_id_file)
else:
word_to_id = word_id_file
print('Load word-to-id done!')
x, y, sen_len = [], [], []
x_r, sen_len_r = [], []
target_words = []
tar_len = []
all_target, all_sent, all_y = [], [], []
# Read in txt file.
lines = open(input_file).readlines()
for i in range(0, len(lines), 3):
# Targets.
words = lines[i + 1].lower().split()
target = words
target_word = []
for w in words:
if w in word_to_id:
target_word.append(word_to_id[w])
l = min(len(target_word), target_len)
tar_len.append(l)
target_words.append(target_word[:l] + [0] * (target_len - l))
# Sentiment.
y.append(lines[i + 2].strip().split()[0])
# Left and right context.
words = lines[i].lower().split()
sent = words
words_l, words_r = [], []
flag = True
for word in words:
if word == '$t$':
flag = False
continue
if flag:
if word in word_to_id:
words_l.append(word_to_id[word])
else:
if word in word_to_id:
words_r.append(word_to_id[word])
if type_ == 'TD' or type_ == 'TC':
words_l = words_l[:sentence_len]
words_r = words_r[:sentence_len]
sen_len.append(len(words_l))
x.append(words_l + [0] * (sentence_len - len(words_l)))
tmp = words_r
if is_r:
tmp.reverse()
sen_len_r.append(len(tmp))
x_r.append(tmp + [0] * (sentence_len - len(tmp)))
all_sent.append(sent)
all_target.append(target)
else:
words = words_l + target_word + words_r
words = words[:sentence_len]
sen_len.append(len(words))
x.append(words + [0] * (sentence_len - len(words)))
all_y = y
y, y_onehot_mapping = change_y_to_onehot_keep(y, y_onehot_mapping, pos_neu_neg=pos_neu_neg)
if type_ == 'TD':
return np.asarray(x), np.asarray(sen_len), np.asarray(x_r), \
np.asarray(sen_len_r), np.asarray(y)
"""spec2nii module containing functions specific to interpreting Philips formats
Author: <NAME> <<EMAIL>>
Copyright (C) 2020 University of Oxford
"""
from datetime import datetime
from ast import literal_eval
import numpy as np
from spec2nii.nifti_orientation import NIFTIOrient, calc_affine
from spec2nii import nifti_mrs
from spec2nii import __version__ as spec2nii_ver
def read_sdat_spar_pair(sdat_file, spar_file, tag=None):
spar_params = read_spar(spar_file)
data = read_sdat(sdat_file,
spar_params['samples'],
spar_params['rows'])
if data.ndim < 4:
data = data.reshape((1, 1, 1) + data.shape)
# Move to right handed frame
data = data.conj()
# Dwelltime
dwelltime = 1.0 / float(spar_params["sample_frequency"])
# Meta
meta = spar_to_nmrs_hdrext(spar_params)
meta.set_standard_def('OriginalFile', [sdat_file.name])
if tag is not None:
meta.set_dim_info(0, tag)
# Orientation
if spar_params["volume_selection_enable"] == "yes":
affine = _philips_orientation(spar_params)
else:
# Use default affine
affine = np.diag(np.array([10000, 10000, 10000, 1]))
orientation = NIFTIOrient(affine)
return [nifti_mrs.NIfTI_MRS(data, orientation.Q44, dwelltime, meta), ]
def read_spar(filename):
'''Read the .spar file.
:param filename: file path
:return: dict of parameters read from spar file
:rtype: dict
'''
parameter_dict = {}
with open(filename, 'r') as f:
for line in f:
# ignore comments (!) and empty lines
if line == "\n" or line.startswith("!"):
continue
# Handle 'key : value' lines; parse the value as a Python literal where possible,
# otherwise keep it as a string (empty values become None).
key, value = map(str.strip, line.split(":", 1))
try:
val = literal_eval(value)
except (ValueError, SyntaxError):
if value == '':
val = None
else:
val = value
parameter_dict.update({key: val})
return parameter_dict
def read_sdat(filename, samples, rows):
'''Read the .sdat file.
:param filename: File path
:param samples: Number of spectral points
:param rows: Number of rows of data
'''
with open(filename, 'rb') as f:
raw = f.read()
floats = _vax_to_ieee_single_float(raw)
data_iter = iter(floats)
complex_iter = (complex(r, i) for r, i in zip(data_iter, data_iter))
raw_data = np.fromiter(complex_iter, "complex64")
raw_data = np.reshape(raw_data, (rows, samples))
# -*- coding: utf-8 -*-
__all__ = ['location_change', 'sondetype', 'metadata']
def location_change(lon, lat, dim='time', ilon=None, ilat=None, as_event=True, distance_threshold=10, window=180,
dates=None, **kwargs):
""" Convert location change to breakpoint series
Args:
lon (DataArray): longitudes
lat (DataArray): latitudes
dim (str): datetime dimension
ilon (float): location longitude
ilat (float): location latitude
as_event (bool): calculate location change as event (distance_threshold)
distance_threshold (float): distance threshold for events
window (int): event influence in days
**kwargs:
Returns:
DataArray : distances between locations
Notes:
distance_threshold : degree 0.1 == 11.1 km, 0.01 == 1.1 km changes
"""
import numpy as np
from xarray import DataArray, full_like
from ..fun.cal import distance
# count occurence of each coordinate pair
# use the most common (also the most recent?)
# to estimate distance from,
# era-interim has a distance of about 80km so only if larger it would make sense to split?
if not isinstance(lon, DataArray):
raise ValueError('requires a DataArray', type(lon))
if not isinstance(lat, DataArray):
raise ValueError('requires a DataArray', type(lat))
lon = lon.copy()
lat = lat.copy()
lon = lon.bfill(dim)
lat = lat.bfill(dim)
dist = full_like(lon, 0, dtype=float)
dist.name = 'distance'
dist.attrs['units'] = 'km'
fdistance = np.vectorize(distance)
ishape = lon.values.shape
lon = lon.values.flatten()
lat = lat.values.flatten()
if ilon is None and ilat is None:
if lon.size > 1:
# distance between more recent and less recent
tmp = fdistance(lon[1:], lat[1:], lon[:-1], lat[:-1])
tmp = np.append(tmp, tmp[-1])
else:
tmp = np.array([0])
"""
Copyright 2019 <NAME> <<EMAIL>>
This file is part of localreg.
localreg is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
localreg is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU Lesser General Public License
along with localreg. If not, see <http://www.gnu.org/licenses/>.
"""
# TODO
#
# One could consider making the kernels callable objects. These objects could
# then have a member function without if-testing, which is faster in case it
# is known that all datapoints are to be included. This is the case when
# frac!=None. It could also have a property for its width?
#
import numpy as np
import logging
logger = logging.getLogger("localreg")
logging.basicConfig()
def polyfit(x, y, x0, weights=None, degree=2):
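"""Weighted least-squares polynomial fit of y on x, evaluated at the points x0.
Returns a dict with the coefficients ('beta_fit') and the fitted values at x0 ('y_fit')."""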
if len(x) == 0:
return np.nan * np.ones_like(x0)
if weights is None:
weights = np.ones_like(x)
s = np.sqrt(weights)
X = x[:, None] ** np.arange(degree + 1)
X0 = x0[:, None] ** np.arange(degree + 1)
lhs = X * s[:, None]
rhs = y * s
# This is what NumPy uses for default from version 1.15 onwards,
# and what 1.14 uses when rcond=None. Computing it here ensures
# support for older versions of NumPy.
rcond = np.finfo(lhs.dtype).eps * max(*lhs.shape)
beta = np.linalg.lstsq(lhs, rhs, rcond=rcond)[0]
rslt = {"beta_fit": beta, "y_fit": X0.dot(beta)}
return rslt
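# Kernel functions: each maps the scaled distance t to a non-negative weight. All of them
# except gaussian and logistic have compact support, i.e. they are zero for |t| > 1.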
def rectangular(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 0.5
return res
def triangular(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 1 - np.abs(t[ind])
return res
def epanechnikov(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = 0.75 * (1 - t[ind] ** 2)
return res
def biweight(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = (15 / 16) * (1 - t[ind] ** 2) ** 2
return res
def triweight(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = (35 / 32) * (1 - t[ind] ** 2) ** 3
return res
def tricube(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = (70 / 81) * (1 - np.abs(t[ind]) ** 3) ** 3
return res
def gaussian(t):
res = (1 / np.sqrt(2 * np.pi)) * np.exp(-0.5 * t ** 2)
return res
def cosine(t):
res = np.zeros_like(t)
ind = np.where(np.abs(t) <= 1)
res[ind] = (np.pi / 4) * np.cos(np.pi * t[ind] / 2)
return res
def logistic(t):
res = 1 / (np.exp(t) + 2 + np.exp(-t))
return res
import h5py
import numpy as np
from matplotlib import pyplot as plt, animation
from numpy import conj
from numpy.fft import fft, ifft, ifftshift
def animate(i):
print(f'On frame {i}')
# Loading wavefunction data
psi_plus = data_file['wavefunction/psi_plus'][:, i]
psi_0 = data_file['wavefunction/psi_0'][:, i]
psi_minus = data_file['wavefunction/psi_minus'][:, i]
# Calculate densities
n_plus = abs(psi_plus) ** 2
n_0 = abs(psi_0) ** 2
n_minus = abs(psi_minus) ** 2
n = abs(psi_plus) ** 2 + abs(psi_0) ** 2 + abs(psi_minus) ** 2
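# The Q_* terms below appear to be Fourier transforms of spin-nematic (quadrupole) tensor
# densities built from the three spin components (interpretation inferred from the naming).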
Q_xx = fft(np.real(conj(psi_plus) * psi_minus) - 0.5 * (n_plus + n_minus) + n / 3)
Q_yy = fft(-np.real(conj(psi_plus) * psi_minus) - 0.5 * (n_plus + n_minus) + n / 3)
Q_zz = fft(-n_0 + n / 3)
Q_xy = fft(np.imag(conj(psi_plus) * psi_minus))
Q_xz = fft(-np.sqrt(2.) / 4 * (psi_0 * (conj(psi_minus - psi_minus))))
# -*- coding: utf-8 -*-
#
# Copyright (c) 2020, the cclib development team
#
# This file is part of cclib (http://cclib.github.io) and is distributed under
# the terms of the BSD 3-Clause License.
"""Calculation of DDEC charges based on data parsed by cclib."""
import copy
import random
import numpy
import logging
import math
import os
import sys
from cclib.method.calculationmethod import Method
from cclib.method.volume import electrondensity_spin
from cclib.parser.utils import convertor
from cclib.parser.utils import find_package
from typing import List
class MissingInputError(Exception):
pass
class DDEC6(Method):
"""DDEC6 charges."""
# All of these are required for DDEC6 charges.
required_attrs = ("homos", "mocoeffs", "nbasis", "gbasis")
def __init__(
self, data, volume, proatom_path=None, progress=None, loglevel=logging.INFO, logname="Log"
):
# Inputs are:
# data -- ccData object that describes the target molecule.
# volume -- Volume object that describes the target Cartesian grid.
# proatom_path -- path to proatom densities
# (directory containing atoms.h5 in horton or c2_001_001_000_400_075.txt in chargemol)
super(DDEC6, self).__init__(data, progress, loglevel, logname)
self.volume = volume
self.fragresults = None
self.proatom_path = proatom_path
if numpy.sum(self.data.coreelectrons) != 0:
# TODO: Pseudopotentials should be added back
pass
# Check whether proatom_path is a valid directory or not.
assert os.path.isdir(
proatom_path
), "Directory that contains proatom densities should be added as an input."
# Read in reference charges.
self.proatom_density = []
self.radial_grid_r = []
for atom_number in self.data.atomnos:
density, r = self._read_proatom(proatom_path, atom_number, 0)
self.proatom_density.append(density)
self.radial_grid_r.append(r)
def __str__(self):
"""Return a string representation of the object."""
return "DDEC6 charges of {}".format(self.data)
def __repr__(self):
"""Return a representation of the object."""
return "DDEC6({})".format(self.data)
def _check_required_attributes(self):
super(DDEC6, self)._check_required_attributes()
def _cartesian_dist(self, pt1, pt2):
""" Small utility function that calculates Euclidian distance between two points
pt1 and pt2 are numpy arrays representing a point in Cartesian coordinates. """
return numpy.sqrt(numpy.dot(pt1 - pt2, pt1 - pt2))
def _read_proatom(
self, directory, atom_num, charge # type = str # type = int # type = float
):
# type: (...) -> numpy.ndarray, numpy.ndarray
"""Return a list containing proatom reference densities."""
# TODO: Treat calculations with pseudopotentials
# TODO: Modify so that proatom densities are read only once for horton
# [https://github.com/cclib/cclib/pull/914#discussion_r464039991]
# File name format:
# ** Chargemol **
# c2_[atom number]_[nuclear charge]_[electron count]_[cutoff radius]_[# shells]
# ** Horton **
# atoms.h5
# File format:
# Starting from line 13, each line contains the charge densities for each shell
# If `charge` is not an integer, proatom densities have to be linearly interpolated between
# the densities of the ion/atom with floor(charge) and ceiling(charge)
charge_floor = int(math.floor(charge))
charge_ceil = int(math.ceil(charge))
chargemol_path_floor = os.path.join(
directory,
"c2_{:03d}_{:03d}_{:03d}_500_100.txt".format(
atom_num, atom_num, atom_num - charge_floor
),
)
chargemol_path_ceil = os.path.join(
directory,
"c2_{:03d}_{:03d}_{:03d}_500_100.txt".format(
atom_num, atom_num, atom_num - charge_ceil
),
)
horton_path = os.path.join(directory, "atoms.h5")
if os.path.isfile(chargemol_path_floor) or os.path.isfile(chargemol_path_ceil):
# Use chargemol proatom densities
# Each shell is .05 angstroms apart (uniform).
# *scalefactor* = 10.58354497764173 bohrs in module_global_parameter.f08
if atom_num <= charge_floor:
density_floor = numpy.array([0])
else:
density_floor = numpy.loadtxt(chargemol_path_floor, skiprows=12, dtype=float)
if atom_num >= charge_ceil:
density_ceil = numpy.array([0])
else:
density_ceil = numpy.loadtxt(chargemol_path_ceil, skiprows=12, dtype=float)
density = (charge_ceil - charge) * density_floor + (
charge - charge_floor
) * density_ceil
radiusgrid = numpy.arange(1, len(density) + 1) * 0.05
elif os.path.isfile(horton_path):
# Use horton proatom densities
assert find_package("h5py"), "h5py is needed to read in proatom densities from horton."
import h5py
with h5py.File(horton_path, "r") as proatomdb:
if atom_num <= charge_floor:
density_floor = numpy.array([0])
radiusgrid = numpy.array([0])
else:
keystring_floor = "Z={}_Q={:+d}".format(atom_num, charge_floor)
density_floor = numpy.asanyarray(list(proatomdb[keystring_floor]["rho"]))
# gridspec is specification of integration grid for proatom densities in horton.
# Example -- ['PowerRTransform', '1.1774580743206259e-07', '20.140888089596444', '41']
# is constructed using PowerRTransform grid
# with rmin = 1.1774580743206259e-07
# rmax = 20.140888089596444
# and ngrid = 41
# PowerRTransform is default in horton-atomdb.py.
gridtype, gridmin, gridmax, gridn = (
proatomdb[keystring_floor].attrs["rtransform"].split()
)
gridmin = convertor(float(gridmin), "bohr", "Angstrom")
gridmax = convertor(float(gridmax), "bohr", "Angstrom")
gridn = int(gridn)
# Convert byte to string in Python3
if sys.version[0] == "3":
gridtype = gridtype.decode("UTF-8")
# First verify that it is one of recognized grids
assert gridtype in [
"LinearRTransform",
"ExpRTransform",
"PowerRTransform",
], "Grid type not recognized."
if gridtype == "LinearRTransform":
# Linear transformation. r(t) = rmin + t*(rmax - rmin)/(npoint - 1)
gridcoeff = (gridmax - gridmin) / (gridn - 1)
radiusgrid = gridmin + numpy.arange(1, gridn + 1) * gridcoeff
elif gridtype == "ExpRTransform":
# Exponential transformation. r(t) = rmin*exp(t*log(rmax/rmin)/(npoint - 1))
gridcoeff = math.log(gridmax / gridmin) / (gridn - 1)
radiusgrid = gridmin * numpy.exp(numpy.arange(1, gridn + 1) * gridcoeff)
elif gridtype == "PowerRTransform":
# Power transformation. r(t) = rmin*t^power
# with power = log(rmax/rmin)/log(npoint)
gridcoeff = math.log(gridmax / gridmin) / math.log(gridn)
radiusgrid = gridmin * numpy.power(numpy.arange(1, gridn + 1), gridcoeff)
if atom_num <= charge_ceil:
density_ceil = numpy.array([0])
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
import mindspore as ms
import mindspore.nn as nn
from mindspore import Tensor
from mindspore import context
from mindspore.common.api import _cell_graph_executor
from mindspore.common.parameter import Parameter
from mindspore.ops import composite as C
from mindspore.ops import operations as P
from tests.ut.python.ops.test_math_ops import VirtualLoss
grad_all = C.GradOperation(get_all=True)
class NetWithLoss(nn.Cell):
def __init__(self, network):
super(NetWithLoss, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x):
predict = self.network(x)
return self.loss(predict)
class GradWrap(nn.Cell):
def __init__(self, network):
super(GradWrap, self).__init__()
self.network = network
def construct(self, x):
return grad_all(self.network)(x)
class NetWithLossTwoInput(nn.Cell):
def __init__(self, network):
super(NetWithLossTwoInput, self).__init__()
self.loss = VirtualLoss()
self.network = network
def construct(self, x, y):
predict = self.network(x, y)
return self.loss(predict)
class NetWithReduceLoss(nn.Cell):
def __init__(self, network):
super(NetWithReduceLoss, self).__init__()
self.mean = P.ReduceMean(keep_dims=False)
self.network = network
def construct(self, x, y):
predict = self.network(x, y)
return self.mean(predict, ())
class GradWrapTwoInput(nn.Cell):
def __init__(self, network):
super(GradWrapTwoInput, self).__init__()
self.network = network
def construct(self, x, y):
return grad_all(self.network)(x, y)
def compile_graph(net, parallel_mode, device_num, x):
context.set_auto_parallel_context(device_num=device_num, global_rank=0, parallel_mode=parallel_mode)
net.set_auto_parallel()
net.set_train()
_cell_graph_executor.compile(net, x)
def compile_graph_two_input(net, parallel_mode, device_num, x, y):
context.set_auto_parallel_context(device_num=device_num, global_rank=0, parallel_mode=parallel_mode)
net.set_auto_parallel()
net.set_train()
_cell_graph_executor.compile(net, x, y)
def test_reshape_matmul():
"""
Feature: distribute operator reshape in auto parallel.
Description: reshape - matmul net in auto parallel.
Expectation: compile done without error.
"""
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.reshape = P.Reshape()
self.matmul = P.MatMul()
self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
def construct(self, x):
out = self.reshape(x, (64, 28))
out = self.matmul(out, self.matmul_weight)
return out
size = 8
x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
net = GradWrap(NetWithLoss(Net()))
compile_graph(net, "auto_parallel", size, x)
def test_reshape_reshape():
"""
Feature: distribute operator reshape in auto parallel.
Description: reshape - reshape net in auto parallel.
Expectation: compile done without error.
"""
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.reshape = P.Reshape()
self.relu = P.ReLU()
def construct(self, x):
x = self.relu(x)
out = self.reshape(x, (64, 28))
out = self.reshape(out, (64, 28, 1))
return out
size = 8
x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
net = GradWrap(NetWithLoss(Net()))
compile_graph(net, "auto_parallel", size, x)
def test_reshape_auto_1():
"""
Feature: distribute operator reshape in auto parallel.
Description: relu - reshape - matmul net in auto parallel.
Expectation: compile done without error.
"""
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.relu = P.ReLU()
self.reshape = P.Reshape()
self.matmul = P.MatMul()
self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
def construct(self, x):
out = self.relu(x)
out = self.reshape(out, (64, 28))
out = self.matmul(out, self.matmul_weight)
return out
size = 8
x = Tensor(np.ones([8 * size, 28, 1, 1]), dtype=ms.float32)
net = GradWrap(NetWithLoss(Net()))
compile_graph(net, "auto_parallel", size, x)
def test_reshape_auto_2():
"""
Feature: distribute operator reshape in auto parallel.
Description: reshape - matmul - reshape net in auto parallel.
Expectation: compile done without error.
"""
class Net(nn.Cell):
def __init__(self):
super().__init__()
self.relu = P.ReLU()
self.reshape = P.Reshape()
self.matmul = P.MatMul()
self.add_weight = Parameter(Tensor(np.ones([128, 32]), dtype=ms.float32), name="weight1")
self.matmul_weight = Parameter(Tensor(np.ones([28, 64]), dtype=ms.float32), name="weight")
def construct(self, x):
out = self.relu(x)
out = self.reshape(out, (64, 28))
out = self.matmul(out, self.matmul_weight)
out = self.reshape(out, (128, 32))
out = out + self.add_weight
return out
size = 8
x = Tensor( | np.ones([8 * size, 28, 1, 1]) | numpy.ones |
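# A small numpy-only sketch of the shape bookkeeping behind the reshapes in the
# tests above: with size = 8 the input tensor (8*size, 28, 1, 1) holds exactly
# 64*28 elements, which is what makes the reshape to (64, 28) legal.
import numpy as np

size = 8
x = np.ones([8 * size, 28, 1, 1])
assert x.size == 64 * 28
print(x.reshape(64, 28).shape)   # (64, 28)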
from typing import Tuple
import numpy as np
import pickle
from tqdm.std import tqdm
from QL.QLearning import QLearning
from SetteEMezzoGame import SetteEMezzo
ALPHA = 0.9
class SetteEMezzoQL(SetteEMezzo, QLearning):
def __init__(self, n_episodes, eps_start=0.3, lr=0.001, policy=(-1, -1)) -> None:
QLearning.__init__(self, n_episodes, eps_start, lr)
SetteEMezzo.__init__(self)
self.Q_values = self.init_q_values()
self.state_action = []
self.done = False
self.policy = policy
def init_q_values(self):
q_values = {}
for cards_value in np.arange(0.5, 8.0, 0.5):
for bust_prob in range(0, 105, 5):
q_values[(cards_value, bust_prob)] = {}
for action in self.actions:
q_values[(cards_value, bust_prob)][action] = 0
return q_values
def get_action(self, cards_value_bust_prob: Tuple[float, float]):
if | np.random.uniform(0.1) | numpy.random.uniform |
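# A minimal sketch of the epsilon-greedy selection that get_action is building
# toward, using the same dict-of-dicts Q-table layout as init_q_values above.
# The action names and Q-values here are hypothetical, not from the game code.
import numpy as np

def epsilon_greedy(q_values, state, actions, eps):
    if np.random.uniform() < eps:            # explore with probability eps
        return np.random.choice(actions)
    state_q = q_values[state]                # otherwise exploit the best known action
    return max(state_q, key=state_q.get)

actions = ["hit", "stick"]
q_values = {(4.5, 30): {"hit": 0.2, "stick": 0.1}}
print(epsilon_greedy(q_values, (4.5, 30), actions, eps=0.3))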
import os
import sys
import subprocess
import time
import importlib
import numpy as np
import fitsio
from pyrecon.iterative_fft_particle import OriginalIterativeFFTParticleReconstruction, IterativeFFTParticleReconstruction
from pyrecon.utils import distance
from test_multigrid import get_random_catalog
def test_no_nrandoms():
boxsize = 1000.
data = get_random_catalog(boxsize=boxsize,seed=42)
recon = IterativeFFTParticleReconstruction(f=0.8,bias=2.,los='x',nthreads=4,boxcenter=boxsize/2.,boxsize=boxsize,nmesh=8,dtype='f8')
recon.assign_data(data['Position'],data['Weight'])
assert not recon.has_randoms
recon.set_density_contrast()
assert np.allclose(np.mean(recon.mesh_delta), 0.)
recon.run()
assert np.all(np.abs(recon.read_shifts(data['Position'])) < 5.)
for name in ['boxsize', 'boxcenter', 'offset', 'cellsize']:
assert np.allclose(getattr(recon, name), getattr(recon.info, name))
def test_dtype():
data = get_random_catalog(seed=42)
randoms = get_random_catalog(seed=84)
for los in [None, 'x']:
recon_f4 = IterativeFFTParticleReconstruction(f=0.8,bias=2.,nthreads=4,positions=randoms['Position'],nmesh=64,los=los,dtype='f4')
recon_f4.assign_data(data['Position'],data['Weight'])
recon_f4.assign_randoms(randoms['Position'],randoms['Weight'])
recon_f4.set_density_contrast()
assert recon_f4.mesh_delta.dtype.itemsize == 4
recon_f4.run()
assert recon_f4.mesh_psi[0].dtype.itemsize == 4
shifts_f4 = recon_f4.read_shifts(data['Position'].astype('f8'),field='disp+rsd')
assert shifts_f4.dtype.itemsize == 8
shifts_f4 = recon_f4.read_shifts(data['Position'].astype('f4'),field='disp+rsd')
assert shifts_f4.dtype.itemsize == 4
recon_f8 = IterativeFFTParticleReconstruction(f=0.8,bias=2.,nthreads=4,positions=randoms['Position'],nmesh=64,los=los,dtype='f8')
recon_f8.assign_data(data['Position'],data['Weight'])
recon_f8.assign_randoms(randoms['Position'],randoms['Weight'])
recon_f8.set_density_contrast()
assert recon_f8.mesh_delta.dtype.itemsize == 8
recon_f8.run()
assert recon_f8.mesh_psi[0].dtype.itemsize == 8
shifts_f8 = recon_f8.read_shifts(data['Position'],field='disp+rsd')
assert shifts_f8.dtype.itemsize == 8
assert not np.all(shifts_f4 == shifts_f8)
assert np.allclose(shifts_f4, shifts_f8, atol=1e-2, rtol=1e-2)
def test_mem():
data = get_random_catalog(seed=42)
randoms = get_random_catalog(seed=84)
from pyrecon.utils import MemoryMonitor
with MemoryMonitor() as mem:
recon = IterativeFFTParticleReconstruction(f=0.8,bias=2.,nthreads=4,positions=randoms['Position'],nmesh=256,dtype='f8')
mem('init')
recon.assign_data(data['Position'],data['Weight'])
mem('data')
recon.assign_randoms(randoms['Position'],randoms['Weight'])
mem('randoms')
recon.set_density_contrast()
mem('delta')
recon.run()
mem('recon') # 3 meshes
def test_wisdom():
def remove(fn):
try: os.remove(fn)
except OSError: pass
default_wisdom_fn = 'wisdom.shape-64-64-64.type-complex128.nthreads-1.npy'
remove(default_wisdom_fn)
recon = IterativeFFTParticleReconstruction(f=0.8, bias=2, los='z', boxsize=1000, boxcenter=500, nmesh=64, fft_engine='fftw', fft_plan='measure', nthreads=1)
# Wisdom created and accessible
assert getattr(recon, 'fft_wisdom', None)
assert not os.path.isfile(default_wisdom_fn)
recon = IterativeFFTParticleReconstruction(f=0.8, bias=2, los='z', boxsize=1000, boxcenter=500, nmesh=64, fft_engine='fftw', fft_plan='measure', save_fft_wisdom=True, nthreads=1)
# Wisdom created and accessible
# Wisdom was written to default wisdom file
assert os.path.isfile(default_wisdom_fn)
new_wisdom_fn = 'new_wisdomfile.npy'
remove(new_wisdom_fn)
recon = IterativeFFTParticleReconstruction(f=0.8, bias=2, los='z', boxsize=1000, boxcenter=500, nmesh=64, fft_engine='fftw', fft_plan='measure', save_fft_wisdom=new_wisdom_fn, nthreads=1)
# Wisdom written to custom file
assert os.path.isfile(new_wisdom_fn)
# Wisdom written to both files is the same
assert tuple(np.load(default_wisdom_fn)) == tuple(np.load(new_wisdom_fn))
remove(default_wisdom_fn)
remove(new_wisdom_fn)
def test_iterative_fft_particle_wrap():
size = 100000
boxsize = 1000
for origin in [-500, 0, 500]:
boxcenter = boxsize/2 + origin
data = get_random_catalog(size, boxsize, seed=42)
# set one of the data positions to be outside the fiducial box by hand
data['Position'][-1] = np.array([boxsize, boxsize, boxsize]) + 1
data['Position'] += boxcenter
randoms = get_random_catalog(size, boxsize, seed=42)
# set one of the random positions to be outside the fiducial box by hand
randoms['Position'][-1] = np.array([0, 0, 0]) - 1
randoms['Position'] += boxcenter
recon = IterativeFFTParticleReconstruction(f=0.8, bias=2, los='z', boxsize=boxsize, boxcenter=boxcenter, nmesh=64, wrap=True)
# following steps should run without error if wrapping is correctly implemented
recon.assign_data(data['Position'],data['Weight'])
recon.assign_randoms(randoms['Position'],randoms['Weight'])
recon.set_density_contrast()
recon.run()
# following steps test the implementation coded into standalone pyrecon code
for field in ['rsd', 'disp', 'disp+rsd']:
shifts = recon.read_shifts('data', field=field)
diff = data['Position'] - shifts
positions_rec = (diff - recon.offset) % recon.boxsize + recon.offset
assert | np.all(positions_rec <= origin + boxsize) | numpy.all |
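# A standalone sketch of the periodic wrapping tested above: positions are folded
# back into [offset, offset + boxsize). The numbers are illustrative only.
import numpy as np

boxsize, offset = 1000.0, -500.0
positions = np.array([-501.0, 0.0, 499.0, 1501.0])
wrapped = (positions - offset) % boxsize + offset
assert np.all((wrapped >= offset) & (wrapped < offset + boxsize))
print(wrapped)   # [ 499.    0.  499. -499.]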
#utils
import os
import random
import numpy as np
from shutil import copyfile
import matplotlib.pyplot as plt  # plt is used to display images
from PIL import Image
import cv2 as cv
# TensorFlow and tf.keras
import tensorflow as tf
from tensorflow.keras import Model
from tensorflow.keras.optimizers import RMSprop
from tensorflow import keras
from tensorflow.compat.v1.keras import backend as K
from keras.layers import Input, Lambda
from tensorflow.keras import layers
from tensorflow.keras.preprocessing.image import ImageDataGenerator
from tensorflow.keras.applications.inception_v3 import InceptionV3
from tensorflow.keras.models import load_model, Model
#yolo
from yolo import YOLO, detect_video
import yolo_utils
from tensorflow.keras.preprocessing import image
from yolo3.model import yolo_head, yolo_correct_boxes, preprocess_true_boxes, yolo_loss, yolo_body
import os
from yolo3.utils import get_random_data
current_path = os.path.dirname(__file__)
def yolo_non_max_suppression(scores, boxes, classes, max_boxes=10, iou_threshold=0.5):
"""
Apply non-max suppression (NMS) to the anchor boxes.
Arguments:
    scores - tensor of shape (None,), output of yolo_filter_boxes()
    boxes - tensor of shape (None, 4), output of yolo_filter_boxes(), already scaled to the image size (see below)
    classes - tensor of shape (None,), output of yolo_filter_boxes()
    max_boxes - integer, maximum number of predicted boxes to keep
    iou_threshold - real number, intersection-over-union (IoU) threshold.
Returns:
    scores - tensor of shape (, None), predicted score of each kept box
    boxes - tensor of shape (4, None), coordinates of the kept boxes
    classes - tensor of shape (, None), predicted class of each kept box
Note: the "None" dimension is clearly smaller than max_boxes; this function also changes the shapes of scores, boxes and classes, which is convenient for the next step.
"""
# max_boxes_tensor = K.variable(max_boxes,dtype="int32")  # used by tf.image.non_max_suppression()
# # K.get_session().run(K.variable([max_boxes_tensor]))  # initialize the variable max_boxes_tensor
# Use tf.image.non_max_suppression() to get the list of indices of the boxes we keep
nms_indices = tf.image.non_max_suppression(boxes, scores,max_boxes,iou_threshold)
# Use K.gather() to select the kept boxes
scores = K.gather(scores, nms_indices)
boxes = K.gather(boxes, nms_indices)
classes = K.gather(classes, nms_indices)
return scores, boxes, classes
def yolo_filter_boxes(box_confidence , boxes, box_class_probs, threshold = 0.3):
"""
Filter object and class confidences by a threshold.
Arguments:
    box_confidence - tensor of shape (19,19,5,1), the pc (object confidence) of every one of the 5 anchor boxes predicted in each of the 19x19 cells.
    boxes - tensor of shape (19,19,5,4), the (px,py,ph,pw) of all anchor boxes.
    box_class_probs - tensor of shape (19,19,5,80), the detection probabilities of all classes (c1,c2,c3,...,c80) for all anchor boxes in all cells.
    threshold - real number; a class prediction is kept only if its probability is above this value.
Returns:
    scores - tensor of shape (None,), the class probabilities of the kept boxes.
    boxes - tensor of shape (None,4), the (b_x, b_y, b_h, b_w) of the kept boxes
    classes - tensor of shape (None,), the class indices of the kept boxes
Note: "None" is used because the exact number of selected boxes is unknown in advance; it depends on the threshold.
For example, if there are 10 kept boxes, the actual shape of scores will be (10,)
"""
# Step 1: compute the box scores
box_scores = box_confidence * box_class_probs
# Step 2: find the index of the highest-scoring class and the corresponding score for each box
box_classes = K.argmax(box_scores, axis=-1)#(19*19*5*1)
box_class_scores = K.max(box_scores, axis=-1)  # most likely class: reduce the last dimension, (19*19*5*80) -> (19*19*5*1)
# Step 3: create a mask from the threshold
filtering_mask = (box_class_scores >= threshold)
# Apply the mask to scores, boxes and classes
scores = tf.boolean_mask(box_class_scores,filtering_mask)
boxes = tf.boolean_mask(boxes,filtering_mask)
classes = tf.boolean_mask(box_classes,filtering_mask)
return scores , boxes , classes
def yolo_boxes_to_corners(box_xy, box_wh):
"""Convert YOLO box predictions to bounding box corners."""
box_mins = box_xy - (box_wh / 2.)
box_maxes = box_xy + (box_wh / 2.)
return K.concatenate([
box_mins[..., 1:2], # y_min
box_mins[..., 0:1], # x_min
box_maxes[..., 1:2], # y_max
box_maxes[..., 0:1] # x_max
])
def yolo_eval(box_xy, box_wh, box_confidence, box_class_probs,image_shape,
max_boxes=15, score_threshold=0.2,iou_threshold=0.4):
"""
Convert the YOLO-encoded output (many anchor boxes) into predicted boxes together with their scores, coordinates and classes.
Arguments:
    yolo_outputs - output of the encoding model (for an image of shape (608,608,3)), containing 4 tensors:
                    box_confidence : tensor of shape (None, 19, 19, 5, 1)
                    box_xy : tensor of shape (None, 19, 19, 5, 2)
                    box_wh : tensor of shape (None, 19, 19, 5, 2)
                    box_class_probs: tensor of shape (None, 19, 19, 5, 80)
    image_shape - tensor of shape (2,), the dimensions of the input image, here (608., 608.)
    max_boxes - integer, maximum number of predicted boxes to keep
    score_threshold - real number, score threshold.
    iou_threshold - real number, intersection-over-union (IoU) threshold.
Returns:
    scores - tensor of shape (, None), predicted score of each kept box
    boxes - tensor of shape (4, None), coordinates of the kept boxes
    classes - tensor of shape (, None), predicted class of each kept box
"""
# Get the outputs of the YOLO model
# image_input = Input(shape=(416,416, 3))
# print("box_xy, box_wh :")
# print(box_xy, box_wh)
# Convert box centers to corner coordinates
boxes = yolo_boxes_to_corners(box_xy,box_wh)
# Filter by confidence score
scores, boxes, classes = yolo_filter_boxes(box_confidence, boxes, box_class_probs, score_threshold)
# Scale the boxes to fit the original image
boxes = yolo_utils.scale_boxes(boxes, image_shape)
# # Apply non-max suppression
# scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, max_boxes, iou_threshold)
return scores, boxes, classes
def pridect(origin_img,images,model):
result=model.predict(images,batch_size=1)
# print(result[0].shape)
# print(result[1].shape)
# print(result[2].shape)
anchor_mask = [[6,7,8], [3,4,5], [0,1,2]]
# num_layers = len(anchors)//3 # default setting
# yolo_outputs = args[:num_layers]
# y_true = args[num_layers:]
# input_shape = K.cast(K.shape( result[0])[1:3] * 32, K.dtype(y_true[0]))
# print(K.shape(result[0])[1:3] * 32)
input_shape =K.shape(result[0])[1:3] * 32
colors = yolo_utils.generate_colors(class_names)
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(result[0], anchors[anchor_mask[0]], 20,input_shape)
scores, boxes, classes=yolo_eval(box_xy, box_wh, box_confidence, box_class_probs,image_shape=(float(origin_img.size[1]),float(origin_img.size[0])))
for i in range(1,3):
box_xy, box_wh, box_confidence, box_class_probs = yolo_head(result[i], anchors[anchor_mask[i]], 20,input_shape)
tmp_scores, tmp_boxes, tmp_classes=yolo_eval(box_xy, box_wh, box_confidence, box_class_probs,image_shape=(float(origin_img.size[1]),float(origin_img.size[0])))
scores=tf.concat([scores,tmp_scores],axis=0)
boxes=tf.concat([boxes,tmp_boxes],axis=0)
classes=tf.concat([ classes, tmp_classes],axis=0)
# yolo_utils.draw_boxes(origin_img, scores, boxes, classes, class_names, colors)
# Apply non-max suppression
scores, boxes, classes = yolo_non_max_suppression(scores, boxes, classes, 15, 0.4)
yolo_utils.draw_boxes(origin_img, scores, boxes, classes, class_names, colors)
# print("scores.shape = " + str(scores.shape))
# print("boxes.shape = " + str(boxes.shape))
# print("classes.shape = " + str(classes.shape))
return origin_img
def camera_test(yolo_model):
# 0 is the camera index; with only one camera the default is 0
url = 'http://192.168.2.106:4747/video?640x480'
capture =cv.VideoCapture(0)
capture.set(cv.CAP_PROP_FRAME_WIDTH, 1920)
capture.set(cv.CAP_PROP_FRAME_HEIGHT,1080)
fourcc = cv.VideoWriter_fourcc(*'XVID')
out = cv.VideoWriter('road_Test_output.MP4',-1,25, (1920,1080))
# capture.set(cv.CAP_PROP_FPS,30)
while (True):
ref, frame = capture.read()
# frame = frame[:,::-1,:]
img = cv.resize(frame, (int(416), int(416)), interpolation=cv.INTER_NEAREST)
#cv2 frame to image
img = Image.fromarray(np.uint8(img))
x = image.img_to_array(img)/255.0
x = np.expand_dims(x, axis=0)
dec_image = np.vstack([x])
images=Image.fromarray( | np.uint8(frame) | numpy.uint8 |
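# A self-contained sketch of the tf.image.non_max_suppression call used in
# yolo_non_max_suppression above, run on dummy boxes (assumes TensorFlow 2.x
# eager execution; boxes are [y1, x1, y2, x2] and all values are illustrative).
import tensorflow as tf

boxes = tf.constant([[0.0, 0.0, 1.0, 1.0],
                     [0.0, 0.0, 0.9, 0.9],
                     [2.0, 2.0, 3.0, 3.0]])
scores = tf.constant([0.9, 0.8, 0.7])
keep = tf.image.non_max_suppression(boxes, scores, max_output_size=3, iou_threshold=0.5)
print(tf.gather(boxes, keep).numpy())   # the 0.8 box overlaps the 0.9 box and is suppressed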
from mpi4py import MPI
import numpy as np
import h5py
import matplotlib as mpl
mpl.use('Agg')
import matplotlib.pyplot as plt
plt.ioff()
comm = MPI.COMM_WORLD
size = comm.Get_size()
rank = comm.Get_rank()
# %% Set up grids, used for certain calculations
nx = 2048
k1 = np.fft.fftfreq(nx, d=1.0/nx)
k2 = np.fft.rfftfreq(nx, d=1.0/nx)
k2g, k1g = np.meshgrid(k2,k1)
lap = k2g**2 + k1g**2
#kd = (k2g>0)*(14.0**2)
kd = 0.0
k2kd = lap+kd
invlap = np.zeros(lap.shape)
invlap[k2kd>0] = -1.0 / k2kd[k2kd>0]
sqk = np.sqrt(-invlap)
# Whether to use energy norm or enstrophy norm
energyNorm = True
compute_pods = True
compute_dmd_modes = False
# Total number of snapshots to use in the POD (need +1 for POD)
totalsnaps = 256
snaps_per_rank = totalsnaps//size
times = | np.empty(totalsnaps) | numpy.empty |
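# A small-scale sketch of the spectral grid set up above (nx = 8 instead of 2048):
# rfftfreq gives the non-negative half of one axis, and the inverse Laplacian is
# filled only where k^2 > 0, so the k = 0 (mean) mode stays zero.
import numpy as np

nx = 8
k1 = np.fft.fftfreq(nx, d=1.0/nx)      # [0, 1, 2, 3, -4, -3, -2, -1]
k2 = np.fft.rfftfreq(nx, d=1.0/nx)     # [0, 1, 2, 3, 4]
k2g, k1g = np.meshgrid(k2, k1)
lap = k2g**2 + k1g**2
invlap = np.zeros(lap.shape)
invlap[lap > 0] = -1.0 / lap[lap > 0]
assert invlap[0, 0] == 0.0
assert np.allclose(invlap[lap > 0] * lap[lap > 0], -1.0)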
import numpy as np
from datetime import datetime
import cv2
import os
from PIL import Image
import torch
import torchvision
from torchvision import datasets, transforms, models
from dataset import Asbest_segmentation
from tqdm import tqdm
import matplotlib.pyplot as plt
import rawpy
from utils import parse_anno_file, create_mask_file, big_image_predict, AverageMeter
from apex import amp
lr = 1e-5
os.environ['CUDA_VISIBLE_DEVICES'] = '1'
path_to_data = 'asbest'
anno_stones = parse_anno_file(os.path.join(path_to_data, 'images', 'annotation.xml'))
anno_tr_stones = parse_anno_file(os.path.join(path_to_data, 'tr_stones', 'annotation.xml'))
transporter_file = os.path.join('asbest', 'transporter', '2020.03.16', 'TRANS_11:28:05_16-03-2020_36.png')
img_tr_stones_shape = (int(anno_tr_stones[0]['height']), int(anno_tr_stones[0]['width']))
stones_valid_indexes = | np.array([3, 7, 12, 15, 20, 30, 40], dtype=int) | numpy.array |
#!python3
import os
import argparse
import pandas as pd
import numpy as np
from glob import glob
from plot_module import *
def replace_grec(s):
return s.replace("\\alpha", "\\Delta G_{\\mathrm{min}}").replace("\\gamma", "\\Delta \\Delta G")
nbr_points = 100
if __name__ == '__main__':
parser = argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)
parser.add_argument('-a', '--age', required=True, type=float, dest="age")
parser.add_argument('-b', '--branches', required=True, type=int, dest="branches")
parser.add_argument('-o', '--output', required=True, type=str, dest="output")
parser.add_argument("--y_param_key", required=False, action='append', type=lambda kv: kv.split(":"),
dest='y_param_dict')
parser.add_argument('-i', '--input', required=True, type=str, nargs='+', dest="input")
args = parser.parse_args()
args.y_param_dict = dict(args.y_param_dict) if (args.y_param_dict is not None) else dict()
fig = plt.figure(figsize=(1920 / my_dpi, 1080 / my_dpi), dpi=my_dpi)
t_range = np.linspace(0, args.branches * args.age, nbr_points * args.branches)
y_param = "<dN/dN0>"
for prefix in args.input:
print(prefix)
array = []
for tsv_path in sorted(glob("{0}/*.substitutions.tsv".format(prefix))):
df = pd.read_csv(tsv_path, sep='\t', usecols=["NodeName", "AbsoluteStartTime", "EndTime", y_param])
t_sample = np.searchsorted(df["AbsoluteStartTime"].values, t_range, side="right") - 1
array.append(df[y_param].values[t_sample])
n = prefix.split("/")[-1]
label = args.y_param_dict[n] if (n in args.y_param_dict) else "n={0}".format(n)
plt.plot(t_range, np.mean(array, axis=0), linewidth=3, label=replace_grec(label))
plt.fill_between(t_range, | np.percentile(array, 5, axis=0) | numpy.percentile |
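# A standalone sketch of the step-function sampling used above:
# searchsorted(..., side="right") - 1 picks, for each query time, the index of
# the last segment that started at or before that time. Values are made up.
import numpy as np

start_times = np.array([0.0, 10.0, 25.0])
values = np.array([1.0, 2.0, 3.0])
t_query = np.array([0.0, 5.0, 10.0, 30.0])
idx = np.searchsorted(start_times, t_query, side="right") - 1
print(values[idx])   # [1. 1. 2. 3.]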
import numpy as np
import roboverse.bullet as bullet
class DrawerOpen:
def __init__(self, env, xyz_action_scale=7.0, angle_action_scale = 0.1,
return_origin_thresh=0.1):
self.env = env
self.xyz_action_scale = xyz_action_scale
self.angle_action_scale = angle_action_scale
self.gripper_dist_thresh = 0.06
self.gripper_xy_dist_thresh = 0.03
self.ending_height_thresh = 0.2
self.return_base_thresh = 0.4
self.open_angle = [90.0, 0.0, 0.0]
self.done = False
self.return_origin_thresh = return_origin_thresh
self.reset()
def reset(self):
# self.dist_thresh = 0.06 + np.random.normal(scale=0.01)
self.drawer_never_opened = True
self.done = False
offset_coeff = (-1) ** (1 - self.env.left_opening)
self.handle_offset = np.array([offset_coeff * 0.01, 0.0, -0.0]) #-0.01
def get_action(self):
ee_pos, ee_orientation = bullet.get_link_state(
self.env.robot_id, self.env.end_effector_index)
ee_deg = bullet.quat_to_deg(ee_orientation)
handle_pos = self.env.get_drawer_handle_pos() + self.handle_offset
gripper_handle_dist = np.linalg.norm(handle_pos - ee_pos)
gripper_handle_xy_dist = np.linalg.norm(handle_pos[:2] - ee_pos[:2])
done = False
noise = True
# print(f"gripper_handle_xy_dist: {gripper_handle_xy_dist}")
if (gripper_handle_xy_dist > self.gripper_xy_dist_thresh
and not self.env.is_drawer_open()):
# print('xy - approaching handle')
action_xyz = (handle_pos - ee_pos) * self.xyz_action_scale
action_xyz = list(action_xyz[:2]) + [0.] # don't droop down.
# action_angles = [0., 0., 0.]
action_angles = (self.open_angle - ee_deg) * self.angle_action_scale
action_gripper = [0.0]
elif (gripper_handle_dist > self.gripper_dist_thresh
and not self.env.is_drawer_open()):
# print("moving down toward handle")
noise = False
action_xyz = (handle_pos - ee_pos) * self.xyz_action_scale
# action_angles = [0., 0., 0.]
action_angles = (self.open_angle - ee_deg) * self.angle_action_scale
action_gripper = [0.0]
elif not self.env.is_drawer_open():
# print("opening drawer")
noise = False
x_command = (-1) ** (1 - self.env.left_opening)
action_xyz = np.array([x_command, 0, 0])
# action = np.asarray([0., 0., 0.7])
# action_angles = (self.open_angle - ee_deg) * self.angle_action_scale
action_angles = [0., 0., 0.]
action_gripper = [0.0]
elif (np.abs(ee_pos[2] - self.ending_height_thresh) >
self.gripper_dist_thresh):
# print("return")
if (np.abs(ee_pos[2] - self.ending_height_thresh) < self.return_base_thresh):
action_xyz = [0., 0., 0.]
action_angles = [0., 0., 0.]
action_gripper = [0.]
done = True
self.done = True
else:
self.drawer_never_opened = False
action_xyz = np.array([0, 0, 0.7]) # force upward action
# action_angles = [0., 0., 0.]
noise = False
action_angles = (self.open_angle - ee_deg) * self.angle_action_scale
action_gripper = [0.0]
else:
action_xyz = [0., 0., 0.]
action_angles = [0., 0., 0.]
action_gripper = [0.0]
# if done:
# if np.linalg.norm(ee_pos - self.env.ee_pos_init) < self.return_origin_thresh:
# self.done = done
# else:
# action_xyz = (self.env.ee_pos_init - ee_pos) * self.xyz_action_scale
# # print(ee_pos, self.env.ee_pos_init)
# # print(np.linalg.norm(ee_pos - self.env.ee_pos_init))
agent_info = dict(done=self.done)
action = np.concatenate((action_xyz, action_angles, action_gripper))
return action, agent_info, noise
class DrawerClose:
def __init__(self, env, xyz_action_scale=3.0, return_origin_thresh=0.1,angle_action_scale = 0.1):
self.env = env
self.xyz_action_scale = xyz_action_scale
self.gripper_dist_thresh = 0.06
self.gripper_xy_dist_thresh = 0.03
self.ending_z = -0.25
self.top_drawer_offset = np.array([0, 0, 0.02])
self.push_angle = [90.0, 5.0, 0.0]
self.done = False
self.begin_closing = False
self.angle_action_scale = angle_action_scale
self.return_origin_thresh = return_origin_thresh
self.reset()
def reset(self):
self.drawer_never_opened = True
offset_coeff = (-1) ** (1 - self.env.left_opening)
self.handle_offset = np.array([offset_coeff * 0.01, 0.0, -0.01])
self.reached_pushing_region = False
self.neutral_taken = False
self.begin_closing = False
self.done = False
def get_action(self):
ee_pos, ee_orientation = bullet.get_link_state(
self.env.robot_id, self.env.end_effector_index)
ee_deg = bullet.quat_to_deg(ee_orientation)
handle_pos = self.env.get_drawer_handle_pos() + self.handle_offset
gripper_handle_dist = np.linalg.norm(handle_pos - ee_pos)
gripper_handle_xy_dist = np.linalg.norm(handle_pos[:2] - ee_pos[:2])
drawer_pos = self.env.get_drawer_pos("drawer")
# drawer_push_target_pos = (
# drawer_pos + np.array([0.15, 0., 0.05]))
# print(f"handle_pos: {handle_pos}, drawer_pos: {drawer_pos} ")
drawer_push_target_pos = (
self.env.get_drawer_handle_pos() + | np.array([0.1, 0.0, 0.12]) | numpy.array |
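# A minimal sketch of the proportional-control pattern the scripted policies above
# rely on: the commanded translation is simply (target - current) times a gain.
# The positions and gain below are illustrative, not taken from the environment.
import numpy as np

def proportional_step(current_pos, target_pos, gain=7.0):
    return (np.asarray(target_pos) - np.asarray(current_pos)) * gain

ee_pos = np.array([0.50, 0.10, 0.20])
handle_pos = np.array([0.55, 0.12, 0.20])
print(proportional_step(ee_pos, handle_pos))   # [0.35 0.14 0.  ]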
import photutils
from astropy.io import fits, ascii
import matplotlib as mpl
from mpl_toolkits.axes_grid1 import make_axes_locatable
import sys
import os
from pkg_resources import resource_filename
if 'DISPLAY' not in os.environ:
mpl.use('Agg')
import matplotlib.pyplot as plt
from matplotlib import patches
from matplotlib import gridspec
import glob
from photutils import CircularAperture, CircularAnnulus
from photutils import RectangularAperture
from photutils import aperture_photometry
import photutils
if photutils.__version__ > "1.0":
from . import fit_2dgauss
from photutils.centroids import centroid_2dg
else:
from photutils import centroid_2dg
import numpy as np
from astropy.time import Time
import astropy.units as u
import pdb
from copy import deepcopy
import yaml
import warnings
from scipy.stats import binned_statistic
from astropy.table import Table
import multiprocessing
from multiprocessing import Pool
import time
import logging
import urllib
import tqdm
maxCPUs = multiprocessing.cpu_count() // 3
try:
import bokeh.plotting
from bokeh.models import ColumnDataSource, HoverTool
from bokeh.models import Range1d
from bokeh.models import WheelZoomTool
except ImportError as err2:
print("Could not import bokeh plotting. Interactive plotting may not work")
from .utils import robust_poly, robust_statistics
from .utils import get_baseDir
from .instrument_specific import rowamp_sub
def run_one_phot_method(allInput):
"""
Do a photometry/spectroscopy method on one file
For example, do aperture photometry on one file
This is a slightly awkward workaround because multiprocessing doesn't work on object methods
So it's a separate function that takes an object and runs the method
Parameters
-----------
allInput: 3 part tuple (object, int, string)
This contains the object, file index to run (0-based) and name of the method to run
"""
photObj, ind, method = allInput
photMethod = getattr(photObj,method)
return photMethod(ind)
def run_multiprocessing_phot(photObj,fileIndices,method='phot_for_one_file'):
"""
Run photometry/spectroscopy methods on all files using multiprocessing
Awkward workaround because multiprocessing doesn't work on object methods
Parameters
----------
photObj: Photometry object
A photometry Object instance
fileIndices: list
List of file indices
method: str
Method on which to apply multiprocessing
"""
allInput = []
for oneInd in fileIndices:
allInput.append([photObj,oneInd,method])
n_files = len(fileIndices)
if n_files < maxCPUs:
raise Exception("Fewer files to process than CPUs, this can confuse multiprocessing")
p = Pool(maxCPUs)
outputDat = list(tqdm.tqdm(p.imap(run_one_phot_method,allInput),total=n_files))
p.close()
return outputDat
def read_yaml(filePath):
with open(filePath) as yamlFile:
yamlStructure = yaml.safe_load(yamlFile)
return yamlStructure
path_to_example = "parameters/phot_params/example_phot_parameters.yaml"
exampleParamPath = resource_filename('tshirt',path_to_example)
class phot:
def __init__(self,paramFile=exampleParamPath,
directParam=None):
""" Photometry class
Parameters
------
paramFile: str
Location of the YAML file that contains the photometry parameters as long
as directParam is None. Otherwise, it uses directParam
directParam: dict
Rather than use the paramFile, you can put a dictionary here.
This can be useful for running a batch of photometric extractions.
Properties
-------
paramFile: str
Same as paramFile above
param: dict
The photometry parameters like file names, aperture sizes, guess locations
fileL: list
The files on which photometry will be performed
nImg: int
Number of images in the sequence
directParam: dict
Parameter dictionary rather than YAML file (useful for batch processing)
"""
self.pipeType = 'photometry'
self.get_parameters(paramFile=paramFile,directParam=directParam)
defaultParams = {'srcGeometry': 'Circular', 'bkgSub': True, 'isCube': False, 'cubePlane': 0,
'doCentering': True, 'bkgGeometry': 'CircularAnnulus',
'boxFindSize': 18,'backStart': 9, 'backEnd': 12,
'scaleAperture': False, 'apScale': 2.5, 'apRange': [0.01,9999],
'scaleBackground': False,
'nanTreatment': 'zero', 'backOffset': [0.0,0.0],
'srcName': 'WASP 62','srcNameShort': 'wasp62',
'refStarPos': [[50,50]],'procFiles': '*.fits',
'apRadius': 9,'FITSextension': 0,
'jdRef': 2458868,
'nightName': 'UT2020-01-20',
'HEADextension': 0,
'refPhotCentering': None,'isSlope': False,
'itimeKeyword': 'INTTIME','readNoise': None,
'detectorGain': None,'cornerSubarray': False,
'subpixelMethod': 'exact','excludeList': None,
'dateFormat': 'Two Part','copyCentroidFile': None,
'bkgMethod': 'mean','diagnosticMode': False,
'bkgOrderX': 1, 'bkgOrderY': 1,'backsub_directions': ['Y','X'],
'readFromTshirtExamples': False,
'saturationVal': None, 'satNPix': 5, 'nanReplaceValue': 0.0,
'DATE-OBS': None,
'driftFile': None
}
for oneKey in defaultParams.keys():
if oneKey not in self.param:
self.param[oneKey] = defaultParams[oneKey]
xCoors, yCoors = [], []
positions = self.param['refStarPos']
self.nsrc = len(positions)
## Set up file names for output
self.check_file_structure()
self.dataFileDescrip = self.param['srcNameShort'] + '_'+ self.param['nightName']
self.photFile = os.path.join(self.baseDir,'tser_data','phot','phot_'+self.dataFileDescrip+'.fits')
self.centroidFile = os.path.join(self.baseDir,'centroids','cen_'+self.dataFileDescrip+'.fits')
self.refCorPhotFile = os.path.join(self.baseDir,'tser_data','refcor_phot','refcor_'+self.dataFileDescrip+'.fits')
# Get the file list
self.fileL = self.get_fileList()
self.nImg = len(self.fileL)
self.srcNames = np.array(np.arange(self.nsrc),dtype=str)
self.srcNames[0] = 'src'
self.set_up_apertures(positions)
self.check_parameters()
self.get_drift_dat()
def get_parameters(self,paramFile,directParam=None):
if directParam is None:
self.paramFile = paramFile
self.param = read_yaml(paramFile)
else:
self.paramFile = 'direct dictionary'
self.param = directParam
def check_file_structure(self):
"""
Check the file structure for plotting/saving data
"""
baseDir = get_baseDir()
structure_file = resource_filename('tshirt','directory_info/directory_list.yaml')
dirList = read_yaml(structure_file)
for oneFile in dirList:
fullPath = os.path.join(baseDir,oneFile)
ensure_directories_are_in_place(fullPath)
self.baseDir = baseDir
def get_fileList(self):
if self.param['readFromTshirtExamples'] == True:
## Find the files from the package data examples
## This is only when running example pipeline runs or tests
search_path = os.path.join(self.baseDir,'example_tshirt_data',self.param['procFiles'])
if len(glob.glob(search_path)) == 0:
print("Did not find example tshirt data. Now attempting to download...")
get_tshirt_example_data()
else:
search_path = self.param['procFiles']
origList = np.sort(glob.glob(search_path))
if self.param['excludeList'] is not None:
fileList = []
for oneFile in origList:
if os.path.basename(oneFile) not in self.param['excludeList']:
fileList.append(oneFile)
else:
fileList = origList
if len(fileList) == 0:
print("Note: File Search comes up empty")
if os.path.exists(self.photFile):
print("Note: Reading file list from previous phot file instead.")
t1 = Table.read(self.photFile,hdu='FILENAMES')
fileList = np.array(t1['File Path'])
return fileList
def check_parameters(self):
assert type(self.param['backOffset']) == list,"Background offset is not a list"
assert len(self.param['backOffset']) == 2,'Background offset must by a 2 element list'
def set_up_apertures(self,positions):
if self.param['srcGeometry'] == 'Circular':
self.srcApertures = CircularAperture(positions,r=self.param['apRadius'])
elif self.param['srcGeometry'] == 'Square':
self.srcApertures = RectangularAperture(positions,w=self.param['apRadius'],
h=self.param['apRadius'],theta=0)
elif self.param['srcGeometry'] == 'Rectangular':
self.srcApertures = RectangularAperture(positions,w=self.param['apWidth'],
h=self.param['apHeight'],theta=0)
else:
print('Unrecognized aperture')
self.xCoors = self.srcApertures.positions[:,0]
self.yCoors = self.srcApertures.positions[:,1]
if self.param['bkgSub'] == True:
bkgPositions = np.array(deepcopy(positions))
bkgPositions[:,0] = bkgPositions[:,0] + self.param['backOffset'][0]
bkgPositions[:,1] = bkgPositions[:,1] + self.param['backOffset'][1]
if self.param['bkgGeometry'] == 'CircularAnnulus':
self.bkgApertures = CircularAnnulus(bkgPositions,r_in=self.param['backStart'],
r_out=self.param['backEnd'])
elif self.param['bkgGeometry'] == 'Rectangular':
self.bkgApertures = RectangularAperture(bkgPositions,w=self.param['backWidth'],
h=self.param['backHeight'],theta=0)
else:
raise ValueError('Unrecognized background geometry')
def get_default_index(self):
"""
Get the default index from the file list
"""
return self.nImg // 2
def get_default_im(self,img=None,head=None):
""" Get the default image for postage stamps or star identification maps"""
## Get the data
if img is None:
img, head = self.getImg(self.fileL[self.get_default_index()])
return img, head
def get_default_cen(self,custPos=None,ind=0):
"""
Get the default centroids for postage stamps or star identification maps
Parameters
----------
custPos: numpy array
Array of custom positions for the apertures. Otherwise it uses the guess position
ind: int
Image index. This is used to guess the position if a drift file is given
"""
if custPos is None:
initialPos = deepcopy(self.srcApertures.positions)
showApPos = np.zeros_like(initialPos)
showApPos[:,0] = initialPos[:,0] + float(self.drift_dat['dx'][ind])
showApPos[:,1] = initialPos[:,1] + float(self.drift_dat['dy'][ind])
else:
showApPos = custPos
return showApPos
def get_drift_dat(self):
drift_dat_0 = Table()
drift_dat_0['Index'] = np.arange(self.nImg)
#drift_dat_0['File'] = self.fileL
drift_dat_0['dx'] = np.zeros(self.nImg)
drift_dat_0['dy'] = np.zeros(self.nImg)
if self.param['driftFile'] == None:
self.drift_dat = drift_dat_0
drift_file_found = False
else:
if self.param['readFromTshirtExamples'] == True:
## Find the files from the package data examples
## This is only when running example pipeline runs or tests
drift_file_path = os.path.join(self.baseDir,'example_tshirt_data',self.param['driftFile'])
else:
drift_file_path = self.param['driftFile']
if os.path.exists(drift_file_path) == False:
drift_file_found = False
warnings.warn("No Drift file found at {}".format(drift_file_path))
else:
drift_file_found = True
self.drift_dat = ascii.read(drift_file_path)
return drift_file_found
def make_drift_file(self,srcInd=0,refIndex=0):
"""
Use the centroids in photometry to generate a drift file of X/Y offsets
Parameters
----------
srcInd: int
The source index used for drifts
refIndex: int
Which file index corresponds to 0.0 drift
"""
HDUList = fits.open(self.photFile)
cenData = HDUList['CENTROIDS'].data
photHead = HDUList['PHOTOMETRY'].header
nImg = photHead['NIMG']
drift_dat = Table()
drift_dat['Index'] = np.arange(nImg)
x = cenData[:,srcInd,0]
drift_dat['dx'] = x - x[refIndex]
y = cenData[:,srcInd,1]
drift_dat['dy'] = y - y[refIndex]
drift_dat['File'] = HDUList['FILENAMES'].data['File Path']
outPath = os.path.join(self.baseDir,'centroids','drift_'+self.dataFileDescrip+'.ecsv')
drift_dat.meta['Zero Index'] = refIndex
drift_dat.meta['Source Used'] = srcInd
drift_dat.meta['Zero File'] = str(drift_dat['File'][refIndex])
print("Saving Drift file to {}".format(outPath))
drift_dat.write(outPath,overwrite=True,format='ascii.ecsv')
def showStarChoices(self,img=None,head=None,custPos=None,showAps=False,
srcLabel=None,figSize=None,showPlot=False,
apColor='black',backColor='black',
vmin=None,vmax=None,index=None,
labelColor='white',
xLim=None,yLim=None,
txtOffset=20):
"""
Show the star choices for photometry
Parameters
------------------
img : numpy 2D array, optional
An image to plot
head : astropy FITS header, optional
header for image
custPos : numpy 2D array or list of tuple coordinates, optional
Custom positions
showAps : bool, optional
Show apertures rather than circle stars
srcLabel : str or None, optional
What should the source label be? The default is "src"
figSize : list or None, optional
Specify the size of the plot.
This is useful for looking at high/lower resolution
showPlot : bool
Show the plot? If True, it will show, otherwise it is saved as a file
apColor: str
The color for the source apertures
backColor: str
The color for the background apertures
vmin: float or None
A value for the :code:`matplotlib.pyplot.plot.imshow` vmin parameter
vmax: float or None
A value for the :code:`matplotlib.pyplot.plot.imshow` vmax parameter
index: int or None
The index of the file name. If None, it uses the default
labelColor: str
Color for the text label for sources
xLim: None or two element list
Specify the minimum and maximum X for the plot. For example xLim=[40,60]
yLim: None or two element list
Specify the minimum and maximum Y for the plot. For example yLim=[40,60]
txtOffset: float
The X and Y offset to place the text label for a source
"""
fig, ax = plt.subplots(figsize=figSize)
if index is None:
index = self.get_default_index()
if img is None:
img, head = self.getImg(self.fileL[index])
else:
img_other, head = self.get_default_im(img=img,head=None)
if vmin is None:
useVmin = np.nanpercentile(img,1)
else:
useVmin = vmin
if vmax is None:
useVmax = np.nanpercentile(img,99)
else:
useVmax = vmax
imData = ax.imshow(img,cmap='viridis',vmin=useVmin,vmax=useVmax,interpolation='nearest')
ax.invert_yaxis()
rad = 50 ## the radius for the matplotlib scatter to show source centers
showApPos = self.get_default_cen(custPos=custPos,ind=index)
if showAps == True:
apsShow = deepcopy(self.srcApertures)
apsShow.positions = showApPos
self.adjust_apertures(index)
if photutils.__version__ >= "0.7":
apsShow.plot(axes=ax,color=apColor)
else:
apsShow.plot(ax=ax,color=apColor)
if self.param['bkgSub'] == True:
backApsShow = deepcopy(self.bkgApertures)
backApsShow.positions = showApPos
backApsShow.positions[:,0] = backApsShow.positions[:,0] + self.param['backOffset'][0]
backApsShow.positions[:,1] = backApsShow.positions[:,1] + self.param['backOffset'][1]
if photutils.__version__ >= "0.7":
backApsShow.plot(axes=ax,color=backColor)
else:
backApsShow.plot(ax=ax,color=backColor)
outName = 'ap_labels_{}.pdf'.format(self.dataFileDescrip)
else:
ax.scatter(showApPos[:,0],showApPos[:,1], s=rad, facecolors='none', edgecolors='r')
outName = 'st_labels_{}.pdf'.format(self.dataFileDescrip)
for ind, onePos in enumerate(showApPos):
#circ = plt.Circle((onePos[0], onePos[1]), rad, color='r')
#ax.add_patch(circ)
if ind == 0:
if srcLabel is None:
name='src'
else:
name=srcLabel
else:
name=str(ind)
ax.text(onePos[0]+txtOffset,onePos[1]+txtOffset,name,color=labelColor)
ax.set_xlabel('X (px)')
ax.set_ylabel('Y (px)')
divider = make_axes_locatable(ax)
cax = divider.append_axes("right", size="5%", pad=0.05)
fig.colorbar(imData,label='Counts',cax=cax)
ax.set_xlim(xLim)
ax.set_ylim(yLim)
if showPlot == True:
fig.show()
else:
outF = os.path.join(self.baseDir,'plots','photometry','star_labels',outName)
fig.savefig(outF,
bbox_inches='tight')
plt.close(fig)
def showStamps(self,img=None,head=None,custPos=None,custFWHM=None,
vmin=None,vmax=None,showPlot=False,boxsize=None,index=None):
"""
Shows the fixed apertures on the image with postage stamps surrounding sources
Parameters
-----------
index: int
Index of the file list. This is needed if scaling apertures
"""
## Calculate approximately square numbers of X & Y positions in the grid
numGridY = int(np.floor(np.sqrt(self.nsrc)))
numGridX = int(np.ceil(float(self.nsrc) / float(numGridY)))
fig, axArr = plt.subplots(numGridY, numGridX)
img, head = self.get_default_im(img=img,head=head)
if boxsize == None:
boxsize = self.param['boxFindSize']
showApPos = self.get_default_cen(custPos=custPos)
if index is None:
index = self.get_default_index()
self.adjust_apertures(index)
for ind, onePos in enumerate(showApPos):
if self.nsrc == 1:
ax = axArr
else:
ax = axArr.ravel()[ind]
yStamp_proposed = np.array(onePos[1] + np.array([-1,1]) * boxsize,dtype=int)
xStamp_proposed = np.array(onePos[0] + np.array([-1,1]) * boxsize,dtype=int)
xStamp, yStamp = ensure_coordinates_are_within_bounds(xStamp_proposed,yStamp_proposed,img)
stamp = img[yStamp[0]:yStamp[1],xStamp[0]:xStamp[1]]
if vmin == None:
useVmin = np.nanpercentile(stamp,1)
else:
useVmin = vmin
if vmax == None:
useVmax = np.nanpercentile(stamp,99)
else:
useVmax = vmax
imData = ax.imshow(stamp,cmap='viridis',vmin=useVmin,vmax=useVmax,interpolation='nearest')
ax.invert_yaxis()
ax.set_title(self.srcNames[ind])
ax.get_xaxis().set_visible(False)
ax.get_yaxis().set_visible(False)
srcX, srcY = onePos[0] - xStamp[0],onePos[1] - yStamp[0]
circ = plt.Circle((srcX,srcY),
self.srcApertures.r,edgecolor='red',facecolor='none')
ax.add_patch(circ)
if self.param['bkgSub'] == True:
for oneRad in [self.bkgApertures.r_in, self.bkgApertures.r_out]:
circ = plt.Circle((srcX + self.param['backOffset'][0],srcY + self.param['backOffset'][1]),
oneRad,edgecolor='blue',facecolor='none')
ax.add_patch(circ)
if custFWHM is not None:
circFWHM = plt.Circle((srcX,srcY),
custFWHM[ind],edgecolor='orange',facecolor='none')
ax.add_patch(circFWHM)
fig.colorbar(imData,label='Counts')
totStamps = numGridY * numGridX
for ind in np.arange(self.nsrc,totStamps):
## Make any extra postage stamps blank
ax = axArr.ravel()[ind]
ax.set_frame_on(False)
ax.axes.get_xaxis().set_visible(False)
ax.axes.get_yaxis().set_visible(False)
#self.srcApertures.plot(indices=ind,color='red')
#ax.set_xlim(onePos[0] - boxsize,onePos[0] + boxsize)
#ax.set_ylim(onePos[1] - boxsize,onePos[1] + boxsize)
if showPlot == True:
fig.show()
else:
outName = 'stamps_'+self.dataFileDescrip+'.pdf'
outPath = os.path.join(self.baseDir,'plots','photometry','postage_stamps',outName)
fig.savefig(outPath)
plt.close(fig)
def showCustSet(self,index=None,ptype='Stamps',defaultCen=False,vmin=None,vmax=None,
boxsize=None,showPlot=False):
""" Show a custom stamp or star identification plot for a given image index
Parameters
--------------
index: int
Index of the image/centroid to show
ptype: str
Plot type - 'Stamps' for postage stamps
'Map' for star identification map
defaultCen: bool
Use the default centroid? If True, it will use the guess centroids to
show the guess before centering.
boxsize: int or None
The size of the box to cut out for plotting postage stamps.
If None, will use defaults.
Only use when ptype is 'Stamps'
showPlot: bool
Show the plot in notebook or iPython session?
If True, it will show the plot.
If False, it will save the plot in the default directory.
"""
self.get_allimg_cen()
if index == None:
index = self.get_default_index()
img, head = self.getImg(self.fileL[index])
if defaultCen == True:
cen = self.srcApertures.positions
else:
cen = self.cenArr[index]
if ptype == 'Stamps':
self.showStamps(custPos=cen,img=img,head=head,vmin=vmin,vmax=vmax,showPlot=showPlot,
boxsize=boxsize,index=index)
elif ptype == 'Map':
self.showStarChoices(custPos=cen,img=img,head=head,showAps=True,showPlot=showPlot,
index=index)
else:
print('Unrecognized plot type')
def make_filename_hdu(self,airmass=None):
"""
Makes a Header data unit (binary FITS table) for filenames
"""
fileLTable = Table()
fileLTable['File Path'] = self.fileL
if airmass is not None:
fileLTable['Airmass'] = airmass
hduFileNames = fits.BinTableHDU(fileLTable)
hduFileNames.name = "FILENAMES"
return hduFileNames
def save_centroids(self,cenArr,fwhmArr,fixesApplied=True,origCen=None,origFWHM=None):
""" Saves the image centroid data
Parameters
-----------
cenArr: numpy array
3d array of centroids (nImg x nsrc x 2 for x/y)
fwhmArr: numpy array
3d array of fwhm (nImg x nsrc x 2 for x/y)
fixesApplied: bool
Are fixes applied to the centroids?
origCen: None or numpy array
Original array of centroids
origFWHM: None or numpy array
Original array of FWHMs
"""
hdu = fits.PrimaryHDU(cenArr)
hdu.header['NSOURCE'] = (self.nsrc,'Number of sources with centroids')
hdu.header['NIMG'] = (self.nImg,'Number of images')
hdu.header['AXIS1'] = ('dimension','dimension axis X=0,Y=1')
hdu.header['AXIS2'] = ('src','source axis')
hdu.header['AXIS3'] = ('image','image axis')
hdu.header['BOXSZ'] = (self.param['boxFindSize'],'half-width of the box used for source centroids')
hdu.header['REFCENS'] = (self.param['refPhotCentering'],'Reference Photometry file used to shift the centroids (or empty if none)')
hdu.header['FIXES'] = (fixesApplied, 'Have centroid fixes been applied from trends in other sources?')
hdu.name = 'Centroids'
hdu2 = fits.ImageHDU(fwhmArr)
hdu2.header['NSOURCE'] = (self.nsrc,'Number of sources with centroids')
hdu2.header['NIMG'] = (self.nImg,'Number of images')
hdu2.header['AXIS1'] = ('dimension','dimension axis X=0,Y=1')
hdu2.header['AXIS2'] = ('src','source axis')
hdu2.header['AXIS3'] = ('image','image axis')
hdu2.header['BOXSZ'] = (self.param['boxFindSize'],'half-width of the box used to fit 2D gaussian')
hdu2.name = 'FWHM'
hduFileNames = self.make_filename_hdu()
HDUList = fits.HDUList([hdu,hdu2,hduFileNames])
if fixesApplied == True:
if origCen is not None:
hduCenOrig = fits.ImageHDU(origCen,hdu.header)
hduCenOrig.header['FIXES'] = ('Unknown','Have centroid fixes been applied from trends in other sources?')
hduCenOrig.name = 'ORIG CEN'
HDUList.append(hduCenOrig)
if origFWHM is not None:
hduFWHMOrig = fits.ImageHDU(origFWHM,hdu2.header)
hduFWHMOrig.name = 'ORIG FWHM'
HDUList.append(hduFWHMOrig)
HDUList.writeto(self.centroidFile,overwrite=True)
head = hdu.header
return head, hdu2.header
def shift_centroids_from_other_file(self,refPhotFile,SWLW=True):
"""
Creates a centroid array where shifts are applied from another
file.
For example, Imaging data from another camera can be used
to provide shifts to the apertures for grism data
"""
if SWLW == True:
rotAngle = 0; ## set to zero for now
scaling = 0.5
else:
raise Exception("Still working on scaling and rotation params")
HDUList = fits.open(refPhotFile)
if "CENTROIDS" not in HDUList:
raise Exception("Could not find CENTROIDS extension in {}".format(refPhotFile))
refCenArr, head = HDUList["CENTROIDS"].data, HDUList["CENTROIDS"].header
xRefAbs, yRefAbs = refCenArr[:,0,0], refCenArr[:,0,1]
xRef, yRef = xRefAbs - xRefAbs[0], yRefAbs - yRefAbs[0]
HDUList.close()
ndim=2 ## Number of dimensions in image (assuming 2D)
cenArr = np.zeros((self.nImg,self.nsrc,ndim))
pos = self.get_default_cen()
for ind, oneFile in enumerate(self.fileL):
xVec = (xRef[ind] * np.cos(rotAngle) - yRef[ind] * np.sin(rotAngle)) * scaling
yVec = (xRef[ind] * np.sin(rotAngle) + yRef[ind] * np.cos(rotAngle)) * scaling
cenArr[ind,:,0] = pos[:,0] + xVec
cenArr[ind,:,1] = pos[:,1] + yVec
fwhmArr = np.zeros_like(cenArr)
return cenArr, fwhmArr
def fix_centroids(self,diagnostics=False,nsigma=10.):
"""
Fix the centroids for outlier positions for stars
"""
HDUList = fits.open(self.centroidFile)
cenArr, head = HDUList["CENTROIDS"].data, HDUList["CENTROIDS"].header
fwhmArr, headFWHM = HDUList["FWHM"].data, HDUList["FWHM"].header
fixedCenArr = deepcopy(cenArr)
fixedFWHMArr = deepcopy(fwhmArr)
medCen = np.nanmedian(cenArr,0)
medCen3D = np.tile(medCen,[self.nImg,1,1])
diffCen = cenArr - medCen3D ## differential motion
fixedDiffCen = deepcopy(diffCen)
if diagnostics == True:
fig, axArr = plt.subplots(2,sharex=True)
for oneAxis in [0,1]:
trend = np.nanmedian(diffCen[:,:,oneAxis],1) ## median trend
trend2D = np.tile(trend,[self.nsrc,1]).transpose()
diffFromTrend = diffCen[:,:,oneAxis] - trend2D
mad = np.nanmedian(np.abs(diffFromTrend))
badPt = np.abs(diffFromTrend) > nsigma * mad
fixedDiffFromTrend = deepcopy(diffFromTrend)
fixedDiffFromTrend[badPt] = 0
fwhmArr2D = fixedFWHMArr[:,:,oneAxis]
fwhmArr2D[badPt] = np.nan ## w/ different position FWHM is no longer relevant
fixedFWHMArr[:,:,oneAxis] = fwhmArr2D
fixedDiffCen[:,:,oneAxis] = fixedDiffFromTrend + trend2D
if diagnostics == True:
for oneSrc in np.arange(self.nsrc):
showData, = axArr[oneAxis].plot(diffCen[:,oneSrc,oneAxis],'o')
axArr[oneAxis].plot(fixedDiffCen[:,oneSrc,oneAxis],color=showData.get_color())
fixedCenArr = fixedDiffCen + medCen3D
if diagnostics == True:
plt.show()
self.save_centroids(fixedCenArr,fixedFWHMArr,fixesApplied=True,origCen=cenArr,origFWHM=fwhmArr)
HDUList.close()
def copy_centroids_from_file(self,fileName):
HDUList = fits.open(fileName)
cenArr, head = HDUList["CENTROIDS"].data, HDUList["CENTROIDS"].header
if "FWHM" in HDUList:
fwhmArr, headFWHM = HDUList["FWHM"].data, HDUList["FWHM"].header
self.keepFWHM = True
else:
self.keepFWHM = False ## allow for legacy centroid files
fwhmArr, headFWHM = None, None
HDUList.close()
return cenArr, head, fwhmArr, headFWHM
def get_allimg_cen(self,recenter=False,useMultiprocessing=False):
""" Get all image centroids
If self.param['doCentering'] is False, it will just use the input aperture positions
Parameters
----------
recenter: bool
Recenter the apertures again? Especially after changing the sources in photometry parameters
useMultiprocessing: bool
Use multiprocessing for faster computation?
"""
ndim=2 ## Number of dimensions in image (assuming 2D)
if os.path.exists(self.centroidFile) and (recenter == False):
cenArr, head, fwhmArr, headFWHM = self.copy_centroids_from_file(self.centroidFile)
elif (self.param['copyCentroidFile'] is not None) and (recenter == False):
cenArr, head, fwhmArr, headFWHM = self.copy_centroids_from_file(self.param['copyCentroidFile'])
elif self.param['refPhotCentering'] is not None:
cenArr, fwhmArr = self.shift_centroids_from_other_file(self.param['refPhotCentering'])
head, headFWHM = self.save_centroids(cenArr,fwhmArr)
self.keepFWHM = False
elif self.param['doCentering'] == False:
img, head = self.get_default_im()
cenArr = np.zeros((self.nImg,self.nsrc,ndim))
pos = self.get_default_cen()
for ind, oneFile in enumerate(self.fileL):
cenArr[ind,:,0] = pos[:,0]
cenArr[ind,:,1] = pos[:,1]
fwhmArr = np.zeros_like(cenArr)
head, headFWHM = self.save_centroids(cenArr,fwhmArr)
self.keepFWHM = False
else:
cenArr = np.zeros((self.nImg,self.nsrc,ndim))
fwhmArr = np.zeros((self.nImg,self.nsrc,ndim))
if useMultiprocessing == True:
fileCountArray = np.arange(len(self.fileL))
allOutput = run_multiprocessing_phot(self,fileCountArray,method='get_allcen_img')
else:
allOutput = []
for ind, oneFile in enumerate(self.fileL):
allOutput.append(self.get_allcen_img(ind))
for ind, oneFile in enumerate(self.fileL):
allX, allY, allfwhmX, allfwhmY = allOutput[ind]
cenArr[ind,:,0] = allX
cenArr[ind,:,1] = allY
fwhmArr[ind,:,0] = allfwhmX
fwhmArr[ind,:,1] = allfwhmY
self.keepFWHM = True
head, headFWHM = self.save_centroids(cenArr,fwhmArr)
self.cenArr = cenArr
self.cenHead = head
if self.param['bkgSub'] == True:
## Make an array for the background offsets
backgOffsetArr = np.zeros((self.nImg,self.nsrc,ndim))
backgOffsetArr[:,:,0] = self.param['backOffset'][0]
backgOffsetArr[:,:,1] = self.param['backOffset'][1]
self.backgOffsetArr = backgOffsetArr
else:
self.backgOffsetArr = np.zeros((self.nImg,self.nsrc,ndim))
if self.keepFWHM == True:
self.fwhmArr = fwhmArr
self.headFWHM = headFWHM
def get_allcen_img(self,ind,showStamp=False):
""" Gets the centroids for all sources in one image """
img, head = self.getImg(self.fileL[ind])
allX, allY = [], []
allfwhmX, allfwhmY = [], []
positions = self.get_default_cen(ind=ind)
for srcInd, onePos in enumerate(positions):
xcen, ycen, fwhmX, fwhmY = self.get_centroid(img,onePos[0],onePos[1])
allX.append(xcen)
allY.append(ycen)
allfwhmX.append(fwhmX)
allfwhmY.append(fwhmY)
if showStamp == True:
posArr = np.vstack((allX,allY)).transpose()
#fwhmArr = np.vstack((allfwhmX,allfwhmY)).transpose()
quadFWHM = np.sqrt(np.array(allfwhmX)**2 + np.array(allfwhmY)**2)
self.showStamps(img=img,custPos=posArr,custFWHM=quadFWHM)
return allX, allY, allfwhmX, allfwhmY
def get_centroid(self,img,xGuess,yGuess):
""" Get the centroid of a source given an x and y guess
Takes the self.param['boxFindSize'] to define the search box
"""
boxSize=self.param['boxFindSize']
shape = img.shape
minX = int(np.max([xGuess - boxSize,0.]))
maxX = int(np.min([xGuess + boxSize,shape[1]-1]))
minY = int(np.max([yGuess - boxSize,0.]))
maxY = int(np.min([yGuess + boxSize,shape[0]-1]))
subimg = img[minY:maxY,minX:maxX]
try:
xcenSub,ycenSub = centroid_2dg(subimg)
except ValueError:
warnings.warn("Found value error for centroid. Putting in Guess value")
xcenSub,ycenSub = xGuess, yGuess
xcen = xcenSub + minX
ycen = ycenSub + minY
try:
fwhmX, fwhmY = self.get_fwhm(subimg,xcenSub,ycenSub)
except ValueError:
warnings.warn("Found value error for FWHM. Putting in a Nan")
fwhmX, fwhmY = np.nan, np.nan
return xcen, ycen, fwhmX, fwhmY
def get_fwhm(self,subimg,xCen,yCen):
""" Get the FWHM of the source in a subarray surrounding it
"""
if photutils.__version__ >= "1.0":
GaussFit = fit_2dgauss.centroid_2dg_w_sigmas(subimg)
x_stddev = GaussFit[2]
y_stddev = GaussFit[3]
else:
GaussModel = photutils.fit_2dgaussian(subimg)
x_stddev = GaussModel.x_stddev.value
y_stddev = GaussModel.y_stddev.value
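# FWHM of a Gaussian = 2*sqrt(2*ln 2)*sigma, i.e. about 2.35482*sigma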
fwhmX = x_stddev * 2.35482005
fwhmY = y_stddev * 2.35482005
return fwhmX, fwhmY
def add_filenames_to_header(self,hdu):
""" Uses fits header cards to list the files
This clutters up the header, so I have now moved
the fileName list to a separate structure
"""
for ind, oneFile in enumerate(self.fileL):
hdu.header['FIL'+str(ind)] = (os.path.basename(oneFile),'file name')
def reset_phot(self):
"""
Reset the photometry
A reminder to myself to write a script to clear the positions.
Sometimes, if you get bad positions from a previous file, they
will wind up being used again. Need to reset the srcAperture.positions!
"""
def get_date(self,head):
if 'DATE-OBS' in head:
useDate = head['DATE-OBS']
elif 'DATE_OBS' in head:
useDate = head['DATE_OBS']
elif 'DATE' in head:
warnings.warn('DATE-OBS not found in header. Using DATE instead')
month1, day1, year1 = head['DATE'].split("/")
useDate = "-".join([year1,month1,day1])
elif 'DATE-OBS' in self.param:
warnings.warn('Using DATE-OBS from parameter file.')
useDate = self.param['DATE-OBS']
else:
warnings.warn('Date headers not found in header. Making it nan')
useDate = np.nan
if self.param['dateFormat'] == 'Two Part':
t0 = Time(useDate+'T'+head['TIME-OBS'])
elif self.param['dateFormat'] == 'One Part':
t0 = Time(useDate)
else:
raise Exception("Date format {} not understdood".format(self.param['dateFormat']))
if 'timingMethod' in self.param:
if self.param['timingMethod'] == 'JWSTint':
if 'INTTIME' in head:
int_time = head['INTTIME']
elif 'EFFINTTM' in head:
int_time = head['EFFINTTM']
else:
warnings.warn("Couldn't find inttime in header. Setting to 0")
int_time = 0
t0 = t0 + (head['TFRAME'] + int_time) * (head['ON_NINT']) * u.second
elif self.param['timingMethod'] == 'intCounter':
t0 = t0 + (head['ON_NINT']) * 1.0 * u.min ## placeholder to spread out time
return t0
def get_read_noise(self,head):
if self.param['readNoise'] != None:
readNoise = float(self.param['readNoise'])
elif 'RDNOISE1' in head:
readNoise = float(head['RDNOISE1'])
else:
readNoise = 1.0
warnings.warn('Warning, no read noise specified')
return readNoise
def adjust_apertures(self,ind):
"""
Adjust apertures, if scaling by FWHM
Parameters
----------
ind: int
the index of `self.fileList`.
"""
if self.param['scaleAperture'] == True:
if self.nImg >= maxCPUs:
useMultiprocessing = True
else:
useMultiprocessing = False
self.get_allimg_cen(useMultiprocessing=useMultiprocessing)
medianFWHM = np.median(self.fwhmArr[ind])
minFWHMallowed, maxFWHMallowed = self.param['apRange']
if medianFWHM < minFWHMallowed:
warnings.warn("FWHM found was smaller than apRange ({}) px. Using {} for Image {}".format(minFWHMallowed,minFWHMallowed,self.fileL[ind]))
medianFWHM = minFWHMallowed
elif medianFWHM > maxFWHMallowed:
warnings.warn("FWHM found was larger than apRange ({}) px. Using {} for Image {}".format(maxFWHMallowed,maxFWHMallowed,self.fileL[ind]))
medianFWHM = maxFWHMallowed
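## Scale the source aperture to apScale * FWHM and shift the background annulus so the
## configured offsets (backStart/backEnd relative to apRadius) are preserved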
if self.param['bkgGeometry'] == 'CircularAnnulus':
self.srcApertures.r = medianFWHM * self.param['apScale']
if self.param['scaleBackground'] == True:
self.bkgApertures.r_in = (self.srcApertures.r +
self.param['backStart'] - self.param['apRadius'])
self.bkgApertures.r_out = (self.bkgApertures.r_in +
self.param['backEnd'] - self.param['backStart'])
else:
warnings.warn('Background Aperture scaling not set up for non-annular geometry')
def get_ap_area(self,aperture):
"""
A function to get the area of apertures
This accommodates different versions of photutils
"""
if photutils.__version__ > '0.6':
## photutils changed area from a method to an attribute after this version
area = aperture.area
else:
## older photutils
area = aperture.area()
return area
def phot_for_one_file(self,ind):
"""
Calculate aperture photometry using `photutils`
Parameters
----------
ind: int
index of the file list on which to read in and do photometry
"""
oneImg = self.fileL[ind]
img, head = self.getImg(oneImg)
t0 = self.get_date(head)
self.srcApertures.positions = self.cenArr[ind]
if self.param['scaleAperture'] == True:
self.adjust_apertures(ind)
readNoise = self.get_read_noise(head)
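## Per-pixel uncertainty: Poisson (photon) term plus read noise, combined in quadrature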
err = np.sqrt(np.abs(img) + readNoise**2) ## Should already be gain-corrected
rawPhot = aperture_photometry(img,self.srcApertures,error=err,method=self.param['subpixelMethod'])
if self.param['saturationVal'] != None:
src_masks = self.srcApertures.to_mask(method='center')
for srcInd,mask in enumerate(src_masks):
src_data = mask.multiply(img)
src_data_1d = src_data[mask.data > 0]
satPoints = (src_data_1d > self.param['saturationVal'])
if np.sum(satPoints) >= self.param['satNPix']:
## set this source as NaN b/c it's saturated
rawPhot['aperture_sum'][srcInd] = np.nan
if self.param['bkgSub'] == True:
self.bkgApertures.positions = self.cenArr[ind] + self.backgOffsetArr[ind]
if self.param['bkgMethod'] == 'mean':
bkgPhot = aperture_photometry(img,self.bkgApertures,error=err,method=self.param['subpixelMethod'])
bkgArea = self.get_ap_area(self.bkgApertures)
srcArea = self.get_ap_area(self.srcApertures)
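## Scale the summed background flux (and its error) from the background-aperture area to the source-aperture area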
bkgVals = bkgPhot['aperture_sum'] / bkgArea * srcArea
bkgValsErr = bkgPhot['aperture_sum_err'] / bkgArea * srcArea
## Background subtracted fluxes
srcPhot = rawPhot['aperture_sum'] - bkgVals
elif self.param['bkgMethod'] in ['median', 'robust mean']:
bkgIntensity, bkgIntensityErr = [], []
bkg_masks = self.bkgApertures.to_mask(method='center')
for mask in bkg_masks:
bkg_data = mask.multiply(img)
bkg_data_1d = bkg_data[mask.data > 0]
oneIntensity, oneErr = robust_statistics(bkg_data_1d,method=self.param['bkgMethod'])
bkgIntensity.append(oneIntensity)
bkgIntensityErr.append(oneErr)
bkgVals = np.array(bkgIntensity) * self.get_ap_area(self.srcApertures)
bkgValsErr = np.array(bkgIntensityErr) * self.get_ap_area(self.srcApertures)
srcPhot = rawPhot['aperture_sum'] - bkgVals
elif self.param['bkgMethod'] == 'colrow':
srcPhot, bkgVals, bkgValsErr = self.poly_sub_phot(img,head,err,ind)
elif self.param['bkgMethod'] == 'rowAmp':
srcPhot, bkgVals, bkgValsErr = self.rowamp_sub_phot(img,head,err,ind)
else:
raise Exception("Unrecognized background method {}".format(self.param['bkgMethod']))
else:
## No background subtraction
srcPhot = rawPhot['aperture_sum']
bkgVals = np.nan
bkgValsErr = 0.
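## Combine the raw aperture-sum error and the background error in quadrature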
srcPhotErr = np.sqrt(rawPhot['aperture_sum_err']**2 + bkgValsErr**2)
return [t0.jd,srcPhot,srcPhotErr, bkgVals]
def show_cutout(self,img,aps=None,name='',percentScaling=False,src=None,ind=None):
""" Plot the cutout around the source for diagnostic purposes"""
fig, ax = plt.subplots()
if percentScaling == True:
vmin,vmax = np.nanpercentile(img,[3,97])
else:
vmin,vmax = None, None
ax.imshow(img,vmin=vmin,vmax=vmax)
if aps is not None:
if photutils.__version__ >= "0.7":
aps.plot(axes=ax)
else:
aps.plot(ax=ax)
ax.set_title(name)
fig.show()
print('Press c and enter to continue')
print('or press q and enter to quit')
pdb.set_trace()
plt.close(fig)
primHDU = fits.PrimaryHDU(img)
primHDU.header['Name'] = name
name_for_file = name.replace(" ","_")
outName = "{}_{}_src_{}_ind_{}.fits".format(self.dataFileDescrip,name_for_file,src,ind)
outPath = os.path.join("diagnostics","phot_poly_backsub",outName)
primHDU.writeto(outPath,overwrite=True)
def poly_sub_phot(self,img,head,err,ind,showEach=False,saveFits=False):
"""
Do a polynomial background subtraction use robust polynomials
This is instead of using the mean or another statistic of the background aperture
"""
from . import spec_pipeline
bkg_masks = self.bkgApertures.to_mask(method='center')
spec = spec_pipeline.spec()
spec.param['bkgOrderX'] = self.param['bkgOrderX']
spec.param['bkgOrderY'] = self.param['bkgOrderY']
spec.fileL = self.fileL
srcPhot, bkgPhot = [], []
for ind,mask in enumerate(bkg_masks):
backImg = mask.multiply(img,fill_value=np.nan)
## fill value doesn't appear to work so I manually will make them NaN
nonBackPts = mask.data == 0
backImg[nonBackPts] = np.nan
img_cutout = mask.cutout(img,fill_value=0.0)
err_cutout = mask.cutout(err,fill_value=0.0)
srcApSub = deepcopy(self.srcApertures)
srcApSub.positions[:,0] = srcApSub.positions[:,0] - mask.bbox.ixmin
srcApSub.positions[:,1] = srcApSub.positions[:,1] - mask.bbox.iymin
spec.param['bkgRegionsX'] = [[0,backImg.shape[1]]]
spec.param['bkgRegionsY'] = [[0,backImg.shape[0]]]
if self.param['diagnosticMode'] == True:
self.show_cutout(img_cutout,aps=srcApSub,name='Img Cutout',src=ind,ind=ind)
self.show_cutout(backImg,aps=srcApSub,name='Background Cutout',
percentScaling=True,src=ind,ind=ind)
backImg_sub, bkgModelTotal, subHead = spec.do_backsub(backImg,head,ind=ind,
directions=self.param['backsub_directions'])
subImg = img_cutout - bkgModelTotal
srcPhot1 = aperture_photometry(subImg,srcApSub,error=err_cutout,
method=self.param['subpixelMethod'])
srcPhot.append(srcPhot1['aperture_sum'][ind])
bkgPhot1 = aperture_photometry(bkgModelTotal,srcApSub,error=err_cutout,
method=self.param['subpixelMethod'])
bkgPhot.append(bkgPhot1['aperture_sum'][ind])
if self.param['diagnosticMode'] == True:
self.show_cutout(subImg,aps=srcApSub,name='Backsub Img Cutout',
percentScaling=True,src=ind,ind=ind)
## use the error in the mean background as an estimate for error
bkgPhotTotal = aperture_photometry(img,self.bkgApertures,error=err,method=self.param['subpixelMethod'])
bkgValsErr = (bkgPhotTotal['aperture_sum_err'] / self.get_ap_area(self.bkgApertures)
* self.get_ap_area(self.srcApertures))
return np.array(srcPhot),np.array(bkgPhot),bkgValsErr
def rowamp_sub_phot(self,img,head,err,ind):
"""
Do a row-by-row, amplifier-by-amplifier background subtraction
This is instead of using the mean or another statistic of the background aperture
"""
saveD = self.param['diagnosticMode']
backsub_img, backg_img = rowamp_sub.do_backsub(img,self,
saveDiagnostics=saveD)
srcPhot_t = aperture_photometry(backsub_img,
self.srcApertures,error=err,
method=self.param['subpixelMethod'])
bkgPhot_t = aperture_photometry(backg_img,
self.srcApertures,error=err,
method=self.param['subpixelMethod'])
srcPhot = srcPhot_t['aperture_sum']
bkgPhot = bkgPhot_t['aperture_sum']
bkgValsErr = bkgPhot_t['aperture_sum_err']
return np.array(srcPhot),np.array(bkgPhot),np.array(bkgValsErr)
def return_self(self):
return self
def do_phot(self,useMultiprocessing=False):
""" Does photometry using the centroids found in get_allimg_cen
"""
self.get_allimg_cen(useMultiprocessing=useMultiprocessing)
photArr = np.zeros((self.nImg,self.nsrc))
errArr = np.zeros_like(photArr)
backArr = np.zeros_like(photArr)
jdArr = []
fileCountArray = np.arange(len(self.fileL))
if useMultiprocessing == True:
outputPhot = run_multiprocessing_phot(self,fileCountArray)
else:
outputPhot = []
for ind in tqdm.tqdm(fileCountArray):
outputPhot.append(self.phot_for_one_file(ind))
## unpack the results
for ind,val in enumerate(outputPhot):
jdArr.append(val[0])
photArr[ind,:] = val[1]
errArr[ind,:] = val[2]
backArr[ind,:] = val[3]
## Save the photometry results
hdu = fits.PrimaryHDU(photArr)
hdu.header['NSOURCE'] = (self.nsrc,'Number of sources with photometry')
hdu.header['NIMG'] = (self.nImg,'Number of images')
hdu.header['AXIS1'] = ('src','source axis')
hdu.header['AXIS2'] = ('image','image axis')
basicHeader = deepcopy(hdu.header)
# hdu.header[''] = ' Source parameters '
hdu.header['SRCNAME'] = (self.param['srcName'], 'Source name')
hdu.header['NIGHT'] = (self.param['nightName'], 'Night Name')
hdu.header['SRCGEOM'] = (self.param['srcGeometry'], 'Source Aperture Geometry')
hdu.header['BKGGEOM'] = (self.param['bkgGeometry'], 'Background Aperture Geometry')
if 'apRadius' in self.param:
hdu.header['APRADIUS'] = (self.param['apRadius'], 'Aperture radius (px)')
elif 'apHeight' in self.param:
hdu.header['APRADIUS'] = (self.param['apHeight'], 'Aperture radius (px)')
hdu.header['APHEIGHT'] = (self.param['apHeight'], 'Aperture height (px)')
hdu.header['APWIDTH'] = (self.param['apWidth'], 'Aperture width (px)')
else:
print("No apHeight or apRadius found in parameters")
hdu.header['SCALEDAP'] = (self.param['scaleAperture'], 'Is the aperture scaled by the FWHM?')
hdu.header['APSCALE'] = (self.param['apScale'], 'If scaling apertures, which scale factor?')
# hdu.header[''] = ' Background Subtraction parameters '
hdu.header['BKGSUB'] = (self.param['bkgSub'], 'Do a background subtraction?')
hdu.header['BKGSTART'] = (self.param['backStart'], 'Background Annulus start (px), if used')
hdu.header['BKGEND'] = (self.param['backEnd'], 'Background Annulus end (px), if used')
if 'backHeight' in self.param:
hdu.header['BKHEIGHT'] = (self.param['backHeight'], 'Background Box Height (px)')
hdu.header['BKWIDTH'] = (self.param['backWidth'], 'Background Box Width (px)')
hdu.header['BKOFFSTX'] = (self.param['backOffset'][0], 'X Offset between background and source (px)')
hdu.header['BKOFFSTY'] = (self.param['backOffset'][1], 'Y Offset between background and source (px)')
hdu.header['BKGMETH'] = (self.param['bkgMethod'], 'Background subtraction method')
if self.param['bkgMethod'] == 'colrow':
hdu.header['BKGDIREC'] = (" ".join(self.param['backsub_directions']), 'The directions, in order, for polynomial background sub')
hdu.header['BKGORDRX'] = (self.param['bkgOrderX'], 'X Background subtraction polynomial order')
hdu.header['BKGORDRY'] = (self.param['bkgOrderY'], 'Y Background subtraction polynomial order')
# hdu.header[''] = ' Centroiding Parameters '
hdu.header['BOXSZ'] = (self.param['boxFindSize'], 'half-width of the box used for centroiding')
hdu.header['COPYCENT'] = (self.param['copyCentroidFile'], 'Name of the file where centroids are copied (if used)')
# hdu.header[''] = ' Timing Parameters '
hdu.header['JDREF'] = (self.param['jdRef'], ' JD reference offset to subtract for plots')
# hdu.header[''] = ' Image Parameters '
hdu.header['ISCUBE'] = (self.param['isCube'], 'Is the image data 3D?')
hdu.header['CUBPLANE'] = (self.param['cubePlane'], 'Which plane of the cube is used?')
hdu.header['DOCEN'] = (self.param['doCentering'], 'Is each aperture centered individually?')
hdu.header['EXTNAMEU'] = (self.param['FITSextension'], 'FITS extension used of data')
hdu.header['NANTREAT'] = (self.param['nanTreatment'], 'How are NaN pixels treated?')
hdu.header['SLOPEIMG'] = (self.param['isSlope'], 'Are original images slopes, then multiplied by int time?')
hdu.header['SUBPIXEL'] = (self.param['subpixelMethod'], 'Treatment of apertures at the subpixel level')
hduFileNames = self.make_filename_hdu()
hduTime = fits.ImageHDU(jdArr)
hduTime.header['UNITS'] = ('days','JD time, UT')
hduErr = fits.ImageHDU(data=errArr,header=basicHeader)
hduErr.name = 'Phot Err'
hduBack = fits.ImageHDU(data=backArr,header=basicHeader)
hduBack.name = 'Backg Phot'
hduCen = fits.ImageHDU(data=self.cenArr,header=self.cenHead)
hdu.name = 'Photometry'
hduTime.name = 'Time'
hduCen.name = 'Centroids'
## hduFileName.name = 'Filenames' # already named by make_filename_hdu
## Get an example original header
exImg, exHeader = self.get_default_im()
hduOrigHeader = fits.ImageHDU(None,exHeader)
hduOrigHeader.name = 'Orig Header'
HDUList = fits.HDUList([hdu,hduErr,hduBack,hduTime,hduCen,hduFileNames,hduOrigHeader])
if self.keepFWHM == True:
hduFWHM = fits.ImageHDU(self.fwhmArr,header=self.headFWHM)
HDUList.append(hduFWHM)
HDUList.writeto(self.photFile,overwrite=True)
warnings.resetwarnings()
def plot_phot(self,offset=0.,refCorrect=False,ax=None,fig=None,showLegend=True,
normReg=None,doBin=None,doNorm=True,yLim=[None,None],
excludeSrc=None,errBar=None,showPlot=False):
""" Plots previously calculated photometry
Parameters
---------------------
offset : float
y displacement for overlaying time series
refCorrect : bool
Use reference star-corrected photometry?
ax : matplotlib axis object
If the axis was created separately, use the input axis object
fig : matplotlib figure object
If the figure was created separately, use the input figure object
showLegend : bool
Show a legend?
normReg: list with two items or None
Relative region over which to fit a baseline and re-normalize. This only works on reference-corrected photometry for now
doBin : float or None
The bin size if showing binned data. This only works on reference-corrected photometry for now
doNorm : bool
Normalize the individual time series?
yLim: List
List of lower and upper Y limits to show
errBar : string or None
Describes how error bars will be displayed. None=none, 'all'=every point,'one'=representative
excludeSrc : List or None
Custom sources to exclude in the averaging (to exclude specific sources in the reference time series).
For example, for 5 sources, excludeSrc = [2] will use [1,3,4] for the reference
showPlot: bool
Show the plot? Otherwise, it saves it to a file
"""
HDUList = fits.open(self.photFile)
photHDU = HDUList['PHOTOMETRY']
photArr = photHDU.data
errArr = HDUList['PHOT ERR'].data
head = photHDU.header
jdHDU = HDUList['TIME']
jdArr = jdHDU.data
timeHead = jdHDU.header
jdRef = self.param['jdRef']
if ax == None:
fig, ax = plt.subplots()
if refCorrect == True:
yCorrected, yCorrected_err = self.refSeries(photArr,errArr,excludeSrc=excludeSrc)
x = jdArr - jdRef
if normReg == None:
yShow = yCorrected
else:
fitp = (x < normReg[0]) | (x > normReg[1])
polyBase = robust_poly(x[fitp],yCorrected[fitp],2,sigreject=2)
yBase = np.polyval(polyBase,x)
yShow = yCorrected / yBase
if errBar == 'all':
ax.errorbar(x,yShow,label='data',marker='o',linestyle='',markersize=3.,yerr=yCorrected_err)
else:
ax.plot(x,yShow,label='data',marker='o',linestyle='',markersize=3.)
madY = np.nanmedian(np.abs(yShow - np.nanmedian(yShow)))
if errBar == 'one':
ax.errorbar([np.median(x)],[np.median(yShow) - 4. * madY],
yerr=np.median(yCorrected_err),fmt='o',mfc='none')
#pdb.set_trace()
if doBin is not None:
minValue, maxValue = 0.95, 1.05 ## clip for cosmic rays
goodP = (yShow > minValue) & (yShow < maxValue)
nBin = int(np.round((np.max(x[goodP]) - np.min(x[goodP]))/doBin))
if nBin > 1:
yBins = Table()
for oneStatistic in ['mean','std','count']:
if oneStatistic == 'std':
statUse = np.std
else: statUse = oneStatistic
yBin, xEdges, binNum = binned_statistic(x[goodP],yShow[goodP],
statistic=statUse,bins=nBin)
yBins[oneStatistic] = yBin
## Standard error in the mean
stdErrM = yBins['std'] / np.sqrt(yBins['count'])
xbin = (xEdges[:-1] + xEdges[1:])/2.
ax.errorbar(xbin,yBins['mean'],yerr=stdErrM,marker='s',markersize=3.,
label='binned')
else:
for oneSrc in range(self.nsrc):
yFlux = photArr[:,oneSrc]
yNorm = yFlux / np.nanmedian(yFlux)
if oneSrc == 0:
pLabel = 'Src'
else:
pLabel = 'Ref '+str(oneSrc)
if doNorm == True:
yplot = yNorm - offset * oneSrc
else:
yplot = yFlux - offset * oneSrc
## To avoid repeat colors, switch to dashed lines
if oneSrc >= 10: linestyle='dashed'
else: linestyle= 'solid'
ax.plot(jdArr - jdRef,yplot,label=pLabel,linestyle=linestyle)
if head['SRCGEOM'] == 'Circular':
ax.set_title('Src Ap='+str(head['APRADIUS'])+',Back=['+str(head['BKGSTART'])+','+
str(head['BKGEND'])+']')
ax.set_xlabel('JD - '+str(jdRef))
ax.set_ylim(yLim[0],yLim[1])
if doNorm == True:
ax.set_ylabel('Normalized Flux + Offset')
else:
ax.set_ylabel('Flux + Offset')
#ax.set_ylim(0.94,1.06)
if showLegend == True:
ax.legend(loc='best',fontsize=10)
if showPlot == True:
fig.show()
else:
if refCorrect == True:
outName = 'tser_refcor/refcor_{}.pdf'.format(self.dataFileDescrip)
outPath = os.path.join(self.baseDir,'plots','photometry',outName)
else:
outName = 'raw_tser_{}.pdf'.format(self.dataFileDescrip)
outPath = os.path.join(self.baseDir,'plots','photometry','tser_allstar',outName)
fig.savefig(outPath)
plt.close(fig)
HDUList.close()
def print_phot_statistics(self,refCorrect=True,excludeSrc=None,shorten=False,
returnOnly=False,removeLinear=True,
startInd=0,endInd=15):
"""
Print the calculated and theoretical noise as a table
Parameters
----------
refCorrect: bool
Use reference stars to correct target?
If True, there is only one row in the table for the target.
If False, there is a row for each star's absolute noise
excludeSrc: list, or None
A list of sources (or None) to exclude as reference stars
Given by index number
shorten: bool
Shorten the number of points used in the time series?
Useful if analyzing the baseline before transit, for example.
returnOnly: bool
If True, a table is returned.
If False, a table is printed and another is returned
removeLinear: bool
Remove a linear trend from the data first?
startInd: int
If shorten is True, only uses a subset of the data starting with startInd
endInd: int
If shorten is True, only uses a subset of the data ending with endInd
"""
HDUList = fits.open(self.photFile)
photHDU = HDUList['PHOTOMETRY']
photArr = photHDU.data
head = photHDU.header
timeArr = HDUList['TIME'].data
errArr = HDUList['PHOT ERR'].data
t = Table()
if (head['NSOURCE'] == 1) & (refCorrect == True):
warnings.warn('Only one source, so defaulting to refCorrect=False')
refCorrect = False
if shorten == True:
photArr = photArr[startInd:endInd,:]
nImg = endInd - startInd
else:
nImg = self.nImg
if refCorrect == True:
yCorrected, yCorrected_err = self.refSeries(photArr,errArr,
excludeSrc=excludeSrc)
if removeLinear == True:
xNorm = (timeArr - np.min(timeArr))/(np.max(timeArr) - np.min(timeArr))
poly_fit = robust_poly(xNorm,yCorrected,1)
yCorrected = yCorrected / np.polyval(poly_fit,xNorm)
if shorten == True:
yCorrected = yCorrected[startInd:endInd]
t['Stdev (%)'] = np.round([np.nanstd(yCorrected) * 100.],4)
t['Theo Err (%)'] = np.round(np.nanmedian(yCorrected_err) * 100.,4)
mad = np.nanmedian(np.abs(yCorrected - np.nanmedian(yCorrected)))
t['MAD (%)'] = np.round(mad * 100.,4)
else:
t['Source #'] = np.arange(self.nsrc)
medFlux = np.nanmedian(photArr,axis=0)
t['Stdev (%)'] = np.round(np.nanstd(photArr,axis=0) / medFlux * 100.,4)
t['Theo Err (%)'] = np.round(np.nanmedian(errArr,axis=0) / medFlux * 100.,4)
tiledFlux = np.tile(medFlux,[nImg,1])
mad = np.nanmedian(np.abs(photArr - tiledFlux),axis=0) / medFlux
t['MAD (%)'] = np.round(mad * 100.,4)
if returnOnly:
pass
else:
print(t)
HDUList.close()
return t
def plot_state_params(self,excludeSrc=None):
HDUList = fits.open(self.photFile)
photHDU = HDUList['PHOTOMETRY']
photArr = photHDU.data
head = photHDU.header
errArr = HDUList['PHOT ERR'].data
jdHDU = HDUList['TIME']
jdArr = jdHDU.data
t = jdArr - np.round(np.min(jdArr))
timeHead = jdHDU.header
cenData = HDUList['CENTROIDS'].data
fwhmData = HDUList['FWHM'].data
backData = HDUList['BACKG PHOT'].data
fig, axArr = plt.subplots(7,sharex=True)
yCorr, yCorr_err = self.refSeries(photArr,errArr,excludeSrc=excludeSrc)
axArr[0].plot(t,yCorr)
axArr[0].set_ylabel('Ref Cor F')
for oneSrc in range(self.nsrc):
yFlux = photArr[:,oneSrc]
axArr[1].plot(t,yFlux / np.median(yFlux))
axArr[1].set_ylabel('Flux')
xCen = cenData[:,oneSrc,0]
backFlux = backData[:,oneSrc]
axArr[2].plot(t,backFlux / np.median(backFlux))
axArr[2].set_ylabel('Back')
axArr[3].plot(t,xCen - np.median(xCen))
axArr[3].set_ylabel('X Pos')
yCen = cenData[:,oneSrc,1]
axArr[4].plot(t,yCen - np.median(yCen))
axArr[4].set_ylabel('Y Pos')
fwhm1 = fwhmData[:,oneSrc,0]
axArr[5].plot(t,np.abs(fwhm1))
axArr[5].set_ylabel('FWHM 1')
fwhm2 = fwhmData[:,oneSrc,1]
axArr[6].plot(t,np.abs(fwhm2))
axArr[6].set_ylabel('FWHM 2')
fig.show()
def plot_flux_vs_pos(self,refCorrect=True):
"""
Plot flux versus centroid to look for flat fielding effects
"""
HDUList = fits.open(self.photFile)
if refCorrect == True:
yNorm, yErrNorm = self.refSeries(HDUList['PHOTOMETRY'].data,HDUList['PHOT ERR'].data)
else:
yFlux = HDUList['PHOTOMETRY'].data[:,0]
yNorm = yFlux / np.median(yFlux)
cenX = HDUList['CENTROIDS'].data[:,0,0]
cenY = HDUList['CENTROIDS'].data[:,0,1]
fig, axArr = plt.subplots(1,2,sharey=True,figsize=(9,4.5))
for ind,oneDir, coord in zip([0,1],['X','Y'],[cenX,cenY]):
axArr[ind].plot(coord,yNorm,'o')
axArr[ind].set_xlabel('{} (px)'.format(oneDir))
axArr[ind].set_ylabel('Norm F')
#yPoly =
fig.show()
HDUList.close()
def refSeries(self,photArr,errPhot,reNorm=False,excludeSrc=None,sigRej=5.):
""" Average together the reference stars
Parameters
-------------
reNorm: bool
Re-normalize all stars before averaging? If set, all stars have equal weight. Otherwise, the stars are summed together, which weights by flux
excludeSrc: arr
Custom sources to exclude from the averaging (to exclude specific sources from the reference time series). For example, for 5 sources, excludeSrc = [2] will use [1,3,4] for the reference
sigRej: int
Sigma rejection threshold
"""
combRef = []
srcArray = np.arange(self.nsrc,dtype=int)
if excludeSrc == None:
maskOut = (srcArray == 0)
else:
maskOut = np.zeros(self.nsrc,dtype=bool)
maskOut[0] = True
for oneSrc in excludeSrc:
if (oneSrc < 0) | (oneSrc >= self.nsrc):
pdb.set_trace()
raise Exception("{} is an invalid source among {}".format(oneSrc,self.nsrc))
else:
maskOut[oneSrc] = True
refMask2D = np.tile(maskOut,(self.nImg,1))
## also mask points that are NaN
nanPt = (np.isfinite(photArr) == False)
refMask2D = refMask2D | nanPt
refPhot = np.ma.array(photArr,mask=refMask2D)
## Normalize all time series
norm1D_divisor = np.nanmedian(photArr,axis=0)
norm2D_divisor = np.tile(norm1D_divisor,(self.nImg,1))
normPhot = refPhot / norm2D_divisor
normErr = errPhot / norm2D_divisor
## Find outliers
# Median time series
medTimSeries1D = np.nanmedian(normPhot,axis=1)
medTimSeries2D = np.tile(medTimSeries1D,(self.nsrc,1)).transpose()
# Absolute deviation
absDeviation = np.abs(normPhot - medTimSeries2D)
# Median abs deviation of all reference photometry
MADphot = np.nanmedian(absDeviation)
# Points that deviate above threshold
badP = (absDeviation > sigRej * np.ones((self.nImg,self.nsrc),dtype=float) * MADphot)
normPhot.mask = refMask2D | badP
refPhot.mask = refMask2D | badP
if reNorm == True:
## Weight all stars equally
combRef = np.nanmean(normPhot,axis=1)
combErr = np.sqrt(np.nansum(normErr**2,axis=1)) / (self.nsrc - np.sum(maskOut))
else:
## Weight by the flux, but only for the valid points left
weights = np.ma.array(norm2D_divisor,mask=normPhot.mask)
## Make sure weights sum to 1.0 for each time point (since some sources are missing)
weightSums1D = np.nansum(weights,axis=1)
weightSums2D = np.tile(weightSums1D,(self.nsrc,1)).transpose()
weights = weights / weightSums2D
combRef = np.nansum(normPhot * weights,axis=1)
# -*- coding: utf-8 -*-
#
# Base class for all computational classes in Syncopy
#
# Builtin/3rd party package imports
import os
import sys
import psutil
import h5py
import numpy as np
from itertools import chain
from abc import ABC, abstractmethod
from copy import copy
from numpy.lib.format import open_memmap
from tqdm.auto import tqdm
if sys.platform == "win32":
# tqdm breaks term colors on Windows - fix that (tqdm issue #446)
import colorama
colorama.deinit()
colorama.init(strip=False)
# Local imports
from .tools import get_defaults
from syncopy import __storage__, __acme__, __path__
from syncopy.shared.errors import SPYValueError, SPYTypeError, SPYParallelError, SPYWarning
if __acme__:
from acme import ParallelMap
import dask.distributed as dd
import dask_jobqueue as dj
# # In case of problems w/worker-stealing, uncomment the following lines
# import dask
# dask.config.set(distributed__scheduler__work_stealing=False)
__all__ = []
class ComputationalRoutine(ABC):
"""Abstract class for encapsulating sequential/parallel algorithms
A Syncopy compute class consists of a
:class:`ComputationalRoutine`-subclass that binds a static
:func:`computeFunction` and provides the class method
:meth:`process_metadata`.
Requirements for :meth:`computeFunction`:
* First positional argument is a :class:`numpy.ndarray`, the keywords
`chunkShape` and `noCompute` are supported
* Returns a :class:`numpy.ndarray` if `noCompute` is `False` and expected
shape and numerical type of output array otherwise.
Requirements for :class:`ComputationalRoutine`:
* Child of :class:`ComputationalRoutine`, binds :func:`computeFunction`
as static method
* Provides class method :func:`process_metadata`
For details on writing compute classes and metafunctions for Syncopy, please
refer to :doc:`/developer/compute_kernels`.
"""
# Placeholder: the actual workhorse
@staticmethod
def computeFunction(arr, *argv, chunkShape=None, noCompute=None, **kwargs):
"""Computational core routine
Parameters
----------
arr : :class:`numpy.ndarray`
Numerical data from a single trial
*argv : tuple
Arbitrary tuple of positional arguments
chunkShape : None or tuple
Mandatory keyword. If not `None`, represents global block-size of
processed trial.
noCompute : None or bool
Preprocessing flag. If `True`, do not perform actual calculation but
instead return expected shape and :class:`numpy.dtype` of output
array.
**kwargs: dict
Other keyword arguments.
Returns
-------
outShape : tuple, if ``noCompute == True``
expected shape of output array
outDtype : :class:`numpy.dtype`, if ``noCompute == True``
expected numerical type of output array
res : :class:`numpy.ndarray`, if ``noCompute == False``
Result of processing input `arr`
Notes
-----
This concrete method is a placeholder that is intended to be
overloaded.
See also
--------
ComputationalRoutine : Developer documentation: :doc:`/developer/compute_kernels`.
"""
return None
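# A minimal sketch (illustrative only, not actual Syncopy code) of how a concrete
# subclass might bind `computeFunction`, here a trivial absolute-value "filter":
#
#     class AbsRoutine(ComputationalRoutine):
#         @staticmethod
#         def computeFunction(arr, chunkShape=None, noCompute=None):
#             if noCompute:
#                 return arr.shape, arr.dtype
#             return np.abs(arr)
#
# An instance would then be prepared with `initialize(data, out_stackingdim=...)`
# before running the actual computation.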
def __init__(self, *argv, **kwargs):
"""
Instantiate a :class:`ComputationalRoutine` subclass
Parameters
----------
*argv : tuple
Tuple of positional arguments passed on to :meth:`computeFunction`
**kwargs : dict
Keyword arguments passed on to :meth:`computeFunction`
Returns
-------
obj : instance of :class:`ComputationalRoutine`-subclass
Usable class instance for processing Syncopy data objects.
"""
# list of positional arguments to `computeFunction` for all workers, format:
# ``self.argv = [3, [0, 1, 1], ('a', 'b', 'c')]``
self.argv = list(argv)
# dict of default keyword values accepted by `computeFunction`
self.defaultCfg = get_defaults(self.computeFunction)
# dict of actual keyword argument values to `computeFunction` provided by user
self.cfg = copy(self.defaultCfg)
for key in set(self.cfg.keys()).intersection(kwargs.keys()):
self.cfg[key] = kwargs[key]
# binary flag: if `True`, average across trials, do nothing otherwise
self.keeptrials = None
# full shape of final output dataset (all trials, all chunks, etc.)
self.outputShape = None
# numerical type of output dataset
self.dtype = None
# list of dicts encoding header info of raw binary input files (experimental!)
self.hdr = None
# list of trial numbers to process (either `data.trials` or `data.selection.trials`)
self.trialList = None
# number of trials to process (shortcut for `len(self.trialList)`)
self.numTrials = None
# number of channel blocks to process per Trial (1 by default, only > 1 if
# `chan_per_worker` is not `None`)
self.numBlocksPerTrial = None
# actual number of parallel calls to `computeFunction` to perform
# (if `chan_per_worker` is `None`, then `numCalls` = `numTrials`, otherwise
# `numCalls` = `numBlocksPerTrial * numTrials`)
self.numCalls = None
# list of index-tuples for extracting trial-chunks from input HDF5 dataset
# >>> MUST be ordered, no repetitions! <<<
# indices are ABSOLUTE, i.e., wrt entire dataset, not just current trial!
self.sourceLayout = None
# list of shape-tuples of input trial-chunks (necessary to restore shape of
# arrays that got inflated by scalar selection tuples)
self.sourceShapes = None
# list of index-tuples for re-ordering NumPy arrays extracted w/`self.sourceLayout`
# >>> can be unordered w/repetitions <<<
# indices are RELATIVE, i.e., wrt current trial!
self.sourceSelectors = None
# list of index-tuples for storing trial-chunk result in output dataset
# >>> MUST be ordered, no repetitions! <<<
# indices are ABSOLUTE, i.e., wrt entire dataset, not just current trial
self.targetLayout = None
# list of shape-tuples of trial-chunk results
self.targetShapes = None
# binary flag: if `True`, use fancy array indexing via `np.ix_` to extract
# data from input via `self.sourceLayout` + `self.sourceSelectors`; if `False`,
# only use `self.sourceLayout` (selections ordered, no reps)
self.useFancyIdx = None
# integer, max. memory footprint of largest input array piece (in bytes)
self.chunkMem = None
# instance of ACME's `ParallelMap` to handle actual parallel computing workload
self.pmap = None
# directory for storing source-HDF5 files making up virtual output dataset
self.virtualDatasetDir = None
# h5py layout encoding shape/geometry of file sources within virtual output dataset
self.VirtualDatasetLayout = None
# name of temporary datasets (only relevant for virtual output datasets)
self.virtualDatasetNames = "chk"
# placeholder name for (temporary) output datasets
self.tmpDsetName = None
# Name of output HDF5 file
self.outFileName = None
# name of target HDF5 dataset in output object
self.outDatasetName = "data"
# tmp holding var for preserving original access mode of `data`
self.dataMode = None
# time (in seconds) b/w querying state of futures ('pending' -> 'finished')
self.sleepTime = 0.1
# if `True`, enforces use of single-threaded scheduler in `compute_parallel`
self.parallelDebug = False
# format string for tqdm progress bars in sequential computation
self.tqdmFormat = "{desc}: {percentage:3.0f}% |{bar}| {n_fmt}/{total_fmt} [{elapsed}<{remaining}]"
# maximal acceptable size (in MB) of any provided positional argument
self._maxArgSize = 100
# counter and maximal recursion depth for calling `self._sizeof`
self._callMax = 10000
self._callCount = 0
def initialize(self, data, out_stackingdim, chan_per_worker=None, keeptrials=True):
"""
Perform dry-run of calculation to determine output shape
Parameters
----------
data : syncopy data object
Syncopy data object to be processed (has to be the same object
that is passed to :meth:`compute` for the actual calculation).
out_stackingdim : int
Index of data dimension for stacking trials in output object
chan_per_worker : None or int
Number of channels to be processed by each worker (only relevant in
case of concurrent processing). If `chan_per_worker` is `None` (default)
by-trial parallelism is used, i.e., each worker processes
data corresponding to a full trial. If `chan_per_worker > 0`, trials
are split into channel-groups of size `chan_per_worker` (+ rest if the
number of channels is not divisible by `chan_per_worker` without
remainder) and workers are assigned by-trial channel-groups for
processing.
keeptrials : bool
Flag indicating whether to return individual trials or average
Returns
-------
Nothing : None
Notes
-----
This class method **has** to be called prior to performing the actual
computation realized in :meth:`computeFunction`.
See also
--------
compute : core routine performing the actual computation
"""
# First store `keeptrial` keyword value (important for output shapes below)
self.keeptrials = keeptrials
# Determine if data-selection was provided; if so, extract trials and check
# whether selection requires fancy array indexing
if data.selection is not None:
self.trialList = data.selection.trials
self.useFancyIdx = data.selection._useFancy
else:
self.trialList = list(range(len(data.trials)))
self.useFancyIdx = False
self.numTrials = len(self.trialList)
# Prepare dryrun arguments and determine geometry of trials in output
dryRunKwargs = copy(self.cfg)
dryRunKwargs["noCompute"] = True
chk_list = []
dtp_list = []
trials = []
for tk, trialno in enumerate(self.trialList):
trial = data._preview_trial(trialno)
trlArg = tuple(arg[tk] if isinstance(arg, (list, tuple, np.ndarray)) and len(arg) == self.numTrials \
else arg for arg in self.argv)
chunkShape, dtype = self.computeFunction(trial,
*trlArg,
**dryRunKwargs)
chk_list.append(list(chunkShape))
dtp_list.append(dtype)
trials.append(trial)
# Determine trial stacking dimension and compute aggregate shape of output
stackingDim = out_stackingdim
totalSize = sum(cShape[stackingDim] for cShape in chk_list)
outputShape = list(chunkShape)
if stackingDim < 0 or stackingDim >= len(outputShape):
msg = "valid trial stacking dimension"
raise SPYTypeError(out_stackingdim, varname="out_stackingdim", expected=msg)
outputShape[stackingDim] = totalSize
# The aggregate shape is computed as max across all chunks
chk_arr = np.array(chk_list)
chunkShape = tuple(chk_arr.max(axis=0))
if np.unique(chk_arr[:, stackingDim]).size > 1 and not self.keeptrials:
err = "Averaging trials of unequal lengths in output currently not supported!"
raise NotImplementedError(err)
if np.any([dtp_list[0] != dtp for dtp in dtp_list]):
lgl = "unique output dtype"
act = "{} different output dtypes".format(np.unique(dtp_list).size)
raise SPYValueError(legal=lgl, varname="dtype", actual=act)
# Save determined shapes and data type
self.outputShape = tuple(outputShape)
self.cfg["chunkShape"] = chunkShape
self.dtype = np.dtype(dtp_list[0])
import numpy
import six.moves
import cellprofiler_core.image
import cellprofiler_core.measurement
from cellprofiler_core.constants.measurement import COLTYPE_FLOAT
import cellprofiler.modules.measurecolocalization
import cellprofiler_core.object
import cellprofiler_core.pipeline
import cellprofiler_core.workspace
import tests.modules
IMAGE1_NAME = "image1"
IMAGE2_NAME = "image2"
OBJECTS_NAME = "objects"
def make_workspace(image1, image2, objects=None):
"""Make a workspace for testing Threshold"""
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
image_set_list = cellprofiler_core.image.ImageSetList()
image_set = image_set_list.get_image_set(0)
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
for image_name, name, image in zip(
module.images_list.value, (IMAGE1_NAME, IMAGE2_NAME), (image1, image2)
):
image_set.add(name, image)
object_set = cellprofiler_core.object.ObjectSet()
if objects is None:
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_IMAGES
)
else:
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS
)
module.objects_list.value = OBJECTS_NAME
object_set.add_objects(objects, OBJECTS_NAME)
pipeline = cellprofiler_core.pipeline.Pipeline()
workspace = cellprofiler_core.workspace.Workspace(
pipeline,
module,
image_set,
object_set,
cellprofiler_core.measurement.Measurements(),
image_set_list,
)
return workspace, module
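# Typical use in the tests below (for reference):
#     workspace, module = make_workspace(i1, i2)           # images only
#     workspace, module = make_workspace(i1, i2, objects)  # images and objects
#     module.run(workspace)
#     m = workspace.measurements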
def test_load_v2():
file = tests.modules.get_test_resources_directory("measurecolocalization/v2.pipeline")
with open(file, "r") as fd:
data = fd.read()
fd = six.moves.StringIO(data)
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(fd)
assert len(pipeline.modules()) == 1
module = pipeline.modules()[-1]
assert (
module.images_or_objects.value
== cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS
)
assert len(module.images_list.value) == 2
assert module.thr == 15.0
for name in module.images_list.value:
assert name in ["DNA", "Cytoplasm"]
assert len(module.objects_list.value) == 2
for name in module.objects_list.value:
assert name in ["Nuclei", "Cells"]
def test_load_v3():
file = tests.modules.get_test_resources_directory("measurecolocalization/v3.pipeline")
with open(file, "r") as fd:
data = fd.read()
fd = six.moves.StringIO(data)
pipeline = cellprofiler_core.pipeline.Pipeline()
def callback(caller, event):
assert not isinstance(event, cellprofiler_core.pipeline.event.LoadException)
pipeline.add_listener(callback)
pipeline.load(fd)
assert len(pipeline.modules()) == 1
module = pipeline.modules()[-1]
assert (
module.images_or_objects.value
== cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS
)
assert len(module.images_list.value) == 2
assert module.thr == 25.0
for name in module.images_list.value:
assert name in ["DNA", "Cytoplasm"]
assert len(module.objects_list.value) == 2
for name in module.objects_list.value:
assert name in ["Nuclei", "Cells"]
all_object_measurement_formats = [
cellprofiler.modules.measurecolocalization.F_CORRELATION_FORMAT,
cellprofiler.modules.measurecolocalization.F_COSTES_FORMAT,
cellprofiler.modules.measurecolocalization.F_K_FORMAT,
cellprofiler.modules.measurecolocalization.F_MANDERS_FORMAT,
cellprofiler.modules.measurecolocalization.F_OVERLAP_FORMAT,
cellprofiler.modules.measurecolocalization.F_RWC_FORMAT,
]
all_image_measurement_formats = all_object_measurement_formats + [
cellprofiler.modules.measurecolocalization.F_SLOPE_FORMAT
]
asymmetrical_measurement_formats = [
cellprofiler.modules.measurecolocalization.F_COSTES_FORMAT,
cellprofiler.modules.measurecolocalization.F_K_FORMAT,
cellprofiler.modules.measurecolocalization.F_MANDERS_FORMAT,
cellprofiler.modules.measurecolocalization.F_RWC_FORMAT,
]
def test_get_categories():
"""Test the get_categories function for some different cases"""
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
module.objects_list.value = OBJECTS_NAME
module.images_or_objects.value = cellprofiler.modules.measurecolocalization.M_IMAGES
def cat(name):
return module.get_categories(None, name) == ["Correlation"]
assert cat("Image")
assert not cat(OBJECTS_NAME)
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_OBJECTS
)
assert not cat("Image")
assert cat(OBJECTS_NAME)
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS
)
assert cat("Image")
assert cat(OBJECTS_NAME)
def test_get_measurements():
"""Test the get_measurements function for some different cases"""
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
module.objects_list.value = OBJECTS_NAME
module.images_or_objects.value = cellprofiler.modules.measurecolocalization.M_IMAGES
def meas(name):
ans = list(module.get_measurements(None, name, "Correlation"))
ans.sort()
if name == "Image":
mf = all_image_measurement_formats
else:
mf = all_object_measurement_formats
expected = sorted([_.split("_")[1] for _ in mf])
return ans == expected
assert meas("Image")
assert not meas(OBJECTS_NAME)
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_OBJECTS
)
assert not meas("Image")
assert meas(OBJECTS_NAME)
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS
)
assert meas("Image")
assert meas(OBJECTS_NAME)
def test_get_measurement_images():
"""Test the get_measurment_images function for some different cases"""
for iocase, names in (
(
cellprofiler.modules.measurecolocalization.M_IMAGES,
["Image"],
),
(cellprofiler.modules.measurecolocalization.M_OBJECTS, [OBJECTS_NAME]),
(
cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS,
["Image", OBJECTS_NAME],
),
):
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
module.objects_list.value = OBJECTS_NAME
module.images_or_objects.value = iocase
for name, mfs in (
("Image", all_image_measurement_formats),
(OBJECTS_NAME, all_object_measurement_formats),
):
if name not in names:
continue
for mf in mfs:
ftr = mf.split("_")[1]
ans = module.get_measurement_images(None, name, "Correlation", ftr)
expected = [
"%s_%s" % (i1, i2)
for i1, i2 in (
(IMAGE1_NAME, IMAGE2_NAME),
(IMAGE2_NAME, IMAGE1_NAME),
)
]
if mf in asymmetrical_measurement_formats:
assert all([e in ans for e in expected])
else:
assert any([e in ans for e in expected])
def test_01_get_measurement_columns_images():
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
module.objects_list.value = OBJECTS_NAME
module.images_or_objects.value = cellprofiler.modules.measurecolocalization.M_IMAGES
columns = module.get_measurement_columns(None)
expected = [
(
"Image",
ftr % (IMAGE1_NAME, IMAGE2_NAME),
COLTYPE_FLOAT,
)
for ftr in all_image_measurement_formats
] + [
(
"Image",
ftr % (IMAGE2_NAME, IMAGE1_NAME),
COLTYPE_FLOAT,
)
for ftr in asymmetrical_measurement_formats
]
assert len(columns) == len(expected)
for column in columns:
assert any([all([cf == ef for cf, ef in zip(column, ex)]) for ex in expected])
def test_02_get_measurement_columns_objects():
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
module.objects_list.value = OBJECTS_NAME
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_OBJECTS
)
columns = module.get_measurement_columns(None)
expected = [
(
OBJECTS_NAME,
ftr % (IMAGE1_NAME, IMAGE2_NAME),
COLTYPE_FLOAT,
)
for ftr in all_object_measurement_formats
] + [
(
OBJECTS_NAME,
ftr % (IMAGE2_NAME, IMAGE1_NAME),
COLTYPE_FLOAT,
)
for ftr in asymmetrical_measurement_formats
]
assert len(columns) == len(expected)
for column in columns:
assert any([all([cf == ef for cf, ef in zip(column, ex)]) for ex in expected])
def test_03_get_measurement_columns_both():
module = cellprofiler.modules.measurecolocalization.MeasureColocalization()
module.images_list.value = ", ".join((IMAGE1_NAME, IMAGE2_NAME))
module.objects_list.value = OBJECTS_NAME
module.images_or_objects.value = (
cellprofiler.modules.measurecolocalization.M_IMAGES_AND_OBJECTS
)
columns = module.get_measurement_columns(None)
expected = (
[
(
"Image",
ftr % (IMAGE1_NAME, IMAGE2_NAME),
COLTYPE_FLOAT,
)
for ftr in all_image_measurement_formats
]
+ [
(
"Image",
ftr % (IMAGE2_NAME, IMAGE1_NAME),
COLTYPE_FLOAT,
)
for ftr in asymmetrical_measurement_formats
]
+ [
(
OBJECTS_NAME,
ftr % (IMAGE1_NAME, IMAGE2_NAME),
COLTYPE_FLOAT,
)
for ftr in all_object_measurement_formats
]
+ [
(
OBJECTS_NAME,
ftr % (IMAGE2_NAME, IMAGE1_NAME),
COLTYPE_FLOAT,
)
for ftr in asymmetrical_measurement_formats
]
)
assert len(columns) == len(expected)
for column in columns:
assert any([all([cf == ef for cf, ef in zip(column, ex)]) for ex in expected])
def test_correlated():
numpy.random.seed(0)
image = numpy.random.uniform(size=(10, 10))
i1 = cellprofiler_core.image.Image(image)
i2 = cellprofiler_core.image.Image(image)
workspace, module = make_workspace(i1, i2)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(
None, "Image", "Correlation", "Correlation"
)
corr = m.get_current_measurement(
"Image", "Correlation_Correlation_%s" % mi[0]
)
assert round(abs(corr - 1), 7) == 0
assert len(m.get_object_names()) == 1
assert m.get_object_names()[0] == "Image"
columns = module.get_measurement_columns(None)
features = m.get_feature_names("Image")
assert len(columns) == len(features)
for column in columns:
assert column[1] in features
def test_anticorrelated():
"""Test two anticorrelated images"""
#
# Make a checkerboard pattern and reverse it for one image
#
i, j = numpy.mgrid[0:10, 0:10]
image1 = ((i + j) % 2).astype(float)
image2 = 1 - image1
i1 = cellprofiler_core.image.Image(image1)
i2 = cellprofiler_core.image.Image(image2)
workspace, module = make_workspace(i1, i2)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(
None, "Image", "Correlation", "Correlation"
)
corr = m.get_current_measurement(
"Image", "Correlation_Correlation_%s" % mi[0]
)
assert round(abs(corr - -1), 7) == 0
def test_slope():
"""Test the slope measurement"""
numpy.random.seed(0)
image1 = numpy.random.uniform(size=(10, 10)).astype(numpy.float32)
image2 = image1 * 0.5
i1 = cellprofiler_core.image.Image(image1)
i2 = cellprofiler_core.image.Image(image2)
workspace, module = make_workspace(i1, i2)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(
None, "Image", "Correlation", "Slope"
)
slope = m.get_current_measurement(
"Image", "Correlation_Slope_%s" % mi[0]
)
if mi[0] == "%s_%s" % (IMAGE1_NAME, IMAGE2_NAME):
assert round(abs(slope - 0.5), 5) == 0
else:
assert round(abs(slope - 2), 7) == 0
def test_crop():
"""Test similarly cropping one image to another"""
numpy.random.seed(0)
image1 = numpy.random.uniform(size=(20, 20))
i1 = cellprofiler_core.image.Image(image1)
crop_mask = numpy.zeros((20, 20), bool)
crop_mask[5:16, 5:16] = True
i2 = cellprofiler_core.image.Image(image1[5:16, 5:16], crop_mask=crop_mask)
workspace, module = make_workspace(i1, i2)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(
None, "Image", "Correlation", "Correlation"
)
corr = m.get_current_measurement(
"Image", "Correlation_Correlation_%s" % mi[0]
)
assert round(abs(corr - 1), 7) == 0
def test_mask():
"""Test images with two different masks"""
numpy.random.seed(0)
image1 = numpy.random.uniform(size=(20, 20))
mask1 = numpy.ones((20, 20), bool)
mask1[5:8, 8:12] = False
mask2 = numpy.ones((20, 20), bool)
mask2[14:18, 2:5] = False
mask = mask1 & mask2
image2 = image1.copy()
#
# Try to confound the module by making masked points anti-correlated
#
image2[~mask] = 1 - image1[~mask]
i1 = cellprofiler_core.image.Image(image1, mask=mask1)
i2 = cellprofiler_core.image.Image(image2, mask=mask2)
workspace, module = make_workspace(i1, i2)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(
None, "Image", "Correlation", "Correlation"
)
corr = m.get_current_measurement(
"Image", "Correlation_Correlation_%s" % mi[0]
)
assert round(abs(corr - 1), 7) == 0
def test_objects():
"""Test images with two objects"""
labels = numpy.zeros((10, 10), int)
labels[:4, :4] = 1
labels[6:, 6:] = 2
i, j = numpy.mgrid[0:10, 0:10]
image1 = ((i + j) % 2).astype(float)
image2 = image1.copy()
#
# Anti-correlate the second object
#
image2[labels == 2] = 1 - image1[labels == 2]
i1 = cellprofiler_core.image.Image(image1)
i2 = cellprofiler_core.image.Image(image2)
o = cellprofiler_core.object.Objects()
o.segmented = labels
workspace, module = make_workspace(i1, i2, o)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(None, OBJECTS_NAME, "Correlation", "Correlation")
corr = m.get_current_measurement(OBJECTS_NAME, "Correlation_Correlation_%s" % mi[0])
assert len(corr) == 2
assert round(abs(corr[0] - 1), 7) == 0
assert round(abs(corr[1] - -1), 7) == 0
assert len(m.get_object_names()) == 2
assert OBJECTS_NAME in m.get_object_names()
columns = module.get_measurement_columns(None)
image_features = m.get_feature_names("Image")
object_features = m.get_feature_names(OBJECTS_NAME)
assert len(columns) == len(image_features) + len(object_features)
for column in columns:
if column[0] == "Image":
assert column[1] in image_features
else:
assert column[0] == OBJECTS_NAME
assert column[1] in object_features
def test_cropped_objects():
"""Test images and objects with a cropping mask"""
numpy.random.seed(0)
image1 = numpy.random.uniform(size=(20, 20))
i1 = cellprofiler_core.image.Image(image1)
crop_mask = numpy.zeros((20, 20), bool)
crop_mask[5:15, 5:15] = True
i2 = cellprofiler_core.image.Image(image1[5:15, 5:15], crop_mask=crop_mask)
labels = numpy.zeros((10, 10), int)
labels[:4, :4] = 1
labels[6:, 6:] = 2
o = cellprofiler_core.object.Objects()
o.segmented = labels
#
# Make the objects have the cropped image as a parent
#
o.parent_image = i2
workspace, module = make_workspace(i1, i2, o)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(None, OBJECTS_NAME, "Correlation", "Correlation")
corr = m.get_current_measurement(OBJECTS_NAME, "Correlation_Correlation_%s" % mi[0])
assert round(abs(corr[0] - 1), 7) == 0
assert round(abs(corr[1] - 1), 7) == 0
def test_no_objects():
"""Test images with no objects"""
labels = numpy.zeros((10, 10), int)
i, j = numpy.mgrid[0:10, 0:10]
image1 = ((i + j) % 2).astype(float)
image2 = image1.copy()
i1 = cellprofiler_core.image.Image(image1)
i2 = cellprofiler_core.image.Image(image2)
o = cellprofiler_core.object.Objects()
o.segmented = labels
workspace, module = make_workspace(i1, i2, o)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(None, OBJECTS_NAME, "Correlation", "Correlation")
corr = m.get_current_measurement(OBJECTS_NAME, "Correlation_Correlation_%s" % mi[0])
assert len(corr) == 0
assert len(m.get_object_names()) == 2
assert OBJECTS_NAME in m.get_object_names()
columns = module.get_measurement_columns(None)
image_features = m.get_feature_names("Image")
object_features = m.get_feature_names(OBJECTS_NAME)
assert len(columns) == len(image_features) + len(object_features)
for column in columns:
if column[0] == "Image":
assert column[1] in image_features
else:
assert column[0] == OBJECTS_NAME
assert column[1] in object_features
def test_wrong_size():
"""Regression test of IMG-961 - objects and images of different sizes"""
numpy.random.seed(0)
image1 = numpy.random.uniform(size=(20, 20))
i1 = cellprofiler_core.image.Image(image1)
labels = numpy.zeros((10, 30), int)
labels[:4, :4] = 1
labels[6:, 6:] = 2
o = cellprofiler_core.object.Objects()
o.segmented = labels
workspace, module = make_workspace(i1, i1, o)
module.run(workspace)
m = workspace.measurements
mi = module.get_measurement_images(None, OBJECTS_NAME, "Correlation", "Correlation")
corr = m.get_current_measurement(OBJECTS_NAME, "Correlation_Correlation_%s" % mi[0])
assert round(abs(corr[0] - 1), 7) == 0
assert round(abs(corr[1] - 1), 7) == 0
def test_last_object_masked():
# Regression test of issue #1553
# MeasureColocalization was truncating the measurements
# if the last had no pixels or all pixels masked.
#
r = numpy.random.RandomState()
r.seed(65)
image1 = r.uniform(size=(20, 20))
image2 = r.uniform(size=(20, 20))
labels = numpy.zeros((20, 20), int)
labels[3:8, 3:8] = 1
labels[13:18, 13:18] = 2
mask = labels != 2
objects = cellprofiler_core.object.Objects()
objects.segmented = labels
for mask1, mask2 in ((mask, None), (None, mask), (mask, mask)):
workspace, module = make_workspace(
cellprofiler_core.image.Image(image1, mask=mask1),
cellprofiler_core.image.Image(image2, mask=mask2),
objects,
)
module.run(workspace)
m = workspace.measurements
feature = cellprofiler.modules.measurecolocalization.F_CORRELATION_FORMAT % (
IMAGE1_NAME,
IMAGE2_NAME,
)
values = m[OBJECTS_NAME, feature]
assert len(values) == 2
assert numpy.isnan(values[1])
def test_zero_valued_intensity():
# https://github.com/CellProfiler/CellProfiler/issues/2680
image1 = numpy.zeros((10, 10), dtype=numpy.float32)
image2 = numpy.random.rand(10, 10)
import numpy as np
import random
from random import randint, choice
from gutfit import model, parameterlist
def matrix_diag3(d1,d2,d3):
return np.array([[d1, 0.0, 0.0], [0.0, d2, 0.0], [0.0, 0.0, d3]])
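# e.g. matrix_diag3(1.0, 2.0, 3.0) returns the 3x3 diagonal matrix diag(1, 2, 3)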
# Generic Rotations #
def matrix_rot23(th23):
return np.array([[1.0, 0.0 , 0.0],
[0.0, np.cos(th23), np.sin(th23)],
[0.0, -np.sin(th23), np.cos(th23)]]) # remaining rows reconstructed; sign convention assumed
"""
Class contains a pure-python/Numpy implementation for the layer of a feedforward
network that learns using Hebbian/Anti-Hebbian (HAH) weight updates and a
layer for Q-AQREL reinforcement learning
"""
import numpy as np
import random
from hebbnets import base_layer
from hebbnets import utils
class FeedforwardLayer(base_layer.BaseLayer):
"""Layer of neurons that use simple feedforward weights
"""
def __init__(self, num_nodes, prev_layer, **kwargs):
"""Initialize layer
Args:
num_nodes: Number of nodes in this layer
prev_layer: The previous layer of the network
If this is an input layer, set this to an integer giving
the input size
act_type: String naming the type of activation function
to use
has_bias: Bool indicating whether layer has bias
Returns:
None
"""
super().__init__(num_nodes, prev_layer, **kwargs)
self.input_weights = self.glorot_init(
self.layer_input_size + self.params['bias'],
self.num_nodes
)
def _rescale_weights(self):
np.clip(
self.input_weights,
-self.MAX_WEIGHT, self.MAX_WEIGHT,
out=self.input_weights
)
def update_activation(self, input_value=None):
"""Update activation in forward pass for Q-AGREL layer
Args:
input_value: set to list/numpy array being fed as input into this
layer, otherwise input will be inferred from a previous layer
Returns:
None. Updates self.activation in place
"""
input_value = self._get_input_values(input_value)
input_value_times_weights = np.matmul(self.input_weights.T, input_value)
self.activation = self.activation_type.apply(input_value_times_weights)
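# Forward pass: activation = f(W.T @ x); the bias term (if enabled) is presumably
# folded into the input vector by `_get_input_values` in the base layer.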
class QagrelLayer(FeedforwardLayer):
"""Layer of neurons that use simple feedforward weights
and Q-AGREL weight updating
"""
def update_weights(self, gate_value, rew_prederr, learning_rate, layer_input_val=None):
"""Update weights using Q-AGREL rules
Args:
gate_value: vector with size matching number of units in this layer
indicating gating strength
rew_prederr: scalar prediction error
learning_rate: scalar learning rate
layer_input_val: input value for this layer (or None to use the previous layer's activation)
Returns:
None, updates weight in place
"""
target_vec = self.activation_type.apply_deriv(self.activation) * gate_value.reshape(-1, 1)
outer_prod = np.matmul(
self._get_input_values(layer_input_val),
target_vec.T
)
self.input_weights += learning_rate * rew_prederr * outer_prod
self._rescale_weights()
class HahLayer(base_layer.BaseLayer):
"""Layer of neurons that are activated and updated using a
Hebbian/AntHebbian (HAH) pattern
"""
MAX_ACTIVATION_TIME_STEPS = 200
ACTIVATION_ERROR_TOL = 1.0e-2
CUMSCORE_LR = 0.01
def __init__(self, num_nodes, prev_layer, **kwargs):
"""Initialize layer
Args:
num_nodes: Number of nodes in this layer
prev_layer: The previous layer of the network
If this is an input layer, set this to an integer giving
the input size
act_type: String naming the type of activation function
to use
has_bias: Bool indicating whether layer has bias
noise_var: variance for Gaussian noise applied to all activations
Returns:
None
"""
super().__init__(num_nodes, prev_layer, **kwargs)
self.input_weights = self.glorot_init(
self.layer_input_size + self.params['bias'],
self.num_nodes
)
self.lateral_weights = self.glorot_init(
self.num_nodes,
self.num_nodes,
)
np.fill_diagonal(self.lateral_weights, 0.0)
self._cum_sqr_activation = np.tile(1000.0, (num_nodes, 1))
def update_activation(self, input_value=None):
"""Update activation value for Hebb/Antihebb layer
Args:
input_value: set to list/numpy array being fed as input into this
layer, otherwise input will be inferred from a previous layer
Returns:
None. Updates self.activation in place
"""
input_value = self._get_input_values(input_value)
self.activation = np.zeros((self.num_nodes, 1), dtype='float64')
input_value_times_weights = np.matmul(self.input_weights.T, input_value)
if self.params['act_type'] == 'linear':
_lateral_inv = np.linalg.pinv(self.lateral_weights.T + np.eye(self.num_nodes))
self.activation = np.matmul(_lateral_inv, input_value_times_weights)
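# For linear units the recurrent dynamics settle at the fixed point
# (I + L.T) a = W.T x, hence the pseudo-inverse solve above.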
else:
for _ in range(self.MAX_ACTIVATION_TIME_STEPS):
next_activation = self.activation_type.apply(
input_value_times_weights - np.matmul(self.lateral_weights.T, self.activation))
# Copyright 2018 The TensorFlow Probability Authors.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
"""Tests for ReplicaExchangeMC."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# Dependency imports
import numpy as np
import tensorflow.compat.v1 as tf1
import tensorflow.compat.v2 as tf
import tensorflow_probability as tfp
from tensorflow_probability.python import distributions as tfd
from tensorflow_probability.python.internal import test_case
from tensorflow.python.framework import test_util # pylint: disable=g-direct-tensorflow-import
def _set_seed(seed):
"""Helper which uses graph seed if using TFE."""
# TODO(b/68017812): Deprecate once TFE supports seed.
if tf.executing_eagerly():
return None
return seed
@test_util.run_all_in_graph_and_eager_modes
class DefaultExchangeProposedFnTest(test_case.TestCase):
def setUp(self):
tf1.set_random_seed(123)
def generate_exchanges(self, exchange_proposed_fn, num_replica, seed):
def _scan_fn(*_):
exchange = exchange_proposed_fn(num_replica, seed)
flat_replicas = tf.reshape(exchange, [-1])
with tf.control_dependencies([
tf1.assert_equal(
tf.size(input=flat_replicas),
tf.size(input=tf.unique(flat_replicas)[0])),
tf1.assert_greater_equal(flat_replicas, 0),
tf1.assert_less(flat_replicas, num_replica),
]):
return tf.shape(input=exchange)[0]
return self.evaluate(
tf.scan(_scan_fn, tf.range(1000), initializer=0, parallel_iterations=1))
def testProbExchange0p5NumReplica2(self):
prob_exchange = 0.5
num_replica = 2
fn = tfp.mcmc.default_exchange_proposed_fn(prob_exchange)
exchanges_lens = self.generate_exchanges(
fn, num_replica=num_replica, seed=_set_seed(123))
# All exchanges_lens, if proposed, will be 1.
self.assertAllClose(
prob_exchange, np.mean([e == 1 for e in exchanges_lens]), atol=0.05)
self.assertAllClose(
1 - prob_exchange, np.mean([e == 0 for e in exchanges_lens]), atol=0.05)
def testProbExchange0p5NumReplica4(self):
prob_exchange = 0.5
num_replica = 4
fn = tfp.mcmc.default_exchange_proposed_fn(prob_exchange)
exchanges_lens = self.generate_exchanges(
fn, num_replica=num_replica, seed=_set_seed(312))
# No exchange is proposed 1 - prob_exchange of the time.
self.assertAllClose(
1 - prob_exchange, np.mean([e == 0 for e in exchanges_lens]), atol=0.05)
# All exchanges_lens, if proposed, will be 0 or 1.
self.assertAllClose(
prob_exchange / 2, np.mean([e == 1 for e in exchanges_lens]), atol=0.05)
self.assertAllClose(
prob_exchange / 2, np.mean([e == 2 for e in exchanges_lens]), atol=0.05)
def testProbExchange0p5NumReplica3(self):
prob_exchange = 0.5
num_replica = 3
fn = tfp.mcmc.default_exchange_proposed_fn(prob_exchange)
exchanges_lens = self.generate_exchanges(
fn, num_replica=num_replica, seed=_set_seed(42))
# All exchanges_lens, if proposed, will be 1.
self.assertAllClose(
prob_exchange, np.mean([e == 1 for e in exchanges_lens]), atol=0.05)
self.assertAllClose(
1 - prob_exchange, np.mean([e == 0 for e in exchanges_lens]), atol=0.05)
def testProbExchange0p5NumReplica5(self):
prob_exchange = 0.5
num_replica = 5
fn = tfp.mcmc.default_exchange_proposed_fn(prob_exchange)
exchanges_lens = self.generate_exchanges(
fn, num_replica=num_replica, seed=_set_seed(1))
# All exchanges_lens, if proposed, will be 2.
self.assertAllClose(
prob_exchange, np.mean([e == 2 for e in exchanges_lens]), atol=0.05)
self.assertAllClose(
1 - prob_exchange, np.mean([e == 0 for e in exchanges_lens]), atol=0.05)
def testProbExchange1p0(self):
prob_exchange = 1.0
num_replica = 15
fn = tfp.mcmc.default_exchange_proposed_fn(prob_exchange)
exchanges_lens = self.generate_exchanges(
fn, num_replica=num_replica, seed=_set_seed(667))
# All exchanges_lens, if proposed, will be 7. And prob_exchange is 1.
self.assertAllClose(
prob_exchange, np.mean([e == 7 for e in exchanges_lens]), atol=0.05)
self.assertAllClose(
1 - prob_exchange, np.mean([e == 0 for e in exchanges_lens]), atol=0.05)
def testProbExchange0p0(self):
prob_exchange = 0.0
num_replica = 15
fn = tfp.mcmc.default_exchange_proposed_fn(prob_exchange)
exchanges_lens = self.generate_exchanges(
fn, num_replica=num_replica, seed=_set_seed(665))
# All exchanges_lens, if proposed, will be 7. And prob_exchange is 0.
self.assertAllClose(
prob_exchange, np.mean([e == 7 for e in exchanges_lens]), atol=0.05)
self.assertAllClose(
1 - prob_exchange, np.mean([e == 0 for e in exchanges_lens]), atol=0.05)
@test_util.run_all_in_graph_and_eager_modes
class REMCTest(test_case.TestCase):
def setUp(self):
tf1.set_random_seed(123)
def _getNormalREMCSamples(self,
inverse_temperatures,
num_results=1000,
step_size=1.,
dtype=np.float32):
"""Sampling from standard normal with REMC."""
target = tfd.Normal(dtype(0.), dtype(1.))
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_size,
num_leapfrog_steps=3)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
seed=_set_seed(1))
samples = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=target.sample(seed=_set_seed(1)),
kernel=remc,
num_burnin_steps=50,
trace_fn=None,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results,), samples.shape)
return self.evaluate(samples)
def testNormalOddNumReplicas(self):
"""Sampling from the Standard Normal Distribution."""
samps_ = self._getNormalREMCSamples(
inverse_temperatures=[1., 0.3, 0.1, 0.03, 0.01])
self.assertAllClose(samps_.mean(), 0., atol=0.1, rtol=0.1)
self.assertAllClose(samps_.std(), 1., atol=0.1, rtol=0.1)
def testNormalEvenNumReplicas(self):
"""Sampling from the Standard Normal Distribution."""
samps_ = self._getNormalREMCSamples(
inverse_temperatures=[1., 0.9, 0.8, 0.7],)
self.assertAllClose(samps_.mean(), 0., atol=0.1, rtol=0.1)
self.assertAllClose(samps_.std(), 1., atol=0.1, rtol=0.1)
def testNormalOddNumReplicasLowTolerance(self):
"""Sampling from the Standard Normal Distribution."""
samps_ = self._getNormalREMCSamples(
inverse_temperatures=[1., 0.3, 0.1, 0.03, 0.01], num_results=500)
self.assertAllClose(samps_.mean(), 0., atol=0.3, rtol=0.1)
self.assertAllClose(samps_.std(), 1., atol=0.3, rtol=0.1)
def testNormalEvenNumReplicasLowTolerance(self):
"""Sampling from the Standard Normal Distribution."""
samps_ = self._getNormalREMCSamples(
inverse_temperatures=[1., 0.9, 0.8, 0.7], num_results=500)
self.assertAllClose(samps_.mean(), 0., atol=0.3, rtol=0.1)
self.assertAllClose(samps_.std(), 1., atol=0.3, rtol=0.1)
def testNormalHighTemperatureOnlyHasLargerStddev(self):
"""Sampling from the Standard Normal Distribution."""
samps_ = self._getNormalREMCSamples(
inverse_temperatures=[0.2], step_size=3.)
self.assertAllClose(samps_.mean(), 0., atol=0.2, rtol=0.1)
self.assertGreater(samps_.std(), 2.)
def testNormalLowTemperatureOnlyHasSmallerStddev(self):
"""Sampling from the Standard Normal Distribution."""
samps_ = self._getNormalREMCSamples(
inverse_temperatures=[6.0], step_size=0.5)
self.assertAllClose(samps_.mean(), 0., atol=0.2, rtol=0.1)
self.assertLess(samps_.std(), 0.6)
def testRWM2DMixNormal(self):
"""Sampling from a 2-D Mixture Normal Distribution."""
dtype = np.float32
# By symmetry, target has mean [0, 0]
# Therefore, Var = E[X^2] = E[E[X^2 | c]], where c is the component.
# Now..., for the first component,
# E[X1^2] = Var[X1] + Mean[X1]^2
# = 0.3^2 + 1^2,
# and similarly for the second. As a result,
# Var[mixture] = 1.09.
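# A quick numerical check of that arithmetic (illustrative only, not part of
# the test; the loc and scale values mirror the mixture defined just below):
#   >>> locs, scale, probs = np.array([-1., 1.]), 0.3, np.array([0.5, 0.5])
#   >>> np.sum(probs * (scale**2 + locs**2))   # E[X1^2], with E[X1] = 0; ~= 1.09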
target = tfd.MixtureSameFamily(
mixture_distribution=tfd.Categorical(probs=[0.5, 0.5]),
components_distribution=tfd.MultivariateNormalDiag(
loc=[[-1., -1], [1., 1.]],
scale_identity_multiplier=[0.3, 0.3]))
inverse_temperatures = 10.**tf.linspace(0., -2., 4)
step_sizes = tf.constant([0.3, 0.6, 1.2, 2.4])
def make_kernel_fn(target_log_prob_fn, seed):
kernel = tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=step_sizes[make_kernel_fn.idx],
num_leapfrog_steps=2)
make_kernel_fn.idx += 1
return kernel
# TODO(b/124770732): Remove this hack.
make_kernel_fn.idx = 0
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target.log_prob, autograph=False),
# Verified that test fails if inverse_temperatures = [1.]
inverse_temperatures=inverse_temperatures,
make_kernel_fn=make_kernel_fn,
seed=_set_seed(888))
def _trace_log_accept_ratio(state, results):
del state
return [r.log_accept_ratio for r in results.sampled_replica_results]
num_results = 1000
samples, log_accept_ratios = tfp.mcmc.sample_chain(
num_results=num_results,
# Start at one of the modes, in order to make mode jumping necessary
# if we want to pass test.
current_state=np.ones(2, dtype=dtype),
kernel=remc,
num_burnin_steps=500,
trace_fn=_trace_log_accept_ratio,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results, 2), samples.shape)
log_accept_ratios = [
tf.reduce_mean(input_tensor=tf.exp(tf.minimum(0., lar)))
for lar in log_accept_ratios
]
sample_mean = tf.reduce_mean(input_tensor=samples, axis=0)
sample_std = tf.sqrt(
tf.reduce_mean(
input_tensor=tf.math.squared_difference(samples, sample_mean),
axis=0))
[sample_mean_, sample_std_, log_accept_ratios_] = self.evaluate(
[sample_mean, sample_std, log_accept_ratios])
tf1.logging.vlog(1, 'log_accept_ratios: %s eager: %s',
log_accept_ratios_, tf.executing_eagerly())
self.assertAllClose(sample_mean_, [0., 0.], atol=0.3, rtol=0.3)
self.assertAllClose(
sample_std_, [np.sqrt(1.09), np.sqrt(1.09)], atol=0.1, rtol=0.1)
def testMultipleCorrelatedStatesWithNoBatchDims(self):
dtype = np.float32
true_mean = dtype([0, 0])
true_cov = dtype([[1, 0.5], [0.5, 1]])
# Use LinearOperatorLowerTriangular to get broadcasting ability.
linop = tf.linalg.LinearOperatorLowerTriangular(
tf.linalg.cholesky(true_cov))
num_results = 1000
def target_log_prob(x, y):
# Corresponds to unnormalized MVN.
# z = matmul(inv(chol(true_cov)), [x, y] - true_mean)
xy = tf.stack([x, y], axis=-1) - true_mean
z = linop.solvevec(xy)
return -0.5 * tf.reduce_sum(input_tensor=z**2., axis=-1)
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=[0.5, 0.5],
num_leapfrog_steps=5)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(target_log_prob, autograph=False),
inverse_temperatures=[1., 0.9, 0.8],
make_kernel_fn=make_kernel_fn,
seed=_set_seed(3))
states = tfp.mcmc.sample_chain(
num_results=num_results,
# batch_shape = [] for each initial state
current_state=[1., 1.],
kernel=remc,
num_burnin_steps=100,
trace_fn=None,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results,), states[0].shape)
self.assertAllEqual((num_results,), states[1].shape)
states = tf.stack(states, axis=-1)
self.assertEqual(num_results, tf.compat.dimension_value(states.shape[0]))
sample_mean = tf.reduce_mean(input_tensor=states, axis=0)
x = states - sample_mean
sample_cov = tf.matmul(x, x, transpose_a=True) / dtype(num_results)
sample_mean_, sample_cov_ = self.evaluate([sample_mean, sample_cov])
self.assertAllClose(true_mean, sample_mean_, atol=0.06, rtol=0.)
self.assertAllClose(true_cov, sample_cov_, atol=0., rtol=0.2)
def testNormalWithTwoBatchDimsAndThreeReplicas(self):
"""Sampling from the Standard Normal Distribution."""
# Small scale and well-separated modes mean we need replica exchange to
# work or else tests fail.
loc = np.array(
[
# Use 3-D normals, ensuring batch and event sizes don't broadcast.
[-1., -1., -1.], # loc of first batch member
[1., 1., 1.] # loc of second batch member
],
dtype=np.float32)
scale_identity_multiplier = [0.5, 0.8]
target = tfd.MultivariateNormalDiag(
loc=loc, scale_identity_multiplier=scale_identity_multiplier)
def make_kernel_fn(target_log_prob_fn, seed):
return tfp.mcmc.HamiltonianMonteCarlo(
target_log_prob_fn=target_log_prob_fn,
seed=seed,
step_size=0.15,
num_leapfrog_steps=5)
remc = tfp.mcmc.ReplicaExchangeMC(
target_log_prob_fn=tf.function(
lambda x: target.copy().log_prob(x), autograph=False),
inverse_temperatures=[1., 0.9, 0.8],
make_kernel_fn=make_kernel_fn,
seed=_set_seed(700))
num_results = 500
states = tfp.mcmc.sample_chain(
num_results=num_results,
current_state=loc[::-1], # Batch members start at wrong mode!
kernel=remc,
num_burnin_steps=50,
trace_fn=None,
parallel_iterations=1) # For determinism.
self.assertAllEqual((num_results, 2, 3), states.shape)
states_ = self.evaluate(states)
self.assertAllClose(loc, states_.mean(axis=0), rtol=0.2)
self.assertAllClose(
[[0.5**2, 0., 0.], [0., 0.5**2, 0.], [0., 0., 0.5**2]],
np.cov(states_[:, 0, :], rowvar=False),
atol=0.2)
self.assertAllClose(
[[0.8**2, 0., 0.], [0., 0.8**2, 0.], [0., 0., 0.8**2]],
np.cov(states_[:, 1, :], rowvar=False),
atol=0.2)
###############################################################################
# Potential.py: top-level class for a full potential
#
# Evaluate by calling the instance: Pot(R,z,phi)
#
# API for Potentials:
# function _evaluate(self,R,z,phi) returns Phi(R,z,phi)
# for orbit integration you need
# function _Rforce(self,R,z,phi) return -d Phi d R
# function _zforce(self,R,z,phi) return - d Phi d Z
# density
# function _dens(self,R,z,phi) returns the density rho(R,z,phi)
# for epicycle frequency
# function _R2deriv(self,R,z,phi) return d2 Phi dR2
###############################################################################
from __future__ import division, print_function
import os, os.path
import pickle
from functools import wraps
import warnings
import numpy
from scipy import optimize, integrate
from ..util import plot, coords, conversion
from ..util.conversion import velocity_in_kpcGyr, \
physical_conversion, potential_physical_input, freq_in_Gyr, \
get_physical
from ..util import galpyWarning
from .plotRotcurve import plotRotcurve, vcirc
from .plotEscapecurve import _INF, plotEscapecurve
from .DissipativeForce import DissipativeForce, _isDissipative
from .Force import Force, _APY_LOADED
if _APY_LOADED:
from astropy import units
def check_potential_inputs_not_arrays(func):
"""
NAME:
check_potential_inputs_not_arrays
PURPOSE:
Decorator to check inputs and throw TypeError if any of the inputs are arrays for Potentials that do not support array evaluation
HISTORY:
2017-summer - Written for SpiralArmsPotential - <NAME> (UBC)
2019-05-23 - Moved to Potential for more general use - Bovy (UofT)
"""
@wraps(func)
def func_wrapper(self,R,z,phi,t):
if (hasattr(R,'shape') and R.shape != () and len(R) > 1) \
or (hasattr(z,'shape') and z.shape != () and len(z) > 1) \
or (hasattr(phi,'shape') and phi.shape != () and len(phi) > 1) \
or (hasattr(t,'shape') and t.shape != () and len(t) > 1):
raise TypeError('Methods in {} do not accept array inputs. Please input scalars'.format(self.__class__.__name__))
return func(self,R,z,phi,t)
return func_wrapper
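# A short sketch of how the decorator above is meant to be used (hypothetical
# subclass; only the decoration pattern is taken from this module). The wrapped
# method then raises TypeError when any of R, z, phi, t is a length > 1 array:
#
#   class SomeScalarOnlyPotential(Potential):
#       @check_potential_inputs_not_arrays
#       def _evaluate(self, R, z, phi, t):
#           ...  # scalar-only implementation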
class Potential(Force):
"""Top-level class for a potential"""
def __init__(self,amp=1.,ro=None,vo=None,amp_units=None):
"""
NAME:
__init__
PURPOSE:
INPUT:
amp - amplitude to be applied when evaluating the potential and its forces
amp_units - ('mass', 'velocity2', 'density') type of units that amp should have if it has units
OUTPUT:
HISTORY:
"""
Force.__init__(self,amp=amp,ro=ro,vo=vo,amp_units=amp_units)
self.dim= 3
self.isRZ= True
self.isNonAxi= False
self.hasC= False
self.hasC_dxdv= False
self.hasC_dens= False
return None
@potential_physical_input
@physical_conversion('energy',pop=True)
def __call__(self,R,z,phi=0.,t=0.,dR=0,dphi=0):
"""
NAME:
__call__
PURPOSE:
evaluate the potential at (R,z,phi,t)
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
Phi(R,z,t)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
return self._call_nodecorator(R,z,phi=phi,t=t,dR=dR,dphi=dphi)
def _call_nodecorator(self,R,z,phi=0.,t=0.,dR=0.,dphi=0):
if dR == 0 and dphi == 0:
try:
rawOut= self._evaluate(R,z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_evaluate' function not implemented for this potential")
if rawOut is None: return rawOut
else: return self._amp*rawOut
elif dR == 1 and dphi == 0:
return -self.Rforce(R,z,phi=phi,t=t,use_physical=False)
elif dR == 0 and dphi == 1:
return -self.phiforce(R,z,phi=phi,t=t,use_physical=False)
elif dR == 2 and dphi == 0:
return self.R2deriv(R,z,phi=phi,t=t,use_physical=False)
elif dR == 0 and dphi == 2:
return self.phi2deriv(R,z,phi=phi,t=t,use_physical=False)
elif dR == 1 and dphi == 1:
return self.Rphideriv(R,z,phi=phi,t=t,use_physical=False)
elif dR != 0 or dphi != 0:
raise NotImplementedError('Higher-order derivatives not implemented for this potential')
@potential_physical_input
@physical_conversion('force',pop=True)
def Rforce(self,R,z,phi=0.,t=0.):
"""
NAME:
Rforce
PURPOSE:
evaluate cylindrical radial force F_R (R,z)
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
F_R (R,z,phi,t)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
return self._Rforce_nodecorator(R,z,phi=phi,t=t)
def _Rforce_nodecorator(self,R,z,phi=0.,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._Rforce(R,z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_Rforce' function not implemented for this potential")
@potential_physical_input
@physical_conversion('force',pop=True)
def zforce(self,R,z,phi=0.,t=0.):
"""
NAME:
zforce
PURPOSE:
evaluate the vertical force F_z (R,z,t)
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
F_z (R,z,phi,t)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
return self._zforce_nodecorator(R,z,phi=phi,t=t)
def _zforce_nodecorator(self,R,z,phi=0.,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._zforce(R,z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_zforce' function not implemented for this potential")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def r2deriv(self,R,z,phi=0.,t=0.):
"""
NAME:
r2deriv
PURPOSE:
evaluate the second spherical radial derivative
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2phi/dr2
HISTORY:
2018-03-21 - Written - Webb (UofT)
"""
r= numpy.sqrt(R**2.+z**2.)
return (self.R2deriv(R,z,phi=phi,t=t,use_physical=False)*R/r\
+self.Rzderiv(R,z,phi=phi,t=t,use_physical=False)*z/r)*R/r\
+(self.Rzderiv(R,z,phi=phi,t=t,use_physical=False)*R/r\
+self.z2deriv(R,z,phi=phi,t=t,use_physical=False)*z/r)*z/r
@potential_physical_input
@physical_conversion('density',pop=True)
def dens(self,R,z,phi=0.,t=0.,forcepoisson=False):
"""
NAME:
dens
PURPOSE:
evaluate the density rho(R,z,t)
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
KEYWORDS:
forcepoisson= if True, calculate the density through the Poisson equation, even if an explicit expression for the density exists
OUTPUT:
rho (R,z,phi,t)
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
try:
if forcepoisson: raise AttributeError #Hack!
return self._amp*self._dens(R,z,phi=phi,t=t)
except AttributeError:
#Use the Poisson equation to get the density
return (-self.Rforce(R,z,phi=phi,t=t,use_physical=False)/R
+self.R2deriv(R,z,phi=phi,t=t,use_physical=False)
+self.phi2deriv(R,z,phi=phi,t=t,use_physical=False)/R**2.
+self.z2deriv(R,z,phi=phi,t=t,use_physical=False))/4./numpy.pi
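# A brief usage sketch for dens above (hypothetical instance; any Potential
# subclass with the needed second derivatives behaves the same way):
#
#   >>> from galpy.potential import MiyamotoNagaiPotential
#   >>> mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
#   >>> mp.dens(1., 0.)                      # analytic density, if implemented
#   >>> mp.dens(1., 0., forcepoisson=True)   # same quantity via the Poisson equation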
@potential_physical_input
@physical_conversion('surfacedensity',pop=True)
def surfdens(self,R,z,phi=0.,t=0.,forcepoisson=False):
"""
NAME:
surfdens
PURPOSE:
evaluate the surface density :math:`\\Sigma(R,z,\\phi,t) = \\int_{-z}^{+z} dz' \\rho(R,z',\\phi,t)`
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
KEYWORDS:
forcepoisson= if True, calculate the surface density through the Poisson equation, even if an explicit expression for the surface density exists
OUTPUT:
Sigma (R,z,phi,t)
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
try:
if forcepoisson: raise AttributeError #Hack!
return self._amp*self._surfdens(R,z,phi=phi,t=t)
except AttributeError:
#Use the Poisson equation to get the surface density
return (-self.zforce(R,numpy.fabs(z),phi=phi,t=t,use_physical=False)
+integrate.quad(\
lambda x: -self.Rforce(R,x,phi=phi,t=t,use_physical=False)/R
+self.R2deriv(R,x,phi=phi,t=t,use_physical=False)
+self.phi2deriv(R,x,phi=phi,t=t,use_physical=False)/R**2.,
0.,numpy.fabs(z))[0])/2./numpy.pi
def _surfdens(self,R,z,phi=0.,t=0.):
"""
NAME:
_surfdens
PURPOSE:
evaluate the surface density for this potential
INPUT:
R - Galactocentric cylindrical radius
z - vertical height
phi - azimuth
t - time
OUTPUT:
the surface density
HISTORY:
2018-08-19 - Written - Bovy (UofT)
"""
return 2.*integrate.quad(lambda x: self._dens(R,x,phi=phi,t=t),0,z)[0]
@potential_physical_input
@physical_conversion('mass',pop=True)
def mass(self,R,z=None,t=0.,forceint=False):
"""
NAME:
mass
PURPOSE:
evaluate the mass enclosed
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z= (None) vertical height (can be Quantity)
t - time (optional; can be Quantity)
KEYWORDS:
forceint= if True, calculate the mass through integration of the density, even if an explicit expression for the mass exists
OUTPUT:
1) for spherical potentials: M(<R) [or if z is None], when the mass is implemented explicitly, the mass enclosed within r = sqrt(R^2+z^2) is returned when not z is None; forceint will integrate between -z and z, so the two are inconsistent (If you care to have this changed, raise an issue on github)
2) for axisymmetric potentials: M(<R,<fabs(Z))
HISTORY:
2014-01-29 - Written - Bovy (IAS)
2019-08-15 - Added spherical warning - Bovy (UofT)
"""
if self.isNonAxi:
raise NotImplementedError('mass for non-axisymmetric potentials is not currently supported')
try:
if forceint: raise AttributeError #Hack!
return self._amp*self._mass(R,z=z,t=t)
except AttributeError:
#Use numerical integration to get the mass
if z is None:
warnings.warn("Vertical height z not specified for mass "
"calculation...assuming spherical potential"
" (for the mass of axisymmetric potentials"
", specify z)",galpyWarning)
return 4.*numpy.pi\
*integrate.quad(lambda x: x**2.\
*self.dens(x,0.,t=t,
use_physical=False),
0.,R)[0]
else:
return 4.*numpy.pi\
*integrate.dblquad(lambda y,x: x\
*self.dens(x,y,t=t,use_physical=False),
0.,R,lambda x: 0., lambda x: z)[0]
@physical_conversion('mass',pop=False)
def mvir(self,H=70.,Om=0.3,t=0.,overdens=200.,wrtcrit=False,
forceint=False,ro=None,vo=None,
use_physical=False): # use_physical necessary bc of pop=False, does nothing inside
"""
NAME:
mvir
PURPOSE:
calculate the virial mass
INPUT:
H= (default: 70) Hubble constant in km/s/Mpc
Om= (default: 0.3) Omega matter
overdens= (200) overdensity which defines the virial radius
wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density
ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc))
vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s))
KEYWORDS:
forceint= if True, calculate the mass through integration of the density, even if an explicit expression for the mass exists
OUTPUT:
M(<rvir)
HISTORY:
2014-09-12 - Written - Bovy (IAS)
"""
if ro is None: ro= self._ro
if vo is None: vo= self._vo
#Evaluate the virial radius
try:
rvir= self.rvir(H=H,Om=Om,t=t,overdens=overdens,wrtcrit=wrtcrit,
use_physical=False,ro=ro,vo=vo)
except AttributeError:
raise AttributeError("This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius")
return self.mass(rvir,t=t,forceint=forceint,use_physical=False,ro=ro,vo=vo)
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def R2deriv(self,R,Z,phi=0.,t=0.):
"""
NAME:
R2deriv
PURPOSE:
evaluate the second radial derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2phi/dR2
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
try:
return self._amp*self._R2deriv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_R2deriv' function not implemented for this potential")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def z2deriv(self,R,Z,phi=0.,t=0.):
"""
NAME:
z2deriv
PURPOSE:
evaluate the second vertical derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2phi/dz2
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
"""
try:
return self._amp*self._z2deriv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_z2deriv' function not implemented for this potential")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def Rzderiv(self,R,Z,phi=0.,t=0.):
"""
NAME:
Rzderiv
PURPOSE:
evaluate the mixed R,z derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2phi/dz/dR
HISTORY:
2013-08-26 - Written - Bovy (IAS)
"""
try:
return self._amp*self._Rzderiv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
raise PotentialError("'_Rzderiv' function not implemented for this potential")
def normalize(self,norm):
"""
NAME:
normalize
PURPOSE:
normalize a potential in such a way that vc(R=1,z=0)=1., or a
fraction of this
INPUT:
norm - normalize such that Rforce(R=1,z=0) equals 'norm' times the force necessary to make vc(R=1,z=0)=1 (if norm=True, norm=1 is used)
OUTPUT:
(none)
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
self._amp*= norm/numpy.fabs(self.Rforce(1.,0.,use_physical=False))
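# A brief sketch of normalize above (hypothetical amplitude): after the call,
# -Rforce(1,0) equals the requested fraction, i.e. this potential contributes
# that fraction of vc(1,0)^2 = 1 in internal units:
#
#   >>> from galpy.potential import MiyamotoNagaiPotential
#   >>> mp = MiyamotoNagaiPotential(a=0.5, b=0.0375)
#   >>> mp.normalize(0.6)   # now -mp.Rforce(1., 0.) == 0.6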
@potential_physical_input
@physical_conversion('energy',pop=True)
def phiforce(self,R,z,phi=0.,t=0.):
"""
NAME:
phiforce
PURPOSE:
evaluate the azimuthal force F_phi = -d Phi / d phi (R,z,phi,t) (note that this is a torque, not a force!)
INPUT:
R - Cylindrical Galactocentric radius (can be Quantity)
z - vertical height (can be Quantity)
phi - azimuth (rad; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
F_phi (R,z,phi,t)
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
return self._phiforce_nodecorator(R,z,phi=phi,t=t)
def _phiforce_nodecorator(self,R,z,phi=0.,t=0.):
# Separate, so it can be used during orbit integration
try:
return self._amp*self._phiforce(R,z,phi=phi,t=t)
except AttributeError: #pragma: no cover
if self.isNonAxi:
raise PotentialError("'_phiforce' function not implemented for this non-axisymmetric potential")
return 0.
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def phi2deriv(self,R,Z,phi=0.,t=0.):
"""
NAME:
phi2deriv
PURPOSE:
evaluate the second azimuthal derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2Phi/dphi2
HISTORY:
2013-09-24 - Written - Bovy (IAS)
"""
try:
return self._amp*self._phi2deriv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
if self.isNonAxi:
raise PotentialError("'_phi2deriv' function not implemented for this non-axisymmetric potential")
return 0.
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def Rphideriv(self,R,Z,phi=0.,t=0.):
"""
NAME:
Rphideriv
PURPOSE:
evaluate the mixed radial, azimuthal derivative
INPUT:
R - Galactocentric radius (can be Quantity)
Z - vertical height (can be Quantity)
phi - Galactocentric azimuth (can be Quantity)
t - time (can be Quantity)
OUTPUT:
d2Phi/dphidR
HISTORY:
2014-06-30 - Written - Bovy (IAS)
"""
try:
return self._amp*self._Rphideriv(R,Z,phi=phi,t=t)
except AttributeError: #pragma: no cover
if self.isNonAxi:
raise PotentialError("'_Rphideriv' function not implemented for this non-axisymmetric potential")
return 0.
def toPlanar(self):
"""
NAME:
toPlanar
PURPOSE:
convert a 3D potential into a planar potential in the mid-plane
INPUT:
(none)
OUTPUT:
planarPotential
HISTORY:
unknown
"""
from ..potential import toPlanarPotential
return toPlanarPotential(self)
def toVertical(self,R,phi=None,t0=0.):
"""
NAME:
toVertical
PURPOSE:
convert a 3D potential into a linear (vertical) potential at R
INPUT:
R - Galactocentric radius at which to create the vertical potential (can be Quantity)
phi= (None) Galactocentric azimuth at which to create the vertical potential (can be Quantity); required for non-axisymmetric potential
t0= (0.) time at which to create the vertical potential (can be Quantity)
OUTPUT:
linear (vertical) potential: Phi(z,phi,t) = Phi(R,z,phi,t)-Phi(R,0.,phi0,t0) where phi0 and t0 are the phi and t inputs
HISTORY:
unknown
"""
from ..potential import toVerticalPotential
return toVerticalPotential(self,R,phi=phi,t0=t0)
def plot(self,t=0.,rmin=0.,rmax=1.5,nrs=21,zmin=-0.5,zmax=0.5,nzs=21,
effective=False,Lz=None,phi=None,xy=False,
xrange=None,yrange=None,
justcontours=False,levels=None,cntrcolors=None,
ncontours=21,savefilename=None):
"""
NAME:
plot
PURPOSE:
plot the potential
INPUT:
t= time to plot potential at
rmin= minimum R (can be Quantity) [xmin if xy]
rmax= maximum R (can be Quantity) [ymax if xy]
nrs= grid in R
zmin= minimum z (can be Quantity) [ymin if xy]
zmax= maximum z (can be Quantity) [ymax if xy]
nzs= grid in z
phi= (None) azimuth to use for non-axisymmetric potentials
xy= (False) if True, plot the potential in X-Y
effective= (False) if True, plot the effective potential Phi + Lz^2/2/R^2
Lz= (None) angular momentum to use for the effective potential when effective=True
justcontours= (False) if True, just plot contours
savefilename - save to or restore from this savefile (pickle)
xrange, yrange= can be specified independently from rmin,zmin, etc.
levels= (None) contours to plot
ncontours - number of contours when levels is None
cntrcolors= (None) colors of the contours (single color or array with length ncontours)
OUTPUT:
plot to output device
HISTORY:
2010-07-09 - Written - Bovy (NYU)
2014-04-08 - Added effective= - Bovy (IAS)
"""
rmin= conversion.parse_length(rmin,ro=self._ro)
rmax= conversion.parse_length(rmax,ro=self._ro)
zmin= conversion.parse_length(zmin,ro=self._ro)
zmax= conversion.parse_length(zmax,ro=self._ro)
if xrange is None: xrange= [rmin,rmax]
if yrange is None: yrange= [zmin,zmax]
if savefilename is not None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potRz= pickle.load(savefile)
Rs= pickle.load(savefile)
zs= pickle.load(savefile)
savefile.close()
else:
if effective and Lz is None:
raise RuntimeError("When effective=True, you need to specify Lz=")
Rs= numpy.linspace(xrange[0],xrange[1],nrs)
zs= numpy.linspace(yrange[0],yrange[1],nzs)
potRz= numpy.zeros((nrs,nzs))
for ii in range(nrs):
for jj in range(nzs):
if xy:
R,phi,z= coords.rect_to_cyl(Rs[ii],zs[jj],0.)
else:
R,z= Rs[ii], zs[jj]
potRz[ii,jj]= evaluatePotentials(self,
R,z,t=t,phi=phi,
use_physical=False)
if effective:
potRz[ii,:]+= 0.5*Lz**2/Rs[ii]**2.
#Don't plot outside of the desired range
potRz[Rs < rmin,:]= numpy.nan
potRz[Rs > rmax,:]= numpy.nan
potRz[:,zs < zmin]= numpy.nan
potRz[:,zs > zmax]= numpy.nan
if savefilename is not None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potRz,savefile)
pickle.dump(Rs,savefile)
pickle.dump(zs,savefile)
savefile.close()
if xy:
xlabel= r'$x/R_0$'
ylabel= r'$y/R_0$'
else:
xlabel=r"$R/R_0$"
ylabel=r"$z/R_0$"
if levels is None:
levels= numpy.linspace(numpy.nanmin(potRz),numpy.nanmax(potRz),ncontours)
if cntrcolors is None:
cntrcolors= 'k'
return plot.dens2d(potRz.T,origin='lower',cmap='gist_gray',contours=True,
xlabel=xlabel,ylabel=ylabel,
xrange=xrange,
yrange=yrange,
aspect=.75*(rmax-rmin)/(zmax-zmin),
cntrls='-',
justcontours=justcontours,
levels=levels,cntrcolors=cntrcolors)
def plotDensity(self,t=0.,
rmin=0.,rmax=1.5,nrs=21,zmin=-0.5,zmax=0.5,nzs=21,
phi=None,xy=False,
ncontours=21,savefilename=None,aspect=None,log=False,
justcontours=False):
"""
NAME:
plotDensity
PURPOSE:
plot the density of this potential
INPUT:
t= time to plot potential at
rmin= minimum R (can be Quantity) [xmin if xy]
rmax= maximum R (can be Quantity) [ymax if xy]
nrs= grid in R
zmin= minimum z (can be Quantity) [ymin if xy]
zmax= maximum z (can be Quantity) [ymax if xy]
nzs= grid in z
phi= (None) azimuth to use for non-axisymmetric potentials
xy= (False) if True, plot the density in X-Y
ncontours= number of contours
justcontours= (False) if True, just plot contours
savefilename= save to or restore from this savefile (pickle)
log= if True, plot the log density
OUTPUT:
plot to output device
HISTORY:
2014-01-05 - Written - Bovy (IAS)
"""
return plotDensities(self,rmin=rmin,rmax=rmax,nrs=nrs,
zmin=zmin,zmax=zmax,nzs=nzs,phi=phi,xy=xy,t=t,
ncontours=ncontours,savefilename=savefilename,
justcontours=justcontours,
aspect=aspect,log=log)
def plotSurfaceDensity(self,t=0.,z=numpy.inf,
xmin=0.,xmax=1.5,nxs=21,ymin=-0.5,ymax=0.5,nys=21,
ncontours=21,savefilename=None,aspect=None,
log=False,justcontours=False):
"""
NAME:
plotSurfaceDensity
PURPOSE:
plot the surface density of this potential
INPUT:
t= time to plot potential at
z= (inf) height between which to integrate the density (from -z to z; can be a Quantity)
xmin= minimum x (can be Quantity)
xmax= maximum x (can be Quantity)
nxs= grid in x
ymin= minimum y (can be Quantity)
ymax= maximum y (can be Quantity)
nys= grid in y
ncontours= number of contours
justcontours= (False) if True, just plot contours
savefilename= save to or restore from this savefile (pickle)
log= if True, plot the log density
OUTPUT:
plot to output device
HISTORY:
2020-08-19 - Written - Bovy (UofT)
"""
return plotSurfaceDensities(self,xmin=xmin,xmax=xmax,nxs=nxs,
ymin=ymin,ymax=ymax,nys=nys,t=t,z=z,
ncontours=ncontours,
savefilename=savefilename,
justcontours=justcontours,
aspect=aspect,log=log)
@potential_physical_input
@physical_conversion('velocity',pop=True)
def vcirc(self,R,phi=None,t=0.):
"""
NAME:
vcirc
PURPOSE:
calculate the circular velocity at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
phi= (None) azimuth to use for non-axisymmetric potentials
t - time (optional; can be Quantity)
OUTPUT:
circular rotation velocity
HISTORY:
2011-10-09 - Written - Bovy (IAS)
2016-06-15 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)
"""
return numpy.sqrt(R*-self.Rforce(R,0.,phi=phi,t=t,use_physical=False))
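# A short sketch of the relation implemented above, vc(R)^2 = -R * F_R(R, 0)
# (hypothetical instance, internal units):
#
#   >>> from galpy.potential import MiyamotoNagaiPotential
#   >>> mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
#   >>> mp.vcirc(1.)   # = sqrt(-1. * mp.Rforce(1., 0.)) = 1. by construction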
@potential_physical_input
@physical_conversion('frequency',pop=True)
def dvcircdR(self,R,phi=None,t=0.):
"""
NAME:
dvcircdR
PURPOSE:
calculate the derivative of the circular velocity at R wrt R
in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
phi= (None) azimuth to use for non-axisymmetric potentials
t - time (optional; can be Quantity)
OUTPUT:
derivative of the circular rotation velocity wrt R
HISTORY:
2013-01-08 - Written - Bovy (IAS)
2016-06-28 - Added phi= keyword for non-axisymmetric potential - Bovy (UofT)
"""
return 0.5*(-self.Rforce(R,0.,phi=phi,t=t,use_physical=False)\
+R*self.R2deriv(R,0.,phi=phi,t=t,use_physical=False))\
/self.vcirc(R,phi=phi,t=t,use_physical=False)
@potential_physical_input
@physical_conversion('frequency',pop=True)
def omegac(self,R,t=0.):
"""
NAME:
omegac
PURPOSE:
calculate the circular angular speed at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
circular angular speed
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
return numpy.sqrt(-self.Rforce(R,0.,t=t,use_physical=False)/R)
@potential_physical_input
@physical_conversion('frequency',pop=True)
def epifreq(self,R,t=0.):
"""
NAME:
epifreq
PURPOSE:
calculate the epicycle frequency at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
epicycle frequency
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
return numpy.sqrt(self.R2deriv(R,0.,t=t,use_physical=False)\
-3./R*self.Rforce(R,0.,t=t,use_physical=False))
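# Sketch of the epicycle frequency relation implemented above,
# kappa^2 = d^2Phi/dR^2 - (3/R) F_R, evaluated at z=0 (hypothetical instance):
#
#   >>> from galpy.potential import LogarithmicHaloPotential
#   >>> lp = LogarithmicHaloPotential(normalize=1.)
#   >>> lp.epifreq(1.)   # ~= sqrt(2) for a flat rotation curve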
@potential_physical_input
@physical_conversion('frequency',pop=True)
def verticalfreq(self,R,t=0.):
"""
NAME:
verticalfreq
PURPOSE:
calculate the vertical frequency at R in this potential
INPUT:
R - Galactocentric radius (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
vertical frequency
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
"""
return numpy.sqrt(self.z2deriv(R,0.,t=t,use_physical=False))
@physical_conversion('position',pop=True)
def lindbladR(self,OmegaP,m=2,t=0.,**kwargs):
"""
NAME:
lindbladR
PURPOSE:
calculate the radius of a Lindblad resonance
INPUT:
OmegaP - pattern speed (can be Quantity)
m= order of the resonance (as in m(O-Op)=kappa; negative m for outer);
use m='corotation' for corotation
+scipy.optimize.brentq xtol,rtol,maxiter kwargs
t - time (optional; can be Quantity)
OUTPUT:
radius of Lindblad resonance, None if there is no resonance
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
OmegaP= conversion.parse_frequency(OmegaP,ro=self._ro,vo=self._vo)
return lindbladR(self,OmegaP,m=m,t=t,use_physical=False,**kwargs)
@potential_physical_input
@physical_conversion('velocity',pop=True)
def vesc(self,R,t=0.):
"""
NAME:
vesc
PURPOSE:
calculate the escape velocity at R for this potential
INPUT:
R - Galactocentric radius (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
escape velocity
HISTORY:
2011-10-09 - Written - Bovy (IAS)
"""
return numpy.sqrt(2.*(self(_INF,0.,t=t,use_physical=False)\
-self(R,0.,t=t,use_physical=False)))
@physical_conversion('position',pop=True)
def rl(self,lz,t=0.):
"""
NAME:
rl
PURPOSE:
calculate the radius of a circular orbit of Lz
INPUT:
lz - Angular momentum (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
radius
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
NOTE:
seems to take about ~0.5 ms for a Miyamoto-Nagai potential;
~0.75 ms for a MWPotential
"""
lz= conversion.parse_angmom(lz,ro=self._ro,vo=self._vo)
return rl(self,lz,t=t,use_physical=False)
@potential_physical_input
@physical_conversion('dimensionless',pop=True)
def flattening(self,R,z,t=0.):
"""
NAME:
flattening
PURPOSE:
calculate the potential flattening, defined as sqrt(fabs(z/R F_R/F_z))
INPUT:
R - Galactocentric radius (can be Quantity)
z - height (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
flattening
HISTORY:
2012-09-13 - Written - Bovy (IAS)
"""
return numpy.sqrt(numpy.fabs(z/R*self.Rforce(R,z,t=t,use_physical=False)\
/self.zforce(R,z,t=t,use_physical=False)))
@physical_conversion('velocity',pop=True)
def vterm(self,l,t=0.,deg=True):
"""
NAME:
vterm
PURPOSE:
calculate the terminal velocity at l in this potential
INPUT:
l - Galactic longitude [deg/rad; can be Quantity)
t - time (optional; can be Quantity)
deg= if True (default), l in deg
OUTPUT:
terminal velocity
HISTORY:
2013-05-31 - Written - Bovy (IAS)
"""
if _APY_LOADED and isinstance(l,units.Quantity):
l= conversion.parse_angle(l)
deg= False
if deg:
sinl= numpy.sin(l/180.*numpy.pi)
else:
sinl= numpy.sin(l)
return sinl*(self.omegac(numpy.fabs(sinl),t=t,use_physical=False)\
-self.omegac(1.,t=t,use_physical=False))
def plotRotcurve(self,*args,**kwargs):
"""
NAME:
plotRotcurve
PURPOSE:
plot the rotation curve for this potential (in the z=0 plane for
non-spherical potentials)
INPUT:
Rrange - range (can be Quantity)
grid= number of points to plot
savefilename=- save to or restore from this savefile (pickle)
+galpy.util.plot.plot(*args,**kwargs)
OUTPUT:
plot to output device
HISTORY:
2010-07-10 - Written - Bovy (NYU)
"""
return plotRotcurve(self,*args,**kwargs)
def plotEscapecurve(self,*args,**kwargs):
"""
NAME:
plotEscapecurve
PURPOSE:
plot the escape velocity curve for this potential
(in the z=0 plane for non-spherical potentials)
INPUT:
Rrange - range (can be Quantity)
grid= number of points to plot
savefilename= save to or restore from this savefile (pickle)
+galpy.util.plot.plot(*args,**kwargs)
OUTPUT:
plot to output device
HISTORY:
2010-08-08 - Written - Bovy (NYU)
"""
return plotEscapecurve(self.toPlanar(),*args,**kwargs)
def conc(self,H=70.,Om=0.3,t=0.,overdens=200.,wrtcrit=False,
ro=None,vo=None):
"""
NAME:
conc
PURPOSE:
return the concentration
INPUT:
H= (default: 70) Hubble constant in km/s/Mpc
Om= (default: 0.3) Omega matter
t - time (optional; can be Quantity)
overdens= (200) overdensity which defines the virial radius
wrtcrit= (False) if True, the overdensity is wrt the critical density rather than the mean matter density
ro= distance scale in kpc or as Quantity (default: object-wide, which if not set is 8 kpc))
vo= velocity scale in km/s or as Quantity (default: object-wide, which if not set is 220 km/s))
OUTPUT:
concentration (scale/rvir)
HISTORY:
2014-04-03 - Written - Bovy (IAS)
"""
if ro is None: ro= self._ro
if vo is None: vo= self._vo
try:
return self.rvir(H=H,Om=Om,t=t,overdens=overdens,wrtcrit=wrtcrit,
ro=ro,vo=vo,use_physical=False)/self._scale
except AttributeError:
raise AttributeError("This potential does not have a '_scale' defined to base the concentration on or does not support calculating the virial radius")
def nemo_accname(self):
"""
NAME:
nemo_accname
PURPOSE:
return the accname potential name for use of this potential with NEMO
INPUT:
(none)
OUTPUT:
Acceleration name
HISTORY:
2014-12-18 - Written - Bovy (IAS)
"""
try:
return self._nemo_accname
except AttributeError:
raise AttributeError('NEMO acceleration name not supported for %s' % self.__class__.__name__)
def nemo_accpars(self,vo,ro):
"""
NAME:
nemo_accpars
PURPOSE:
return the accpars potential parameters for use of this potential with NEMO
INPUT:
vo - velocity unit in km/s
ro - length unit in kpc
OUTPUT:
accpars string
HISTORY:
2014-12-18 - Written - Bovy (IAS)
"""
try:
return self._nemo_accpars(vo,ro)
except AttributeError:
raise AttributeError('NEMO acceleration parameters not supported for %s' % self.__class__.__name__)
@potential_physical_input
@physical_conversion('position',pop=True)
def rtide(self,R,z,phi=0.,t=0.,M=None):
"""
NAME:
rtide
PURPOSE:
Calculate the tidal radius for object of mass M assuming a circular orbit as
.. math::
r_t^3 = \\frac{GM_s}{\\Omega^2-\\mathrm{d}^2\\Phi/\\mathrm{d}r^2}
where :math:`M_s` is the cluster mass, :math:`\\Omega` is the circular frequency, and :math:`\\Phi` is the gravitational potential. For non-spherical potentials, we evaluate :math:`\\Omega^2 = (1/r)(\\mathrm{d}\\Phi/\\mathrm{d}r)` and evaluate the derivatives at the given position of the cluster.
INPUT:
R - Galactocentric radius (can be Quantity)
z - height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
M - (default = None) Mass of object (can be Quantity)
OUTPUT:
Tidal Radius
HISTORY:
2018-03-21 - Written - Webb (UofT)
"""
if M is None:
#Make sure an object mass is given
raise PotentialError("Mass parameter M= needs to be set to compute tidal radius")
r= numpy.sqrt(R**2.+z**2.)
omegac2= -self.rforce(R,z,phi=phi,t=t,use_physical=False)/r
d2phidr2= self.r2deriv(R,z,phi=phi,t=t,use_physical=False)
return (M/(omegac2-d2phidr2))**(1./3.)
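# A minimal sketch of rtide above (hypothetical object mass in internal units;
# pass M as a Quantity or set ro/vo for physical output):
#
#   >>> from galpy.potential import MiyamotoNagaiPotential
#   >>> mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
#   >>> mp.rtide(1., 0., M=1e-6)   # tidal radius of a small mass on a circular orbit at R=1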
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def ttensor(self,R,z,phi=0.,t=0.,eigenval=False):
"""
NAME:
ttensor
PURPOSE:
Calculate the tidal tensor Tij = -d^2(Phi)/(dx_i dx_j)
INPUT:
R - Galactocentric radius (can be Quantity)
z - height (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
eigenval - return eigenvalues if true (optional; boolean)
OUTPUT:
Tidal Tensor
HISTORY:
2018-03-21 - Written - Webb (UofT)
"""
if self.isNonAxi:
raise PotentialError("Tidal tensor calculation is currently only implemented for axisymmetric potentials")
#Evaluate forces, angles and derivatives
Rderiv= -self.Rforce(R,z,phi=phi,t=t,use_physical=False)
phideriv= -self.phiforce(R,z,phi=phi,t=t,use_physical=False)
R2deriv= self.R2deriv(R,z,phi=phi,t=t,use_physical=False)
z2deriv= self.z2deriv(R,z,phi=phi,t=t,use_physical=False)
phi2deriv= self.phi2deriv(R,z,phi=phi,t=t,use_physical=False)
Rzderiv= self.Rzderiv(R,z,phi=phi,t=t,use_physical=False)
Rphideriv= self.Rphideriv(R,z,phi=phi,t=t,use_physical=False)
#Temporarily set zphideriv to zero until zphideriv is added to Class
zphideriv=0.0
cosphi=numpy.cos(phi)
sinphi=numpy.sin(phi)
cos2phi=cosphi**2.0
sin2phi=sinphi**2.0
R2=R**2.0
R3=R**3.0
# Tidal tensor
txx= R2deriv*cos2phi-Rphideriv*2.*cosphi*sinphi/R+Rderiv*sin2phi/R\
+phi2deriv*sin2phi/R2+phideriv*2.*cosphi*sinphi/R2
tyx= R2deriv*sinphi*cosphi+Rphideriv*(cos2phi-sin2phi)/R\
-Rderiv*sinphi*cosphi/R-phi2deriv*sinphi*cosphi/R2\
+phideriv*(sin2phi-cos2phi)/R2
tzx=Rzderiv*cosphi-zphideriv*sinphi/R
tyy=R2deriv*sin2phi+Rphideriv*2.*cosphi*sinphi/R+Rderiv*cos2phi/R\
+phi2deriv*cos2phi/R2-phideriv*2.*sinphi*cosphi/R2
txy=tyx
tzy=Rzderiv*sinphi+zphideriv*cosphi/R
txz=tzx
tyz=tzy
tzz=z2deriv
tij=-numpy.array([[txx,txy,txz],[tyx,tyy,tyz],[tzx,tzy,tzz]])
if eigenval:
return numpy.linalg.eigvals(tij)
else:
return tij
@physical_conversion('position',pop=True)
def zvc(self,R,E,Lz,phi=0.,t=0.):
"""
NAME:
zvc
PURPOSE:
Calculate the zero-velocity curve: z such that Phi(R,z) + Lz^2/[2R^2] = E (assumes that F_z(R,z) is negative at positive z, such that there is a single solution)
INPUT:
R - Galactocentric radius (can be Quantity)
E - Energy (can be Quantity)
Lz - Angular momentum (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
z such that Phi(R,z) + Lz^2/[2R^2] = E
HISTORY:
2020-08-20 - Written - Bovy (UofT)
"""
return zvc(self,R,E,Lz,phi=phi,t=t,use_physical=False)
@physical_conversion('position',pop=True)
def zvc_range(self,E,Lz,phi=0.,t=0.):
"""
NAME:
zvc_range
PURPOSE:
Calculate the minimum and maximum radius for which the zero-velocity curve exists for this energy and angular momentum (R such that Phi(R,0) + Lz^2/[2R^2] = E)
INPUT:
E - Energy (can be Quantity)
Lz - Angular momentum (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
Solutions R such that Phi(R,0) + Lz^2/[2R^2] = E
HISTORY:
2020-08-20 - Written - Bovy (UofT)
"""
return zvc_range(self,E,Lz,phi=phi,t=t,use_physical=False)
class PotentialError(Exception): #pragma: no cover
def __init__(self, value):
self.value = value
def __str__(self):
return repr(self.value)
@potential_physical_input
@physical_conversion('energy',pop=True)
def evaluatePotentials(Pot,R,z,phi=None,t=0.,dR=0,dphi=0):
"""
NAME:
evaluatePotentials
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (can be Quantity)
t - time (can be Quantity)
dR=, dphi= - if set to non-zero integers, return the dR-th, dphi-th derivative instead
OUTPUT:
Phi(R,z)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
"""
return _evaluatePotentials(Pot,R,z,phi=phi,t=t,dR=dR,dphi=dphi)
def _evaluatePotentials(Pot,R,z,phi=None,t=0.,dR=0,dphi=0):
"""Raw, undecorated function for internal use"""
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
isList= isinstance(Pot,list)
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot._call_nodecorator(R,z,phi=phi,t=t,dR=dR,dphi=dphi)
return sum
elif isinstance(Pot,Potential):
return Pot._call_nodecorator(R,z,phi=phi,t=t,dR=dR,dphi=dphi)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatePotentials' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('density',pop=True)
def evaluateDensities(Pot,R,z,phi=None,t=0.,forcepoisson=False):
"""
NAME:
evaluateDensities
PURPOSE:
convenience function to evaluate a possible sum of densities
INPUT:
Pot - potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (can be Quantity)
t - time (can be Quantity)
forcepoisson= if True, calculate the density through the Poisson equation, even if an explicit expression for the density exists
OUTPUT:
rho(R,z)
HISTORY:
2010-08-08 - Written - Bovy (NYU)
2013-12-28 - Added forcepoisson - Bovy (IAS)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.dens(R,z,phi=phi,t=t,forcepoisson=forcepoisson,
use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.dens(R,z,phi=phi,t=t,forcepoisson=forcepoisson,
use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateDensities' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('surfacedensity',pop=True)
def evaluateSurfaceDensities(Pot,R,z,phi=None,t=0.,forcepoisson=False):
"""
NAME:
evaluateSurfaceDensities
PURPOSE:
convenience function to evaluate a possible sum of surface densities
INPUT:
Pot - potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (can be Quantity)
t - time (can be Quantity)
forcepoisson= if True, calculate the surface density through the Poisson equation, even if an explicit expression for the surface density exists
OUTPUT:
Sigma(R,z)
HISTORY:
2018-08-20 - Written - Bovy (UofT)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.surfdens(R,z,phi=phi,t=t,forcepoisson=forcepoisson,
use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.surfdens(R,z,phi=phi,t=t,forcepoisson=forcepoisson,
use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateSurfaceDensities' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('force',pop=True)
def evaluateRforces(Pot,R,z,phi=None,t=0.,v=None):
"""
NAME:
evaluateRforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity))
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_R(R,z,phi,t)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2018-03-16 - Added velocity input for dissipative forces - Bovy (UofT)
"""
return _evaluateRforces(Pot,R,z,phi=phi,t=t,v=v)
def _evaluateRforces(Pot,R,z,phi=None,t=0.,v=None):
"""Raw, undecorated function for internal use"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot._Rforce_nodecorator(R,z,phi=phi,t=t,v=v)
else:
sum+= pot._Rforce_nodecorator(R,z,phi=phi,t=t)
return sum
elif isinstance(Pot,Potential):
return Pot._Rforce_nodecorator(R,z,phi=phi,t=t)
elif isinstance(Pot,DissipativeForce):
return Pot._Rforce_nodecorator(R,z,phi=phi,t=t,v=v)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateRforces' is neither a Potential-instance, DissipativeForce-instance or a list of such instances")
@potential_physical_input
@physical_conversion('energy',pop=True)
def evaluatephiforces(Pot,R,z,phi=None,t=0.,v=None):
"""
NAME:
evaluatephiforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_phi(R,z,phi,t)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2018-03-16 - Added velocity input for dissipative forces - Bovy (UofT)
"""
return _evaluatephiforces(Pot,R,z,phi=phi,t=t,v=v)
def _evaluatephiforces(Pot,R,z,phi=None,t=0.,v=None):
"""Raw, undecorated function for internal use"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot._phiforce_nodecorator(R,z,phi=phi,t=t,v=v)
else:
sum+= pot._phiforce_nodecorator(R,z,phi=phi,t=t)
return sum
elif isinstance(Pot,Potential):
return Pot._phiforce_nodecorator(R,z,phi=phi,t=t)
elif isinstance(Pot,DissipativeForce):
return Pot._phiforce_nodecorator(R,z,phi=phi,t=t,v=v)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatephiforces' is neither a Potential-instance, DissipativeForce-instance or a list of such instances")
@potential_physical_input
@physical_conversion('force',pop=True)
def evaluatezforces(Pot,R,z,phi=None,t=0.,v=None):
"""
NAME:
evaluatezforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_z(R,z,phi,t)
HISTORY:
2010-04-16 - Written - Bovy (NYU)
2018-03-16 - Added velocity input for dissipative forces - Bovy (UofT)
"""
return _evaluatezforces(Pot,R,z,phi=phi,t=t,v=v)
def _evaluatezforces(Pot,R,z,phi=None,t=0.,v=None):
"""Raw, undecorated function for internal use"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot._zforce_nodecorator(R,z,phi=phi,t=t,v=v)
else:
sum+= pot._zforce_nodecorator(R,z,phi=phi,t=t)
return sum
elif isinstance(Pot,Potential):
return Pot._zforce_nodecorator(R,z,phi=phi,t=t)
elif isinstance(Pot,DissipativeForce):
return Pot._zforce_nodecorator(R,z,phi=phi,t=t,v=v)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatezforces' is neither a Potential-instance, DissipativeForce-instance or a list of such instances")
@potential_physical_input
@physical_conversion('force',pop=True)
def evaluaterforces(Pot,R,z,phi=None,t=0.,v=None):
"""
NAME:
evaluaterforces
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
v - current velocity in cylindrical coordinates (optional, but required when including dissipative forces; can be a Quantity)
OUTPUT:
F_r(R,z,phi,t)
HISTORY:
2016-06-10 - Written - Bovy (UofT)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
dissipative= _isDissipative(Pot)
if dissipative and v is None:
raise PotentialError("The (list of) Potential instances includes dissipative, but you did not provide the 3D velocity (required for dissipative forces")
if isList:
sum= 0.
for pot in Pot:
if isinstance(pot,DissipativeForce):
sum+= pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else:
sum+= pot.rforce(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.rforce(R,z,phi=phi,t=t,use_physical=False)
elif isinstance(Pot,DissipativeForce):
return Pot.rforce(R,z,phi=phi,t=t,v=v,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluaterforces' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def evaluateR2derivs(Pot,R,z,phi=None,t=0.):
"""
NAME:
evaluateR2derivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2Phi/d2R(R,z,phi,t)
HISTORY:
2012-07-25 - Written - Bovy (IAS)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.R2deriv(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.R2deriv(R,z,phi=phi,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateR2derivs' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def evaluatez2derivs(Pot,R,z,phi=None,t=0.):
"""
NAME:
evaluatez2derivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2Phi/d2z(R,z,phi,t)
HISTORY:
2012-07-25 - Written - Bovy (IAS)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.z2deriv(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.z2deriv(R,z,phi=phi,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatez2derivs' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def evaluateRzderivs(Pot,R,z,phi=None,t=0.):
"""
NAME:
evaluateRzderivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials (dissipative forces in such a list are ignored)
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2Phi/dz/dR(R,z,phi,t)
HISTORY:
2013-08-28 - Written - Bovy (IAS)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.Rzderiv(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.Rzderiv(R,z,phi=phi,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateRzderivs' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def evaluatephi2derivs(Pot,R,z,phi=None,t=0.):
"""
NAME:
evaluatephi2derivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2Phi/d2phi(R,z,phi,t)
HISTORY:
2018-03-28 - Written - Bovy (UofT)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.phi2deriv(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.phi2deriv(R,z,phi=phi,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluatephi2derivs' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def evaluateRphiderivs(Pot,R,z,phi=None,t=0.):
"""
NAME:
evaluateRphiderivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
       d2Phi/dR/dphi(R,z,phi,t)
HISTORY:
2012-07-25 - Written - Bovy (IAS)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.Rphideriv(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.Rphideriv(R,z,phi=phi,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluateRphiderivs' is neither a Potential-instance or a list of such instances")
@potential_physical_input
@physical_conversion('forcederivative',pop=True)
def evaluater2derivs(Pot,R,z,phi=None,t=0.):
"""
NAME:
evaluater2derivs
PURPOSE:
convenience function to evaluate a possible sum of potentials
INPUT:
Pot - a potential or list of potentials
R - cylindrical Galactocentric distance (can be Quantity)
z - distance above the plane (can be Quantity)
phi - azimuth (optional; can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
d2phi/dr2(R,z,phi,t)
HISTORY:
2018-03-28 - Written - Bovy (UofT)
"""
isList= isinstance(Pot,list)
nonAxi= _isNonAxi(Pot)
if nonAxi and phi is None:
raise PotentialError("The (list of) Potential instances is non-axisymmetric, but you did not provide phi")
if isList:
sum= 0.
for pot in Pot:
if not isinstance(pot,DissipativeForce):
sum+= pot.r2deriv(R,z,phi=phi,t=t,use_physical=False)
return sum
elif isinstance(Pot,Potential):
return Pot.r2deriv(R,z,phi=phi,t=t,use_physical=False)
else: #pragma: no cover
raise PotentialError("Input to 'evaluater2derivs' is neither a Potential-instance or a list of such instances")
def plotPotentials(Pot,rmin=0.,rmax=1.5,nrs=21,zmin=-0.5,zmax=0.5,nzs=21,
phi=None,xy=False,t=0.,effective=False,Lz=None,
ncontours=21,savefilename=None,aspect=None,
justcontours=False,levels=None,cntrcolors=None):
"""
NAME:
plotPotentials
PURPOSE:
plot a set of potentials
INPUT:
Pot - Potential or list of Potential instances
rmin= minimum R (can be Quantity) [xmin if xy]
       rmax= maximum R (can be Quantity) [xmax if xy]
nrs= grid in R
zmin= minimum z (can be Quantity) [ymin if xy]
zmax= maximum z (can be Quantity) [ymax if xy]
nzs= grid in z
phi= (None) azimuth to use for non-axisymmetric potentials
t= (0.) time to use to evaluate potential
xy= (False) if True, plot the potential in X-Y
effective= (False) if True, plot the effective potential Phi + Lz^2/2/R^2
Lz= (None) angular momentum to use for the effective potential when effective=True
justcontours= (False) if True, just plot contours
levels= (None) contours to plot
ncontours - number of contours when levels is None
cntrcolors= (None) colors of the contours (single color or array with length ncontours)
savefilename= save to or restore from this savefile (pickle)
OUTPUT:
plot to output device
HISTORY:
2010-07-09 - Written - Bovy (NYU)
"""
Pot= flatten(Pot)
rmin= conversion.parse_length(rmin,**get_physical(Pot))
rmax= conversion.parse_length(rmax,**get_physical(Pot))
zmin= conversion.parse_length(zmin,**get_physical(Pot))
zmax= conversion.parse_length(zmax,**get_physical(Pot))
    if savefilename is not None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potRz= pickle.load(savefile)
Rs= pickle.load(savefile)
zs= pickle.load(savefile)
savefile.close()
else:
if effective and Lz is None:
raise RuntimeError("When effective=True, you need to specify Lz=")
Rs= numpy.linspace(rmin,rmax,nrs)
zs= numpy.linspace(zmin,zmax,nzs)
potRz= numpy.zeros((nrs,nzs))
for ii in range(nrs):
for jj in range(nzs):
if xy:
R,phi,z= coords.rect_to_cyl(Rs[ii],zs[jj],0.)
else:
R,z= Rs[ii], zs[jj]
potRz[ii,jj]= evaluatePotentials(Pot,numpy.fabs(R),
z,phi=phi,t=t,
use_physical=False)
if effective:
potRz[ii,:]+= 0.5*Lz**2/Rs[ii]**2.
        if savefilename is not None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potRz,savefile)
pickle.dump(Rs,savefile)
pickle.dump(zs,savefile)
savefile.close()
if aspect is None:
aspect=.75*(rmax-rmin)/(zmax-zmin)
if xy:
xlabel= r'$x/R_0$'
ylabel= r'$y/R_0$'
else:
xlabel=r"$R/R_0$"
ylabel=r"$z/R_0$"
if levels is None:
levels= numpy.linspace(numpy.nanmin(potRz),numpy.nanmax(potRz),ncontours)
if cntrcolors is None:
cntrcolors= 'k'
return plot.dens2d(potRz.T,origin='lower',cmap='gist_gray',contours=True,
xlabel=xlabel,ylabel=ylabel,
aspect=aspect,
xrange=[rmin,rmax],
yrange=[zmin,zmax],
cntrls='-',
justcontours=justcontours,
levels=levels,cntrcolors=cntrcolors)
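# Hedged usage sketch (not part of the original module): plotPotentials drawn for a
# simple axisymmetric potential.  The MiyamotoNagaiPotential parameters below are
# illustrative assumptions, not values taken from this file.
def _example_plotPotentials():
    from ..potential import MiyamotoNagaiPotential
    mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
    # Contour the effective potential for Lz=1 on a 51x51 (R, z) grid
    return plotPotentials(mp, rmin=0.01, rmax=2., nrs=51,
                          zmin=-0.5, zmax=0.5, nzs=51,
                          effective=True, Lz=1., ncontours=21)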
def plotDensities(Pot,rmin=0.,rmax=1.5,nrs=21,zmin=-0.5,zmax=0.5,nzs=21,
phi=None,xy=False,t=0.,
ncontours=21,savefilename=None,aspect=None,log=False,
justcontours=False):
"""
NAME:
plotDensities
PURPOSE:
plot the density a set of potentials
INPUT:
Pot - Potential or list of Potential instances
rmin= minimum R (can be Quantity) [xmin if xy]
       rmax= maximum R (can be Quantity) [xmax if xy]
nrs= grid in R
zmin= minimum z (can be Quantity) [ymin if xy]
zmax= maximum z (can be Quantity) [ymax if xy]
nzs= grid in z
phi= (None) azimuth to use for non-axisymmetric potentials
t= (0.) time to use to evaluate potential
xy= (False) if True, plot the density in X-Y
ncontours= number of contours
justcontours= (False) if True, just plot contours
savefilename= save to or restore from this savefile (pickle)
log= if True, plot the log density
OUTPUT:
plot to output device
HISTORY:
2013-07-05 - Written - Bovy (IAS)
"""
Pot= flatten(Pot)
rmin= conversion.parse_length(rmin,**get_physical(Pot))
rmax= conversion.parse_length(rmax,**get_physical(Pot))
zmin= conversion.parse_length(zmin,**get_physical(Pot))
zmax= conversion.parse_length(zmax,**get_physical(Pot))
    if savefilename is not None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
potRz= pickle.load(savefile)
Rs= pickle.load(savefile)
zs= pickle.load(savefile)
savefile.close()
else:
Rs= numpy.linspace(rmin,rmax,nrs)
zs= numpy.linspace(zmin,zmax,nzs)
potRz= numpy.zeros((nrs,nzs))
for ii in range(nrs):
for jj in range(nzs):
if xy:
R,phi,z= coords.rect_to_cyl(Rs[ii],zs[jj],0.)
else:
R,z= Rs[ii], zs[jj]
potRz[ii,jj]= evaluateDensities(Pot,numpy.fabs(R),z,phi=phi,
t=t,
use_physical=False)
        if savefilename is not None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(potRz,savefile)
pickle.dump(Rs,savefile)
pickle.dump(zs,savefile)
savefile.close()
if aspect is None:
aspect=.75*(rmax-rmin)/(zmax-zmin)
if log:
potRz= numpy.log(potRz)
if xy:
xlabel= r'$x/R_0$'
ylabel= r'$y/R_0$'
else:
xlabel=r"$R/R_0$"
ylabel=r"$z/R_0$"
return plot.dens2d(potRz.T,origin='lower',
cmap='gist_yarg',contours=True,
xlabel=xlabel,ylabel=ylabel,
aspect=aspect,
xrange=[rmin,rmax],
yrange=[zmin,zmax],
cntrls='-',
justcontours=justcontours,
levels=numpy.linspace(numpy.nanmin(potRz),numpy.nanmax(potRz),
ncontours))
def plotSurfaceDensities(Pot,
xmin=-1.5,xmax=1.5,nxs=21,ymin=-1.5,ymax=1.5,nys=21,
z=numpy.inf,t=0.,
ncontours=21,savefilename=None,aspect=None,
log=False,justcontours=False):
"""
NAME:
plotSurfaceDensities
PURPOSE:
plot the surface density a set of potentials
INPUT:
Pot - Potential or list of Potential instances
xmin= minimum x (can be Quantity)
xmax= maximum x (can be Quantity)
nxs= grid in x
ymin= minimum y (can be Quantity)
ymax= maximum y (can be Quantity)
nys= grid in y
z= (inf) height between which to integrate the density (from -z to z; can be a Quantity)
t= (0.) time to use to evaluate potential
ncontours= number of contours
justcontours= (False) if True, just plot contours
savefilename= save to or restore from this savefile (pickle)
log= if True, plot the log density
OUTPUT:
plot to output device
HISTORY:
2020-08-19 - Written - Bovy (UofT)
"""
Pot= flatten(Pot)
xmin= conversion.parse_length(xmin,**get_physical(Pot))
xmax= conversion.parse_length(xmax,**get_physical(Pot))
ymin= conversion.parse_length(ymin,**get_physical(Pot))
ymax= conversion.parse_length(ymax,**get_physical(Pot))
    if savefilename is not None and os.path.exists(savefilename):
print("Restoring savefile "+savefilename+" ...")
savefile= open(savefilename,'rb')
surfxy= pickle.load(savefile)
xs= pickle.load(savefile)
ys= pickle.load(savefile)
savefile.close()
else:
xs= numpy.linspace(xmin,xmax,nxs)
ys= numpy.linspace(ymin,ymax,nys)
surfxy= numpy.zeros((nxs,nys))
for ii in range(nxs):
for jj in range(nys):
R,phi,_= coords.rect_to_cyl(xs[ii],ys[jj],0.)
surfxy[ii,jj]= evaluateSurfaceDensities(Pot,
numpy.fabs(R),z,
phi=phi,
t=t,
use_physical=False)
        if savefilename is not None:
print("Writing savefile "+savefilename+" ...")
savefile= open(savefilename,'wb')
pickle.dump(surfxy,savefile)
pickle.dump(xs,savefile)
pickle.dump(ys,savefile)
savefile.close()
if aspect is None:
aspect= 1.
if log:
surfxy= numpy.log(surfxy)
xlabel= r'$x/R_0$'
ylabel= r'$y/R_0$'
return plot.dens2d(surfxy.T,origin='lower',
cmap='gist_yarg',contours=True,
xlabel=xlabel,ylabel=ylabel,
aspect=aspect,
xrange=[xmin,xmax],
yrange=[ymin,ymax],
cntrls='-',
justcontours=justcontours,
levels=numpy.linspace(numpy.nanmin(surfxy),
numpy.nanmax(surfxy),
ncontours))
@potential_physical_input
@physical_conversion('frequency',pop=True)
def epifreq(Pot,R,t=0.):
"""
NAME:
epifreq
PURPOSE:
calculate the epicycle frequency at R in the potential Pot
INPUT:
Pot - Potential instance or list thereof
R - Galactocentric radius (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
epicycle frequency
HISTORY:
2012-07-25 - Written - Bovy (IAS)
"""
from .planarPotential import planarPotential
if isinstance(Pot,(Potential,planarPotential)):
return Pot.epifreq(R,t=t,use_physical=False)
from ..potential import evaluateplanarRforces, evaluateplanarR2derivs
from ..potential import PotentialError
try:
return numpy.sqrt(evaluateplanarR2derivs(Pot,R,t=t,use_physical=False)
-3./R*evaluateplanarRforces(Pot,R,t=t,use_physical=False))
except PotentialError:
from ..potential import RZToplanarPotential
Pot= RZToplanarPotential(Pot)
return numpy.sqrt(evaluateplanarR2derivs(Pot,R,t=t,use_physical=False)
-3./R*evaluateplanarRforces(Pot,R,t=t,use_physical=False))
@potential_physical_input
@physical_conversion('frequency',pop=True)
def verticalfreq(Pot,R,t=0.):
"""
NAME:
verticalfreq
PURPOSE:
calculate the vertical frequency at R in the potential Pot
INPUT:
Pot - Potential instance or list thereof
R - Galactocentric radius (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
vertical frequency
HISTORY:
2012-07-25 - Written - Bovy (IAS@MPIA)
"""
from .planarPotential import planarPotential
if isinstance(Pot,(Potential,planarPotential)):
return Pot.verticalfreq(R,t=t,use_physical=False)
return numpy.sqrt(evaluatez2derivs(Pot,R,0.,t=t,use_physical=False))
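# Hedged usage sketch (not part of the original module): the epicycle and vertical
# frequencies evaluated at R = 1 for an assumed, normalized disk potential.
def _example_frequencies():
    from ..potential import MiyamotoNagaiPotential
    mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
    kappa = epifreq(mp, 1.)        # radial epicycle frequency
    nu = verticalfreq(mp, 1.)      # vertical oscillation frequency
    return kappa, nu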
@potential_physical_input
@physical_conversion('dimensionless',pop=True)
def flattening(Pot,R,z,t=0.):
"""
NAME:
flattening
PURPOSE:
calculate the potential flattening, defined as sqrt(fabs(z/R F_R/F_z))
INPUT:
Pot - Potential instance or list thereof
R - Galactocentric radius (can be Quantity)
z - height (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
flattening
HISTORY:
2012-09-13 - Written - Bovy (IAS)
"""
return numpy.sqrt(numpy.fabs(z/R*evaluateRforces(Pot,R,z,t=t,use_physical=False)\
/evaluatezforces(Pot,R,z,t=t,use_physical=False)))
@physical_conversion('velocity',pop=True)
def vterm(Pot,l,t=0.,deg=True):
"""
NAME:
vterm
PURPOSE:
calculate the terminal velocity at l in this potential
INPUT:
Pot - Potential instance
       l - Galactic longitude (in deg or rad; can be Quantity)
t - time (optional; can be Quantity)
deg= if True (default), l in deg
OUTPUT:
terminal velocity
HISTORY:
2013-05-31 - Written - Bovy (IAS)
"""
Pot= flatten(Pot)
if _APY_LOADED and isinstance(l,units.Quantity):
l= conversion.parse_angle(l)
deg= False
if deg:
sinl= numpy.sin(l/180.*numpy.pi)
else:
sinl= numpy.sin(l)
return sinl*(omegac(Pot,sinl,t=t,use_physical=False)
-omegac(Pot,1.,t=t,use_physical=False))
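# Hedged usage sketch (not part of the original module): the terminal velocity at a
# Galactic longitude of 30 degrees for an assumed flat-rotation-curve potential.
def _example_vterm():
    from ..potential import LogarithmicHaloPotential
    lp = LogarithmicHaloPotential(normalize=1.)
    return vterm(lp, 30.)  # l is in degrees by default (deg=True)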
@physical_conversion('position',pop=True)
def rl(Pot,lz,t=0.):
"""
NAME:
rl
PURPOSE:
calculate the radius of a circular orbit of Lz
INPUT:
Pot - Potential instance or list thereof
lz - Angular momentum (can be Quantity)
t - time (optional; can be Quantity)
OUTPUT:
radius
HISTORY:
2012-07-30 - Written - Bovy (IAS@MPIA)
NOTE:
seems to take about ~0.5 ms for a Miyamoto-Nagai potential;
~0.75 ms for a MWPotential
"""
Pot= flatten(Pot)
lz= conversion.parse_angmom(lz,**conversion.get_physical(Pot))
#Find interval
rstart= _rlFindStart(numpy.fabs(lz),#assumes vo=1.
numpy.fabs(lz),
Pot, t=t)
try:
return optimize.brentq(_rlfunc,10.**-5.,rstart,
args=(numpy.fabs(lz),
Pot,
t),
maxiter=200,disp=False)
    except ValueError: #Probably lz small and the starting radius too great
rlower= _rlFindStart(10.**-5.,
numpy.fabs(lz),
Pot,t=t,lower=True)
return optimize.brentq(_rlfunc,rlower,rstart,
                               args=(numpy.fabs(lz),
                                     Pot,
                                     t),
                               maxiter=200,disp=False)
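# Hedged usage sketch (not part of the original module): the radius of a circular
# orbit with angular momentum Lz = 1.1 in an assumed, normalized disk potential.
def _example_rl():
    from ..potential import MiyamotoNagaiPotential
    mp = MiyamotoNagaiPotential(a=0.5, b=0.0375, normalize=1.)
    return rl(mp, 1.1)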
'''Partial Regression plot and residual plots to find misspecification
Author: <NAME>
License: BSD-3
Created: 2011-01-23
update
2011-06-05 : start to convert example to usable functions
2011-10-27 : docstrings
'''
from statsmodels.compat.python import lrange, lzip
from statsmodels.compat.pandas import Appender
import numpy as np
import pandas as pd
from patsy import dmatrix
from statsmodels.regression.linear_model import OLS, GLS, WLS
from statsmodels.genmod.generalized_linear_model import GLM
from statsmodels.genmod.generalized_estimating_equations import GEE
from statsmodels.sandbox.regression.predstd import wls_prediction_std
from statsmodels.graphics import utils
from statsmodels.nonparametric.smoothers_lowess import lowess
from statsmodels.tools.tools import maybe_unwrap_results
from ._regressionplots_doc import (
_plot_added_variable_doc,
_plot_partial_residuals_doc,
_plot_ceres_residuals_doc,
_plot_influence_doc,
_plot_leverage_resid2_doc)
__all__ = ['plot_fit', 'plot_regress_exog', 'plot_partregress', 'plot_ccpr',
'plot_regress_exog', 'plot_partregress_grid', 'plot_ccpr_grid',
'add_lowess', 'abline_plot', 'influence_plot',
'plot_leverage_resid2', 'added_variable_resids',
'partial_resids', 'ceres_resids', 'plot_added_variable',
'plot_partial_residuals', 'plot_ceres_residuals']
#TODO: consider moving to influence module
def _high_leverage(results):
#TODO: replace 1 with k_constant
return 2. * (results.df_model + 1)/results.nobs
def add_lowess(ax, lines_idx=0, frac=.2, **lowess_kwargs):
"""
Add Lowess line to a plot.
Parameters
----------
ax : AxesSubplot
The Axes to which to add the plot
lines_idx : int
This is the line on the existing plot to which you want to add
a smoothed lowess line.
frac : float
The fraction of the points to use when doing the lowess fit.
lowess_kwargs
        Additional keyword arguments are passed to lowess.
Returns
-------
Figure
The figure that holds the instance.
"""
y0 = ax.get_lines()[lines_idx]._y
x0 = ax.get_lines()[lines_idx]._x
lres = lowess(y0, x0, frac=frac, **lowess_kwargs)
ax.plot(lres[:, 0], lres[:, 1], 'r', lw=1.5)
return ax.figure
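# Hedged usage sketch (not in the original module): add_lowess applied to a simple
# residuals-versus-fitted scatter.  The simulated data and variable names are
# illustrative assumptions.
def _example_add_lowess():
    import matplotlib.pyplot as plt
    rs = np.random.RandomState(0)
    x = rs.uniform(0, 10, 200)
    y = 2. + 0.5 * x + rs.normal(scale=1., size=200)
    res = OLS(y, np.column_stack([np.ones_like(x), x])).fit()
    fig, ax = plt.subplots()
    ax.plot(res.fittedvalues, res.resid, 'o')   # line index 0 on this Axes
    add_lowess(ax, lines_idx=0, frac=0.3)       # smoothed trend of the residuals
    return fig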
def plot_fit(results, exog_idx, y_true=None, ax=None, vlines=True, **kwargs):
"""
Plot fit against one regressor.
This creates one graph with the scatterplot of observed values
compared to fitted values.
Parameters
----------
results : Results
A result instance with resid, model.endog and model.exog as
attributes.
exog_idx : {int, str}
Name or index of regressor in exog matrix.
    y_true : array_like, optional
If this is not None, then the array is added to the plot.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
vlines : bool, optional
        If this is not True, then the uncertainty of the fit is not
plotted.
**kwargs
The keyword arguments are passed to the plot command for the fitted
values points.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
Examples
--------
Load the Statewide Crime data set and perform linear regression with
`poverty` and `hs_grad` as variables and `murder` as the response
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> data = sm.datasets.statecrime.load_pandas().data
>>> murder = data['murder']
>>> X = data[['poverty', 'hs_grad']]
>>> X["constant"] = 1
>>> y = murder
>>> model = sm.OLS(y, X)
>>> results = model.fit()
Create a plot just for the variable 'Poverty':
>>> fig, ax = plt.subplots()
>>> fig = sm.graphics.plot_fit(results, 0, ax=ax)
>>> ax.set_ylabel("Murder Rate")
>>> ax.set_xlabel("Poverty Level")
>>> ax.set_title("Linear Regression")
>>> plt.show()
.. plot:: plots/graphics_plot_fit_ex.py
"""
fig, ax = utils.create_mpl_ax(ax)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
#maybe add option for wendog, wexog
y = results.model.endog
x1 = results.model.exog[:, exog_idx]
x1_argsort = np.argsort(x1)
y = y[x1_argsort]
x1 = x1[x1_argsort]
ax.plot(x1, y, 'bo', label=results.model.endog_names)
if y_true is not None:
ax.plot(x1, y_true[x1_argsort], 'b-', label='True values')
title = 'Fitted values versus %s' % exog_name
ax.plot(x1, results.fittedvalues[x1_argsort], 'D', color='r',
label='fitted', **kwargs)
if vlines is True:
_, iv_l, iv_u = wls_prediction_std(results)
ax.vlines(x1, iv_l[x1_argsort], iv_u[x1_argsort], linewidth=1,
color='k', alpha=.7)
#ax.fill_between(x1, iv_l[x1_argsort], iv_u[x1_argsort], alpha=0.1,
# color='k')
ax.set_title(title)
ax.set_xlabel(exog_name)
ax.set_ylabel(results.model.endog_names)
ax.legend(loc='best', numpoints=1)
return fig
def plot_regress_exog(results, exog_idx, fig=None):
"""Plot regression results against one regressor.
This plots four graphs in a 2 by 2 figure: 'endog versus exog',
'residuals versus exog', 'fitted versus exog' and
'fitted plus residual versus exog'
Parameters
----------
results : result instance
A result instance with resid, model.endog and model.exog as attributes.
exog_idx : int or str
Name or index of regressor in exog matrix.
fig : Figure, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
Figure
The value of `fig` if provided. Otherwise a new instance.
Examples
--------
Load the Statewide Crime data set and build a model with regressors
including the rate of high school graduation (hs_grad), population in urban
areas (urban), households below poverty line (poverty), and single person
households (single). Outcome variable is the murder rate (murder).
Build a 2 by 2 figure based on poverty showing fitted versus actual murder
rate, residuals versus the poverty rate, partial regression plot of poverty,
and CCPR plot for poverty rate.
>>> import statsmodels.api as sm
    >>> import matplotlib.pyplot as plt
>>> import statsmodels.formula.api as smf
>>> fig = plt.figure(figsize=(8, 6))
>>> crime_data = sm.datasets.statecrime.load_pandas()
>>> results = smf.ols('murder ~ hs_grad + urban + poverty + single',
... data=crime_data.data).fit()
>>> sm.graphics.plot_regress_exog(results, 'poverty', fig=fig)
>>> plt.show()
.. plot:: plots/graphics_regression_regress_exog.py
"""
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
#maybe add option for wendog, wexog
y_name = results.model.endog_names
x1 = results.model.exog[:, exog_idx]
prstd, iv_l, iv_u = wls_prediction_std(results)
ax = fig.add_subplot(2, 2, 1)
ax.plot(x1, results.model.endog, 'o', color='b', alpha=0.9, label=y_name)
ax.plot(x1, results.fittedvalues, 'D', color='r', label='fitted',
alpha=.5)
ax.vlines(x1, iv_l, iv_u, linewidth=1, color='k', alpha=.7)
ax.set_title('Y and Fitted vs. X', fontsize='large')
ax.set_xlabel(exog_name)
ax.set_ylabel(y_name)
ax.legend(loc='best')
ax = fig.add_subplot(2, 2, 2)
ax.plot(x1, results.resid, 'o')
ax.axhline(y=0, color='black')
ax.set_title('Residuals versus %s' % exog_name, fontsize='large')
ax.set_xlabel(exog_name)
ax.set_ylabel("resid")
ax = fig.add_subplot(2, 2, 3)
exog_noti = np.ones(results.model.exog.shape[1], bool)
exog_noti[exog_idx] = False
exog_others = results.model.exog[:, exog_noti]
from pandas import Series
fig = plot_partregress(results.model.data.orig_endog,
Series(x1, name=exog_name,
index=results.model.data.row_labels),
exog_others, obs_labels=False, ax=ax)
ax.set_title('Partial regression plot', fontsize='large')
#ax.set_ylabel("Fitted values")
#ax.set_xlabel(exog_name)
ax = fig.add_subplot(2, 2, 4)
fig = plot_ccpr(results, exog_idx, ax=ax)
ax.set_title('CCPR Plot', fontsize='large')
#ax.set_xlabel(exog_name)
#ax.set_ylabel("Fitted values + resids")
fig.suptitle('Regression Plots for %s' % exog_name, fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.90)
return fig
def _partial_regression(endog, exog_i, exog_others):
"""Partial regression.
regress endog on exog_i conditional on exog_others
uses OLS
Parameters
----------
endog : array_like
exog : array_like
exog_others : array_like
Returns
-------
res1c : OLS results instance
(res1a, res1b) : tuple of OLS results instances
results from regression of endog on exog_others and of exog_i on
exog_others
"""
#FIXME: This function does not appear to be used.
res1a = OLS(endog, exog_others).fit()
res1b = OLS(exog_i, exog_others).fit()
res1c = OLS(res1a.resid, res1b.resid).fit()
return res1c, (res1a, res1b)
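# Hedged numerical sketch (not in the original module): the slope recovered by
# _partial_regression equals the coefficient of exog_i in the full multiple
# regression (Frisch-Waugh-Lovell).  The data below are simulated for illustration.
def _example_partial_regression():
    rs = np.random.RandomState(12345)
    n = 200
    x1 = rs.normal(size=n)
    x2 = 0.5 * x1 + rs.normal(size=n)
    const = np.ones(n)
    y = 1. + 2. * x1 - 1. * x2 + rs.normal(size=n)
    exog_others = np.column_stack([const, x2])
    res1c, _ = _partial_regression(y, x1, exog_others)
    full = OLS(y, np.column_stack([const, x1, x2])).fit()
    # res1c.params[0] and full.params[1] agree up to numerical precision
    return res1c.params[0], full.params[1]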
def plot_partregress(endog, exog_i, exog_others, data=None,
title_kwargs={}, obs_labels=True, label_kwargs={},
ax=None, ret_coords=False, **kwargs):
"""Plot partial regression for a single regressor.
Parameters
----------
endog : {ndarray, str}
        The endogenous or response variable. If a string is given, you can use
arbitrary translations as with a formula.
exog_i : {ndarray, str}
        The exogenous, explanatory variable. If a string is given, you can use
arbitrary translations as with a formula.
exog_others : {ndarray, list[str]}
Any other exogenous, explanatory variables. If a list of strings is
        given, each item is a term in the formula. You can use arbitrary
translations as with a formula. The effect of these variables will be
removed by OLS regression.
data : {DataFrame, dict}
Some kind of data structure with names if the other variables are
given as strings.
title_kwargs : dict
Keyword arguments to pass on for the title. The key to control the
fonts is fontdict.
obs_labels : {bool, array_like}
Whether or not to annotate the plot points with their observation
labels. If obs_labels is a boolean, the point labels will try to do
the right thing. First it will try to use the index of data, then
fall back to the index of exog_i. Alternatively, you may give an
array-like object corresponding to the observation numbers.
label_kwargs : dict
Keyword arguments that control annotate for the observation labels.
ax : AxesSubplot, optional
If given, this subplot is used to plot in instead of a new figure being
created.
ret_coords : bool
If True will return the coordinates of the points in the plot. You
can use this to add your own annotations.
**kwargs
The keyword arguments passed to plot for the points.
Returns
-------
fig : Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
coords : list, optional
If ret_coords is True, return a tuple of arrays (x_coords, y_coords).
See Also
--------
plot_partregress_grid : Plot partial regression for a set of regressors.
Notes
-----
    The slope of the fitted line is that of `exog_i` in the full
multiple regression. The individual points can be used to assess the
influence of points on the estimated coefficient.
Examples
--------
Load the Statewide Crime data set and plot partial regression of the rate
    of high school graduation (hs_grad) on the murder rate (murder).
The effects of the percent of the population living in urban areas (urban),
    below the poverty line (poverty), and in a single person household (single)
are removed by OLS regression.
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> crime_data = sm.datasets.statecrime.load_pandas()
>>> sm.graphics.plot_partregress(endog='murder', exog_i='hs_grad',
... exog_others=['urban', 'poverty', 'single'],
... data=crime_data.data, obs_labels=False)
>>> plt.show()
.. plot:: plots/graphics_regression_partregress.py
More detailed examples can be found in the Regression Plots notebook
on the examples page.
"""
#NOTE: there is no interaction between possible missing data and
#obs_labels yet, so this will need to be tweaked a bit for this case
fig, ax = utils.create_mpl_ax(ax)
# strings, use patsy to transform to data
if isinstance(endog, str):
endog = dmatrix(endog + "-1", data)
if isinstance(exog_others, str):
RHS = dmatrix(exog_others, data)
elif isinstance(exog_others, list):
RHS = "+".join(exog_others)
RHS = dmatrix(RHS, data)
else:
RHS = exog_others
    RHS_isempty = False
    if isinstance(RHS, np.ndarray) and RHS.size == 0:
        RHS_isempty = True
    elif isinstance(RHS, pd.DataFrame) and RHS.empty:
        RHS_isempty = True
    if isinstance(exog_i, str):
        exog_i = dmatrix(exog_i + "-1", data)
    # all arrays or pandas-like
    if RHS_isempty:
endog = np.asarray(endog)
exog_i = np.asarray(exog_i)
ax.plot(endog, exog_i, 'o', **kwargs)
fitted_line = OLS(endog, exog_i).fit()
x_axis_endog_name = 'x' if isinstance(exog_i, np.ndarray) else exog_i.name
y_axis_endog_name = 'y' if isinstance(endog, np.ndarray) else endog.design_info.column_names[0]
else:
res_yaxis = OLS(endog, RHS).fit()
res_xaxis = OLS(exog_i, RHS).fit()
xaxis_resid = res_xaxis.resid
yaxis_resid = res_yaxis.resid
x_axis_endog_name = res_xaxis.model.endog_names
y_axis_endog_name = res_yaxis.model.endog_names
ax.plot(xaxis_resid, yaxis_resid, 'o', **kwargs)
fitted_line = OLS(yaxis_resid, xaxis_resid).fit()
fig = abline_plot(0, fitted_line.params[0], color='k', ax=ax)
if x_axis_endog_name == 'y': # for no names regression will just get a y
x_axis_endog_name = 'x' # this is misleading, so use x
ax.set_xlabel("e(%s | X)" % x_axis_endog_name)
ax.set_ylabel("e(%s | X)" % y_axis_endog_name)
ax.set_title('Partial Regression Plot', **title_kwargs)
# NOTE: if we want to get super fancy, we could annotate if a point is
# clicked using this widget
# http://stackoverflow.com/questions/4652439/
# is-there-a-matplotlib-equivalent-of-matlabs-datacursormode/
# 4674445#4674445
if obs_labels is True:
if data is not None:
obs_labels = data.index
elif hasattr(exog_i, "index"):
obs_labels = exog_i.index
else:
obs_labels = res_xaxis.model.data.row_labels
#NOTE: row_labels can be None.
#Maybe we should fix this to never be the case.
if obs_labels is None:
obs_labels = lrange(len(exog_i))
if obs_labels is not False: # could be array_like
if len(obs_labels) != len(exog_i):
raise ValueError("obs_labels does not match length of exog_i")
label_kwargs.update(dict(ha="center", va="bottom"))
ax = utils.annotate_axes(lrange(len(obs_labels)), obs_labels,
lzip(res_xaxis.resid, res_yaxis.resid),
[(0, 5)] * len(obs_labels), "x-large", ax=ax,
**label_kwargs)
if ret_coords:
return fig, (res_xaxis.resid, res_yaxis.resid)
else:
return fig
def plot_partregress_grid(results, exog_idx=None, grid=None, fig=None):
"""
Plot partial regression for a set of regressors.
Parameters
----------
results : Results instance
A regression model results instance.
exog_idx : {None, list[int], list[str]}
The indices or column names of the exog used in the plot, default is
all.
grid : {None, tuple[int]}
If grid is given, then it is used for the arrangement of the subplots.
The format of grid is (nrows, ncols). If grid is None, then ncol is
one, if there are only 2 subplots, and the number of columns is two
otherwise.
fig : Figure, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
Figure
If `fig` is None, the created figure. Otherwise `fig` itself.
See Also
--------
plot_partregress : Plot partial regression for a single regressor.
plot_ccpr : Plot CCPR against one regressor
Notes
-----
A subplot is created for each explanatory variable given by exog_idx.
The partial regression plot shows the relationship between the response
and the given explanatory variable after removing the effect of all other
explanatory variables in exog.
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/partregr.htm
Examples
--------
    Using the state crime dataset, separately plot the effect of each
    variable on the outcome (murder rate) while accounting for the effect
of all other variables in the model visualized with a grid of partial
regression plots.
>>> from statsmodels.graphics.regressionplots import plot_partregress_grid
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import statsmodels.formula.api as smf
>>> fig = plt.figure(figsize=(8, 6))
>>> crime_data = sm.datasets.statecrime.load_pandas()
>>> results = smf.ols('murder ~ hs_grad + urban + poverty + single',
... data=crime_data.data).fit()
>>> plot_partregress_grid(results, fig=fig)
>>> plt.show()
.. plot:: plots/graphics_regression_partregress_grid.py
"""
import pandas
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
# TODO: maybe add option for using wendog, wexog instead
y = pandas.Series(results.model.endog, name=results.model.endog_names)
exog = results.model.exog
k_vars = exog.shape[1]
# this function does not make sense if k_vars=1
nrows = (len(exog_idx) + 1) // 2
ncols = 1 if nrows == len(exog_idx) else 2
if grid is not None:
nrows, ncols = grid
    title_kwargs = {}
    if ncols > 1:
        title_kwargs = {"fontdict": {"fontsize": 'small'}}
# for indexing purposes
other_names = np.array(results.model.exog_names)
for i, idx in enumerate(exog_idx):
others = lrange(k_vars)
others.pop(idx)
exog_others = pandas.DataFrame(exog[:, others],
columns=other_names[others])
ax = fig.add_subplot(nrows, ncols, i + 1)
plot_partregress(y, pandas.Series(exog[:, idx],
name=other_names[idx]),
exog_others, ax=ax, title_kwargs=title_kwargs,
obs_labels=False)
ax.set_title("")
fig.suptitle("Partial Regression Plot", fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.95)
return fig
def plot_ccpr(results, exog_idx, ax=None):
"""
Plot CCPR against one regressor.
Generates a component and component-plus-residual (CCPR) plot.
Parameters
----------
results : result instance
A regression results instance.
exog_idx : {int, str}
Exogenous, explanatory variable. If string is given, it should
be the variable name that you want to use, and you can use arbitrary
translations as with a formula.
ax : AxesSubplot, optional
If given, it is used to plot in instead of a new figure being
created.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_ccpr_grid : Creates CCPR plot for multiple regressors in a plot grid.
Notes
-----
The CCPR plot provides a way to judge the effect of one regressor on the
response variable by taking into account the effects of the other
independent variables. The partial residuals plot is defined as
Residuals + B_i*X_i versus X_i. The component adds the B_i*X_i versus
X_i to show where the fitted line would lie. Care should be taken if X_i
is highly correlated with any of the other independent variables. If this
is the case, the variance evident in the plot will be an underestimate of
the true variance.
References
----------
http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
Examples
--------
Using the state crime dataset plot the effect of the rate of single
households ('single') on the murder rate while accounting for high school
graduation rate ('hs_grad'), percentage of people in an urban area, and rate
of poverty ('poverty').
>>> import statsmodels.api as sm
    >>> import matplotlib.pyplot as plt
>>> import statsmodels.formula.api as smf
>>> crime_data = sm.datasets.statecrime.load_pandas()
>>> results = smf.ols('murder ~ hs_grad + urban + poverty + single',
... data=crime_data.data).fit()
>>> sm.graphics.plot_ccpr(results, 'single')
>>> plt.show()
.. plot:: plots/graphics_regression_ccpr.py
"""
fig, ax = utils.create_mpl_ax(ax)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
results = maybe_unwrap_results(results)
x1 = results.model.exog[:, exog_idx]
#namestr = ' for %s' % self.name if self.name else ''
x1beta = x1*results.params[exog_idx]
ax.plot(x1, x1beta + results.resid, 'o')
from statsmodels.tools.tools import add_constant
mod = OLS(x1beta, add_constant(x1)).fit()
params = mod.params
fig = abline_plot(*params, **dict(ax=ax))
#ax.plot(x1, x1beta, '-')
ax.set_title('Component and component plus residual plot')
ax.set_ylabel("Residual + %s*beta_%d" % (exog_name, exog_idx))
ax.set_xlabel("%s" % exog_name)
return fig
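# Hedged sketch (not in the original module): the quantities plotted by plot_ccpr,
# formed by hand as resid + params[i] * x_i for an assumed fitted OLS results
# instance `res` and column index `i`.  "Component plus residual" is the y-axis of
# the CCPR plot; the component params[i] * x_i alone gives the overlaid fitted line.
def _ccpr_points(res, i):
    x_i = res.model.exog[:, i]
    component = res.params[i] * x_i
    component_plus_resid = component + res.resid
    return x_i, component, component_plus_resid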
def plot_ccpr_grid(results, exog_idx=None, grid=None, fig=None):
"""
Generate CCPR plots against a set of regressors, plot in a grid.
Generates a grid of component and component-plus-residual (CCPR) plots.
Parameters
----------
results : result instance
A results instance with exog and params.
exog_idx : None or list of int
The indices or column names of the exog used in the plot.
grid : None or tuple of int (nrows, ncols)
If grid is given, then it is used for the arrangement of the subplots.
        If grid is None, the number of columns is one when there are at most
        two subplots and two otherwise.
fig : Figure, optional
If given, this figure is simply returned. Otherwise a new figure is
created.
Returns
-------
Figure
If `ax` is None, the created figure. Otherwise the figure to which
`ax` is connected.
See Also
--------
plot_ccpr : Creates CCPR plot for a single regressor.
Notes
-----
Partial residual plots are formed as::
Res + Betahat(i)*Xi versus Xi
and CCPR adds::
Betahat(i)*Xi versus Xi
References
----------
See http://www.itl.nist.gov/div898/software/dataplot/refman1/auxillar/ccpr.htm
Examples
--------
    Using the state crime dataset, separately plot the effect of each
    variable on the outcome (murder rate) while accounting for the effect
of all other variables in the model.
>>> import statsmodels.api as sm
>>> import matplotlib.pyplot as plt
>>> import statsmodels.formula.api as smf
>>> fig = plt.figure(figsize=(8, 8))
>>> crime_data = sm.datasets.statecrime.load_pandas()
>>> results = smf.ols('murder ~ hs_grad + urban + poverty + single',
... data=crime_data.data).fit()
>>> sm.graphics.plot_ccpr_grid(results, fig=fig)
>>> plt.show()
.. plot:: plots/graphics_regression_ccpr_grid.py
"""
fig = utils.create_mpl_fig(fig)
exog_name, exog_idx = utils.maybe_name_or_idx(exog_idx, results.model)
if grid is not None:
nrows, ncols = grid
else:
if len(exog_idx) > 2:
nrows = int(np.ceil(len(exog_idx)/2.))
ncols = 2
else:
nrows = len(exog_idx)
ncols = 1
seen_constant = 0
for i, idx in enumerate(exog_idx):
if results.model.exog[:, idx].var() == 0:
seen_constant = 1
continue
ax = fig.add_subplot(nrows, ncols, i+1-seen_constant)
fig = plot_ccpr(results, exog_idx=idx, ax=ax)
ax.set_title("")
fig.suptitle("Component-Component Plus Residual Plot", fontsize="large")
fig.tight_layout()
fig.subplots_adjust(top=.95)
return fig
def abline_plot(intercept=None, slope=None, horiz=None, vert=None,
model_results=None, ax=None, **kwargs):
"""
Plot a line given an intercept and slope.
Parameters
----------
intercept : float
The intercept of the line.
slope : float
The slope of the line.
horiz : float or array_like
Data for horizontal lines on the y-axis.
vert : array_like
        Data for vertical lines on the x-axis.
model_results : statsmodels results instance
Any object that has a two-value `params` attribute. Assumed that it
is (intercept, slope).
ax : axes, optional
Matplotlib axes instance.
**kwargs
        Options passed to the matplotlib.lines.Line2D constructor.
Returns
-------
Figure
The figure given by `ax.figure` or a new instance.
Examples
--------
>>> import numpy as np
>>> import statsmodels.api as sm
>>> np.random.seed(12345)
>>> X = sm.add_constant(np.random.normal(0, 20, size=30))
>>> y = np.dot(X, [25, 3.5]) + np.random.normal(0, 30, size=30)
>>> mod = sm.OLS(y,X).fit()
>>> fig = sm.graphics.abline_plot(model_results=mod)
>>> ax = fig.axes[0]
>>> ax.scatter(X[:,1], y)
>>> ax.margins(.1)
>>> import matplotlib.pyplot as plt
>>> plt.show()
.. plot:: plots/graphics_regression_abline.py
"""
if ax is not None: # get axis limits first thing, do not change these
x = ax.get_xlim()
else:
x = None
fig, ax = utils.create_mpl_ax(ax)
if model_results:
intercept, slope = model_results.params
if x is None:
x = [model_results.model.exog[:, 1].min(),
model_results.model.exog[:, 1].max()]
else:
if not (intercept is not None and slope is not None):
raise ValueError("specify slope and intercepty or model_results")
if x is None:
x = ax.get_xlim()
data_y = [x[0]*slope+intercept, x[1]*slope+intercept]
ax.set_xlim(x)
#ax.set_ylim(y)
from matplotlib.lines import Line2D
class ABLine2D(Line2D):
def __init__(self, *args, **kwargs):
super(ABLine2D, self).__init__(*args, **kwargs)
self.id_xlim_callback = None
self.id_ylim_callback = None
def remove(self):
ax = self.axes
if self.id_xlim_callback:
ax.callbacks.disconnect(self.id_xlim_callback)
if self.id_ylim_callback:
ax.callbacks.disconnect(self.id_ylim_callback)
super(ABLine2D, self).remove()
def update_datalim(self, ax):
ax.set_autoscale_on(False)
children = ax.get_children()
ablines = [child for child in children if child is self]
abline = ablines[0]
x = ax.get_xlim()
y = [x[0] * slope + intercept, x[1] * slope + intercept]
abline.set_data(x, y)
ax.figure.canvas.draw()
# TODO: how to intercept something like a margins call and adjust?
line = ABLine2D(x, data_y, **kwargs)
ax.add_line(line)
line.id_xlim_callback = ax.callbacks.connect('xlim_changed', line.update_datalim)
line.id_ylim_callback = ax.callbacks.connect('ylim_changed', line.update_datalim)
if horiz:
        ax.axhline(horiz)
if vert:
        ax.axvline(vert)
return fig
@Appender(_plot_influence_doc.format(**{
'extra_params_doc': "results: object\n"
" Results for a fitted regression model.\n"
" influence: instance\n"
" The instance of Influence for model."}))
def _influence_plot(results, influence, external=True, alpha=.05,
criterion="cooks", size=48, plot_alpha=.75, ax=None,
**kwargs):
infl = influence
fig, ax = utils.create_mpl_ax(ax)
if criterion.lower().startswith('coo'):
psize = infl.cooks_distance[0]
elif criterion.lower().startswith('dff'):
psize = np.abs(infl.dffits[0])
else:
raise ValueError("Criterion %s not understood" % criterion)
# scale the variables
#TODO: what is the correct scaling and the assumption here?
#we want plots to be comparable across different plots
#so we would need to use the expected distribution of criterion probably
old_range = np.ptp(psize)
new_range = size**2 - 8**2
psize = (psize - psize.min()) * new_range/old_range + 8**2
leverage = infl.hat_matrix_diag
if external:
resids = infl.resid_studentized_external
else:
resids = infl.resid_studentized
from scipy import stats
cutoff = stats.t.ppf(1.-alpha/2, results.df_resid)
    large_resid = np.abs(resids) > cutoff
import json
import numpy as np
import wandb
from matplotlib import pyplot as plt
from src.utils import draw_digit, load_data
class LeakyReLU:
# The Leaky Rectifier function will serve as our activation function
def __init__(self, alpha):
self.alpha = alpha # Parameter for the function
def f(self, z):
# The Leaky Rectifier is computed as:
# f(z) = { z, z >= 0
# {alpha * z, z < 0
return np.where(z >= 0, z, self.alpha * z)
def d(self, a):
# The derivative of the function is given by:
# f'(z) = { 1, z >= 0 which implies f(z) >= 0
# {alpha, z < 0 which implies f(z) < 0
return np.where(a >= 0, 1, self.alpha)
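# Hedged sanity check (not in the original module): LeakyReLU passes positive
# inputs through unchanged and scales negative inputs by alpha; its derivative is
# 1 on the positive side and alpha on the negative side.
def _demo_leaky_relu():
    act = LeakyReLU(alpha=0.05)
    z = np.array([-2., 0., 3.])
    out = act.f(z)     # array([-0.1, 0., 3.])
    grad = act.d(out)  # array([0.05, 1., 1.])
    return out, grad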
def log_loss(y, a):
# Our chosen error function is the Log Loss function.
# This is also called the "Cross Entropy Loss" and is computed as:
# f(a) = -y * log(a) - (1 - y) * log(1 - a)
# i.e, -log(a) if y is 1, and -log(1 - a) if y is 0.
return np.where(y, -np.log(a), -np.log(1 - a)).sum()
def softmax(z):
# We offset each element of z by the maximum to prevent overflows.
# nan_to_num is used to handle extremely small values. These are
# made 0.
z = np.nan_to_num(np.exp(z - np.max(z)))
z_sum = np.sum(z)
return np.nan_to_num(z / z_sum)
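# Hedged sanity check (not in the original module): softmax outputs are positive
# and sum to one, and log_loss rewards putting probability mass on the correct
# (one-hot) class.
def _demo_softmax_log_loss():
    z = np.array([2.0, 1.0, 0.1])
    a = softmax(z)                 # non-negative entries that sum to 1.0
    y = np.array([1, 0, 0])        # one-hot target: class 0
    loss = log_loss(y, a)          # -log(a[0]) - log(1 - a[1]) - log(1 - a[2])
    return a, loss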
class NeuralNetwork:
def __init__(self, ns, eta=0.5, lmbda=0, alpha=0.05):
print(f'ns: {ns}, eta: {eta}, lambda: {lmbda}, alpha: {alpha}')
# Network Structure
self.n = len(ns) # Number of layers
self.ns = ns # Number of neurons in each layer
# Hyper Parameters
self.eta = eta # Learning rate
self.lmbda = lmbda # Coefficient of Regularization
# Note: The coefficient is called "lmbda" as "lambda" is a keyword.
# Activation Function Object
self.act_fn = LeakyReLU(alpha) # Parameter for LReLU
# Log hyperparameters in wandb to analyze later.
wandb.config.update({
'architecture': ns, 'eta': eta, 'lambda': lmbda, 'alpha': alpha
})
# Randomly initialize thetas (weights) with a normal distribution
# of mean zero and variance as the reciprocal of the number of inputs
# received by the particular neuron.
self.thetas = [
np.random.randn(ns[i], ns[i - 1]) / np.sqrt(ns[i - 1])
for i in range(1, self.n)
]
        # Similarly, initialize the biases with a standard normal distribution
        # (mean zero, unit variance).
self.biases = [np.random.randn(x) for x in ns[1:]]
# We use this performance variable to keep a track of how
# the network is performing. We will use it plot graph(s) between
# the number of correct predictions on the validation data and epochs.
self.performance = []
def predict(self, x):
# Our prediction is simply the activations of the output (last) layer.
return self.get_activations(x)[-1]
def train(self, data, validation_data=None, epochs=10, batch_size=20):
# We generate all the indices for the training data.
# We will shuffle these indices each epoch to randomly order the data.
# This is more efficient than zipping and shuffling the arrays directly
perm = np.arange(len(data))
self.performance = []
        n_validation = len(validation_data) if validation_data is not None else 0
# Log hyperparameters in wandb to analyze later.
wandb.config.update({'epochs': epochs, 'batch_size': batch_size})
if validation_data is not None:
correct = self.validate(validation_data)
percentage = 100 * correct / n_validation
print(f'Initial: {correct} / {n_validation} ({percentage}%)')
wandb.log({'epoch': 0, 'accuracy': percentage})
for i in range(1, epochs + 1):
np.random.shuffle(perm)
# We split the training data in batches, each of size batch_size.
for j in range(0, len(data), batch_size):
# From the shuffled indices, we select a range
# and pick all the examples in that range.
batch = [data[x] for x in perm[j:j + batch_size]]
# Each batch is then used to train the network
self.train_batch(batch)
if validation_data is not None:
correct = self.validate(validation_data)
percentage = 100 * correct / n_validation
# Log the data in wandb and also locally.
wandb.log({'epoch': i, 'accuracy': percentage})
self.performance.append(percentage)
print(f'Epoch {i}: {correct} / {n_validation} ({percentage}%)')
def plot(self, filename=None):
if not self.performance:
return
plt.figure()
plt.plot(
range(1, len(self.performance) + 1),
self.performance, 'r'
)
plt.title(
f'ns: {self.ns}, eta: {self.eta}, '
f'lambda: {self.lmbda}, alpha: {self.act_fn.alpha}.')
plt.xlabel('Number of Epochs')
plt.ylabel('Prediction Accuracy (%)')
if filename:
plt.tight_layout()
plt.savefig(filename)
plt.show()
def save(self, filename):
# This function will save all parameters of our network.
# We use this elaborate setup instead of simply pickling and dumping
# the network object so that we can still use this data,
# even if we change the architecture of our class.
# Unpickling and loading will not work in that case.
data = {
'ns': self.ns,
'eta': self.eta,
'lmbda': self.lmbda,
'alpha': self.act_fn.alpha,
'performance': self.performance,
'thetas': [t.tolist() for t in self.thetas],
'biases': [b.tolist() for b in self.biases]
}
with open(filename, 'w') as f:
json.dump(data, f)
wandb.save(filename)
@staticmethod
def load(filename):
with open(filename) as f:
data = json.load(f)
n = NeuralNetwork(
data['ns'], data['eta'], data['lmbda'], data['alpha']
)
n.thetas = [np.array(t) for t in data['thetas']]
n.biases = [np.array(b) for b in data['biases']]
n.performance = data['performance']
return n
def validate(self, validation_data):
# Returns the number of correct predictions
return sum(
np.argmax(y) == np.argmax(self.predict(x))
for x, y in validation_data
)
def get_activations(self, x):
# Validate the size of the input.
self.validate_input(x)
        # The activations of the input layer are simply the inputs themselves.
activations = [x]
# Iterate over each layer, excluding the output layer
for theta, bias in zip(self.thetas[:-1], self.biases[:-1]):
# The input to this layer is the matrix product of the weights
# associated with this layer and the activations of the previous
# layer plus the biases.
z = np.array(
np.dot(theta, activations[-1]) + bias, dtype='float64'
)
# Apply the activation (LReLU) function to get the activations
            # for this layer.
activations.append(self.act_fn.f(z))
# For the output layer, we apply the softmax function, computed as:
# exp(z) / sum(exp(z in zs))
# The sum of outputs is clearly 1, which gives us
# a useful interpretation as the 'confidence' for each possible output.
z = np.dot(self.thetas[-1], activations[-1]) + self.biases[-1]
activations.append(softmax(z))
return activations
def train_batch(self, batch):
delta_thetas = [
np.zeros((self.ns[i], self.ns[i - 1]))
for i in range(1, self.n)
]
delta_biases = [np.zeros((x,)) for x in self.ns[1:]]
# Iterate over all examples in the batch
for x, y in batch:
# Activations for the current training example
activations = self.get_activations(x)
# We can trivially compute the bias and weight derivatives
# for the last layer with the help of y and the prediction.
# These formulae are derived by applying the chain rule to
# the softmax (activation) and the log loss (error) functions.
            difference = np.array(activations[-1] - y)
# -*- coding: utf-8 -*-
"""Saraga Dataset Loader
This repository contains time aligned melody, rhythm and structural annotations for two large open corpora of
Indian Art Music (Carnatic and Hindustani music).
The repository contains the following manual annotations referring to audio files:
Section and tempo annotations stored as start and end timestamps together with the name of the section and
tempo during the section (in a separate file). Sama annotations referring to rhythmic cycle boundaries stored
as timestamps. Phrase annotations stored as timestamps and transcription of the phrases using solfège symbols
({S, r, R, g, G, m, M, P, d, D, n, N}). Audio features automatically extracted and stored: pitch and tonic.
The annotations are stored in text files, named as the audio filename but with the respective extension at the
end, for instance: "<NAME> - Bhuvini Dasudane.tempo-manual.txt".
The dataset contains a total of 197 tracks from the carnatic corpus and 108 tracks from the hindustani corpus.
A total of 163 tracks from the carnatic dataset have multitrack audio, which is not currently available in this
loader version. A new version of the loader with multitrack audio support is coming soon.
The files of this dataset are shared with the following license:
Creative Commons Attribution Non Commercial Share Alike 4.0 International
Dataset compiled by: <NAME>.; <NAME>.; <NAME>. and <NAME>.
For more information about the dataset as well as IAM and annotations, please refer to:
https://mtg.github.io/saraga/, where a really detailed explanation of the data and annotations is published.
"""
import librosa
import numpy as np
import os
import json
import logging
from mirdata import download_utils
from mirdata import jams_utils
from mirdata import core
from mirdata import utils
BIBTEX = """
@dataset{bozkurt_b_2018_1256127,
author = {<NAME>. and
<NAME>. and
<NAME>. and
<NAME>.},
title = {Saraga: research datasets of Indian Art Music},
month = may,
year = 2018,
publisher = {Zenodo},
version = {1.0},
doi = {10.5281/zenodo.1256127},
url = {https://doi.org/10.5281/zenodo.1256127}
}
"""
REMOTES = {
'all': download_utils.RemoteFileMetadata(
filename='saraga_1.0.zip',
url='https://zenodo.org/record/1256127/files/saraga_1.0.zip?download=1',
checksum='c8471e55bd55e060bde6cfacc555e1b1',
destination_dir=None,
)
}
def _load_metadata(metadata_path):
if not os.path.exists(metadata_path):
logging.info('Metadata file {} not found.'.format(metadata_path))
return None
with open(metadata_path) as f:
metadata = json.load(f)
data_home = metadata_path.split('/' + metadata_path.split('/')[-3])[0]
metadata['track_id'] = (
str(metadata_path.split('/')[-3]) + '_' + str(metadata_path.split('/')[-2])
)
metadata['data_home'] = data_home
return metadata
DATA = utils.LargeData('saraga_index.json', _load_metadata)
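# Hedged usage sketch (not part of the original loader): once the dataset has been
# downloaded to `data_home`, a single recording could be accessed roughly like this.
# The track id shown is a made-up placeholder; real ids follow the pattern
# '<carnatic|hindustani>_<directory name>' produced by _load_metadata above.
#
# >>> track = Track('carnatic_Bhuvini Dasudane', data_home='/path/to/saraga_1.0')
# >>> track.tonic        # float: tonic of the lead performer
# >>> track.pitch        # F0Data: time stamps and pitch values
# >>> track.sections     # SectionData: manually annotated sections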
class Track(core.Track):
"""Saraga Track class
Args:
track_id (str): track id of the track
data_home (str): Local path where the dataset is stored. default=None
If `None`, looks for the data in the default directory, `~/mir_datasets`
Common attributes:
iam_style (str): flag to identify if track belongs to hindustani or carnatic collection
title (str): Title of the piece in the track
mbid (str): MusicBrainz ID of the track
album_artists (list, dicts): list of dicts containing the album artists present in the track and its mbid
artists (list, dicts): list of dicts containing information of the featuring artists in the track
Carnatic attributes:
raaga (list, dict): list of dicts containing information about the raagas present in the track
form (list, dict): list of dicts containing information about the forms present in the track
work (list, dicts): list of dicts containing the work present in the piece, and its mbid
taala (list, dicts): list of dicts containing the talas present in the track and its uuid
concert (list, dicts): list of dicts containing the concert where the track is present and its mbid
Hindustani attributes:
raags (list, dict): list of dicts containing information about the raags present in the track
form (list, dict): list of dicts containing information about the forms present in the track
release (list, dicts): list of dicts containing information of the release where the track is found
work (list, dicts): list of dicts containing the work present in the piece, and its mbid
taals (list, dicts): list of dicts containing the taals present in the track and its uuid
layas (list, dicts): list of dicts containing the layas present in the track and its uuid
"""
def __init__(self, track_id, data_home):
if track_id not in DATA.index['tracks']:
raise ValueError('{} is not a valid track ID in Saraga'.format(track_id))
self.track_id = track_id
self._data_home = data_home
self._track_paths = DATA.index['tracks'][track_id]
# Audio path
self.audio_path = os.path.join(self._data_home, self._track_paths['audio'][0])
# Annotation paths
self.ctonic_path = utils.none_path_join(
[self._data_home, self._track_paths['ctonic'][0]]
)
self.pitch_path = utils.none_path_join(
[self._data_home, self._track_paths['pitch'][0]]
)
self.pitch_vocal_path = utils.none_path_join(
[self._data_home, self._track_paths['pitch_vocal'][0]]
)
self.tempo_path = utils.none_path_join(
[self._data_home, self._track_paths['tempo'][0]]
)
self.sama_path = utils.none_path_join(
[self._data_home, self._track_paths['sama'][0]]
)
self.sections_path = utils.none_path_join(
[self._data_home, self._track_paths['sections'][0]]
)
self.phrases_path = utils.none_path_join(
[self._data_home, self._track_paths['phrases'][0]]
)
self.metadata_path = utils.none_path_join(
[self._data_home, self._track_paths['metadata'][0]]
)
        # Flag to distinguish between carnatic and hindustani tracks
self.iam_style = str(self.track_id.split('_')[0])
# CARNATIC MUSIC TRACKS
if self.iam_style == 'carnatic':
metadata = DATA.metadata(self.metadata_path)
if metadata is not None and track_id == metadata['track_id']:
self._track_metadata = metadata
else:
# annotations with missing metadata
self._track_metadata = {
'raaga': None,
'form': None,
'title': None,
'work': None,
'length': None,
'taala': None,
'album_artists': None,
'mbid': None,
'artists': None,
'concert': None,
}
# HINDUSTANI MUSIC TRACKS
if self.iam_style == 'hindustani':
metadata = DATA.metadata(self.metadata_path)
if metadata is not None and track_id == metadata['track_id']:
self._track_metadata = metadata
else:
# annotations with missing metadata
self._track_metadata = {
'title': None,
'raags': None,
'length': None,
'album_artists': None,
'forms': None,
'mbid': None,
'artists': None,
'release': None,
'works': None,
'taals': None,
'layas': None,
}
# Common attributes for Hindustani and Carnatic tracks
self.title = self._track_metadata['title']
self.artists = self._track_metadata['artists']
self.album_artists = self._track_metadata['album_artists']
self.mbid = self._track_metadata['mbid']
        # Carnatic specific attributes (None when not present in the metadata)
        self.raaga = self._track_metadata.get('raaga')
        self.form = self._track_metadata.get('form')
        self.work = self._track_metadata.get('work')
        self.taala = self._track_metadata.get('taala')
        self.concert = self._track_metadata.get('concert')
        # Hindustani specific attributes (None when not present in the metadata)
        self.raags = self._track_metadata.get('raags')
        self.forms = self._track_metadata.get('forms')
        self.release = self._track_metadata.get('release')
        self.works = self._track_metadata.get('works')
        self.taals = self._track_metadata.get('taals')
        self.layas = self._track_metadata.get('layas')
@utils.cached_property
def tonic(self):
"""Float: tonic annotation"""
return load_tonic(self.ctonic_path)
@utils.cached_property
def pitch(self):
"""F0Data: pitch annotation"""
return load_pitch(self.pitch_path)
@utils.cached_property
def pitch_vocal(self):
"""F0Data: pitch vocal annotations"""
return load_pitch(self.pitch_vocal_path)
@utils.cached_property
def tempo(self):
"""Dict: tempo annotations"""
return load_tempo(self.tempo_path, self.iam_style)
@utils.cached_property
def sama(self):
"""SectionData: sama section annotations"""
return load_sama(self.sama_path)
@utils.cached_property
def sections(self):
"""SectionData: track section annotations"""
return load_sections(self.sections_path, self.iam_style)
@utils.cached_property
def phrases(self):
"""EventData: phrase annotations"""
return load_phrases(self.phrases_path)
@property
def audio(self):
"""(np.ndarray, float): audio signal, sample rate"""
return load_audio(self.audio_path)
def to_jams(self):
"""Jams: the track's data in jams format"""
return jams_utils.jams_converter(
audio_path=self.audio_path,
f0_data=[(self.pitch, 'pitch'), (self.pitch_vocal, 'pitch_vocal')],
section_data=[(self.sama, 'sama'), (self.sections, 'sections')],
event_data=[(self.phrases, 'phrases')],
metadata={
'tempo': self.tempo,
'tonic': self.tonic,
'metadata': self._track_metadata,
},
)
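# Hedged usage sketch (not part of the original module): illustrates how the
# Track class above is typically used; 'carnatic_1' and the data_home value
# are assumptions and may not exist in a given install.
def _example_track_usage(data_home):
    track = Track('carnatic_1', data_home)
    print(track.iam_style, track.title)   # e.g. 'carnatic' and the piece title
    tonic = track.tonic                   # float tonic in Hz, or None
    pitch = track.pitch                   # F0Data pitch annotation, or None
    return track.to_jams()                # all annotations bundled as a JAMS object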
def load_audio(audio_path):
"""Load a Saraga audio file.
Args:
audio_path (str): path to audio file
Returns:
        y (np.ndarray): the audio signal (loaded with mono=False, so not downmixed)
sr (float): The sample rate of the audio file
"""
if audio_path is None:
return None
if not os.path.exists(audio_path):
raise IOError("audio_path {} does not exist".format(audio_path))
return librosa.load(audio_path, sr=44100, mono=False)
def load_tonic(tonic_path):
"""Load tonic
Args:
tonic_path (str): Local path where the tonic path is stored.
If `None`, returns None.
Returns:
        (float): Tonic annotation in Hz
"""
if tonic_path is None:
return None
if not os.path.exists(tonic_path):
raise IOError("tonic_path {} does not exist".format(tonic_path))
with open(tonic_path, 'r') as reader:
return float(reader.readline().split('\n')[0])
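# Hedged sketch (not part of the original module): the tonic file is assumed to
# be a plain-text file whose first line holds the tonic frequency in Hz, which
# is all load_tonic above reads.
def _example_tonic_file(tmp_path='/tmp/example.ctonic.txt'):
    with open(tmp_path, 'w') as f:
        f.write('196.0\n')
    return load_tonic(tmp_path)   # -> 196.0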
def load_pitch(pitch_path):
"""Load pitch
Args:
        pitch_path (str): Local path where the pitch annotation is stored.
If `None`, returns None.
Returns:
F0Data: pitch annotation
"""
if pitch_path is None:
return None
if not os.path.exists(pitch_path):
raise IOError("melody_path {} does not exist".format(pitch_path))
times = []
freqs = []
with open(pitch_path, 'r') as reader:
for line in reader.readlines():
times.append(float(line.split('\t')[0]))
freqs.append(float(line.split('\t')[1]))
if not times:
return None
times = | np.array(times) | numpy.array |
#!/usr/bin/python3
import numpy as np
import pdb
import torch
from multiobjective_opt.dist_mgda_utils import (
reduce_to_dict_per_dataset,
scaled_reduce_dict_to_tensor,
normalize_tensor_list
)
def test_all_gather_create_tensor_list():
"""
    Not easy to test, since the inputs must live on separate CPUs/GPUs for it to work.
"""
pass
def test_scaled_reduce_dict_to_tensor():
"""
"""
dataset_grad_p_dict = {
'coco': torch.tensor([1.,2.]),
'ade20k': torch.tensor([3.,4.]),
'mapillary': torch.tensor([5.,6.])
}
dataset_names = ['coco', 'ade20k', 'mapillary']
scales = {'coco': 1., 'ade20k': 5., 'mapillary': 2.}
tensor = scaled_reduce_dict_to_tensor(dataset_grad_p_dict, dataset_names, scales=scales)
gt_tensor = torch.tensor([26., 34.])
assert torch.allclose(tensor, gt_tensor)
def test_reduce_to_dict_per_dataset():
"""
"""
ngpus_per_node = 8
tensor_list = [torch.ones(1) * i for i in range(ngpus_per_node) ]
dataset_gpu_mapping = {
'coco':[0,1,2],
'mapillary': [3,4,5],
'ade20k': [6,7]
}
dataset_loss_dict = reduce_to_dict_per_dataset(tensor_list, dataset_gpu_mapping)
gt_dataset_loss_dict = {
'coco': torch.tensor([3./3]), # (0 + 1 + 2 ) / 3
'mapillary': torch.tensor([12./3.]), # (3 + 4 + 5) / 3
'ade20k': torch.tensor([13./2.]) # (6 + 7) / 2
}
assert_tensor_dicts_are_equal(dataset_loss_dict, gt_dataset_loss_dict)
print(dataset_loss_dict)
def assert_tensor_dicts_are_equal(dict1, dict2):
"""
"""
assert set(dict1.keys()) == set(dict2.keys())
for k, v1 in dict1.items():
assert torch.allclose(v1, dict2[k])
def test_normalize_tensor_list():
"""
"""
tensor_list = [
torch.arange(5).type(torch.float32),
torch.ones(3).type(torch.float32),
torch.ones(2).type(torch.float32) * 2
]
print('Unnormalized: ', tensor_list)
normalized_tensor_list, norm = normalize_tensor_list(tensor_list)
gt_tensor_list = np.array([0,1,2,3,4,1,1,1,2,2.])
gt_norm = | np.linalg.norm(gt_tensor_list) | numpy.linalg.norm |
#!/usr/bin/env python
"""experiments.py: experiments python program for different experiment applications"""
__author__ = "<NAME>."
__copyright__ = "Copyright 2020, SuperDARN@VT"
__credits__ = []
__license__ = "MIT"
__version__ = "1.0."
__maintainer__ = "<NAME>."
__email__ = "<EMAIL>"
__status__ = "Research"
import matplotlib
matplotlib.use("Agg")
import matplotlib.dates as mdates
import matplotlib.pyplot as plt
plt.style.use("config/alt.mplstyle")
import sys
sys.path.append("models/")
sys.path.append("models/experiments/")
import os
import numpy as np
import argparse
import pandas as pd
import datetime as dt
from netCDF4 import Dataset, num2date
from dateutil import parser as dparser
import glob
import xarray
import statsmodels.api as sm
from statsmodels.formula.api import ols
from constant import *
import utils
from absorption import *
import case0
from model import Model
fontT = {"family": "serif", "color": "k", "weight": "normal", "size": 8}
font = {"family": "serif", "color": "black", "weight": "normal", "size": 10}
from matplotlib import font_manager
ticks_font = font_manager.FontProperties(family="serif", size=10, weight="normal")
matplotlib.rcParams["xtick.color"] = "k"
matplotlib.rcParams["ytick.color"] = "k"
matplotlib.rcParams["xtick.labelsize"] = 10
matplotlib.rcParams["ytick.labelsize"] = 10
matplotlib.rcParams["mathtext.default"] = "default"
def coloring_axes(ax, atype="left", col="red", fmtr="%H", ivl=60):
ax.spines[atype].set_color(col)
ax.tick_params(axis="y", which="both", colors=col)
ax.yaxis.label.set_color(col)
fmt = matplotlib.dates.DateFormatter(fmtr)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=ivl))
return ax
def coloring_twaxes(ax, atype="left", col="red", twcol="k", fmtr="%H", ivl=60):
ax.spines[atype].set_color(col)
ax.tick_params(axis="y", which="both", colors=twcol)
ax.yaxis.label.set_color(twcol)
fmt = matplotlib.dates.DateFormatter(fmtr)
ax.xaxis.set_major_formatter(fmt)
ax.xaxis.set_major_locator(mdates.MinuteLocator(interval=ivl))
return ax
def _case0_(args):
""" Impact of the I0 and frequency """
chi = np.deg2rad(np.linspace(0,90,91))
f0 = 10**np.linspace(-6,-1,31) * 1e3
fo = 10**np.linspace(np.log10(.1), np.log10(200), 100)
ev, start, end = dt.datetime(2015,3,11,16,22), dt.datetime(2015,3,11,15,30), dt.datetime(2015,3,11,17,30)
l, r = 52, 53
_f0_ = case0._Case0_(start, end)[40:53]
fname = "data/sim/case0.nc.gz"
os.system("gzip -d "+fname)
_nc = Dataset(fname.replace(".gz", ""))
os.system("gzip "+fname.replace(".gz", ""))
pg = utils.PointGrid("ott", ev, start, end, 30, v=False)
_lo_,_qo_ = [],[]
b = pg.igrf["B"][l:r,:]
pg._col_.nu_FT = pg._col_.nu_FT[l:r,:]
pg._col_.nu_av_CC = pg._col_.nu_av_CC[l:r,:]
pg._col_.nu_av_MB = pg._col_.nu_av_MB[l:r,:]
pg._col_.nu_SN["total"] = pg._col_.nu_SN["total"][l:r,:]
ne = _nc.variables["ne"][l:r,:]
for _f_ in fo:
print(" Frequency - ", _f_, " MHz")
u = Absorption(b, pg._col_, ne, fo=_f_*1e6)
_lo_.append([utils.int_absorption(u.AH["SN"]["O"], pg.alts, extpoint=68, llim = 60, ulim = 110),
utils.int_absorption(u.AH["AV_CC"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
utils.int_absorption(u.AH["AV_MB"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
utils.int_absorption(u.SW["FT"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110)])
continue
_lo_ = np.array(_lo_)
ne = _nc.variables["ne"][40:53,:]
nfo = np.linspace(1,70,50)
for i, _ in enumerate(_f0_):
_k_ = []
for _f_ in nfo:
print(" Frequency, I - ", _f_, " MHz,", _f0_[i], "W/m2")
u = Absorption(b, pg._col_, ne[i:i+1,:], fo=_f_*1e6)
_k_.append([utils.int_absorption(u.AH["SN"]["O"], pg.alts, extpoint=68, llim = 60, ulim = 110),
utils.int_absorption(u.AH["AV_CC"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
utils.int_absorption(u.AH["AV_MB"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110),
utils.int_absorption(u.SW["FT"]["O"], pg.alts, extpoint=64, llim = 60, ulim = 110)])
_k_ = np.array(_k_)[:,:,0]
_qo_.append([10**utils.extrap1d(_k_[:,0], np.log10(nfo))([1])[0],
10**utils.extrap1d(_k_[:,1], np.log10(nfo))([1])[0],
10**utils.extrap1d(_k_[:,2], np.log10(nfo))([1])[0],
10**utils.extrap1d(_k_[:,3], | np.log10(nfo) | numpy.log10 |
"""
This module solves for the orbit of the planet given Keplerian parameters.
"""
import numpy as np
import astropy.units as u
import astropy.constants as consts
try:
from . import _kepler
cext = True
except ImportError:
print("WARNING: KEPLER: Unable to import C-based Kepler's \
equation solver. Falling back to the slower NumPy implementation.")
cext = False
def calc_orbit(epochs, sma, ecc, inc, aop, pan, tau, plx, mtot, mass_for_Kamp=None, tau_ref_epoch=0, tolerance=1e-9, max_iter=100):
"""
Returns the separation and radial velocity of the body given array of
orbital parameters (size n_orbs) at given epochs (array of size n_dates)
Based on orbit solvers from <NAME> and <NAME>. Adapted by <NAME> and <NAME>.
Args:
epochs (np.array): MJD times for which we want the positions of the planet
sma (np.array): semi-major axis of orbit [au]
ecc (np.array): eccentricity of the orbit [0,1]
inc (np.array): inclination [radians]
aop (np.array): argument of periastron [radians]
pan (np.array): longitude of the ascending node [radians]
tau (np.array): epoch of periastron passage in fraction of orbital period past MJD=0 [0,1]
plx (np.array): parallax [mas]
mtot (np.array): total mass of the two-body orbit (M_* + M_planet) [Solar masses]
mass_for_Kamp (np.array, optional): mass of the body that causes the RV signal.
For example, if you want to return the stellar RV, this is the planet mass.
If you want to return the planetary RV, this is the stellar mass. [Solar masses].
For planet mass ~ 0, mass_for_Kamp ~ M_tot, and function returns planetary RV (default).
tau_ref_epoch (float, optional): reference date that tau is defined with respect to (i.e., tau=0)
tolerance (float, optional): absolute tolerance of iterative computation. Defaults to 1e-9.
max_iter (int, optional): maximum number of iterations before switching. Defaults to 100.
Return:
3-tuple:
raoff (np.array): array-like (n_dates x n_orbs) of RA offsets between the bodies
(origin is at the other body) [mas]
deoff (np.array): array-like (n_dates x n_orbs) of Dec offsets between the bodies [mas]
vz (np.array): array-like (n_dates x n_orbs) of radial velocity of one of the bodies
(see `mass_for_Kamp` description) [km/s]
Written: <NAME>, <NAME>, 2018
"""
n_orbs = np.size(sma) # num sets of input orbital parameters
n_dates = np.size(epochs) # number of dates to compute offsets and vz
# return planetary RV if `mass_for_Kamp` is not defined
if mass_for_Kamp is None:
mass_for_Kamp = mtot
# Necessary for _calc_ecc_anom, for now
if np.isscalar(epochs): # just in case epochs is given as a scalar
epochs = np.array([epochs])
ecc_arr = np.tile(ecc, (n_dates, 1))
# Compute period (from Kepler's third law) and mean motion
period = np.sqrt(4*np.pi**2.0*(sma*u.AU)**3/(consts.G*(mtot*u.Msun)))
period = period.to(u.day).value
mean_motion = 2*np.pi/(period) # in rad/day
# # compute mean anomaly (size: n_orbs x n_dates)
manom = (mean_motion*(epochs[:, None] - tau_ref_epoch) - 2*np.pi*tau) % (2.0*np.pi)
# compute eccentric anomalies (size: n_orbs x n_dates)
eanom = _calc_ecc_anom(manom, ecc_arr, tolerance=tolerance, max_iter=max_iter)
# compute the true anomalies (size: n_orbs x n_dates)
# Note: matrix multiplication makes the shapes work out here and below
tanom = 2.*np.arctan(np.sqrt((1.0 + ecc)/(1.0 - ecc))*np.tan(0.5*eanom))
# compute 3-D orbital radius of second body (size: n_orbs x n_dates)
radius = sma * (1.0 - ecc * np.cos(eanom))
# compute ra/dec offsets (size: n_orbs x n_dates)
# math from <NAME>. Lots of trig
c2i2 = np.cos(0.5*inc)**2
s2i2 = np.sin(0.5*inc)**2
arg1 = tanom + aop + pan
arg2 = tanom + aop - pan
c1 = np.cos(arg1)
c2 = np.cos(arg2)
s1 = np.sin(arg1)
s2 = np.sin(arg2)
# updated sign convention for Green Eq. 19.4-19.7
raoff = radius * (c2i2*s1 - s2i2*s2) * plx
deoff = radius * (c2i2*c1 + s2i2*c2) * plx
# compute the radial velocity (vz) of the body (size: n_orbs x n_dates)
    # first compute the RV semi-amplitude (size: n_orbs x n_dates)
Kv = np.sqrt(consts.G / (1.0 - ecc**2)) * (mass_for_Kamp * u.Msun *
np.sin(inc)) / np.sqrt(mtot * u.Msun) / np.sqrt(sma * u.au)
# Convert to km/s
Kv = Kv.to(u.km/u.s)
# compute the vz
vz = Kv.value * (ecc*np.cos(aop) + np.cos(aop + tanom))
# Squeeze out extra dimension (useful if n_orbs = 1, does nothing if n_orbs > 1)
vz = np.squeeze(vz)[()]
return raoff, deoff, vz
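# Hedged usage sketch (not from the original module): evaluates a single made-up
# orbit at three epochs; all parameter values are illustrative only.
def _example_calc_orbit():
    epochs = np.array([51544.0, 52544.0, 53544.0])          # MJD
    raoff, deoff, vz = calc_orbit(
        epochs, sma=10.0, ecc=0.3, inc=np.radians(45.0), aop=0.5, pan=1.0,
        tau=0.3, plx=50.0, mtot=1.2)
    # raoff/deoff are offsets in mas and vz is in km/s, evaluated at each epoch
    return raoff, deoff, vz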
def _calc_ecc_anom(manom, ecc, tolerance=1e-9, max_iter=100, use_c=False):
"""
    Computes the eccentric anomaly from the mean anomaly.
Code from <NAME>'s orbit solver (e < 0.95 use Newton, e >= 0.95 use Mikkola)
Args:
manom (float/np.array): mean anomaly, either a scalar or np.array of any shape
ecc (float/np.array): eccentricity, either a scalar or np.array of the same shape as manom
tolerance (float, optional): absolute tolerance of iterative computation. Defaults to 1e-9.
max_iter (int, optional): maximum number of iterations before switching. Defaults to 100.
Return:
eanom (float/np.array): eccentric anomalies, same shape as manom
Written: <NAME>, 2018
"""
if np.isscalar(ecc) or (np.shape(manom) == np.shape(ecc)):
pass
else:
raise ValueError("ecc must be a scalar, or ecc.shape == manom.shape")
# If manom is a scalar, make it into a one-element array
if np.isscalar(manom):
manom = np.array((manom, ))
# If ecc is a scalar, make it the same shape as manom
if np.isscalar(ecc):
ecc = np.full(np.shape(manom), ecc)
# Initialize eanom array
eanom = np.full(np.shape(manom), np.nan)
# Save some boolean arrays
ecc_zero = ecc == 0.0
ecc_low = ecc < 0.95
# First deal with e == 0 elements
ind_zero = np.where(ecc_zero)
if len(ind_zero[0]) > 0:
eanom[ind_zero] = manom[ind_zero]
# Now low eccentricities
ind_low = np.where(~ecc_zero & ecc_low)
if cext and use_c:
if len(ind_low[0]) > 0: eanom[ind_low] = _kepler._c_newton_solver(manom[ind_low], ecc[ind_low], tolerance=tolerance, max_iter=max_iter)
        # the C solver returns eanom = -1 if it doesn't converge after max_iter iterations
m_one = eanom == -1
ind_high = np.where(~ecc_zero & ~ecc_low | m_one)
else:
if len(ind_low[0]) > 0:
eanom[ind_low] = _newton_solver(
manom[ind_low], ecc[ind_low], tolerance=tolerance, max_iter=max_iter)
ind_high = np.where(~ecc_zero & ~ecc_low)
# Now high eccentricities
if len(ind_high[0]) > 0:
eanom[ind_high] = _mikkola_solver_wrapper(manom[ind_high], ecc[ind_high], use_c)
return np.squeeze(eanom)[()]
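# Hedged sanity check (not from the original module): whatever branch solved it,
# the returned eccentric anomaly must satisfy Kepler's equation M = E - e*sin(E).
def _example_check_kepler_inversion():
    manom = np.linspace(0.1, 2 * np.pi - 0.1, 5)
    ecc = np.full_like(manom, 0.3)
    eanom = _calc_ecc_anom(manom, ecc, tolerance=1e-12)
    return np.allclose(manom, eanom - ecc * np.sin(eanom), rtol=0., atol=1e-9)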
def _newton_solver(manom, ecc, tolerance=1e-9, max_iter=100, eanom0=None):
"""
Newton-Raphson solver for eccentric anomaly.
Args:
manom (np.array): array of mean anomalies
ecc (np.array): array of eccentricities
eanom0 (np.array): array of first guess for eccentric anomaly, same shape as manom (optional)
Return:
eanom (np.array): array of eccentric anomalies
Written: <NAME>, 2018
"""
# Ensure manom and ecc are np.array (might get passed as astropy.Table Columns instead)
manom = np.array(manom)
ecc = np.array(ecc)
# Initialize at E=M, E=pi is better at very high eccentricities
if eanom0 is None:
eanom = np.copy(manom)
else:
eanom = np.copy(eanom0)
# Let's do one iteration to start with
eanom -= (eanom - (ecc * np.sin(eanom)) - manom) / (1.0 - (ecc * np.cos(eanom)))
diff = (eanom - (ecc * np.sin(eanom)) - manom) / (1.0 - (ecc * np.cos(eanom)))
abs_diff = np.abs(diff)
ind = np.where(abs_diff > tolerance)
niter = 0
while ((ind[0].size > 0) and (niter <= max_iter)):
eanom[ind] -= diff[ind]
# If it hasn't converged after half the iterations are done, try starting from pi
if niter == (max_iter//2):
eanom[ind] = np.pi
diff[ind] = (eanom[ind] - (ecc[ind] * | np.sin(eanom[ind]) | numpy.sin |
# This file is part of the pyMOR project (http://www.pymor.org).
# Copyright 2013-2019 pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (http://opensource.org/licenses/BSD-2-Clause)
"""This module contains algorithms for the empirical interpolation of |Operators|.
The main work for generating the necessary interpolation data is handled by
the :func:`ei_greedy` method. The objects returned by this method can be used
to instantiate an |EmpiricalInterpolatedOperator|.
As a convenience, the :func:`interpolate_operators` method allows one to perform
the empirical interpolation of the |Operators| of a given model with
a single function call.
"""
import numpy as np
from scipy.linalg import solve
from pymor.core.logger import getLogger
from pymor.algorithms.pod import pod as pod_alg
from pymor.operators.ei import EmpiricalInterpolatedOperator
from pymor.parallel.dummy import dummy_pool
from pymor.parallel.interfaces import RemoteObjectInterface
from pymor.parallel.manager import RemoteObjectManager
from pymor.vectorarrays.interfaces import VectorArrayInterface
def ei_greedy(U, error_norm=None, atol=None, rtol=None, max_interpolation_dofs=None,
copy=True, pool=dummy_pool):
"""Generate data for empirical interpolation using EI-Greedy algorithm.
Given a |VectorArray| `U`, this method generates a collateral basis and
interpolation DOFs for empirical interpolation of the vectors contained in `U`.
The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
(with `triangular=True`).
The interpolation data is generated by a greedy search algorithm, where in each
loop iteration the worst approximated vector in `U` is added to the collateral basis.
Parameters
----------
U
A |VectorArray| of vectors to interpolate.
error_norm
Norm w.r.t. which to calculate the interpolation error. If `None`, the Euclidean norm
is used.
atol
Stop the greedy search if the largest approximation error is below this threshold.
rtol
Stop the greedy search if the largest relative approximation error is below this threshold.
max_interpolation_dofs
Stop the greedy search if the number of interpolation DOF (= dimension of the collateral
basis) reaches this value.
copy
If `False`, `U` will be modified during executing of the algorithm.
pool
If not `None`, the |WorkerPool| to use for parallelization.
Returns
-------
interpolation_dofs
|NumPy array| of the DOFs at which the vectors are evaluated.
collateral_basis
|VectorArray| containing the generated collateral basis.
data
Dict containing the following fields:
:errors: Sequence of maximum approximation errors during
greedy search.
            :triangularity_errors: Sequence of maximum absolute values of interpolation
matrix coefficients in the upper triangle (should
be near zero).
"""
    if pool: # dispatch to parallel implementation
assert isinstance(U, (VectorArrayInterface, RemoteObjectInterface))
with RemoteObjectManager() as rom:
if isinstance(U, VectorArrayInterface):
U = rom.manage(pool.scatter_array(U))
return _parallel_ei_greedy(U, error_norm=error_norm, atol=atol, rtol=rtol,
max_interpolation_dofs=max_interpolation_dofs, copy=copy, pool=pool)
assert isinstance(U, VectorArrayInterface)
logger = getLogger('pymor.algorithms.ei.ei_greedy')
logger.info('Generating Interpolation Data ...')
interpolation_dofs = np.zeros((0,), dtype=np.int32)
collateral_basis = U.empty()
max_errs = []
triangularity_errs = []
if copy:
U = U.copy()
ERR = U
errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
max_err_ind = np.argmax(errs)
initial_max_err = max_err = errs[max_err_ind]
# main loop
while True:
if max_interpolation_dofs is not None and len(interpolation_dofs) >= max_interpolation_dofs:
logger.info('Maximum number of interpolation DOFs reached. Stopping extension loop.')
            logger.info(f'Final maximum interpolation error with '
                        f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
break
logger.info(f'Maximum interpolation error with '
f'{len(interpolation_dofs)} interpolation DOFs: {max_err}')
if atol is not None and max_err <= atol:
logger.info('Absolute error tolerance reached! Stopping extension loop.')
break
if rtol is not None and max_err / initial_max_err <= rtol:
logger.info('Relative error tolerance reached! Stopping extension loop.')
break
# compute new interpolation dof and collateral basis vector
new_vec = U[max_err_ind].copy()
new_dof = new_vec.amax()[0][0]
if new_dof in interpolation_dofs:
            logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
new_dof_value = new_vec.dofs([new_dof])[0, 0]
if new_dof_value == 0.:
logger.info(f'DOF {new_dof} selected for interpolation has zero maximum error! Stopping extension loop.')
break
new_vec *= 1 / new_dof_value
interpolation_dofs = np.hstack((interpolation_dofs, new_dof))
collateral_basis.append(new_vec)
max_errs.append(max_err)
# update U and ERR
new_dof_values = U.dofs([new_dof])
U.axpy(-new_dof_values[:, 0], new_vec)
errs = ERR.l2_norm() if error_norm is None else error_norm(ERR)
max_err_ind = np.argmax(errs)
max_err = errs[max_err_ind]
interpolation_matrix = collateral_basis.dofs(interpolation_dofs).T
triangularity_errors = np.abs(interpolation_matrix - np.tril(interpolation_matrix))
for d in range(1, len(interpolation_matrix) + 1):
triangularity_errs.append(np.max(triangularity_errors[:d, :d]))
if len(triangularity_errs) > 0:
logger.info(f'Interpolation matrix is not lower triangular with maximum error of {triangularity_errs[-1]}')
data = {'errors': max_errs, 'triangularity_errors': triangularity_errs}
return interpolation_dofs, collateral_basis, data
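# Hedged NumPy-only illustration (deliberately not using the pyMOR API): shows
# the interpolation step that the ei_greedy/deim data enables -- a vector is
# approximated from its values at the interpolation DOFs alone. The array
# arguments are assumptions made for this sketch.
def _example_interpolation_step(collateral_basis_np, interpolation_dofs, u_np):
    B = collateral_basis_np[:, :len(interpolation_dofs)]    # (dim, m) basis matrix
    interpolation_matrix = B[interpolation_dofs, :]         # (m, m)
    coefficients = solve(interpolation_matrix, u_np[interpolation_dofs])
    return B @ coefficients                                 # interpolant of u_np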
def deim(U, modes=None, pod=True, atol=None, rtol=None, product=None, pod_options={}):
"""Generate data for empirical interpolation using DEIM algorithm.
Given a |VectorArray| `U`, this method generates a collateral basis and
interpolation DOFs for empirical interpolation of the vectors contained in `U`.
The returned objects can be used to instantiate an |EmpiricalInterpolatedOperator|
(with `triangular=False`).
The collateral basis is determined by the first :func:`~pymor.algorithms.pod.pod` modes of `U`.
Parameters
----------
U
A |VectorArray| of vectors to interpolate.
modes
Dimension of the collateral basis i.e. number of POD modes of the vectors in `U`.
pod
If `True`, perform a POD of `U` to obtain the collateral basis. If `False`, `U`
is used as collateral basis.
atol
Absolute POD tolerance.
rtol
Relative POD tolerance.
product
Inner product |Operator| used for the POD.
pod_options
Dictionary of additional options to pass to the :func:`~pymor.algorithms.pod.pod` algorithm.
Returns
-------
interpolation_dofs
|NumPy array| of the DOFs at which the vectors are interpolated.
collateral_basis
|VectorArray| containing the generated collateral basis.
data
Dict containing the following fields:
:svals: POD singular values.
"""
assert isinstance(U, VectorArrayInterface)
logger = getLogger('pymor.algorithms.ei.deim')
logger.info('Generating Interpolation Data ...')
data = {}
if pod:
collateral_basis, svals = pod_alg(U, modes=modes, atol=atol, rtol=rtol, product=product, **pod_options)
data['svals'] = svals
else:
collateral_basis = U
interpolation_dofs = np.zeros((0,), dtype=np.int32)
interpolation_matrix = np.zeros((0, 0))
for i in range(len(collateral_basis)):
logger.info(f'Choosing interpolation point for basis vector {i}.')
if len(interpolation_dofs) > 0:
coefficients = solve(interpolation_matrix,
collateral_basis[i].dofs(interpolation_dofs).T).T
U_interpolated = collateral_basis[:len(interpolation_dofs)].lincomb(coefficients)
ERR = collateral_basis[i] - U_interpolated
else:
ERR = collateral_basis[i]
# compute new interpolation dof and collateral basis vector
new_dof = ERR.amax()[0][0]
if new_dof in interpolation_dofs:
            logger.info(f'DOF {new_dof} selected twice for interpolation! Stopping extension loop.')
break
interpolation_dofs = | np.hstack((interpolation_dofs, new_dof)) | numpy.hstack |
import warnings
warnings.simplefilter(action='ignore', category=FutureWarning)
import os
import os.path as osp
import numpy as np
import json
import provider
from sklearn.model_selection import train_test_split
BASE_DIR = os.path.dirname(os.path.abspath(__file__))
TRAIN_FILES_MODELNET = provider.getDataFiles( \
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/train_files.txt'))
TEST_FILES_MODELNET = provider.getDataFiles(\
os.path.join(BASE_DIR, 'data/modelnet40_ply_hdf5_2048/test_files.txt'))
MODELNET10_TRAIN_FILE = 'data/ModelNet10/trainShuffled_Relabel.h5'
MODELNET10_TEST_FILE = 'data/ModelNet10/testShuffled_Relabel.h5'
CHAIR_PATH = 'data/Chair'
KEYPOINT_CHAIR_PATH = 'data/Chair/keypts_chair.mat'
CHAIR_FILES = os.listdir(CHAIR_PATH)
TRAIN_CHAIR_FILES = [osp.join(CHAIR_PATH,f) for f in CHAIR_FILES if 'train' in f]
VAL_CHAIR_FILES = [osp.join(CHAIR_PATH,f) for f in CHAIR_FILES if 'val' in f]
TEST_CHAIR_FILES = [osp.join(CHAIR_PATH,f) for f in CHAIR_FILES if 'test' in f]
KEYPOINTNET_PATH = "/media/tianxing/Samsung 1T/ShapeNetCore/"
def naive_read_pcd(path):
lines = open(path, 'r').readlines()
idx = -1
for i, line in enumerate(lines):
if line.startswith('DATA ascii'):
idx = i + 1
break
lines = lines[idx:]
lines = [line.rstrip().split(' ') for line in lines]
data = np.asarray(lines)
    pc = np.array(data[:, :3], dtype=np.float64)
return pc
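# Hedged sketch (not part of the original loader): naive_read_pcd only cares
# about the 'DATA ascii' marker and the first three columns, so a minimal
# hand-written PCD like this is enough to exercise it.
def _example_naive_read_pcd(tmp_path='/tmp/example.pcd'):
    lines = ['VERSION .7', 'FIELDS x y z', 'POINTS 2', 'DATA ascii',
             '0.0 0.0 0.0', '1.0 2.0 3.0']
    with open(tmp_path, 'w') as f:
        f.write('\n'.join(lines) + '\n')
    return naive_read_pcd(tmp_path).shape   # -> (2, 3)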
def get_pointcloud(dataset, NUM_POINT=2048, shuffle=True):
"""
Load the dataset into memory
"""
if dataset == 'modelnet':
train_file_idxs = np.arange(0, len(TRAIN_FILES_MODELNET))
data_train = []
label_train = []
for fn in range(len(TRAIN_FILES_MODELNET)):
print('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TRAIN_FILES_MODELNET[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
data_train.append(current_data)
label_train.append(current_label)
result_train = np.vstack(data_train)
label_train = np.concatenate(label_train, axis=None)
if shuffle:
X_train, y_train, _ = provider.shuffle_data(result_train, np.squeeze(label_train))
else:
X_train, y_train = result_train, np.squeeze(label_train)
data_test = []
label_test = []
for fn in range(len(TEST_FILES_MODELNET)):
print('----' + str(fn) + '-----')
current_data, current_label = provider.loadDataFile(TEST_FILES_MODELNET[fn])
current_data = current_data[:,0:NUM_POINT,:]
current_label = np.squeeze(current_label)
data_test.append(current_data)
label_test.append(current_label)
result_test = np.vstack(data_test)
label_test = np.concatenate(label_test, axis=None)
if shuffle:
X_test, y_test, _ = provider.shuffle_data(result_test, np.squeeze(label_test))
else:
X_test, y_test = result_test, np.squeeze(label_test)
elif dataset == 'shapenet':
shapenet_data, shapenet_label = provider.get_shapenet_data()
shapenet_data = shapenet_data[:,0:NUM_POINT,:]
X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)
elif dataset == 'shapenet_chair':
shapenet_data, shapenet_label = provider.get_shapenet_data()
shapenet_data = shapenet_data[:,0:NUM_POINT,:]
shapenet_data, shapenet_label = shapenet_data[shapenet_label==17], shapenet_label[shapenet_label==17]
X_train, X_test, y_train, y_test = train_test_split(shapenet_data, shapenet_label, test_size=0.2, random_state=42, shuffle=shuffle)
elif dataset == 'modelnet10':
current_data, current_label = provider.loadDataFile(MODELNET10_TRAIN_FILE)
current_data = current_data[:,0:NUM_POINT,:]
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
X_train, y_train = current_data, current_label
current_data, current_label = provider.loadDataFile(MODELNET10_TEST_FILE)
current_data = current_data[:,0:NUM_POINT,:]
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
current_label = np.squeeze(current_label)
X_test, y_test = current_data, current_label
elif dataset == 'keypoint':
current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_train, y_train = current_data, current_label
current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]):
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_test, y_test = current_data, current_label
elif dataset == 'keypoint_10class':
current_data, current_label = provider.load_mat_keypts(TRAIN_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
current_label[:, -10:] = np.arange(1, 11)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]): # shuffle order of points in a single model, otherwise keypoints are always at the end
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_train, y_train = current_data, current_label
current_data, current_label = provider.load_mat_keypts(TEST_CHAIR_FILES, KEYPOINT_CHAIR_PATH, NUM_POINT)
current_label[:, -10:] = np.arange(1, 11)
if shuffle:
current_data, current_label, _ = provider.shuffle_data(current_data, np.squeeze(current_label))
for i in range(current_data.shape[0]):
idx = np.arange(current_data.shape[1])
np.random.shuffle(idx)
current_data = current_data[:, idx, :]
current_label = current_label[:, idx]
current_label = np.squeeze(current_label)
X_test, y_test = current_data, current_label
elif dataset == "keypointnet":
json_path = osp.join(KEYPOINTNET_PATH, "annotations/all.json")
annots = json.load(open(json_path))
X = []
y = []
for annot in annots:
class_id = annot["class_id"]
model_id = annot["model_id"]
kpts = []
for kpt in annot["keypoints"]:
kpts.append(kpt["xyz"])
pcd_path = osp.join(KEYPOINTNET_PATH, f"pcds/{class_id}/{model_id}.pcd")
if os.path.exists(pcd_path):
pcd = naive_read_pcd(pcd_path)
pcd = pcd[0:NUM_POINT, :]
else:
continue
if len(kpts) != 10:
continue
pcd = np.concatenate((pcd[:-10], kpts))
label = np.zeros(NUM_POINT-10)
label = np.concatenate((label, | np.ones(10) | numpy.ones |
import itertools
import logging
import time
from typing import Callable, Dict, Optional, Tuple
import numpy as np
import torch
import torch.nn as nn
from kaggle_environments.envs.hungry_geese.hungry_geese \
import Action, Configuration, Observation
from numba import njit
from config import INPUT_AREA_SIZE, INPUT_CHANNELS, MAX_SEARCH
from config import EAST, INVALID, NORTH, SOUTH, WEST
'''
The World class contains the following attributes:
field:
[00] player1 head
[01] player1 tail1
[02] player1 tail2
[03] player1 tail3
[04] player1 tail4
[05] player1 tail5
[06] player1 tail6
[07] player1 tail7
[08] player1 tail8..
[09] player1 nexts
[10] player1 bodies
[11] certain collision
[12] potential collision
[13-51] repeat for each player
[52] food
input:
[00] my head
[01] my tail 1
[02] my tail 2
[03] my tail 3
[04] my tail 4
[05] my tail 5
[06] my tail 6
[07] my tail 7
[08] my tail 8..
[09] opponent1 head
[10] opponent1 tail 1
[11] opponent1 tail 2
[12] opponent1 tail 3
[13] opponent1 tail 4
[14] opponent1 tail 5
[15] opponent1 tail 6
[16] opponent1 tail 7
[17] opponent1 tails 8..
[18] opponent1 nexts
[19] opponent2 head
[20] opponent2 tail 1
[21] opponent2 tail 2
[22] opponent2 tail 3
[23] opponent2 tail 4
[24] opponent2 tail 5
[25] opponent2 tail 6
[26] opponent2 tail 7
[27] opponent2 tails 8..
[28] opponent2 nexts
[29] opponent3 head
[30] opponent3 tail 1
[31] opponent3 tail 2
[32] opponent3 tail 3
[33] opponent3 tail 4
[34] opponent3 tail 5
[35] opponent3 tail 6
[36] opponent3 tail 7
[37] opponent3 tails 8..
[38] opponent3 nexts
[39] any opponent head
[40] any opponent tail 1
[41] any opponent tail 2
[42] any opponent tail 3
[43] any opponent tail 4
[44] any opponent tail 5
[45] any opponent tail 6
[46] any opponent tail 7
[47] any opponent tails 8..
[48] any opponent nexts
[49] certain collisions (include myself)
[50] potential collisions
[51] length difference (-7..) (set flag at the head)
[52] length difference (-6)
[53] length difference (-5)
[54] length difference (-4)
[55] length difference (-3)
[56] length difference (-2)
[57] length difference (-1)
[58] length difference (0)
[59] length difference (+1)
[60] length difference (+2)
[61] length difference (+3)
[62] length difference (+4)
[63] length difference (+5)
[64] length difference (+6)
[65] length difference (+7..)
[66] food
[67] remaining steps to hungry [1]
[68] remaining steps to hungry [2]
[69] remaining steps to hungry [3]
[70] remaining steps to hungry [4]
[71] remaining steps to hungry [5]
[72] remaining steps to hungry [6]
[73] remaining steps to hungry [7]
[74] remaining steps to hungry [8..]
[75] remaining steps to the end [1]
[76] remaining steps to the end [2]
[77] remaining steps to the end [3]
[78] remaining steps to the end [4]
[79] remaining steps to the end [5]
[80] remaining steps to the end [6]
[81] remaining steps to the end [7]
[82] remaining steps to the end [8..]
[83] step (000-019)
[84] step (020-039)
[85] step (040-059)
[86] step (060-079)
[87] step (080-099)
[88] step (100-119)
[89] step (120-139)
[90] step (140-159)
[91] step (160-179)
[92] step (180-199)
[93] direction (vertical)
[94] direction (horizontal)
[95] remaining players [2]
[96] remaining players [3]
[97] remaining players [4]
ouptut:
[00] move left
[01] move straight
[02] move right
[03] value of 1st place
[04] value of 2nd place
[05] value of 3rd place
moves: [NORTH, EAST, SOUTH, WEST]
[00] value 1st
[01] value 2nd
[02] value 3rd
[03] value^2 1st (for ucb1-tuned)
[04] value^2 2nd (for ucb1-tuned)
[05] value^2 3rd (for ucb1-tuned)
[06] visit
[07] alive
[08] enabled
[09] policy (for puct or zero)
[10] expanded (1: not expanded, 0: expanded)
'''
LOGGER = logging.getLogger(__name__)
# _BEGIN_AGENT_
FIELD_FOOD = 52
FS = 13 # field size
FN = 9 # field offset of next heads
FB = 10 # field offset of bodies
FC = 11 # field offset of certain collision
FP = 12 # field offset of potential collision
INPUT_COLLISION = 49
INPUT_DIFFERENCE = 58
INPUT_FOOD = 66
INPUT_HUNGRY = 67
INPUT_END = 75
INPUT_STEP = 83
INPUT_DIRECTION = 93
INPUT_PLAYERS = 95
MOVE_VISIT = 6
MOVE_ALIVE = 7
MOVE_ENABLED = 8
MOVE_POLICY = 9
MOVE_EXPANDED = 10
# stack(for lookahead): [step, y, x, direction, move, backup]:
AS_Y = 4
AS_X = 5
AS_NMOVE = 6
AS_PMOVE = 7
AS_BACK = 8
@njit
def get_direction(prev: int, current: int, rows: int, columns: int) -> int:
prev_y, prev_x = row_col(prev, columns)
curr_y, curr_x = row_col(current, columns)
move_x = (curr_x - prev_x) % columns
move_y = (curr_y - prev_y) % rows
if move_y == rows - 1:
return NORTH
elif move_x == 1:
return EAST
elif move_y == 1:
return SOUTH
elif move_x == columns - 1:
return WEST
else:
return INVALID
@njit
def row_col(position: int, columns: int) -> Tuple[int, int]:
return position // columns, position % columns
@njit
def get_next_posision(position: int, direction: int, rows: int, columns: int) -> int:
y, x = row_col(position, columns)
if direction == NORTH:
y -= 1
y %= rows
elif direction == EAST:
x += 1
x %= columns
elif direction == SOUTH:
y += 1
y %= rows
elif direction == WEST:
x -= 1
x %= columns
return y * columns + x
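# Hedged sanity check (not part of the agent): on the standard 7x11 board,
# stepping NORTH and then asking for the direction between the two cells
# should round-trip.
def _example_board_geometry():
    rows, columns = 7, 11
    position = 3 * columns + 5                            # row 3, column 5
    nxt = get_next_posision(position, NORTH, rows, columns)
    return get_direction(position, nxt, rows, columns) == NORTH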
def get_move_values(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
return (moves[:, :, 0] * (1.0 - safe)) + (moves[:, :, max(player - 2, 0)] * safe)
def get_move_values2(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
return (moves[:, :, 3] * (1.0 - safe)) + (moves[:, :, max(player + 1, 3)] * safe)
def select_action_by_policy(moves: np.ndarray) -> np.ndarray:
return np.argmax((moves[:, :, MOVE_POLICY]) * moves[:, :, MOVE_ENABLED], axis=1)
def select_actions_by_beta(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
values = get_move_values(moves, safe, player)
alpha = np.minimum(values, moves[:, :, MOVE_VISIT])
beta = moves[:, :, MOVE_VISIT] - alpha
values = np.random.beta(alpha + 1e-6, beta + 1e-6)
return np.argmax(values * moves[:, :, MOVE_ENABLED], axis=1)
def select_actions_by_ucbm(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
'''Modified UCB (not UCB1)'''
values = get_move_values(moves, safe, player) / (moves[:, :, MOVE_VISIT] + 1e-6)
visits = np.sqrt(1.0 / (moves[:, :, MOVE_VISIT] + 1e-6))
return np.argmax(values * visits * moves[:, :, MOVE_ENABLED], axis=1)
def select_actions_by_ucbr(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
'''Randamized-Modified UCB (not UCB1)'''
values = get_move_values(moves, safe, player) / (moves[:, :, MOVE_VISIT] + 1e-6)
visits = np.sqrt((1.0 + np.random.rand(4, 1)) / (moves[:, :, MOVE_VISIT] + 1e-6))
return np.argmax(values * visits * moves[:, :, MOVE_ENABLED], axis=1)
def select_actions_by_ucb1(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
values = get_move_values(moves, safe, player) / (moves[:, :, MOVE_VISIT] + 1e-6)
totals = (moves[:, :, MOVE_VISIT].sum(axis=1) + 1)[:, None]
visits = np.sqrt(2 * np.log(totals) / (moves[:, :, MOVE_VISIT] + 1e-6))
return np.argmax((values + visits) * moves[:, :, MOVE_ENABLED], axis=1)
def select_actions_by_puct(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
'''PUCT(modified)'''
values = get_move_values(moves, safe, player) / (moves[:, :, MOVE_VISIT] + 1e-6)
totals = np.log(moves[:, :, MOVE_VISIT].sum(axis=1) + 1)[:, None]
visits = 1.0 * np.sqrt(totals / (moves[:, :, MOVE_VISIT] + 1e-6))
priority = values + moves[:, :, MOVE_POLICY] * visits
return np.argmax(priority * moves[:, :, MOVE_ENABLED], axis=1)
def select_actions_by_zero(moves: np.ndarray, safe: float, player: int) -> np.ndarray:
'''AlphaZero'''
values = get_move_values(moves, safe, player) / (moves[:, :, MOVE_VISIT] + 1e-6)
totals = moves[:, :, MOVE_VISIT].sum(axis=1, keepdims=True)
visits = 5.0 * np.sqrt(totals) / (moves[:, :, MOVE_VISIT] + 1)
priority = values + moves[:, :, MOVE_POLICY] * visits
return np.argmax(priority * moves[:, :, MOVE_ENABLED], axis=1)
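# Hedged illustration (not part of the agent): builds a dummy ``moves`` array of
# shape (4 geese, 4 directions, 11 stats), fills in only visit counts, first-place
# values and the enabled flag, and applies one of the selectors above.
def _example_select_action():
    moves = np.zeros((4, 4, 11), dtype=np.float32)
    moves[:, :, MOVE_ENABLED] = 1.0                       # every move allowed
    moves[:, :, MOVE_VISIT] = np.array([1., 5., 2., 1.])  # per-direction visits
    moves[:, :, 0] = np.array([0.2, 3.0, 0.5, 0.1])       # accumulated 1st-place value
    return select_actions_by_ucb1(moves, safe=0.0, player=4)   # one action per goose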
class Setting(object):
def __init__(self, **kwargs) -> None:
self.timelimit = 'auto' # time limit of search moves ('auto', 'zero', 'none' or visit count)
self.depthlimit = 20 # depth limit of search moves (simulation only at more than the specified depth)
self.decision = 'hybrid' # value for move decisions ('hybrid', 'value', 'visit')
self.normalize = True # normalize values
        self.collision = 0.0 # penalty of potential collision moves (ignore potential collisions at 0.0)
self.eating = 0.0 # value of eating a food (ignore foods at 0.0)
        self.search = 'ucbm' # algorithm of next move selection
self.safe = 0.0 # safe parameter at move decisions.
self.strict = False # evaluate a value of the root node with other edge and leaf nodes.
self.lookahead = 4 # depth of look-ahead by depth-first search (not inference by NN or MCTS).
self.policybase = 0.1 # base of policy
self.valuebase = 0.0 # base of value
self.valuetail = 0.0 # base at neighbor of own tail
self.usepolicy = True # use policy model
self.usevalue = True # use value model
self.random = 0.0 # probability of random decision (for reinforced learning)
self.gpu = -1 # use gpu (use cpu at -1)
for key, value in kwargs.items():
setattr(self, key, value)
def __and__(self, s: 'Setting') -> 'Setting':
return Setting(**{
k: v if v == getattr(s, k) else None for k, v in self.__dict__.items()})
def __sub__(self, s: 'Setting') -> 'Setting':
return Setting(**{
k: None if v == getattr(s, k) else v for k, v in self.__dict__.items()})
def __str__(self) -> str:
return ', '.join(f'{k}={v}' for k, v in self.__dict__.items() if v is not None)
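# Hedged illustration (not part of the agent): Setting supports `-` to keep only
# the fields that differ from another Setting and `&` to keep only those that
# agree, which is handy for logging overrides.
def _example_setting_diff():
    default = Setting()
    tuned = Setting(search='ucb1', safe=0.5)
    return str(tuned - default)   # e.g. 'search=ucb1, safe=0.5'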
class World(object):
def __init__(self, model: nn.Module, config: Configuration, setting: Setting) -> None:
self.model = model
self.config = config
self.setting = setting
# node depth
self.depth = 0
# visit count
self.visit = 0
# number of steps
self.step = -1
# game status
self.finished = False
# values after the end of the game
self.rewards = np.zeros([4], dtype=np.int16)
# previous head positions
self.prevs = np.zeros([4], dtype=np.int16)
# head directions
self.directions = np.zeros([4], dtype=np.int16)
# geese length
self.lengths = np.zeros([4], dtype=np.int16)
# geese segments
self.segments = np.zeros([4, config.rows * config.columns], dtype=np.int16)
# food positions
self.foods = np.zeros([2], dtype=np.int16)
# field map
self.field = | np.zeros([53, config.rows, config.columns], dtype=np.float32) | numpy.zeros |
import os
import sys
sys.path.insert(0, os.path.join(os.path.dirname(sys.argv[0]), '../..'))
from assignments.toolcalibration import calibration
if __name__ == "__main__":
import csv
import numpy as np
np.set_printoptions(suppress=True)
transforms = list()
with open('data/pivot_calibration/Tpointer2Cam.csv', 'r') as csvfile:
datareader = csv.reader(csvfile)
for row in datareader:
T = np.eye(4)
data = | np.loadtxt(row, delimiter=',') | numpy.loadtxt |
#! /usr/bin/env python
# Copyright 2021 <NAME>
#
# This file is part of WarpX.
#
# License: BSD-3-Clause-LBNL
import os
import sys
import yt
sys.path.insert(1, '../../../../warpx/Regression/Checksum/')
import checksumAPI
import numpy as np
import scipy.constants as scc
## This script performs various checks for the proton boron nuclear fusion module. The simulation
## that we check is made of 5 different tests, each with different proton, boron and alpha species.
##
## The first test is performed in the proton-boron center of mass frame. It could correspond to the
## physical case of a proton beam colliding with a boron beam. The kinetic energy of the colliding
## particles depends on the cell number in the z direction and varies in the few keV to few MeV
## range. All the particles within a cell have the exact same momentum, which allows detailed
## checks of the energy of produced alpha particles. The proton and boron species have the same
## density and number of particles in this test. The number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The second test is performed in the boron rest frame. It corresponds to the physical case of a
## low density proton beam colliding with a high-density proton+boron target. The energy of the
## proton beam is varied in the few keV to few MeV range, depending on the cell number in the z
## direction. As in the previous case, all the particles within a cell have the exact same
## momentum, which allows detailed checks of the energy of produced alpha particles. In this test,
## there are 100 immobile boron and 100 immobile proton macroparticles per cell, as well as 900
## beam proton macroparticles per cell. The density of the immobile particles is 6 orders of
## magnitude higher than the density of the beam particles, which means that they have a much higher
## weight. This test is similar to the example given in section 3 of Higginson et al.,
## Journal of Computational Physics, 388, 439–453 (2019), which was found to be sensitive to the way
## unsampled pairs are accounted for. As before, the number of produced alphas is much smaller than
## the initial number of protons and borons.
##
## The third test corresponds to a Maxwellian plasma with a 44 keV temperature. The alpha yield is
## directly compared to the analytical fits of <NAME> and <NAME>, Nuclear Fusion, 40, 865
## (2000) for a thermal plasma.
##
## The fourth test corresponds to a plasma with an extremely small boron density, so that all boron
## macroparticles should have disappeared by the end of the simulation, which we verify.
##
## The fifth test is exactly the same as the fourth test, except that the
## fusion_probability_threshold parameter is increased to an excessive value. Because of that, we
## severely underestimate the fusion yield and boron macroparticles remain at the end of the
## simulation, which we verify.
##
## In all simulations, we check particle number, charge, momentum and energy conservation and
## perform basic checks regarding the produced particles. When possible, we also compare the number
## of produced macroparticles, fusion yield and energy of the produced particles to theoretical
## values.
##
## Please be aware that the relative tolerances are often set empirically in this analysis script,
## so it would not be surprising that some tolerances need to be increased in the future.
default_tol = 1.e-12 # Default relative tolerance
## Some physical parameters
keV_to_Joule = scc.e*1e3
MeV_to_Joule = scc.e*1e6
barn_to_square_meter = 1.e-28
m_p = scc.m_p # Proton mass
m_b = 10.9298*m_p # Boron 11 mass
m_reduced = m_p*m_b/(m_p+m_b)
m_a = 3.97369*m_p # Alpha mass
m_be = 7.94748*m_p # Beryllium 8 mass
Z_boron = 5.
Z_proton = 1.
E_Gamow = (Z_boron*Z_proton*np.pi*scc.fine_structure)**2*2.*m_reduced*scc.c**2
E_Gamow_MeV = E_Gamow/MeV_to_Joule
E_Gamow_keV = E_Gamow/keV_to_Joule
E_fusion = 8.59009*MeV_to_Joule # Energy released during p + B -> alpha + Be
E_decay = 0.0918984*MeV_to_Joule # Energy released during Be -> 2*alpha
E_fusion_total = E_fusion + E_decay # Energy released during p + B -> 3*alpha
## Some numerical parameters for this test
size_x = 8
size_y = 8
size_z = 16
dV_total = size_x*size_y*size_z # Total simulation volume
# Volume of a slice corresponding to a single cell in the z direction. In tests 1 and 2, all the
# particles of a given species in the same slice have the exact same momentum
dV_slice = size_x*size_y
dt = 1./(scc.c*np.sqrt(3.))
# In test 1 and 2, the energy in cells number i (in z direction) is typically Energy_step * i**2
Energy_step = 22.*keV_to_Joule
def is_close(val1, val2, rtol=default_tol, atol=0.):
## Wrapper around numpy.isclose, used to override the default tolerances.
return np.isclose(val1, val2, rtol=rtol, atol=atol)
def add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
data_dict[prefix+"_px_"+suffix] = yt_ad[species_name, "particle_momentum_x"].v
data_dict[prefix+"_py_"+suffix] = yt_ad[species_name, "particle_momentum_y"].v
data_dict[prefix+"_pz_"+suffix] = yt_ad[species_name, "particle_momentum_z"].v
data_dict[prefix+"_w_"+suffix] = yt_ad[species_name, "particle_weight"].v
data_dict[prefix+"_id_"+suffix] = yt_ad[species_name, "particle_id"].v
data_dict[prefix+"_cpu_"+suffix] = yt_ad[species_name, "particle_cpu"].v
data_dict[prefix+"_z_"+suffix] = yt_ad[species_name, "particle_position_z"].v
def add_empty_species_to_dict(data_dict, species_name, prefix, suffix):
data_dict[prefix+"_px_"+suffix] = np.empty(0)
data_dict[prefix+"_py_"+suffix] = np.empty(0)
data_dict[prefix+"_pz_"+suffix] = np.empty(0)
data_dict[prefix+"_w_"+suffix] = np.empty(0)
data_dict[prefix+"_id_"+suffix] = np.empty(0)
data_dict[prefix+"_cpu_"+suffix] = np.empty(0)
data_dict[prefix+"_z_"+suffix] = np.empty(0)
def add_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix):
try:
## If species exist, we add its data to the dictionary
add_existing_species_to_dict(yt_ad, data_dict, species_name, prefix, suffix)
except yt.utilities.exceptions.YTFieldNotFound:
## If species does not exist, we avoid python crash and add empty arrays to the
## dictionnary. Currently, this happens for the boron species in test number 4, which
## entirely fuses into alphas.
add_empty_species_to_dict(data_dict, species_name, prefix, suffix)
def check_particle_number_conservation(data):
total_w_proton_start = np.sum(data["proton_w_start"])
total_w_proton_end = np.sum(data["proton_w_end"])
total_w_boron_start = np.sum(data["boron_w_start"])
total_w_boron_end = np.sum(data["boron_w_end"])
consumed_proton = total_w_proton_start - total_w_proton_end
consumed_boron = total_w_boron_start - total_w_boron_end
created_alpha = np.sum(data["alpha_w_end"])
assert(consumed_proton >= 0.)
assert(consumed_boron >= 0.)
assert(created_alpha >= 0.)
## Check that number of consumed proton and consumed boron are equal
assert_scale = max(total_w_proton_start, total_w_boron_start)
assert(is_close(consumed_proton, consumed_boron, rtol = 0., atol = default_tol*assert_scale))
## Check that number of consumed particles corresponds to number of produced alpha
## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
assert(is_close(total_w_proton_start, total_w_proton_end + created_alpha/3.))
assert(is_close(total_w_boron_start, total_w_boron_end + created_alpha/3.))
def compute_energy_array(data, species_name, suffix, m):
## Relativistic computation of kinetic energy for a given species
psq_array = data[species_name+'_px_'+suffix]**2 + data[species_name+'_py_'+suffix]**2 + \
data[species_name+'_pz_'+suffix]**2
rest_energy = m*scc.c**2
return np.sqrt(psq_array*scc.c**2 + rest_energy**2) - rest_energy
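## Hedged sanity check (not part of the original analysis): for a slow proton the
## relativistic kinetic energy above should reduce to the classical 0.5*m*v**2.
def _example_kinetic_energy_limit():
    v = 1.e5                                              # m/s, v << c
    data = {"proton_px_check": np.array([m_p * v]),
            "proton_py_check": np.array([0.]),
            "proton_pz_check": np.array([0.])}
    e_rel = compute_energy_array(data, "proton", "check", m_p)
    return is_close(e_rel[0], 0.5 * m_p * v**2, rtol=1.e-4)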
def check_energy_conservation(data):
proton_energy_start = compute_energy_array(data, "proton", "start", m_p)
proton_energy_end = compute_energy_array(data, "proton", "end", m_p)
boron_energy_start = compute_energy_array(data, "boron", "start", m_b)
boron_energy_end = compute_energy_array(data, "boron", "end", m_b)
alpha_energy_end = compute_energy_array(data, "alpha", "end", m_a)
total_energy_start = np.sum(proton_energy_start*data["proton_w_start"]) + \
np.sum(boron_energy_start*data["boron_w_start"])
total_energy_end = np.sum(proton_energy_end*data["proton_w_end"]) + \
np.sum(boron_energy_end*data["boron_w_end"]) + \
np.sum(alpha_energy_end*data["alpha_w_end"])
## Factor 3 is here because each nuclear fusion reaction produces 3 alphas
n_fusion_reaction = np.sum(data["alpha_w_end"])/3.
assert(is_close(total_energy_end,
total_energy_start + n_fusion_reaction*E_fusion_total,
rtol = 1.e-8))
def check_momentum_conservation(data):
proton_total_px_start = np.sum(data["proton_px_start"]*data["proton_w_start"])
proton_total_py_start = np.sum(data["proton_py_start"]*data["proton_w_start"])
proton_total_pz_start = np.sum(data["proton_pz_start"]*data["proton_w_start"])
proton_total_px_end = np.sum(data["proton_px_end"]*data["proton_w_end"])
proton_total_py_end = np.sum(data["proton_py_end"]*data["proton_w_end"])
proton_total_pz_end = np.sum(data["proton_pz_end"]*data["proton_w_end"])
boron_total_px_start = np.sum(data["boron_px_start"]*data["boron_w_start"])
boron_total_py_start = np.sum(data["boron_py_start"]*data["boron_w_start"])
boron_total_pz_start = np.sum(data["boron_pz_start"]*data["boron_w_start"])
boron_total_px_end = np.sum(data["boron_px_end"]*data["boron_w_end"])
boron_total_py_end = np.sum(data["boron_py_end"]*data["boron_w_end"])
boron_total_pz_end = np.sum(data["boron_pz_end"]*data["boron_w_end"])
alpha_total_px_end = | np.sum(data["alpha_px_end"]*data["alpha_w_end"]) | numpy.sum |
#!/usr/bin/env python3
# Copyright 2019 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
# @title :split_cifar.py
# @author :jvo
# @contact :<EMAIL>
# @created :05/13/2019
# @version :1.0
# @python_version :3.7.3
"""
Split CIFAR-10/100 Dataset
^^^^^^^^^^^^^^^^^^^^^^^^^^
The module :mod:`data.special.split_cifar` contains a wrapper for data handlers
for the Split-CIFAR10/CIFAR100 task.
"""
# FIXME The code in this module is mostly a copy of the code in the
# corresponding `split_mnist` module.
import numpy as np
from data.cifar100_data import CIFAR100Data
from data.cifar10_data import CIFAR10Data
# DELETEME
def get_split_CIFAR_handlers(data_path, use_one_hot=True, validation_size=0,
use_data_augmentation=False):
"""Function has been removed. Use :func:`get_split_cifar_handlers` instead.
"""
raise NotImplementedError('Function has been removed. Use function ' +
'"get_split_cifar_handlers" instead.')
def get_split_cifar_handlers(data_path, use_one_hot=True, validation_size=0,
use_data_augmentation=False, num_tasks=6):
"""This method will combine 1 object of the class
:class:`data.cifar10_data.CIFAR10Data` and 5 objects of the class
:class:`SplitCIFAR100Data`.
The SplitCIFAR benchmark consists of 6 tasks, corresponding to the images
in CIFAR-10 and 5 tasks from CIFAR-100 corresponding to the images with
labels [0-10], [10-20], [20-30], [30-40], [40-50].
Args:
data_path: Where should the CIFAR-10 and CIFAR-100 datasets
be read from? If not existing, the datasets will be downloaded
into this folder.
use_one_hot (bool): Whether the class labels should be represented in a
one-hot encoding.
validation_size: The size of the validation set of each individual
data handler.
use_data_augmentation (optional): Note, this option currently only
applies to input batches that are transformed using the class
member :meth:`data.dataset.Dataset.input_to_torch_tensor`
(hence, **only available for PyTorch**).
num_tasks (int): A number between 1 and 11, specifying the number of
data handlers to be returned. If ``num_tasks=6``, then there will be
the CIFAR-10 data handler and the first 5 splits of the CIFAR-100
dataset (as in the usual CIFAR benchmark for CL).
Returns:
(list) A list of data handlers. The first being an instance of class
:class:`data.cifar10_data.CIFAR10Data` and the remaining ones being an
instance of class :class:`SplitCIFAR100Data`.
"""
assert (num_tasks >= 1 and num_tasks <= 11)
print('Creating data handlers for SplitCIFAR tasks ...')
handlers = []
handlers.append(CIFAR10Data(data_path, use_one_hot=use_one_hot,
validation_size=validation_size,
use_data_augmentation=use_data_augmentation))
for i in range(0, (num_tasks - 1) * 10, 10):
handlers.append(SplitCIFAR100Data(data_path,
use_one_hot=use_one_hot, validation_size=validation_size,
use_data_augmentation=use_data_augmentation, labels=range(i, i + 10)))
print('Creating data handlers for SplitCIFAR tasks ... Done')
return handlers
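# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical example of consuming the handlers built above. The
# data path is a placeholder, and `num_train_samples` is assumed to be exposed
# by the base Dataset class.
def _example_split_cifar_usage(data_path='datasets/cifar'):
    handlers = get_split_cifar_handlers(data_path, use_one_hot=True,
                                        validation_size=0, num_tasks=6)
    for task_id, dhandler in enumerate(handlers):
        print('Task %d: %d training samples' % (task_id,
                                                dhandler.num_train_samples))
    return handlers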
class SplitCIFAR100Data(CIFAR100Data):
"""An instance of the class shall represent a single SplitCIFAR-100 task.
Args:
data_path: Where should the dataset be read from? If not existing,
the dataset will be downloaded into this folder.
use_one_hot (bool): Whether the class labels should be
represented in a one-hot encoding.
validation_size: The number of validation samples. Validation
samples will be taken from the training set (the first :math:`n`
samples).
use_data_augmentation (optional): Note, this option currently only
applies to input batches that are transformed using the class
member :meth:`data.dataset.Dataset.input_to_torch_tensor`
(hence, **only available for PyTorch**).
Note, we are using the same data augmentation pipeline as for
CIFAR-10.
labels: The labels that should be part of this task.
full_out_dim: Choose the original CIFAR instead of the new
task output dimension. This option will affect the attributes
:attr:`data.dataset.Dataset.num_classes` and
:attr:`data.dataset.Dataset.out_shape`.
"""
def __init__(self, data_path, use_one_hot=False, validation_size=1000,
use_data_augmentation=False, labels=range(0, 10),
full_out_dim=False):
super().__init__(data_path, use_one_hot=use_one_hot, validation_size=0,
use_data_augmentation=use_data_augmentation)
K = len(labels)
self._labels = labels
train_ins = self.get_train_inputs()
test_ins = self.get_test_inputs()
train_outs = self.get_train_outputs()
test_outs = self.get_test_outputs()
# Get labels.
if self.is_one_hot:
train_labels = self._to_one_hot(train_outs, reverse=True)
test_labels = self._to_one_hot(test_outs, reverse=True)
else:
train_labels = train_outs
test_labels = test_outs
train_labels = train_labels.squeeze()
test_labels = test_labels.squeeze()
train_mask = train_labels == labels[0]
test_mask = test_labels == labels[0]
for k in range(1, K):
train_mask = np.logical_or(train_mask, train_labels == labels[k])
test_mask = np.logical_or(test_mask, test_labels == labels[k])
train_ins = train_ins[train_mask, :]
test_ins = test_ins[test_mask, :]
train_outs = train_outs[train_mask, :]
test_outs = test_outs[test_mask, :]
if validation_size > 0:
assert (validation_size < train_outs.shape[0])
val_inds = np.arange(validation_size)
train_inds = np.arange(validation_size, train_outs.shape[0])
import os
import json
import torch
import numpy as np
from torch.utils.data import Dataset
from PIL import Image
from torchvision.transforms.functional import to_tensor
from learning.inputs.pose import Pose
from learning.inputs.vision import standardize_image
from learning.models.semantic_map.pinhole_camera_inv import PinholeCameraProjection
from data_io.env import get_landmark_locations_airsim
from learning.datasets.fpv_data_augmentation import data_augmentation
from data_io.paths import get_poses_dir, get_fpv_img_flight_dir, load_config_files
from utils.simple_profiler import SimpleProfiler
import parameters.parameter_server as P
PROFILE = False
class FpvImageDataset(Dataset):
def __init__(self, env_ids, dataset_name, eval, real, real_poses=None):
self.prof = SimpleProfiler(torch_sync=PROFILE, print=PROFILE)
self.real = real
if real_poses:
self.real_poses = real_poses
else:
self.real_poses = real
self.eval = eval
self.dataset_name = dataset_name
self.env_ids = env_ids
# Assume that these parameters include cam_h_fov, img_w, img_h
self.model_params = P.get_current_parameters()["Model"]
self.cam_h_fov = self.model_params["cam_h_fov"]
self.img_w = self.model_params["img_w"]
self.img_h = self.model_params["img_h"]
self.data_params = P.get_current_parameters()["Data"]
self.load_img_w = self.data_params["load_img_w"]
self.load_img_h = self.data_params["load_img_h"]
self.prof.tick("out")
self.instructions, self.poses, self.images, self.env_ids_decompressed = self.data_from_env_ids(env_ids)
self.prof.tick("data from env")
self.lm_pos_fpv, self.lm_idx, self.lm_pos_map = self.compute_pos_idx(add_null=0)
self.prof.tick("compute pos idx")
self.filter_none()
self.prof.tick("filter none")
self.update_dic()
self.prof.tick("update dic")
self.prof.print_stats()
def __len__(self):
return len(self.env_ids_decompressed)
def __getitem__(self, index):
prof = SimpleProfiler(torch_sync=PROFILE, print=PROFILE)
prof.tick("out")
if type(index) == int:
image = self.images[index]
lm_pos_fpv = self.lm_pos_fpv[index]
lm_indices = self.lm_idx[index]
lm_pos_map = self.lm_pos_map[index]
prof.tick("retrieve data")
# data augmentation. If eval no data augmentation.
out_img, out_lm_indices, out_lm_pos_fpv = data_augmentation(
image, lm_indices, lm_pos_fpv, self.img_h, self.img_w, self.eval, prof)
if (out_lm_indices is None) or (len(out_lm_indices) == 0):
out_img, out_lm_indices, out_lm_pos_fpv = data_augmentation(
image, lm_indices, lm_pos_fpv, self.img_h, self.img_w, True, prof)
out_img = standardize_image(np.array(out_img))
out_img = torch.from_numpy(out_img)
out_lm_indices = torch.tensor(out_lm_indices)
out_lm_pos_fpv = torch.tensor(out_lm_pos_fpv)
sample = {"poses": self.poses[index],
"instructions": [], # self.instructions[index],
"images": out_img,
"env_ids": self.env_ids_decompressed[index],
"lm_pos_fpv": out_lm_pos_fpv,
"lm_indices": out_lm_indices,
"lm_pos_map": lm_pos_map}
prof.tick("dic")
prof.print_stats()
"""
elif type(index) == list:
out_images_list, out_lm_indices_list, out_lm_pos_fpv_list = [], [], []
for i in index:
image = self.images[i]
lm_pos_fpv = self.lm_pos_fpv[i]
lm_indices = self.lm_idx[i]
out_img, out_lm_indices, out_lm_pos_fpv = data_augmentation(image, lm_indices, lm_pos_fpv, IMG_HEIGHT, IMG_WIDTH, self.eval, prof)
if (len(out_lm_indices) == 0) | (out_lm_indices is None):
out_img, out_lm_indices, out_lm_pos_fpv = data_augmentation(image, lm_indices, lm_pos_fpv, IMG_HEIGHT, IMG_WIDTH, True, prof)
out_images_list.append(out_img)
out_lm_indices_list.append(out_lm_indices)
out_lm_pos_fpv_list.append(out_lm_pos_fpv)
sample = {"poses": [self.poses[i] for i in index],
"instructions": [], # self.instructions[index],
"lm_mentioned": [],
"images": out_images_list,
"env_ids": [self.env_ids_decompressed[i] for i in index],
"lm_pos_fpv": out_lm_pos_fpv_list,
"lm_idx": out_lm_indices_list}
"""
return sample
def data_from_env_ids(self, env_ids, proba_selection=1.0):
images = []
poses = []
# list of all env_ids (with duplicates)
env_ids_decompressed = []
# TODO: fill instructions
instructions = []
print("Using {} images".format("real" if self.real else "simulated"))
for env_id in env_ids:
poses_dir = get_poses_dir(env_id)
images_dir = get_fpv_img_flight_dir(env_id, self.real)
pose_filenames = [f for f in os.listdir(poses_dir) if f.endswith('.json')]
image_filenames = [f for f in os.listdir(images_dir) if (f.endswith('.jpg') | f.endswith('.png'))]
try:
assert len(image_filenames) == len(pose_filenames)
except:
print("error {}: different count of poses and images".format(env_id))
if not(os.listdir(images_dir)):
print(images_dir + " is empty")
assert(not(not(os.listdir(images_dir))))
img_ids = np.sort(
[int(f.replace('.', '_').split('_')[-2]) for f in os.listdir(images_dir) if (f.endswith('.jpg') | f.endswith('.png'))])
try:
selected = np.random.choice(img_ids,
int(len(image_filenames) * proba_selection),
replace=False)
except:
print(img_ids)
selected_ids = np.sort(selected)
for img_id in selected_ids:
filename_pose = "pose_{}.json".format(img_id)
gen_imgpath = lambda img_id, ext: os.path.join(images_dir, f"usb_cam_{img_id}.{ext}")
img_path = gen_imgpath(img_id, "jpg")
if not os.path.exists(img_path):
img_path = gen_imgpath(img_id, "png")
#print(filename_pose, filename_img)
with open(os.path.join(poses_dir, filename_pose), 'r') as f:
try:
pose = json.load(f)["camera"]
poses.append(pose)
read_success = True
except:
read_success = False
if read_success:
# Images are resized in bigger shape. They will be resized to 256*144 after data augmentation
img = Image.open(img_path).resize((self.load_img_w, self.load_img_h))
images.append(img)
env_ids_decompressed.append((env_id, img_id))
return instructions, poses, images, env_ids_decompressed
def update_dic(self):
self.dic = {"poses": self.poses,
"instructions": self.instructions,
"images": self.images,
"env_ids": self.env_ids_decompressed,
"lm_pos_fpv": self.lm_pos_fpv,
"lm_indices": self.lm_idx,
"lm_pos_map": self.lm_pos_map}
def provider_lm_pos_lm_indices_fpv(self, env_ids, add_null=0):
"""
Data provider that gives the positions and indices of all landmarks visible in the FPV image.
:param pose_list: B*7 list of poses decomposed in 3 position and 4 orientation floats
[x,y,z, orient_x, orient_y, orient_z, orient_w]
img_x, img_y: shape of images
env_ids: list of environments.
:return: ("lm_pos", lm_pos) - lm_pos is a list (over timesteps) of lists (over landmarks visible in image) of the
landmark locations in image pixel coordinates
("lm_indices", lm_indices) - lm_indices is a list (over timesteps) of lists (over landmarks visible in image)
of the landmark indices for every landmark included in lm_pos. These are the landmark classifier labels
"""
list_of_conf = load_config_files(np.unique(env_ids))#, perception=True)
# add add_null empty objects on each config.
if add_null > 0:
for i, conf in enumerate(list_of_conf):
zpos = conf["zPos"]
xpos = conf["xPos"]
lm_positions = np.stack([xpos, zpos], 1)
for _ in range(add_null): # add 2 empty objects on configuration
i_null = 0
while i_null < 100:
xnull = np.random.rand() * 4.7
znull = np.random.rand() * 4.7
distances_to_lm = np.linalg.norm(lm_positions - np.array([xnull, znull]), axis=1)
min_dist_to_lm = np.min(distances_to_lm)
import cv2
import numpy as np
# TODO: detect windows
# TODO: detect doors
# Calculate (actual) size of apartment
# TODO: text detection
"""
Detect
This file contains functions used when detecting and calculating shapes in images.
FloorplanToBlender3d
Copyright (C) 2019 <NAME>
"""
def wall_filter(gray):
"""
Filter walls
Filter out walls from a grayscale image
@Param image
@Return image of walls
"""
ret, thresh = cv2.threshold(gray,0,255,cv2.THRESH_BINARY_INV+cv2.THRESH_OTSU)
# noise removal
kernel = np.ones((3,3),np.uint8)
opening = cv2.morphologyEx(thresh,cv2.MORPH_OPEN,kernel, iterations = 2)
sure_bg = cv2.dilate(opening,kernel,iterations=3)
dist_transform = cv2.distanceTransform(opening,cv2.DIST_L2,5)
ret, sure_fg = cv2.threshold(0.5*dist_transform,0.2*dist_transform.max(),255,0)
sure_fg = np.uint8(sure_fg)
unknown = cv2.subtract(sure_bg,sure_fg)
return unknown
def detectPreciseBoxes(detect_img, output_img = None, color = [100,100,0]):
"""
Detect corners with boxes in image with high precision
@Param detect_img image to detect from @mandatory
@Param output_img image for output
@Param color to set on output
@Return corners(list of boxes), output image
@source https://stackoverflow.com/questions/50930033/drawing-lines-and-distance-to-them-on-image-opencv-python
"""
res = []
contours, hierarchy = cv2.findContours(detect_img,cv2.RETR_EXTERNAL,cv2.CHAIN_APPROX_SIMPLE)
#area = sorted(contours, key=cv2.contourArea, reverse=True)
largest_contour_area = 0
for cnt in contours:
largest_contour_area = cv2.contourArea(cnt)
largest_contour = cnt
epsilon = 0.001*cv2.arcLength(largest_contour,True)
approx = cv2.approxPolyDP(largest_contour,epsilon,True)
if output_img is not None:
final = cv2.drawContours(output_img, [approx], 0, color)
res.append(approx)
return res, output_img
def remove_noise(img, noise_removal_threshold):
"""
Remove noise from image and return mask
Help function for finding room
@Param img @mandatory image to remove noise from
@Param noise_removal_threshold @mandatory threshold for noise
@Return return new mask of image
"""
img[img < 128] = 0
img[img > 128] = 255
contours, _ = cv2.findContours(~img, cv2.RETR_EXTERNAL, cv2.CHAIN_APPROX_SIMPLE)
mask = np.zeros_like(img)
for contour in contours:
area = cv2.contourArea(contour)
if area > noise_removal_threshold:
cv2.fillPoly(mask, [contour], 255)
return mask
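# --- Illustrative usage sketch (not part of the original module) ---
# A minimal, hypothetical pipeline combining the helpers above. The image path
# and noise threshold are placeholder assumptions.
def _example_wall_detection(image_path="floorplan.png", noise_removal_threshold=50):
    img = cv2.imread(image_path)
    gray = cv2.cvtColor(img, cv2.COLOR_BGR2GRAY)
    walls = wall_filter(gray)
    mask = remove_noise(walls, noise_removal_threshold)
    boxes, annotated = detectPreciseBoxes(mask, output_img=img.copy())
    return boxes, annotated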
def find_corners_and_draw_lines(img, corners_threshold, room_closing_max_length):
"""
Finds corners and draw lines from them
Help function for finding room
@Param image input image
@Param corners_threshold threshold for corner distance
@Param room_closing_max_length threshold for room max size
@Return output image
"""
# Detect corners (you can play with the parameters here)
kernel = np.ones((1,1),np.uint8)
dst = cv2.cornerHarris(img ,2,3,0.04)
dst = cv2.erode(dst,kernel, iterations = 10)
corners = dst > corners_threshold * dst.max()
# Draw lines to close the rooms off by adding a line between corners on the same x or y coordinate
# This gets some false positives.
# You could try to disallow drawing through other existing lines for example.
for y,row in enumerate(corners):
x_same_y = np.argwhere(row)
"""
:Author: <NAME> <<EMAIL>>
:Author: <NAME> <<EMAIL>>
:Author: <NAME> <<EMAIL>>
:Date: 2018-06-06
:Copyright: 2018, Karr Lab
:License: MIT
"""
import Bio.SeqIO
import Bio.SeqRecord
import math
import numpy
import random
import scipy.constants
import wc_kb
import wc_kb_gen
from Bio.Data import CodonTable
from Bio.Seq import Seq, Alphabet
from numpy import random
from scipy import stats
from wc_utils.util.units import unit_registry
from wc_onto import onto as wcOntology
from wc_utils.util.ontology import are_terms_equivalent
class GenomeGenerator(wc_kb_gen.KbComponentGenerator):
"""
Generate synthetic chromosome with randomized genes/intergenic
regions. Creates RNA and protein objects corresponding to the genes
this chromosome. Associates the chromosome, RNAs, proteins
with a knowledge base object (and its Cell attribute).
Options:
* num_chromosomes (:obj:`int`): number of chromosomes
* mean_gc_frac (:obj:`float`): fraction of nucleotides which are G or C
* num_genes (:obj:`float`): mean number of genes
* mean_gene_len (:obj:`float`): mean codon length of a gene
* mean_coding_frac (:obj:`float`): mean coding fraction of the genome
* translation_table (:obj:`int`): The NCBI standard genetic code used
* num_ncRNA (:obj:`float`): The proportion of non coding RNAs
* num_rRNA (:obj:`float`): The proportion of ribosomal RNAs
* tRNA_prop (:obj:`float`): The proportion of transfer RNAs
* five_prime_len (:obj:`int`): Average 5' UTR length for transcription units
* three_prime_len (:obj:`int`): Average 3' UTR length for transcription units
* operon_prop (:obj:`float`): Proportion of genes that should be in an operon (polycistronic mRNA)
* operon_gen_num (:obj:`int`): Average number of genes in an operon
* mean_copy_number (:obj:`float`): mean copy number of each RNA
* mean_half_life (:obj:`float`): mean half-life of RNAs
* genetic_code (:obj:`str`): 'normal' / 'reduced', if reduced only 'I': ['ATC'], 'L': ['CTG'],
'M': ['ATG'], 'T': ['ACG'] codons in genome
* seq_path (:obj:`str`): path to save genome sequence
"""
def clean_and_validate_options(self):
""" Apply default options and validate options """
# Default options are loosely based on <NAME> K-12
# Nucleic Acids Research 41:D605-12 2013
options = self.options
genetic_code = options.get('genetic_code', 'normal')
assert(genetic_code in ['normal', 'reduced'])
options['genetic_code'] = genetic_code
num_chromosomes = options.get('num_chromosomes', 1)
assert(num_chromosomes >= 1 and int(num_chromosomes) == num_chromosomes)
options['num_chromosomes'] = num_chromosomes
chromosome_topology = options.get('chromosome_topology', 'circular')
assert(chromosome_topology in ['circular', 'linear'])
options['chromosome_topology'] = chromosome_topology
num_genes = options.get('num_genes', 4500)
assert(num_genes >= 1)
options['num_genes'] = int(num_genes)
num_ncRNA = options.get('num_ncRNA', 10) # not sure
assert(isinstance(num_ncRNA, int))
options['num_ncRNA'] = num_ncRNA
# http://book.bionumbers.org/how-many-ribosomal-rna-gene-copies-are-in-the-genome/
num_rRNA = options.get('num_rRNA', 7)
assert(isinstance(num_rRNA, int))
options['num_rRNA'] = num_rRNA
num_tRNA = options.get('num_tRNA', 20)
assert(isinstance(num_tRNA, int))
options['num_tRNA'] = num_tRNA
min_prots = options.get('min_prots', 8)
assert(isinstance(min_prots, int))
options['min_prots'] = min_prots
assert((num_ncRNA + num_rRNA + num_tRNA + min_prots) <= num_genes)
mean_gc_frac = options.get('mean_gc_frac', 0.58)
assert(mean_gc_frac >= 0 and mean_gc_frac <= 1)
options['mean_gc_frac'] = mean_gc_frac
# DOI: 10.1093/molbev/msk019
mean_gene_len = options.get('mean_gene_len', 308) # codon length (924 bp)
assert(mean_gene_len >= 1)
options['mean_gene_len'] = mean_gene_len
# DOI: 10.1007/s10142-015-0433-4
mean_coding_frac = options.get('mean_coding_frac', 0.88)
assert(mean_coding_frac > 0 and mean_coding_frac < 1)
options['mean_coding_frac'] = mean_coding_frac
translation_table = int(options.get('translation_table', 1))
assert(translation_table in range(1, 32))
options['translation_table'] = translation_table
five_prime_len = int(options.get('five_prime_len', 7))
assert(five_prime_len >= 0)
options['five_prime_len'] = five_prime_len
three_prime_len = int(options.get('three_prime_len', 5)) # guess
assert(three_prime_len >= 0)
options['three_prime_len'] = three_prime_len
operon_prop = (options.get('operon_prop', 0.2)) # guess
assert(operon_prop >= 0 and operon_prop <= 1)
options['operon_prop'] = operon_prop
operon_gen_num = int(options.get('operon_gen_num', 3))
assert(operon_gen_num >= 2)
options['operon_gen_num'] = operon_gen_num
mean_rna_half_life = options.get('mean_rna_half_life', 8 * 60)
assert(mean_rna_half_life > 0)
options['mean_rna_half_life'] = mean_rna_half_life
# DOI: 10.1073/pnas.0308747101
mean_protein_half_life = options.get('mean_protein_half_life', 750 * 60)
assert(mean_protein_half_life > 0)
options['mean_protein_half_life'] = mean_protein_half_life
# DOI: 10.1038/ismej.2012.94
mean_rna_copy_number = options.get('mean_rna_copy_number', 0.4)
assert(mean_rna_copy_number > 0)
options['mean_rna_copy_number'] = mean_rna_copy_number
# DOI: 10.1038/ismej.2012.94
mean_protein_copy_number = options.get('mean_protein_copy_number', 75)
assert(mean_protein_copy_number > 0)
options['mean_protein_copy_number'] = mean_protein_copy_number
seq_path = options.get('seq_path', 'rand_seq.fna')
options['seq_path'] = seq_path
def gen_components(self):
self.gen_genome()
self.gen_tus()
self.gen_rnas_proteins()
self.gen_concentrations()
self.reduce_model()
def gen_genome(self):
'''Construct knowledge base components and generate the DNA sequence'''
# get options
options = self.options
genetic_code = options.get('genetic_code')
num_chromosomes = options.get('num_chromosomes')
mean_gene_len = options.get('mean_gene_len')
translation_table = options.get('translation_table')
num_genes = options.get('num_genes')
mean_coding_frac = options.get('mean_coding_frac')
mean_gc_frac = options.get('mean_gc_frac')
chromosome_topology = options.get('chromosome_topology')
num_ncRNA = options.get('num_ncRNA')
num_rRNA = options.get('num_rRNA')
num_tRNA = options.get('num_tRNA')
min_prots = options.get('min_prots')
seq_path = options.get('seq_path')
cell = self.knowledge_base.cell
self.knowledge_base.translation_table = translation_table
codon_table = CodonTable.unambiguous_dna_by_id[translation_table]
BASES = ['A', 'C', 'G', 'T']
PROB_BASES = [(1 - mean_gc_frac) / 2, mean_gc_frac /2, mean_gc_frac/2, (1-mean_gc_frac)/2]
if genetic_code == 'normal':
START_CODONS = codon_table.start_codons
STOP_CODONS = codon_table.stop_codons
elif genetic_code == 'reduced':
START_CODONS = ['TTA']
STOP_CODONS = ['TAA']
num_genes_all = num_genes
assignList = num_tRNA*[wcOntology['WC:tRNA']] + \
num_rRNA*[wcOntology['WC:rRNA']] + \
num_ncRNA*[wcOntology['WC:ncRNA']] + \
(num_genes_all-(num_ncRNA + num_tRNA + num_rRNA))*[wcOntology['WC:mRNA']]
random.shuffle(assignList)
# Create a chromosome n times
dna_seqs = []
for i_chr in range(num_chromosomes):
num_genes = math.ceil(num_genes_all / num_chromosomes)
gene_lens = 3 * self.rand(mean_gene_len, count=num_genes, min=2)
intergene_lens = 3 * self.rand(mean_gene_len / mean_coding_frac * (1 - mean_coding_frac), count=num_genes)
seq_len = numpy.sum(gene_lens) + numpy.sum(intergene_lens)
# Generate seq based on random codons (NOT start/stop codons)
seq_str = []
if genetic_code=='normal':
for i in range(0, seq_len, 3):
codon_i = random.choice(STOP_CODONS)
codon_i = "".join(random.choice(BASES, p=PROB_BASES, size=(3,)))
seq_str.append(codon_i)
elif genetic_code=='reduced':
for i in range(0, seq_len, 3):
codon_i = STOP_CODONS[0]
codon_i = "".join(random.choice(['ATC', 'CTG', 'ATG', 'ACG']))
seq_str.append(codon_i)
seq_str = "".join(seq_str)
seq = Seq(seq_str, Alphabet.DNAAlphabet())
chro = cell.species_types.get_or_create(id='chr_{}'.format(i_chr + 1), __type=wc_kb.core.DnaSpeciesType)
chro.name = 'Chromosome {}'.format(i_chr + 1)
chro.circular = chromosome_topology == 'circular'
chro.double_stranded = True
chro.sequence_path = seq_path
gene_starts = numpy.int64(numpy.cumsum(numpy.concatenate(([0], gene_lens[0:-1])) +
numpy.concatenate((numpy.round(intergene_lens[0:1] / 2), intergene_lens[1:]))))
# creates GeneLocus objects for the genes and labels their GeneType (which type of RNA they transcribe)
for i_gene, gene_start in enumerate(gene_starts):
gene = self.knowledge_base.cell.loci.get_or_create(
id='gene_{}_{}'.format(i_chr + 1, i_gene + 1), __type=wc_kb.prokaryote.GeneLocus)
gene.start = gene_start + 1 # 1-indexed
gene.polymer = chro
gene.end = gene.start + gene_lens[i_gene] - 1 # 1-indexed
gene.name = 'gene {} {}'.format(i_chr+1, i_gene+1)
if len(assignList) > 0:
gene.type = assignList.pop()
else:
gene.type = wcOntology['WC:mRNA']
if gene.type == wcOntology['WC:mRNA']: # if mRNA, then set up start/stop codons in the gene
start_codon = random.choice(START_CODONS)
stop_codon = random.choice(STOP_CODONS)
seq_str = str(seq)
seq_str = seq_str[:gene.start-1] + \
start_codon + \
seq_str[gene.start+2: gene.end-3] + \
stop_codon + seq_str[gene.end:]
for i in range(gene.start+2, gene.end-3, 3):
while seq_str[i:i+3] in START_CODONS or seq_str[i:i+3] in STOP_CODONS:
if genetic_code == 'normal':
codon_i = "".join(random.choice(BASES, p=PROB_BASES, size=(3,)))
elif genetic_code == 'reduced':
codon_i = "".join(random.choice(['ATC', 'CTG', 'ATG', 'ACG']))
seq_str = seq_str[:i]+codon_i+seq_str[i+3:]
seq = Seq(seq_str, Alphabet.DNAAlphabet())
dna_seqs.append(Bio.SeqRecord.SeqRecord(seq, chro.id))
with open(seq_path, 'w') as file:
writer = Bio.SeqIO.FastaIO.FastaWriter(
file, wrap=70, record2title=lambda record: record.id)
writer.write_file(dna_seqs)
def gen_tus(self):
""" Creates transcription units with 5'/3' UTRs, polycistronic mRNAs, and other types of RNA (tRNA, rRNA, sRNA) """
options = self.options
five_prime_len = options.get('five_prime_len') # 7 bp default (E. coli, wikipedia)
three_prime_len = options.get('three_prime_len') # 5 bp default guess
operon_prop = options.get('operon_prop') # 0.2 default guess
operon_gen_num = options.get('operon_gen_num') # 3 genes default (https://academic.oup.com/gbe/article/5/11/2242/653613)
for i_chr, chromosome in enumerate(self.knowledge_base.cell.species_types.get(__type=wc_kb.core.DnaSpeciesType)):
seq = chromosome.get_seq()
i_gene = 0
transcription_loci = []
# Todo make this into a proper for loop that deals with repeats/additional loci
while i_gene < len(chromosome.loci):
gene = chromosome.loci[i_gene]
if gene.type == wcOntology['WC:mRNA']:
# polycistronic mRNA (multiple GeneLocus objects per TranscriptionUnitLocus)
five_prime = self.rand(five_prime_len)[0]
three_prime = self.rand(three_prime_len)[0]
operon_prob = random.random()
# make an operon (polycistronic mRNA, put multiple genes in one TransUnitLocus)
if operon_prob <= operon_prop:
operon_genes = self.rand(operon_gen_num, min=2)[0]
# add 3', 5' UTRs to the ends of the transcription unit (upstream of first gene, downstream of last gene)
tu = self.knowledge_base.cell.loci.get_or_create(
id='tu_{}_{}'.format(i_chr + 1, i_gene + 1), __type=wc_kb.prokaryote.TranscriptionUnitLocus)
tu.name = 'tu {} {}'.format(i_chr+1, i_gene+1)
five_prime_start = gene.start - five_prime
if five_prime_start < 0:
five_prime_start = 0
tu.genes.append(gene)
tu.start = five_prime_start
for k in range(operon_genes-1):
i_gene += 1
if i_gene >= len(chromosome.loci):
break
if (chromosome.loci[i_gene]).type == wcOntology['WC:mRNA']:
gene = chromosome.loci[i_gene]
tu.genes.append(gene)
else:
break
three_prime_end = gene.end + three_prime
if three_prime_end >= len(seq):
three_prime_end = len(seq) - 1
tu.end = three_prime_end
transcription_loci.append(tu)
else: # make an individual transcription unit for the gene
five_prime_start = gene.start - five_prime
three_prime_end = gene.end + three_prime
if five_prime_start < 0:
five_prime_start = 0
if three_prime_end >= len(seq):
three_prime_end = len(seq) - 1
tu = self.knowledge_base.cell.loci.get_or_create(
id='tu_{}_{}'.format(i_chr + 1, i_gene + 1), __type=wc_kb.prokaryote.TranscriptionUnitLocus)
tu.start = five_prime_start
tu.end = three_prime_end
tu.name = 'tu {} {}'.format(i_chr+1, i_gene+1)
tu.genes.append(gene)
transcription_loci.append(tu)
i_gene += 1
# make a transcription unit that transcribes other types of RNA (tRNA, rRNA, sRNA)
else:
tu = self.knowledge_base.cell.loci.get_or_create(
id='tu_{}_{}'.format(i_chr + 1, i_gene + 1), __type=wc_kb.prokaryote.TranscriptionUnitLocus)
tu.name = 'tu {} {}'.format(i_chr+1, i_gene+1)
tu.start = gene.start
tu.end = gene.end
tu.genes.append(gene)
transcription_loci.append(tu)
i_gene += 1
for locus in transcription_loci:
locus.polymer = chromosome
def gen_rnas_proteins(self):
""" Creates RNA and protein objects corresponding to genes on chromosome. """
cell = self.knowledge_base.cell
options = self.options
mean_rna_half_life = options.get('mean_rna_half_life')
mean_protein_half_life = options.get('mean_protein_half_life')
for chromosome in self.knowledge_base.cell.species_types.get(__type=wc_kb.core.DnaSpeciesType):
for i in range(len(chromosome.loci)):
locus = chromosome.loci[i]
if type(locus) == wc_kb.prokaryote.TranscriptionUnitLocus:
tu = locus
# creates RnaSpeciesType for RNA sequence corresponding to gene
rna = self.knowledge_base.cell.species_types.get_or_create(id='rna_{}'.format(tu.id), __type=wc_kb.prokaryote.RnaSpeciesType)
rna.name = 'rna {}'.format(tu.id)
# GeneLocus object for gene sequence, attribute of ProteinSpeciesType object
if are_terms_equivalent(tu.genes[0].type, wcOntology['WC:mRNA']):
rna.type = wcOntology['WC:mRNA']
elif are_terms_equivalent(tu.genes[0].type, wcOntology['WC:rRNA']):
rna.type = wcOntology['WC:rRNA']
elif are_terms_equivalent(tu.genes[0].type, wcOntology['WC:tRNA']):
rna.type = wcOntology['WC:tRNA']
elif are_terms_equivalent(tu.genes[0].type, wcOntology['WC:ncRNA']):
rna.type = wcOntology['WC:ncRNA']
rna.half_life = random.normal(mean_rna_half_life, numpy.sqrt(mean_rna_half_life))
rna.transcription_units.append(tu)
if are_terms_equivalent(rna.type, wcOntology['WC:mRNA']):
for gene in tu.genes:
# creates ProteinSpeciesType object for corresponding protein sequence(s)
prot = self.knowledge_base.cell.species_types.get_or_create(
id='prot_{}'.format(gene.id),
__type=wc_kb.prokaryote.ProteinSpeciesType)
prot.name = 'prot_{}'.format(gene.id)
prot.cell = cell
prot.cell.knowledge_base = self.knowledge_base
prot.gene = gene
prot.rna = rna
prot.half_life = random.normal(mean_protein_half_life, numpy.sqrt(mean_protein_half_life))
def gen_concentrations(self):
""" Creates the concentration objects of RNA and protein objects """
options = self.options
cell = self.knowledge_base.cell
cytosol = cell.compartments.get_one(id='c')
mean_rna_copy_number = options.get('mean_rna_copy_number')
mean_protein_copy_number = options.get('mean_protein_copy_number')
if self.knowledge_base.cell.parameters.get_one(id='mean_volume') is not None:
mean_volume = self.knowledge_base.cell.parameters.get_one(id='mean_volume').value
else:
mean_volume = 0.000000000000000067
print('"mean_volume" parameter is missing, using Mycoplasma pneumoniae value (6.7E-17L).')
for rna in cell.species_types.get(__type=wc_kb.prokaryote.RnaSpeciesType):
rna_specie = rna.species.get_or_create(compartment=cytosol)
conc = round(abs(random.normal(loc=mean_rna_copy_number,scale=15))) / scipy.constants.Avogadro / mean_volume
cell.concentrations.get_or_create(id='CONC({})'.format(rna_specie.id), species=rna_specie, value=conc, units=unit_registry.parse_units('M'))
for prot in cell.species_types.get(__type=wc_kb.prokaryote.ProteinSpeciesType):
prot_specie = prot.species.get_or_create(compartment=cytosol)
conc = round(abs(random.normal(loc=mean_protein_copy_number,scale=15))) / scipy.constants.Avogadro / mean_volume
cell.concentrations.get_or_create(id='CONC({})'.format(prot_specie.id), species=prot_specie, value=conc, units=unit_registry.parse_units('M'))
def reduce_model(self):
options = self.options
genetic_code = options.get('genetic_code')
seq_path = options.get('seq_path')
if genetic_code == 'normal':
pass
elif genetic_code == 'reduced':
cell = self.knowledge_base.cell
kb = self.knowledge_base
bases = "TCAG"
codons = [a + b + c for a in bases for b in bases for c in bases]
dna = kb.cell.species_types.get(__type = wc_kb.core.DnaSpeciesType)[0]
seq_str = str(dna.get_seq())
seq_list = list(seq_str)
for prot in kb.cell.species_types.get(__type = wc_kb.prokaryote.ProteinSpeciesType):
for base_num in range(prot.gene.start+2,prot.gene.end-3,3):
new_codon = random.choice(['ATC', 'CTG', 'ATG', 'ACG'])
seq_list[base_num]=new_codon[0]
seq_list[base_num+1]=new_codon[1]
seq_list[base_num+2]=new_codon[2]
seq_str_new = ''.join(seq_list)
seq=Seq(seq_str_new)
dna_seqs = [Bio.SeqRecord.SeqRecord(seq, dna.id)]
with open(seq_path, 'w') as file:
writer = Bio.SeqIO.FastaIO.FastaWriter(
file, wrap=70, record2title=lambda record: record.id)
writer.write_file(dna_seqs)
def rand(self, mean, count=1, min=0, max=numpy.inf):
""" Generated 1 or more random normally distributed integer(s) with standard deviation equal
to the square root of the mean value.
Args:
mean (:obj:`float`): mean value
count (:obj:`int`): number of random numbers to generate
Returns:
:obj:`int` or :obj:`numpy.ndarray` of :obj:`int`: random normally distributed integer(s)
"""
a = (min - mean) / numpy.sqrt(mean)
b = (max - mean) / numpy.sqrt(mean)
return numpy.int64(numpy.round(stats.truncnorm.rvs(a, b, loc=mean, scale=numpy.sqrt(mean), size=count)))
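# --- Illustrative sketch (not part of the original module) ---
# rand() above draws integers from a truncated normal distribution whose
# standard deviation is the square root of the mean. This standalone helper
# shows the same idea; the example mean and bounds are arbitrary.
def _example_truncated_normal_draw(mean=300, lower=2, upper=numpy.inf, count=10):
    a = (lower - mean) / numpy.sqrt(mean)
    b = (upper - mean) / numpy.sqrt(mean)
    return numpy.int64(numpy.round(
        stats.truncnorm.rvs(a, b, loc=mean, scale=numpy.sqrt(mean), size=count)))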
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from scipy.sparse import coo_matrix, csr_matrix, csc_matrix
class SBiScale(object):
''' A sparse approach to scaling and centering, row-wise and column-wise, for input to a SoftImpute algorithm.
maxit: int
the maximum number of iterations allowed for obtaining the ideal scaling and centering levels.
thresh: int
the threshold for convergence
row_center, row_scale, col_center, col_scale: bool
a boolean indicating whether or not the task should be completed.
trace: bool
whether or not a verbose output should be provided.
'''
def __init__(self, maxit=20, thresh=1e-9, row_center=True, row_scale=False, col_center=True, col_scale=False, trace=False):
self.maxit = maxit
self.thresh = thresh
self.row_center = row_center
self.row_scale = row_scale
self.col_center = col_center
self.col_scale = col_scale
self.trace = trace
self.x = None
self.m = None
self.n = None
self.a = None
self.b = None
self.tau = None
self.gamma = None
self.xhat = None
self.critmat = []
def _prepare_suvc(self):
a = self.a.copy()
a = a.reshape(-1,1)
b = self.b.copy()
b = b.reshape(-1,1)
a = np.hstack((a, np.ones(a.shape[0]).reshape(-1,1)))
b = np.hstack((np.ones(b.shape[0]).reshape(-1,1), b))
return a, b
def _pred_one(self, u, v, row, col):
u_data = np.expand_dims(u[row,:], 0)
return float(u_data.dot(v[col, :].T))
def _c_suvc(self, u, v, irow, icol):
nomega = len(irow)
res = np.zeros(nomega)
targets = zip(irow, icol)
for idx, (r,c) in enumerate(targets):
res[idx] = self._pred_one(u, v, r, c)
return res
def _center_scale_I(self):
x = self.x.data
a, b = self._prepare_suvc()
coo_x = coo_matrix(self.x)
irow = coo_x.row
icol = coo_x.col
suvc1 = self._c_suvc(a, b, irow, icol)
suvc2 = self._c_suvc(self.tau.reshape(-1,1), self.gamma.reshape(-1,1), irow, icol)
self.xhat.data = (x-suvc1) / suvc2
return self
def _col_sum_along(self, a, x):
x = (self.x != 0)
a = csc_matrix(a.T)
return a.dot(x).toarray()
def _row_sum_along(self, b, x):
x = (self.x != 0)
return x.dot(b)
def _add_variables(self, x):
self.x = x
self.m = x.shape[0]
self.n = x.shape[1]
self.a = np.zeros(self.m)
self.b = np.zeros(self.n)
self.tau = np.ones(self.m)
self.gamma = np.ones(self.n)
self.xhat = self.x.copy()
return self
def fit(self, x):
''' Fits data to provide ideal scaling/centering levels. Runs until convergence is achieved or maximum iterations are reached.
x: scipy.sparse matrix type
The data to fit.
Returns: scipy.sparse type matrix
The scaled/centered matrix.
'''
self._add_variables(x)
self._center_scale_I()
for i in range(self.maxit):
# Centering
## Column mean
if self.col_center:
colsums = np.sum(self.xhat, axis=0)
gamma_by_sum = np.multiply(colsums,(self.gamma))
dbeta = gamma_by_sum / self._col_sum_along(1 / self.tau, self.x)
self.b = self.b + dbeta
self.b[np.isnan(self.b)] = 0
self._center_scale_I()
else:
dbeta = 0
## Row Mean
if self.row_center:
rowsums = np.sum(self.xhat, axis=1).T
tau_by_sum = np.multiply(self.tau, rowsums)
dalpha = tau_by_sum / self._row_sum_along(1 / self.gamma, self.x)
self.a = self.a + dalpha
self.a[np.isnan(self.a)] = 0
self._center_scale_I()
else:
dalpha = 0
#Leaving out scaling for now; not required for SoftImputeALS algorithm
dalpha[np.isnan(dalpha)] = 0
dbeta[np.isnan(dbeta)] = 0
convergence_level = np.square(dalpha)
from __future__ import print_function, division
import numpy as np
import matplotlib.pylab as plt
import astropy.units as u
from astropy import log
from astropy.utils.console import ProgressBar
import pyspeckit
import os
# imports for the test fiteach redefinition
import time
import itertools
from astropy.extern.six import string_types
# gotta catch 'em all!
class AllFixedException(Exception):
""" Zero degrees of freedom. """
pass
class NanGuessesException(Exception):
""" Guesses have NaN values."""
pass
class SnrCutException(Exception):
""" Pixel is below SNR threshold. """
pass
class NanSnrAtPixel(Exception):
""" S/N at pixel has a NaN value. """
pass
class SubCube(pyspeckit.Cube):
"""
An extension of Cube, tinkered to be an instance of MultiCube, from which
it receives references to instances of pyspeckit.Cube that do not depend
on a spectral model chosen (so that parent MultiCube doesn't weigh so much)
Is designed to have methods that operate within a single spectral model.
"""
def __init__(self, *args, **kwargs):
super(SubCube, self).__init__(*args, **kwargs)
# because that UnitConversionError pops up way too often
if self.xarr.velocity_convention is None:
self.xarr.velocity_convention = 'radio'
# so I either define some things as `None`
# or I'll have to call hasattr or them...
# TODO: which is a more Pythonic approach?
# A: probably the hasattr method, see here:
# http://programmers.stackexchange.com/questions/
# 254576/is-it-a-good-practice-to-declare-instance
# -variables-as-none-in-a-class-in-python
self.guess_grid = None
self.model_grid = None
# TODO: investigate whether pyspeckit's #179 needs to be hacked
# around inside either update_model or make_guess_grid methods
def update_model(self, fit_type='gaussian'):
"""
Tie a model to a SubCube. Should work for all the standard
fitters; others can be added with Cube.add_fitter method.
"""
try:
allowed_fitters = self.specfit.Registry.multifitters
self.specfit.fitter = allowed_fitters[fit_type]
except KeyError:
raise ValueError('Unsupported fit type: %s\n'
'Choose one from %s'
% (fit_type, allowed_fitters.keys()))
log.info("Selected %s model" % fit_type)
self.specfit.fittype = fit_type
self.fittype = fit_type
def make_guess_grid(self, minpars, maxpars, finesse, fixed=None,
limitedmin=None, limitedmax=None, **kwargs):
"""
Given parameter ranges and a finesse parameter, generate a grid of
guesses in a parameter space to be iterated upon in self.best_guess
Maybe if parlimits arg is None we can look into parinfo?
Parameters
----------
minpars : an iterable containing minimal parameter values
maxpars : an iterable containing maximal parameter values
finesse : an integer or 1xNpars list/array setting the size
of cells between minimal and maximal values in
the resulting guess grid
fixed : an iterable of booleans setting whether or not to fix the
fitting parameters. Will be passed to Cube.fiteach, defaults
to an array of False-s.
limitedmin : an iterable of booleans controlling if the fit fixed
the minimal boundary of from minpars.
limitedmax : an iterable of booleans controlling if the fit fixed
the maximal boundary of from maxpars.
Returns
-------
guess_grid : a grid of guesses to use for SubCube.generate_model
In addition, it saves a number of variables under self as a dictionary
passed later to Cube.fiteach as additional arguments, with keywords:
['fixed', 'limitedmin', 'limitedmax', 'minpars', 'maxpars']
"""
minpars, maxpars = np.asarray([minpars, maxpars])
truths, falses = (np.ones(minpars.shape, dtype=bool),
np.zeros(minpars.shape, dtype=bool))
fixed = falses if fixed is None else fixed
limitedmin = truths if limitedmin is None else limitedmin
limitedmax = truths if limitedmax is None else limitedmax
self.fiteach_args = {'fixed' : fixed,
'limitedmin': limitedmin,
'limitedmax': limitedmax,
'minpars' : minpars,
'maxpars' : maxpars }
# TODO: why does 'fixed' break the gaussian fitter?
# update as of 1.08.2016: this doesn't happen anymore
#if self.fittype is 'gaussian':
# self.fiteach_args.pop('fixed')
guess_grid = self._grid_parspace(minpars, maxpars, finesse, **kwargs)
guess_grid = self._remove_close_peaks(guess_grid, **kwargs)
self.fiteach_arg_grid = {key: np.repeat([val], guess_grid.shape[0],
axis=0) for key, val in
self.fiteach_args.items()}
self.guess_grid = guess_grid
return guess_grid
def expand_guess_grid(self, minpars, maxpars, finesse, fixed=None,
limitedmin=None, limitedmax=None, **kwargs):
"""
Useful for "chunky" discontinuities in parameter space.
Works as SubCube.make_guess_grid, but instead of creating guess_grid
from scratch, the new guess grid is appended to an existing one.
Parameter limits information is extended to accommodate the new grid.
Returns
-------
guess_grid : an updated grid of guesses
"""
minpars, maxpars = np.asarray([minpars, maxpars])
guess_grid = self._grid_parspace(minpars, maxpars, finesse, **kwargs)
guess_grid = self._remove_close_peaks(guess_grid, **kwargs)
# expanding the parameter boundaries
minpars, maxpars = (
np.vstack([self.fiteach_args['minpars'], minpars]).min(axis=0),
np.vstack([self.fiteach_args['maxpars'], maxpars]).max(axis=0) )
self.fiteach_args['minpars'] = minpars
self.fiteach_args['maxpars'] = maxpars
# updating the fiteach_arg grid
truths, falses = (np.ones(minpars.shape, dtype=bool),
np.zeros(minpars.shape, dtype=bool))
fixed = falses if fixed is None else fixed
limitedmin = truths if limitedmin is None else limitedmin
limitedmax = truths if limitedmax is None else limitedmax
expand_dict = {'fixed' : fixed,
'limitedmin': limitedmin,
'limitedmax': limitedmax,
'minpars' : minpars,
'maxpars' : maxpars }
for key, val in expand_dict.items():
expander = np.repeat([expand_dict[key]],
np.prod(np.atleast_1d(finesse)
import numpy as np
import matplotlib
matplotlib.use("Agg") # Must be before importing matplotlib.pyplot or pylab!
from matplotlib import pyplot as plt
from matplotlib.colors import to_rgb
from matplotlib.patches import Circle
from matplotlib import cm
from mpl_toolkits.mplot3d import proj3d, Axes3D
from tqdm import tqdm
from typing import Dict, Sequence
def plot_video_with_surface(
rods_history: Sequence[Dict],
video_name="video.mp4",
fps=60,
step=1,
vis2D=True,
**kwargs,
):
plt.rcParams.update({"font.size": 22})
folder_name = kwargs.get("folder_name", "")
# 2d case <always 2d case for now>
import matplotlib.animation as animation
# simulation time
sim_time = np.array(rods_history[0]["time"])
# Rod
n_visualized_rods = len(rods_history) # should be one for now
# Rod info
rod_history_unpacker = lambda rod_idx, t_idx: (
rods_history[rod_idx]["position"][t_idx],
rods_history[rod_idx]["radius"][t_idx],
)
# Rod center of mass
com_history_unpacker = lambda rod_idx, t_idx: rods_history[rod_idx]["com"][t_idx]
# Generate target sphere data
sphere_flag = False
if kwargs.__contains__("sphere_history"):
sphere_flag = True
sphere_history = kwargs.get("sphere_history")
n_visualized_spheres = len(sphere_history) # should be one for now
sphere_history_unpacker = lambda sph_idx, t_idx: (
sphere_history[sph_idx]["position"][t_idx],
sphere_history[sph_idx]["radius"][t_idx],
)
# color mapping
sphere_cmap = cm.get_cmap("Spectral", n_visualized_spheres)
# video pre-processing
print("plot scene visualization video")
FFMpegWriter = animation.writers["ffmpeg"]
metadata = dict(title="Movie Test", artist="Matplotlib", comment="Movie support!")
writer = FFMpegWriter(fps=fps, metadata=metadata)
dpi = kwargs.get("dpi", 100)
xlim = kwargs.get("x_limits", (-1.0, 1.0))
ylim = kwargs.get("y_limits", (-1.0, 1.0))
zlim = kwargs.get("z_limits", (-0.05, 1.0))
difference = lambda x: x[1] - x[0]
max_axis_length = max(difference(xlim), difference(ylim))
# The scaling factor from physical space to matplotlib space
scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
if kwargs.get("vis3D", True):
fig = plt.figure(1, figsize=(10, 8), frameon=True, dpi=dpi)
ax = plt.axes(projection="3d")
ax.set_xlabel("x")
ax.set_ylabel("y")
ax.set_zlabel("z")
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
ax.set_zlim(*zlim)
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_scatters[rod_idx] = ax.scatter(
inst_position[0],
inst_position[1],
inst_position[2],
s=np.pi * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = ax.scatter(
sphere_position[0],
sphere_position[1],
sphere_position[2],
s=np.pi * (scaling_factor * sphere_radius) ** 2,
)
# sphere_radius,
# color=sphere_cmap(sphere_idx),)
ax.add_artist(sphere_artists[sphere_idx])
# ax.set_aspect("equal")
video_name_3D = folder_name + "3D_" + video_name
with writer.saving(fig, video_name_3D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_scatters[rod_idx]._offsets3d = (
inst_position[0],
inst_position[1],
inst_position[2],
)
# rod_scatters[rod_idx].set_offsets(inst_position[:2].T)
rod_scatters[rod_idx].set_sizes(
np.pi * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx]._offsets3d = (
sphere_position[0],
sphere_position[1],
sphere_position[2],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
if kwargs.get("vis2D", True):
max_axis_length = max(difference(xlim), difference(ylim))
# The scaling factor from physical space to matplotlib space
scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_xlim(*xlim)
ax.set_ylim(*ylim)
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_lines[rod_idx] = ax.plot(
inst_position[0], inst_position[1], "r", lw=0.5
)[0]
inst_com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx] = ax.plot(inst_com[0], inst_com[1], "k--", lw=2.0)[0]
rod_scatters[rod_idx] = ax.scatter(
inst_position[0],
inst_position[1],
s=np.pi * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = Circle(
(sphere_position[0], sphere_position[1]),
sphere_radius,
color=sphere_cmap(sphere_idx),
)
ax.add_artist(sphere_artists[sphere_idx])
ax.set_aspect("equal")
video_name_2D = folder_name + "2D_xy_" + video_name
with writer.saving(fig, video_name_2D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_lines[rod_idx].set_xdata(inst_position[0])
rod_lines[rod_idx].set_ydata(inst_position[1])
com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx].set_xdata(com[0])
rod_com_lines[rod_idx].set_ydata(com[1])
rod_scatters[rod_idx].set_offsets(inst_position[:2].T)
rod_scatters[rod_idx].set_sizes(
np.pi * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx].center = (
sphere_position[0],
sphere_position[1],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
# Plot zy
max_axis_length = max(difference(zlim), difference(ylim))
# The scaling factor from physical space to matplotlib space
scaling_factor = (2 * 0.1) / max_axis_length # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_xlim(*zlim)
ax.set_ylim(*ylim)
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_lines[rod_idx] = ax.plot(
inst_position[2], inst_position[1], "r", lw=0.5
)[0]
inst_com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx] = ax.plot(inst_com[2], inst_com[1], "k--", lw=2.0)[0]
rod_scatters[rod_idx] = ax.scatter(
inst_position[2],
inst_position[1],
s=np.pi * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = Circle(
(sphere_position[2], sphere_position[1]),
sphere_radius,
color=sphere_cmap(sphere_idx),
)
ax.add_artist(sphere_artists[sphere_idx])
ax.set_aspect("equal")
video_name_2D = folder_name + "2D_zy_" + video_name
with writer.saving(fig, video_name_2D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_lines[rod_idx].set_xdata(inst_position[2])
rod_lines[rod_idx].set_ydata(inst_position[1])
com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx].set_xdata(com[2])
rod_com_lines[rod_idx].set_ydata(com[1])
rod_scatters[rod_idx].set_offsets(
np.vstack((inst_position[2], inst_position[1])).T
)
rod_scatters[rod_idx].set_sizes(
np.pi * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx].center = (
sphere_position[2],
sphere_position[1],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
# Plot xz
fig = plt.figure(2, figsize=(10, 8), frameon=True, dpi=dpi)
ax = fig.add_subplot(111)
ax.set_xlim(*xlim)
ax.set_ylim(*zlim)
# The scaling factor from physical space to matplotlib space
max_axis_length = max(difference(zlim), difference(xlim))
scaling_factor = (2 * 0.1) / (max_axis_length) # Octopus head dimension
scaling_factor *= 2.6e3 # Along one-axis
time_idx = 0
rod_lines = [None for _ in range(n_visualized_rods)]
rod_com_lines = [None for _ in range(n_visualized_rods)]
rod_scatters = [None for _ in range(n_visualized_rods)]
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(rod_idx, time_idx)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (inst_position[..., 1:] + inst_position[..., :-1])
rod_lines[rod_idx] = ax.plot(
inst_position[0], inst_position[2], "r", lw=0.5
)[0]
inst_com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx] = ax.plot(inst_com[0], inst_com[2], "k--", lw=2.0)[0]
rod_scatters[rod_idx] = ax.scatter(
inst_position[0],
inst_position[2],
s=np.pi * (scaling_factor * inst_radius) ** 2,
)
if sphere_flag:
sphere_artists = [None for _ in range(n_visualized_spheres)]
for sphere_idx in range(n_visualized_spheres):
sphere_position, sphere_radius = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx] = Circle(
(sphere_position[0], sphere_position[2]),
sphere_radius,
color=sphere_cmap(sphere_idx),
)
ax.add_artist(sphere_artists[sphere_idx])
ax.set_aspect("equal")
video_name_2D = folder_name + "2D_xz_" + video_name
with writer.saving(fig, video_name_2D, dpi):
with plt.style.context("seaborn-whitegrid"):
for time_idx in tqdm(range(0, sim_time.shape[0], int(step))):
for rod_idx in range(n_visualized_rods):
inst_position, inst_radius = rod_history_unpacker(
rod_idx, time_idx
)
if not inst_position.shape[1] == inst_radius.shape[0]:
inst_position = 0.5 * (
inst_position[..., 1:] + inst_position[..., :-1]
)
rod_lines[rod_idx].set_xdata(inst_position[0])
rod_lines[rod_idx].set_ydata(inst_position[2])
com = com_history_unpacker(rod_idx, time_idx)
rod_com_lines[rod_idx].set_xdata(com[0])
rod_com_lines[rod_idx].set_ydata(com[2])
rod_scatters[rod_idx].set_offsets(
np.vstack((inst_position[0], inst_position[2])).T
)
rod_scatters[rod_idx].set_sizes(
np.pi * (scaling_factor * inst_radius) ** 2
)
if sphere_flag:
for sphere_idx in range(n_visualized_spheres):
sphere_position, _ = sphere_history_unpacker(
sphere_idx, time_idx
)
sphere_artists[sphere_idx].center = (
sphere_position[0],
sphere_position[2],
)
writer.grab_frame()
# Be a good boy and close figures
# https://stackoverflow.com/a/37451036
# plt.close(fig) alone does not suffice
# See https://github.com/matplotlib/matplotlib/issues/8560/
plt.close(plt.gcf())
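# --- Illustrative usage sketch (not part of the original function) ---
# plot_video_with_surface expects, per rod, a dict of time series keyed by
# "time", "position", "radius" and "com" (see the unpacker lambdas above).
# The toy arrays below only illustrate the assumed shapes; rendering also
# requires a working ffmpeg installation.
def _example_plot_video_call():
    n_steps, n_elems = 5, 10
    toy_rod = {
        "time": np.linspace(0.0, 1.0, n_steps),
        "position": np.zeros((n_steps, 3, n_elems + 1)),
        "radius": 0.01 * np.ones((n_steps, n_elems)),
        "com": np.zeros((n_steps, 3)),
    }
    plot_video_with_surface([toy_rod], video_name="toy.mp4", fps=10, step=1,
                            x_limits=(-1, 1), y_limits=(-1, 1),
                            z_limits=(-0.05, 1))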
def plot_snake_velocity(
plot_params: dict,
period,
filename="slithering_snake_velocity.png",
):
time_per_period = np.array(plot_params["time"]) / period
avg_velocity = np.array(plot_params["avg_velocity"])
[
velocity_in_direction_of_rod,
velocity_in_rod_roll_dir,
_,
_,
] = compute_projected_velocity(plot_params, period)
fig = plt.figure(figsize=(10, 8), frameon=True, dpi=150)
ax = fig.add_subplot(111)
ax.grid(b=True, which="minor", color="k", linestyle="--")
ax.grid(b=True, which="major", color="k", linestyle="-")
ax.plot(
time_per_period[:], velocity_in_direction_of_rod[:, 0], "r-", label="forward"
)
ax.plot(
time_per_period[:],
velocity_in_rod_roll_dir[:, 1],
c=to_rgb("xkcd:bluish"),
label="lateral",
)
ax.plot(time_per_period[:], avg_velocity[:, 2], "k-", label="normal")
fig.legend(prop={"size": 20})
fig.savefig(filename)
def compute_projected_velocity(plot_params: dict, period):
time_per_period = np.array(plot_params["time"]) / period
avg_velocity = np.array(plot_params["avg_velocity"])
import sys
from typing import List, Tuple
import numpy as np
import pandas as pd
def get_valid_gene_info(
genes: List[str],
release=102,
species='homo sapiens'
) -> Tuple[List[str], List[int], List[int], List[int]]:
"""Returns gene locations for all genes in ensembl release 93 --S Markson 3 June 2020
Parameters
----------
genes : List[str]
    A list of genes
release : int
    Ensembl release number (default 102)
species : str
    Species name (default 'homo sapiens')
Returns
-------
"""
from pyensembl import EnsemblRelease
assembly = EnsemblRelease(release, species=species)
gene_names = []
gene_contigs = []
gene_starts = []
gene_ends = []
for gene in np.intersect1d(genes, [
gene.gene_name for gene in assembly.genes()
if gene.contig.isnumeric() or gene.contig == 'X'
]): # Toss genes not in hg38 release 93
gene_info = assembly.genes_by_name(gene)
gene_info = gene_info[0]
gene_names.append(gene)
gene_contigs.append(gene_info.contig)
gene_starts.append(gene_info.start)
gene_ends.append(gene_info.end)
return gene_names, gene_contigs, gene_starts, gene_ends
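# --- Illustrative usage sketch (not part of the original module) ---
# Assumes pyensembl and the requested Ensembl release are installed locally
# (e.g. via `pyensembl install --release 102 --species homo_sapiens`).
def _example_gene_lookup():
    names, contigs, starts, ends = get_valid_gene_info(['TP53', 'BRCA1'])
    for name, contig, start, end in zip(names, contigs, starts, ends):
        print(f"{name}: chr{contig}:{start}-{end}")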
def seurat_to_loom(seuratrds, patient_id_column, celltype_column,
complexity_column, loomfile):
"""
Parameters
----------
seuratrds :
patient_id_column :
celltype_column :
complexity_column :
loomfile :
Returns
-------
"""
import rpy2.robjects as robjects
from scipy import sparse
from rpy2.robjects import pandas2ri
import loompy
robjects.r('''
library(Seurat)
seurat2rawandmeta <- function(seuratrds) {
seuratobj <- readRDS(seuratrds)
return(list(genes=rownames(seuratobj@data), metadata=seuratobj@meta.data, data=as.data.frame(summary(seuratobj@data))))
}
''')
seurat_grab = robjects.r['seurat2rawandmeta'](seuratrds)
genes = pd.DataFrame(np.array(seurat_grab.rx2('genes')))
genes.columns = ['gene']
metadata = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('metadata'))
if patient_id_column != 'patient_ID':
metadata['patient_ID'] = metadata[patient_id_column]
metadata.drop(patient_id_column, axis=1, inplace=True)
if celltype_column != 'cell_type':
metadata['cell_type'] = metadata[celltype_column]
metadata.drop(celltype_column, axis=1, inplace=True)
if complexity_column != 'complexity':
metadata['complexity'] = metadata[complexity_column]
metadata.drop(complexity_column, axis=1, inplace=True)
data_df = pandas2ri.rpy2py_dataframe(seurat_grab.rx2('data'))
sparsedata = sparse.coo_matrix(
(data_df['x'], (data_df['i'] - 1, data_df['j'] - 1))).tocsc()
sparsedata.resize((genes.shape[0], metadata.shape[0]))
loompy.create(loomfile, sparsedata, genes.to_dict("list"),
metadata.to_dict("list"))
def intify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
for col in df.columns:
if col.endswith('_ad'):
raise Exception(
"Don't append you column names with _ad! -- Samuel")
df[col] = df[col].apply(
lambda x: int(binascii.hexlify(x.encode()), 16))
while np.sum(df.max() > sys.maxsize) > 0:
for col in df.columns:
if df[col].max() > sys.maxsize:
df[col + '_ad'] = df[col] // sys.maxsize
df[col] = df[col] % sys.maxsize
return df.astype(np.int64)
def deintify(df_init):
"""
Parameters
----------
df_init :
Returns
-------
"""
import binascii
df = df_init.copy()
while np.sum([x.endswith('_ad') for x in df.columns]) > 0:
for col in df.columns:
if col.endswith('_ad') and col + '_ad' not in df.columns:
df[col[0:-3]] = df[col[0:-3]].astype(object)
df[col] = df[col].astype(object)
df[col[0:-3]] = df[col[0:-3]] + sys.maxsize * df[col]
df.drop(col, axis=1, inplace=True)
for col in df.columns:
try:
df[col] = df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode())
except:
print(df[col].apply(
lambda x: binascii.unhexlify(hex(x)[2::].encode()).decode()))
raise Exception("whoops")
return df
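# Illustrative round trip (not part of the original source); the column name is
# hypothetical. intify() hex-encodes each string cell as an integer (splitting
# overflow into extra *_ad columns) so it can be stored as a numeric loom
# attribute, and deintify() reverses the encoding:
#
#   df = pd.DataFrame({'sample': ['patient_1', 'patient_2']})
#   encoded = intify(df)
#   restored = deintify(encoded)
#   assert (restored['sample'] == df['sample']).all()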
def recover_meta(db, do_deint=False):
"""
Parameters
----------
db :
do_deint :
(Default value = False)
Returns
-------
"""
colmeta = None
for key in db.ca.keys():
if colmeta is None:
colmeta = pd.DataFrame(db.ca[key])
colmeta.columns = [key]
else:
colmeta[key] = db.ca[key]
if do_deint:
colmeta = deintify(colmeta.astype(np.int64))
rowmeta = None
for key in db.ra.keys():
if rowmeta is None:
rowmeta = pd.DataFrame(db.ra[key])
rowmeta.columns = [key]
else:
rowmeta[key] = db.ra[key]
if do_deint:
rowmeta = deintify(rowmeta.astype(np.int64))
return rowmeta, colmeta
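# Illustrative usage (not part of the original source); the loom file name is
# hypothetical:
#
#   import loompy
#   with loompy.connect('sample.loom') as db:
#       rowmeta, colmeta = recover_meta(db)
#   # rowmeta holds one column per row attribute, colmeta one per column attribute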
def we_can_pickle_it(thing, thingname: str):
"""
Parameters
----------
thing :
thingname : str :
Returns
-------
"""
import pickle
with open(thingname, 'wb') as f:
pickle.dump(thing, f, pickle.HIGHEST_PROTOCOL)
def we_can_unpickle_it(thingname: str):
"""
Parameters
----------
thingname : str :
Returns
-------
"""
import pickle
with open(thingname, 'rb') as f:
thing = pickle.load(f)
return thing
def get_alpha_concave_hull_polygon(xcoords, ycoords, alpha=0.1, buffer=1):
"""Much credit to https://thehumangeo.wordpress.com/2014/05/12/drawing-boundaries-in-python/
Parameters
----------
xcoords :
ycoords :
alpha :
(Default value = 0.1)
buffer :
(Default value = 1)
Returns
-------
"""
from shapely.ops import cascaded_union, polygonize
import shapely.geometry as geometry
from scipy.spatial import Delaunay
import numpy as np
import math
def alpha_shape(points, alpha):
"""Compute the alpha shape (concave hull) of a set
of points.
Parameters
----------
points :
Iterable container of points.
alpha :
alpha value to influence the
gooeyness of the border. Smaller numbers
don't fall inward as much as larger numbers.
Too large, and you lose everything!
Returns
-------
"""
if len(points) < 4:
# When you have a triangle, there is no sense
# in computing an alpha shape.
return geometry.MultiPoint(list(points)).convex_hull
def add_edge(edges, edge_points, coords, i, j):
"""Add a line between the i-th and j-th points,
if not in the list already
Parameters
----------
edges :
edge_points :
coords :
i :
j :
Returns
-------
"""
if (i, j) in edges or (j, i) in edges:
# already added
return
edges.add((i, j))
edge_points.append(coords[[i, j]])
coords = np.array([point.coords[0] for point in points])
tri = Delaunay(coords)
edges = set()
edge_points = []
# loop over triangles:
# ia, ib, ic = indices of corner points of the
# triangle
for ia, ib, ic in tri.vertices:
pa = coords[ia]
pb = coords[ib]
pc = coords[ic]
# Lengths of sides of triangle
a = math.sqrt((pa[0] - pb[0])**2 + (pa[1] - pb[1])**2)
b = math.sqrt((pb[0] - pc[0])**2 + (pb[1] - pc[1])**2)
c = math.sqrt((pc[0] - pa[0])**2 + (pc[1] - pa[1])**2)
# Semiperimeter of triangle
s = (a + b + c) / 2.0
# Area of triangle by Heron's formula
area = math.sqrt(s * (s - a) * (s - b) * (s - c))
circum_r = a * b * c / (4.0 * area)
# Here's the radius filter.
#print circum_r
if circum_r < 1.0 / alpha:
add_edge(edges, edge_points, coords, ia, ib)
add_edge(edges, edge_points, coords, ib, ic)
add_edge(edges, edge_points, coords, ic, ia)
m = geometry.MultiLineString(edge_points)
triangles = list(polygonize(m))
return cascaded_union(triangles), edge_points
points = []
for x, y in zip(xcoords, ycoords):
points.append(geometry.shape({'type': 'Point', 'coordinates': [x, y]}))
concave_hull, edge_points = alpha_shape(points, alpha=alpha)
return concave_hull.buffer(buffer)
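# Illustrative usage (not part of the original source); random points are used
# purely for demonstration:
#
#   xs, ys = np.random.rand(200), np.random.rand(200)
#   poly = get_alpha_concave_hull_polygon(xs, ys, alpha=0.1, buffer=1)
#   # `poly` is a shapely geometry; for a simple Polygon, poly.exterior.xy
#   # gives the boundary coordinates for plotting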
def get_outlier_removal_mask(xcoords, ycoords, nth_neighbor=10, quantile=.9):
"""
Parameters
----------
xcoords :
ycoords :
nth_neighbor :
(Default value = 10)
quantile :
(Default value = .9)
Returns
-------
"""
from scipy.spatial.distance import pdist, squareform
D = squareform(pdist(np.vstack((xcoords, ycoords)).T))
distances = D[np.argsort(D, axis=0)[nth_neighbor - 1, :], 0]
return distances <= np.quantile(distances, quantile)
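# Illustrative usage (not part of the original source): the mask is intended to
# drop points whose distance to a far (nth) neighbor exceeds the given quantile.
#
#   mask = get_outlier_removal_mask(xs, ys, nth_neighbor=10, quantile=.9)
#   xs_kept, ys_kept = np.array(xs)[mask], np.array(ys)[mask]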
def cohensd(g1, g2):
"""
Returns Cohen's D for the effect size of group 1 values (g1) over group 2 values (g2).
Parameters
----------
g1 : group 1 values (list or numpy vector)
g2 : group 2 values (list or numpy vector)
Returns
-------
(mean(g1) - mean(g2) )/s, where s is the pooled standard deviation of the two groups with Bessel's correction
"""
n1 = len(g1)
n2 = len(g2)
s1 = np.std(g1, ddof=1)
s2 = np.std(g2, ddof=1)
s = np.sqrt(((n1 - 1) * s1 * s1 + (n2 - 1) * s2 * s2) / (n1 + n2 - 2))
return (np.mean(g1) - np.mean(g2)) / s
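# Illustrative check (not part of the original source): two groups whose means
# differ by one pooled standard deviation give d == 1.
#
#   cohensd([2.0, 3.0, 4.0], [1.0, 2.0, 3.0])   # -> 1.0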
def phi_coefficient(contingency_table):
"""
Returns the phi-coefficient for a contingency table.
    Parameters
-----------
contingency_table : contingency table, identical in format to scipy.stats.fisher_exact
Returns
-------
phi coefficient
"""
table1 = contingency_table[0]
table2 = contingency_table[1]
table = np.vstack([table1, table2])
phitop = (table1[0] * table2[1] - table1[1] * table2[0])
phibottom = np.sqrt((table2[1]+table2[0])*\
(table1[1]+table1[0])*\
(table1[0]+table2[0])*\
(table2[1]+table1[1]))
phi = phitop / phibottom
return phi
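# Illustrative check (not part of the original source), using the same 2x2
# layout as scipy.stats.fisher_exact:
#
#   phi_coefficient([[10, 0], [0, 10]])   # -> 1.0 (perfect association)
#   phi_coefficient([[5, 5], [5, 5]])     # -> 0.0 (no association)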
def get_igraph_from_adjacency(adjacency, directed=None):
"""This is taken from scanpy._utils.__init__.py as of 12 August 2021
Get igraph graph from adjacency matrix."""
import igraph as ig
sources, targets = adjacency.nonzero()
weights = adjacency[sources, targets]
if isinstance(weights, np.matrix):
weights = weights.A1
g = ig.Graph(directed=directed)
g.add_vertices(adjacency.shape[0]) # this adds adjacency.shape[0] vertices
g.add_edges(list(zip(sources, targets)))
try:
g.es['weight'] = weights
except KeyError:
pass
if g.vcount() != adjacency.shape[0]:
logg.warning(f'The constructed graph has only {g.vcount()} nodes. '
'Your adjacency matrix contained redundant nodes.')
return g
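# Illustrative usage (not part of the original source); assumes a scipy sparse
# adjacency matrix, e.g. the kNN graph scanpy stores in adata.obsp['connectivities']:
#
#   g = get_igraph_from_adjacency(adata.obsp['connectivities'], directed=False)
#   partition = g.community_multilevel(weights='weight')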
def convert_10x_h5(path_10x_h5,
output_file,
labelkey=None,
label='',
genes_as_ca=[],
gene_whitelist=None,
output_type='loom'):
import cellranger.matrix as cr_matrix
import loompy
output_type = output_file.split('.')[-1]
if output_type not in ['loom', 'pkl']:
raise Exception(
"output_file must be have suffix loom or pkl, denoting an output type of loom of pickle respectively"
)
filtered_feature_bc_matrix = cr_matrix.CountMatrix.load_h5_file(
path_10x_h5)
id2feature = {
val: key
for key, val in filtered_feature_bc_matrix.feature_ids_map.items()
}
features = [
id2feature[x].decode("utf-8")
for x in range(filtered_feature_bc_matrix.features_dim)
]
features_common_names = filtered_feature_bc_matrix.feature_ref.get_feature_names(
)
barcodes = filtered_feature_bc_matrix.bcs.astype(str)
ca = {'cellname': barcodes}
if labelkey is not None:
ca[labelkey] = [label] * len(barcodes)
m = filtered_feature_bc_matrix.m
if gene_whitelist is not None:
if len(gene_whitelist) > 0:
mask = np.isin(features, gene_whitelist)
m = m[mask, :]
features = list(np.array(features)[mask])
features_common_names = list(np.array(features_common_names)[mask])
if type(genes_as_ca) == str:
genes_as_ca = [genes_as_ca]
else:
genes_as_ca = list(genes_as_ca)
if len(genes_as_ca) > 0:
mask = np.isin(features, genes_as_ca)
if len(genes_as_ca) != mask.sum():
raise Exception(
"Improper mapping of row attributes; perhaps gene of interest not in loom.ra[\'gene\']?"
)
for gene in genes_as_ca:
submask = np.array(features) == gene
if np.sum(submask) > 1:
raise Exception("Two or more features with this name")
elif np.sum(submask) == 0:
raise Exception("No features with this name")
ca[gene] = list(m[submask, :].toarray()[0])
m = m[~mask, :]
features = list(np.array(features)[~mask])
features_common_names = list(np.array(features_common_names)[~mask])
ra = {'gene': features, 'gene_common_name': features_common_names}
if output_type == 'loom':
loompy.create(output_file, m, ra, ca)
if output_type == 'pkl':
if gene_whitelist is None:
raise Exception(
"pkl output intended only for saving a small subsetted geneset of interest. Please select a whitelist before saving as dataframe pkl."
)
mask = np.isin(features, gene_whitelist)
features = np.array(features)[mask]
features_common_names = np.array(features_common_names)[mask]
df = pd.DataFrame(m[mask, :].toarray())
df.index = features
if labelkey is not None:
df.columns = [labelkey + '_' + x for x in barcodes]
else:
df.columns = barcodes
df.to_pickle(output_file)
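# Illustrative usage (not part of the original source); file names and label
# values are hypothetical:
#
#   convert_10x_h5('sample_filtered_feature_bc_matrix.h5', 'sample.loom',
#                  labelkey='patient_ID', label='patient_1')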
def create_split_exon_gtf(input_gtf, output_gtf, gene):
gtf = pd.read_table(input_gtf, header=None, comment='#')
gtf.columns = [
'seqname', 'source', 'feature', 'start', 'end', 'score', 'strand',
'frame', 'attribute'
]
gtf = gtf[gtf['feature'] == 'exon']
if type(gene) == str:
mask = gtf['attribute'].apply(
lambda x: 'gene_name "{}"'.format(gene) in x)
    elif type(gene) in [list, tuple, np.ndarray]:
mask = np.array([False] * len(gtf))
for g in gene:
mask = mask | gtf['attribute'].apply(
lambda x: 'gene_name "{}"'.format(g) in x)
gtf_unchanged = gtf[~mask]
gtf_changed = gtf[mask]
def append_exon_number_to_id_and_name(attribute):
exon_number = attribute.split('exon_number')[1].split(';')[0].split(
'\"')[-2]
old_gene_id_str = 'gene_id' + attribute.split('gene_id')[1].split(
';')[0]
new_gene_id_str = '\"'.join(
old_gene_id_str.split('\"')[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_gene_id_str, new_gene_id_str)
old_gene_name_str = 'gene_name' + attribute.split(
'gene_name')[1].split(';')[0]
new_gene_name_str = '\"'.join(
old_gene_name_str.split('\"')[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_gene_name_str, new_gene_name_str)
old_transcript_id_str = 'transcript_id' + attribute.split(
'transcript_id')[1].split(';')[0]
new_transcript_id_str = '\"'.join(
old_transcript_id_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_transcript_id_str,
new_transcript_id_str)
old_transcript_name_str = 'transcript_name' + attribute.split(
'transcript_name')[1].split(';')[0]
new_transcript_name_str = '\"'.join(
old_transcript_name_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_transcript_name_str,
new_transcript_name_str)
if 'ccds_id' in attribute:
old_ccds_id_str = 'ccds_id' + attribute.split('ccds_id')[1].split(
';')[0]
new_ccds_id_str = '\"'.join(old_ccds_id_str.split('\"')
[0:-1]) + '-exon' + exon_number + '\"'
attribute = attribute.replace(old_ccds_id_str, new_ccds_id_str)
return attribute
gtf_changed['attribute'] = gtf_changed['attribute'].apply(
append_exon_number_to_id_and_name)
gtf = pd.concat([gtf_changed, gtf_unchanged])
gtf.to_csv(output_gtf, sep='\t', index=False, header=None)
def get_umap_from_matrix(X,
random_state=17,
verbose=True,
min_dist=0.001,
n_neighbors=20,
metric='correlation'):
import umap
reducer = umap.UMAP(random_state=random_state,
verbose=verbose,
min_dist=min_dist,
n_neighbors=n_neighbors,
metric=metric)
return reducer.fit_transform(X)
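# Illustrative usage (not part of the original source); X is assumed to be a
# (cells x features) matrix such as a PCA embedding:
#
#   embedding = get_umap_from_matrix(X, n_neighbors=20, metric='correlation')
#   # embedding has shape (X.shape[0], 2)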
def convert_h5ad(h5ad,
output_loom,
convert_obsm=True,
convert_varm=True,
convert_uns=True,
convert_layers=True):
import scanpy
import loompy
h5ad = scanpy.read_h5ad(h5ad)
ra = {'gene': np.array(h5ad.var.index)}
for col in h5ad.var.columns:
if col == 'gene':
raise Exception(
"var column of h5ad is \"gene\". This conflicts with panopticon loom format. You must rename before converting."
)
else:
ra[col] = np.array(h5ad.var[col].values)
ca = {'cellname': np.array(h5ad.obs.index)}
for col in h5ad.obs.columns:
if col == 'cellname':
raise Exception(
"obs column of h5ad is \"cellname\". This conflicts with panopticon loom format. You must rename before converting."
)
else:
ca[col] = np.array(h5ad.obs[col].values)
if convert_obsm:
for obsm_key in h5ad.obsm.keys():
for i in range(h5ad.obsm[obsm_key].shape[1]):
ca_key = "{}_{}".format(
obsm_key,
i + 1) # one added so that these are 1-indexed by default
if ca_key in ca.keys():
raise Exception(
"key\"{}\" already present as column attribute key. Please rename to avoid."
)
else:
ca[ca_key] = h5ad.obsm[obsm_key][:, i]
if convert_varm:
for varm_key in h5ad.varm.keys():
for i in range(h5ad.varm[varm_key].shape[1]):
ra_key = "{}_{}".format(
varm_key,
i + 1) # one added so that these are 1-indexed by default
if ra_key in ra.keys():
raise Exception(
"key\"{}\" already present as row attribute key. Please rename to avoid."
)
else:
ra[ra_key] = h5ad.varm[varm_key][:, i]
loompy.create(output_loom, h5ad.X.T, ra, ca)
if convert_uns:
loom = loompy.connect(output_loom)
for uns_key in h5ad.uns.keys():
loom.attrs[uns_key] = h5ad.uns[uns_key]
loom.close()
if convert_layers:
loom = loompy.connect(output_loom)
for layer_key in h5ad.layers.keys():
            loom.layers[layer_key] = h5ad.layers[layer_key].T
loom.close()
def get_UMI_curve_from_10x_h5(path_10x_h5, save_to_file=None):
import cellranger.matrix as cr_matrix
import matplotlib.pyplot as plt
bc_matrix = cr_matrix.CountMatrix.load_h5_file(path_10x_h5)
fig, ax = plt.subplots(figsize=(5, 5))
ax.plot(np.sort(bc_matrix.get_counts_per_bc())[::-1])
ax.set_title('UMI counts per barcode, sorted')
ax.set_ylabel('UMI counts')
ax.set_xlabel('cell rank, UMI counts (most to fewest)')
ax.set_xscale('log')
ax.set_yscale('log')
if save_to_file is None:
plt.show()
else:
plt.savefig(save_to_file)
plt.cla()
def get_dsb_normalization(cell_antibody_counts,
empty_droplet_antibody_counts,
use_isotype_control=True,
denoise_counts=True,
isotype_control_name_vec=None,
define_pseudocount=False,
pseudocount_use=10,
quantile_clipping=False,
quantile_clip=[0.001, 0.9995],
return_stats=False):
import rpy2.robjects as robjects
import rpy2.robjects.numpy2ri
if isotype_control_name_vec is None:
isotype_control_name_vec = robjects.r("NULL")
if (pseudocount_use != 10) and (not define_pseudocount):
raise Exception(
"\"define_pseudocount\" must be set to True to use pseudocount_use"
)
rpy2.robjects.numpy2ri.activate()
robjects.r('''
library(mclust)
library(dsb)
dsb <- function(cells,
empty,
use.isotype.control=TRUE,
denoise.counts=TRUE,
isotype.control.name.vec = NULL,
define.pseudocount = FALSE,
pseudocount.use = 10,
quantile.clipping = FALSE,
quantile.clip = c(0.001, 0.9995),
return.stats = FALSE){
DSBNormalizeProtein(cells, empty, use.isotype.control=use.isotype.control,
isotype.control.name.vec = isotype.control.name.vec,
denoise.counts=denoise.counts,
define.pseudocount = define.pseudocount,
pseudocount.use = pseudocount.use,
quantile.clipping = quantile.clipping,
quantile.clip = quantile.clip,
return.stats = return.stats)
}
''')
dsb = robjects.r['dsb']
return dsb(cell_antibody_counts,
empty_droplet_antibody_counts,
use_isotype_control=use_isotype_control,
denoise_counts=denoise_counts,
isotype_control_name_vec=isotype_control_name_vec,
define_pseudocount=define_pseudocount,
pseudocount_use=pseudocount_use,
quantile_clipping=quantile_clipping,
quantile_clip=quantile_clip,
return_stats=return_stats)
def get_cellphonedb_compatible_counts_and_meta(loom,
layername,
celltype_ca,
gene_ra='gene',
cellname_ca='cellname',
return_df=False,
output_prefix=None,
mouse_to_human=False):
if output_prefix is None and not return_df:
raise Exception(
"either output_prefix must be specified, or return_df must be True"
)
counts = pd.DataFrame(loom[layername][:, :])
counts.columns = loom.ca[cellname_ca]
#counts.insert(0, 'Gene', np.array([x.upper() for x in loom.ra[gene_ra]]))
genes = loom.ra[gene_ra]
if mouse_to_human:
from pybiomart import Server
server = Server(host="http://www.ensembl.org")
mouse_dataset = (server.marts['ENSEMBL_MART_ENSEMBL'].
datasets['mmusculus_gene_ensembl'])
mouse_data = mouse_dataset.query(
attributes=['ensembl_gene_id', 'external_gene_name'])
mouse_data['Gene upper'] = mouse_data['Gene name'].apply(
lambda x: str(x).upper())
human_dataset = (server.marts['ENSEMBL_MART_ENSEMBL'].
datasets['hsapiens_gene_ensembl'])
human_data = human_dataset.query(
attributes=['ensembl_gene_id', 'external_gene_name'])
conversion_dict = pd.merge(
mouse_data, human_data, left_on='Gene upper',
right_on='Gene name').set_index(
'Gene stable ID_x')['Gene stable ID_y'].to_dict()
convertible_mask = np.array(
[x in conversion_dict.keys() for x in genes])
genes = [
conversion_dict[x] if x in conversion_dict.keys() else np.nan
for x in genes
]
counts.insert(0, 'Gene', genes)
if mouse_to_human:
counts = counts.iloc[convertible_mask, :]
counts = counts.groupby('Gene').first().reset_index()
meta = pd.DataFrame(loom.ca[cellname_ca])
meta.columns = ['Cell']
meta['cell_type'] = loom.ca[celltype_ca]
if output_prefix is not None:
counts.to_csv(output_prefix + '_counts.txt', sep='\t', index=False)
meta.to_csv(output_prefix + '_meta.txt', sep='\t', index=False)
command = 'cellphonedb method statistical_analysis {0}_meta.txt {0}_counts.txt'.format(
output_prefix)
print("Run cellphonedb on command line with \"{}\"".format(command))
elif return_df:
return meta, counts
def create_gsea_txt_and_cls(loom,
layername,
output_prefix,
phenotypes,
cellmask=None,
gene_ra='gene',
cellname_ca='cellname'):
import os
if cellmask is None:
cellmask = np.array([True] * loom.shape[1])
if type(phenotypes) == str:
phenotypes = loom.ca[phenotypes]
if len(phenotypes) != cellmask.sum():
raise Exception(
"length of phenotypes vector must be equal to number of samples (cells)"
)
txt = pd.DataFrame(loom.ra[gene_ra])
txt.columns = ['NAME']
txt['DESCRIPTION'] = 'na'
#txt = pd.concat([txt,pd.DataFrame(loom[layername][:,cellmask])],axis=1)
#txt.columns = ['NAME','DESCRIPTION'] + list(loom.ca[cellname_ca][cellmask])
#txt.to_csv(output_prefix+'.txt',index=False,sep='\t')
total = cellmask.sum()
nphenotypes = len(np.unique(phenotypes))
outcls = output_prefix + '.cls'
if os.path.exists(outcls):
os.system("rm {}".format(outcls))
#raise Exception("cls file already present--cannot overwrite")
line1 = "{} {} 1".format(total, nphenotypes)
line2 = '# ' + ' '.join(np.unique(phenotypes))
phenotype2index = {
phenotype: i
for i, phenotype in enumerate(np.unique(phenotypes))
}
#print(phenotype2index)
#print([phenotype2index[x] for x in phenotypes])
line3 = ' '.join([str(phenotype2index[x]) for x in phenotypes])
for line in [line1, line2, line3]:
os.system('echo \"{}\">>{}'.format(line, outcls))
def get_cross_column_attribute_heatmap(loom,
ca1,
ca2,
normalization_axis=None):
#if type(normalization_axis) == list:
# outdfs = []
# for axis in normalization_axis:
# outdfs.append(get_cross_column_attribute_heatmap(loom, ca1, ca2, normalization_axis=axis))
# return outdfs
df = pd.DataFrame(loom.ca[ca1], copy=True)
df.columns = [ca1]
df[ca2] = loom.ca[ca2]
df = pd.DataFrame(df.groupby(ca1, )[ca2].value_counts())
df.columns = ['counts']
dfs = []
for i, df_group in df.reset_index().groupby(ca1):
dfs.append(
df_group.rename(columns={
'counts': 'counts_' + i
}).set_index(ca2)['counts_' + i])
outdf = pd.concat(dfs, axis=1)
if normalization_axis is None:
return outdf
elif normalization_axis == 0:
return np.divide(outdf, outdf.sum(axis=0).values)
elif normalization_axis == 1:
return np.divide(outdf.T, outdf.sum(axis=1).values).T
else:
raise Exception("normalization axis must be one of \"None\", 0, or 1")
def get_complement_contigency_tables(df):
if type(df) != pd.core.frame.DataFrame:
raise Exception("pandas dataframe expected input")
complement_contigency_table_dict = {}
for col in df.columns:
complement_contigency_table_dict[col] = {}
for index in df.index.values:
a = df.loc[index][col].sum()
b = df.loc[index][[x for x in df.columns if x != col]].sum()
c = df.loc[[x for x in df.index if x != index]][col].sum()
d = np.sum(df.loc[[x for x in df.index if x != index
]][[x for x in df.columns if x != col]].sum())
complement_contigency_table_dict[col][index] = [[a, b], [c, d]]
return complement_contigency_table_dict
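# Illustrative usage (not part of the original source); `counts` is assumed to
# be a cross-tabulation such as the output of get_cross_column_attribute_heatmap:
#
#   tables = get_complement_contigency_tables(counts)
#   # tables[col][row] is the 2x2 table [[a, b], [c, d]] comparing (row, col)
#   # against everything else, suitable for scipy.stats.fisher_exact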
def get_cluster_differential_expression_heatmap_df(loom,
layer,
clusteringlevel,
diffex={},
gene_name='gene',
cell_name='cellname'):
"""
Returns
-------
"""
from panopticon.analysis import get_cluster_differential_expression
import seaborn as sns
import pandas as pd
clusteredmask = []
for cluster in np.unique(loom.ca[clusteringlevel]):
mask = loom.ca[clusteringlevel] == cluster
if mask.sum() > 2:
clusteredmask.append(np.where(mask)[0])
clusteredmask = np.hstack(clusteredmask)
allgenes = []
allgeneindices = []
rawX = []
clusters = [
x for x in np.unique(loom.ca[clusteringlevel]) if x in diffex.keys()
]
for cluster in clusters:
mask = loom.ca[clusteringlevel] == cluster
genes = diffex[cluster][~diffex[cluster]['gene'].isin(allgenes)].query(
'MeanExpr1 > MeanExpr2').query('FracExpr2<.9').head(
10)['gene'].values
        genemask = np.isin(loom.ra['gene'], genes)
import subprocess as _subprocess
import numpy as _np
import os as _os
_full = lambda x: _os.path.sep.join(x)
default_track_version = 'trackcpp'
_commom_keys = ['flat_filename','energy','harmonic_number',
'cavity_state','radiation_state','vchamber_state']
def _prepare_args(dynap_type, mand_keys, **kwargs):
args = [kwargs.pop('track_version',default_track_version),dynap_type]
for key in mand_keys:
args.append(str(kwargs.pop(key)))
if kwargs:
print('Keys : '+', '.join(sorted(kwargs.keys()))+ ' were not used.')
return args
# -- dynap_xy --
def load_dynap_xy(path, var_plane='x'):
    # Load the data:
nr_header_lines = 13
fname = _full([path, 'dynap_xy_out.txt'])
turn,plane,x,y = _np.loadtxt(fname,skiprows=nr_header_lines,usecols=(1,3,5,6),unpack=True)
    # Determine how many distinct x and y values exist:
nx = len(_np.unique(x))
ny = x.shape[0]//nx
    # Reshape so that all equal x values end up in the same column:
    # flipud is used because y is decreasing:
fun = lambda x: _np.flipud(x.reshape((nx,ny)).T)
turn, plane, x, y = fun(turn), fun(plane), fun(x), fun(y)
dados = dict(x=x,y=y,plane=plane,turn=turn)
    # And identify the border of the DA (dynamic aperture):
if var_plane =='y':
lost = plane != 0
ind = lost.argmax(axis=0)
        # In case the vertical aperture is larger than the computed range:
anyloss = lost.any(axis=0)
ind = ind*anyloss + (~anyloss)*(y.shape[0]-1)
        # finally, define the DA:
h = x[0]
v = y[:,0][ind]
aper = _np.vstack([h,v])
area = _np.trapz(v,x=h)
else:
idx = x > 0
        # for negative x:
x_mi = _np.fliplr(x[~idx].reshape((ny,-1)))
plane_mi = _np.fliplr(plane[~idx].reshape((ny,-1)))
lost = plane_mi != 0
ind_neg = lost.argmax(axis=1)
        # In case the horizontal aperture is larger than the computed range:
anyloss = lost.any(axis=1)
ind_neg = ind_neg*anyloss + (~anyloss)*(x_mi.shape[1]-1)
h_neg = x_mi[0][ind_neg]
v_neg = y[:,0]
aper_neg = _np.vstack([h_neg,v_neg])
area_neg = _np.trapz(h_neg,x=v_neg)
        # for positive x
x_ma = x[idx].reshape((ny,-1))
plane_ma = plane[idx].reshape((ny,-1))
lost = plane_ma != 0
ind_pos = lost.argmax(axis=1)
        # In case the horizontal aperture is larger than the computed range:
anyloss = lost.any(axis=1)
ind_pos = ind_pos*anyloss + (~anyloss)*(x_ma.shape[1]-1)
        # finally, define the DA for positive x:
h_pos = x_ma[0][ind_pos]
v_pos = y[:,0]
aper_pos = _np.fliplr(_np.vstack([h_pos,v_pos]))
area_pos = _np.trapz(h_pos,x=v_pos)
aper = _np.hstack([aper_neg,aper_pos])
        area = -np.trapz(aper[0],x=aper[1])
import os
import sys
import time
import numpy as np
import scipy.interpolate as intpl
from PyQt5.QtCore import QObject, QThread, pyqtSlot, pyqtSignal
path = os.path.realpath('../')
if not path in sys.path:
sys.path.insert(0, path)
from pyNiDAQ import DAQ
import ipdb
import threading
class DcScan(QThread):
'''
---------------------------------------------------------
tw = DcScan(laser = <class>,
wavemeter = <class>,
*args, **kwargs)
Class for DC Transmission characterization of nanodevices.
    For a full description of the algorithm, please
    check the algorithm flow chart in the Docs
----------------------------------------------------------
Args:
laser: laser object to control the equipement (c.f.
pyNFLaser package)
wavemeter: wavemeter object to controll the
equipement (c.f. pyWavemeter package)
If no wavemeter if passed, the class will
work without it and will trust the
wavelength provided by the laser internal
detector
param: dictionary with 'laserParam',
'daqParam' and 'wlmParam' keys
laserParam keys (cf laser properties):
- scan_speed
- scan_limit
wlmParam keys (cf wavemeter properties):
- channel
- exposure
daqParam keys:
- read_ch
- write_ch
- dev
Methods:
self.run: See method doc string
pyQtSlot emissions:
self._DCscan <tupple>:
[0] Code for where the program is in
the algorithm:
-1 : no scan / end of scan
0 : setting up the start of scan
1 : scanning
                    2 : return wavemeter at beginning of scan
                    3 : return wavemeter at end of scan
[1] Laser current wavelength
[2] Progress bar current %
[3] Blinking State
------------------------------------------------------
<NAME> - NIST - 2018
------------------------------------------------------
'''
__author__ = "<NAME>"
__copyright__ = "Copyright 2018, NIST"
__credits__ = ["<NAME>",
"<NAME>",
"<NAME>"]
__license__ = "GPL"
__version__ = "1.0.0"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__status__ = "Development"
_DCscan = pyqtSignal(tuple)
def __init__(self, **kwargs):
QThread.__init__(self)
self.laser = kwargs.get('laser', None)
self.wavemeter = kwargs.get('wavemeter',None)
self.param = kwargs.get('param',None)
self._debug = kwargs.get('debug', False)
# Misc
self._is_Running = False
def run(self):
# -- Fetch main parameters --
laser = self.laser
param = self.param
wavemeter = self.wavemeter
scan_limit = laser.scan_limit
if self._debug:
print("Scan limit: {}nm - {}nm".format(scan_limit[0], scan_limit[1]))
# -- More user friendly notation --
daqParam = param['daqParam']
wlmParam = param.get('wlmParam', None)
# -- Wait until lbd start of scan --
while True:
lbd = laser.lbd
changing = laser._is_changing_lbd
delta = lbd - scan_limit[0]
cdt = not(changing) and np.abs(delta)<0.2
if self._debug:
print('-'*30)
print('Is Changing: {}'.format(changing))
print('Wavelength : {}nm'.format(lbd))
print('Wavelength Difference: {:.3f}nm'.format(delta))
print("Stop loop: {}".format(cdt))
print('-'*30)
if cdt:
break
time.sleep(0.25)
# -- Wait for stabilization --
time.sleep(2)
# -- start the wavemeter if connected --
if wavemeter:
# check connect
if not wavemeter.connected:
if self._debug:
wavemeter.connected = 'show'
else:
wavemeter.connected = 'hide'
time.sleep(5)
wavemeter.pulsemode = False
wavemeter.widemode = False
wavemeter.fastmode = False
wavemeter.channel = wlmParam['channel']
wavemeter.exposure = 'auto'
# -- Get First wavelength of the scan --
if wavemeter:
# get it through the wavemeter
wavemeter.acquire = True
print('Getting wavemeter')
time.sleep(2.5)
lbd_start = wavemeter.lbd
wavemeter.acquire = False
print("Wavelength end {:.3f}".format(lbd_start))
self._DCscan.emit((2, lbd_start, 0, None))
if self._debug:
print('Wavemeter start scan: {}nm'.format(lbd_start))
else:
self._DCscan.emit((2, laser.lbd, 0, None))
# -- Setup DAQ for acquisition and create the reading
#. THread --
scan_time = np.diff(scan_limit)[0]/laser.scan_speed
if self._debug:
print('Scan time: {}s'.format(scan_time))
daq = DAQ(t_end = scan_time, dev = daqParam['dev'])
daq.SetupRead(read_ch=daqParam['read_ch'])
self._done_get_data = False
def _GetData():
self.time_start_daq = time.time()
self.data = daq.readtask.read(number_of_samples_per_channel=int(daq.Npts))
self.time_stop_daq = time.time()
self._done_get_data = True
daq.readtask.stop()
daq.readtask.close()
self.threadDAQdata = threading.Thread(target=_GetData, args=())
self.threadDAQdata.daemon = True
# -- Fetch wavelength and progress of scan --
lbd_probe = []
time_probe = []
t_step = scan_time/1000
if self._debug:
print('Begining of Scan: '+ '-'*30)
print('Time sleeping between steps: {}'.format(t_step))
# -- Start laser scan --
self._is_Running = True
laser.scan = True
self.threadDAQdata.start()
while laser._is_scaning:
lbd_scan = laser._lbdscan
lbd_probe.append(lbd_scan)
time_probe.append(time.time())
prgs = np.floor(100*(lbd_scan-scan_limit[0])/np.diff(scan_limit)[0])
if self._debug:
print('Wavelength: {}nm'.format(lbd_scan))
print('Progress: {}%'.format(prgs))
self._DCscan.emit((1, lbd_scan, prgs, None))
time.sleep(t_step)
if self._debug:
print('End of Scan: '+ '-'*30)
# -- Scan Finished, get Data --
while not self._done_get_data:
if self._debug:
print('Waiting for daq')
time.sleep(0.1)
# -- Get Last wavelength of the scan --
if wavemeter:
wavemeter.acquire = True
time.sleep(2)
lbd_end = wavemeter.lbd
wavemeter.acquire = False
self._DCscan.emit((3, lbd_end, 100, None))
if self._debug:
print('Wavemeter end scan: {}nm'.format(lbd_end))
else:
self._DCscan.emit((3, laser.lbd, 100, None))
# -- Processing of the Data --
# -----------------------------------------------------------------
# -- Better notation of Data --
        data = np.array(self.data)
"""
:mod:`operalib.ridge` implements Operator-valued Naive Online
Regularised Risk Minimization Algorithm (ONORMA)
"""
# Author: <NAME> <<EMAIL>> with help from
# the scikit-learn community.
# License: MIT
from numpy import eye, empty, ravel, vstack, zeros
from sklearn.base import BaseEstimator, RegressorMixin
from sklearn.metrics.pairwise import rbf_kernel
from sklearn.utils import check_X_y, check_array
from sklearn.utils.validation import check_is_fitted
from .metrics import first_periodic_kernel
from .kernels import DecomposableKernel, DotProductKernel
from .learningrate import Constant, InvScaling
# When adding a new kernel, update this table and the _get_kernel_map
# method
PAIRWISE_KERNEL_FUNCTIONS = {
'DGauss': DecomposableKernel,
'DotProduct': DotProductKernel,
    'DPeriodic': DecomposableKernel}
# When adding a new learning rate, update this table and the _get_learning_rate
# method
LEARNING_RATE_FUNCTIONS = {
'constant': Constant,
'invscaling': InvScaling}
class ONORMA(BaseEstimator, RegressorMixin):
"""Operator-valued Naive Online Regularised Risk
Minimization Algorithm .
Operator-Valued kernel Operator-valued Naive Online Regularised Risk
Minimization Algorithm (ONORMA) extends the standard kernel-based
online learning algorithm NORMA from scalar-valued to operator-valued
setting. The truncation is currently not implemented.
Attributes
----------
coef_ : array, shape = [n_features] or [n_targets, n_features]
Weight vector(s) in kernel space
linop_ : callable
Callable which associate to the training points X the Gram matrix (the
Gram matrix being a LinearOperator)
A_ : array, shape = [n_targets, n_targets]
Set when Linear operator used by the decomposable kernel is default or
None.
T_ : integer
Total number of iterations
n_ : integer
Total number of datapoints
p_ : integer
Dimensionality of the outputs
References
----------
* Audiffren, Julien, and <NAME>.
"Online learning with multiple operator-valued kernels."
arXiv preprint arXiv:1311.0222 (2013).
* Kivinen, Jyrki, <NAME>, and <NAME>.
"Online learning with kernels."
IEEE transactions on signal processing 52.8 (2004): 2165-2176.
See also
--------
sklearn.Ridge
Linear ridge regression.
sklearn.KernelRidge
Kernel ridge regression.
sklearn.SVR
Support Vector Regression implemented using libsvm.
Examples
--------
>>> import operalib as ovk
>>> import numpy as np
>>> n_samples, n_features, n_targets = 10, 5, 5
>>> rng = np.random.RandomState(0)
>>> y = rng.randn(n_samples, n_targets)
>>> X = rng.randn(n_samples, n_features)
>>> clf = ovk.ONORMA('DGauss', lbda=1.)
>>> clf.fit(X, y) # doctest: +NORMALIZE_WHITESPACE +ELLIPSIS
ONORMA(A=None, T=None, eta=1.0, gamma=None, kernel='DGauss', lbda=1.0,
learning_rate='invscaling', mu=0.2, power=0.5, random_state=0,
shuffle=True, truncation=None)
"""
def __init__(self, kernel='DGauss', lbda=1e-5,
T=None, A=None, learning_rate='invscaling', truncation=None,
gamma=None, mu=0.2, eta=1., power=.5,
shuffle=True, random_state=0):
"""Initialize ONORMA.
Parameters
----------
kernel : {string, callable}, default='DGauss'
Kernel mapping used internally. A callable should accept two
arguments, and should return a LinearOperator.
lbda : {float}, default=1e-5
Small positive values of lbda improve the conditioning of the
problem and reduce the variance of the estimates. Lbda corresponds
to ``(2*C)^-1`` in other linear models such as LogisticRegression
or LinearSVC.
T : {integer}, default=None
Number of iterations.
A : {LinearOperator, array-like, sparse matrix}, default=None
Linear operator used by the decomposable kernel. If default is
None, wich is set to identity matrix of size y.shape[1] when
fitting.
mu : {array, LinearOperator}, shape = [n_targets, n_targets]
Tradeoff between shared and independant components in the Dot
Product kernel.
learning_rate : {Callable}
Learning rate, a function that return the step size at given step
truncation : learning_rate : {Callable}
TODO
gamma : {float}, default=None.
Gamma parameter for the Decomposable Gaussian kernel.
Ignored by other kernels.
"""
self.kernel = kernel
self.lbda = lbda
self.T = T
self.A = A
self.learning_rate = learning_rate
self.truncation = truncation
self.gamma = gamma
self.mu = mu
self.shuffle = shuffle
self.random_state = random_state
self.eta = eta
self.power = power
def _validate_params(self):
# check on self.kernel is performed in method __get_kernel
if self.lbda < 0:
raise ValueError('lbda must be a positive scalar')
if self.mu < 0 or self.mu > 1:
raise ValueError('mu must be a scalar between 0. and 1.')
if self.T is not None:
if self.T <= 0:
raise ValueError('T must be a positive integer')
# if self.A < 0: # Check whether A is S PD would be really expensive
# raise ValueError('A must be a symmetric positive operator')
if self.gamma is not None:
if self.gamma < 0:
raise ValueError('gamma must be positive or default (None)')
def _default_decomposable_op(self, y):
if self.A is not None:
return self.A
elif y.ndim == 2:
return eye(y.shape[1])
else:
return eye(1)
def _get_kernel_map(self, X, y):
# When adding a new kernel, update this table and the _get_kernel_map
# method
if callable(self.kernel):
ov_kernel = self.kernel
elif type(self.kernel) is str:
# 1) check string and assign the right parameters
if self.kernel == 'DGauss':
self.A_ = self._default_decomposable_op(y)
kernel_params = {'A': self.A_, 'scalar_kernel': rbf_kernel,
'scalar_kernel_params': {'gamma': self.gamma}}
elif self.kernel == 'DotProduct':
kernel_params = {'mu': self.mu, 'p': y.shape[1]}
elif self.kernel == 'DPeriodic':
self.A_ = self._default_decomposable_op(y)
self.period_ = self._default_period(X, y)
kernel_params = {'A': self.A_,
'scalar_kernel': first_periodic_kernel,
'scalar_kernel_params': {'gamma': self.theta,
'period':
self.period_}, }
else:
                raise NotImplementedError('unsupported kernel')
# 2) Uses lookup table to select the right kernel from string
ov_kernel = PAIRWISE_KERNEL_FUNCTIONS[self.kernel](**kernel_params)
else:
            raise NotImplementedError('unsupported kernel')
return ov_kernel
def _get_learning_rate(self):
if callable(self.learning_rate):
return self.learning_rate
elif type(self.learning_rate) is str:
# 1) check string and assign the right parameters
if self.learning_rate == 'constant':
lr_params = {'eta': self.eta}
elif self.learning_rate == 'invscaling':
lr_params = {'eta': self.eta, 'power': self.power}
else:
                raise NotImplementedError('unsupported learning rate')
lr = LEARNING_RATE_FUNCTIONS[self.learning_rate](**lr_params)
else:
            raise NotImplementedError('unsupported learning rate')
return lr
def _decision_function(self, X):
self.linop_ = self.ov_kernel_(self.X_seen_)
pred = self.linop_(X) * self.coefs_[:self.t_ * self.p_]
return pred.reshape(X.shape[0], -1) if self.linop_.p > 1 else pred
def predict(self, X):
"""Predict using ONORMA model.
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Samples.
Returns
-------
C : {array}, shape = [n_samples] or [n_samples, n_targets]
Returns predicted values.
"""
check_is_fitted(self, ['coefs_', 't_', 'p_',
'X_seen_', 'y_seen_'], all_or_any=all)
X = check_array(X)
linop = self.ov_kernel_(self.X_seen_)
pred = linop(X) * self.coefs_[:self.t_ * self.p_]
return pred.reshape(X.shape[0], -1) if linop.p > 1 else pred
def partial_fit(self, X, y):
"""Partial fit of ONORMA model.
This method is usefull for online learning for instance.
Must call
Parameters
----------
X : {array-like, sparse matrix}, shape = [n_samples, n_features]
Training data.
y : {array-like}, shape = [n_samples] or [n_samples, n_targets]
Target values.
Returns
-------
self : returns an instance of self.
"""
n = 1 if X.ndim <= 1 else X.shape[0]
Xt = X.reshape(n, -1) if X.ndim <= 1 else X
yt = y.reshape(n, -1) if y.ndim <= 1 else y
init = not (hasattr(self, 'coefs_') and hasattr(self, 't_'))
if hasattr(self, 't_'):
init = self.t_ == 0
if init:
Xtt = Xt[0, :].reshape(1, -1)
ytt = yt[0, :].reshape(1, -1)
self.d_ = Xtt.shape[1]
self.p_ = ytt.shape[1]
self.learning_rate_ = self._get_learning_rate()
self.coefs_ = empty(self.p_)
eta_t = self.learning_rate_(1)
self.coefs_[:self.p_] = -ravel(eta_t * (0 - ytt))
self.X_seen_ = Xtt
self.y_seen_ = ytt
self.ov_kernel_ = self._get_kernel_map(self.X_seen_, self.y_seen_)
self.t_ = 1
# Reshape if self.coefs_ has not been preallocated
self.coefs_.resize((self.t_ + (n - 1 if init else n)) * self.p_)
for idx in range(1 if init else 0, n):
Xtt = Xt[idx, :].reshape(1, -1)
ytt = yt[idx, :].reshape(1, -1)
eta_t = self.learning_rate_(self.t_ + 1)
# Update weights
self.coefs_[self.t_ * self.p_:(self.t_ + 1) * self.p_] = -ravel(
eta_t * (self._decision_function(Xtt) - ytt))
self.coefs_[:self.t_ * self.p_] *= (1. - eta_t * self.lbda / 2)
# Update seen data
            self.X_seen_ = vstack((self.X_seen_, Xtt))
"""Test CZT package.
To run:
pytest test_czt.py -v
To run (with coverage):
pytest --cov . --cov-report html test_czt.py
"""
import numpy as np
import matplotlib.pyplot as plt
import pytest
import scipy
import czt
def test_compare_different_czt_methods(debug=False):
print("Compare different CZT calculation methods")
# Create time-domain signal
t = np.arange(0, 20e-3, 1e-4)
x = _signal_model(t)
# Calculate CZT using different methods
X_czt0 = _czt(x)
X_czt1 = czt.czt(x, simple=True)
X_czt2 = czt.czt(x, t_method='ce')
X_czt3 = czt.czt(x, t_method='pd')
X_czt4 = czt.czt(x, t_method='mm')
X_czt5 = czt.czt(x, t_method='scipy')
X_czt6 = czt.czt(x, t_method='ce', f_method='recursive')
X_czt7 = czt.czt(x, t_method='pd', f_method='recursive')
# Try unsupported t_method
with pytest.raises(ValueError):
czt.czt(x, t_method='unsupported_t_method')
# Try unsupported f_method
with pytest.raises(ValueError):
czt.czt(x, t_method='ce', f_method='unsupported_f_method')
# Plot for debugging purposes
if debug:
plt.figure()
plt.title("Imaginary component")
plt.plot(X_czt1.imag, label="simple")
plt.plot(X_czt2.imag, label="ce")
plt.plot(X_czt3.imag, label="pd")
plt.plot(X_czt4.imag, label="mm")
plt.plot(X_czt5.imag, label="scipy")
plt.plot(X_czt6.imag, label="ce / recursive")
plt.plot(X_czt7.imag, label="pd / recursive")
plt.legend()
plt.figure()
plt.title("Real component")
plt.plot(X_czt1.real, label="simple")
plt.plot(X_czt2.real, label="ce")
plt.plot(X_czt3.real, label="pd")
plt.plot(X_czt4.real, label="mm")
plt.plot(X_czt5.real, label="scipy")
plt.plot(X_czt6.real, label="ce / recursive")
plt.plot(X_czt7.real, label="pd / recursive")
plt.legend()
plt.figure()
plt.title("Absolute value")
plt.plot(np.abs(X_czt1), label="simple")
plt.plot(np.abs(X_czt2), label="ce")
plt.plot(np.abs(X_czt3), label="pd")
plt.plot(np.abs(X_czt4), label="mm")
plt.plot(np.abs(X_czt5), label="scipy")
plt.plot(np.abs(X_czt6), label="ce / recursive")
plt.plot(np.abs(X_czt7), label="pd / recursive")
plt.legend()
plt.show()
# Compare Toeplitz matrix multiplication methods
np.testing.assert_almost_equal(X_czt0, X_czt1, decimal=12)
np.testing.assert_almost_equal(X_czt0, X_czt2, decimal=12)
np.testing.assert_almost_equal(X_czt0, X_czt3, decimal=12)
np.testing.assert_almost_equal(X_czt0, X_czt4, decimal=12)
np.testing.assert_almost_equal(X_czt0, X_czt5, decimal=12)
# Compare FFT methods
np.testing.assert_almost_equal(X_czt1, X_czt6, decimal=12)
np.testing.assert_almost_equal(X_czt1, X_czt7, decimal=12)
def test_compare_czt_fft_dft(debug=False):
print("Compare CZT, FFT and DFT")
# Create time-domain signal
t = np.arange(0, 20e-3 + 1e-10, 1e-4)
x = _signal_model(t)
dt = t[1] - t[0]
fs = 1 / dt
# Frequency sweep
f = np.fft.fftshift(np.fft.fftfreq(len(t)) * fs)
# CZT (defaults to FFT settings)
X_czt = np.fft.fftshift(czt.czt(x))
# FFT
X_fft = np.fft.fftshift(np.fft.fft(x))
# DFT (defaults to FFT settings)
_, X_dft = czt.dft(t, x)
# Plot for debugging purposes
if debug:
plt.figure()
plt.title("Imaginary")
plt.plot(f, X_czt.imag, label='CZT')
plt.plot(f, X_fft.imag, label='FFT', ls='--')
plt.plot(f, X_dft.imag, label='DFT', ls='--')
plt.legend()
plt.figure()
plt.title("Real")
plt.plot(f, X_czt.real, label='CZT')
plt.plot(f, X_fft.real, label='FFT', ls='--')
plt.plot(f, X_dft.real, label='DFT', ls='--')
plt.legend()
plt.figure()
plt.title("Absolute")
plt.plot(f, np.abs(X_czt), label='CZT')
plt.plot(f, np.abs(X_fft), label='FFT', ls='--')
plt.plot(f, np.abs(X_dft), label='DFT', ls='--')
plt.legend()
plt.show()
# Compare
np.testing.assert_almost_equal(X_czt, X_fft, decimal=12)
np.testing.assert_almost_equal(X_czt, X_dft, decimal=12)
def test_czt_to_iczt(debug=False):
print("Test CZT -> ICZT")
# Create time-domain signal
t = np.arange(0, 20e-3, 1e-4)
x = _signal_model(t)
# CZT (defaults to FFT)
X_czt = czt.czt(x)
# ICZT
x_iczt1 = czt.iczt(X_czt)
x_iczt2 = czt.iczt(X_czt, simple=False)
# Try unsupported t_method
with pytest.raises(ValueError):
czt.iczt(X_czt, simple=False, t_method='unsupported_t_method')
# Try M != N
with pytest.raises(ValueError):
czt.iczt(X_czt, simple=False, N=len(X_czt)+1)
# Plot for debugging purposes
if debug:
plt.figure()
plt.title("Imaginary")
plt.plot(t*1e3, x.imag)
plt.plot(t*1e3, x_iczt1.imag)
plt.plot(t*1e3, x_iczt2.imag)
plt.figure()
plt.title("Real")
plt.plot(t*1e3, x.real)
plt.plot(t*1e3, x_iczt1.real)
plt.plot(t*1e3, x_iczt2.real)
plt.show()
# Compare
np.testing.assert_almost_equal(x, x_iczt1, decimal=12)
np.testing.assert_almost_equal(x, x_iczt2, decimal=12)
def test_time_to_freq_to_time(debug=False):
print("Test time -> freq -> time")
# Create time-domain data
t1 = np.arange(0, 20e-3, 1e-4)
x1 = _signal_model(t1)
# Frequency domain
f, X = czt.time2freq(t1, x1)
# Back to time domain
t2, x2 = czt.freq2time(f, X, t=t1)
# Plot for debugging purposes
if debug:
plt.figure()
plt.title("Imaginary")
plt.plot(t1, x1.imag, 'k', label='Original')
plt.plot(t2, x2.imag, 'r', label='Recovered')
plt.legend()
plt.figure()
plt.title("Real")
plt.plot(t1, x1.real, 'k', label='Original')
plt.plot(t2, x2.real, 'r', label='Recovered')
plt.legend()
plt.show()
# Compare
np.testing.assert_almost_equal(x1, x2, decimal=12)
def test_compare_iczt_idft(debug=False):
print("Compare ICZT and IDFT")
# Create time-domain signal
t = np.arange(0, 20e-3, 1e-4)
x = _signal_model(t)
# Frequency domain using DFT
f, X = czt.dft(t, x)
# Get time-domain using ICZT
_, x_iczt = czt.freq2time(f, X, t)
# Get time-domain using IDFT
_, x_idft = czt.idft(f, X, t)
# Plot for debugging purposes
if debug:
plt.figure()
plt.title("Imaginary")
plt.plot(t, x.imag, 'k', label="Original")
plt.plot(t, x_iczt.imag, 'g:', label="ICZT")
plt.plot(t, x_idft.imag, 'r--', label="IDFT")
plt.legend()
plt.figure()
plt.title("Real")
plt.plot(t, x.real, 'k', label="Original")
plt.plot(t, x_iczt.real, 'g:', label="ICZT")
plt.plot(t, x_idft.real, 'r--', label="IDFT")
plt.legend()
plt.figure()
plt.title("Real: error")
plt.plot(t, x_iczt.real - x.real, 'k', label="Original")
plt.show()
# Compare
np.testing.assert_almost_equal(x_iczt, x, decimal=12)
np.testing.assert_almost_equal(x_idft, x, decimal=12)
np.testing.assert_almost_equal(x_iczt, x_idft, decimal=12)
def test_frequency_zoom(debug=False):
print("Test frequency-domain zoom")
# Create time-domain signal
t = np.arange(0, 20e-3 + 1e-10, 1e-4)
x = _signal_model(t)
dt = t[1] - t[0]
# Standard FFT frequency range
f = np.fft.fftshift(np.fft.fftfreq(len(t), dt))
# DFT
f, X_dft1 = czt.dft(t, x, f=f)
# CZT
f, X_czt1 = czt.time2freq(t, x, f=f)
# Truncate
idx1, idx2 = 110, 180
f_zoom = f[idx1:idx2]
X_czt1, X_dft1 = X_czt1[idx1:idx2], X_dft1[idx1:idx2]
# Zoom DFT
_, X_dft2 = czt.dft(t, x, f_zoom)
# Zoom CZT
_, X_czt2 = czt.time2freq(t, x, f_zoom)
# Plot for debugging purposes
if debug:
plt.figure()
plt.title("Imaginary")
plt.plot(f_zoom, np.imag(X_czt1), 'c', label='CZT')
plt.plot(f_zoom, np.imag(X_dft1), 'k--', label='DFT')
plt.plot(f_zoom, np.imag(X_czt2), 'r--', label='CZT (zoom)')
plt.plot(f_zoom, np.imag(X_dft2), 'b:', label='DFT (zoom)')
plt.legend()
plt.figure()
plt.title("Real")
plt.plot(f_zoom, np.real(X_czt1), 'c', label='CZT')
plt.plot(f_zoom, np.real(X_dft1), 'k--', label='DFT')
plt.plot(f_zoom, np.real(X_czt2), 'r--', label='CZT (zoom)')
plt.plot(f_zoom, np.real(X_dft2), 'b:', label='DFT (zoom)')
plt.legend()
plt.figure()
plt.title("Absolute")
plt.plot(f_zoom, np.abs(X_czt1), 'c', label='CZT')
plt.plot(f_zoom, np.abs(X_dft1), 'k--', label='DFT')
plt.plot(f_zoom, np.abs(X_czt2), 'r--', label='CZT (zoom)')
        plt.plot(f_zoom, np.abs(X_dft2), 'b:', label='DFT (zoom)')
# -*- coding: utf-8 -*-
"""
Spyder Editor
This is a temporary script file.
"""
import scipy.io as sio
import tensorflow as tf
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from keras.models import Sequential
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
from keras.callbacks import History
from keras import optimizers
#Load data ------------------------------------------------------
def loadMATData(file1):
return sio.loadmat(file1)
#Load Data-------------------------------------------------------
data = loadMATData('ex3data1.mat')
features = data['X']
labels = data['y']
filter = labels ==10
labels[filter] = 0
#shuffle data---------------------------------------------------
ran = np.arange(features.shape[0])
from pymc import *
import numpy as np
with Model() as model:
lam = Exponential('lam', 1)
failure = np.array([0, 1])
    value = np.array([1, 0])
import ray
from ray.data.extensions import TensorArray, TensorDtype
import torchvision
from torchvision import transforms as T
import numpy as np
import pandas as pd
from .dataset_tools import *
from torch.utils.data import DataLoader
import math
from tqdm.auto import tqdm
import torch
from .embeddings import make_clip_transform, ImTransform, XEmbedding
from .vloop_dataset_loaders import get_class_ev
from .dataset_search_terms import *
import pyroaring as pr
from operator import itemgetter
import PIL
def _postprocess_results(acc):
flat_acc = {'iis':[], 'jjs':[], 'dbidx':[], 'vecs':[], 'zoom_factor':[], 'zoom_level':[]}
flat_vecs = []
#{'accs':accs, 'sf':sf, 'dbidx':dbidx, 'zoom_level':zoom_level}
for item in acc:
acc0,sf,dbidx,zl = itemgetter('accs', 'sf', 'dbidx', 'zoom_level')(item)
acc0 = acc0.squeeze(0)
acc0 = acc0.transpose((1,2,0))
iis, jjs = np.meshgrid(range(acc0.shape[0]), range(acc0.shape[1]), indexing='ij')
#iis = iis.reshape(-1, acc0)
iis = iis.reshape(-1)
jjs = jjs.reshape(-1)
acc0 = acc0.reshape(-1,acc0.shape[-1])
imids = np.ones_like(iis)*dbidx
zf = np.ones_like(iis)*(1./sf)
zl = np.ones_like(iis)*zl
flat_acc['iis'].append(iis)
flat_acc['jjs'].append(jjs)
flat_acc['dbidx'].append(imids)
flat_acc['vecs'].append(acc0)
flat_acc['zoom_factor'].append(zf)
flat_acc['zoom_level'].append(zl)
flat = {}
for k,v in flat_acc.items():
flat[k] = np.concatenate(v)
vecs = flat['vecs']
del flat['vecs']
vec_meta = pd.DataFrame(flat)
vecs = vecs.astype('float32')
vecs = vecs/(np.linalg.norm(vecs, axis=-1, keepdims=True) + 1e-6)
vec_meta = vec_meta.assign(file_path=item['file_path'])
vec_meta = vec_meta.assign(vectors=TensorArray(vecs))
return vec_meta
def preprocess_ds(localxclip, ds, debug=False):
txds = TxDataset(ds, tx=pyramid_tx(non_resized_transform(224)))
acc = []
if debug:
num_workers=0
else:
num_workers=4
for dbidx,tup in enumerate(tqdm(DataLoader(txds, num_workers=num_workers, shuffle=False, batch_size=1, collate_fn=lambda x : x),
total=len(txds))):
[(ims, sfs)] = tup
for zoom_level,(im,sf) in enumerate(zip(ims,sfs),start=1):
accs= localxclip.from_image(preprocessed_image=im, pooled=False)
acc.append((accs, sf, dbidx, zoom_level))
return _postprocess_results(acc)
def pyramid_centered(im,i,j):
cy=(i+1)*112.
cx=(j+1)*112.
scales = [112,224,448]
crs = []
w,h = im.size
for s in scales:
tup = (np.clip(cx-s,0,w), np.clip(cy-s,0,h), np.clip(cx+s,0,w), np.clip(cy+s,0,h))
crs.append(im.crop(tup))
return crs
def zoom_out(im : PIL.Image, factor=.5, abs_min=224):
"""
returns image one zoom level out, and the scale factor used
"""
w,h=im.size
mindim = min(w,h)
target_size = max(math.floor(mindim*factor), abs_min)
if target_size * math.sqrt(factor) <= abs_min: # if the target size is almost as large as the image,
# jump to that scale instead
target_size = abs_min
target_factor = target_size/mindim
target_w = max(math.floor(w*target_factor),224) # corrects any rounding effects that make the size 223
target_h = max(math.floor(h*target_factor),224)
im1 = im.resize((target_w, target_h))
assert min(im1.size) >= abs_min
return im1, target_factor
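# Illustrative usage (not part of the original source); the file name is
# hypothetical:
#
#   im = PIL.Image.open('frame.jpg')
#   smaller, scale = zoom_out(im, factor=.5)   # shorter side halved, never below 224 px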
def rescale(im, scale, min_size):
(w,h) = im.size
target_w = max(math.floor(w*scale),min_size)
target_h = max(math.floor(h*scale),min_size)
return im.resize(size=(target_w, target_h), resample=PIL.Image.BILINEAR)
def pyramid(im, factor=.71, abs_min=224):
## if im size is less tha the minimum, expand image to fit minimum
## try following: orig size and abs min size give you bounds
assert factor < 1.
factor = 1./factor
size = min(im.size)
end_size = abs_min
start_size = max(size, abs_min)
start_scale = start_size/size
end_scale = end_size/size
## adjust start scale
ntimes = math.ceil(math.log(start_scale/end_scale)/math.log(factor))
start_size = math.ceil(math.exp(ntimes*math.log(factor) + math.log(abs_min)))
start_scale = start_size/size
factors = np.geomspace(start=start_scale, stop=end_scale, num=ntimes+1, endpoint=True).tolist()
ims = []
for sf in factors:
imout = rescale(im, scale=sf, min_size=abs_min)
ims.append(imout)
assert len(ims) > 0
assert min(ims[0].size) >= abs_min
assert min(ims[-1].size) == abs_min
return ims, factors
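# Illustrative usage (not part of the original source): build a multi-scale
# pyramid whose smallest level has a 224-px short side.
#
#   ims, factors = pyramid(im, factor=.71, abs_min=224)
#   # ims[-1] has min(ims[-1].size) == 224; factors[i] is the scale applied
#   # to the original image to produce ims[i]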
def trim_edge(target_divisor=112):
def fun(im1):
w1,h1 = im1.size
spare_h = h1 % target_divisor
spare_w = w1 % target_divisor
im1 = im1.crop((0,0,w1-spare_w, h1-spare_h))
return im1
return fun
class TrimEdge:
def __init__(self, target_divisor=112):
self.target_divisor = target_divisor
def __call__(self, im1):
w1,h1 = im1.size
spare_h = h1 % self.target_divisor
spare_w = w1 % self.target_divisor
im1 = im1.crop((0,0,w1-spare_w, h1-spare_h))
return im1
def torgb(image):
return image.convert('RGB')
def tofloat16(x):
return x.type(torch.float16)
def non_resized_transform(base_size):
return ImTransform(visual_xforms=[torgb],
tensor_xforms=[T.ToTensor(),
T.Normalize((0.48145466, 0.4578275, 0.40821073),
(0.26862954, 0.26130258, 0.27577711)),
tofloat16])
class PyramidTx:
def __init__(self, tx, factor, min_size):
self.tx = tx
self.factor = factor
self.min_size = min_size
def __call__(self, im):
ims,sfs= pyramid(im, factor=self.factor, abs_min=self.min_size)
ppims = []
for im in ims:
ppims.append(self.tx(im))
return ppims, sfs
def pyramid_tx(tx):
def fn(im):
ims,sfs= pyramid(im)
ppims = []
for im in ims:
ppims.append(tx(im))
return ppims, sfs
return fn
def augment_score(db,tup,qvec):
im = db.raw[tup.dbidx]
ims = pyramid(im, tup.iis, tup.jjs)
tx = make_clip_transform(n_px=224, square_crop=True)
vecs = []
for im in ims:
pim = tx(im)
emb = db.embedding.from_image(preprocessed_image=pim.float())
emb = emb/np.linalg.norm(emb, axis=-1)
vecs.append(emb)
vecs = np.concatenate(vecs)
#print(np.linalg.norm(vecs,axis=-1))
augscore = (vecs @ qvec.reshape(-1)).mean()
return augscore
import torchvision.ops
#torchvision.ops.box_iou()
def box_iou(tup, boxes):
b1 = torch.from_numpy(np.stack([tup.x1.values, tup.y1.values, tup.x2.values, tup.y2.values], axis=1))
    bxdata = np.stack([boxes.x1.values, boxes.y1.values, boxes.x2.values,boxes.y2.values], axis=1)
# -*- coding: utf-8 -*-
"""
Created on Wed Feb 12 2020
Class to read and manipulate CryoSat-2 waveform data
Reads CryoSat Level-1b data products from baselines A, B and C
Reads CryoSat Level-1b netCDF4 data products from baseline D
Supported CryoSat Modes: LRM, SAR, SARin, FDM, SID, GDR
INPUTS:
full_filename: full path of CryoSat .DBL or .nc file
PYTHON DEPENDENCIES:
numpy: Scientific Computing Tools For Python
http://www.numpy.org
http://www.scipy.org/NumPy_for_Matlab_Users
netCDF4: Python interface to the netCDF C library
https://unidata.github.io/netcdf4-python/netCDF4/index.html
UPDATE HISTORY:
Updated 08/2020: flake8 compatible binary regular expression strings
Forked 02/2020 from read_cryosat_L1b.py
Updated 11/2019: empty placeholder dictionary for baseline D DSD headers
Updated 09/2019: added netCDF4 read function for baseline D
Updated 04/2019: USO correction signed 32 bit int
Updated 10/2018: updated header read functions for python3
Updated 05/2016: using __future__ print and division functions
Written 03/2016
"""
from __future__ import print_function
from __future__ import division
import numpy as np
import pointCollection as pc
import netCDF4
import re
import os
class data(pc.data):
np.seterr(invalid='ignore')
def __default_field_dict__(self):
"""
Define the default fields that get read from the CryoSat-2 file
"""
field_dict = {}
field_dict['Location'] = ['days_J2k','Day','Second','Micsec','USO_Corr',
'Mode_ID','SSC','Inst_config','Rec_Count','Lat','Lon','Alt','Alt_rate',
'Sat_velocity','Real_beam','Baseline','ST_ID','Roll','Pitch','Yaw','MCD']
field_dict['Data'] = ['TD', 'H_0','COR2','LAI','FAI','AGC_CH1','AGC_CH2',
'TR_gain_CH1','TR_gain_CH2','TX_Power','Doppler_range','TR_inst_range',
'R_inst_range','TR_inst_gain','R_inst_gain','Internal_phase',
'External_phase','Noise_power','Phase_slope']
field_dict['Geometry'] = ['dryTrop','wetTrop','InvBar','DAC','Iono_GIM',
'Iono_model','ocTideElv','lpeTideElv','olTideElv','seTideElv','gpTideElv',
'Surf_type','Corr_status','Corr_error']
field_dict['Waveform_20Hz'] = ['Waveform','Linear_Wfm_Multiplier',
'Power2_Wfm_Multiplier','N_avg_echoes']
field_dict['METADATA'] = ['MPH','SPH']
return field_dict
def from_dbl(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from binary formats
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
# CryoSat-2 Mode record sizes
i_size_timestamp = 12
n_SARIN_BC_RW = 1024
n_SARIN_RW = 512
n_SAR_BC_RW = 256
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# check baseline from file to set i_record_size and allocation function
if (BASELINE == 'C'):
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2 + 6*4 + 3*3*4 + 3*2 + 4*4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_BC_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_BC_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_BC_RW*2 + \
n_SARIN_BC_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baseline C
read_cryosat_variables = self.cryosat_baseline_C
else:
# calculate total record sizes of each dataset group
i_size_timegroup = i_size_timestamp + 4 + 2*2+ 6*4 + 3*3*4 + 4
i_size_measuregroup = 8 + 4*17 + 8
i_size_external_corr = 4*13 + 12
i_size_1Hz_LRM = i_size_timestamp + 3*4 + 8 + n_LRM_RW*2 + 2*4 + 2*2
i_size_1Hz_SAR = i_size_timestamp + 4*3 + 8 + n_SAR_RW*2 + 4 + 4 + 2 + 2
i_size_1Hz_SARIN = i_size_timestamp + 4*3 + 8 + n_SARIN_RW*2 + 4 + 4 + 2 + 2
i_size_LRM_waveform = n_LRM_RW*2 + 4 + 4 + 2 + 2
i_size_SAR_waveform = n_SAR_RW*2 + 4 + 4 + 2 + 2 + n_BeamBehaviourParams*2
i_size_SARIN_waveform = n_SARIN_RW*2 + 4 + 4 + 2 + 2 + n_SARIN_RW*2 + \
n_SARIN_RW*4 + n_BeamBehaviourParams*2
# Low-Resolution Mode Record Size
i_record_size_LRM_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_LRM_waveform) + i_size_external_corr + \
i_size_1Hz_LRM
# SAR Mode Record Size
i_record_size_SAR_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SAR_waveform) + i_size_external_corr + \
i_size_1Hz_SAR
# SARIN Mode Record Size
i_record_size_SARIN_L1b = n_blocks * (i_size_timegroup + \
i_size_measuregroup + i_size_SARIN_waveform) + i_size_external_corr + \
i_size_1Hz_SARIN
# set read function for Baselines A and B
read_cryosat_variables = self.cryosat_baseline_AB
# get dataset MODE from PRODUCT portion of file name
# set record sizes and DS_TYPE for read_DSD function
self.MODE = re.findall('(LRM|SAR|SIN)', PRODUCT).pop()
if (self.MODE == 'LRM'):
i_record_size = i_record_size_LRM_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SAR'):
i_record_size = i_record_size_SAR_L1b
DS_TYPE = 'CS_L1B'
elif (self.MODE == 'SIN'):
i_record_size = i_record_size_SARIN_L1b
DS_TYPE = 'CS_L1B'
# read the input file to get file information
fid = os.open(os.path.expanduser(full_filename),os.O_RDONLY)
file_info = os.fstat(fid)
os.close(fid)
# first-guess number of DSRs from the file size and the record size
j_num_DSR = np.int32(file_info.st_size//i_record_size)
# print file information
if verbose:
print(full_filename)
print('{0:d} {1:d} {2:d}'.format(j_num_DSR,file_info.st_size,i_record_size))
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size == file_info.st_size):
print('No Header on file')
print('The number of DSRs is: {0:d}'.format(j_num_DSR))
else:
print('Header on file')
# Check if MPH/SPH/DSD headers
if (j_num_DSR*i_record_size != file_info.st_size):
# If there are MPH/SPH/DSD headers
s_MPH_fields = self.read_MPH(full_filename)
j_sph_size = np.int32(re.findall(r'[-+]?\d+',s_MPH_fields['SPH_SIZE']).pop())
s_SPH_fields = self.read_SPH(full_filename, j_sph_size)
# extract information from DSD fields
s_DSD_fields = self.read_DSD(full_filename, DS_TYPE=DS_TYPE)
# extract DS_OFFSET
j_DS_start = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DS_OFFSET']).pop())
# extract number of DSR in the file
j_num_DSR = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['NUM_DSR']).pop())
# check the record size
j_DSR_size = np.int32(re.findall(r'[-+]?\d+',s_DSD_fields['DSR_SIZE']).pop())
# minimum size is start of the read plus number of records to read
j_check_size = j_DS_start + (j_DSR_size*j_num_DSR)
if verbose:
print('The offset of the DSD is: {0:d} bytes'.format(j_DS_start))
print('The number of DSRs is {0:d}'.format(j_num_DSR))
print('The size of the DSR is {0:d}'.format(j_DSR_size))
# check if invalid file size
if (j_check_size > file_info.st_size):
raise IOError('File size error')
# extract binary data from input CryoSat data file (skip headers)
fid = open(os.path.expanduser(full_filename), 'rb')
cryosat_header = fid.read(j_DS_start)
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# add headers to output dictionary as METADATA
CS_L1b_mds['METADATA'] = {}
CS_L1b_mds['METADATA']['MPH'] = s_MPH_fields
CS_L1b_mds['METADATA']['SPH'] = s_SPH_fields
CS_L1b_mds['METADATA']['DSD'] = s_DSD_fields
# close the input CryoSat binary file
fid.close()
else:
# If there are not MPH/SPH/DSD headers
# extract binary data from input CryoSat data file
fid = open(os.path.expanduser(full_filename), 'rb')
# iterate through CryoSat file and fill output variables
CS_L1b_mds = read_cryosat_variables(fid, j_num_DSR)
# close the input CryoSat binary file
fid.close()
# if unpacking the units
if unpack:
CS_l1b_scale = self.cryosat_scaling_factors()
# for each dictionary key
for group in CS_l1b_scale.keys():
# for each variable
for key,val in CS_L1b_mds[group].items():
# check if val is the 20Hz waveform beam variables
if isinstance(val, dict):
# for each waveform beam variable
for k,v in val.items():
# scale variable
CS_L1b_mds[group][key][k] = CS_l1b_scale[group][key][k]*v.copy()
else:
# scale variable
CS_L1b_mds[group][key] = CS_l1b_scale[group][key]*val.copy()
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
def from_nc(self, full_filename, field_dict=None, unpack=False, verbose=False):
"""
Read CryoSat Level-1b data from netCDF4 format data
"""
# file basename and file extension of input file
fileBasename,fileExtension=os.path.splitext(os.path.basename(full_filename))
# CryoSat file class
# OFFL (Off Line Processing/Systematic)
# NRT_ (Near Real Time)
# RPRO (ReProcessing)
# TEST (Testing)
# TIxx (Stand alone IPF1 testing)
# LTA_ (Long Term Archive)
regex_class = 'OFFL|NRT_|RPRO|TEST|TIxx|LTA_'
# CryoSat mission products
# SIR1SAR_FR: Level 1 FBR SAR Mode (Rx1 Channel)
# SIR2SAR_FR: Level 1 FBR SAR Mode (Rx2 Channel)
# SIR_SIN_FR: Level 1 FBR SARin Mode
# SIR_LRM_1B: Level-1 Product Low Rate Mode
# SIR_FDM_1B: Level-1 Product Fast Delivery Marine Mode
# SIR_SAR_1B: Level-1 SAR Mode
# SIR_SIN_1B: Level-1 SARin Mode
# SIR1LRC11B: Level-1 CAL1 Low Rate Mode (Rx1 Channel)
# SIR2LRC11B: Level-1 CAL1 Low Rate Mode (Rx2 Channel)
# SIR1SAC11B: Level-1 CAL1 SAR Mode (Rx1 Channel)
# SIR2SAC11B: Level-1 CAL1 SAR Mode (Rx2 Channel)
# SIR_SIC11B: Level-1 CAL1 SARin Mode
# SIR_SICC1B: Level-1 CAL1 SARIN Exotic Data
# SIR1SAC21B: Level-1 CAL2 SAR Mode (Rx1 Channel)
# SIR2SAC21B: Level-1 CAL2 SAR Mode (Rx2 Channel)
# SIR1SIC21B: Level-1 CAL2 SARin Mode (Rx1 Channel)
# SIR2SIC21B: Level-1 CAL2 SARin Mode (Rx2 Channel)
# SIR1LRM_0M: LRM and TRK Monitoring Data from Rx 1 Channel
# SIR2LRM_0M: LRM and TRK Monitoring Data from Rx 2 Channel
# SIR1SAR_0M: SAR Monitoring Data from Rx 1 Channel
# SIR2SAR_0M: SAR Monitoring Data from Rx 2 Channel
# SIR_SIN_0M: SARIN Monitoring Data
# SIR_SIC40M: CAL4 Monitoring Data
regex_products = ('SIR1SAR_FR|SIR2SAR_FR|SIR_SIN_FR|SIR_LRM_1B|SIR_FDM_1B|'
'SIR_SAR_1B|SIR_SIN_1B|SIR1LRC11B|SIR2LRC11B|SIR1SAC11B|SIR2SAC11B|'
'SIR_SIC11B|SIR_SICC1B|SIR1SAC21B|SIR2SAC21B|SIR1SIC21B|SIR2SIC21B|'
'SIR1LRM_0M|SIR2LRM_0M|SIR1SAR_0M|SIR2SAR_0M|SIR_SIN_0M|SIR_SIC40M')
# CRYOSAT LEVEL-1b PRODUCTS NAMING RULES
# Mission Identifier
# File Class
# File Product
# Validity Start Date and Time
# Validity Stop Date and Time
# Baseline Identifier
# Version Number
regex_pattern = r'(.*?)_({0})_({1})_(\d+T?\d+)_(\d+T?\d+)_(.*?)(\d+)'
rx = re.compile(regex_pattern.format(regex_class,regex_products),re.VERBOSE)
# extract file information from filename
MI,CLASS,PRODUCT,START,STOP,BASELINE,VERSION=rx.findall(fileBasename).pop()
print(full_filename) if verbose else None
# get dataset MODE from PRODUCT portion of file name
self.MODE = re.findall(r'(LRM|FDM|SAR|SIN)', PRODUCT).pop()
# read Level-1b CryoSat-2 data from netCDF4 file
CS_L1b_mds = self.cryosat_baseline_D(full_filename, unpack=unpack)
# calculate GPS time of CryoSat data (seconds since Jan 6, 1980 00:00:00)
# from TAI time since Jan 1, 2000 00:00:00
GPS_Time = self.calc_GPS_time(CS_L1b_mds['Location']['Day'],
CS_L1b_mds['Location']['Second'], CS_L1b_mds['Location']['Micsec'])
# leap seconds for converting from GPS time to UTC time
leap_seconds = self.count_leap_seconds(GPS_Time)
# calculate dates as J2000 days (UTC)
CS_L1b_mds['Location']['days_J2k'] = (GPS_Time - leap_seconds)/86400.0 - 7300.0
# parameters to extract
if field_dict is None:
field_dict = self.__default_field_dict__()
# extract fields of interest using field dict keys
for group,variables in field_dict.items():
for field in variables:
if field not in self.fields:
self.fields.append(field)
setattr(self, field, CS_L1b_mds[group][field])
# update size and shape of input data
self.__update_size_and_shape__()
# return the data and header text
return self
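# Illustrative usage of the netCDF4 reader (the filename is hypothetical):
#   D = data().from_nc('CS_OFFL_SIR_SAR_1B_example.nc', unpack=True, verbose=True)
#   # with unpack=True the packed integer fields are scaled; extracted fields
#   # become attributes of the returned object, e.g.
#   print(D.days_J2k.shape, D.Lat.shape, D.Lon.shape)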
def calc_GPS_time(self, day, second, micsec):
"""
Calculate the GPS time (seconds since Jan 6, 1980 00:00:00)
"""
# TAI time is ahead of GPS by 19 seconds
return (day + 7300.0)*86400.0 + second.astype('f') + micsec/1e6 - 19
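# Worked example (illustrative): Day=0, Second=0, Micsec=0 is the TAI epoch
# 2000-01-01T00:00:00, which lies 7300 days after the GPS epoch (1980-01-06),
# so the function returns (0 + 7300)*86400 - 19 = 630719981.0 seconds; the
# constant 19 s removes the fixed TAI-GPS offset.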
def count_leap_seconds(self, GPS_Time):
"""
Count number of leap seconds that have passed for given GPS times
"""
# GPS times for leap seconds
leaps = [46828800, 78364801, 109900802, 173059203, 252028804, 315187205,
346723206, 393984007, 425520008, 457056009, 504489610, 551750411,
599184012, 820108813, 914803214, 1025136015, 1119744016, 1167264017]
# number of leap seconds prior to GPS_Time
n_leaps = np.zeros_like(GPS_Time)
for i,leap in enumerate(leaps):
count = np.count_nonzero(GPS_Time >= leap)
if (count > 0):
i_records,i_blocks = np.nonzero(GPS_Time >= leap)
n_leaps[i_records,i_blocks] += 1.0
return n_leaps
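# Illustrative check: the list above ends with the leap second introduced at
# the end of 2016 (GPS time 1167264017), so any later GPS time yields
# n_leaps = 18, matching the published GPS-UTC offset of 18 s from 2017 onward.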
def read_MPH(self, full_filename):
"""
Read ASCII Main Product Header (MPH) block from an ESA PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# check that first line of header matches PRODUCT
if not bool(re.match(br'PRODUCT\=\"(.*)(?=\")',file_contents[0])):
raise IOError('File does not start with a valid PDS MPH')
# read MPH header text
s_MPH_fields = {}
for i in range(n_MPH_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_MPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_MPH_fields
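# The two header forms handled above look like this (values are hypothetical):
#   PRODUCT="CS_OFFL_SIR_SAR_1B_..."  -> quoted field, stored without the quotes
#   SPH_SIZE=+0000001234<bytes>       -> unquoted field, stored as the raw string
# from_dbl later extracts the integer from SPH_SIZE with re.findall(r'[-+]?\d+', ...).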
def read_SPH(self, full_filename, j_sph_size):
"""
Read ASCII Specific Product Header (SPH) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# compile regular expression operator for reading headers
rx = re.compile(br'(.*?)\=\"?(.*)',re.VERBOSE)
# check first line of header matches SPH_DESCRIPTOR
if not bool(re.match(br'SPH\_DESCRIPTOR\=',file_contents[n_MPH_lines+1])):
raise IOError('File does not contain a valid PDS SPH')
# read SPH header text (no binary control characters)
s_SPH_lines = [li for li in file_contents[n_MPH_lines+1:] if rx.match(li)
and not re.search(br'[^\x20-\x7e]+',li)]
# extract SPH header text
s_SPH_fields = {}
c = 0
while (c < len(s_SPH_lines)):
# check if line is within DS_NAME portion of SPH header
if bool(re.match(br'DS_NAME',s_SPH_lines[c])):
# add dictionary for DS_NAME
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
key = value.decode('utf-8').rstrip()
s_SPH_fields[key] = {}
for line in s_SPH_lines[c+1:c+7]:
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',line)):
# data fields within quotes
dsfield,dsvalue=re.findall(br'(.*?)\=\"(.*)(?=\")',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',line)):
# data fields without quotes
dsfield,dsvalue=re.findall(br'(.*?)\=(.*)',line).pop()
s_SPH_fields[key][dsfield.decode('utf-8')] = dsvalue.decode('utf-8').rstrip()
# add 6 to counter to go to next entry
c += 6
# use regular expression operators to read headers
elif bool(re.match(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',s_SPH_lines[c])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',s_SPH_lines[c]).pop()
s_SPH_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# add 1 to counter to go to next line
c += 1
# Return block name array to calling function
return s_SPH_fields
def read_DSD(self, full_filename, DS_TYPE=None):
"""
Read ASCII Data Set Descriptors (DSD) block from a PDS file
"""
# read input data file
with open(os.path.expanduser(full_filename), 'rb') as fid:
file_contents = fid.read().splitlines()
# Define constant values associated with PDS file formats
# number of text lines in standard MPH
n_MPH_lines = 41
# number of text lines in a DSD header
n_DSD_lines = 8
# Level-1b CryoSat DS_NAMES within files
regex_patterns = []
if (DS_TYPE == 'CS_L1B'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_LRM[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SAR[\s+]*"')
regex_patterns.append(br'DS_NAME\="SIR_L1B_SARIN[\s+]*"')
elif (DS_TYPE == 'SIR_L1B_FDM'):
regex_patterns.append(br'DS_NAME\="SIR_L1B_FDM[\s+]*"')
# find the DSD starting line within the SPH header
c = 0
Flag = False
while ((Flag is False) and (c < len(regex_patterns))):
# find the index of the matching DS_NAME line within the header
indice = [i for i,line in enumerate(file_contents[n_MPH_lines+1:]) if
re.search(regex_patterns[c],line)]
if indice:
Flag = True
else:
c+=1
# check that a valid index was found within the header
if not indice:
raise IOError('Can not find correct DSD field')
# extract s_DSD_fields info
DSD_START = n_MPH_lines + indice[0] + 1
s_DSD_fields = {}
for i in range(DSD_START,DSD_START+n_DSD_lines):
# use regular expression operators to read headers
if bool(re.match(br'(.*?)\=\"(.*)(?=\")',file_contents[i])):
# data fields within quotes
field,value=re.findall(br'(.*?)\=\"(.*)(?=\")',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
elif bool(re.match(br'(.*?)\=(.*)',file_contents[i])):
# data fields without quotes
field,value=re.findall(br'(.*?)\=(.*)',file_contents[i]).pop()
s_DSD_fields[field.decode('utf-8')] = value.decode('utf-8').rstrip()
# Return block name array to calling function
return s_DSD_fields
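# from_dbl consumes three of the DSD fields returned here: DS_OFFSET (byte
# offset to the first data record), NUM_DSR (number of records) and DSR_SIZE
# (record length in bytes). An illustrative, hypothetical entry:
#   DS_OFFSET=+00000000000000007423<bytes>
#   NUM_DSR=+0000001988
#   DSR_SIZE=+0000015800<bytes>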
def cryosat_baseline_AB(self, fid, n_records):
"""
Read L1b MDS variables for CryoSat Baselines A and B
"""
n_SARIN_RW = 512
n_SAR_RW = 128
n_LRM_RW = 128
n_blocks = 20
n_BeamBehaviourParams = 50
# Bind all the variables of the l1b_mds together into a single dictionary
CS_l1b_mds = {}
# CryoSat-2 Time and Orbit Group
CS_l1b_mds['Location'] = {}
# Time: day part
CS_l1b_mds['Location']['Day'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32,fill_value=0)
# Time: second part
CS_l1b_mds['Location']['Second'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Time: microsecond part
CS_l1b_mds['Location']['Micsec'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# USO correction factor
CS_l1b_mds['Location']['USO_Corr'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Mode ID
CS_l1b_mds['Location']['Mode_ID'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Source sequence counter
CS_l1b_mds['Location']['SSC'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint16)
# Instrument configuration
CS_l1b_mds['Location']['Inst_config'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Record Counter
CS_l1b_mds['Location']['Rec_Count'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lat'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Location']['Lon'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Location']['Alt'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instantaneous altitude rate derived from orbit: packed units (mm/s, 1e-3 m/s)
CS_l1b_mds['Location']['Alt_rate'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Satellite velocity vector. In ITRF: packed units (mm/s, 1e-3 m/s)
# ITRF= International Terrestrial Reference Frame
CS_l1b_mds['Location']['Sat_velocity'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Real beam direction vector. In CRF: packed units (micro-m, 1e-6 m)
# CRF= CryoSat Reference Frame.
CS_l1b_mds['Location']['Real_beam'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Interferometric baseline vector. In CRF: packed units (micro-m, 1e-6 m)
CS_l1b_mds['Location']['Baseline'] = np.ma.zeros((n_records,n_blocks,3),dtype=np.int32)
# Measurement Confidence Data Flags
# Generally the MCD flags indicate problems when set
# If MCD is 0 then no problems or non-nominal conditions were detected
# Serious errors are indicated by setting bit 31
CS_l1b_mds['Location']['MCD'] = np.ma.zeros((n_records,n_blocks),dtype=np.uint32)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
CS_l1b_mds['Data'] = {}
# Window Delay reference (two-way) corrected for instrument delays
CS_l1b_mds['Data']['TD'] = np.ma.zeros((n_records,n_blocks),dtype=np.int64)
# H0 Initial Height Word from telemetry
CS_l1b_mds['Data']['H_0'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# COR2 Height Rate: on-board tracker height rate over the radar cycle
CS_l1b_mds['Data']['COR2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Coarse Range Word (LAI) derived from telemetry
CS_l1b_mds['Data']['LAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Fine Range Word (FAI) derived from telemetry
CS_l1b_mds['Data']['FAI'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 1: AGC gain applied on Rx channel 1.
# Gain calibration corrections are applied (Sum of AGC stages 1 and 2
# plus the corresponding corrections) (dB/100)
CS_l1b_mds['Data']['AGC_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Automatic Gain Control Channel 2: AGC gain applied on Rx channel 2.
# Gain calibration corrections are applied (dB/100)
CS_l1b_mds['Data']['AGC_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 1: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH1'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Total Fixed Gain On Channel 2: gain applied by the RF unit. (dB/100)
CS_l1b_mds['Data']['TR_gain_CH2'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Transmit Power in microWatts
CS_l1b_mds['Data']['TX_Power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Doppler range correction: Radial component (mm)
# computed for the component of satellite velocity in the nadir direction
CS_l1b_mds['Data']['Doppler_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: transmit-receive antenna (mm)
# Calibration correction to range on channel 1 computed from CAL1.
CS_l1b_mds['Data']['TR_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Range Correction: receive-only antenna (mm)
# Calibration correction to range on channel 2 computed from CAL1.
CS_l1b_mds['Data']['R_inst_range'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: transmit-receive antenna (dB/100)
# Calibration correction to gain on channel 1 computed from CAL1
CS_l1b_mds['Data']['TR_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Instrument Gain Correction: receive-only (dB/100)
# Calibration correction to gain on channel 2 computed from CAL1
CS_l1b_mds['Data']['R_inst_gain'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Internal Phase Correction (microradians)
CS_l1b_mds['Data']['Internal_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# External Phase Correction (microradians)
CS_l1b_mds['Data']['External_phase'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Noise Power measurement (dB/100): converted from telemetry units to be
# the noise floor of FBR measurement echoes.
# Set to -9999.99 when the telemetry contains zero.
CS_l1b_mds['Data']['Noise_power'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
# Phase slope correction (microradians)
# Computed from the CAL-4 packets during the azimuth impulse response
# amplitude (SARIN only). Set from the latest available CAL-4 packet.
CS_l1b_mds['Data']['Phase_slope'] = np.ma.zeros((n_records,n_blocks),dtype=np.int32)
CS_l1b_mds['Data']['Spares1'] = np.ma.zeros((n_records,n_blocks,4),dtype=np.int8)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry'] = {}
# Dry Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['dryTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Wet Tropospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['wetTrop'] = np.ma.zeros((n_records),dtype=np.int32)
# Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['InvBar'] = np.ma.zeros((n_records),dtype=np.int32)
# Delta Inverse Barometric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['DAC'] = np.ma.zeros((n_records),dtype=np.int32)
# GIM Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_GIM'] = np.ma.zeros((n_records),dtype=np.int32)
# Model Ionospheric Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['Iono_model'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['ocTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Long period equilibrium ocean tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['lpeTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Ocean loading tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['olTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Solid Earth tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['seTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Geocentric Polar tide Correction packed units (mm, 1e-3 m)
CS_l1b_mds['Geometry']['gpTideElv'] = np.ma.zeros((n_records),dtype=np.int32)
# Surface Type: enumerated key to classify surface at nadir
# 0 = Open Ocean
# 1 = Closed Sea
# 2 = Continental Ice
# 3 = Land
CS_l1b_mds['Geometry']['Surf_type'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare1'] = np.ma.zeros((n_records,4),dtype=np.int8)
# Corrections Status Flag
CS_l1b_mds['Geometry']['Corr_status'] = np.ma.zeros((n_records),dtype=np.uint32)
# Correction Error Flag
CS_l1b_mds['Geometry']['Corr_error'] = np.ma.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Geometry']['Spare2'] = np.ma.zeros((n_records,4),dtype=np.int8)
# CryoSat-2 Average Waveforms Groups
CS_l1b_mds['Waveform_1Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
elif (self.MODE == 'SIN'):
# SARIN Mode
# Same as the LRM/SAR groups but the waveform array is 512 bins instead of
# 128 and the number of echoes averaged is different.
# Data Record Time (MDSR Time Stamp)
CS_l1b_mds['Waveform_1Hz']['Day'] = np.zeros((n_records),dtype=np.int32)
CS_l1b_mds['Waveform_1Hz']['Second'] = np.zeros((n_records),dtype=np.uint32)
CS_l1b_mds['Waveform_1Hz']['Micsec'] = np.zeros((n_records),dtype=np.uint32)
# Lat: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lat'] = np.zeros((n_records),dtype=np.int32)
# Lon: packed units (0.1 micro-degree, 1e-7 degrees)
CS_l1b_mds['Waveform_1Hz']['Lon'] = np.zeros((n_records),dtype=np.int32)
# Alt: packed units (mm, 1e-3 m)
# Altitude of COG above reference ellipsoid (interpolated value)
CS_l1b_mds['Waveform_1Hz']['Alt'] = np.zeros((n_records),dtype=np.int32)
# Window Delay (two-way) corrected for instrument delays
CS_l1b_mds['Waveform_1Hz']['TD'] = np.zeros((n_records),dtype=np.int64)
# 1 Hz Averaged Power Echo Waveform
CS_l1b_mds['Waveform_1Hz']['Waveform'] = np.zeros((n_records,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Echo Scale Power (a power of 2)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'] = np.zeros((n_records),dtype=np.uint16)
CS_l1b_mds['Waveform_1Hz']['Flags'] = np.zeros((n_records),dtype=np.uint16)
# CryoSat-2 Waveforms Groups
# Beam Behavior Parameters
Beam_Behavior = {}
# Standard Deviation of Gaussian fit to range integrated stack power.
Beam_Behavior['SD'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack Center: Mean of Gaussian fit to range integrated stack power.
Beam_Behavior['Center'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Stack amplitude parameter scaled in dB/100.
Beam_Behavior['Amplitude'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# 3rd moment: providing the degree of asymmetry of the range integrated
# stack power distribution.
Beam_Behavior['Skewness'] = np.zeros((n_records,n_blocks),dtype=np.int16)
# 4th moment: Measure of peakiness of range integrated stack power distribution.
Beam_Behavior['Kurtosis'] = np.zeros((n_records,n_blocks),dtype=np.int16)
Beam_Behavior['Spare'] = np.zeros((n_records,n_blocks,n_BeamBehaviourParams-5),dtype=np.int16)
# CryoSat-2 mode specific waveforms
CS_l1b_mds['Waveform_20Hz'] = {}
if (self.MODE == 'LRM'):
# Low-Resolution Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_LRM_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
elif (self.MODE == 'SAR'):
# SAR Mode
# Averaged Power Echo Waveform [128]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SAR_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
elif (self.MODE == 'SIN'):
# SARIN Mode
# Averaged Power Echo Waveform [512]
CS_l1b_mds['Waveform_20Hz']['Waveform'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.uint16)
# Echo Scale Factor (to scale echo to watts)
CS_l1b_mds['Waveform_20Hz']['Linear_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Echo Scale Power (a power of 2 to scale echo to Watts)
CS_l1b_mds['Waveform_20Hz']['Power2_Wfm_Multiplier'] = np.zeros((n_records,n_blocks),dtype=np.int32)
# Number of echoes averaged
CS_l1b_mds['Waveform_20Hz']['N_avg_echoes'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
CS_l1b_mds['Waveform_20Hz']['Flags'] = np.zeros((n_records,n_blocks),dtype=np.uint16)
# Beam behaviour parameters
CS_l1b_mds['Waveform_20Hz']['Beam'] = Beam_Behavior
# Coherence [512]: packed units (1/1000)
CS_l1b_mds['Waveform_20Hz']['Coherence'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int16)
# Phase Difference [512]: packed units (microradians)
CS_l1b_mds['Waveform_20Hz']['Phase_diff'] = np.zeros((n_records,n_blocks,n_SARIN_RW),dtype=np.int32)
# for each record in the CryoSat file
for r in range(n_records):
# CryoSat-2 Time and Orbit Group
for b in range(n_blocks):
CS_l1b_mds['Location']['Day'].data[r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Second'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Micsec'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['USO_Corr'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Mode_ID'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['SSC'][r,b] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Location']['Inst_config'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Rec_Count'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Location']['Lat'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Lon'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Alt_rate'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Location']['Sat_velocity'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Real_beam'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['Baseline'][r,b,:] = np.fromfile(fid,dtype='>i4',count=3)
CS_l1b_mds['Location']['MCD'][r,b] = np.fromfile(fid,dtype='>u4',count=1)
# CryoSat-2 Measurement Group
# Derived from instrument measurement parameters
for b in range(n_blocks):
CS_l1b_mds['Data']['TD'][r,b] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Data']['H_0'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['COR2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['LAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['FAI'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['AGC_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH1'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_gain_CH2'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TX_Power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Doppler_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_range'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['TR_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['R_inst_gain'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Internal_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['External_phase'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Noise_power'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Phase_slope'][r,b] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Data']['Spares1'][r,b,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 External Corrections Group
CS_l1b_mds['Geometry']['dryTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['wetTrop'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['InvBar'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['DAC'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_GIM'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Iono_model'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['ocTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['lpeTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['olTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['seTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['gpTideElv'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Geometry']['Surf_type'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare1'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
CS_l1b_mds['Geometry']['Corr_status'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Corr_error'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Geometry']['Spare2'][r,:] = np.fromfile(fid,dtype='>i1',count=4)
# CryoSat-2 Average Waveforms Groups
if (self.MODE == 'LRM'):
# Low-Resolution Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_LRM_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SAR'):
# SAR Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['TD'][r] = np.fromfile(fid,dtype='>i8',count=1)
CS_l1b_mds['Waveform_1Hz']['Waveform'][r,:] = np.fromfile(fid,dtype='>u2',count=n_SAR_RW)
CS_l1b_mds['Waveform_1Hz']['Linear_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Power2_Wfm_Multiplier'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['N_avg_echoes'][r] = np.fromfile(fid,dtype='>u2',count=1)
CS_l1b_mds['Waveform_1Hz']['Flags'][r] = np.fromfile(fid,dtype='>u2',count=1)
elif (self.MODE == 'SIN'):
# SARIN Mode
CS_l1b_mds['Waveform_1Hz']['Day'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Second'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Micsec'][r] = np.fromfile(fid,dtype='>u4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lat'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Lon'][r] = np.fromfile(fid,dtype='>i4',count=1)
CS_l1b_mds['Waveform_1Hz']['Alt'][r] = np.fromfile(fid,dtype='>i4',count=1)
"""Module for remapping complex data for display."""
from inspect import getmembers, isfunction
import sys
import numpy as np
from scipy.stats import scoreatpercentile as prctile
__classification__ = "UNCLASSIFIED"
__author__ = "<NAME>"
def get_remap_list():
"""
Create list of remap functions accessible from this module.
Returns
-------
List[Tuple[str, callable], ...]
List of tuples of the form `(<function name>, <function>)`.
"""
# We specifically list these as the only functions in this module that are
# not remaps. If we later add other utility functions to this module, we
# will have to manually add them to this list as well. However, we don't
# have to do anything if we are just adding more remap functions.
names_nonremap_funs = ['get_remap_list', 'amplitude_to_density', '_clip_cast']
# Get all functions from this module
all_funs = getmembers(sys.modules[__name__], isfunction)
# all_funs is list of (function name, function object) tuples. fun[0] is name.
just_remap_funs = [fun for fun in all_funs if fun[0] not in names_nonremap_funs]
return just_remap_funs
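# Illustrative usage: build a name -> function lookup from the returned list
# and apply a remap by name ('complex_image' is a hypothetical array).
#   remap_by_name = dict(get_remap_list())
#   eight_bit = remap_by_name['density'](complex_image)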
def amplitude_to_density(a, dmin=30, mmult=40, data_mean=None):
"""
Convert to density data for remap.
Parameters
----------
a : numpy.ndarray
dmin : float|int
mmult : float|int
data_mean : None|float|int
Returns
-------
numpy.ndarray
"""
EPS = 1e-5
if (a==0).all():
return np.zeros(a.shape)
else:
a = abs(a)
if not data_mean:
data_mean = np.mean(a[np.isfinite(a)])
cl = 0.8 * data_mean
ch = mmult * cl
m = (255 - dmin)/np.log10(ch/cl)
b = dmin - (m * np.log10(cl))
return (m * np.log10(np.maximum(a, EPS))) + b
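# With the defaults above the mapping is affine in log10(|a|): amplitudes at
# cl = 0.8*data_mean map to dmin and amplitudes at ch = mmult*cl map to 255,
# because m*log10(cl) + b = dmin and m*log10(ch) + b = dmin + (255 - dmin).
# Values outside [cl, ch] leave the [dmin, 255] range and are clipped by
# _clip_cast in the remap functions below.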
# Does Python not have a builtin way to do this fundamental operation???
def _clip_cast(x, dtype='uint8'):
"""
Cast by clipping values outside of valid range, rather than wrapping.
Parameters
----------
x : numpy.ndarray
dtype : str|numpy.dtype
Returns
-------
numpy.ndarray
"""
np_type = np.dtype(dtype)
return np.clip(x, np.iinfo(np_type).min, np.iinfo(np_type).max).astype(dtype)
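# Illustrative example: _clip_cast(np.array([-5.0, 12.3, 300.0])) returns
# array([0, 12, 255], dtype=uint8); out-of-range values are clipped to the
# uint8 limits instead of wrapping modulo 256 as a plain astype would.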
def density(x):
"""
Standard set of parameters for density remap.
Parameters
----------
x : numpy.ndarray
Returns
-------
numpy.ndarray
"""
return _clip_cast(amplitude_to_density(x))
def brighter(x):
"""
Brighter set of parameters for density remap.
Parameters
----------
x : numpy.ndarray
Returns
-------
numpy.ndarray
"""
return _clip_cast(amplitude_to_density(x, 60, 40))
def darker(x):
"""
Darker set of parameters for density remap.
Parameters
----------
x : numpy.ndarray
Returns
-------
numpy.ndarray
"""
return _clip_cast(amplitude_to_density(x, 0, 40))
def highcontrast(x):
"""
Increased contrast set of parameters for density remap.
Parameters
----------
x : numpy.ndarray
Returns
-------
numpy.ndarray
"""
return _clip_cast(amplitude_to_density(x, 30, 4))
def linear(x):
"""
Linear remap - just the magnitude.
Parameters
----------
x : numpy.ndarray
Returns
-------
numpy.ndarray
"""
if np.iscomplexobj(x):
return np.abs(x)
else:
return x
def log(x):
"""
Logarithmic remap.
Parameters
----------
x : numpy.ndarray
Returns
-------
numpy.ndarray
"""
out = np.log(np.abs(x))
out[np.logical_not(np.isfinite(out))] = np.min(out[np.isfinite(out)])
return out
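# Minimal end-to-end sketch (illustrative; 'cdata' is a hypothetical complex array):
#   import numpy as np
#   cdata = np.random.randn(256, 256) + 1j*np.random.randn(256, 256)
#   eight_bit = density(cdata)              # uint8 image suitable for display
#   available = [name for name, _ in get_remap_list()]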
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: numpy.ndarray
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
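# Illustrative example: for space group 'P -1' (defined below) a reflection
# (1, 2, 3) has symmetry mates (1, 2, 3) and (-1, -2, -3), each with a unit
# phase factor because both transformations carry zero translation:
#   sg = space_groups['P -1']
#   hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))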
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
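# Illustrative note (added; not part of the generated table): each stored tuple
# (rot, trans_num, trans_den) encodes a symmetry operation on fractional
# coordinates, x' = rot.x + trans_num/trans_den. A minimal sketch, assuming N
# is the numpy-compatible module used throughout this table:
#
#     rot, tn, td = transformations[2]        # third operation of P 2 2 21
#     x = N.array([0.1, 0.2, 0.3])
#     x_new = N.dot(rot, x) + tn * 1.0 / td   # -> [-0.1, 0.2, 0.2]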
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
# ===============================================================================
# Copyright 2012 <NAME>
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ===============================================================================
# ============= enthought library imports =======================
import logging
from numpy import asarray, column_stack, sqrt, dot, linalg, zeros_like, hstack, ones_like, array
from statsmodels.api import OLS
from traits.api import Int, Property
# ============= local library imports ==========================
from pychron.core.helpers.fits import FITS
from pychron.core.regression.base_regressor import BaseRegressor
from pychron.pychron_constants import MSEM, SEM
logger = logging.getLogger('Regressor')
class OLSRegressor(BaseRegressor):
degree = Property(depends_on='_degree')
_degree = Int
constant = None
_ols = None
def set_degree(self, d, refresh=True):
if isinstance(d, str):
d = d.lower()
fits = ['linear', 'parabolic', 'cubic']
if d in fits:
d = fits.index(d) + 1
else:
d = None
if d is None:
d = 1
self._degree = d
if refresh:
self.dirty = True
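    # Example (added sketch): the named fits map onto polynomial degrees, e.g.
    #     reg = OLSRegressor()
    #     reg.set_degree('parabolic')   # reg._degree is now 2
    # Unrecognized names (and None) fall back to degree 1, i.e. a linear fit.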
def get_exog(self, x):
return self._get_X(x)
def fast_predict(self, endog, pexog, exog=None):
ols = self._ols
ols.wendog = ols.whiten(endog)
if exog is not None:
ols.wexog = ols.whiten(exog)
# force recalculation
del ols.pinv_wexog
result = ols.fit()
return result.predict(pexog)
def fast_predict2(self, endog, exog):
"""
        This function is less flexible than fast_predict but roughly 2x faster.
        It skips the RegressionResults class and simply does the linear algebra
        to predict values. Currently useful for monte_carlo_estimation.
"""
if not hasattr(self, 'pinv_wexog'):
self.pinv_wexog = linalg.pinv(self._ols.wexog)
beta = dot(self.pinv_wexog, endog)
return dot(exog, beta)
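    # Intended-use sketch (added; the names below are hypothetical): in a Monte
    # Carlo error-propagation loop the design matrices stay fixed while the
    # observed values are perturbed, so the pseudo-inverse is computed once and
    # reused:
    #     pexog = reg.get_exog(x_new)
    #     for _ in range(n_trials):
    #         y_trial = ys + yserr * random_normal_draws()
    #         y_pred = reg.fast_predict2(y_trial, pexog)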
def calculate(self, filtering=False):
cxs = self.clean_xs
cys = self.clean_ys
integrity_check = True
if not self._check_integrity(cxs, cys):
if len(cxs) == 1 and len(cys) == 1:
cxs = hstack((cxs, cxs[0]))
                cys = hstack((cys, cys[0]))
# base.py
# Author: <NAME> <<EMAIL>>
"""
This file contains code that implements the core of the submodular selection
algorithms.
"""
import numpy
from tqdm import tqdm
from ..optimizers import BaseOptimizer
from ..optimizers import NaiveGreedy
from ..optimizers import LazyGreedy
from ..optimizers import ApproximateLazyGreedy
from ..optimizers import TwoStageGreedy
from ..optimizers import StochasticGreedy
from ..optimizers import BidirectionalGreedy
from ..optimizers import GreeDi
from ..optimizers import SieveGreedy
from ..optimizers import OPTIMIZERS
from ..utils import PriorityQueue
from ..utils import check_random_state
from ..utils import _calculate_pairwise_distances
from scipy.sparse import csr_matrix
class BaseSelection(object):
"""The base selection object.
This object defines the structures that all submodular selection algorithms
should follow. All algorithms will have the same public methods and the
same attributes.
Parameters
----------
n_samples : int
The number of samples to return.
initial_subset : list, numpy.ndarray or None, optional
If provided, this should be a list of indices into the data matrix
to use as the initial subset, or a group of examples that may not be
        in the provided data should be used as the initial subset. If indices,
the provided array should be one-dimensional. If a group of examples,
the data should be 2 dimensional.
optimizer : string or optimizers.BaseOptimizer, optional
The optimization approach to use for the selection. Default is
'two-stage', which makes selections using the naive greedy algorithm
initially and then switches to the lazy greedy algorithm. Must be
one of
'naive' : the naive greedy algorithm
'lazy' : the lazy (or accelerated) greedy algorithm
'approximate-lazy' : the approximate lazy greedy algorithm
'two-stage' : starts with naive and switches to lazy
'stochastic' : the stochastic greedy algorithm
'greedi' : the GreeDi distributed algorithm
'bidirectional' : the bidirectional greedy algorithm
Default is 'naive'.
optimizer_kwds : dict or None
A dictionary of arguments to pass into the optimizer object. The keys
of this dictionary should be the names of the parameters in the optimizer
and the values in the dictionary should be the values that these
parameters take. Default is None.
reservoir : numpy.ndarray or None
The reservoir to use when calculating gains in the sieve greedy
streaming optimization algorithm in the `partial_fit` method.
Currently only used for graph-based functions. If a numpy array
is passed in, it will be used as the reservoir. If None is passed in,
will use reservoir sampling to collect a reservoir. Default is None.
max_reservoir_size : int
The maximum size that the reservoir can take. If a reservoir is passed
in, this value is set to the size of that array. Default is 1000.
n_jobs : int
The number of threads to use when performing computation in parallel.
Currently, this parameter is exposed but does not actually do anything.
This will be fixed soon.
random_state : int or RandomState or None, optional
The random seed to use for the random selection process. Only used
for stochastic greedy.
verbose : bool
Whether to print output during the selection process.
Attributes
----------
n_samples : int
The number of samples to select.
ranking : numpy.array int
The selected samples in the order of their gain with the first number in
the ranking corresponding to the index of the first sample that was
selected by the greedy procedure.
gains : numpy.array float
The gain of each sample in the returned set when it was added to the
growing subset. The first number corresponds to the gain of the first
added sample, the second corresponds to the gain of the second added
sample, and so forth.
"""
def __init__(self, n_samples, initial_subset=None, optimizer='lazy',
optimizer_kwds={}, reservoir=None, max_reservoir_size=1000,
n_jobs=1, random_state=None, verbose=False):
if n_samples <= 0:
raise ValueError("n_samples must be a positive value.")
if not isinstance(initial_subset, (list, numpy.ndarray)) and initial_subset is not None:
raise ValueError("initial_subset must be a list, numpy array, or None")
if isinstance(initial_subset, (list, numpy.ndarray)):
initial_subset = numpy.array(initial_subset)
if not isinstance(optimizer, BaseOptimizer):
if optimizer not in OPTIMIZERS.keys():
raise ValueError("Optimizer must be an optimizer object or " \
"a str in {}.".format(str(OPTIMIZERS.keys())))
if isinstance(optimizer, BaseOptimizer):
optimizer.function = self
if verbose not in (True, False):
raise ValueError("verbosity must be True or False")
self.n_samples = n_samples
self.metric = 'ignore'
self.random_state = check_random_state(random_state)
self.optimizer = optimizer
self.optimizer_kwds = optimizer_kwds
self.n_jobs = n_jobs
self.verbose = verbose
self.initial_subset = initial_subset
self.ranking = None
self.idxs = None
self.gains = None
self.subset = None
self.sparse = None
self._X = None
self.sieve_current_values_ = None
self.n_seen_ = 0
self.reservoir_size = 0 if reservoir is None else reservoir.shape[0]
self.reservoir = reservoir
self.max_reservoir_size = max_reservoir_size if reservoir is None else reservoir.shape[0]
self.update_reservoir_ = reservoir is None
def fit(self, X, y=None, sample_weight=None, sample_cost=None):
"""Run submodular optimization to select a subset of examples.
This method is a wrapper for the full submodular optimization process.
It takes in some data set (and optionally labels that are ignored
during this process) and selects `n_samples` from it in the greedy
manner specified by the optimizer.
This method will return the selector object itself, not the transformed
data set. The `transform` method will then transform a data set to the
selected points, or alternatively one can use the ranking stored in
the `self.ranking` attribute. The `fit_transform` method will perform
both optimization and selection and return the selected items.
Parameters
----------
X : list or numpy.ndarray, shape=(n, d)
The data set to transform. Must be numeric.
y : list or numpy.ndarray or None, shape=(n,), optional
The labels to transform. If passed in this function will return
            both the data and the corresponding labels for the rows that have
been selected.
sample_weight : list or numpy.ndarray or None, shape=(n,), optional
The weight of each example. Currently ignored in apricot but
included to maintain compatibility with sklearn pipelines.
sample_cost : list or numpy.ndarray or None, shape=(n,), optional
The cost of each item. If set, indicates that optimization should
be performed with respect to a knapsack constraint.
Returns
-------
self : BaseGraphSelection
The fit step returns this selector object.
"""
allowed_dtypes = list, numpy.ndarray, csr_matrix
if not isinstance(X, allowed_dtypes):
raise ValueError("X must be either a list of lists, a 2D numpy " \
"array, or a scipy.sparse.csr_matrix.")
if isinstance(X, numpy.ndarray) and len(X.shape) != 2:
raise ValueError("X must have exactly two dimensions.")
if numpy.min(X) < 0.0 and numpy.max(X) > 0.:
raise ValueError("X cannot contain negative values or must be entirely "\
"negative values.")
if self.n_samples > X.shape[0]:
raise ValueError("Cannot select more examples than the number in" \
" the data set.")
if not self.sparse:
if X.dtype != 'float64':
X = X.astype('float64')
if isinstance(self.optimizer, str):
optimizer = OPTIMIZERS[self.optimizer](function=self,
verbose=self.verbose, random_state=self.random_state,
**self.optimizer_kwds)
else:
optimizer = self.optimizer
self._X = X if self._X is None else self._X
self._initialize(X)
if self.verbose:
self.pbar = tqdm(total=self.n_samples, unit_scale=True)
optimizer.select(X, self.n_samples, sample_cost=sample_cost)
if self.verbose == True:
self.pbar.close()
self.ranking = numpy.array(self.ranking)
self.gains = numpy.array(self.gains)
return self
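    # Usage sketch (added): BaseSelection itself is abstract; a concrete apricot
    # selector such as FacilityLocationSelection (assumed available) follows the
    # fit/transform pattern described above:
    #     selector = FacilityLocationSelection(n_samples=100, optimizer='lazy')
    #     X_subset = selector.fit_transform(X)
    #     order, gains = selector.ranking, selector.gains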
def partial_fit(self, X, y=None, sample_weight=None, sample_cost=None):
allowed_dtypes = list, numpy.ndarray, csr_matrix
if not isinstance(X, allowed_dtypes):
raise ValueError("X must be either a list of lists, a 2D numpy " \
"array, or a scipy.sparse.csr_matrix.")
if isinstance(X, numpy.ndarray) and len(X.shape) != 2:
raise ValueError("X must have exactly two dimensions.")
if not self.sparse:
if X.dtype != 'float64':
X = X.astype('float64')
if not isinstance(self.optimizer, SieveGreedy):
self.optimizer = OPTIMIZERS['sieve'](function=self,
verbose=self.verbose, random_state=self.random_state,
**self.optimizer_kwds)
self._X = X if self._X is None else self._X
self._initialize(X)
if self.verbose:
self.pbar = tqdm(total=self.n_samples, unit_scale=True)
self.optimizer.select(X, self.n_samples, sample_cost=sample_cost)
if self.verbose == True:
self.pbar.close()
self.ranking = numpy.array(self.ranking)
self.gains = numpy.array(self.gains)
self._X = None
return self
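    # Streaming sketch (added): partial_fit is meant to be called once per
    # incoming batch, with the sieve optimizer accumulating a selection across
    # batches, e.g.
    #     for X_batch in batches:       # `batches` is a hypothetical iterable
    #         selector.partial_fit(X_batch)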
def transform(self, X, y=None, sample_weight=None):
"""Transform a data set to include only the selected examples.
This method will return a selection of X and optionally selections
of y and sample_weight. The default setting is to select items based
on the ranking determined in the `fit` step with examples in the same
order as that ranking. Optionally, the whole data set can be returned,
with the weights corresponding to samples that were not selected set
to 0. This setting can be controlled by setting `pipeline=True`.
Parameters
----------
X : list or numpy.ndarray, shape=(n, d)
The data set to transform. Must be numeric.
y : list or numpy.ndarray or None, shape=(n,), optional
The labels to transform. If passed in this function will return
both the data and the corresponding labels for the rows that have
been selected. Default is None.
sample_weight : list or numpy.ndarray or None, shape=(n,), optional
The sample weights to transform. If passed in this function will
return the selected labels (y) and the selected samples, even
if no labels were passed in. Default is None.
Returns
-------
X_subset : numpy.ndarray, shape=(n_samples, d)
A subset of the data such that n_samples < n and n_samples is the
integer provided at initialization.
y_subset : numpy.ndarray, shape=(n_samples,), optional
The labels that match with the indices of the samples if y is
passed in. Only returned if passed in.
sample_weight_subset : numpy.ndarray, shape=(n_samples,), optional
The weight of each example.
"""
r = self.ranking
if sample_weight is not None:
if y is None:
return X[r], None, sample_weight[r]
else:
return X[r], y[r], sample_weight[r]
else:
if y is None:
return X[r]
else:
return X[r], y[r]
def fit_transform(self, X, y=None, sample_weight=None, sample_cost=None):
"""Run optimization and select a subset of examples.
This method will first perform the `fit` step and then perform the
`transform` step, returning a transformed data set.
Parameters
----------
X : list or numpy.ndarray, shape=(n, d)
The data set to transform. Must be numeric.
y : list or numpy.ndarray or None, shape=(n,), optional
The labels to transform. If passed in this function will return
both the data and the corresponding labels for the rows that have
been selected. Default is None.
sample_weight : list or numpy.ndarray or None, shape=(n,), optional
The sample weights to transform. If passed in this function will
return the selected labels (y) and the selected samples, even
if no labels were passed in. Default is None.
sample_cost : list or numpy.ndarray or None, shape=(n,), optional
The cost of each item. If set, indicates that optimization should
be performed with respect to a knapsack constraint.
Returns
-------
X_subset : numpy.ndarray, shape=(n_samples, d)
A subset of the data such that n_samples < n and n_samples is the
integer provided at initialization.
y_subset : numpy.ndarray, shape=(n_samples,), optional
The labels that match with the indices of the samples if y is
passed in. Only returned if passed in.
sample_weight_subset : numpy.ndarray, shape=(n_samples,), optional
The weight of each example.
"""
return self.fit(X, y=y, sample_weight=sample_weight,
sample_cost=sample_cost).transform(X, y=y,
sample_weight=sample_weight)
def _initialize(self, X, idxs=None):
n, d = X.shape
self._X = X if self._X is None else self._X
self.sparse = isinstance(X, csr_matrix)
self.ranking = []
self.gains = []
self.subset = numpy.zeros((0, self._X.shape[1]), dtype='float64')
self.current_values = numpy.zeros(d, dtype='float64')
self.current_concave_values = numpy.zeros(d, dtype='float64')
self.mask = numpy.zeros(n, dtype='int8')
if self.initial_subset is not None:
if self.initial_subset.ndim == 1:
if self.initial_subset.dtype == bool:
self.initial_subset = numpy.where(self.initial_subset == 1)[0]
if len(self.initial_subset) + self.n_samples > X.shape[0]:
raise ValueError("When using a mask for the initial subset" \
" must selected fewer than the size of the subset minus" \
" the initial subset size, i.e., n_samples < X.shape[0] -"\
" initial_subset.shape[0].")
if self.initial_subset.max() > X.shape[0]:
raise ValueError("When passing in an integer mask for the initial subset"\
" the maximum value cannot exceed the size of the data set.")
elif self.initial_subset.min() < 0:
raise ValueError("When passing in an integer mask for the initial subset"\
" the minimum value cannot be negative.")
self.mask[self.initial_subset] = 1
self.idxs = numpy.where(self.mask == 0)[0]
def _calculate_gains(self, X, idxs=None):
raise NotImplementedError
def _calculate_sieve_gains(self, X, thresholds, idxs):
n = X.shape[0]
d = X.shape[1] if self.reservoir is None else self.max_reservoir_size
l = len(thresholds)
if self.sieve_current_values_ is None:
self.sieve_current_values_ = numpy.zeros((l, d),
dtype='float64')
self.sieve_selections_ = numpy.zeros((l, self.n_samples),
dtype='int64') - 1
self.sieve_gains_ = numpy.zeros((l, self.n_samples),
dtype='float64') - 1
self.sieve_n_selected_ = numpy.zeros(l,
dtype='int64')
self.sieve_total_gains_ = numpy.zeros(l,
dtype='float64')
self.sieve_subsets_ = numpy.zeros((l, self.n_samples,
self._X.shape[1]), dtype='float64')
else:
j = l - self.sieve_current_values_.shape[0]
if j > 0:
self.sieve_current_values_ = numpy.vstack([
self.sieve_current_values_, numpy.zeros((j, d),
dtype='float64')])
self.sieve_selections_ = numpy.vstack([
self.sieve_selections_, numpy.zeros((j, self.n_samples),
dtype='int64') - 1])
self.sieve_gains_ = numpy.vstack([self.sieve_gains_,
numpy.zeros((j, self.n_samples), dtype='float64')])
self.sieve_n_selected_ = numpy.concatenate([
self.sieve_n_selected_, numpy.zeros(j, dtype='int64')])
self.sieve_total_gains_ = numpy.concatenate([
self.sieve_total_gains_, numpy.zeros(j, dtype='float64')])
self.sieve_subsets_ = numpy.concatenate([self.sieve_subsets_,
numpy.zeros((j, self.n_samples, self._X.shape[1]),
dtype='float64')])
def _select_next(self, X, gain, idx):
self.ranking.append(idx)
self.gains.append(gain)
self.mask[idx] = True
        self.idxs = numpy.where(self.mask == 0)[0]
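# --- Hedged usage sketch (not part of the original module) -------------------
# A minimal, self-contained illustration of the greedy bookkeeping performed by
# _select_next()/transform() above: items are picked one at a time, their
# indices collected in `ranking`, their marginal gains in `gains`, and the
# selected rows are returned in selection order. The sqrt feature-based
# objective and the function name are assumptions made only so the sketch runs;
# it expects a dense, non-negative feature matrix.
def _greedy_selection_sketch(X, n_samples):
    import numpy

    mask = numpy.zeros(X.shape[0], dtype='int8')
    current_values = numpy.zeros(X.shape[1], dtype='float64')
    ranking, gains = [], []

    for _ in range(n_samples):
        idxs = numpy.where(mask == 0)[0]
        # marginal gain of each remaining row under a sqrt(sum) objective
        gain = (numpy.sqrt(current_values + X[idxs]).sum(axis=1)
                - numpy.sqrt(current_values).sum())
        best = idxs[numpy.argmax(gain)]

        ranking.append(best)
        gains.append(gain.max())
        mask[best] = 1
        current_values = current_values + X[best]

    return X[numpy.array(ranking)], ranking, gains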
# -*- coding: utf-8 -*-
#try:
# from Numeric import *
#except ImportError:
from numpy import *
import copy
import numpy
outerproduct = outer
PI2 = pi*2.0
# for debugging set a seed
#random.seed(42)
def make_vec(l):
return array(l, "d")
def scal_prod(v1, v2):
return sum(v1*v2,axis=-1)
def length(v):
    return sqrt(sum(v*v, axis=-1))
def norm(v1):
return sqrt(scal_prod(v1,v1))
def normalize(v1):
n = norm(v1)
if isscalar(n):
if isclose(n,0):
return v1
else:
return v1/n
else:
return v1/n[:,newaxis]
def angle(v1, v2):
_nv1 = normalize(v1)
_nv2 = normalize(v2)
d = scal_prod(_nv1, _nv2)
if d < -1.0: d=-1.0
if d > 1.0 : d= 1.0
return arccos(d)
def project(v1, v2):
_nv2 = normalize(v2)
l = scal_prod(v1, _nv2)
return _nv2*l
def cross_prod(a, b):
return array( [a[1]*b[2] - a[2]*b[1], \
a[2]*b[0] - a[0]*b[2], \
a[0]*b[1] - a[1]*b[0]], "d")
def rotmat(v, theta):
Q = array([[0, -v[2], v[1]], [v[2], 0, -v[0]], [-v[1], v[0], 0]], "d")
Q *= sin(theta)
uut = outerproduct(v,v)
Q += (identity(3,"d") - uut)*cos(theta)
Q += uut
return Q
def rotate(xyz, v, theta):
return dot(xyz, transpose(rotmat(v, theta)))
def rotmat_from_euler(euler):
R = zeros([3,3],"d")
sa = sin(euler[0])
ca = cos(euler[0])
sb = sin(euler[1])
cb = cos(euler[1])
sg = sin(euler[2])
cg = cos(euler[2])
R[0, 0] = cb * cg
R[1, 0] = cb * sg
R[2, 0] = -sb
R[0, 1] = -ca * sg + sa * sb * cg
R[1, 1] = ca * cg + sa * sb * sg
R[2, 1] = sa * cb
R[0, 2] = sa * sg + ca * sb * cg
R[1, 2] = -sa * cg + ca * sb * sg
R[2, 2] = ca * cb
return R
def rotate_by_euler(xyz, euler):
return dot(xyz, transpose(rotmat_from_euler(euler)))
def random_quat():
rand = random.random(3)
r1 = sqrt(1.0 - rand[0])
r2 = sqrt(rand[0])
t1 = PI2 * rand[1]
t2 = PI2 * rand[2]
return array([cos(t2)*r2, sin(t1)*r1, cos(t1)*r1, sin(t2)*r2])
def rotation_quat(triple):
# with an input of three numbers between zero and one we scan the rotational space in an equal fashion
t0 = triple[0]
if t0>1.0:t0=1.0
if t0<0.0:t0=0.0
r1 = sqrt(1.0 - t0)
r2 = sqrt(t0)
t1 = PI2 * (triple[1]%1.0)
t2 = PI2 * (triple[2]%1.0)
return array([cos(t2)*r2, sin(t1)*r1, cos(t1)*r1, sin(t2)*r2])
def quat_to_mat(quat):
q = array(quat, copy=True)
n = dot(q, q)
if n < 1.0e-15:
return identity(3)
q *= sqrt(2.0 / n)
q = outer(q, q)
return array([
[1.0-q[2, 2]-q[3, 3], q[1, 2]-q[3, 0], q[1, 3]+q[2, 0]],
[ q[1, 2]+q[3, 0], 1.0-q[1, 1]-q[3, 3], q[2, 3]-q[1, 0]],
[ q[1, 3]-q[2, 0], q[2, 3]+q[1, 0], 1.0-q[1, 1]-q[2, 2]]])
def apply_mat(m,v):
return dot(v,m)
def rotate_by_triple(xyz, triple):
rotmat = quat_to_mat(rotation_quat(triple))
return dot(xyz, rotmat)
def rotate_random(v):
return apply_mat(quat_to_mat(random_quat()),v)
def moi2(rs, ms=None):
"""Moment of inertia"""
if ms is None: ms = numpy.ones(len(rs))
else: ms = numpy.asarray(ms)
rs = numpy.asarray(rs)
N = rs.shape[1]
# Matrix is symmetric, so inner/outer loop doesn't matter
return [[(ms*rs[:,i]*rs[:,j]).sum()/ms.sum()
for i in range(N)] for j in range(N)]
def moi(rs,ms=None):
if ms is None: ms = numpy.ones(len(rs))
else: ms = numpy.asarray(ms)
rs = numpy.asarray(rs)
Ixx = (ms* (rs[:,1]*rs[:,1] + rs[:,2]*rs[:,2])).sum()
Iyy = (ms* (rs[:,0]*rs[:,0] + rs[:,2]*rs[:,2])).sum()
Izz = (ms* (rs[:,0]*rs[:,0] + rs[:,1]*rs[:,1])).sum()
Ixy =-(ms* rs[:,0] * rs[:,1]).sum()
Ixz =-(ms* rs[:,0] * rs[:,2]).sum()
Iyz =-(ms* rs[:,1] * rs[:,2]).sum()
    I = [[Ixx,Ixy,Ixz],[Ixy,Iyy,Iyz],[Ixz,Iyz,Izz]]
    return numpy.array(I)
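# --- Hedged usage sketch (added for illustration) ----------------------------
# Rotates two points by 90 degrees about the z axis with rotmat()/rotate(),
# checks that the quaternion route yields an orthonormal matrix, and prints the
# inertia tensor of the points. Only functions defined above are used.
if __name__ == "__main__":
    pts = array([[1.0, 0.0, 0.0],
                 [0.0, 1.0, 0.0]], "d")
    zaxis = make_vec([0.0, 0.0, 1.0])
    rotated = rotate(pts, zaxis, PI2/4.0)     # approx. [[0, 1, 0], [-1, 0, 0]]
    R = quat_to_mat(rotation_quat([0.3, 0.1, 0.7]))
    assert allclose(dot(R, transpose(R)), identity(3))
    print(rotated)
    print(moi(pts))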
# Copyright 2019 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
r"""Unit tests for the Strawberry Fields utils.py module"""
import pytest
pytestmark = pytest.mark.frontend
import numpy as np
from numpy.polynomial.hermite import hermval as H
from scipy.special import factorial as fac
import strawberryfields as sf
from strawberryfields.program_utils import RegRef
from strawberryfields.ops import Sgate, BSgate, LossChannel, MeasureX, Squeezed
import strawberryfields.utils as utils
R_D = np.linspace(-2.00562, 1.0198, 4)
PHI_D = np.linspace(-1.64566, 1.3734, 4)
PHI = np.linspace(0, 1.43, 4)
R = np.linspace(0, 0.21, 4)
# ===================================================================================
# Convert function tests
# ===================================================================================
@pytest.fixture
def rr():
"""RegRef fixture."""
return RegRef(0)
@pytest.fixture
def prog():
"""Program fixture."""
return sf.Program(2)
# ===================================================================================
# Initial states tests
# ===================================================================================
class TestInitialStates:
"""unit tests for the initial state function utilities"""
@pytest.mark.parametrize("r, phi", zip(R, PHI))
def test_squeezed_cov(self, hbar, r, phi, tol):
"""test squeezed covariance utility function returns correct covariance"""
cov = utils.squeezed_cov(r, phi, hbar=hbar)
expected = (hbar / 2) * np.array(
[
[
np.cosh(2 * r) - np.cos(phi) * np.sinh(2 * r),
-2 * np.cosh(r) * np.sin(phi) * np.sinh(r),
],
[
-2 * np.cosh(r) * np.sin(phi) * np.sinh(r),
np.cosh(2 * r) + np.cos(phi) * np.sinh(2 * r),
],
]
)
assert np.allclose(cov, expected, atol=tol, rtol=0)
def test_vacuum_state_gaussian(self, hbar):
"""test vacuum state returns correct means and covariance"""
means, cov = utils.vacuum_state(basis="gaussian", hbar=hbar)
assert np.all(means == np.zeros(2))
assert np.all(cov == np.identity(2) * hbar / 2)
def test_vacuum_state_fock(self, cutoff, hbar):
"""test vacuum state returns correct state vector"""
state = utils.vacuum_state(basis="fock", hbar=hbar, fock_dim=cutoff)
assert np.all(state == np.eye(1, cutoff, 0))
@pytest.mark.parametrize("r_d, phi_d", zip(R_D, PHI_D))
def test_coherent_state_gaussian(self, r_d, phi_d, hbar):
"""test coherent state returns correct means and covariance"""
means, cov = utils.coherent_state(r_d, phi_d, basis="gaussian", hbar=hbar)
alpha = r_d * np.exp(1j * phi_d)
means_expected = np.array([alpha.real, alpha.imag]) * np.sqrt(2 * hbar)
assert np.all(means == means_expected)
assert np.all(cov == np.identity(2) * hbar / 2)
@pytest.mark.parametrize("r_d, phi_d", zip(R_D, PHI_D))
def test_coherent_state_fock(self, r_d, phi_d, cutoff, hbar, tol):
"""test coherent state returns correct Fock basis state vector"""
state = utils.coherent_state(r_d, phi_d, basis="fock", fock_dim=cutoff, hbar=hbar)
n = np.arange(cutoff)
alpha = r_d * np.exp(1j * phi_d)
expected = np.exp(-0.5 * np.abs(alpha) ** 2) * alpha ** n / np.sqrt(fac(n))
assert np.allclose(state, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("r, phi", zip(R, PHI))
def test_squeezed_state_gaussian(self, r, phi, hbar, tol):
"""test squeezed state returns correct means and covariance"""
means, cov = utils.squeezed_state(r, phi, basis="gaussian", hbar=hbar)
cov_expected = (hbar / 2) * np.array(
[
[
np.cosh(2 * r) - np.cos(phi) * np.sinh(2 * r),
-2 * np.cosh(r) * np.sin(phi) * np.sinh(r),
],
[
-2 * np.cosh(r) * np.sin(phi) * np.sinh(r),
np.cosh(2 * r) + np.cos(phi) * np.sinh(2 * r),
],
]
)
assert np.all(means == np.zeros([2]))
assert np.allclose(cov, cov_expected, atol=tol, rtol=0)
@pytest.mark.parametrize("r, phi", zip(R, PHI))
def test_squeezed_state_fock(self, r, phi, cutoff, hbar, tol):
"""test squeezed state returns correct Fock basis state vector"""
state = utils.squeezed_state(r, phi, basis="fock", fock_dim=cutoff, hbar=hbar)
n = np.arange(cutoff)
kets = (np.sqrt(fac(2 * (n // 2))) / (2 ** (n // 2) * fac(n // 2))) * (
-np.exp(1j * phi) * np.tanh(r)
) ** (n // 2)
expected = np.where(n % 2 == 0, np.sqrt(1 / np.cosh(r)) * kets, 0)
assert np.allclose(state, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("r_d, phi_d, r_s, phi_s", zip(R_D, PHI_D, R, PHI))
def test_displaced_squeezed_state_gaussian(self, r_d, phi_d, r_s, phi_s, hbar, tol):
"""test displaced squeezed state returns correct means and covariance"""
means, cov = utils.displaced_squeezed_state(
r_d, phi_d, r_s, phi_s, basis="gaussian", hbar=hbar
)
a = r_d * np.exp(1j * phi_d)
means_expected = np.array([[a.real, a.imag]]) * np.sqrt(2 * hbar)
cov_expected = (hbar / 2) * np.array(
[
[
np.cosh(2 * r_s) - np.cos(phi_s) * np.sinh(2 * r_s),
-2 * np.cosh(r_s) * np.sin(phi_s) * np.sinh(r_s),
],
[
-2 * np.cosh(r_s) * np.sin(phi_s) * np.sinh(r_s),
np.cosh(2 * r_s) + np.cos(phi_s) * np.sinh(2 * r_s),
],
]
)
assert np.allclose(means, means_expected, atol=tol, rtol=0)
assert np.allclose(cov, cov_expected, atol=tol, rtol=0)
@pytest.mark.parametrize("r_d, phi_d, r_s, phi_s", zip(R_D, PHI_D, R, PHI))
def test_displaced_squeezed_state_fock(self, r_d, phi_d, r_s, phi_s, hbar, cutoff, tol):
"""test displaced squeezed state returns correct Fock basis state vector"""
state = utils.displaced_squeezed_state(
r_d, phi_d, r_s, phi_s, basis="fock", fock_dim=cutoff, hbar=hbar
)
a = r_d * np.exp(1j * phi_d)
if r_s == 0:
pytest.skip("test only non-zero squeezing")
n = np.arange(cutoff)
gamma = a * np.cosh(r_s) + np.conj(a) * np.exp(1j * phi_s) * np.sinh(r_s)
coeff = np.diag(
(0.5 * np.exp(1j * phi_s) * np.tanh(r_s)) ** (n / 2) / np.sqrt(fac(n) * np.cosh(r_s))
)
expected = H(gamma / np.sqrt(np.exp(1j * phi_s) * np.sinh(2 * r_s)), coeff)
expected *= np.exp(
-0.5 * np.abs(a) ** 2 - 0.5 * np.conj(a) ** 2 * np.exp(1j * phi_s) * np.tanh(r_s)
)
assert np.allclose(state, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("r_d, phi_d, phi_s", zip(R_D, PHI_D, PHI))
def test_displaced_squeezed_fock_no_squeezing(self, r_d, phi_d, phi_s, hbar, cutoff, tol):
"""test displaced squeezed state returns coherent state when there is no squeezing"""
state = utils.displaced_squeezed_state(
r_d, phi_d, 0, phi_s, basis="fock", fock_dim=cutoff, hbar=hbar
)
a = r_d * np.exp(1j * phi_d)
n = np.arange(cutoff)
expected = np.exp(-0.5 * np.abs(a) ** 2) * a ** n / np.sqrt(fac(n))
assert np.allclose(state, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("r, phi", zip(R, PHI))
def test_displaced_squeezed_fock_no_displacement(self, r, phi, hbar, cutoff, tol):
"""test displaced squeezed state returns squeezed state when there is no displacement"""
state = utils.displaced_squeezed_state(
0, 0, r, phi, basis="fock", fock_dim=cutoff, hbar=hbar
)
n = np.arange(cutoff)
kets = (np.sqrt(fac(2 * (n // 2))) / (2 ** (n // 2) * fac(n // 2))) * (
-np.exp(1j * phi) * np.tanh(r)
) ** (n // 2)
expected = np.where(n % 2 == 0, np.sqrt(1 / np.cosh(r)) * kets, 0)
assert np.allclose(state, expected, atol=tol, rtol=0)
def test_fock_state(self):
"""test correct fock state returned"""
n = 3
cutoff = 10
state = utils.fock_state(n, fock_dim=cutoff)
assert np.all(state == np.eye(1, cutoff, n))
@pytest.mark.parametrize("a, cutoff", [(0.212, 10), (4, 50)])
def test_even_cat_state(self, a, cutoff, tol):
"""test correct even cat state returned"""
p = 0
state = utils.cat_state(a, 0, p, fock_dim=cutoff)
# For the analytic expression, cast the integer parameter to float so
# that there's no overflow
a = float(a)
n = np.arange(cutoff)
expected = np.exp(-0.5 * np.abs(a) ** 2) * a ** n / np.sqrt(fac(n)) + np.exp(
-0.5 * np.abs(-a) ** 2
) * (-a) ** n / np.sqrt(fac(n))
expected /= np.linalg.norm(expected)
assert np.allclose(state, expected, atol=tol, rtol=0)
@pytest.mark.parametrize("a, cutoff", [(0.212, 10), (4, 50)])
def test_odd_cat_state(self, a, cutoff, tol):
"""test correct odd cat state returned"""
p = 1
state = utils.cat_state(a, 0, p, fock_dim=cutoff)
# For the analytic expression, cast the integer parameter to float so
# that there's no overflow
a = float(a)
n = np.arange(cutoff)
expected = np.exp(-0.5 * np.abs(a) ** 2) * a ** n / np.sqrt(fac(n)) - np.exp(
-0.5 * np.abs(-a) ** 2
) * (-a) ** n / np.sqrt(fac(n))
expected /= np.linalg.norm(expected)
assert np.allclose(state, expected, atol=tol, rtol=0)
# ===================================================================================
# Random matrix tests
# ===================================================================================
class TestRandomMatrices:
"""Unit tests for random matrices"""
@pytest.fixture
def modes(self):
"""Number of modes to use when creating matrices"""
return 3
@pytest.mark.parametrize("pure_state", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_covariance_square(self, modes, hbar, pure_state, block_diag):
"""Test that a random covariance matrix is the right shape"""
V = utils.random_covariance(modes, hbar=hbar, pure=pure_state, block_diag=block_diag)
assert np.all(V.shape == np.array([2 * modes, 2 * modes]))
@pytest.mark.parametrize("pure_state", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_covariance_symmetric(self, modes, hbar, pure_state, tol, block_diag):
"""Test that a random covariance matrix is symmetric"""
V = utils.random_covariance(modes, hbar=hbar, pure=pure_state, block_diag=block_diag)
assert np.allclose(V.T, V, atol=tol, rtol=0)
@pytest.mark.parametrize("pure_state", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_covariance_valid(self, modes, hbar, pure_state, tol, block_diag):
"""Test that a random covariance matrix satisfies the uncertainty principle V+i hbar O/2 >=0"""
V = utils.random_covariance(modes, hbar=hbar, pure=pure_state, block_diag=block_diag)
idm = np.identity(modes)
omega = np.concatenate(
(np.concatenate((0 * idm, idm), axis=1), np.concatenate((-idm, 0 * idm), axis=1)),
axis=0,
)
eigs = np.linalg.eigvalsh(V + 1j * (hbar / 2) * omega)
eigs[np.abs(eigs) < tol] = 0
assert np.all(eigs >= 0)
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_covariance_pure(self, modes, hbar, tol, block_diag):
"""Test that a pure random covariance matrix has correct purity"""
V = utils.random_covariance(modes, hbar=hbar, pure=True, block_diag=block_diag)
det = np.linalg.det(V) - (hbar / 2) ** (2 * modes)
assert np.allclose(det, 0, atol=tol, rtol=0)
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_covariance_mixed(self, modes, hbar, tol, block_diag):
"""Test that a mixed random covariance matrix has correct purity"""
V = utils.random_covariance(modes, hbar=hbar, pure=False, block_diag=block_diag)
det = np.linalg.det(V) - (hbar / 2) ** (2 * modes)
assert not np.allclose(det, 0, atol=tol, rtol=0)
@pytest.mark.parametrize("passive", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
    def test_random_symplectic_square(self, modes, hbar, passive, block_diag):
        """Test that a random symplectic matrix is the right shape"""
S = utils.random_symplectic(modes, passive=passive, block_diag=block_diag)
assert np.all(S.shape == np.array([2 * modes, 2 * modes]))
@pytest.mark.parametrize("passive", [True, False])
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_symplectic_symplectic(self, modes, hbar, passive, tol, block_diag):
"""Test that a random symplectic matrix is symplectic"""
S = utils.random_symplectic(modes, passive=passive, block_diag=block_diag)
idm = np.identity(modes)
omega = np.concatenate(
(np.concatenate((0 * idm, idm), axis=1), np.concatenate((-idm, 0 * idm), axis=1)),
axis=0,
)
assert np.allclose(S @ omega @ S.T, omega, atol=tol, rtol=0)
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_symplectic_passive_orthogonal(self, modes, hbar, tol, block_diag):
"""Test that a passive random symplectic matrix is orthogonal"""
S = utils.random_symplectic(modes, passive=True, block_diag=block_diag)
assert np.allclose(S @ S.T, np.identity(2 * modes), atol=tol, rtol=0)
@pytest.mark.parametrize("block_diag", [True, False])
def test_random_symplectic_active_not_orthogonal(self, modes, hbar, tol, block_diag):
"""Test that an active random symplectic matrix is not orthogonal"""
S = utils.random_symplectic(modes, passive=False, block_diag=block_diag)
        assert not np.allclose(S @ S.T, np.identity(2 * modes), atol=tol, rtol=0)
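# --- Hedged illustration (not part of strawberryfields.utils) ----------------
# The symplectic form Omega is constructed inline twice in the tests above; a
# small helper like this one makes the symplectic and uncertainty-principle
# checks reusable, e.g. np.allclose(S @ _symplectic_form(modes) @ S.T,
# _symplectic_form(modes)). The helper name is an assumption.
def _symplectic_form(modes):
    idm = np.identity(modes)
    return np.block([[0 * idm, idm], [-idm, 0 * idm]])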
# Copyright 2020 resspect software
# Author: <NAME>
#
# created on 23 March 2020
#
# Licensed GNU General Public License v3.0;
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# https://www.gnu.org/licenses/gpl-3.0.en.html
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import astropy as ap
import numpy as np
import pandas as pd
from astropy.cosmology import Planck15, Flatw0waCDM
from astropy.cosmology import w0waCDM
from astropy import constants as const
__all__ = ['assign_cosmo', 'fish_deriv_m', 'fisher_results',
'column_deriv_m', 'update_matrix', 'find_most_useful',
'compare_two_fishers']
def assign_cosmo(cosmo, model=[70, 0.3, 0.7, -0.9, 0.0]):
"""Define a new cosmology model.
Parameters
----------
cosmo: astropy.cosmology Cosmology Object
Assumes original cosmology was astropy.cosmology.w0waCDM.
model: list (optional)
Cosmology parameters: [H0, Om, Ode, w0, wa].
Default is [70, 0.3, 0.7, -0.9, 0.0].
Hard code Ob0 (Omega baryons) = 0.022
Returns
-------
newcosmo: astropy.cosmology Cosmology Object
New Cosmology Object with updated cosmology parameters
"""
ob0=0.022
om0=model[1]
ode0 =model[2]
newcosmo = cosmo.clone(name='temp cosmo', H0=model[0], Ob0=ob0,
Om0=om0, Ode0=ode0, w0=model[3], wa=model[4])
return newcosmo
def fish_deriv_m(redshift, model, step, screen=False):
"""Calculates the derivatives and the base function at given redshifts.
Parameters
----------
redshift: float or list
Redshift where derivatives will be calculated.
model: list
List of cosmological model parameters.
Order is [H0, Om, Ode, w0, wa].
step: list
List of steps the cosmological model parameter will take when determining
the derivative.
If a given entry is zero, that parameter will be kept constant.
Length must match the number of parameters in "model" variable.
screen: bool (optional)
Print debug options to screen. Default is False.
Returns
-------
m: list
List of theoretical distance modulus (mu) at a given redshift from
the base cosmology.
m_deriv: list [len(redshift), len(model)]
List of parameter derivatives of the likelihood function
at given redshifts.
"""
Ob0=0.022
Om0=model[1]
Ode0 =model[2]
cosmo = w0waCDM(model[0], Ob0, Om0, Ode0, model[3],model[4])
cosmo=assign_cosmo(cosmo, model)
m = []
m_deriv = []
c = const.c.to('km/s')
base_theory = cosmo.distmod(redshift)
m = base_theory.value
step_inds = np.where(step)[0] # look for non-zero step indices
deriv = np.zeros((len(base_theory), len(model)))
if (step_inds.size==0):
        raise ValueError('No steps taken, abort: all entries in "step" are zero.')
else:
if screen:
print('\n')
print('Computing Fisher derivatives...')
for i, stepp in enumerate(step_inds):
if screen:
print('we are stepping in :', model[stepp],
' with step size', step[stepp])
cosmo = assign_cosmo(cosmo, model)
theory = np.zeros((len(base_theory),2))
for count,j in enumerate([-1,1]):
tempmodel = list(model)
tempmodel[stepp] = model[stepp] + j*step[stepp]
c = const.c.to('km/s')
cosmo = assign_cosmo(cosmo, tempmodel)
tmp = cosmo.distmod(redshift)
theory[:,count] = tmp
deriv[:,stepp] = (theory[:,1] - theory[:,0])/(2.*step[stepp])
m_deriv = deriv
return m, m_deriv
def fisher_results(redshift, mu_err):
"""Computes the Fisher Matrix. Assumes we only care about Om and w0.
TBD: make stepvec an input.
TBD: Priors as inputs?
Parameters
----------
redshift: list [float]
Redshift.
mu_err: list [float]
Error in distance modulus.
Returns
-------
sigma: list
Error/Standard deviation of Om and w0, respectively.
covmat: np.array [2, 2]
Covariance matrix of Om and w0. Om is first row, w0 is second.
"""
if any(np.array(redshift) < 0):
raise ValueError('Redshift must be greater than zero! Galaxies must be moving away.')
stepvec = np.array([0, 0.001, 0.00, 0.1, 0., 0.0, 0.0, 0.0])
model = [70., 0.3, 0.7, -1.0, 0.]
names = ['hubble', 'omega_m', 'omega_de', 'w0', 'wa']
step_inds = np.where(stepvec)[0]
fishermu, deriv = fish_deriv_m(redshift, model, stepvec)
cov = np.diag(mu_err**2)
inv_cov = np.diag(1./mu_err**2.)
# Initialising the Fisher Matrix
FM = np.zeros((len(step_inds), len(step_inds), len(mu_err) ))
# Compute the Fisher matrix
for i in range(len(step_inds)):
# loop over variables
for j in range(len(step_inds)):
# loop over variables
for k in range(len(redshift)):
# loop over redshifts
invcov = inv_cov[k,k]
FM[i,j,k] = np.dot(np.dot(deriv[k, step_inds[i]], invcov), deriv[k, step_inds[j]])
# sum over the redshift direction
fishmat = np.sum(FM,axis=2)
# Compute the prior matrix
prior_vec = np.array([0.1, 0.02, 0.0006, 0.2, 0.2])
priormat = np.diag(1./prior_vec[step_inds]**2.)
final_FM = fishmat + priormat
covmat = np.linalg.inv(final_FM)
sigma = np.sqrt(covmat.diagonal())
return sigma, covmat
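# --- Hedged usage sketch (function name and inputs are assumptions) ----------
# fisher_results() only needs redshifts and distance-modulus errors, so a toy
# forecast for a synthetic supernova sample with constant 0.15 mag errors is:
def _toy_fisher_forecast():
    z_toy = np.linspace(0.05, 1.0, 50)
    mu_err_toy = np.full(50, 0.15)
    sigma, covmat = fisher_results(z_toy, mu_err_toy)
    # sigma holds the forecast errors on (Om, w0); covmat their covariance
    return sigma, covmat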
def column_deriv_m(redshift, mu_err, model, step):
"""Calculate a column derivative of your model.
    Define a matrix P such that P_ik = (1/sigma_i) * dM(i, params)/d(param_k),
    where M is the model. This column matrix holds k constant.
Parameters
----------
redshift: float or list
Redshift.
mu_err: float or list
Error in distance modulus.
model: list
List of cosmological model parameters.
Order is [H0, om, ol, w0, wa].
step: list
        List of steps the cosmological model parameter will take when determining
the derivative.
If a given entry is zero, that parameter will be kept constant.
Length must match the number of parameters in "model" variable.
Returns
-------
m: list
List of theoretical distance modulus (mu) at a given redshift from
the base cosmology.
m_deriv: list
List of derivatives for one parameter of the likelihood function
at given redshifts.
"""
Ob0=0.022
Om0=model[1]
Ode0 =model[2]
cosmo = w0waCDM(model[0], Ob0, Om0, Ode0, model[3],model[4])
cosmo=assign_cosmo(cosmo, model)
m = []
m_deriv = []
c = const.c.to('km/s')
base_theory = cosmo.distmod(redshift)
m = base_theory.value
step_inds = np.where(step)[0] # look for non-zero step indices
deriv = np.zeros(len(model))
if (step_inds.size==0):
        raise ValueError('No steps taken, abort: all entries in "step" are zero.')
else:
for i, stepp in enumerate(step_inds):
cosmo = assign_cosmo(cosmo, model)
theory = np.zeros((1,2))
for count,j in enumerate([-1,1]):
tempmodel = list(model)
tempmodel[stepp] = model[stepp] + j*step[stepp]
c = const.c.to('km/s')
cosmo = assign_cosmo(cosmo, tempmodel)
tmp = cosmo.distmod(redshift)
theory[:,count] = tmp.value
deriv[stepp] = (theory[:,1] - theory[:,0])/(2.*step[stepp])
m_deriv = 1.0/mu_err * deriv
return m, m_deriv
def update_matrix(redshift, mu_err, covmat):
"""Update matrix calculated from Hees et al (2019).
https://ui.adsabs.harvard.edu/abs/2019ApJ...880...87H/abstract
How much the Fisher Matrix changed given a new set of observations.
TBD: make stepvec an input.
Parameters
----------
redshift: float or list
Redshift.
mu_err: float or list
Error in distance modulus.
covmat: np.array
Covariance matrix from running the full Fisher Matrix analysis.
Returns
-------
u: np.array (2, 2)
Update to the Fisher Matrix covariance matrix given new observations.
"""
    stepvec = np.array([0, 0.001, 0.00, 0.1, 0.])
# -*- coding: utf-8 -*-
"""
Tests for abagen.correct module
"""
import itertools
import numpy as np
import pandas as pd
import pytest
import scipy.stats as sstats
from abagen import allen, correct, io
from abagen.utils import flatten_dict
@pytest.fixture(scope='module')
def donor_expression(testfiles, atlas):
return allen.get_expression_data(atlas['image'], atlas['info'],
exact=False, return_donors=True,
donors=['12876', '15496'])
def test__unpack_tuple():
assert correct._unpack_tuple((3,)) == 3
assert correct._unpack_tuple((3, 3)) == (3, 3)
assert correct._unpack_tuple([2]) == 2
assert correct._unpack_tuple([2, 4]) == [2, 4]
assert correct._unpack_tuple(np.array([3])) == 3
assert np.all(correct._unpack_tuple(np.array([3, 3])) == [3, 3])
def test__batch():
rs = np.random.RandomState(1234)
# p-values for ANOVA should all be ~0 (large group differences) before
# batch correction
y = [rs.normal(size=(100, 1000)) + f for f in [5, 0, 0]]
assert np.allclose(sstats.f_oneway(*y)[1], 0)
# F-values for ANOVA should all be ~0 (no group differences) after batch
# correction; p-values returned here are sometimes NaN so not a good test
out = correct._batch_correct(y)
assert np.allclose(sstats.f_oneway(*out)[0], 0)
# mean expressions after correction should be ~equal
assert np.allclose([o.mean() for o in out], 1.24871965683026)
with pytest.raises(ValueError):
correct._batch_correct([y[0]])
def test__rescale():
rs = np.random.RandomState(1234)
y = rs.normal(size=(100, 1000)) + 10
out = correct._rescale(y)
# default max = 1, min =0
assert np.allclose(out.max(axis=0), 1) and np.allclose(out.min(axis=0), 0)
# can specify alternative min/max
out = correct._rescale(y, low=5, high=6)
assert np.allclose(out.max(axis=0), 6) and np.allclose(out.min(axis=0), 5)
# different axis works, too!
out = correct._rescale(y, axis=1)
assert np.allclose(out.max(axis=1), 1) and np.allclose(out.min(axis=1), 0)
@pytest.mark.parametrize('a', [0, 1])
def test__rs(a):
rs = np.random.RandomState(1234)
# create an array with a pretty ridiculous outlier effect to try and fix
y = rs.normal(size=(100, 1000))
y[0] += 1000
y[:, 0] += 1000
out = correct._rs(y, axis=a)
# max will always be less than one, min will always be greater than zero
assert np.all(out.max(axis=a) <= 1) and np.all(out.min(axis=a) >= 0)
# we should have reduced skewness / kurtosis compared to the original
assert np.all(sstats.skew(out, axis=a) < sstats.skew(y, axis=a))
assert np.all(sstats.kurtosis(out, axis=a) < sstats.kurtosis(y, axis=a))
# this is a weird test; we're gonna bin the data at 0.2 intervals and make
# sure no bins are empty. if one is something probably went wrong, right?
for low in np.arange(0, 1, 0.2):
hi = low + 0.2 + np.spacing(1) # include 1
        assert np.all(np.sum(np.logical_and(out >= low, out < hi), axis=a) > 0)
import pandas as pd
import numpy as np
import sklearn as sk
import os
import matplotlib.pyplot as plt
from pandas.plotting import scatter_matrix
from sklearn.model_selection import StratifiedShuffleSplit
from sklearn.base import BaseEstimator, TransformerMixin
from sklearn.pipeline import Pipeline, FeatureUnion
from sklearn.preprocessing import OneHotEncoder, LabelBinarizer, StandardScaler
from data_preparation import data_reader, save_data, load_data
import periodictable as pt
import matplotlib
import re
from mendeleev import elements as el
from numpy.linalg import norm
from mendeleev import get_table
# from mendeleev.fetch import fetch_table
pd.options.mode.chained_assignment = None
matplotlib.use('TKAgg')
pd.set_option('display.max_rows', 500)
pd.set_option('display.max_columns', 500)
pd.set_option('display.width', 1000)
# To initialize the periodic elemental table for the further calculation
element_dict = {}
for i, j in enumerate(pt.elements):
element_dict.update({'{}'.format(i): j})
element_df = get_table('elements')
e_conf = element_df['electronic_configuration']
def stratified_data(raw_df):
# data = raw_df.iloc[:, [i for i in range(28, 108)] + [24, 20, 21, 22, 23]]
data = raw_df
data.loc[:, 'YEAR_cat'] = np.ceil(
(data.loc[:, 'Year']-data.loc[:, 'Year'].min()+1)/
(data.loc[:, 'Year'].max()-data.loc[:, 'Year'].min()+1)*5
)
data.loc[:, 'Na_cat'] = np.ceil(data.loc[:, '11'] / data.loc[:, '11'].max()*5)
return data
def stratify_split(data_cat_added):
split = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=42)
for train_index, test_index in split.split(data_cat_added, data_cat_added['Na_cat']):
strat_train_data = data_cat_added.loc[train_index]
strat_test_data = data_cat_added.loc[test_index]
# for set in (strat_train_data, strat_test_data):
# set.drop(['Na_cat'], axis=1, inplace=True)
# set.drop(['Year'], axis=1, inplace=True)
return strat_train_data, strat_test_data
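# --- Hedged usage sketch (illustration only; the toy frame is an assumption) -
# stratified_data() adds the 'Na_cat' bins and stratify_split() then performs
# the stratified 80/20 split on them, e.g.:
#   df = pd.DataFrame({'Year': [1990, 2000, 2010, 2020] * 25,
#                      '11': np.random.rand(100)})
#   train_df, test_df = stratify_split(stratified_data(df))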
class DataframeSelector(BaseEstimator, TransformerMixin): # transform the data from pd.df to array
def __init__(self, attribute_names):
self.attribute_names = attribute_names
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
return X[self.attribute_names].values
class CombinedAttributesAdderNBO(BaseEstimator, TransformerMixin): # ADD NBO ratio
def __init__(self, X_list, add_total_alkali=True, ):
self.add_total_alkali = add_total_alkali
self.X_list = np.array(X_list).astype('object')
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
Li = np.where(self.X_list == '3')[0][0] # (array([i], dtype=int64))[0][0]
Na = np.where(self.X_list == '11')[0][0]
K = np.where(self.X_list == '19')[0][0]
Rb = np.where(self.X_list == '37')[0][0]
Cs = np.where(self.X_list == '55')[0][0]
Mg = np.where(self.X_list == '12')[0][0]
Ca = np.where(self.X_list == '20')[0][0]
Sr = np.where(self.X_list == '38')[0][0]
Ba = np.where(self.X_list == '56')[0][0]
Si = np.where(self.X_list == '14')[0][0]
Al = np.where(self.X_list == '13')[0][0]
B = np.where(self.X_list == '5')[0][0]
if self.add_total_alkali:
NBO_T_temp = (X[:, Li] + X[:, Na] + X[:, K] + X[:, Rb] +X[:, Cs] + 2*X[:, Mg] + 2*X[:, Ca] + 2*X[:, Sr] +
2*X[:, Ba] -X[:, Al]-X[:, B])/(X[:, Si]+X[:, Al]+X[:, B])
NBO_T = np.where(NBO_T_temp < 0, 0, NBO_T_temp)
return np.c_[X, NBO_T]
else:
return X
class CombinedAttributesAdderAtomicNumber(BaseEstimator, TransformerMixin):
# the order of features added is max, min, mean, std, mode1, mode2
def __init__(self, X_list, el_list, add_atomic_number=True):
self.add_atomic_number = add_atomic_number
self.X_list = np.array(X_list).astype('object')
self.el_list = el_list
self.atomic_number_list = np.array(
[
self.el_list[i].atomic_number for i in range(len(self.el_list))
]
)
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.add_atomic_number: # x[i, :80] select all elements except other features
atomic_number_max = np.array(
[
int(self.X_list[max(np.where(X[i, :80] > 0)[0])]) for i in range(X.shape[0])
]
)
atomic_number_min = np.array(
[
int(self.X_list[min(np.where(X[i, :80] > 0)[0])]) for i in range(X.shape[0])
]
)
atomic_number_mean = np.matmul(X[:, :80], self.atomic_number_list)
atomic_number_std = np.array(
[
np.matmul(X[i, :80], abs(self.atomic_number_list-atomic_number_mean[i]))
for i in range(X.shape[0])
]
)
atomic_mode_1 = np.array(
[
self.atomic_number_list[np.where(X[i, :80]==np.sort(X[i, :80])[-1])[0][0]]
for i in range(X.shape[0])
]
)
atomic_mode_2 = np.array(
[
self.atomic_number_list[np.where(X[i, :80]==np.sort(X[i, :80])[-2])[0][0]]
for i in range(X.shape[0])
]
)
return np.c_[X, atomic_number_max, atomic_number_min, atomic_number_mean,
atomic_number_std, atomic_mode_1, atomic_mode_2]
else:
return X
'''
the order of atomic property we used is:
'atomic_number', 'atomic_radius', 'mendeleev_number', 'atomic_weight', 'melting_point', 'period',
'series_id', 'covalent_radius_cordero', 'en_allen',
'''
class CombinedAttributesAdderAtomicProperty(BaseEstimator, TransformerMixin):
# the order of features added is max, min, mean, std, mode1, mode2
def __init__(self, X_list, property_id, mendeleev_df, add_atomic_property=True):
self.add_atomic_property = add_atomic_property
self.X_list = np.array(X_list).astype('object') # in X_list, the number of H is '1'
self.mendeleev_df = mendeleev_df
self.property_id = property_id
self.atomic_property_list_full = self.mendeleev_df[self.property_id].fillna(method='pad')
self.atomic_property_list = self.atomic_property_list_full.iloc[
self.X_list.astype('int')-1 # the row start with 0, e.g. row number of H is zero
]
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.add_atomic_property: # x[i, :80] select all elements except other features
atomic_property_max = np.array(
[
self.atomic_property_list.iloc[
np.where(X[i, :80] > 0)[0]
].max() for i in range(X.shape[0])
]
)
atomic_property_min = np.array(
[
self.atomic_property_list.iloc[
np.where(X[i, :80] > 0)[0]
].min() for i in range(X.shape[0])
]
)
atomic_property_mean = np.matmul(X[:, :80], self.atomic_property_list)
atomic_property_std = np.array(
[
np.matmul(X[i, :80], abs(self.atomic_property_list - atomic_property_mean[i]))
for i in range(X.shape[0])
]
)
atomic_mode_1 = np.array(
[
self.atomic_property_list.iloc[np.where(X[i, :80] == np.sort(X[i, :80])[-1])[0][0]]
for i in range(X.shape[0])
]
)
atomic_mode_2 = np.array(
[
self.atomic_property_list.iloc[np.where(X[i, :80] == np.sort(X[i, :80])[-2])[0][0]]
for i in range(X.shape[0])
]
)
return np.c_[X, atomic_property_max, atomic_property_min, atomic_property_mean,
atomic_property_std, atomic_mode_1, atomic_mode_2]
else:
return X
'''
CombinedAttributesAdderElectronProperty
property id for electron configuration is 's', 'd', 'p', 'f', if filled=True, return valence electrons,
if filled=False, return unfilled states
'''
class CombinedAttributesAdderElectronProperty(BaseEstimator, TransformerMixin):
def __init__(self, X_list, property_id, mendeleev_df, add_electron_property=True, filled=True):
self.add_electron_property = add_electron_property
self.filled = filled
self.X_list = np.array(X_list).astype('object') # in X_list, the number of H is '1'
self.mendeleev_df = mendeleev_df
self.e_conf = mendeleev_df['electronic_configuration']
self.property_id = property_id
self.electron_dict = {'s': 2, 'd': 10, 'p': 6, 'f': 14}
self.electron_list_full = []
for element in self.e_conf:
if re.findall(r'{}'.format(self.property_id), element):
if re.findall(r'{}\d+'.format(self.property_id), element):
self.electron_list_full.append(
int(re.findall(r'{}\d+'.format(self.property_id), element)[0][1:])
)
else:
self.electron_list_full.append(1)
else:
self.electron_list_full.append(0)
self.electron_list = np.array(self.electron_list_full)[self.X_list.astype('int')-1]
self.unfilled_electron_list = self.electron_dict.get(self.property_id)-self.electron_list
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.add_electron_property: # x[i, :80] select all elements except other features
if self.filled:
electron_property_max = np.array(
[
self.electron_list[
np.where(X[i, :80] > 0)[0]
].max() for i in range(X.shape[0])
]
)
electron_property_min = np.array(
[
self.electron_list[
np.where(X[i, :80] > 0)[0]
].min() for i in range(X.shape[0])
]
)
electron_property_mean = np.matmul(X[:, :80], self.electron_list)
electron_property_std = np.array(
[
np.matmul(X[i, :80], abs(self.electron_list - electron_property_mean[i]))
for i in range(X.shape[0])
]
)
electron_mode_1 = np.array(
[
self.electron_list[np.where(X[i, :80] == np.sort(X[i, :80])[-1])[0][0]]
for i in range(X.shape[0])
]
)
electron_mode_2 = np.array(
[
self.electron_list[np.where(X[i, :80] == np.sort(X[i, :80])[-2])[0][0]]
for i in range(X.shape[0])
]
)
return np.c_[X, electron_property_max, electron_property_min, electron_property_mean,
electron_property_std, electron_mode_1, electron_mode_2]
else:
electron_property_max = np.array(
[
self.unfilled_electron_list[
np.where(X[i, :80] > 0)[0]
].max() for i in range(X.shape[0])
]
)
electron_property_min = np.array(
[
self.unfilled_electron_list[
np.where(X[i, :80] > 0)[0]
].min() for i in range(X.shape[0])
]
)
electron_property_mean = np.matmul(X[:, :80], self.unfilled_electron_list)
electron_property_std = np.array(
[
np.matmul(X[i, :80], abs(self.unfilled_electron_list - electron_property_mean[i]))
for i in range(X.shape[0])
]
)
electron_mode_1 = np.array(
[
self.unfilled_electron_list[np.where(X[i, :80] == np.sort(X[i, :80])[-1])[0][0]]
for i in range(X.shape[0])
]
)
electron_mode_2 = np.array(
[
self.unfilled_electron_list[np.where(X[i, :80] == np.sort(X[i, :80])[-2])[0][0]]
for i in range(X.shape[0])
]
)
return np.c_[X, electron_property_max, electron_property_min, electron_property_mean,
electron_property_std, electron_mode_1, electron_mode_2]
else:
return X
'''
add Lp norm features using np.linarg.norm(). p_list = [0, 2, 3, 5, 7, 10]
'''
class CombinedAttributesAdderLpNorm(BaseEstimator, TransformerMixin):
def __init__(self, X_list, p_list, add_Lp_Norm=True):
self.X_list = np.array(X_list).astype('object') # in X_list, the number of H is '1'
self.p_list = p_list
self.add_Lp_Norm = add_Lp_Norm
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.add_Lp_Norm:
Lp_Norm = np.zeros([X.shape[0], len(self.p_list)])
for num, i in enumerate(self.p_list):
Lp_Norm[:, num] = norm(X[:, :80], i, axis=1)
return np.c_[X, Lp_Norm]
else:
return X
class CombinedAttributesAdderOrbitalOccupation(BaseEstimator, TransformerMixin):
def __init__(self, X_list, mendeleev_df, add_orbital_occupation=True):
self.X_list = np.array(X_list).astype('object') # in X_list, the number of H is '1'
self.add_orbital_occupation = add_orbital_occupation
self.mendeleev_df = mendeleev_df
self.property_ids = ['s', 'p', 'd', 'f']
self.e_conf = mendeleev_df['electronic_configuration']
self.electron_list_s_full = []
self.electron_list_p_full = []
self.electron_list_d_full = []
self.electron_list_f_full = []
for list, id in zip(
(
self.electron_list_s_full,
self.electron_list_p_full,
self.electron_list_d_full,
self.electron_list_f_full
), self.property_ids
):
for element in self.e_conf:
if re.findall(r'{}'.format(id), element):
if re.findall(r'{}\d+'.format(id), element):
list.append(
int(re.findall(r'{}\d+'.format(id), element)[0][1:])
)
else:
list.append(1)
else:
list.append(0)
self.electron_list_s = np.array(self.electron_list_s_full)[self.X_list.astype('int')-1]
self.electron_list_p = np.array(self.electron_list_p_full)[self.X_list.astype('int')-1]
self.electron_list_d = np.array(self.electron_list_d_full)[self.X_list.astype('int')-1]
self.electron_list_f = np.array(self.electron_list_f_full)[self.X_list.astype('int')-1]
self.electron_list_total = self.electron_list_s + self.electron_list_p + \
self.electron_list_d + self.electron_list_f
def fit(self, X, y=None):
return self
def transform(self, X, y=None):
if self.add_orbital_occupation:
s = np.matmul(X[:, :80], self.electron_list_s)/\
np.matmul(X[:, :80], self.electron_list_total)
p = np.matmul(X[:, :80], self.electron_list_p)/\
np.matmul(X[:, :80], self.electron_list_total)
d = np.matmul(X[:, :80], self.electron_list_d)/\
np.matmul(X[:, :80], self.electron_list_total)
f = np.matmul(X[:, :80], self.electron_list_f)/\
np.matmul(X[:, :80], self.electron_list_total)
return np.c_[X, s, p, d, f]
else:
return X
class NumAttributesDropper(BaseEstimator, TransformerMixin):
def __init__(self, X_list, drop_element_conc=True):
        self.X_list = np.array(X_list)
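# --- Hedged usage sketch (not part of the original module) -------------------
# The adders above are ordinary sklearn transformers, so they chain inside a
# Pipeline after DataframeSelector. The function name is an assumption;
# `attribute_names` must list the dataframe columns and `X_list` the 80 element
# columns (atomic numbers as strings) in the same order.
def build_feature_pipeline(attribute_names, X_list):
    return Pipeline([
        ('selector', DataframeSelector(attribute_names)),
        ('nbo', CombinedAttributesAdderNBO(X_list)),
        ('atomic_radius', CombinedAttributesAdderAtomicProperty(
            X_list, 'atomic_radius', element_df)),
        ('lp_norm', CombinedAttributesAdderLpNorm(X_list, [0, 2, 3, 5, 7, 10])),
        ('scaler', StandardScaler()),
    ])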
from smt_solver.tq_solver.linear_solver.alebgra_utils.lu_factorization import LUFactorization
import numpy as np
class TestLUFactorization:
MATRIX1 = np.array([[3., 1., 0.], [1., 1., 0.], [4., 3., 1.]])
@staticmethod
def test_generate_pivot_list():
matrix = TestLUFactorization.MATRIX1
pivot_list = LUFactorization.generate_pivot_list(matrix)
assert np.all(LUFactorization.pivot_array(pivot_list, matrix) == np.array([[4, 3, 1], [3, 1, 0], [1, 1, 0]]))
@staticmethod
def test_pivoting():
matrix = np.copy(TestLUFactorization.MATRIX1)
pivot_list = LUFactorization.generate_pivot_list(matrix)
        assert np.all(LUFactorization.pivot_array(pivot_list, matrix) == np.array([[4, 3, 1], [3, 1, 0], [1, 1, 0]]))
import numpy as np, os
import pickle
from code.train_data_preprocessing.vectorize_dataset import get_vectorized_dataset
from code.dataset_preparation.data_prep_util import replace_punctuations
from code.train_data_preprocessing.preprocess_util import lemmatizer, get_candidate_query_phrases
def get_all_available_actions(activity_name, node_DB, para_DB):
action_desc_set = set()
action_names = set()
for action_desc in node_DB['action']:
if action_desc.startswith(activity_name+'->'):
action_desc_set.add(action_desc)
if len(action_desc_set) > 0:
for action_desc in action_desc_set:
for action_id in node_DB['action'][action_desc]:
for para in para_DB[action_id]:
if para['Type'] == 'ACTION':
action_names.add(action_desc +'#'+ para['description'])
return action_names
def add_to_vocab(phrase, vocab_to_id, id_to_vocab):
for wd in replace_punctuations(phrase).lower().split():
wd = lemmatizer.lemmatize(wd)
if wd not in vocab_to_id:
vocab_to_id[wd] = len(vocab_to_id)
id_to_vocab[vocab_to_id[wd]] = wd
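# --- Hedged illustration (assumes a WordNet-style lemmatizer) ----------------
# add_to_vocab() lower-cases, strips punctuation and lemmatizes each token
# before assigning it a fresh integer id, e.g.:
#   vocab_to_id, id_to_vocab = {}, {}
#   add_to_vocab('Book two flights', vocab_to_id, id_to_vocab)
#   # vocab_to_id -> {'book': 0, 'two': 1, 'flight': 2}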
def generate_pos_neg_examples(data, vocab_to_id, id_to_vocab, node_DB, para_DB, p_DB, update_vocab):
if update_vocab:
# add wild card ....
add_to_vocab('null_action', vocab_to_id, id_to_vocab)
add_to_vocab('null_para', vocab_to_id, id_to_vocab)
add_to_vocab('null_val', vocab_to_id, id_to_vocab)
add_to_vocab('stop', vocab_to_id, id_to_vocab)
data_query_DB = []
for q_inst, path_inst_set in data.items():
act_path_seq = []
para_path_seq = []
all_pos_actions = set()
for path_seq_inst in path_inst_set:
for action, _ in path_seq_inst:
all_pos_actions.add(action)
un_inst_para_name_set = set()
# positive training set ...
for path_seq_inst in path_inst_set:
for pos_action, pos_para_dict in path_seq_inst:
''' For action intent learning ...'''
pos_action_name = pos_action.split("#")[1].strip()
# update vocab
if update_vocab:
add_to_vocab(pos_action_name, vocab_to_id, id_to_vocab)
inst_para_name_set = set(pos_para_dict.keys())
if pos_action in p_DB:
pos_para_name_set = set(p_DB[pos_action].keys())
else:
pos_para_name_set = set()
un_inst_para_name_set = un_inst_para_name_set.union(pos_para_name_set.difference(inst_para_name_set))
# update vocab
if update_vocab:
for pos_para_name in pos_para_name_set:
add_to_vocab(pos_para_name, vocab_to_id, id_to_vocab)
curr_node = pos_action.split("->")[0].strip()
# get all negative actions ...
neg_act_list1 = []
# other available actions in curr_node ...
all_actions = get_all_available_actions(curr_node, node_DB, para_DB)
neg_action_set = all_actions.difference(all_pos_actions)
if len(neg_action_set) == 0:
neg_action_set.add(curr_node + '#null_action')
for neg_action in neg_action_set:
neg_action_name = neg_action.split("#")[1].strip()
# update vocab
if update_vocab:
add_to_vocab(neg_action_name, vocab_to_id, id_to_vocab)
neg_para_name_set = []
if neg_action in p_DB:
for neg_para_name, _ in p_DB[neg_action].items():
neg_para_name_set.append(neg_para_name)
if len(neg_para_name_set) == 0:
neg_para_name_set.append('null_para')
# update vocab
if update_vocab:
for neg_para_name in neg_para_name_set:
add_to_vocab(neg_para_name, vocab_to_id, id_to_vocab)
neg_act_list1.append((neg_action, neg_para_name_set))
act_path_seq.append((curr_node, pos_action, pos_para_name_set, neg_act_list1))
''' For parameter value learning ... '''
for pos_para_name, pos_para_tup in pos_para_dict.items():
pos_para_val = pos_para_tup[0]
pos_para_type = pos_para_tup[1]
pos_para_sample_val = pos_para_tup[2]
if pos_para_type == 0:
all_fixed_val = get_candidate_query_phrases(q_inst) # get candidate noun phrases
else:
all_fixed_val = set([fixed_val for fixed_val, _, para_type in p_DB[pos_action][pos_para_name]])
neg_para_val_set = all_fixed_val.difference({pos_para_val})
if update_vocab:
add_to_vocab(pos_para_name, vocab_to_id, id_to_vocab)
add_to_vocab(pos_para_val, vocab_to_id, id_to_vocab)
for neg_val in neg_para_val_set:
add_to_vocab(neg_val, vocab_to_id, id_to_vocab)
if len(neg_para_val_set) == 0:
neg_para_val_set = {'null_val'}
para_path_seq.append((pos_action, pos_para_name, pos_para_val, pos_para_type,
pos_para_sample_val, neg_para_val_set))
add_to_vocab(q_inst.strip(), vocab_to_id, id_to_vocab)
data_query_DB.append((q_inst, act_path_seq, para_path_seq, un_inst_para_name_set))
return data_query_DB
def prepare_vectorized_dataset(trace_id, dataset_dump):
vocab_to_id = {}
id_to_vocab = {}
train_data, valid_data, test_data, node_DB, para_DB, p_DB = dataset_dump
# expand training dataset with pos and neg examples ...
train_data_pos_neg = generate_pos_neg_examples(train_data, vocab_to_id, id_to_vocab, node_DB, para_DB, p_DB, update_vocab=True)
print('pos neg example generation done for training ....')
assert len(train_data_pos_neg) == len(train_data)
valid_data_pos_neg = generate_pos_neg_examples(valid_data, vocab_to_id, id_to_vocab, node_DB, para_DB, p_DB, update_vocab=False)
print('pos neg example generation done for valid ....')
# generate dataset ..
train = get_vectorized_dataset(train_data_pos_neg, vocab_to_id, p_DB)
print('training data vectorized ....')
valid = get_vectorized_dataset(valid_data_pos_neg, vocab_to_id, p_DB)
print('valid data vectorized ....')
print(np.array(train[0]).shape, '----\n')
print(np.array(train[1]).shape, '----\n')
print(np.array(train[2]).shape, '----\n')
print('action training data shapes ----\n')
for i in range(7):
print(np.array(train[0][0][i]).shape, '----\n')
print('para training data ..tag shapes ----\n')
for i in range(9):
print(np.array(train[1][0][i]).shape, '----\n')
print('para training data ...match shapes ----\n')
for i in range(10):
print(np.array(train[2][0][i]).shape, '----\n')
print('valid data stats ----\n')
print(np.array(valid[0]).shape, '----\n')
    print(np.array(valid[1]).shape, '----\n')
# BSD 3-Clause License
#
# Copyright (c) 2021, <NAME>
# All rights reserved.
#
# Redistribution and use in source and binary forms, with or without
# modification, are permitted provided that the following conditions are met:
#
# 1. Redistributions of source code must retain the above copyright notice, this
# list of conditions and the following disclaimer.
#
# 2. Redistributions in binary form must reproduce the above copyright notice,
# this list of conditions and the following disclaimer in the documentation
# and/or other materials provided with the distribution.
#
# 3. Neither the name of the copyright holder nor the names of its
# contributors may be used to endorse or promote products derived from
# this software without specific prior written permission.
#
# THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
# AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
# IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
# DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE
# FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
# DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
# SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER
# CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY,
# OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
# OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
from __future__ import annotations
import numpy as np
from .type_aliases import NPF, NPI
__all__ = ["region", "split"]
def region(low: NPF, high: NPF) -> tuple[NPF, ...]:
"""Compute the hyper-rectangular region parameters from given limits of integration."""
# :::::::::::::::: Shapes ::::::::::::::::::
# {low, high}.shape [ domain_dim, events ]
# centers.shape [ domain_dim, regions_events ]
# halfwidth.shape [ domain_dim, regions_events ]
# vol.shape [ regions_events ]
if low.shape != high.shape:
raise RuntimeError(
"Vector limits of integration must be equivalent.", low.shape, high.shape
)
if low.ndim == 1:
low = np.expand_dims(low, 0)
high = np.expand_dims(high, 0)
if low.ndim != 2:
raise RuntimeError("Input limits shape not supported.")
centers = (high + low) * 0.5
halfwidth = (high - low) * 0.5
vol = np.prod(2 * halfwidth, axis=0)
return centers, halfwidth, vol
def split(centers: NPF, halfwidth: NPF, volumes: NPF, split_dim: NPI):
# centers.shape [ domain_dim, regions_events ]
# split_dim.shape [ 1, regions_events ]
if np.amin(split_dim) < 0 or np.amax(split_dim) >= (centers.shape[0]):
raise IndexError("split dimension invalid")
if split_dim.ndim < centers.ndim:
split_dim = np.expand_dims(split_dim, 0)
## {center, hwidth} [ domain_dim, (regions, events) ]
mask = np.zeros_like(centers, dtype=np.bool_)
np.put_along_axis(mask, split_dim, True, 0)
h = np.copy(halfwidth)
h[mask] *= 0.5
v = np.copy(volumes)
v *= 0.5
c1 = np.copy(centers)
c2 = np.copy(centers)
c1[mask] -= h[mask]
c2[mask] += h[mask]
    c = np.concatenate((c1, c2), axis=1)
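# --- Hedged usage sketch (function name below is an assumption) --------------
# region() broadcasts per-event integration limits into center/halfwidth/volume
# form; a single 2-D box [0, 1] x [0, 2] looks like this:
def _region_example():
    lo = np.array([[0.0], [0.0]])   # shape [domain_dim, events]
    hi = np.array([[1.0], [2.0]])
    centers, halfwidth, vol = region(lo, hi)
    # centers -> [[0.5], [1.0]], halfwidth -> [[0.5], [1.0]], vol -> [2.0]
    return centers, halfwidth, vol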
from __future__ import division
import itertools
import warnings
import numpy as np
scipy_gaussian_filter = None # expensive
from .base import ndfeature, winitfeature, imgfeature
from ._gradient import gradient_cython
from .windowiterator import WindowIterator, WindowIteratorResult
def _np_gradient(pixels):
"""
This method is used in the case of multi-channel images (not 2D images).
The output ordering is identical to the gradient() method, returning
a 2 * n_channels image with gradients in order of the first axis derivative
over all the channels, then the second etc. For example, in the case of
a 3D image with 2 channels, the ordering would be:
I[:, 0, 0, 0] = [A_0, B_0, A_1, B_1, A_2, B_2]
where A and B are the 'channel' labels (synonymous with RGB for a colour
image) and 0,1,2 are the axis labels (synonymous with y,x for a 2D image).
"""
n_dims = pixels.ndim - 1
grad_per_dim_per_channel = [np.gradient(g, edge_order=1)
for g in pixels]
# Flatten out the separate dims
grad_per_channel = list(itertools.chain.from_iterable(
grad_per_dim_per_channel))
# Add a channel axis for broadcasting
grad_per_channel = [g[None, ...] for g in grad_per_channel]
# Permute the list so it is first axis, second axis, etc
grad_per_channel = [grad_per_channel[i::n_dims]
for i in range(n_dims)]
grad_per_channel = list(itertools.chain.from_iterable(grad_per_channel))
# Concatenate gradient list into an array (the new_image)
return np.concatenate(grad_per_channel, axis=0)
@ndfeature
def gradient(pixels):
r"""
Calculates the gradient of an input image. The image is assumed to have
channel information on the first axis. In the case of multiple channels,
it returns the gradient over each axis over each channel as the first axis.
The gradient is computed using second order accurate central differences in
the interior and first order accurate one-side (forward or backwards)
differences at the boundaries.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array where the first dimension
is interpreted as channels. This means an N-dimensional image is
represented by an N+1 dimensional array.
If the image is 2-dimensional the pixels should be of type
float/double (int is not supported).
Returns
-------
gradient : `ndarray`
The gradient over each axis over each channel. Therefore, the
first axis of the gradient of a 2D, single channel image, will have
length `2`. The first axis of the gradient of a 2D, 3-channel image,
will have length `6`, the ordering being
``I[:, 0, 0] = [R0_y, G0_y, B0_y, R0_x, G0_x, B0_x]``. To be clear,
all the ``y``-gradients are returned over each channel, then all
the ``x``-gradients.
"""
if (pixels.ndim - 1) == 2: # 2D Image
return gradient_cython(pixels)
else:
return _np_gradient(pixels)
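# --- Hedged usage sketch (illustration only; needs the compiled extension) ---
# For a 2-channel 2-D image the output stacks the y-gradients of both channels
# first and then the x-gradients, giving 2 * n_channels output channels:
def _gradient_example():
    img = np.random.rand(2, 16, 16)   # (channels, height, width), float64
    g = gradient(img)
    assert g.shape == (4, 16, 16)
    return g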
@ndfeature
def gaussian_filter(pixels, sigma):
r"""
Calculates the convolution of the input image with a multidimensional
Gaussian filter.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
sigma : `float` or `list` of `float`
The standard deviation for Gaussian kernel. The standard deviations of
the Gaussian filter are given for each axis as a `list`, or as a single
`float`, in which case it is equal for all axes.
Returns
-------
output_image : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The filtered image has the same type and size as the input ``pixels``.
"""
global scipy_gaussian_filter
if scipy_gaussian_filter is None:
from scipy.ndimage import gaussian_filter as scipy_gaussian_filter
output = np.empty(pixels.shape, dtype=pixels.dtype)
for dim in range(pixels.shape[0]):
scipy_gaussian_filter(pixels[dim], sigma, output=output[dim])
return output
@winitfeature
def hog(pixels, mode='dense', algorithm='dalaltriggs', num_bins=9,
cell_size=8, block_size=2, signed_gradient=True, l2_norm_clip=0.2,
window_height=1, window_width=1, window_unit='blocks',
window_step_vertical=1, window_step_horizontal=1,
window_step_unit='pixels', padding=True, verbose=False):
r"""
Extracts Histograms of Oriented Gradients (HOG) features from the input
image.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : {``dense``, ``sparse``}, optional
The ``sparse`` case refers to the traditional usage of HOGs, so
predefined parameters values are used.
The ``sparse`` case of ``dalaltriggs`` algorithm sets
``window_height = window_width = block_size`` and
``window_step_horizontal = window_step_vertical = cell_size``.
The ``sparse`` case of ``zhuramanan`` algorithm sets
``window_height = window_width = 3 * cell_size`` and
``window_step_horizontal = window_step_vertical = cell_size``.
In the ``dense`` case, the user can choose values for `window_height`,
`window_width`, `window_unit`, `window_step_vertical`,
`window_step_horizontal`, `window_step_unit` and `padding` to customize
the HOG calculation.
window_height : `float`, optional
Defines the height of the window. The metric unit is defined by
`window_unit`.
window_width : `float`, optional
Defines the width of the window. The metric unit is defined by
`window_unit`.
window_unit : {``blocks``, ``pixels``}, optional
Defines the metric unit of the `window_height` and `window_width`
parameters.
window_step_vertical : `float`, optional
Defines the vertical step by which the window is moved, thus it
controls the features' density. The metric unit is defined by
`window_step_unit`.
window_step_horizontal : `float`, optional
Defines the horizontal step by which the window is moved, thus it
controls the features' density. The metric unit is defined by
`window_step_unit`.
window_step_unit : {``pixels``, ``cells``}, optional
Defines the metric unit of the `window_step_vertical` and
`window_step_horizontal` parameters.
padding : `bool`, optional
If ``True``, the output image is padded with zeros to match the input
image's size.
algorithm : {``dalaltriggs``, ``zhuramanan``}, optional
Specifies the algorithm used to compute HOGs. ``dalaltriggs`` is the
implementation of [1] and ``zhuramanan`` is the implementation of [2].
cell_size : `float`, optional
Defines the cell size in pixels. This value is set to both the width
and height of the cell. This option is valid for both algorithms.
block_size : `float`, optional
Defines the block size in cells. This value is set to both the width
and height of the block. This option is valid only for the
``dalaltriggs`` algorithm.
num_bins : `float`, optional
Defines the number of orientation histogram bins. This option is
valid only for the ``dalaltriggs`` algorithm.
signed_gradient : `bool`, optional
Flag that defines whether we use signed or unsigned gradient angles.
This option is valid only for the ``dalaltriggs`` algorithm.
l2_norm_clip : `float`, optional
Defines the clipping value of the gradients' L2-norm. This option is
valid only for the ``dalaltriggs`` algorithm.
verbose : `bool`, optional
Flag to print HOG related information.
Returns
-------
hog : :map:`Image` or subclass or ``(X, Y, ..., Z, K)`` `ndarray`
The HOG features image. It has the same type as the input ``pixels``.
The output number of channels in the case of ``dalaltriggs`` is
``K = num_bins * block_size * block_size`` and ``K = 31`` in the case of
``zhuramanan``.
Raises
------
ValueError
HOG features mode must be either dense or sparse
ValueError
Algorithm must be either dalaltriggs or zhuramanan
ValueError
Number of orientation bins must be > 0
ValueError
Cell size (in pixels) must be > 0
ValueError
Block size (in cells) must be > 0
ValueError
Value for L2-norm clipping must be > 0.0
ValueError
Window height must be >= block size and <= image height
ValueError
Window width must be >= block size and <= image width
ValueError
Window unit must be either pixels or blocks
ValueError
Horizontal window step must be > 0
ValueError
Vertical window step must be > 0
ValueError
Window step unit must be either pixels or cells
References
----------
.. [1] <NAME> and <NAME>, "Histograms of oriented gradients for human
detection", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2005.
.. [2] <NAME>, <NAME>. "Face detection, pose estimation and landmark
localization in the wild", Proceedings of the IEEE Conference on
Computer Vision and Pattern Recognition (CVPR), 2012.
"""
# TODO: This is a temporary fix
# flip axis
pixels = np.rollaxis(pixels, 0, len(pixels.shape))
# Parse options
if mode not in ['dense', 'sparse']:
raise ValueError("HOG features mode must be either dense or sparse")
if algorithm not in ['dalaltriggs', 'zhuramanan']:
raise ValueError("Algorithm must be either dalaltriggs or zhuramanan")
if num_bins <= 0:
raise ValueError("Number of orientation bins must be > 0")
if cell_size <= 0:
raise ValueError("Cell size (in pixels) must be > 0")
if block_size <= 0:
raise ValueError("Block size (in cells) must be > 0")
if l2_norm_clip <= 0.0:
raise ValueError("Value for L2-norm clipping must be > 0.0")
if mode == 'dense':
if window_unit not in ['pixels', 'blocks']:
raise ValueError("Window unit must be either pixels or blocks")
window_height_temp = window_height
window_width_temp = window_width
if window_unit == 'blocks':
window_height_temp = window_height * block_size * cell_size
window_width_temp = window_width * block_size * cell_size
if (window_height_temp < block_size * cell_size or
window_height_temp > pixels.shape[0]):
raise ValueError("Window height must be >= block size and <= "
"image height")
if (window_width_temp < block_size*cell_size or
window_width_temp > pixels.shape[1]):
raise ValueError("Window width must be >= block size and <= "
"image width")
if window_step_horizontal <= 0:
raise ValueError("Horizontal window step must be > 0")
if window_step_vertical <= 0:
raise ValueError("Vertical window step must be > 0")
if window_step_unit not in ['pixels', 'cells']:
raise ValueError("Window step unit must be either pixels or cells")
# Correct input image_data
pixels = np.asfortranarray(pixels)
pixels *= 255.
# Dense case
if mode == 'dense':
# Iterator parameters
if algorithm == 'dalaltriggs':
algorithm = 1
if window_unit == 'blocks':
block_in_pixels = cell_size * block_size
window_height = np.uint32(window_height * block_in_pixels)
window_width = np.uint32(window_width * block_in_pixels)
if window_step_unit == 'cells':
window_step_vertical = np.uint32(window_step_vertical *
cell_size)
window_step_horizontal = np.uint32(window_step_horizontal *
cell_size)
elif algorithm == 'zhuramanan':
algorithm = 2
if window_unit == 'blocks':
block_in_pixels = 3 * cell_size
window_height = np.uint32(window_height * block_in_pixels)
window_width = np.uint32(window_width * block_in_pixels)
if window_step_unit == 'cells':
window_step_vertical = np.uint32(window_step_vertical *
cell_size)
window_step_horizontal = np.uint32(window_step_horizontal *
cell_size)
iterator = WindowIterator(pixels, window_height, window_width,
window_step_horizontal,
window_step_vertical, padding)
# Sparse case
else:
# Create iterator
if algorithm == 'dalaltriggs':
algorithm = 1
window_size = cell_size * block_size
step = cell_size
else:
algorithm = 2
window_size = 3 * cell_size
step = cell_size
iterator = WindowIterator(pixels, window_size, window_size, step,
step, False)
# Print iterator's info
if verbose:
print(iterator)
# Compute HOG
hog_descriptor = iterator.HOG(algorithm, num_bins, cell_size, block_size,
signed_gradient, l2_norm_clip, verbose)
# TODO: This is a temporary fix
# flip axis
hog_descriptor = WindowIteratorResult(
np.ascontiguousarray(np.rollaxis(hog_descriptor.pixels, -1)),
hog_descriptor.centres)
return hog_descriptor
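# Minimal usage sketch for hog() above; assumes the compiled menpo
# WindowIterator extension is importable and uses a random single-channel
# image purely for illustration (names prefixed with _ are not part of menpo).
def _hog_usage_sketch():
    import numpy as np
    pixels = np.random.rand(1, 128, 128)  # channels-first image
    # sparse mode: classic Dalal-Triggs cell/block grid
    sparse = hog(pixels, mode='sparse', algorithm='dalaltriggs')
    # dense mode: one HOG descriptor every 4 pixels
    dense = hog(pixels, mode='dense', window_step_vertical=4,
                window_step_horizontal=4, window_step_unit='pixels')
    return sparse, dense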
@ndfeature
def igo(pixels, double_angles=False, verbose=False):
r"""
Extracts Image Gradient Orientation (IGO) features from the input image.
The output image has ``N * C`` number of channels, where ``N`` is the
number of channels of the original image and ``C = 2`` or ``C = 4``
depending on whether double angles are used.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
double_angles : `bool`, optional
Assume that ``phi`` represents the gradient orientations.
If this flag is ``False``, the features image is the concatenation of
``cos(phi)`` and ``sin(phi)``, thus 2 channels.
If ``True``, the features image is the concatenation of
``cos(phi)``, ``sin(phi)``, ``cos(2 * phi)``, ``sin(2 * phi)``, thus 4
channels.
verbose : `bool`, optional
Flag to print IGO related information.
Returns
-------
igo : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The IGO features image. It has the same type and spatial shape as the
input ``pixels``. The output number of channels depends on the
``double_angles`` flag.
Raises
------
ValueError
Image has to be 2D in order to extract IGOs.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Subspace learning
from image gradient orientations", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 34, num. 12, p. 2454--2466, 2012.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError('IGOs only work on 2D images. Expects image data '
'to be 3D, channels + shape.')
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_chnls = 2
if double_angles:
feat_chnls = 4
# compute gradients
grad = gradient(pixels)
# compute angles
grad_orient = np.angle(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute igo image
igo_pixels = np.empty((n_img_chnls * feat_chnls,
pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype)
if double_angles:
dbl_grad_orient = 2 * grad_orient
# y angles
igo_pixels[:n_img_chnls] = np.sin(grad_orient)
igo_pixels[n_img_chnls:n_img_chnls*2] = np.sin(dbl_grad_orient)
# x angles
igo_pixels[n_img_chnls*2:n_img_chnls*3] = np.cos(grad_orient)
igo_pixels[n_img_chnls*3:] = np.cos(dbl_grad_orient)
else:
igo_pixels[:n_img_chnls] = np.sin(grad_orient) # y
igo_pixels[n_img_chnls:] = np.cos(grad_orient) # x
# print information
if verbose:
info_str = "IGO Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls)
info_str = "{} - Double angles are {}.\n".format(
info_str, 'enabled' if double_angles else 'disabled')
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, igo_pixels.shape[2], igo_pixels.shape[1], igo_pixels.shape[0])
print(info_str)
return igo_pixels
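# Usage sketch illustrating the channel behaviour documented for igo():
# 2 feature channels per input channel, or 4 with double_angles. Assumes the
# rest of the menpo feature module (gradient, ndfeature) is available; the
# random image is purely illustrative.
def _igo_channels_sketch():
    import numpy as np
    pixels = np.random.rand(3, 32, 32)        # 3-channel image, channels first
    plain = igo(pixels)                        # 3 * 2 = 6 feature channels
    doubled = igo(pixels, double_angles=True)  # 3 * 4 = 12 feature channels
    return plain, doubled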
@ndfeature
def es(pixels, verbose=False):
r"""
Extracts Edge Structure (ES) features from the input image. The output image
has ``N * C`` number of channels, where ``N`` is the number of channels of
the original image and ``C = 2``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either an image object itself or an array where the first axis
represents the number of channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
verbose : `bool`, optional
Flag to print ES related information.
Returns
-------
es : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The ES features image. It has the same type and spatial shape as the input
``pixels``. The output number of channels is ``C = 2``.
Raises
------
ValueError
Image has to be 2D in order to extract ES features.
References
----------
.. [1] <NAME>, <NAME>, "On representing edge structure for model
matching", Proceedings of the IEEE Conference on Computer Vision and
Pattern Recognition (CVPR), 2001.
"""
# check number of dimensions
if len(pixels.shape) != 3:
raise ValueError('ES features only work on 2D images. Expects '
'image data to be 3D, channels + shape.')
n_img_chnls = pixels.shape[0]
# feature channels per image channel
feat_channels = 2
# compute gradients
grad = gradient(pixels)
# compute magnitude
grad_abs = np.abs(grad[:n_img_chnls] + 1j * grad[n_img_chnls:])
# compute es image
grad_abs = grad_abs + np.median(grad_abs)
es_pixels = np.empty((pixels.shape[0] * feat_channels,
pixels.shape[1], pixels.shape[2]),
dtype=pixels.dtype)
es_pixels[:n_img_chnls] = grad[:n_img_chnls] / grad_abs
es_pixels[n_img_chnls:] = grad[n_img_chnls:] / grad_abs
# print information
if verbose:
info_str = "ES Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], n_img_chnls)
info_str = "{}Output image size {}W x {}H with {} channels.".format(
info_str, es_pixels.shape[2], es_pixels.shape[1], es_pixels.shape[0])
print(info_str)
return es_pixels
@ndfeature
def daisy(pixels, step=1, radius=15, rings=2, histograms=2, orientations=8,
normalization='l1', sigmas=None, ring_radii=None, verbose=False):
r"""
Extracts Daisy features from the input image. The output image has ``N * C``
number of channels, where ``N`` is the number of channels of the original
image and ``C`` is the feature channels determined by the input options.
Specifically, ``C = (rings * histograms + 1) * orientations``.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
step : `int`, optional
The sampling step that defines the density of the output image.
radius : `int`, optional
The radius (in pixels) of the outermost ring.
rings : `int`, optional
The number of rings to be used.
histograms : `int`, optional
The number of histograms sampled per ring.
orientations : `int`, optional
The number of orientations (bins) per histogram.
normalization : [ 'l1', 'l2', 'daisy', None ], optional
It defines how to normalize the descriptors
If 'l1' then L1-normalization is applied at each descriptor.
If 'l2' then L2-normalization is applied at each descriptor.
If 'daisy' then L2-normalization is applied at individual histograms.
If None then no normalization is employed.
sigmas : `list` of `float` or ``None``, optional
Standard deviation of spatial Gaussian smoothing for the centre
histogram and for each ring of histograms. The `list` of sigmas should
be sorted from the centre and out. I.e. the first sigma value defines
the spatial smoothing of the centre histogram and the last sigma value
defines the spatial smoothing of the outermost ring. Specifying sigmas
overrides the `rings` parameter by setting ``rings = len(sigmas) - 1``.
ring_radii : `list` of `float` or ``None``, optional
Radius (in pixels) for each ring. Specifying `ring_radii` overrides the
`rings` and `radius` parameters by setting ``rings = len(ring_radii)``
and ``radius = ring_radii[-1]``.
If both sigmas and ring_radii are given, they must satisfy ::
len(ring_radii) == len(sigmas) + 1
since no radius is needed for the centre histogram.
verbose : `bool`
Flag to print Daisy related information.
Returns
-------
daisy : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The Daisy features image. It has the same type as the input
``pixels``. The output number of channels is
``C = (rings * histograms + 1) * orientations``.
Raises
------
ValueError
len(sigmas)-1 != len(ring_radii)
ValueError
Invalid normalization method.
References
----------
.. [1] <NAME>, <NAME> and <NAME>, "Daisy: An efficient dense descriptor
applied to wide-baseline stereo", IEEE Transactions on Pattern Analysis
and Machine Intelligence, vol. 32, num. 5, p. 815-830, 2010.
"""
from menpo.external.skimage._daisy import _daisy
# Parse options
if sigmas is not None and ring_radii is not None \
and len(sigmas) - 1 != len(ring_radii):
raise ValueError('`len(sigmas)-1 != len(ring_radii)`')
if ring_radii is not None:
rings = len(ring_radii)
radius = ring_radii[-1]
if sigmas is not None:
rings = len(sigmas) - 1
if sigmas is None:
sigmas = [radius * (i + 1) / float(2 * rings) for i in range(rings)]
if ring_radii is None:
ring_radii = [radius * (i + 1) / float(rings) for i in range(rings)]
if normalization is None:
normalization = 'off'
if normalization not in ['l1', 'l2', 'daisy', 'off']:
raise ValueError('Invalid normalization method.')
# Compute daisy features
daisy_descriptor = _daisy(pixels, step=step, radius=radius, rings=rings,
histograms=histograms, orientations=orientations,
normalization=normalization, sigmas=sigmas,
ring_radii=ring_radii)
# print information
if verbose:
info_str = "Daisy Features:\n"
info_str = "{} - Input image is {}W x {}H with {} channels.\n".format(
info_str, pixels.shape[2], pixels.shape[1], pixels.shape[0])
info_str = "{} - Sampling step is {}.\n".format(info_str, step)
info_str = "{} - Radius of {} pixels, {} rings and {} histograms " \
"with {} orientations.\n".format(
info_str, radius, rings, histograms, orientations)
if not normalization == 'off':
info_str = "{} - Using {} normalization.\n".format(info_str,
normalization)
else:
info_str = "{} - No normalization emplyed.\n".format(info_str)
info_str = "{}Output image size {}W x {}H x {}.".format(
info_str, daisy_descriptor.shape[2], daisy_descriptor.shape[1],
daisy_descriptor.shape[0])
print(info_str)
return daisy_descriptor
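# Worked example of the channel count quoted in the daisy() docstring,
# C = (rings * histograms + 1) * orientations. Pure arithmetic, so it runs
# without the menpo/skimage dependencies; the helper name is illustrative only.
def _daisy_channel_count(rings=2, histograms=2, orientations=8):
    # with the defaults above: (2 * 2 + 1) * 8 = 40 channels per input channel
    return (rings * histograms + 1) * orientations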
# TODO: Needs fixing ...
@winitfeature
def lbp(pixels, radius=None, samples=None, mapping_type='riu2',
window_step_vertical=1, window_step_horizontal=1,
window_step_unit='pixels', padding=True, verbose=False,
skip_checks=False):
r"""
Extracts Local Binary Pattern (LBP) features from the input image. The
output image has ``N * C`` number of channels, where ``N`` is the number of
channels of the original image and ``C`` is the number of radius/samples
values combinations that are used in the LBP computation.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
radius : `int` or `list` of `int` or ``None``, optional
It defines the radius of the circle (or circles) at which the sampling
points will be extracted. The radius (or radii) values must be greater
than zero. There must be a radius value for each samples value, thus
they both need to have the same length. If ``None``, then
``[1, 2, 3, 4]`` is used.
samples : `int` or `list` of `int` or ``None``, optional
It defines the number of sampling points that will be extracted at each
circle. The samples value (or values) must be greater than zero. There
must be a samples value for each radius value, thus they both need to
have the same length. If ``None``, then ``[8, 8, 8, 8]`` is used.
mapping_type : {``u2``, ``ri``, ``riu2``, ``none``}, optional
It defines the mapping type of the LBP codes. Select ``u2`` for
uniform-2 mapping, ``ri`` for rotation-invariant mapping, ``riu2`` for
uniform-2 and rotation-invariant mapping and ``none`` to use no mapping
and only the decimal values instead.
window_step_vertical : `float`, optional
Defines the vertical step by which the window is moved, thus it controls
the features density. The metric unit is defined by `window_step_unit`.
window_step_horizontal : `float`, optional
Defines the horizontal step by which the window is moved, thus it
controls the features density. The metric unit is defined by
`window_step_unit`.
window_step_unit : {``pixels``, ``window``}, optional
Defines the metric unit of the `window_step_vertical` and
`window_step_horizontal` parameters.
padding : `bool`, optional
If ``True``, the output image is padded with zeros to match the input
image's size.
verbose : `bool`, optional
Flag to print LBP related information.
skip_checks : `bool`, optional
If ``True``, do not perform any validation of the parameters.
Returns
-------
lbp : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
The LBP features image. It has the same type as the input
``pixels``. The output number of channels is
``C = len(radius) * len(samples)``.
Raises
------
ValueError
Radius and samples must both be either integers or lists
ValueError
Radius and samples must have the same length
ValueError
Radius must be > 0
ValueError
Radii must be > 0
ValueError
Samples must be > 0
ValueError
Mapping type must be u2, ri, riu2 or none
ValueError
Horizontal window step must be > 0
ValueError
Vertical window step must be > 0
ValueError
Window step unit must be either pixels or window
References
----------
.. [1] <NAME>, <NAME>, and <NAME>, "Multiresolution gray-scale
and rotation invariant texture classification with local binary
patterns", IEEE Transactions on Pattern Analysis and Machine
Intelligence, vol. 24, num. 7, p. 971-987, 2002.
"""
if radius is None:
radius = range(1, 5)
if samples is None:
samples = [8]*4
# TODO: This is a temporary fix
# flip axis
pixels = np.rollaxis(pixels, 0, len(pixels.shape))
if not skip_checks:
# Check parameters
if ((isinstance(radius, int) and isinstance(samples, list)) or
(isinstance(radius, list) and isinstance(samples, int))):
raise ValueError("Radius and samples must both be either integers "
"or lists")
elif isinstance(radius, list) and isinstance(samples, list):
if len(radius) != len(samples):
raise ValueError("Radius and samples must have the same "
"length")
if isinstance(radius, int) and radius < 1:
raise ValueError("Radius must be > 0")
elif isinstance(radius, list) and sum(r < 1 for r in radius) > 0:
raise ValueError("Radii must be > 0")
if isinstance(samples, int) and samples < 1:
raise ValueError("Samples must be > 0")
elif isinstance(samples, list) and sum(s < 1 for s in samples) > 0:
raise ValueError("Samples must be > 0")
if mapping_type not in ['u2', 'ri', 'riu2', 'none']:
raise ValueError("Mapping type must be u2, ri, riu2 or "
"none")
if window_step_horizontal <= 0:
raise ValueError("Horizontal window step must be > 0")
if window_step_vertical <= 0:
raise ValueError("Vertical window step must be > 0")
if window_step_unit not in ['pixels', 'window']:
raise ValueError("Window step unit must be either pixels or "
"window")
# Correct input image_data
pixels = np.asfortranarray(pixels)
# Parse options
radius = np.asfortranarray(radius)
samples = np.asfortranarray(samples)
window_height = np.uint32(2 * radius.max() + 1)
window_width = window_height
if window_step_unit == 'window':
window_step_vertical = np.uint32(window_step_vertical * window_height)
window_step_horizontal = np.uint32(window_step_horizontal *
window_width)
if mapping_type == 'u2':
mapping_type = 1
elif mapping_type == 'ri':
mapping_type = 2
elif mapping_type == 'riu2':
mapping_type = 3
else:
mapping_type = 0
# Create iterator object
iterator = WindowIterator(pixels, window_height, window_width,
window_step_horizontal, window_step_vertical,
padding)
# Print iterator's info
if verbose:
print(iterator)
# Compute LBP
lbp_descriptor = iterator.LBP(radius, samples, mapping_type, verbose)
# TODO: This is a temporary fix
# flip axis
lbp_descriptor = WindowIteratorResult(
np.ascontiguousarray(np.rollaxis(lbp_descriptor.pixels, -1)),
lbp_descriptor.centres)
return lbp_descriptor
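# Minimal usage sketch for lbp() with paired radius/samples lists; assumes the
# compiled menpo WindowIterator extension is importable and uses a random image
# purely for illustration.
def _lbp_usage_sketch():
    import numpy as np
    pixels = np.random.rand(1, 64, 64)
    # two radius/samples pairings -> two LBP code channels per input channel
    return lbp(pixels, radius=[1, 2], samples=[8, 16], mapping_type='riu2')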
@imgfeature
def normalize(img, scale_func=None, mode='all',
error_on_divide_by_zero=True):
r"""
Normalize the pixel values via mean centering and an optional scaling. By
default the scaling will be ``1.0``. The ``mode`` parameter selects
whether the normalisation is computed across all pixels in the image or
per-channel.
Parameters
----------
img : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
scale_func : `callable`, optional
Compute the scaling factor. Expects a single parameter and an optional
`axis` keyword argument and will be passed the entire pixel array.
Should return a 1D numpy array of one or more values.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
if scale_func is None:
def scale_func(_, axis=None):
return np.array([1.0])
pixels = img.as_vector(keep_channels=True)
if mode == 'all':
centered_pixels = pixels - np.mean(pixels)
scale_factor = scale_func(centered_pixels)
elif mode == 'per_channel':
centered_pixels = pixels - np.mean(pixels, axis=1, keepdims=1)
scale_factor = scale_func(centered_pixels, axis=1).reshape([-1, 1])
else:
raise ValueError("Supported modes are {{'all', 'per_channel'}} - '{}' "
"is not known".format(mode))
zero_denom = (scale_factor == 0).ravel()
any_zero_denom = np.any(zero_denom)
if error_on_divide_by_zero and any_zero_denom:
raise ValueError("Computed scale factor cannot be 0.0")
elif any_zero_denom:
warnings.warn('One or more of the scale factors are 0.0 and thus these '
'entries will be skipped during normalization.')
non_zero_denom = ~zero_denom
centered_pixels[non_zero_denom] = (centered_pixels[non_zero_denom] /
scale_factor[non_zero_denom])
return img.from_vector(centered_pixels)
else:
return img.from_vector(centered_pixels / scale_factor)
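# Usage sketch showing how scale_func turns normalize() into a z-scoring
# operation: np.std matches the documented contract (takes the pixel array plus
# an optional axis kwarg and returns one value per channel). Assumes menpo is
# installed; the import path and random image are illustrative assumptions.
def _normalize_zscore_sketch():
    import numpy as np
    from menpo.image import Image
    img = Image(np.random.rand(2, 16, 16))  # 2-channel 16x16 image
    return normalize(img, scale_func=np.std, mode='per_channel')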
@ndfeature
def normalize_norm(pixels, mode='all', error_on_divide_by_zero=True):
r"""
Normalize the pixels to be mean centred and have unit norm. The ``mode``
parameter selects whether the normalisation is computed across all pixels in
the image or per-channel.
Parameters
----------
pixels : :map:`Image` or subclass or ``(C, X, Y, ..., Z)`` `ndarray`
Either the image object itself or an array with the pixels. The first
dimension is interpreted as channels. This means an N-dimensional image
is represented by an N+1 dimensional array.
mode : ``{all, per_channel}``, optional
If ``all``, the normalization is over all channels. If
``per_channel``, each channel individually is mean centred and
normalized in variance.
error_on_divide_by_zero : `bool`, optional
If ``True``, will raise a ``ValueError`` on dividing by zero.
If ``False``, will merely raise a warning and only those values
with non-zero denominators will be normalized.
Returns
-------
pixels : :map:`Image` or subclass or ``(X, Y, ..., Z, C)`` `ndarray`
A normalized copy of the image that was passed in.
Raises
------
ValueError
If any of the denominators are 0 and ``error_on_divide_by_zero`` is
``True``.
"""
def unit_norm(x, axis=None):
return np.linalg.norm(x, axis=axis)
# -*- coding: utf-8 -*-
""" Various utilities, converters etc., to help video calibration. """
# pylint:disable=invalid-name,logging-not-lazy
import collections
import logging
import numpy as np
import cv2
import sksurgerycore.transforms.matrix as skcm
LOGGER = logging.getLogger(__name__)
def convert_numpy2d_to_opencv(image_points):
"""
Converts numpy array to Vector of 1x2 vectors containing float32.
:param image_points: numpy [Mx2] array.
:return: vector (length M), of 1x2 vectors of float32.
"""
return np.reshape(image_points, (-1, 1, 2)).astype(np.float32)
def convert_numpy3d_to_opencv(object_points):
"""
Converts numpy array to Vector of 1x3 vectors containing float32.
:param object_points: numpy [Mx3] array.
:return: vector (length M), of 1x3 vectors of float32.
"""
return np.reshape(object_points, (-1, 1, 3)).astype(np.float32)
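# Small sketch of the two converters above: they only reshape and cast, giving
# the OpenCV-style (M, 1, 2) and (M, 1, 3) float32 layouts.
def _opencv_conversion_sketch():
    pts_2d = np.arange(8, dtype=float).reshape(4, 2)    # 4 image points
    pts_3d = np.arange(12, dtype=float).reshape(4, 3)   # 4 object points
    assert convert_numpy2d_to_opencv(pts_2d).shape == (4, 1, 2)
    assert convert_numpy3d_to_opencv(pts_3d).shape == (4, 1, 3)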
def extrinsic_vecs_to_matrix(rvec, tvec):
"""
Method to convert rvec and tvec to a 4x4 matrix.
:param rvec: [3x1] ndarray, Rodrigues rotation params
:param tvec: [3x1] ndarray, translation params
:return: [4x4] ndarray, rigid body transformation matrix
"""
rotation_matrix = (cv2.Rodrigues(rvec))[0]
transformation_matrix = \
skcm.construct_rigid_transformation(rotation_matrix, tvec)
return transformation_matrix
def extrinsic_matrix_to_vecs(transformation_matrix):
"""
Method to convert a [4x4] rigid body matrix to an rvec and tvec.
:param transformation_matrix: [4x4] rigid body matrix.
:return [3x1] Rodrigues rotation vec, [3x1] translation vec
"""
rmat = transformation_matrix[0:3, 0:3]
rvec = (cv2.Rodrigues(rmat))[0]
tvec = np.ones((3, 1))
tvec[0:3, 0] = transformation_matrix[0:3, 3]
return rvec, tvec
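# Round-trip sketch for the two helpers above: rvec/tvec -> 4x4 matrix -> back.
# The numbers are arbitrary; numpy, cv2 and sksurgerycore are already imported
# at the top of this module.
def _extrinsic_round_trip_sketch():
    rvec = np.array([[0.1], [-0.2], [0.3]])
    tvec = np.array([[10.0], [20.0], [30.0]])
    matrix = extrinsic_vecs_to_matrix(rvec, tvec)
    rvec_back, tvec_back = extrinsic_matrix_to_vecs(matrix)
    assert np.allclose(rvec, rvec_back) and np.allclose(tvec, tvec_back)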
def filter_common_points_per_image(left_ids,
left_object_points,
left_image_points,
right_ids,
right_image_points,
minimum_points
):
"""
For stereo calibration, we need common points in left and right.
Remember that a point detector, may provide different numbers of
points for left and right, and they may not be sorted.
:param left_ids: ndarray of integer point ids
:param left_object_points: Vector of Vector of 1x3 float 32
:param left_image_points: Vector of Vector of 1x2 float 32
:param right_ids: ndarray of integer point ids
:param right_image_points: Vector of Vector of 1x2 float 32
:param minimum_points: the number of minimum common points to accept
:return: common ids, object_points, left_image_points, right_image_points
"""
# Filter obvious duplicates first.
non_duplicate_left = np.asarray(
[item for item, count in
collections.Counter(left_ids).items() if count == 1])
non_duplicate_right = np.asarray(
[item for item, count in
collections.Counter(right_ids).items() if count == 1])
filtered_left = left_ids[
np.isin(left_ids, non_duplicate_left)]
filtered_right = right_ids[
np.isin(right_ids, non_duplicate_right)]
# Now find common points in left and right.
ids = np.intersect1d(filtered_left, filtered_right)
ids = np.sort(ids)
if len(ids) < minimum_points:
raise ValueError("Not enough common points in left and right images.")
common_ids = \
np.zeros((len(ids), 1), dtype=int)
common_object_points = \
np.zeros((len(ids), 1, 3), dtype=np.float32)
common_left_image_points = \
np.zeros((len(ids), 1, 2), dtype=np.float32)
common_right_image_points = \
np.zeros((len(ids), 1, 2), dtype=np.float32)
counter = 0
for position in ids:
left_location = np.where(left_ids == position)
common_ids[counter] = left_ids[left_location[0][0]]
common_object_points[counter] \
= left_object_points[left_location[0][0]]
common_left_image_points[counter] \
= left_image_points[left_location[0][0]]
right_location = np.where(right_ids == position)
common_right_image_points[counter] \
= right_image_points[right_location[0][0]]
counter = counter + 1
number_of_left = len(common_left_image_points)
number_of_right = len(common_right_image_points)
if number_of_left != number_of_right:
raise ValueError("Unequal number of common points in left and right.")
return common_ids, common_object_points, common_left_image_points, \
common_right_image_points
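# Tiny sketch of filter_common_points_per_image: only ids 1 and 2 appear in
# both views, so two common points survive. The zero coordinates are dummy
# values used purely for illustration.
def _filter_common_points_sketch():
    left_ids = np.array([0, 1, 2])
    right_ids = np.array([1, 2, 3])
    left_obj = np.zeros((3, 1, 3), dtype=np.float32)
    left_img = np.zeros((3, 1, 2), dtype=np.float32)
    right_img = np.zeros((3, 1, 2), dtype=np.float32)
    ids, obj, left, right = filter_common_points_per_image(
        left_ids, left_obj, left_img, right_ids, right_img, 2)
    assert len(ids) == 2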
def filter_common_points_all_images(left_ids,
left_object_points,
left_image_points,
right_ids,
right_image_points,
minimum_points
):
"""
Loops over each image's data, filtering per image.
See: filter_common_points_per_image
:return: Vectors of outputs from filter_common_points_per_image
"""
common_ids = []
common_object_points = []
common_left_image_points = []
common_right_image_points = []
# pylint:disable=consider-using-enumerate
for counter in range(len(left_ids)):
c_i, c_o, c_l, c_r = \
filter_common_points_per_image(left_ids[counter],
left_object_points[counter],
left_image_points[counter],
right_ids[counter],
right_image_points[counter],
minimum_points
)
common_ids.append(c_i)
common_object_points.append(c_o)
common_left_image_points.append(c_l)
common_right_image_points.append(c_r)
return common_ids, common_object_points, common_left_image_points, \
common_right_image_points
def convert_pd_to_opencv(ids, object_points, image_points):
"""
The PointDetectors from scikit-surgeryimage aren't quite compatible
with OpenCV.
"""
dims = np.shape(image_points)
ids = np.reshape(ids, dims[0])
image_points = np.reshape(image_points, (dims[0], 1, 2))
image_points = image_points.astype(np.float32)
object_points = np.reshape(object_points, (-1, 1, 3))
object_points = object_points.astype(np.float32)
return ids, image_points, object_points
def array_contains_tracking_data(array_to_check):
"""
Returns True if the array contains some tracking data.
"""
result = False
if array_to_check is not None:
number_of_items = len(array_to_check)
if number_of_items > 0:
found_none = False
for i in range(0, number_of_items):
if array_to_check[i] is None:
found_none = True
if not found_none:
result = True
return result
def match_points_by_id(ids_1, points_1, ids_2, points_2):
"""
Returns an ndarray of matched points, matching by their identifier.
:param ids_1: ndarray [Mx1] list of ids for points_1
:param points_1: ndarray [Mx2 or 3] of 2D or 3D points
:param ids_2: ndarray [Nx1] list of ids for points_2
:param points_2: ndarray [Nx2 or 3] of 2D or 3D points
:return: ndarray. Number of rows is the number of common points by ids.
"""
common_ids = np.intersect1d(ids_1, ids_2)
common_ids = np.sort(common_ids)
indexes_1 = np.isin(ids_1, common_ids).reshape(-1)
indexes_2 = np.isin(ids_2, common_ids).reshape(-1)
points_1_selected = points_1[indexes_1, :]
points_2_selected = points_2[indexes_2, :]
result = np.zeros((common_ids.shape[0],
points_1_selected.shape[1] +
points_2_selected.shape[1]))
result[:, 0:points_1_selected.shape[1]] \
= points_1_selected[:, :]
result[:, points_1_selected.shape[1]:points_1_selected.shape[1] +
points_2_selected.shape[1]] = points_2_selected[:, :]
return result
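# Worked example for match_points_by_id: ids 2 and 5 are common, so the result
# has two rows, each holding the points_1 columns followed by the points_2
# columns, ordered by id.
def _match_points_sketch():
    ids_1 = np.array([1, 2, 5])
    points_1 = np.array([[0.0, 0.0], [1.0, 1.0], [2.0, 2.0]])
    ids_2 = np.array([2, 5, 9])
    points_2 = np.array([[10.0, 10.0], [20.0, 20.0], [30.0, 30.0]])
    matched = match_points_by_id(ids_1, points_1, ids_2, points_2)
    # matched == [[1., 1., 10., 10.], [2., 2., 20., 20.]]
    return matched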
def distort_points(image_points, camera_matrix, distortion_coeffs):
"""
Distorts image points, reversing the effects of cv2.undistortPoints.
Slow, but should do for now, for offline calibration at least.
:param image_points: undistorted image points.
:param camera_matrix: [3x3] camera matrix
:param distortion_coeffs: [1x5] distortion coefficients
:return: distorted points
"""
distorted_pts = np.zeros(image_points.shape)
number_of_points = image_points.shape[0]
# pylint: disable=invalid-name
for counter in range(number_of_points):
relative_x = (image_points[counter][0] - camera_matrix[0][2]) \
/ camera_matrix[0][0]
relative_y = (image_points[counter][1] - camera_matrix[1][2]) \
/ camera_matrix[1][1]
r2 = relative_x * relative_x + relative_y * relative_y
radial = (
1
+ distortion_coeffs[0][0]
* r2
+ distortion_coeffs[0][1]
* r2 * r2
+ distortion_coeffs[0][4]
* r2 * r2 * r2
)
distorted_x = relative_x * radial
distorted_y = relative_y * radial
distorted_x = distorted_x + (
2 * distortion_coeffs[0][2]
* relative_x * relative_y
+ distortion_coeffs[0][3]
* (r2 + 2 * relative_x * relative_x))
distorted_y = distorted_y + (
distortion_coeffs[0][2]
* (r2 + 2 * relative_y * relative_y)
+ 2 * distortion_coeffs[0][3]
* relative_x * relative_y)
distorted_x = distorted_x * camera_matrix[0][0] + camera_matrix[0][2]
distorted_y = distorted_y * camera_matrix[1][1] + camera_matrix[1][2]
distorted_pts[counter][0] = distorted_x
distorted_pts[counter][1] = distorted_y
return distorted_pts
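# Sanity-check sketch for distort_points: with all-zero distortion coefficients
# the radial and tangential terms vanish, so points are returned unchanged.
# Camera parameters are arbitrary illustrative values.
def _distort_points_identity_sketch():
    camera_matrix = np.array([[800.0, 0.0, 320.0],
                              [0.0, 800.0, 240.0],
                              [0.0, 0.0, 1.0]])
    zero_coeffs = np.zeros((1, 5))
    points = np.array([[100.0, 50.0], [320.0, 240.0], [600.0, 400.0]])
    assert np.allclose(distort_points(points, camera_matrix, zero_coeffs), points)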
def map_points_from_canonical_to_original(images_array,
image_index,
video_data,
ids,
object_points,
image_points,
homography,
camera_matrix,
distortion_coeffs):
"""
Utility method to map image points, detected in a canonical face
on image, back to the original image space.
:param images_array:
:param image_index:
:param video_data:
:param ids:
:param object_points:
:param image_points:
:param homography:
:param camera_matrix:
:param distortion_coeffs:
:return:
"""
inverted_points = \
cv2.perspectiveTransform(
image_points.astype(np.float32).reshape(-1, 1, 2),
np.linalg.inv(homography))
import unittest
import numpy as np
from aitoolbox.torchtrain.train_loop.components.pred_collate_fns import *
class TestBatchPredCollateFns(unittest.TestCase):
def test_append_predictions(self):
preds = []
preds_ep_1 = np.random.rand(100, 1)
preds = append_predictions(preds_ep_1, preds)
self.assertEqual([preds_ep_1], preds)
preds_ep_2 = np.random.rand(45, 1)
preds = append_predictions(preds_ep_2, preds)
self.assertEqual([preds_ep_1, preds_ep_2], preds)
def test_append_concat_predictions(self):
preds = []
preds_ep_1 = np.random.rand(100, 1)
preds = append_predictions(preds_ep_1, preds)
self.assertEqual([preds_ep_1], preds)
preds_ep_2 = np.random.rand(45, 1)
preds = append_predictions(preds_ep_2, preds)
self.assertEqual([preds_ep_1, preds_ep_2], preds)
preds_list = []
preds_list_ep_1 = np.random.rand(100)
#
# Copyright 2019 The FATE Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
import math
import time
import unittest
import numpy as np
from arch.api import eggroll
eggroll.init("123")
from federatedml.feature.binning import QuantileBinning
from federatedml.feature.instance import Instance
from federatedml.param.param import FeatureBinningParam
class TestQuantileBinning(unittest.TestCase):
def setUp(self):
# eggroll.init("123")
self.data_num = 1000
self.feature_num = 200
final_result = []
numpy_array = []
for i in range(self.data_num):
tmp = np.random.randn(self.feature_num)
inst = Instance(inst_id=i, features=tmp, label=0)
tmp_pair = (str(i), inst)
final_result.append(tmp_pair)
numpy_array.append(tmp)
table = eggroll.parallelize(final_result,
include_key=True,
partition=10)
self.table = table
self.numpy_table = np.array(numpy_array)
self.cols = [1]
def test_quantile_binning(self):
compress_thres = 10000
head_size = 5000
error = 0.01
bin_num = 10
bin_param = FeatureBinningParam(method='quantile', compress_thres=compress_thres, head_size=head_size,
error=error,
bin_num=bin_num)
quan_bin = QuantileBinning(bin_param)
split_points = quan_bin.binning(self.table, cols=self.cols)
for col_idx, col in enumerate(self.cols):
bin_percent = [i * (1.0 / bin_num) for i in range(1, bin_num)]
x = self.numpy_table[:, col]
x = sorted(x)
for bin_idx, percent in enumerate(bin_percent):
min_rank = int(math.floor(percent * self.data_num - self.data_num * error))
max_rank = int(math.ceil(percent * self.data_num + self.data_num * error))
if min_rank < 0:
min_rank = 0
if max_rank > len(x) - 1:
max_rank = len(x) - 1
try:
self.assertTrue(x[min_rank] <= split_points[col_idx][bin_idx] <= x[max_rank])
except:
print(x[min_rank], x[max_rank], split_points[col_idx][bin_idx])
found_index = x.index(split_points[col_idx][bin_idx])
print("min_rank: {}, found_rank: {}, max_rank: {}".format(
min_rank, found_index, max_rank
))
self.assertTrue(x[min_rank] <= split_points[col_idx][bin_idx] <= x[max_rank])
def tearDown(self):
self.table.destroy()
class TestQuantileBinningSpeed(unittest.TestCase):
def setUp(self):
# eggroll.init("123")
self.data_num = 100000
self.feature_num = 200
final_result = []
numpy_array = []
for i in range(self.data_num):
tmp = np.random.randn(self.feature_num)
inst = Instance(inst_id=i, features=tmp, label=0)
tmp_pair = (str(i), inst)
final_result.append(tmp_pair)
numpy_array.append(tmp)
table = eggroll.parallelize(final_result,
include_key=True,
partition=10)
self.table = table
self.numpy_table = np.array(numpy_array)
self.cols = [1, 2, 3]
def test_quantile_binning(self):
error = 0.01
compress_thres = int(self.data_num / (self.data_num * error))
head_size = 5000
bin_num = 10
bin_percent = [int(i * (100.0 / bin_num)) for i in range(1, bin_num)]
bin_param = FeatureBinningParam(method='quantile', compress_thres=compress_thres, head_size=head_size, error=error,
bin_num=bin_num)
quan_bin = QuantileBinning(bin_param)
t0 = time.time()
split_points = quan_bin.binning(self.table, cols=self.cols)
t1 = time.time()
print('Spend time: {}'.format(t1 - t0))
# collect and test numpy quantile speed
local_table = self.table.collect()
total_data = []
for _, data_inst in local_table:
total_data.append(data_inst.features)
total_data = np.array(total_data)
import numpy as np
import matplotlib.pyplot as plt
from numpy.linalg import inv
N = 25
X = np.reshape(np.linspace(0, 0.9, N)
"""
Experimenting with assorted code in this package.
Can run with
python -m neuroconnect.playground
"""
import os
from collections import OrderedDict
import numpy as np
import matplotlib.pyplot as plt
import mpmath
import seaborn as sns
import pandas as pd
def test_hyper_eg(total, bad, draws):
"""Demonstrates that expected overlap between two sets is formula."""
from .connect_math import (
hypergeometric_pmf,
expected_non_overlapping,
expected_overlapping,
)
exp = 0
for k in range(draws + 1):
res = hypergeometric_pmf(total, total - bad, draws, k)
exp = exp + (k * res)
other = expected_non_overlapping(total, bad, draws)
other2 = expected_overlapping(total, bad, draws)
return (exp, other, other2)
def test_unique(total, draws, n_senders):
"""Test the calculation of expected unique."""
from .connect_math import expected_unique
from .monte_carlo import monte_carlo
total_a = np.array([i for i in range(total)])
def gen_random(i):
random = np.random.choice(total_a, draws)
return (random,)
def check_random(random):
unique = np.unique(random)
return (len(unique),)
result = monte_carlo(check_random, gen_random, 50000)
avg = 0
for val in result:
avg += val[0] / 50000
ab = expected_unique(total, draws)
print("Stats", ab, "MC", avg)
senders = np.random.choice(total, size=(n_senders, draws), replace=True)
connections = {}
for i in range(n_senders):
num_choices = np.random.randint(1, draws + 1, dtype=np.int32)
forward_connection = senders[i, :num_choices]
connections[i] = forward_connection
avg = 0
for k, v in connections.items():
avg += len(np.unique(v)) / n_senders
connections = 0
for val in range(1, 301):
connections += expected_unique(1000, val) / draws
print("Stats", connections, "MC", avg)
def test_uniform(
n_dists,
min_val,
max_val,
n_dists2=20,
n_iters=100000,
N=1000,
plot=True,
n_senders=200,
):
"""Test the distribution of uniform distribution sums and functions of this."""
# NOTE: of course, the sum of uniform dists approaches a normal dist
# However, a function of the sum of uniform dists is not necessarily normal
from .connect_math import (
nfold_conv,
create_uniform,
get_dist_mean,
expected_unique,
apply_fn_to_dist,
alt_way,
)
from .monte_carlo import get_distribution
from .mpf_connection import CombProb
alt_full, alt_final = alt_way(N, n_dists, n_senders, min_val, max_val)
uni = create_uniform(min_val, max_val)
dists = [
uni,
] * n_dists
dist = nfold_conv(dists)
print("Expected value: {}".format(get_dist_mean(dist)))
def fn_to_apply(k):
return float(expected_unique(N, k))
fn_dist = apply_fn_to_dist(dist, fn_to_apply)
print("Expected value fn: {}".format(get_dist_mean(fn_dist)))
print("Old expected value dist: {}".format(expected_unique(N, get_dist_mean(dist))))
print(
"Old expected value: {}".format(
expected_unique(N, n_dists * (max_val + min_val) / 2)
)
)
randoms = np.random.randint(min_val, max_val + 1, size=(n_iters, n_dists))
import pandas as pd
from lifelines import KaplanMeierFitter, CoxPHFitter
import numpy as np
from sklearn.exceptions import ConvergenceWarning
from multiprocessing import Pool
import numpy as np
import functools
from .correlation import intersection, header_list
import plotly
import plotly.offline as opy
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import ShuffleSplit, GridSearchCV
import warnings
#######################
### Sklearn Survival ##
#######################
class EarlyStoppingMonitor:
def __init__(self, window_size, max_iter_without_improvement):
self.window_size = window_size
self.max_iter_without_improvement = max_iter_without_improvement
self._best_step = -1
def __call__(self, iteration, estimator, args):
# continue training for first self.window_size iterations
if iteration < self.window_size:
return False
# compute average improvement in last self.window_size iterations.
# oob_improvement_ is the difference in negative log partial likelihood
# between the previous and current iteration.
start = iteration - self.window_size + 1
end = iteration + 1
improvement = np.mean(estimator.oob_improvement_[start:end])
if improvement > 1e-6:
self._best_step = iteration
return False # continue fitting
# stop fitting if there was no improvement
# in last max_iter_without_improvement iterations
diff = iteration - self._best_step
return diff >= self.max_iter_without_improvement
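# Small sketch exercising the EarlyStoppingMonitor logic above with a stand-in
# object that only carries oob_improvement_, so no sksurv model is required.
# The class and values are illustrative assumptions.
class _FakeBooster:
    def __init__(self, oob_improvement):
        self.oob_improvement_ = np.asarray(oob_improvement)
def _early_stopping_sketch():
    monitor = EarlyStoppingMonitor(window_size=3, max_iter_without_improvement=2)
    booster = _FakeBooster([0.5, 0.4, 0.3, 0.0, 0.0, 0.0, 0.0])
    # stays False while any window still shows improvement, then flips to True
    return [monitor(i, booster, None) for i in range(7)]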
def IPC_RIDGE(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.linear_model import IPCRidge
from sklearn.pipeline import make_pipeline
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
y_train_log = y_train.copy()
y_train_log["time"] = np.log1p(y_train["time"])
y_test_log = y_test.copy()
y_test_log["time"] = np.log1p(y_test["time"])
#https://github.com/sebp/scikit-survival/issues/41
n_alphas = 50
alphas = np.logspace(-10, 1, n_alphas)
gcv = GridSearchCV(IPCRidge(max_iter=100000),
{"alpha":alphas},
cv = 2,
n_jobs=10).fit(X_train,y_train_log)
best_model = gcv.best_estimator_
alpha = gcv.best_params_["alpha"]
scoreTraining = best_model.score(X_train,y_train_log)
scoreTest = best_model.score(X_test,y_test_log)
feature = pd.DataFrame(best_model.coef_, index=lFeature)[0]
return scoreTraining, scoreTest, feature
def score_survival_model(model, X, y):
from sksurv.metrics import concordance_index_censored
prediction = model.predict(X)
result = concordance_index_censored(y['event'], y['time'], prediction)
return result[0]
def SurvivalSVM(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.svm import FastSurvivalSVM
import numpy as np
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
ssvm = FastSurvivalSVM(max_iter=100, tol=1e-5, random_state=seed)
param_grid = {'alpha': 2. ** np.arange(-12, 13, 4)}
cv = ShuffleSplit(n_splits=10, test_size=0.2, random_state=seed)
gcv = GridSearchCV(ssvm, param_grid, scoring=score_survival_model,
n_jobs = n_core , refit=False,
cv=cv)
warnings.filterwarnings("ignore", category=FutureWarning)
gcv = gcv.fit(X_train, y_train)
ssvm.set_params(**gcv.best_params_)
ssvm.fit(X_train, y_train)
scoreTraining = ssvm.score(X_train,y_train)
scoreTest = ssvm.score(X_test,y_test)
feature = pd.Series(ssvm.coef_, index=lFeature)
return scoreTraining, scoreTest, feature
def PenaltyCox(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.linear_model import CoxPHSurvivalAnalysis, CoxnetSurvivalAnalysis
from sklearn.pipeline import make_pipeline
seed = np.random.RandomState(seed)
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
model = CoxnetSurvivalAnalysis(alpha_min_ratio=0.12, l1_ratio=0.9, max_iter=100)
#https://github.com/sebp/scikit-survival/issues/41
model.set_params(max_iter = 100, n_alphas = 50)
model.fit(X_train, y_train)
warnings.simplefilter("ignore", ConvergenceWarning)
alphas = model.alphas_
gcv = GridSearchCV(
make_pipeline(CoxnetSurvivalAnalysis(l1_ratio=0.9, max_iter=1000)),
param_grid={"coxnetsurvivalanalysis__alphas": [[v] for v in alphas]},
cv = 2,
n_jobs= n_core).fit(X_train,y_train)
best_model = gcv.best_estimator_.named_steps["coxnetsurvivalanalysis"]
alpha = best_model.alphas_
scoreTraining = best_model.score(X_train,y_train)
scoreTest = best_model.score(X_test,y_test)
feature = pd.DataFrame(best_model.coef_, index=lFeature)[0]
return scoreTraining, scoreTest, feature
def SurvivalForest(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.ensemble import RandomSurvivalForest
from eli5.formatters import format_as_dataframe
from eli5.sklearn import explain_weights_sklearn
from eli5.sklearn import PermutationImportance
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
rsf = RandomSurvivalForest(n_estimators=300,
min_samples_split=10,
min_samples_leaf=15,
max_features="sqrt",
n_jobs= n_core,
random_state=seed)
rsf.fit(X_train, y_train)
scoreTraining = rsf.score(X_train,y_train)
scoreTest = rsf.score(X_test,y_test)
perm = PermutationImportance(rsf, n_iter=3, random_state=seed)
perm.fit(X_test, y_test)
feature = format_as_dataframe(explain_weights_sklearn(perm, feature_names=lFeature, top = len(lFeature) ))
feature = pd.Series(feature["weight"].tolist(), index=feature["feature"].tolist())
#feature = pd.DataFrame(rsf.feature_importances_, index=lFeature)
return scoreTraining, scoreTest, feature
def gradient_boosted_models(X_train, y_train, X_test, y_test, lFeature = None, n_core = 2, seed = 123):
from sksurv.ensemble import GradientBoostingSurvivalAnalysis
# let's normalize, anyway
scaler = StandardScaler()
X_train = scaler.fit_transform(X_train)
X_test = scaler.transform(X_test)
seed = np.random.RandomState(seed)
# -*- coding: utf-8 -*-
import numpy as np
from typing import Callable
from scipy.integrate import nquad
def coupled_logarithm(value: [int, float, np.ndarray],
kappa: [int, float] = 0.0,
dim: int = 1
) -> [float, np.ndarray]:
"""
Generalization of the logarithm function, which defines smooth
transition to power functions.
Parameters
----------
value : Input variable to which the coupled logarithm is applied.
Accepts int, float, and np.ndarray data types.
kappa : Coupling parameter which modifies the coupled logarithm function.
Accepts int and float data types.
dim : The dimension (or rank) of value. If value is scalar, then dim = 1.
Accepts only int data type.
"""
# convert value into np.ndarray (if scalar) to keep consistency
value = np.array(value)
##########################################################
# @author: pkc/Vincent
# --------------------------------------------------------
# Based on the MATLAB code by <NAME>
# modification of python code by sajid
#
import numpy as np
import numpy.fft as fft
import matplotlib.pyplot as plt
import itertools
import sys
def diagonal_split(x):
''' pre-processing steps in terms of
cropping to enable the diagonal
splitting of the input image
'''
h, w = x.shape
cp_x = x
''' cropping the rows '''
if (np.mod(h, 4)==1):
cp_x = cp_x[:-1]
elif(np.mod(h, 4)==2):
cp_x = cp_x[1:-1]
elif(np.mod(h, 4)==3):
cp_x = cp_x[1:-2]
''' cropping the columns'''
if (np.mod(w, 4)==1):
cp_x = cp_x[:, :-1]
elif(np.mod(w, 4)==2):
cp_x = cp_x[:,1:-1]
elif(np.mod(w, 4)==3):
cp_x = cp_x[:, 1:-2]
x = cp_x
h, w = x.shape
if((np.mod(h, 4)!=0) or (np.mod(w, 4)!=0)):
print('[!] diagonal splitting not possible due to cropping issue')
print('[!] re-check the cropping portion')
sys.exit(1)
row_indices = np.arange(0, h)
col_indices = np.arange(0, w)
row_split_u = row_indices[::2]
row_split_d = np.asanyarray(list(set(row_indices)-set(row_split_u)))
col_split_l = col_indices[::2]
col_split_r = np.asanyarray(list(set(col_indices)-set(col_split_l)))
''' ordered pair of pre-processing
of the diagonal elements
and sub-sequent splits of the image
'''
op1 = list(itertools.product(row_split_u, col_split_l))
ind = [np.asanyarray([fo for fo, _ in op1]), np.asanyarray([so for _, so in op1])]
s_a1 = x[ind]
s_a1 = s_a1.reshape((len(row_split_u), len(col_split_l)))
op2 = list(itertools.product(row_split_d, col_split_r))
ind = [np.asanyarray([fo for fo, _ in op2]), np.asanyarray([so for _, so in op2])]
s_a2 = x[ind]
s_a2 = s_a2.reshape((len(row_split_d), len(col_split_r)))
op3 = list(itertools.product(row_split_d, col_split_l))
ind = [np.asanyarray([fo for fo, _ in op3]), np.asanyarray([so for _, so in op3])]
s_b1 = x[ind]
s_b1 = s_b1.reshape((len(row_split_d), len(col_split_l)))
op4 = list(itertools.product(row_split_u, col_split_r))
ind = [np.asanyarray([fo for fo, _ in op4]), np.asanyarray([so for _, so in op4])]
s_b2 = x[ind]
s_b2 = s_b2.reshape((len(row_split_u), len(col_split_r)))
return(s_a1, s_a2, s_b1, s_b2)
def get_frc_img(img, frc_img_lx, center=None):
''' Returns a cropped image version of input image "img"
img: input image
center: cropping is performed with center a reference
point to calculate length in x and y direction.
Unless otherwise stated center is basically center
of input image "img"
frc_img_lx: length of cropped image in x as well as y. Also
the cropped image is made to be square image for
the FRC calculation
'''
h, w = img.shape
cy = round(min(h, w)/2)
if center is None:
cy = cy
else:
cy = cy + center
ep = cy + round(frc_img_lx/2)
sp = ep - frc_img_lx
frc_img = img[sp:ep, sp:ep]
return frc_img
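# Quick sketch of get_frc_img's centre cropping on a synthetic array: a
# 100x100 image is cropped to the central 64x64 square.
def _get_frc_img_sketch():
    img = np.arange(100 * 100, dtype=float).reshape(100, 100)
    cropped = get_frc_img(img, 64)
    assert cropped.shape == (64, 64)
    return cropped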
def ring_indices(x, inscribed_rings=True, plot=False):
print("ring plots is:", plot)
#read the shape and dimensions of the input image
shape = np.shape(x)
dim = np.size(shape)
'''Depending on the dimension of the image 2D/3D,
create an array of integers which increase with
distance from the center of the array
'''
if dim == 2 :
nr,nc = shape
nrdc = np.floor(nr/2)
ncdc = np.floor(nc/2)
r = np.arange(nr)-nrdc
c = np.arange(nc)-ncdc
[R,C] = np.meshgrid(r,c)
index = np.round(np.sqrt(R**2+C**2))
elif dim == 3 :
nr,nc,nz = shape
nrdc = np.floor(nr/2)+1
ncdc = np.floor(nc/2)+1
nzdc = np.floor(nz/2)+1
r = np.arange(nr)-nrdc + 1
c = np.arange(nc)-ncdc + 1
z = np.arange(nc)-nzdc + 1
[R,C,Z] = np.meshgrid(r,c,z)
index = np.round(np.sqrt(R**2+C**2+Z**2))+1
else :
print('input is neither a 2d or 3d array')
''' if inscribed_rings is True then the outmost
ring use to evaluate the FRC will be the circle
inscribed in the square input image of size L.
(i.e. FRC_r <= L/2). Else the arcs of the rings
beyond the inscribed circle will also be
considered while determining FRC
(i.e. FRC_r<=sqrt((L/2)^2 + (L/2)^2))
'''
if (inscribed_rings == True):
maxindex = nr/2
else:
maxindex = np.max(index)
#output = np.zeros(int(maxindex),dtype = complex)
''' In the next step the output is generated. The output is an array of length
maxindex. The elements in this array corresponds to the sum of all the elements
in the original array correponding to the integer position of the output array
divided by the number of elements in the index array with the same value as the
integer position.
Depening on the size of the input array, use either the pixel or index method.
By-pixel method for large arrays and by-index method for smaller ones.
'''
print('performed by index method')
indices = []
for i in np.arange(int(maxindex)):
indices.append(np.where(index == i))
if plot is True:
img_plane = np.zeros((nr, nc))
for i in range(int(maxindex)):
if ((i%20)==0):
img_plane[indices[i]]=1.0
plt.imshow(img_plane,cmap="summer")
if inscribed_rings is True:
plt.title(' FRC rings with the max radius as that\
\n of the inscribed circle in the image (spacing of 20 [px] between rings)')
else:
plt.title(' FRC rings extending beyond the radius of\
\n the inscribed circle in the image (spacing of 20 [px] between rings)')
return(indices)
def spinavej(x, inscribed_rings=True):
''' modification of code by sajid an
Based on the MATLAB code by <NAME>
'''
shape = np.shape(x)
dim = np.size(shape)
''' Depending on the dimension of the image 2D/3D, create an array of integers
which increase with distance from the center of the array
'''
if dim == 2 :
nr,nc = shape
nrdc = np.floor(nr/2)
ncdc = np.floor(nc/2)
r = np.arange(nr)-nrdc
c = np.arange(nc)-ncdc
[R,C] = np.meshgrid(r,c)
import torch
import numpy as np
import cv2
import os
from torch.utils.data import Dataset
import matplotlib.pyplot as plt
import random
class ToTensor(object):
def __call__(self, sample):
entry = {}
for k in sample:
if k == 'rect':
entry[k] = torch.IntTensor(sample[k])
else:
entry[k] = torch.FloatTensor(sample[k])
return entry
class InpaintingDataset(Dataset):
def __init__(self, info_list, root_dir='', im_size=(256, 256), transform=None):
self.filenames = open(info_list, 'rt').read().splitlines()
self.root_dir = root_dir
self.transform = transform
self.im_size = im_size
np.random.seed(2018)
def __len__(self):
return len(self.filenames)
def read_image(self, filepath):
image = cv2.imread(filepath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
h, w, c = image.shape
if h != self.im_size[0] or w != self.im_size[1]:
ratio = max(1.0*self.im_size[0]/h, 1.0*self.im_size[1]/w)
im_scaled = cv2.resize(image, None, fx=ratio, fy=ratio)
h, w, _ = im_scaled.shape
h_idx = (h-self.im_size[0]) // 2
w_idx = (w-self.im_size[1]) // 2
im_scaled = im_scaled[h_idx:h_idx+self.im_size[0], w_idx:w_idx+self.im_size[1],:]
im_scaled = np.transpose(im_scaled, [2, 0, 1])
else:
im_scaled = np.transpose(image, [2, 0, 1])
return im_scaled
def __getitem__(self, idx):
image = self.read_image(os.path.join(self.root_dir, self.filenames[idx]))
#print("image path:", os.path.join(self.root_dir, self.filenames[idx]))
sample = {'gt': image}
if self.transform:
sample = self.transform(sample)
return sample
#============================
# Modified versiion by Vajira
# To load image and mask from segmented images of Hyper-kvasir
#============================
class InpaintingDataset_WithMask(Dataset):
def __init__(self, info_list, root_dir='', im_size=(256, 256), transform=None):
self.filenames= open(info_list, 'rt').read().splitlines()
self.root_dir = root_dir
self.root_dir_img = os.path.join(self.root_dir, "images")
self.root_dir_mask = os.path.join(self.root_dir, "masks")
self.transform = transform
self.im_size = im_size
np.random.seed(2018)
def __len__(self):
return len(self.filenames)
def read_image(self, filepath):
#print(filepath)
image = cv2.imread(filepath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2RGB)
#print(image.shape)
h, w, c = image.shape
if h != self.im_size[0] or w != self.im_size[1]:
ratio = max(1.0*self.im_size[0]/h, 1.0*self.im_size[1]/w)
im_scaled = cv2.resize(image, None, fx=ratio, fy=ratio)
#print(im_scaled.shape)
h, w, _ = im_scaled.shape
h_idx = (h-self.im_size[0]) // 2
w_idx = (w-self.im_size[1]) // 2
im_scaled = im_scaled[h_idx:h_idx+self.im_size[0], w_idx:w_idx+self.im_size[1],:]
plt.imsave("test_img.jpeg",im_scaled)
im_scaled = np.transpose(im_scaled, [2, 0, 1])
else:
im_scaled = np.transpose(image, [2, 0, 1])
#print("This is running")
return im_scaled
# added by vajira
# To read mask
def read_mask(self, filepath):
#print(filepath)
image = cv2.imread(filepath)
image = cv2.cvtColor(image, cv2.COLOR_BGR2GRAY)
#image = np.expand_dims(image, axis=2)
#print(image.shape)
#image = np.where(image > 0, 1, 0)
# print(image.shape)
h, w = image.shape
if h != self.im_size[0] or w != self.im_size[1]:
ratio = max(1.0*self.im_size[0]/h, 1.0*self.im_size[1]/w)
im_scaled = cv2.resize(image, None, fx=ratio, fy=ratio)
#print(im_scaled.shape)
h, w = im_scaled.shape
h_idx = (h-self.im_size[0]) // 2
w_idx = (w-self.im_size[1]) // 2
im_scaled = im_scaled[h_idx:h_idx+self.im_size[0], w_idx:w_idx+self.im_size[1]]
im_scaled = np.expand_dims(im_scaled, axis=2)
#plt.imsave("test_mask.jpeg",im_scaled, cmap="gray")
im_scaled = np.transpose(im_scaled, [2, 0, 1])
im_scaled = np.where(im_scaled > 0, 1, 0) # convert into 0 and 1
else:
im_scaled = np.expand_dims(im_scaled, axis=2)
im_scaled = np.transpose(image, [2, 0, 1])
'''Run a linear Q-learning agent on a grid world.
The weight update is done by hand (w/o any autodiff machinery) for educational purposes.
'''
from envs.GridWorld import GridWorld, ACTIONS
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.patches import Circle
import seaborn as sns
sns.set(style='white', context='talk', palette='colorblind')
np.random.seed(0)
# define env and agent
env = GridWorld()
state_dim = env.height * env.width
n_actions = len(ACTIONS)
# weights (i.e. the agent)
W = np.zeros((state_dim, n_actions))
# training params
n_trials = 150
max_steps = 50
epsilon = 0.2
alpha = 0.1
gamma = .9
'''train
'''
log_return = []
log_steps = []
log_actions = []
log_states = []
for i in range(n_trials):
env.reset()
cumulative_reward = 0
step = 0
log_actions_i = []
log_states_i = []
while step < max_steps:
# get current state
s_t = env.get_agent_loc().reshape(1, -1)
log_states_i.append(s_t)
# compute q val
q_t = np.dot(s_t, W)
# epsilon greedy action selection
if np.random.uniform() > epsilon:
a_t = np.argmax(q_t)
else:
a_t = np.random.randint(n_actions)
# transition and get reward
r_t = env.step(a_t)
# get next states info
s_next = env.get_agent_loc().reshape(1, -1)
max_q_next = np.max(np.dot(s_next, W))
# compute TD target
q_target = r_t + gamma * max_q_next
# update weights with the TD error (target minus current estimate for the chosen action)
w_delta = alpha * (q_target - q_t[0, a_t]) * s_t
W[:, a_t] += np.squeeze(w_delta)
# update R and n steps
step += 1
cumulative_reward += r_t * gamma**step
log_actions_i.append(a_t)
# termination condition
if env.is_terminal():
break
log_states_i.append(s_t)
log_states.append(log_states_i)
log_actions.append(log_actions_i)
log_return.append(cumulative_reward)
log_steps.append(step)
'''
learning curve
'''
f, axes = plt.subplots(2, 1, figsize=(6, 6), sharex=True)
axes[0].plot(log_return)
axes[0].axhline(0, color='grey', linestyle='--')
axes[0].set_title('Learning curve')
axes[0].set_ylabel('Return')
axes[1].plot(log_steps)
axes[1].set_title(' ')
axes[1].axhline(0, color='grey', linestyle='--')
axes[1].set_ylabel('n steps taken')
axes[1].set_xlabel('Epoch')
axes[1].set_ylim([0, None])
sns.despine()
f.tight_layout()
'''weights'''
f, axes = plt.subplots(4, 1, figsize=(5, 11))
for i, ax in enumerate(axes):
sns.heatmap(
W[:, i].reshape(5, 5),
cmap='viridis', square=True,
vmin=np.min(W), vmax=np.max(W), ax=ax)  # vmax/ax added here to close the heatmap call truncated in the source
# Copyright 2019 Huawei Technologies Co., Ltd
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ============================================================================
import pytest
import numpy as np
from mindspore import Tensor
from mindspore.ops import operations as P
from mindspore.ops import composite as C
import mindspore.nn as nn
import mindspore.context as context
class NetSoftmax(nn.Cell):
def __init__(self):
super(NetSoftmax, self).__init__()
axis = -2
self.softmax1 = P.Softmax()
self.softmax2 = P.Softmax(axis)
def construct(self, x):
return self.softmax1(x), self.softmax2(x)
@pytest.mark.level0
@pytest.mark.platform_x86_gpu_training
@pytest.mark.env_onecard
def test_softmax():
x = Tensor(np.array([[0.1, 0.3, 0.6, -0.3],
[0.2, -0.6, 0.8, 0.6],
[0.6, -1.2, 0.4, 0.6]]).astype(np.float32))
expect1 = np.ones(3)
expect2 = np.ones(4)
error1 = expect1 * 1.0e-6
error2 = expect2 * 1.0e-6
context.set_context(mode=context.PYNATIVE_MODE, device_target="GPU")
Softmax = NetSoftmax()
output = Softmax(x)
outputSum1 = output[0].asnumpy().sum(axis=1)
outputSum2 = output[1].asnumpy().sum(axis=0)
diff1 = np.abs(outputSum1 - expect1)
diff2 = np.abs(outputSum2 - expect2)
assert np.all(diff1 < error1)
assert np.all(diff2 < error2)  # also check the axis=-2 softmax; diff2/error2 were otherwise unused
import json
import unittest
from glob import glob
from os import remove, path
from shutil import rmtree
from uuid import uuid4
import anndata
import numpy as np
from pandas import Series, DataFrame
import tiledb
from backend.czi_hosted.common.corpora import CorporaConstants
from backend.czi_hosted.converters.h5ad_data_file import H5ADDataFile
from backend.test import PROJECT_ROOT
class TestH5ADDataFile(unittest.TestCase):
def setUp(self):
self.sample_anndata = self._create_sample_anndata_dataset()
self.sample_h5ad_filename = self._write_anndata_to_file(self.sample_anndata)
self.sample_output_directory = path.splitext(self.sample_h5ad_filename)[0] + ".cxg"
def tearDown(self):
if self.sample_h5ad_filename:
remove(self.sample_h5ad_filename)
if path.isdir(self.sample_output_directory):
rmtree(self.sample_output_directory)
def test__create_h5ad_data_file__non_h5ad_raises_exception(self):
non_h5ad_filename = "my_fancy_dataset.csv"
with self.assertRaises(Exception) as exception_context:
H5ADDataFile(non_h5ad_filename)
self.assertIn("File must be an H5AD", str(exception_context.exception))
def test__create_h5ad_data_file__assert_warning_outputted_if_dataset_title_or_about_given(self):
with self.assertLogs(level="WARN") as logger:
H5ADDataFile(
self.sample_h5ad_filename,
dataset_title="My Awesome Dataset",
dataset_about="http://www.awesomedataset.com",
use_corpora_schema=False,
)
self.assertIn("will override any metadata that is extracted", logger.output[0])
def test__create_h5ad_data_file__reads_anndata_successfully(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename, use_corpora_schema=False)
self.assertTrue((h5ad_file.anndata.X == self.sample_anndata.X).all())
self.assertEqual(
h5ad_file.anndata.obs.sort_index(inplace=True), self.sample_anndata.obs.sort_index(inplace=True)
)
self.assertEqual(
h5ad_file.anndata.var.sort_index(inplace=True), self.sample_anndata.var.sort_index(inplace=True)
)
for key in h5ad_file.anndata.obsm.keys():
self.assertIn(key, self.sample_anndata.obsm.keys())
self.assertTrue((h5ad_file.anndata.obsm[key] == self.sample_anndata.obsm[key]).all())
for key in self.sample_anndata.obsm.keys():
self.assertIn(key, h5ad_file.anndata.obsm.keys())
self.assertTrue((h5ad_file.anndata.obsm[key] == self.sample_anndata.obsm[key]).all())
def test__create_h5ad_data_file__copies_index_of_obs_and_var_to_column(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename, use_corpora_schema=False)
# The automatic name chosen for the index should be "name_0"
self.assertNotIn("name_0", self.sample_anndata.obs.columns)
self.assertIn("name_0", h5ad_file.obs.columns)
self.assertNotIn("name_0", self.sample_anndata.var.columns)
self.assertIn("name_0", h5ad_file.var.columns)
def test__create_h5ad_data_file__no_copy_if_obs_and_var_index_names_specified(self):
h5ad_file = H5ADDataFile(
self.sample_h5ad_filename,
use_corpora_schema=False,
obs_index_column_name="float_category",
vars_index_column_name="int_category",
)
self.assertNotIn("name_0", h5ad_file.obs.columns)
self.assertNotIn("name_0", h5ad_file.var.columns)
def test__create_h5ad_data_file__obs_and_var_index_names_specified_not_unique_raises_exception(self):
with self.assertRaises(Exception) as exception_context:
H5ADDataFile(
self.sample_h5ad_filename,
use_corpora_schema=False,
obs_index_column_name="float_category",
vars_index_column_name="bool_category",
)
self.assertIn("Please prepare data to contain unique values", str(exception_context.exception))
def test__create_h5ad_data_file__obs_and_var_index_names_specified_doesnt_exist_raises_exception(self):
with self.assertRaises(Exception) as exception_context:
H5ADDataFile(
self.sample_h5ad_filename,
use_corpora_schema=False,
obs_index_column_name="unknown_category",
vars_index_column_name="i_dont_exist",
)
self.assertIn("does not exist", str(exception_context.exception))
def test__create_h5ad_data_file__extract_about_and_title_from_dataset(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename)
self.assertEqual(h5ad_file.dataset_title, "random_link_name")
self.assertEqual(h5ad_file.dataset_about, "www.link.com")
def test__create_h5ad_data_file__inputted_dataset_title_and_about_overrides_extracted(self):
h5ad_file = H5ADDataFile(
self.sample_h5ad_filename, dataset_about="override_about", dataset_title="override_title"
)
self.assertEqual(h5ad_file.dataset_title, "override_title")
self.assertEqual(h5ad_file.dataset_about, "override_about")
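# Note (added): the second argument to to_cxg() below appears to act as a sparsity
# threshold -- 100 yields a sparse X encoding and 0 a dense one, matching the
# is_sparse flag passed to the validation helper.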
def test__to_cxg__simple_anndata_no_corpora_and_sparse(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename, use_corpora_schema=False)
h5ad_file.to_cxg(self.sample_output_directory, 100)
self._validate_cxg_and_h5ad_content_match(self.sample_h5ad_filename, self.sample_output_directory, True)
def test__to_cxg__simple_anndata_with_corpora_and_sparse(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename)
h5ad_file.to_cxg(self.sample_output_directory, 100)
self._validate_cxg_and_h5ad_content_match(self.sample_h5ad_filename, self.sample_output_directory, True)
def test__to_cxg__simple_anndata_no_corpora_and_dense(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename, use_corpora_schema=False)
h5ad_file.to_cxg(self.sample_output_directory, 0)
self._validate_cxg_and_h5ad_content_match(self.sample_h5ad_filename, self.sample_output_directory, False)
def test__to_cxg__simple_anndata_with_corpora_and_dense(self):
h5ad_file = H5ADDataFile(self.sample_h5ad_filename)
h5ad_file.to_cxg(self.sample_output_directory, 0)
self._validate_cxg_and_h5ad_content_match(self.sample_h5ad_filename, self.sample_output_directory, False)
def test__to_cxg__with_sparse_column_encoding(self):
anndata = self._create_sample_anndata_dataset()
anndata.X = np.ones((3, 4))
sparse_with_column_shift_filename = self._write_anndata_to_file(anndata)
h5ad_file = H5ADDataFile(sparse_with_column_shift_filename)
h5ad_file.to_cxg(self.sample_output_directory, 50)
self._validate_cxg_and_h5ad_content_match(
sparse_with_column_shift_filename, self.sample_output_directory, False, has_column_encoding=True
)
# Clean up
remove(sparse_with_column_shift_filename)
def _validate_cxg_and_h5ad_content_match(self, h5ad_filename, cxg_directory, is_sparse, has_column_encoding=False):
anndata_object = anndata.read_h5ad(h5ad_filename)
# Array locations
metadata_array_location = f"{cxg_directory}/cxg_group_metadata"
main_x_array_location = f"{cxg_directory}/X"
embedding_array_location = f"{cxg_directory}/emb"
specific_embedding_array_location = f"{self.sample_output_directory}/emb/awesome_embedding"
obs_array_location = f"{cxg_directory}/obs"
var_array_location = f"{cxg_directory}/var"
x_col_shift_array_location = f"{cxg_directory}/X_col_shift"
# Assert CXG structure
self.assertEqual(tiledb.object_type(cxg_directory), "group")
self.assertEqual(tiledb.object_type(obs_array_location), "array")
self.assertEqual(tiledb.object_type(var_array_location), "array")
self.assertEqual(tiledb.object_type(main_x_array_location), "array")
self.assertEqual(tiledb.object_type(embedding_array_location), "group")
self.assertEqual(tiledb.object_type(specific_embedding_array_location), "array")
if has_column_encoding:
self.assertEqual(tiledb.object_type(x_col_shift_array_location), "array")
# Validate metadata
metadata_array = tiledb.DenseArray(metadata_array_location, mode="r")
self.assertIn("cxg_version", metadata_array.meta)
# Validate obs index
obs_array = tiledb.DenseArray(obs_array_location, mode="r")
expected_index_data = anndata_object.obs.index.to_numpy()
index_name = json.loads(obs_array.meta["cxg_schema"])["index"]
actual_index_data = obs_array.query(attrs=[index_name])[:][index_name]
self.assertTrue(np.array_equal(expected_index_data, actual_index_data))
# Validate obs columns
expected_columns = list(anndata_object.obs.columns.values)
for column_name in expected_columns:
expected_data = anndata_object.obs[column_name].to_numpy()
actual_data = obs_array.query(attrs=[column_name])[:][column_name]
self.assertTrue(np.array_equal(expected_data, actual_data))
# Validate var index
var_array = tiledb.DenseArray(var_array_location, mode="r")
expected_index_data = anndata_object.var.index.to_numpy()
index_name = json.loads(var_array.meta["cxg_schema"])["index"]
actual_index_data = var_array.query(attrs=[index_name])[:][index_name]
self.assertTrue(np.array_equal(expected_index_data, actual_index_data))
# Validate var columns
expected_columns = anndata_object.var.columns.values
for column_name in expected_columns:
expected_data = anndata_object.var[column_name].to_numpy()
actual_data = var_array.query(attrs=[column_name])[:][column_name]
self.assertTrue(np.array_equal(expected_data, actual_data))
# Validate embedding
expected_embedding_data = anndata_object.obsm.get("X_awesome_embedding")
embedding_array = tiledb.DenseArray(specific_embedding_array_location, mode="r")
actual_embedding_data = embedding_array[:, 0:2]
self.assertTrue(np.array_equal(expected_embedding_data, actual_embedding_data))
# Validate X matrix if not column shifted
if not has_column_encoding:
expected_x_data = anndata_object.X
if is_sparse:
x_array = tiledb.SparseArray(main_x_array_location, mode="r")
actual_x_data = np.reshape(x_array[:, :][""], expected_x_data.shape)
else:
x_array = tiledb.DenseArray(main_x_array_location, mode="r")
actual_x_data = x_array[:, :]
self.assertTrue(np.array_equal(expected_x_data, actual_x_data))
def _write_anndata_to_file(self, anndata):
temporary_filename = f"{PROJECT_ROOT}/backend/test/fixtures/{uuid4()}.h5ad"
anndata.write(temporary_filename)
return temporary_filename
def _create_sample_anndata_dataset(self):
# Create X
X = np.random.rand(3, 4)
# Create obs
random_string_category = Series(data=["a", "b", "b"], dtype="category")
random_float_category = Series(data=[3.2, 1.1, 2.2], dtype=np.float32)
obs_dataframe = DataFrame(
data={"string_category": random_string_category, "float_category": random_float_category}
)
obs = obs_dataframe
# Create vars
random_int_category = Series(data=[3, 1, 2, 4], dtype=np.int32)
random_bool_category = Series(data=[True, True, False, True], dtype=np.bool_)
var_dataframe = DataFrame(data={"int_category": random_int_category, "bool_category": random_bool_category})
var = var_dataframe
# Create embeddings
random_embedding = np.random.rand(3, 2)
import numpy as np
import openmdao.api as om
import wisdem.commonse.utilities as util
import wisdem.pyframe3dd.pyframe3dd as pyframe3dd
import wisdem.commonse.utilization_dnvgl as util_dnvgl
import wisdem.commonse.utilization_constraints as util_con
from wisdem.commonse import NFREQ, gravity
from wisdem.floatingse.member import NULL, MEMMAX, Member
NNODES_MAX = 1000
NELEM_MAX = 1000
RIGID = 1e30
EPS = 1e-6
class PlatformFrame(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
for k in range(n_member):
self.add_input(f"member{k}:nodes_xyz", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_input(f"member{k}:nodes_r", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_D", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_t", NULL * np.ones(MEMMAX), units="m")
self.add_input(f"member{k}:section_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input(f"member{k}:section_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input(f"member{k}:section_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input(f"member{k}:section_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:section_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:section_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input(f"member{k}:idx_cb", 0)
self.add_input(f"member{k}:buoyancy_force", 0.0, units="N")
self.add_input(f"member{k}:displacement", 0.0, units="m**3")
self.add_input(f"member{k}:center_of_buoyancy", np.zeros(3), units="m")
self.add_input(f"member{k}:center_of_mass", np.zeros(3), units="m")
self.add_input(f"member{k}:ballast_mass", 0.0, units="kg")
self.add_input(f"member{k}:total_mass", 0.0, units="kg")
self.add_input(f"member{k}:total_cost", 0.0, units="USD")
self.add_input(f"member{k}:I_total", np.zeros(6), units="kg*m**2")
self.add_input(f"member{k}:Awater", 0.0, units="m**2")
self.add_input(f"member{k}:Iwater", 0.0, units="m**4")
self.add_input(f"member{k}:added_mass", np.zeros(6), units="kg")
self.add_input(f"member{k}:waterline_centroid", np.zeros(2), units="m")
self.add_input(f"member{k}:variable_ballast_capacity", val=0.0, units="m**3")
self.add_input(f"member{k}:Px", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:Py", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:Pz", np.zeros(MEMMAX), units="N/m")
self.add_input(f"member{k}:qdyn", np.zeros(MEMMAX), units="Pa")
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("transition_piece_mass", 0.0, units="kg")
self.add_input("transition_piece_cost", 0.0, units="USD")
self.add_output("transition_piece_I", np.zeros(6), units="kg*m**2")
self.add_output("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_output("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_output("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_output("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_output("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_output("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_output("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_output("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_output("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_output("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_output("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_discrete_output("platform_elem_memid", [-1] * NELEM_MAX)
self.add_output("platform_displacement", 0.0, units="m**3")
self.add_output("platform_center_of_buoyancy", np.zeros(3), units="m")
self.add_output("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_output("platform_centroid", np.zeros(3), units="m")
self.add_output("platform_ballast_mass", 0.0, units="kg")
self.add_output("platform_hull_mass", 0.0, units="kg")
self.add_output("platform_mass", 0.0, units="kg")
self.add_output("platform_I_hull", np.zeros(6), units="kg*m**2")
self.add_output("platform_cost", 0.0, units="USD")
self.add_output("platform_Awater", 0.0, units="m**2")
self.add_output("platform_Iwater", 0.0, units="m**4")
self.add_output("platform_added_mass", np.zeros(6), units="kg")
self.add_output("platform_variable_capacity", np.zeros(n_member), units="m**3")
self.node_mem2glob = {}
def compute(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Seems like we have to run this each time as numbering can change during optimization
self.node_mem2glob = {}
self.set_connectivity(inputs, outputs)
self.set_node_props(inputs, outputs)
self.set_element_props(inputs, outputs, discrete_inputs, discrete_outputs)
def set_connectivity(self, inputs, outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Initialize running lists across all members
nodes_temp = np.empty((0, 3))
elem_n1 = np.array([], dtype=np.int_)
elem_n2 = np.array([], dtype=np.int_)
# Look over members and grab all nodes and internal connections
for k in range(n_member):
inode_xyz = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(inode_xyz[:, 0] == NULL)[0][0]
inode_xyz = inode_xyz[:inodes, :]
inode_range = np.arange(inodes - 1)
n = nodes_temp.shape[0]
for ii in range(inodes):
self.node_mem2glob[(k, ii)] = n + ii
elem_n1 = np.append(elem_n1, n + inode_range)
elem_n2 = np.append(elem_n2, n + inode_range + 1)
nodes_temp = np.append(nodes_temp, inode_xyz, axis=0)
# Reveal connectivity by using mapping to unique node positions
nodes, idx, inv = np.unique(nodes_temp.round(8), axis=0, return_index=True, return_inverse=True)
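# np.unique collapses coincident member end-points into single joints: 'idx' gives one
# representative row per unique node, and 'inv' maps every original (duplicated) node
# index to its deduplicated index, which is what lets elem_n1/elem_n2 below be
# re-expressed against the joined node set.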
nnode = nodes.shape[0]
outputs["platform_nodes"] = NULL * np.ones((NNODES_MAX, 3))
outputs["platform_nodes"][:nnode, :] = nodes
outputs["platform_centroid"] = nodes.mean(axis=0)
# Use mapping to set references to node joints
nelem = elem_n1.size
outputs["platform_elem_n1"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["platform_elem_n2"] = NULL * np.ones(NELEM_MAX, dtype=np.int_)
outputs["platform_elem_n1"][:nelem] = inv[elem_n1]
outputs["platform_elem_n2"][:nelem] = inv[elem_n2]
# Remap the member-to-global node indices to the deduplicated node numbering
for k in self.node_mem2glob.keys():
self.node_mem2glob[k] = inv[self.node_mem2glob[k]]
def set_node_props(self, inputs, outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Number of valid nodes
node_platform = outputs["platform_nodes"]
nnode = np.where(node_platform[:, 0] == NULL)[0][0]
node_platform = node_platform[:nnode, :]
# Find greatest radius of all members at node intersections
Rnode = np.zeros(nnode)
for k in range(n_member):
irnode = inputs[f"member{k}:nodes_r"]
n = np.where(irnode == NULL)[0][0]
for ii in range(n):
iglob = self.node_mem2glob[(k, ii)]
Rnode[iglob] = np.array([Rnode[iglob], irnode[ii]]).max()
# Find forces on nodes
Fnode = np.zeros((nnode, 3))
for k in range(n_member):
icb = int(inputs[f"member{k}:idx_cb"])
iglob = self.node_mem2glob[(k, icb)]
Fnode[iglob, 2] += inputs[f"member{k}:buoyancy_force"]
# Get transition piece inertial properties
itrans_platform = util.closest_node(node_platform, inputs["transition_node"])
m_trans = float(inputs["transition_piece_mass"])
r_trans = Rnode[itrans_platform]
I_trans = m_trans * r_trans ** 2.0 * np.r_[0.5, 0.5, 1.0, np.zeros(3)]
outputs["transition_piece_I"] = I_trans
# Store outputs
outputs["platform_Rnode"] = NULL * np.ones(NNODES_MAX)
outputs["platform_Rnode"][:nnode] = Rnode
outputs["platform_Fnode"] = NULL * np.ones((NNODES_MAX, 3))
outputs["platform_Fnode"][:nnode, :] = Fnode
def set_element_props(self, inputs, outputs, discrete_inputs, discrete_outputs):
# Load in number of members
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
# Initialize running lists across all members
elem_D = np.array([])
elem_t = np.array([])
elem_A = np.array([])
elem_Asx = np.array([])
elem_Asy = np.array([])
elem_Ixx = np.array([])
elem_Iyy = np.array([])
elem_Izz = np.array([])
elem_rho = np.array([])
elem_E = np.array([])
elem_G = np.array([])
elem_sigy = np.array([])
elem_Px1 = np.array([])
elem_Px2 = np.array([])
elem_Py1 = np.array([])
elem_Py2 = np.array([])
elem_Pz1 = np.array([])
elem_Pz2 = np.array([])
elem_qdyn = np.array([])
elem_memid = np.array([], dtype=np.int_)
mass = 0.0
m_ball = 0.0
cost = 0.0
volume = 0.0
Awater = 0.0
Iwater = 0.0
m_added = np.zeros(6)
cg_plat = np.zeros(3)
cb_plat = np.zeros(3)
centroid = outputs["platform_centroid"][:2]
variable_capacity = np.zeros(n_member)
# Append all member data
for k in range(n_member):
n = np.where(inputs[f"member{k}:section_A"] == NULL)[0][0]
elem_D = np.append(elem_D, inputs[f"member{k}:section_D"][:n])
elem_t = np.append(elem_t, inputs[f"member{k}:section_t"][:n])
elem_A = np.append(elem_A, inputs[f"member{k}:section_A"][:n])
elem_Asx = np.append(elem_Asx, inputs[f"member{k}:section_Asx"][:n])
elem_Asy = np.append(elem_Asy, inputs[f"member{k}:section_Asy"][:n])
elem_Ixx = np.append(elem_Ixx, inputs[f"member{k}:section_Ixx"][:n])
elem_Iyy = np.append(elem_Iyy, inputs[f"member{k}:section_Iyy"][:n])
elem_Izz = np.append(elem_Izz, inputs[f"member{k}:section_Izz"][:n])
elem_rho = np.append(elem_rho, inputs[f"member{k}:section_rho"][:n])
elem_E = np.append(elem_E, inputs[f"member{k}:section_E"][:n])
elem_G = np.append(elem_G, inputs[f"member{k}:section_G"][:n])
elem_sigy = np.append(elem_sigy, inputs[f"member{k}:section_sigma_y"][:n])
elem_qdyn = np.append(elem_qdyn, inputs[f"member{k}:qdyn"][:n])
elem_memid = np.append(elem_memid, k * np.ones(n, dtype=np.int_))
# The loads should come in with length n+1
elem_Px1 = np.append(elem_Px1, inputs[f"member{k}:Px"][:n])
elem_Px2 = np.append(elem_Px2, inputs[f"member{k}:Px"][1 : (n + 1)])
elem_Py1 = np.append(elem_Py1, inputs[f"member{k}:Py"][:n])
elem_Py2 = np.append(elem_Py2, inputs[f"member{k}:Py"][1 : (n + 1)])
elem_Pz1 = np.append(elem_Pz1, inputs[f"member{k}:Pz"][:n])
elem_Pz2 = np.append(elem_Pz2, inputs[f"member{k}:Pz"][1 : (n + 1)])
# Mass, volume, cost tallies
imass = inputs[f"member{k}:total_mass"]
ivol = inputs[f"member{k}:displacement"]
mass += imass
volume += ivol
cost += inputs[f"member{k}:total_cost"]
m_ball += inputs[f"member{k}:ballast_mass"]
Awater_k = inputs[f"member{k}:Awater"]
Awater += Awater_k
Rwater2 = np.sum((inputs[f"member{k}:waterline_centroid"] - centroid) ** 2)
Iwater += inputs[f"member{k}:Iwater"] + Awater_k * Rwater2
m_added += inputs[f"member{k}:added_mass"]
variable_capacity[k] = inputs[f"member{k}:variable_ballast_capacity"]
# Center of mass / buoyancy tallies
cg_plat += imass * inputs[f"member{k}:center_of_mass"]
cb_plat += ivol * inputs[f"member{k}:center_of_buoyancy"]
# Add transition piece
m_trans = inputs["transition_piece_mass"]
cg_trans = inputs["transition_node"]
I_trans = util.assembleI(outputs["transition_piece_I"])
mass += m_trans
cost += inputs["transition_piece_cost"]
cg_plat += m_trans * cg_trans
# Finalize outputs
cg_plat /= mass
cb_plat /= volume
# With CG known, loop back through to compute platform I
unit_z = np.array([0.0, 0.0, 1.0])
I_hull = np.zeros((3, 3))
for k in range(n_member):
xyz_k = inputs[f"member{k}:nodes_xyz"]
inodes = np.where(xyz_k[:, 0] == NULL)[0][0]
xyz_k = xyz_k[:inodes, :]
imass = inputs[f"member{k}:total_mass"]
cg_k = inputs[f"member{k}:center_of_mass"]
R = cg_plat - cg_k
# Figure out angle to make member parallel to global c.s.
vec_k = xyz_k[-1, :] - xyz_k[0, :]
T = util.rotate_align_vectors(vec_k, unit_z)
# Rotate member inertia tensor
I_k = util.assembleI(inputs[f"member{k}:I_total"])
I_k_rot = T @ I_k @ T.T
# Now do parallel axis theorem
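# I_platform += I_member(rotated) + m * (|R|^2 * E3 - R R^T), with R the vector from
# the member center of mass to the platform center of mass.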
I_hull += np.array(I_k_rot) + imass * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Add in transition piece
R = cg_plat - cg_trans
I_hull += I_trans + m_trans * (np.dot(R, R) * np.eye(3) - np.outer(R, R))
# Store outputs
nelem = elem_A.size
outputs["platform_elem_D"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_t"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_A"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Asx"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Asy"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Ixx"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Iyy"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Izz"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_rho"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_E"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_G"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_sigma_y"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Px1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Px2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Py1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Py2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Pz1"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_Pz2"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_qdyn"] = NULL * np.ones(NELEM_MAX)
outputs["platform_elem_D"][:nelem] = elem_D
outputs["platform_elem_t"][:nelem] = elem_t
outputs["platform_elem_A"][:nelem] = elem_A
outputs["platform_elem_Asx"][:nelem] = elem_Asx
outputs["platform_elem_Asy"][:nelem] = elem_Asy
outputs["platform_elem_Ixx"][:nelem] = elem_Ixx
outputs["platform_elem_Iyy"][:nelem] = elem_Iyy
outputs["platform_elem_Izz"][:nelem] = elem_Izz
outputs["platform_elem_rho"][:nelem] = elem_rho
outputs["platform_elem_E"][:nelem] = elem_E
outputs["platform_elem_G"][:nelem] = elem_G
outputs["platform_elem_sigma_y"][:nelem] = elem_sigy
outputs["platform_elem_Px1"][:nelem] = elem_Px1
outputs["platform_elem_Px2"][:nelem] = elem_Px2
outputs["platform_elem_Py1"][:nelem] = elem_Py1
outputs["platform_elem_Py2"][:nelem] = elem_Py2
outputs["platform_elem_Pz1"][:nelem] = elem_Pz1
outputs["platform_elem_Pz2"][:nelem] = elem_Pz2
outputs["platform_elem_qdyn"][:nelem] = elem_qdyn
discrete_outputs["platform_elem_memid"] = elem_memid
outputs["platform_mass"] = mass
outputs["platform_ballast_mass"] = m_ball
outputs["platform_hull_mass"] = mass - m_ball
outputs["platform_cost"] = cost
outputs["platform_displacement"] = volume
outputs["platform_hull_center_of_mass"] = cg_plat
outputs["platform_center_of_buoyancy"] = cb_plat
outputs["platform_I_hull"] = util.unassembleI(I_hull)
outputs["platform_Awater"] = Awater
outputs["platform_Iwater"] = Iwater
outputs["platform_added_mass"] = m_added
outputs["platform_variable_capacity"] = variable_capacity
class TowerPreMember(om.ExplicitComponent):
def setup(self):
self.add_input("transition_node", np.zeros(3), units="m")
self.add_input("tower_height", 0.0, units="m")
self.add_output("tower_top_node", np.zeros(3), units="m")
def compute(self, inputs, outputs):
transition_node = inputs["transition_node"]
tower_top_node = transition_node.copy()  # copy so transition_node itself is not modified in place (an earlier revision did)
tower_top_node[2] += float(inputs["tower_height"])
outputs["tower_top_node"] = tower_top_node
class PlatformTowerFrame(om.ExplicitComponent):
def initialize(self):
self.options.declare("options")
def setup(self):
opt = self.options["options"]
n_member = opt["floating"]["members"]["n_members"]
n_attach = opt["mooring"]["n_attach"]
self.add_input("platform_nodes", NULL * np.ones((NNODES_MAX, 3)), units="m")
self.add_input("platform_Fnode", NULL * np.ones((NNODES_MAX, 3)), units="N")
self.add_input("platform_Rnode", NULL * np.ones(NNODES_MAX), units="m")
self.add_input("platform_elem_n1", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("platform_elem_n2", NULL * np.ones(NELEM_MAX, dtype=np.int_))
self.add_input("platform_elem_D", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("platform_elem_t", NULL * np.ones(NELEM_MAX), units="m")
self.add_input("platform_elem_A", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Asx", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Asy", NULL * np.ones(NELEM_MAX), units="m**2")
self.add_input("platform_elem_Ixx", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_Iyy", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_Izz", NULL * np.ones(NELEM_MAX), units="kg*m**2")
self.add_input("platform_elem_rho", NULL * np.ones(NELEM_MAX), units="kg/m**3")
self.add_input("platform_elem_E", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_G", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_sigma_y", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_elem_Px1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Px2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Py1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Py2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Pz1", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_Pz2", NULL * np.ones(NELEM_MAX), units="N/m")
self.add_input("platform_elem_qdyn", NULL * np.ones(NELEM_MAX), units="Pa")
self.add_input("platform_hull_center_of_mass", np.zeros(3), units="m")
self.add_input("platform_mass", 0.0, units="kg")
self.add_input("platform_I_hull", np.zeros(6), units="kg*m**2")
self.add_input("platform_displacement", 0.0, units="m**3")
self.add_input("tower_nodes", NULL * np.ones((MEMMAX, 3)), units="m")
self.add_output("tower_Fnode", copy_shape="tower_nodes", units="N")
self.add_input("tower_Rnode", NULL * np.ones(MEMMAX), units="m")
self.add_output("tower_elem_n1", copy_shape="tower_elem_A")
self.add_output("tower_elem_n2", copy_shape="tower_elem_A")
self.add_output("tower_elem_L", copy_shape="tower_elem_A", units="m")
self.add_input("tower_elem_D", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_t", NULL * np.ones(MEMMAX), units="m")
self.add_input("tower_elem_A", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asx", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Asy", NULL * np.ones(MEMMAX), units="m**2")
self.add_input("tower_elem_Ixx", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Iyy", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_Izz", NULL * np.ones(MEMMAX), units="kg*m**2")
self.add_input("tower_elem_rho", NULL * np.ones(MEMMAX), units="kg/m**3")
self.add_input("tower_elem_E", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_G", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_sigma_y", NULL * np.ones(MEMMAX), units="Pa")
self.add_input("tower_elem_Px", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Px1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Px2", NULL * np.ones(MEMMAX), units="N/m")
self.add_input("tower_elem_Py", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Py1", NULL * np.ones(MEMMAX), units="N/m")
self.add_output("tower_elem_Py2", NULL * | np.ones(MEMMAX) | numpy.ones |
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
#%%
#import sys
import mne
#import imageio
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
#import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
#import scipy.io as sio
import time
from functools import partial
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne import set_config
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from pandas import DataFrame
from sklearn import linear_model
import statsmodels.api as sm
#import csv
os.chdir(os.path.join("D:\\", "git","BrainTools","projects","NLR_MEG"))
from plotit3 import plotit3
from plotsig3 import plotsig3
from plotit2 import plotit2
from plotsig2 import plotsig2
from plotcorr3 import plotcorr3
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
fs_dir = 'D://subjects'
this_env['SUBJECTS_DIR'] = fs_dir
raw_dir = os.path.join("D:\\","NLR_MEG")
os.chdir(raw_dir)
import seaborn as sns
sns.set(style="darkgrid")
#%%
subs = ['NLR_102_RS','NLR_103_AC','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_150_MG',
'NLR_151_RD','NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_187_NB','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_206_LM','NLR_207_AH','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_JB423','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_197_BK','NLR_GB355','NLR_GB387','NLR_HB205',
'NLR_IB217','NLR_IB319','NLR_JB227','NLR_JB486','NLR_KB396',
'NLR_IB357']
session1 = ['102_rs160618','103_ac150609','105_bb150713','110_hh160608','127_am151022',
'130_rw151221','132_wp160919','133_ml151124','145_ac160621','150_mg160606',
'151_rd160620','152_tc160422','160_ek160627','161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614','174_hs160620','179_gm160701',
'180_zd160621','187_nb161017','201_gs150818','203_am150831',
'204_am150829','205_ac151123','206_lm151119','207_ah160608','211_lb160617',
'nlr_gb310170614','nlr_kb218170619','nlr_jb423170620','nlr_gb267170620','nlr_jb420170621',
'nlr_hb275170622','197_bk170622','nlr_gb355170606','nlr_gb387170608','nlr_hb205170825',
'nlr_ib217170831','nlr_ib319170825','nlr_jb227170811','nlr_jb486170803','nlr_kb396170808',
'nlr_ib357170912']
subs2 = ['NLR_102_RS','NLR_110_HH','NLR_145_AC','NLR_150_MG',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_162_EF','NLR_163_LF', # 162, 201 only had the second session
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM', # 'NLR_170_GM': no EOG channel
'NLR_180_ZD','NLR_201_GS',
'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420', 'NLR_HB275','NLR_GB355']
session2 = ['102_rs160815','110_hh160809','145_ac160823','150_mg160825',
'152_tc160623','160_ek160915','161_ak160916','162_ef160829','163_lf160920',
'164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
'180_zd160826','201_gs150925',
'204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828','nlr_hb275170828','nlr_gb355170907']
subIndex1 = np.nonzero(np.in1d(subs,subs2))[0]
subIndex2 = np.empty([1,len(subIndex1)],dtype=int)[0]
for i in range(0,len(subIndex1)):
subIndex2[i] = np.nonzero(np.in1d(subs2,subs[subIndex1[i]]))[0]
twre_index = [87,93,108,66,116,85,110,71,84,92,87,86,63,81,60,55,71,63,68,67,64,127,79,
73,59,84,79,91,57,67,77,57,80,53,72,58,85,79,116,117,107,78,66,101,67]
twre_index = np.array(twre_index)
brs = [87,102,108,78,122,91,121,77,91,93,93,88,75,90,66,59,81,84,81,72,71,121,
81,75,66,90,93,101,56,78,83,69,88,60,88,73,82,81,115,127,124,88,68,110,96]
brs = np.array(brs)
twre_index1 = twre_index[subIndex1]
twre_index2_all = [90,76,94,115,
85,75,82,64,75,
63,83,77,84,75,
68,79,
62,90,105,75,71,
69,83,76,62,73,94]
twre_index2_all = np.array(twre_index2_all)
twre_index2 = twre_index2_all[subIndex2]
brs1 = brs[subIndex1]
brs2_all = [98,88,102,110,99,91,88,79,105,86,81,88,89,77,83,81,86,98,116,104,86,90,91,97,57,99,102]
brs2_all = np.array(brs2_all)
brs2 = brs2_all[subIndex2]
twre_diff = np.subtract(twre_index2,twre_index1)
brs_diff = np.subtract(brs2,brs1)
swe_raw = [62, 76, 74, 42, 75, 67, 76, 21, 54, 35, 21, 61, 45, 48, 17, 11, 70, 19, 10, 57,
12, 86, 53, 51, 13, 28, 54, 25, 27, 10, 66, 18, 18, 20, 37, 23, 17, 36, 79, 82,
74, 64, 42, 78, 35]
swe_raw = np.array(swe_raw)
lwid = [49,60,60,51,62,54,65,23,44,35,31,52,44,39,27,30,57,33,24,48,19,66,45,
43,22,33,51,36,35,25,55,34,26,26,39,27,24,29,61,71,65,56,36,62,51]
lwid = np.array(lwid)
rf = [88,103,95,67,120,85,108,71,91,87,88,76,76,93,60,40,86,61,66,81,59,130,93,85,49,76,90,96,42,64,74,49,84,56,
76,61,80,89,111,120,132,88,65,102,72]
rf = np.array(rf)
age = [125.6885, 132.9501, 122.0434, 138.4349, 97.6347, 138.1420, 108.2457, 98.0631, 105.8147, 89.9132,
87.6465, 131.8660, 123.7174, 95.959, 112.416, 133.8042, 152.4639, 103.4823, 89.8475, 138.4020,
93.8568, 117.0814, 123.6202, 122.9304, 109.1656, 90.6058,
111.9593,86.0381,147.2063,95.8699,148.0802,122.5896,88.7162,123.0495,110.6645,105.3069,88.9143,95.2879,106.2852,
122.2915,114.4389,136.1496,128.6246,137.9216,122.7528]
age = np.divide(age, 12)
wasi_vocab = [51,62,52,39,80,59,56,np.nan,52,47,64,44,49,48,55,53,44,44,53,45,62,
76,45,55,48,56,41,43,40,52,54,50,62,67,59,48,60,60,62,79,74,44,49,50,60]
wasi_mr = [47,64,44,58,60,51,56,np.nan,56,43,37,37,51,55,36,33,52,48,49,41,51,
56,56,53,42,41,46,51,34,51,50,51,55,53,44,44,47,59,66,74,65,53,54,47,60]
n_subjects = len(subs)
c_table = ( (0.6510, 0.8078, 0.8902), # Blue, Green, Red, Orange, Purple, yellow
(0.1216, 0.4706, 0.7059),
(0.6980, 0.8745, 0.5412),
(0.2000, 0.6275, 0.1725),
(0.9843, 0.6039, 0.6000),
(0.8902, 0.1020, 0.1098),
(0.9922, 0.7490, 0.4353),
(1.0000, 0.4980, 0),
(0.7922, 0.6980, 0.8392),
(0.4157, 0.2392, 0.6039),
(1.0000, 1.0000, 0.6000),
(0.6941, 0.3490, 0.1569))
fname_data = op.join(raw_dir, 'session1_data_loose_depth8_normal.npy')
#%%
"""
Here we load the data for Session 1
"""
t0 = time.time()
os.chdir(raw_dir)
X13 = np.load(fname_data)
orig_times = np.load('session1_times.npy')
tstep = np.load('session1_tstep.npy')
n_epochs = np.load('session1_n_averages.npy')
tmin = -0.1
""" Downsample the data """
ss = 3 # was originally 2
sample = np.arange(0,len(orig_times),ss)
sRate = 600 / ss
times = orig_times[sample]
tstep = ss*tstep
X11 = X13[:,sample,:,:]
del X13
X11 = np.abs(X11)
print("\n\nElasped time: %0.2d mins %0.2d secs\n\n" % (divmod(time.time()-t0, 60)))
#%%
""" Grouping subjects """
reading_thresh = 80
m1 = np.logical_and(np.transpose(twre_index) > reading_thresh, np.transpose(age) <= 13)
m2 = np.logical_and(np.transpose(twre_index) <= reading_thresh, np.transpose(age) <= 13)
#m1 = np.logical_and(np.transpose(brs) >= reading_thresh, np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(brs) < reading_thresh, np.transpose(age) <= 13)
#m1 = np.logical_and(np.transpose(swe_raw) >= np.median(swe_raw), np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(swe_raw) < np.median(swe_raw), np.transpose(age) <= 13)
orig_twre = twre_index
orig_age = age
orig_swe = swe_raw
m3 = np.mean(n_epochs,axis=1) < 40
m1[np.where(m3)] = False
m2[np.where(m3)] = False
twre_index = twre_index[np.where(~m3)[0]]
age = age[np.where(~m3)[0]]
#swe_raw = swe_raw[np.where(~m3)[0]]
good_readers = np.where(m1)[0]
poor_readers = np.where(m2)[0]
a1 = np.transpose(age) > np.mean(age)
a2 = np.logical_not(a1)
a1[np.where(m3)] = False
a2[np.where(m3)] = False
old_readers = np.where(a1)[0]
young_readers = np.where(a2)[0]
#wasi_vocab_G = [wasi_vocab[i] for i in good_readers]
#wasi_vocab_P = [wasi_vocab[i] for i in poor_readers]
#wasi_mr_G = [wasi_mr[i] for i in good_readers]
#wasi_mr_P = [wasi_mr[i] for i in poor_readers]
#age_G = [orig_age[i] for i in good_readers]
#age_P = [orig_age[i] for i in poor_readers]
#twre_G = [orig_twre[i] for i in good_readers]
#twre_P = [orig_twre[i] for i in poor_readers]
#
#n,p = stats.ttest_ind(wasi_vocab_G,wasi_vocab_P,nan_policy='omit')
#n,p = stats.ttest_ind(wasi_mr_G,wasi_mr_P,nan_policy='omit')
#n,p = stats.ttest_ind(age_G,age_P,nan_policy='omit')
#n,p = stats.ttest_ind(twre_G,twre_P,nan_policy='omit')
all_subject = []
all_subject.extend(good_readers)
all_subject.extend(poor_readers)
all_subject.sort()
fs_vertices = [np.arange(10242)] * 2
n_epoch = np.empty((45,4))
n_epoch[:,0] = [np.int(n_epochs[i,0]) for i in range(0,45)]
n_epoch[:,1] = [np.int(n_epochs[i,3]) for i in range(0,45)]
n_epoch[:,2] = [np.int(n_epochs[i,5]) for i in range(0,45)]
n_epoch[:,3] = [np.int(n_epochs[i,8]) for i in range(0,45)]
removal = np.sum(60 - n_epoch, axis = 1)
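# 'removal' is the total number of rejected trials per subject, assuming 60 presented
# trials in each of the four conditions collected into n_epoch above.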
a = [removal[i] for i in zip(good_readers)]
b = [removal[i] for i in zip(poor_readers)]
c = [removal[i] for i in zip(all_subject)]
d = [removal[i] for i in zip(young_readers)]
e = [removal[i] for i in zip(old_readers)]
stats.ttest_ind(a,b)
stats.ttest_ind(d,e)
stats.pearsonr(c,age)
stats.pearsonr(c,twre_index)
figureDir = '%s/figures' % raw_dir
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(age, c, deg=1)
ax.plot(age, fit[0] * age + fit[1], color=[0,0,0])
ax.plot(age, c, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlabel('Age')
plt.ylabel('# of rejected trials')
os.chdir(figureDir)
# plt.savefig('Corr_reject_age.png',dpi=600,papertype='letter',format='png')
# plt.savefig('Corr_reject_age.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(twre_index, c, deg=1)
ax.plot(twre_index, fit[0] * twre_index + fit[1], color=[0,0,0])
ax.plot(twre_index, c, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlabel('Reading skill')
plt.ylabel('# of rejected trials')
os.chdir(figureDir)
# plt.savefig('Corr_reject_reading.png',dpi=600,papertype='letter',format='png')
# plt.savefig('Corr_reject_reading.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Read HCP labels """
labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir) #regexp=aparc_label_name
#aparc_label_name = 'PHT_ROI'#'_IP'#'IFSp_ROI'#'STSvp_ROI'#'STSdp_ROI'#'PH_ROI'#'TE2p_ROI' #'SFL_ROI' #'IFSp_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc.a2009s',surf_name='white',
subjects_dir=fs_dir) #, regexp=aparc_label_name)
#%%
#TE2p_mask_lh = mne.Label.get_vertices_used(TE2p_label[0])
#TE2p_mask_rh = mne.Label.get_vertices_used(TE2p_label[1])
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
TE2a_label_lh = [label for label in labels if label.name == 'L_TE2a_ROI-lh'][0]
TE2a_label_rh = [label for label in labels if label.name == 'R_TE2a_ROI-rh'][0]
TF_label_lh = [label for label in labels if label.name == 'L_TF_ROI-lh'][0]
TF_label_rh = [label for label in labels if label.name == 'R_TF_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
a8C_label_lh = [label for label in labels if label.name == 'L_8C_ROI-lh'][0]
a8C_label_rh = [label for label in labels if label.name == 'R_8C_ROI-rh'][0]
p946v_label_lh = [label for label in labels if label.name == 'L_p9-46v_ROI-lh'][0]
p946v_label_rh = [label for label in labels if label.name == 'R_p9-46v_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFSa_label_lh = [label for label in labels if label.name == 'L_IFSa_ROI-lh'][0]
IFSa_label_rh = [label for label in labels if label.name == 'R_IFSa_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
a43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
a43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
a9_46v_lh = [label for label in labels if label.name == 'L_a9-46v_ROI-lh'][0]
a9_46v_rh = [label for label in labels if label.name == 'R_a9-46v_ROI-rh'][0]
PGi_label_lh = [label for label in labels if label.name == 'L_PGi_ROI-lh'][0]
PGi_label_rh = [label for label in labels if label.name == 'R_PGi_ROI-rh'][0]
PGs_label_lh = [label for label in labels if label.name == 'L_PGs_ROI-lh'][0]
PGs_label_rh = [label for label in labels if label.name == 'R_PGs_ROI-rh'][0]
STSvp_label_lh = [label for label in labels if label.name == 'L_STSvp_ROI-lh'][0]
STSvp_label_rh = [label for label in labels if label.name == 'R_STSvp_ROI-rh'][0]
STSdp_label_lh = [label for label in labels if label.name == 'L_STSdp_ROI-lh'][0]
STSdp_label_rh = [label for label in labels if label.name == 'R_STSdp_ROI-rh'][0]
STSva_label_lh = [label for label in labels if label.name == 'L_STSva_ROI-lh'][0]
STSva_label_rh = [label for label in labels if label.name == 'R_STSva_ROI-rh'][0]
STSda_label_lh = [label for label in labels if label.name == 'L_STSda_ROI-lh'][0]
STSda_label_rh = [label for label in labels if label.name == 'R_STSda_ROI-rh'][0]
TPOJ1_label_lh = [label for label in labels if label.name == 'L_TPOJ1_ROI-lh'][0]
TPOJ1_label_rh = [label for label in labels if label.name == 'R_TPOJ1_ROI-rh'][0]
TPOJ2_label_lh = [label for label in labels if label.name == 'L_TPOJ2_ROI-lh'][0]
TPOJ2_label_rh = [label for label in labels if label.name == 'R_TPOJ2_ROI-rh'][0]
V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
V4_label_lh = [label for label in labels if label.name == 'L_V4_ROI-lh'][0]
V4_label_rh = [label for label in labels if label.name == 'R_V4_ROI-rh'][0]
LIPd_label_lh = [label for label in labels if label.name == 'L_LIPd_ROI-lh'][0]
LIPd_label_rh = [label for label in labels if label.name == 'R_LIPd_ROI-rh'][0]
LIPv_label_lh = [label for label in labels if label.name == 'L_LIPv_ROI-lh'][0]
LIPv_label_rh = [label for label in labels if label.name == 'R_LIPv_ROI-rh'][0]
IPS1_label_lh = [label for label in labels if label.name == 'L_IPS1_ROI-lh'][0]
IPS1_label_rh = [label for label in labels if label.name == 'R_IPS1_ROI-rh'][0]
_7Am_label_lh = [label for label in labels if label.name == 'L_7Am_ROI-lh'][0]
_7Am_label_rh = [label for label in labels if label.name == 'R_7Am_ROI-rh'][0]
VIP_label_lh = [label for label in labels if label.name == 'L_VIP_ROI-lh'][0]
VIP_label_rh = [label for label in labels if label.name == 'R_VIP_ROI-rh'][0]
_7AL_label_lh = [label for label in labels if label.name == 'L_7AL_ROI-lh'][0]
_7AL_label_rh = [label for label in labels if label.name == 'R_7AL_ROI-rh'][0]
PBelt_label_lh = [label for label in labels if label.name == 'L_PBelt_ROI-lh'][0]
PBelt_label_rh = [label for label in labels if label.name == 'R_PBelt_ROI-rh'][0]
PSL_label_lh = [label for label in labels if label.name == 'L_PSL_ROI-lh'][0]
PSL_label_rh = [label for label in labels if label.name == 'R_PSL_ROI-rh'][0]
LBelt_label_lh = [label for label in labels if label.name == 'L_LBelt_ROI-lh'][0]
LBelt_label_rh = [label for label in labels if label.name == 'R_LBelt_ROI-rh'][0]
A1_label_lh = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
A1_label_rh = [label for label in labels if label.name == 'R_A1_ROI-rh'][0]
MBelt_label_lh = [label for label in labels if label.name == 'L_MBelt_ROI-lh'][0]
MBelt_label_rh = [label for label in labels if label.name == 'R_MBelt_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
A4_label_lh = [label for label in labels if label.name == 'L_A4_ROI-lh'][0]
A4_label_rh = [label for label in labels if label.name == 'R_A4_ROI-rh'][0]
PFcm_label_lh = [label for label in labels if label.name == 'L_PFcm_ROI-lh'][0]
PFcm_label_rh = [label for label in labels if label.name == 'R_PFcm_ROI-rh'][0]
PFm_label_lh = [label for label in labels if label.name == 'L_PFm_ROI-lh'][0]
PFm_label_rh = [label for label in labels if label.name == 'R_PFm_ROI-rh'][0]
_4_label_lh = [label for label in labels if label.name == 'L_4_ROI-lh'][0]
_4_label_rh = [label for label in labels if label.name == 'R_4_ROI-rh'][0]
_1_label_lh = [label for label in labels if label.name == 'L_1_ROI-lh'][0]
_1_label_rh = [label for label in labels if label.name == 'R_1_ROI-rh'][0]
_2_label_lh = [label for label in labels if label.name == 'L_2_ROI-lh'][0]
_2_label_rh = [label for label in labels if label.name == 'R_2_ROI-rh'][0]
_3a_label_lh = [label for label in labels if label.name == 'L_3a_ROI-lh'][0]
_3a_label_rh = [label for label in labels if label.name == 'R_3a_ROI-rh'][0]
_3b_label_lh = [label for label in labels if label.name == 'L_3b_ROI-lh'][0]
_3b_label_rh = [label for label in labels if label.name == 'R_3b_ROI-rh'][0]
_43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
_43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
_6r_label_lh = [label for label in labels if label.name == 'L_6r_ROI-lh'][0]
_6r_label_rh = [label for label in labels if label.name == 'R_6r_ROI-rh'][0]
OP1_label_lh = [label for label in labels if label.name == 'L_OP1_ROI-lh'][0]
OP1_label_rh = [label for label in labels if label.name == 'R_OP1_ROI-rh'][0]
OP23_label_lh = [label for label in labels if label.name == 'L_OP2-3_ROI-lh'][0]
OP23_label_rh = [label for label in labels if label.name == 'R_OP2-3_ROI-rh'][0]
OP4_label_lh = [label for label in labels if label.name == 'L_OP4_ROI-lh'][0]
OP4_label_rh = [label for label in labels if label.name == 'R_OP4_ROI-rh'][0]
PFop_label_lh = [label for label in labels if label.name == 'L_PFop_ROI-lh'][0]
PFop_label_rh = [label for label in labels if label.name == 'R_PFop_ROI-rh'][0]
A5_label_lh = [label for label in labels if label.name == 'L_A5_ROI-lh'][0]
A5_label_rh = [label for label in labels if label.name == 'R_A5_ROI-rh'][0]
STV_label_lh = [label for label in labels if label.name == 'L_STV_ROI-lh'][0]
STV_label_rh = [label for label in labels if label.name == 'R_STV_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
PF_label_lh = [label for label in labels if label.name == 'L_PF_ROI-lh'][0]
PF_label_rh = [label for label in labels if label.name == 'R_PF_ROI-rh'][0]
PFt_label_lh = [label for label in labels if label.name == 'L_PFt_ROI-lh'][0]
PFt_label_rh = [label for label in labels if label.name == 'R_PFt_ROI-rh'][0]
p47r_label_lh = [label for label in labels if label.name == 'L_p47r_ROI-lh'][0]
p47r_label_rh = [label for label in labels if label.name == 'R_p47r_ROI-rh'][0]
FOP5_label_lh = [label for label in labels if label.name == 'L_FOP5_ROI-lh'][0]
FOP5_label_rh = [label for label in labels if label.name == 'R_FOP5_ROI-rh'][0]
FOP4_label_lh = [label for label in labels if label.name == 'L_FOP4_ROI-lh'][0]
FOP4_label_rh = [label for label in labels if label.name == 'R_FOP4_ROI-rh'][0]
FOP3_label_lh = [label for label in labels if label.name == 'L_FOP3_ROI-lh'][0]
FOP3_label_rh = [label for label in labels if label.name == 'R_FOP3_ROI-rh'][0]
FOP2_label_lh = [label for label in labels if label.name == 'L_FOP2_ROI-lh'][0]
FOP2_label_rh = [label for label in labels if label.name == 'R_FOP2_ROI-rh'][0]
Ig_label_lh = [label for label in labels if label.name == 'L_Ig_ROI-lh'][0]
Ig_label_rh = [label for label in labels if label.name == 'R_Ig_ROI-rh'][0]
AVI_label_lh = [label for label in labels if label.name == 'L_AVI_ROI-lh'][0]
AVI_label_rh = [label for label in labels if label.name == 'R_AVI_ROI-rh'][0]
_47l_label_lh = [label for label in labels if label.name == 'L_47l_ROI-lh'][0]
_47l_label_rh = [label for label in labels if label.name == 'R_47l_ROI-rh'][0]
temp1_label_lh = [label for label in anat_label if label.name == 'Pole_occipital-lh'][0]
#temp1_label_rh = [label for label in anat_label if label.name == 'parsopercularis-rh'][0]
temp2_label_lh = [label for label in anat_label if label.name == 'S_occipital_ant-lh'][0]
#temp2_label_rh = [label for label in anat_label if label.name == 'parsorbitalis-rh'][0]
temp3_label_lh = [label for label in anat_label if label.name == 'G_and_S_occipital_inf-lh'][0]
#temp3_label_rh = [label for label in anat_label if label.name == 'parstriangularis-rh'][0]
temp4_label_lh = [label for label in anat_label if label.name == 'S_calcarine-lh'][0]
#temp4_label_rh = [label for label in anat_label if label.name == 'precentral-rh'][0]
#%%
""" Lexical task: Word - Noise """
data11 = X11[:,:,all_subject,5] - X11[:,:,all_subject,8]
data11 = np.transpose(data11,[2,1,0])
data11_good = X11[:,:,good_readers,5] - X11[:,:,good_readers,8]
data11_good = np.transpose(data11_good,[2,1,0])
data11_poor = X11[:,:,poor_readers,5] - X11[:,:,poor_readers,8]
data11_poor = np.transpose(data11_poor,[2,1,0])
""" Dot task: Word - Noise """
data12 = X11[:,:,all_subject,0] - X11[:,:,all_subject,3]
data12 = np.transpose(data12,[2,1,0])
data12_good = X11[:,:,good_readers,0] - X11[:,:,good_readers,3]
data12_good = np.transpose(data12_good,[2,1,0])
data12_poor = X11[:,:,poor_readers,0] - X11[:,:,poor_readers,3]
data12_poor = np.transpose(data12_poor,[2,1,0])
""" Lexical task: High contrast - Low contrast """
#data12 = X11[:,31:65,all_subject,5] - X11[:,31:65,all_subject,7]
#data12 = np.transpose(data12,[2,1,0])
#data12[:,:,medial_vertices] = 0.
#%%
""" Spatio-temporal clustering: session 1 Lexical task"""
t0 = time.time()
print("\n\n Start time: %s \n\n" % time.ctime())
p_threshold = 0.05
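# The next line converts the two-tailed p threshold into the corresponding t-value
# (df = n_subjects - 1), used as the cluster-forming threshold for the permutation test.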
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
s_space = mne.grade_to_tris(5)
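# mne.grade_to_tris(5) returns the ico-5 fsaverage triangulation: 10242 vertices per
# hemisphere, with right-hemisphere vertex indices offset by 10242.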
# Left hemisphere
s_space_lh = s_space[s_space[:,0] < 10242]
#connectivity = mne.spatial_tris_connectivity(s_space_lh, remap_vertices = True)
connectivity = mne.spatial_tris_connectivity(s_space)
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.spatio_temporal_cluster_1samp_test(data11[:,:,:], n_permutations=1024, connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < p_threshold)[0]
#fsave_vertices = [np.arange(10242), np.array([], int)]
fsave_vertices = [np.arange(10242), np.arange(10242)]
#fsave_vertices = [np.arange(10242), np.array([], int)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
print("\n\n Elasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60)))
#%%
""" Just source estimates """
stat_fun = partial(mne.stats.ttest_1samp_no_p)
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(good_readers) - 1)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data12_good[:,:,:])), fs_vertices, tmin, tstep, subject='fsaverage')
brain3_1 = temp3.plot(hemi='both', subjects_dir=fs_dir, views = 'lat', initial_time=0.35, #['lat','ven','med']
clim=dict(kind='value', lims=[1.7, t_threshold, 3.5]))#clim=dict(kind='value', lims=[2, t_threshold, 7]), size=(800,800))
#%%
""" Spatio-temporal clustering: session 1 Dot task"""
dur_thresh = 100
t0 = time.time()
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.permutation_cluster_1samp_test(data12[:,166:199,:], n_permutations=1024, connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
fsave_vertices = [np.arange(10242), np.arange(10242)]
dot_stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
print("\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60)))
brain3 = dot_stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=20, clim=dict(kind='value', lims=[0, 10, 50]),background='white',foreground='black')
#%%
""" ROI definition """
dur_thresh = 100
"""
plot(self, subject=None, surface='inflated', hemi='lh', colormap='auto',
time_label='auto', smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, subjects_dir=None, figure=None, views='lat',
colorbar=True, clim='auto', cortex='classic', size=800, background='black',
foreground='white', initial_time=None, time_unit='s')
"""
brain1 = stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=20, clim=dict(kind='value', lims=[40, dur_thresh, 200]),background='white',foreground='black')
""" Sort out vertices here """
#temp_frontal_label_l = mne.Label(FOP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP4_label_lh.pos, \
# values= FOP4_label_lh.values)
#
#brain1.add_label(temp_frontal_label_l, borders=True, color=c_table[8])
#
#lh_label = stc_all_cluster_vis.in_label(temp_frontal_label_l)
#data = lh_label.data
#lh_label.data[data < dur_thresh] = 0.
#
#temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
# subjects_dir=fs_dir, connected=False)
#temp = stc_all_cluster_vis.in_label(temp_labels)
#frontal_vertices_l = temp.vertices[0]
#
#new_label = mne.Label(frontal_vertices_l, hemi='lh')
#brain1.add_label(new_label, borders=True, color=c_table[8])
""" Done """
os.chdir('figures')
#brain1.save_image('Lexical_LH_STClustering.pdf', antialiased=True)
#brain1.save_image('Lexical_LH_STClustering.png', antialiased=True)
os.chdir('..')
brain1.add_label(A1_label_lh, borders=True, color=[0,0,0]) # Show A1
temp_auditory_label_l = mne.Label(A4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A4_label_lh.pos,values= A4_label_lh.values) + \
mne.Label(A5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A5_label_lh.pos,values= A5_label_lh.values) + \
mne.Label(STSdp_label_lh.vertices, hemi='lh',name=u'sts_l',pos=STSdp_label_lh.pos,values= STSdp_label_lh.values)+ \
mne.Label(TPOJ1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=TPOJ1_label_lh.pos,values= TPOJ1_label_lh.values)+ \
mne.Label(PBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PBelt_label_lh.pos,values= PBelt_label_lh.values)+ \
mne.Label(LBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=LBelt_label_lh.pos,values= LBelt_label_lh.values)
#brain1.add_label(temp_auditory_label_l, borders=True, color=c_table[2])
lh_label = stc_all_cluster_vis.in_label(temp_auditory_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
stg_vertices_l = temp.vertices[0]
new_label = mne.Label(stg_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[1])
#brain1.remove_labels()
temp_auditory2_label_l = mne.Label(PFcm_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PFcm_label_lh.pos,values= PFcm_label_lh.values) + \
mne.Label(RI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=RI_label_lh.pos,values= RI_label_lh.values)+ \
mne.Label(PF_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PF_label_lh.pos,values= PF_label_lh.values)
#brain1.add_label(temp_auditory2_label_l, borders=True, color=c_table[0])
lh_label = stc_all_cluster_vis.in_label(temp_auditory2_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
tpj_vertices_l = temp.vertices[0]
tpj_vertices_l = np.sort(np.concatenate((tpj_vertices_l, \
[16, 2051, 2677, 2678, 2679, 5042, 8296, 8297, 8299, 8722, 8723, 9376])))
new_label = mne.Label(tpj_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[0])
#brain1.add_label(_1_label_lh, borders=True, color=c_table[4])
temp_motor_label_l = mne.Label(_3a_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3a_label_lh.pos,values= _3a_label_lh.values) + \
mne.Label(_3b_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3b_label_lh.pos,values= _3b_label_lh.values) + \
mne.Label(_4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_4_label_lh.pos,values= _4_label_lh.values) + \
mne.Label(_1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_1_label_lh.pos,values= _1_label_lh.values)
#brain1.add_label(temp_motor_label_l, borders=True, color=c_table[4])
lh_label = stc_all_cluster_vis.in_label(temp_motor_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
motor_vertices_l = temp.vertices[0]
new_label = mne.Label(motor_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[4])
temp_broca_label_l = \
mne.Label(a44_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a44_label_lh.pos,values= a44_label_lh.values) + \
mne.Label(a45_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a45_label_lh.pos,values= a45_label_lh.values) + \
mne.Label(AVI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=AVI_label_lh.pos,values= AVI_label_lh.values) + \
mne.Label(FOP5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP5_label_lh.pos,values= FOP5_label_lh.values) + \
mne.Label(_47l_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_47l_label_lh.pos,values= _47l_label_lh.values)
#brain1.add_label(temp_broca_label_l, borders=True, color=c_table[6])
lh_label = stc_all_cluster_vis.in_label(temp_broca_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
broca_vertices_l = temp.vertices[0]
broca_vertices_l = np.sort(np.concatenate((broca_vertices_l,[1187,3107,3108,3109,6745,7690,7691])))
new_label = mne.Label(broca_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[6])
temp_sylvian_label_l = mne.Label(OP23_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP23_label_lh.pos,values= OP23_label_lh.values) + \
mne.Label(Ig_label_lh.vertices, hemi='lh',name=u'sts_l',pos=Ig_label_lh.pos,values= Ig_label_lh.values) + \
mne.Label(OP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP4_label_lh.pos,values= OP4_label_lh.values) + \
mne.Label(OP1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP1_label_lh.pos,values= OP1_label_lh.values) + \
mne.Label(FOP2_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP2_label_lh.pos,values= FOP2_label_lh.values) + \
mne.Label(_6r_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_6r_label_lh.pos,values= _6r_label_lh.values)
#brain1.add_label(temp_sylvian_label_l, borders=True, color=c_table[8])
lh_label = stc_all_cluster_vis.in_label(temp_sylvian_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
sylvian_vertices_l = temp.vertices[0]
sylvian_vertices_l = np.sort(np.concatenate((sylvian_vertices_l,[905,1892,2825,2526,4157,4158,4159,6239,8290,8293,9194,9203])))
new_label = mne.Label(sylvian_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[8])
# right hemisphere
#brain2 = stc_all_cluster_vis.plot(
# hemi='rh', views='lateral', subjects_dir=fs_dir,
# time_label='Duration significant (ms)', size=(800, 800),
# smoothing_steps=20, clim=dict(kind='value', lims=[40, dur_thresh, 200]),background='white',foreground='black')
#
#stg_vertices_r = A5_label_rh.vertices
#stg_vertices_r = np.sort([2001,2002,2419,2420,2421,2418,2754,2417,13075,13076,13077,13078,\
# 13079,13080,13081,12069,12070,12071,12072])
#new_label = mne.Label(stg_vertices_r, hemi='rh')
#brain2.add_label(new_label, borders=True, color=c_table[5])
#
#os.chdir('figures')
#brain2.save_image('RH_STClustering.pdf', antialiased=True)
#brain2.save_image('RH_STClustering.png', antialiased=True)
#os.chdir('..')
# V1
#lh_label = dot_stc_all_cluster_vis.in_label(V1_label_lh)
#data = lh_label.data
#lh_label.data[data < 50] = 0.
#
#temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
# subjects_dir=fs_dir, connected=False)
#temp = dot_stc_all_cluster_vis.in_label(temp_labels)
#tV1_vertices_l = temp.vertices[0]
#new_label = mne.Label(tV1_vertices_l, hemi='lh')
#brain1.add_label(new_label, borders=True, color='r')
#
#M = np.mean(np.mean(tX11[tV1_vertices_l,:,:,:],axis=0),axis=1)
#errM = np.std(np.mean(tX11[tV1_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
#t0 = time.time()
#plotit2(times, M, errM, 5, 0, yMin=0, yMax=2.7, subject = 'all')
#plotsig2(times,nReps,X, 5, 0, all_subject, boot_pVal)
np.save('STG_Vert', stg_vertices_l)
np.save('IFG_Vert', broca_vertices_l)
np.save('TPJ_Vert', tpj_vertices_l)
np.save('Motor_Vert', motor_vertices_l)
np.save('Sylvian_Vert', sylvian_vertices_l)
np.save('STG_Vert_r', stg_vertices_r)
#%%
figureDir = '%s/figures' % raw_dir
nReps = 3000
boot_pVal = 0.05
#%%
""" Left STG: Word vs. Noise """
stg_vertices_l = np.load('STG_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
MM = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
diffScore = np.mean((MM[:,:,5]-MM[:,:,8]), axis = 1)
diffScore2 = np.mean((MM[:,:,0]-MM[:,:,3]), axis = 1)
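# Condition indices above follow the contrasts defined earlier: 5 vs 8 = lexical task
# Word - Noise, 0 vs 3 = dot task Word - Noise; the axis-1 mean averages across subjects.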
del temp1
plt.figure()
plt.clf()
plt.plot(times, diffScore)
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('STG: Lexical task')
os.chdir(figureDir)
plt.savefig('STG_Word_Scramble_lex.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_Word_Scramble_lex.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
plt.plot(times, diffScore2)
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('STG: Fixation task')
os.chdir(figureDir)
plt.savefig('STG_Word_Scramble_fix.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_Word_Scramble_fix.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[stg_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[stg_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
diffM3 = np.mean(np.mean(temp1[stg_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,8],axis=0),axis=1)
diffM4 = np.mean(np.mean(temp1[stg_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,3],axis=0),axis=1)
del temp1
# For calculating p-values
X = np.mean(X11[stg_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STG_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STG_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Correlation """
temp1 = X11[:,:,all_subject,:]
M = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[stg_vertices_l,:,:,:],axis=0)
del temp1, temp2
#%%
""" Plot """
t1 = 350
t_window1 = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
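# Window of interest: [t1, t1 + 200] ms after stimulus onset; the +100 ms shift is
# presumably the pre-stimulus baseline of the epoch, and sRate converts ms to samples.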
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4.5])
os.chdir('figures')
plt.savefig('STG_corr_lexical_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lexical_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
t1 = 300
t_window1_dot = np.multiply(np.divide(np.add([t1,t1+100],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4.5])
os.chdir('figures')
plt.savefig('STG_corr_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_fix = temp_meg
""" Corr: Difference score lexical vs. fixation """
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg_fix, temp_meg_lex, deg=1)
ax.plot(temp_meg_fix, fit[0] * temp_meg_fix + fit[1], color=[0,0,0])
ax.plot(temp_meg_fix, temp_meg_lex, 'o', markerfacecolor=[0.5,0.5,0.5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=c_table[3], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([-1.5, 4.5])
plt.xlim([-1.5, 4.5])
r, p = stats.pearsonr(temp_meg_fix,temp_meg_lex)
print('STG: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('STG_lexical_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
"""Equivalence test"""
import statsmodels
sstep = 10
p = np.empty((int(len(range(0,800,sstep))),1))
lower_p = np.empty((int(len(range(0,800,sstep))),1))
upper_p = np.empty((int(len(range(0,800,sstep))),1))
for tt, ttime in zip(range(0, len(range(0,800,sstep))),range(0,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([ttime,ttime+sstep],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
t_window1_dot = np.multiply(np.divide(np.add([ttime,ttime+sstep],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_meg_fix = temp_meg
err = 0.8 * np.std(temp_meg_lex - temp_meg_fix)
# p[tt], a, b = statsmodels.stats.weightstats.ttost_paired(temp_meg_lex, temp_meg_fix, err, -err)
xx, lower_p[tt] = stats.ttest_1samp(temp_meg_lex-temp_meg_fix,-err)
xx, upper_p[tt] = stats.ttest_1samp(temp_meg_lex-temp_meg_fix,err)
p[tt] = max(lower_p[tt], upper_p[tt])*2
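# Equivalence test per window: the lexical-fixation difference is tested against the
# -err and +err bounds with one-sample t-tests, and the larger of the two p-values
# (doubled here) is stored as the equivalence p-value plotted below.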
plt.figure()
plt.clf()
plt.plot(range(0,800,sstep), p)
plt.plot([0, 800],[0.05,0.05],'--')
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('P-value from the equivalence test')
os.chdir(figureDir)
plt.savefig('STG_equivalence.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_equivalence.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#tempa = np.random.normal(100,5,(1,100))
#tempb = np.random.normal(10,5,(1,100))
#err = 0.8*5
#tempp, fjsdk, fjdskl = statsmodels.stats.weightstats.ttost_paired(tempa[0], tempb[0], err, -err)
#xxx, xxxx = stats.ttest_rel(tempa[0],tempb[0])
#%%
"""Correlation over time"""
sstep = 10
tstart = 0
n_ttest = np.empty((len(range(tstart,800,sstep)),1))
p_ttest = np.empty((len(range(tstart,800,sstep)),1))
r_lex = np.empty((len(range(tstart,800,sstep)),1))
p_lex = np.empty((len(range(tstart,800,sstep)),1))
r_dot = np.empty((len(range(tstart,800,sstep)),1))
p_dot = np.empty((len(range(tstart,800,sstep)),1))
r_bet = np.empty((len(range(tstart,800,sstep)),1))
p_bet = np.empty((len(range(tstart,800,sstep)),1))
temp_meg_lex = np.empty((len(all_subject),len(range(tstart,800,sstep))))
temp_meg_fix = np.empty((len(all_subject),len(range(tstart,800,sstep))))
for ii, t1 in zip(range(0,len(range(tstart,800,sstep))), range(tstart,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([t1,t1+10],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1 = np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_lex[ii], p_lex[ii] = stats.pearsonr(temp_read,temp_meg)
n_ttest[ii], p_ttest[ii] = stats.ttest_1samp(lowNoise1,0)
temp_meg_lex[:,ii] = temp_meg
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_dot[ii], p_dot[ii] = stats.pearsonr(temp_read,temp_meg)
temp_meg_fix[:,ii] = temp_meg
r_bet[ii], p_bet[ii] = stats.pearsonr(temp_meg_fix[:,ii],temp_meg_lex[:,ii])
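# r_lex / r_dot: correlation between reading score (orig_twre) and the Word - Noise
# difference score in each 10 ms window for the lexical and dot tasks; r_bet:
# correlation between the two tasks' difference scores across subjects per window.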
#%%
"""Correlation over time"""
c = ( (0.6196, 0.0039, 0.2588),
(0.8353, 0.2431, 0.3098),
(0.9569, 0.4275, 0.2627),
(0.9922, 0.6824, 0.3804),
(0.9961, 0.8784, 0.5451),
(1.0000, 1.0000, 0.7490) )
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_lex, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('STG: Lexical task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_lex[ttt] >= 0.05:
al = plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_lex[ttt] < 0.05 and p_lex[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_lex[ttt] < 0.01 and p_lex[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_lex[ttt] < 0.005 and p_lex[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_lex_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lex_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_dot, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('STG: Fixation task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_dot[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_dot[ttt] < 0.05 and p_dot[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_dot[ttt] < 0.01 and p_dot[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_dot[ttt] < 0.005 and p_dot[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_dot_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_bet, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation between tasks (r-value)')
plt.title('STG')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_bet[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_bet[ttt] < 0.05 and p_bet[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_bet[ttt] < 0.01 and p_bet[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_bet[ttt] < 0.005 and p_bet[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_bettasks_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_bettasks_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
del M, M1, M2
#%%
""" Broca """
broca_vertices_l = np.load('IFG_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
plt.figure()
plt.clf()
plt.plot(times, M[:,5]-M[:,8])
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('IFG: Lexical task')
os.chdir(figureDir)
plt.savefig('IFG_Word_Scramble_lex.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_Word_Scramble_lex.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
plt.plot(times, M[:,0]-M[:,3])
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('IFG: Fixation task')
os.chdir(figureDir)
plt.savefig('IFG_Word_Scramble_fix.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_Word_Scramble_fix.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[broca_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: IFG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('IFG_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: IFG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: IFG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: IFG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('IFG_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: IFG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: IFG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Correlation: Lexical """
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[broca_vertices_l,:,:,:],axis=0)
del temp1, temp2
#%%
"""Plot"""
t1 = 350
t_window1 = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-2,3])
os.chdir('figures')
plt.savefig('IFG_corr_lexical_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_lexical_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
#t1 = 400
t_window1_dot = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4])
os.chdir('figures')
plt.savefig('IFG_corr_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_fix = temp_meg
""" Corr: Difference score lexical vs. fixation """
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg_fix, temp_meg_lex, deg=1)
ax.plot(temp_meg_fix, fit[0] * temp_meg_fix + fit[1], color=[0,0,0])
ax.plot(temp_meg_fix, temp_meg_lex, 'o', markerfacecolor=[0.5,0.5,0.5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=c_table[3], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([-1.5, 4.5])
plt.xlim([-1.5, 4])
r, p = stats.pearsonr(temp_meg_fix,temp_meg_lex)
print('IFG: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('IFG_lexical_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
"""Correlation over time"""
""" Correlation """
temp1 = X11[:,:,all_subject,:]
M = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[broca_vertices_l,:,:,:],axis=0)
del temp1, temp2
sstep = 10
tstart = 200
n_ttest = np.empty((len(range(tstart,800,sstep)),1))
p_ttest = np.empty((len(range(tstart,800,sstep)),1))
r_lex = np.empty((len(range(tstart,800,sstep)),1))
p_lex = np.empty((len(range(tstart,800,sstep)),1))
r_dot = np.empty((len(range(tstart,800,sstep)),1))
p_dot = np.empty((len(range(tstart,800,sstep)),1))
r_bet = np.empty((len(range(tstart,800,sstep)),1))
p_bet = np.empty((len(range(tstart,800,sstep)),1))
temp_meg_lex = np.empty((len(all_subject),len(range(tstart,800,sstep))))
temp_meg_fix = np.empty((len(all_subject),len(range(tstart,800,sstep))))
for ii, t1 in zip(range(0,len(range(tstart,800,sstep))), range(tstart,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([t1,t1+50],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1 = np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_lex[ii], p_lex[ii] = stats.pearsonr(temp_read,temp_meg)
n_ttest[ii], p_ttest[ii] = stats.ttest_1samp(lowNoise1,0)
temp_meg_lex[:,ii] = temp_meg
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_dot[ii], p_dot[ii] = stats.pearsonr(temp_read,temp_meg)
temp_meg_fix[:,ii] = temp_meg
r_bet[ii], p_bet[ii] = stats.pearsonr(temp_meg_fix[:,ii],temp_meg_lex[:,ii])
#%%
"""Correlation over time"""
c = ( (0.6196, 0.0039, 0.2588),
(0.8353, 0.2431, 0.3098),
(0.9569, 0.4275, 0.2627),
(0.9922, 0.6824, 0.3804),
(0.9961, 0.8784, 0.5451),
(1.0000, 1.0000, 0.7490) )
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_lex, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('IFG: Lexical task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_lex[ttt] >= 0.05:
al = plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_lex[ttt] < 0.05 and p_lex[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_lex[ttt] < 0.01 and p_lex[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_lex[ttt] < 0.005 and p_lex[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_lex_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_lex_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_dot, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('IFG: Fixation task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_dot[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_dot[ttt] < 0.05 and p_dot[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_dot[ttt] < 0.01 and p_dot[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_dot[ttt] < 0.005 and p_dot[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_dot_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_dot_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_bet, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation between tasks (r-value)')
plt.title('IFG')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_bet[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_bet[ttt] < 0.05 and p_bet[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_bet[ttt] < 0.01 and p_bet[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_bet[ttt] < 0.005 and p_bet[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_bettasks_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_bettasks_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" TPJ """
tpj_vertices_l = np.load('TPJ_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
import os
from mms import MMS
import numpy as np
import scipy.sparse as sp
import iast
class SysParams:
def __init__(self):
self.spatial_discretization_method = 0
self.u_0 = 0
self.t_end = 0
self.dt = 0
self.nt = 0
self.p_in = 0
self.p_out = 0
self.n_points = 0
self.y_in = 0
self.temp = 0
self.void_frac = 0
self.rho_p = 0
self.kl = 0
self.disp = 0
self.c_len = 0
self.dz = 0
self.dp_dz = 0
self.v_in = 0
self.n_components = 0
self.p_total = 0
self.p_partial_in = 0
self.use_mms = 0
self.mms_mode = 0
self.mms_conv_factor = 0
self.ms_pt_distribution = 0
self.outlet_boundary_type = 0
self.void_frac_term = 0
self.atol = 0
self.time_stepping_scheme = 0
self.isotherms = 0
self.R = 0
self.t_samples = 0
self.MMS = 0
# Initializing matrices
self.kl_matrix = 0
self.disp_matrix = 0
self.g_matrix = 0
self.f_matrix = 0
self.l_matrix = 0
self.d_matrix = 0
self.b_v_vector = 0
self.e_vector = 0
self.xi = 0
def init_params(self, y_in, n_points, p_in, temp, c_len, u_in, void_frac, disp, kl, rho_p,
t_end, dt, y_fill_gas, disp_fill_gas, kl_fill_gas, time_stepping_method, atol, dimensionless=True,
mms=False, mms_mode="transient", mms_convergence_factor=1000,
spatial_discretization_method="central"):
"""
Initializes the solver with the parameters that remain constant throughout the calculations
and the initial conditions. Depending on the dimensionless parameter, the variables might be turned into their
dimensionless equivalents. The presence of helium is implicit. It means that it is always present no matter
what parameters are passed. Its pressure is equal to the pressure of all components at the inlet.
Therefore, the number of components is always len(y_in)+1.
:param spatial_discretization_method: Sets whether to use upwind or central method
:param atol: Absolute error for linear solvers and time stepping schemes
:param t_end: Final time point.
:param dt: Length of one time step.
:param dimensionless: Boolean that specifies whether dimensionless numbers are used.
:param time_stepping_method: String that specifies the type of time-stepping scheme.
:param y_in: Array containing mole fractions at the start.
:param n_points: Number of grid points.
:param p_in: Total pressure at the inlet.
:param temp: Temperature of the system in Kelvins.
:param c_len: Column length.
:param u_in: Speed at the inlet.
:param void_frac: Void fraction (epsilon).
:param disp: Array containing dispersion coefficient for every component.
:param kl: Array containing effective mass transport coefficient of every component.
:param rho_p: Density of the adsorbent.
:param kl_fill_gas: mass transfer coefficient of helium, should be 0 by default
:param disp_fill_gas: dispersion coefficient of helium
:param y_fill_gas: mole fraction of helium at the inlet, should be 0 by default
:param mms: Choose if dynamic code testing is switched on.
:param mms_mode: Choose if MMS is to be used to steady state or transient simulation.
:param mms_convergence_factor: Choose how quickly MMS is supposed to reach steady state.
"""
p_out = p_in # This could be changed to user input if discretization and vectorization is ever fixed.
ms_pt_distribution = "constant" # Same as above
self.R = 8.314
if dimensionless:
# Dimensionless quantities
self.t_end = t_end * u_in / c_len
self.dt = dt * u_in / c_len
self.nt = int(self.t_end / self.dt)
self.y_in = np.asarray(y_in)
self.kl = np.asarray(kl) * c_len / u_in
self.disp = np.asarray(disp) / (c_len * u_in)
else:
# Quantities with dimensions
self.t_end = t_end
self.dt = dt
self.nt = int(self.t_end / self.dt)
self.y_in = np.asarray(y_in)
self.kl = np.asarray(kl)
# Licensed under a 3-clause BSD style license - see LICENSE.rst
from __future__ import absolute_import, print_function, division
from itertools import product
import pytest
import numpy as np
from numpy.testing import assert_equal, assert_allclose
from astropy import units as u
from astropy.coordinates.angle_utilities import angular_separation
from ..core import nside_to_pixel_resolution
from .. import core_cython
NSIDE_POWERS = range(0, 21)
ORDERS = ('nested', 'ring')
def get_test_indices(nside):
# For a large number of pixels, we only compute a random subset of points
if nside > 2 ** 8:
try:
return np.random.randint(0, 12 * nside ** 2, 12 * 8 ** 2, dtype=np.int64)
except TypeError: # Numpy 1.9 and 1.10
return (np.random.random(12 * 8 ** 2) * (12 * float(nside) ** 2)).astype(np.int64, copy=False)
else:
return np.arange(12 * nside ** 2, dtype=np.int64)
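# e.g. get_test_indices(2) returns np.arange(48) (all 12 * 2**2 pixels), while for
# nside > 2**8 a random sample of 12 * 8**2 = 768 pixel indices is drawn instead.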
# NOTE: we use capfd in all tests here to make sure no errors/warnings are being
# raised by the C code.
@pytest.mark.parametrize(('order', 'nside_power'), product(ORDERS, NSIDE_POWERS))
def test_roundtrip_healpix_no_offsets(order, nside_power, capfd):
nside = 2 ** nside_power
index = get_test_indices(nside)
lon, lat = core_cython.healpix_to_lonlat(index, nside, order)
index_new = core_cython.lonlat_to_healpix(lon, lat, nside, order)
assert_equal(index, index_new)
out, err = capfd.readouterr()
assert out == "" and err == ""
@pytest.mark.parametrize(('order', 'nside_power'), product(ORDERS, NSIDE_POWERS))
def test_roundtrip_healpix_with_offsets(order, nside_power, capfd):
nside = 2 ** nside_power
index = get_test_indices(nside)
dx = np.random.random(index.shape)
dy = np.random.random(index.shape)
lon, lat = core_cython.healpix_with_offset_to_lonlat(index, dx, dy, nside, order)
index_new, dx_new, dy_new = core_cython.lonlat_to_healpix_with_offset(lon, lat, nside, order)
assert_equal(index, index_new)
assert_allclose(dx, dx_new, atol=1e-8)
assert_allclose(dy, dy_new, atol=1e-8)
import numpy as np
from itertools import product
from sklearn.model_selection import train_test_split
def get_mask(pairs, shape, row_g2i, col_g2i, sym=True):
'''
Convert a list of pairs into a boolean indicator matrix m, where
(a, b) in pairs is indexed into m[ai, bj] and m[bi, aj] where possible.
If sym = False, a pair is indicated only once, so either
m[ai, bj] == True or m[bi, aj] == True, but not both.
'''
mask = np.zeros(shape, dtype=bool)
for i, (a, b) in enumerate(pairs):
inserted=False
if a in row_g2i and b in col_g2i:
mask[row_g2i[a], col_g2i[b]] = True
inserted = True
if not sym and inserted:
assert(inserted)
continue
if a in col_g2i and b in row_g2i:
mask[row_g2i[b], col_g2i[a]] = True
inserted = True
assert(inserted)
return mask
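# Example (hypothetical gene names): with row_g2i = {'A': 0, 'B': 1} and
# col_g2i = {'B': 0, 'C': 1}, get_mask([('A', 'B')], (2, 2), row_g2i, col_g2i)
# returns a mask that is True only at [0, 0], i.e. row 'A' vs column 'B'.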
def get_eval_pair_list(pairs, row_g2i, col_g2i, gi_data):
values = gi_data['values']
pairlist_1 = []
pairlist_2 = []
for A, B in pairs:
A_r = row_g2i.get(A)
B_c = col_g2i.get(B)
A_c = col_g2i.get(A)
B_r = row_g2i.get(B)
if (A_r is not None) and \
(B_c is not None) and \
(A_c is not None) and \
(B_r is not None):
v_ab = values[A_r, B_c]
v_ba = values[B_r, A_c]
if not (np.isnan(v_ab) or np.isnan(v_ba)):
pairlist_1.append((A_r, B_c))
pairlist_2.append((B_r, A_c))
else:
pass
elif (A_r is not None) and \
(B_c is not None):
if not np.isnan(values[A_r, B_c]):
pairlist_1.append((A_r, B_c))
pairlist_2.append((A_r, B_c))
else:
pass
elif (A_c is not None) and \
(B_r is not None):
if not np.isnan(values[B_r, A_c]):
pairlist_1.append((B_r, A_c))
pairlist_2.append((B_r, A_c))
else:
pass
else:
continue
pairlist_1 = tuple(zip(*pairlist_1))
pairlist_2 = tuple(zip(*pairlist_2))
return pairlist_1, pairlist_2
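# The two returned tuples are aligned index lists in (row_indices, col_indices) form:
# entry i of pairlist_1 addresses the (A, B) cell and entry i of pairlist_2 the (B, A)
# cell (or the same cell when only one orientation exists), so the two indexed values
# can be averaged at evaluation time.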
def gi_train_test_split_w_pairlists(gi_data, hf):
'''
Sample train/test set but return lists of indices whose indexed values should be
averaged for evaluation
[(A,B), ...], [(B,A),...]
'''
rows = gi_data['rows']
cols = gi_data['cols']
values = gi_data['values']
col_g2i = dict((n, i) for i, n in enumerate(cols))
row_g2i = dict((n, i) for i, n in enumerate(rows))
rowset = set(rows)
colset = set(cols)
pairs = product(rows, cols)
pairs = set(frozenset((a,b)) for a,b in pairs if a != b)
pairs = [tuple(p) for p in pairs]
train_pairs, test_pairs = train_test_split(pairs, test_size=hf)
test_mask = get_mask(test_pairs, values.shape, row_g2i, col_g2i)
# This implements train/test over *all* possible pairs,
# which in expectation is equivalent to CV over observed pairs
value_mask = ~np.isnan(values)
test_mask = np.logical_and(value_mask, test_mask)
train_mask = np.logical_and(value_mask, ~test_mask)
train_X = np.where(train_mask, values, np.nan)
test_X = np.where(test_mask, values, np.nan)
eval_pairs1, eval_pairs2 = get_eval_pair_list(test_pairs, row_g2i, col_g2i, gi_data)
assert(np.all(~np.isnan(test_X[test_mask])))
# coding:utf-8
import femm
from math import tan, pi, atan, cos, sin, sqrt, copysign, exp
import numpy as np
from csv import reader as csv_reader
import logging
import os
from collections import OrderedDict
import sys
import subprocess
# from utility import *
# import utility
# will not create new list as zip does
from time import sleep
from time import time as clock_time
from .VanGogh import VanGogh
# from . import VanGogh
from .winding_layout_im import winding_layout_v2
SELECT_ALL = 4
EPS = 1e-2 # unit mm
__all__ = ['FEMM_Solver']
class VanGogh_FEMM(VanGogh):
def __init__(self, im, child_index=0):
super(VanGogh_FEMM, self).__init__(im, child_index)
@staticmethod
def mirror_and_copyrotate(Q, Radius, fraction):
# Mirror
femm.mi_selectcircle(0, 0, Radius + EPS,
SELECT_ALL) # this EPS is sometime necessary to selece the arc at Radius.
femm.mi_mirror2(0, 0, -Radius, 0, SELECT_ALL)
# Rotate
femm.mi_selectcircle(0, 0, Radius + EPS, SELECT_ALL)
femm.mi_copyrotate2(0, 0, 360. / Q, int(Q) / fraction, SELECT_ALL)
@staticmethod
def draw_arc(p1, p2, angle, maxseg=1, center=None, **kwarg):
femm.mi_drawarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, maxseg) # [deg]
@staticmethod
def add_arc(p1, p2, angle, maxseg=1, center=None, **kwarg):
femm.mi_addarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, maxseg) # [deg]
@staticmethod
def draw_line(p1, p2):
femm.mi_drawline(p1[0], p1[1], p2[0], p2[1])
@staticmethod
def add_line(p1, p2):
femm.mi_addsegment(p1[0], p1[1], p2[0], p2[1])
def some_solver_related_operations_rotor_before_mirror_rotation(self, im, P6, P8):
if im.use_drop_shape_rotor_bar == True:
# constraint to reduce element number @rotor-P6
femm.mi_selectarcsegment(P6[0], P6[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
# constraint to reduce element number @rotor-P8
femm.mi_selectarcsegment(P8[0], P8[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
else:
# constraint to reduce element number @rotor-P8
femm.mi_selectarcsegment(P8[0], P8[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
def some_solver_related_operations_fraction(self, im, fraction):
# Boundary
if fraction == 1:
femm.mi_drawarc(im.Radius_Shaft, 0, -im.Radius_Shaft, 0, 180, 20) # do not use too small a segment size for the boundary, to avoid an overly fine mesh (this setting has no effect here)
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 20)
femm.mi_drawarc(im.Radius_OuterStatorYoke, 0, -im.Radius_OuterStatorYoke, 0, 180, 20)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 20)
elif fraction == 4:
femm.mi_drawarc(-im.Radius_Shaft, 0, 0, -im.Radius_Shaft, 90, 10)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, 0, -im.Radius_OuterStatorYoke, 90, 10)
femm.mi_selectrectangle(-EPS - im.Radius_Shaft, EPS, EPS - im.Radius_OuterStatorYoke,
im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_selectrectangle(EPS, -EPS - im.Radius_Shaft, im.Radius_OuterStatorYoke,
EPS - im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_deleteselected()
# between the 2nd and 3rd quarters
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
self.add_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
self.add_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
self.draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
self.draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
self.add_line(p1, p2)
# between 3rd and 4th quarters
p1 = (0, -im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2)
p2 = (0, -im.Radius_Shaft)
self.add_line(p1, p2)
p2 = (0, -im.Location_RotorBarCenter - im.Radius_of_RotorSlot)
self.add_line(p1, p2)
p1 = (0, -im.Radius_OuterRotor - 0.5 * im.Length_AirGap)
self.draw_line(p1, p2)
p2 = (0, -im.Radius_OuterRotor - im.Length_AirGap)
self.draw_line(p1, p2)
p1 = (0, -im.Radius_OuterStatorYoke)
self.add_line(p1, p2)
elif fraction == 2:
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 15)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 15)
femm.mi_selectrectangle(EPS - im.Radius_OuterStatorYoke, EPS, -EPS + im.Radius_OuterStatorYoke,
EPS + im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_deleteselected()
# between the 2nd and 3rd quarters
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
self.add_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
self.add_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
self.draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
self.draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
self.add_line(p1, p2)
# between the 1st and 4th quarters
p1 = (+im.Location_RotorBarCenter2 - im.Radius_of_RotorSlot2, 0)
p2 = (+im.Radius_Shaft, 0)
self.add_line(p1, p2)
p2 = (+im.Location_RotorBarCenter + im.Radius_of_RotorSlot, 0)
self.add_line(p1, p2)
p1 = (+im.Radius_OuterRotor + 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
self.draw_line(p1, p2)
p2 = (+im.Radius_OuterRotor + im.Length_AirGap, 0)
self.draw_line(p1, p2)
p1 = (+im.Radius_OuterStatorYoke, 0)
self.add_line(p1, p2)
else:
raise Exception('not supported fraction = %d' % (fraction))
# Air Gap Boundary for Rotor Motion #1
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
class FEMM_Solver(object):
def __init__(self, im, flag_read_from_jmag=True, freq=0, individual_index=None, bool_static_fea_loss=False):
self.bool_static_fea_loss = bool_static_fea_loss
self.individual_index = individual_index
self.vangogh = VanGogh_FEMM(im)
self.deg_per_step = im.fea_config_dict['femm.deg_per_step'] # deg, we need this for show_results
self.flag_read_from_jmag = flag_read_from_jmag # read the pre-determined rotor currents from the eddy current FEA of jmag or femm
self.freq = freq
if freq == 0:
self.flag_eddycurrent_solver = False
self.flag_static_solver = not self.flag_eddycurrent_solver
self.fraction = 1
else:
self.flag_eddycurrent_solver = True
self.flag_static_solver = not self.flag_eddycurrent_solver
self.fraction = 2
self.stack_length = im.l_st
self.im = im
self.dir_codes = im.fea_config_dict['run_folder']
# evaluate initial design only or optimize
if im.bool_initial_design == True:
raise Exception('It seems this branch is basically never reached???')
# self.dir_run = im.fea_config_dict['dir.femm_files'] + im.fea_config_dict['model_name_prefix'] + '/'
self.dir_run = im.fea_config_dict['model_name_prefix'] + '/'
if not os.path.exists(self.dir_run):
logging.getLogger(__name__).debug('FEMM: There is no run yet. Generate the run folder under %s.',
self.dir_run)
os.makedirs(self.dir_run)
if flag_read_from_jmag == True:
if self.individual_index is not None:
self.dir_run += 'static-jmag/' + 'ind#%04d/' % (self.individual_index)
else:
self.dir_run += 'static-jmag/'
if not os.path.exists(self.dir_run):
os.makedirs(self.dir_run)
else:
if self.individual_index is not None:
self.dir_run += 'static-femm/' + 'ind#%04d/' % (self.individual_index)
else:
self.dir_run += 'static-femm/'
if not os.path.exists(self.dir_run):
os.makedirs(self.dir_run)
else:
if self.individual_index is not None:
# self.dir_run = im.fea_config_dict['dir.femm_files'] + im.fea_config_dict['run_folder'] + 'ind#%04d/'%(self.individual_index)
self.dir_run = im.fea_config_dict['run_folder'] + 'ind#%04d/' % (self.individual_index)
else:
# self.dir_run = im.fea_config_dict['dir.femm_files'] + im.fea_config_dict['run_folder']
self.dir_run = im.fea_config_dict['run_folder']
if not os.path.exists(self.dir_run):
logger = logging.getLogger(__name__)
logger.debug('FEMM: There is no run yet. Generate the run folder as %s.', self.dir_run)
os.makedirs(self.dir_run)
self.dir_run_sweeping = self.dir_run + 'femm_temp/' # 'sweeping/'
if not os.path.isdir(self.dir_run_sweeping):
os.makedirs(self.dir_run_sweeping)
self.output_file_name = self.get_output_file_name()
self.rotor_slot_per_pole = int(im.Qr / im.DriveW_poles)
self.rotor_phase_name_list = 'ABCDEFGHIJKLMNOPQRSTUVWXYZ'
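# For example (illustrative numbers only): with Qr = 16 rotor slots and DriveW_poles = 4,
# rotor_slot_per_pole = 4, so the pole-specific rotor circuits are named rA, rB, rC and rD
# using the letters of rotor_phase_name_list.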
def add_block_labels(self, fraction=1):
im = self.im
SERIES_CONNECTED = 1
PARALLEL_CONNECTED = 0
if self.im.fea_config_dict['femm.Coarse_Mesh'] == True: # Coarse mesh
if self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 2:
MESH_SIZE_ALUMINUM = 2 * 6 # 3
MESH_SIZE_STEEL = 2 * 6 # 4
MESH_SIZE_AIR = 2 * 0.75 # 0.5
MESH_SIZE_COPPER = 2 * 10 # 8
elif self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 3:
MESH_SIZE_ALUMINUM = 1 * 6 # 3
MESH_SIZE_STEEL = 1 * 6 # 4
MESH_SIZE_AIR = 1 * 0.75 # 0.5
MESH_SIZE_COPPER = 1 * 10 # 8
else:
raise Exception('Invalid femm.Coarse_Mesh_Level.')
else:
MESH_SIZE_ALUMINUM = 3
MESH_SIZE_STEEL = 4
MESH_SIZE_AIR = 0.5
MESH_SIZE_COPPER = 8
def block_label(group_no, material_name, p, meshsize_if_no_automesh, incircuit='<None>', turns=0, automesh=True,
magdir=0):
femm.mi_addblocklabel(p[0], p[1])
femm.mi_selectlabel(p[0], p[1])
femm.mi_setblockprop(material_name, automesh, meshsize_if_no_automesh, incircuit, magdir, group_no, turns)
femm.mi_clearselected()
# air region @225deg
X = Y = -(im.Radius_OuterRotor + 0.5 * im.Length_AirGap) / 1.4142135623730951
block_label(9, 'Air', (X, Y), MESH_SIZE_AIR, automesh=self.bool_automesh)
# # Air Gap Boundary for Rotor Motion #2
# block_label(9, '<No Mesh>', (0, im.Radius_OuterRotor+0.5*im.Length_AirGap), 5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.7*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.3*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# shaft
if fraction == 1:
block_label(102, '<No Mesh>', (0, 0), 20)
# block_label(101, 'Air', (0, 0), 10, automesh=True) # for deeply-saturated rotor yoke
# Iron Core @225deg
if 'M19' in self.im.stator_iron_mat['core_material']:
steel_name = 'M19Gauge29'
elif self.im.spec_input_dict['Steel'] == 'M15':
steel_name = 'My M-15 Steel'
elif self.im.spec_input_dict['Steel'] == 'Arnon5':
steel_name = 'Arnon5-final'
X = Y = -(im.Radius_Shaft + EPS * 10) / 1.4142135623730951
block_label(100, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
X = Y = -(0.5 * (im.Radius_InnerStatorYoke + im.Radius_OuterStatorYoke)) / 1.4142135623730951
block_label(10, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
# Circuit Configuration
# Rotor Winding Part
# Our proposed pole-specific winding with a neutral plate (in this case we ignore fraction and always draw the whole model!)
if self.im.PoleSpecificNeutral == True:
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
# THETA_BAR = pi - angle_per_slot
wily_Qr = winding_layout_v2.pole_specific_winding_with_neutral(self.im.Qr, self.im.DriveW_poles / 2,
self.im.BeariW_poles / 2,
self.im.pitch)
for ind, pair in enumerate(wily_Qr.pairs):
circuit_name = 'r%s' % (self.rotor_phase_name_list[ind])
# add excitation for the rotor circuit (which is only seen in FEMM static solver)
if self.flag_static_solver == True: # self.freq == 0: # Static FEA
femm.mi_addcircprop(circuit_name, self.dict_rotor_current_function[ind](0.0), SERIES_CONNECTED)
# print self.dict_rotor_current_function[i](0.0)
else: # Eddy Current FEA (with multi-phase 4-bar cage... haha this is practically nothing)
femm.mi_addcircprop(circuit_name, 0, PARALLEL_CONNECTED) # PARALLEL for PS circuit
# THETA_BAR += angle_per_slot
# The general implementation of the any-pole PS rotor winding
print('Circuit Configuration')
for index, j in enumerate(pair):
print(f'|FEMM_Solver.{circuit_name}|', j, 'in', pair)
THETA = angle_per_slot * j
X = R * cos(THETA);
Y = R * sin(THETA)
if index % 2 == 0:
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
else:
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
else: # Chiba's conventional pole specific winding
if fraction == 1:
# Any pole Pole-Specific Rotor Winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
if self.flag_static_solver == True: # self.freq == 0: # Static FEA
femm.mi_addcircprop(circuit_name, self.dict_rotor_current_function[i](0.0), SERIES_CONNECTED)
# print self.dict_rotor_current_function[i](0.0)
else: # Eddy Current FEA (with multi-phase 4-bar cage... haha this is practically nothing)
femm.mi_addcircprop(circuit_name, 0, PARALLEL_CONNECTED) # PARALLEL for PS circuit
THETA_BAR += angle_per_slot
# The general implementation of the any-pole PS rotor winding
for j in range(im.DriveW_poles):
THETA = THETA_BAR + angle_per_slot * j * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
if j % 2 == 0:
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
else:
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
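# Worked example of the placement above (illustrative numbers only): with Qr = 16 and
# DriveW_poles = 4, angle_per_slot = 22.5 deg and rotor_slot_per_pole = 4, so circuit rA
# (i = 0) receives bars at THETA_BAR, THETA_BAR + 90 deg, THETA_BAR + 180 deg and
# THETA_BAR + 270 deg with turns = +1, -1, +1, -1, i.e. one bar per pole with alternating polarity.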
elif fraction == 4 or fraction == 2:
# 2 pole Pole-Specific Rotor Winding with fraction
# poly-four-bar cage + no bearing current excited <=> pole-specific winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot + EPS # add EPS for the half bar
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
# Eddy Current FEA (a multi-phase 4-bar cage behaves the same as the PS rotor winding when no bearing current is excited)
femm.mi_addcircprop(circuit_name, 0,
PARALLEL_CONNECTED) # PARALLEL for PS circuit (valid only if there is no 2-pole field)
THETA_BAR += angle_per_slot
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1) # rA+ ~ rH+
if fraction == 2:
THETA = THETA_BAR + angle_per_slot * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name,
turns=-1) # rA- However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# the other half bar
# THETA_BAR += angle_per_slot
THETA = THETA + angle_per_slot - 2 * EPS
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit='r%s' % (self.rotor_phase_name_list[0]),
turns=-1) # However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# Stator Winding
npb = im.number_parallel_branch
nwl = im.no_of_layers # number of winding layers
if self.flag_static_solver == True: # self.freq == 0:
# static solver
femm.mi_addcircprop('dU', self.dict_stator_current_function[3](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('dV', self.dict_stator_current_function[4](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('dW', self.dict_stator_current_function[5](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('bU', self.dict_stator_current_function[0](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('bV', self.dict_stator_current_function[1](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('bW', self.dict_stator_current_function[2](0.0), SERIES_CONNECTED)
else: # eddy current solver
# if im.fea_config_dict['DPNV_separate_winding_implementation'] == True or im.fea_config_dict['DPNV'] == False:
if im.DPNV_or_SEPA == False:
# either a separate winding or a DPNV winding implemented as a separate winding
ampD = im.DriveW_CurrentAmp / npb
ampB = im.BeariW_CurrentAmp
else:
# case: DPNV as an actual two layer winding
ampD = im.DriveW_CurrentAmp / npb
ampB = ampD
# 2020/07/07: Does the commutating sequence even matter for frequency analysis??? Simply deleting this if-else statement should do no harm. (to be tested)
if im.CommutatingSequenceD == 1:
MyCommutatingSequence = ['-', '+'] # 2 pole
else:
# raise
MyCommutatingSequence = ['+', '-'] # 4 pole legacy
femm.mi_addcircprop('dU', '%g' % (ampD), SERIES_CONNECTED)
femm.mi_addcircprop('dV', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_addcircprop('dW', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[1]),
SERIES_CONNECTED)
femm.mi_addcircprop('bU', '%g' % (ampB), SERIES_CONNECTED)
femm.mi_addcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_addcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[1]),
SERIES_CONNECTED)
# if fraction == 1: # I thought PS can be realized in FEMM but I was wrong, this fraction==1 case should be deleted!
# # femm.mi_addcircprop('bA', '%g' %(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_addcircprop('bB', '%g*(-0.5+I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_addcircprop('bC', '%g*(-0.5-I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# femm.mi_addcircprop('bU', '%g' %(ampB), SERIES_CONNECTED)
# femm.mi_addcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[0]), SERIES_CONNECTED)
# femm.mi_addcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[1]), SERIES_CONNECTED)
# elif fraction == 4 or fraction == 2: # no bearing current
# femm.mi_addcircprop('bU', 0, SERIES_CONNECTED)
# femm.mi_addcircprop('bV', 0, SERIES_CONNECTED)
# femm.mi_addcircprop('bW', 0, SERIES_CONNECTED)
# dict_dir = {'+':1, '-':-1} # wrong (not consistent with JMAG)
dict_dir = {'+': -1, '-': 1, 'o': 0}
R = 0.5 * (im.Radius_OuterRotor + im.Radius_InnerStatorYoke)
angle_per_slot = 2 * pi / im.Qs
# torque winding's blocks
THETA = pi - angle_per_slot + 0.5 * angle_per_slot - 3.0 / 360 # This 3 deg must be less than 360/Qs/2; it is made this large so it is easier to see in the GUI.
count = 0
# for phase, up_or_down in zip(im.l_rightlayer1,im.l_rightlayer2):
for phase, up_or_down in zip(im.layer_phases[0], im.layer_polarity[0]):
circuit_name = 'd' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
if fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.DriveW_zQ / nwl * dict_dir[up_or_down])
# bearing winding's blocks
if fraction == 1:
THETA = pi - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down in zip(im.layer_phases[1], im.layer_polarity[1]):
circuit_name = 'b' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
# if self.im.fea_config_dict['DPNV'] == True:
# else: # separate winding (e.g., Chiba's)
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
elif fraction == 4 or fraction == 2:
# Danger! By default FEMM shorts together at infinity every conductor whose 'incircuit' is not set -- that is, the stator suspension winding could end up shorted to the squirrel cage!
# Therefore, always assign the suspension winding to a circuit, make it series-connected, and set its current to 0 A.
THETA = pi - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
count = 0
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down in zip(im.wily.layer_Y_phases, im.wily.layer_Y_signs):
circuit_name = 'b' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
elif fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
# Boundary Conditions
# femm.mi_makeABC() # open boundary
if fraction == 1:
femm.mi_addboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_selectarcsegment(0, -im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10) # maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
elif fraction == 4:
femm.mi_addboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_addboundprop('apbc1', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc2', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc3', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc5', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc6', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0,
0) # http://www.femm.info/wiki/periodicboundaries
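# Per the FEMM manual, the 10th argument of mi_addboundprop (BdryFormat) selects the boundary
# type: 4 = periodic, 5 = anti-periodic. Hence the quarter model (fraction == 4) uses the
# anti-periodic 'apbc*' properties here, while the half model (fraction == 2) below uses the
# periodic 'pbc*' properties.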
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc6", 4, False, False, 10)
femm.mi_clearselected()
elif fraction == 2:
femm.mi_addboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_addboundprop('pbc1', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc2', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc3', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc5', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc6', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(-R, 0)
femm.mi_selectsegment(+R, 0)
femm.mi_setsegmentprop("pbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc6", 4, False, False, 10)
femm.mi_clearselected()
# Air Gap Boundary for Rotor Motion #3
# inner_angle = 0; outer_angle = 0
# femm.mi_addboundprop('AGB4RM', 0,0,0, 0,0,0,0,0, 6, inner_angle, outer_angle)
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# Other arc-segment-specific mesh constraints are already done in draw_model()
def add_block_labels_static_solver(self, fraction=1):
# add_block_labels_static_solver is implemented with the new general DPNV winding implementation
im = self.im
SERIES_CONNECTED = 1
PARALLEL_CONNECTED = 0
if self.im.fea_config_dict['femm.Coarse_Mesh'] == True: # Coarse mesh
if self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 2:
MESH_SIZE_ALUMINUM = 2 * 6 # 3
MESH_SIZE_STEEL = 2 * 6 # 4
MESH_SIZE_AIR = 2 * 0.75 # 0.5
MESH_SIZE_COPPER = 2 * 10 # 8
elif self.im.fea_config_dict['femm.Coarse_Mesh_Level'] == 3:
MESH_SIZE_ALUMINUM = 1 * 6 # 3
MESH_SIZE_STEEL = 1 * 6 # 4
MESH_SIZE_AIR = 1 * 0.75 # 0.5
MESH_SIZE_COPPER = 1 * 10 # 8
else:
raise Exception('Invalid femm.Coarse_Mesh_Level.')
else:
MESH_SIZE_ALUMINUM = 3
MESH_SIZE_STEEL = 4
MESH_SIZE_AIR = 0.5
MESH_SIZE_COPPER = 8
def block_label(group_no, material_name, p, meshsize_if_no_automesh, incircuit='<None>', turns=0, automesh=True,
magdir=0):
femm.mi_addblocklabel(p[0], p[1])
femm.mi_selectlabel(p[0], p[1])
femm.mi_setblockprop(material_name, automesh, meshsize_if_no_automesh, incircuit, magdir, group_no, turns)
femm.mi_clearselected()
# air region @225deg
X = Y = -(im.Radius_OuterRotor + 0.5 * im.Length_AirGap) / 1.4142135623730951
block_label(9, 'Air', (X, Y), MESH_SIZE_AIR, automesh=self.bool_automesh)
# # Air Gap Boundary for Rotor Motion #2
# block_label(9, '<No Mesh>', (0, im.Radius_OuterRotor+0.5*im.Length_AirGap), 5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.7*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# block_label(9, 'Air', (0, im.Radius_OuterRotor+0.3*im.Length_AirGap), 0.5, automesh=self.bool_automesh)
# shaft
if fraction == 1:
block_label(102, '<No Mesh>', (0, 0), 20)
# block_label(101, 'Air', (0, 0), 10, automesh=True) # for deeply-saturated rotor yoke
# Iron Core @225deg
if 'M19' in self.im.spec_input_dict['Steel']:
steel_name = 'M19Gauge29'
elif self.im.spec_input_dict['Steel'] == 'M15':
steel_name = 'My M-15 Steel'
elif self.im.spec_input_dict['Steel'] == 'Arnon5':
steel_name = 'Arnon5-final'
X = Y = -(im.Radius_Shaft + EPS * 10) / 1.4142135623730951
block_label(100, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
X = Y = -(0.5 * (im.Radius_InnerStatorYoke + im.Radius_OuterStatorYoke)) / 1.4142135623730951
block_label(10, steel_name, (X, Y), MESH_SIZE_STEEL, automesh=self.bool_automesh)
# Circuit Configuration
# Rotor Winding
if fraction == 1:
# 4 pole Pole-Specific Rotor Winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
if self.flag_static_solver == True: # self.freq == 0: # Static FEA
femm.mi_addcircprop(circuit_name, self.dict_rotor_current_function[i](0.0), SERIES_CONNECTED)
# print self.dict_rotor_current_function[i](0.0)
else: # Eddy Current FEA (with multi-phase 4-bar cage... haha this is practically nothing)
femm.mi_addcircprop(circuit_name, 0, PARALLEL_CONNECTED) # PARALLEL for PS circuit
THETA_BAR += angle_per_slot
if True:
# The general implementation of the any-pole PS rotor winding
for j in range(im.DriveW_poles):
THETA = THETA_BAR + angle_per_slot * j * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
if j % 2 == 0:
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
else:
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
elif im.DriveW_poles == 4:
# The explicit implementation, which only works for the 4-pole PS rotor winding
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
THETA = THETA_BAR + angle_per_slot * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
THETA = THETA_BAR + angle_per_slot * 2 * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1)
THETA = THETA_BAR + angle_per_slot * 3 * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=-1)
elif fraction == 4 or fraction == 2:
# 2 pole Pole-Specific Rotor Winding with fraction
# poly-four-bar cage + no bearing current excited <=> pole-specific winding
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot + EPS # add EPS for the half bar
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
# Eddy Current FEA (a multi-phase 4-bar cage behaves the same as the PS rotor winding when no bearing current is excited)
femm.mi_addcircprop(circuit_name, 0,
PARALLEL_CONNECTED) # PARALLEL for PS circuit (valid only if there is no 2-pole field)
THETA_BAR += angle_per_slot
THETA = THETA_BAR
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name, turns=1) # rA+ ~ rH+
if fraction == 2:
THETA = THETA_BAR + angle_per_slot * self.rotor_slot_per_pole
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit=circuit_name,
turns=-1) # rA- However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# the other half bar
# THETA_BAR += angle_per_slot
THETA = THETA + angle_per_slot - 2 * EPS
X = R * cos(THETA);
Y = R * sin(THETA)
block_label(101, 'Aluminum', (X, Y), MESH_SIZE_ALUMINUM, automesh=self.bool_automesh,
incircuit='r%s' % (self.rotor_phase_name_list[0]),
turns=-1) # However, this turns=-1 is not effective for PARALLEL_CONNECTED circuit
# Stator Winding
npb = im.wily.number_parallel_branch
nwl = im.wily.number_winding_layer # number of winding layers
if self.flag_static_solver == True: # self.freq == 0:
# static solver
femm.mi_addcircprop('U-GrpAC', self.dict_stator_current_function[0](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('V-GrpAC', self.dict_stator_current_function[1](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('W-GrpAC', self.dict_stator_current_function[2](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('U-GrpBD', self.dict_stator_current_function[3](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('V-GrpBD', self.dict_stator_current_function[4](0.0), SERIES_CONNECTED)
femm.mi_addcircprop('W-GrpBD', self.dict_stator_current_function[5](0.0), SERIES_CONNECTED)
else: # eddy current solver
# if im.fea_config_dict['DPNV_separate_winding_implementation'] == True or im.fea_config_dict['DPNV'] == False:
if im.spec_input_dict['DPNV_or_SEPA'] == False:
# either a separate winding or a DPNV winding implemented as a separate winding
ampD = im.DriveW_CurrentAmp / npb
ampB = im.BeariW_CurrentAmp
else:
# case: DPNV as an actual two layer winding
ampD = im.DriveW_CurrentAmp / npb
ampB = ampD
if im.wily.CommutatingSequenceD == 1:
MyCommutatingSequence = ['-', '+'] # 2 pole
else:
# raise
MyCommutatingSequence = ['+', '-'] # 4 pole legacy
femm.mi_addcircprop('dU', '%g' % (ampD), SERIES_CONNECTED)
femm.mi_addcircprop('dV', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_addcircprop('dW', '%g*(-0.5%sI*0.8660254037844386)' % (ampD, MyCommutatingSequence[1]),
SERIES_CONNECTED)
femm.mi_addcircprop('bU', '%g' % (ampB), SERIES_CONNECTED)
femm.mi_addcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[0]),
SERIES_CONNECTED)
femm.mi_addcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)' % (ampB, MyCommutatingSequence[1]),
SERIES_CONNECTED)
# if fraction == 1: # I thought PS can be realized in FEMM but I was wrong, this fraction==1 case should be deleted!
# # femm.mi_addcircprop('bA', '%g' %(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_addcircprop('bB', '%g*(-0.5+I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# # femm.mi_addcircprop('bC', '%g*(-0.5-I*0.8660254037844386)'%(im.BeariW_CurrentAmp), SERIES_CONNECTED)
# femm.mi_addcircprop('bU', '%g' %(ampB), SERIES_CONNECTED)
# femm.mi_addcircprop('bV', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[0]), SERIES_CONNECTED)
# femm.mi_addcircprop('bW', '%g*(-0.5%sI*0.8660254037844386)'%(ampB, MyCommutatingSequence[1]), SERIES_CONNECTED)
# elif fraction == 4 or fraction == 2: # no bearing current
# femm.mi_addcircprop('bU', 0, SERIES_CONNECTED)
# femm.mi_addcircprop('bV', 0, SERIES_CONNECTED)
# femm.mi_addcircprop('bW', 0, SERIES_CONNECTED)
# dict_dir = {'+':1, '-':-1} # wrong (not consistent with JMAG)
dict_dir = {'+': -1, '-': 1, 'o': 0}
R = 0.5 * (im.Radius_OuterRotor + im.Radius_InnerStatorYoke)
angle_per_slot = 2 * pi / im.Qs
# X layer winding's blocks
THETA = - angle_per_slot + 0.5 * angle_per_slot - 3.0 / 360 # This 3 deg must be less than 360/Qs/2; it is made this large so it is easier to see in the GUI.
count = 0
# for phase, up_or_down in zip(im.l_rightlayer1,im.l_rightlayer2):
for phase, up_or_down, AC_or_BD in zip(im.wily.layer_X_phases, im.wily.layer_X_signs, im.wily.grouping_AC):
# circuit_name = 'd' + phase
circuit_name = phase + '-Grp' + ('AC' if AC_or_BD else 'BD')
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
if fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
# if phase == 'U':
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.DriveW_zQ / nwl * dict_dir[up_or_down])
# print('|||', circuit_name, phase, up_or_down, AC_or_BD, X, Y)
# print(im.wily.grouping_AC)
# print('End of X layer')
# Y layer winding's blocks
if fraction == 1:
THETA = - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
grouping_AC_of_Y_layer = winding_layout_v2.infer_Y_layer_grpAC_from_X_layer_and_coil_pitch_y(
im.wily.grouping_AC, im.wily.coil_pitch_y)
# print('-'*100)
# print(im.wily.grouping_AC)
# print(grouping_AC_of_Y_layer)
# quit()
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down, AC_or_BD in zip(im.wily.layer_Y_phases, im.wily.layer_Y_signs,
grouping_AC_of_Y_layer):
# circuit_name = 'b' + phase
circuit_name = phase + '-Grp' + ('AC' if AC_or_BD else 'BD')
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
# if self.im.fea_config_dict['DPNV'] == True:
# else: # separate winding (e.g., Chiba's)
# if phase == 'U':
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
# print('|||', circuit_name, phase, up_or_down, AC_or_BD, X, Y)
# print(grouping_AC_of_Y_layer)
# print('End of Y layer')
elif fraction == 4 or fraction == 2:
# Danger! By default FEMM shorts together at infinity every conductor whose 'incircuit' is not set -- that is, the stator suspension winding could end up shorted to the squirrel cage!
# Therefore, always assign the suspension winding to a circuit, make it series-connected, and set its current to 0 A.
THETA = - angle_per_slot + 0.5 * angle_per_slot + 3.0 / 360
count = 0
# for phase, up_or_down in zip(im.l_leftlayer1,im.l_leftlayer2):
for phase, up_or_down in zip(im.wily.layer_Y_phases, im.wily.layer_Y_signs):
circuit_name = 'b' + phase
THETA += angle_per_slot
X = R * cos(THETA);
Y = R * sin(THETA)
count += 1
if fraction == 4:
if not (count > im.Qs * 0.5 + EPS and count <= im.Qs * 0.75 + EPS):
continue
elif fraction == 2:
if not (count > im.Qs * 0.5 + EPS):
continue
block_label(11, 'Copper', (X, Y), MESH_SIZE_COPPER, automesh=self.bool_automesh, incircuit=circuit_name,
turns=im.BeariW_turns / nwl * dict_dir[up_or_down])
# Boundary Conditions
# femm.mi_makeABC() # open boundary
if fraction == 1:
femm.mi_addboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
femm.mi_selectarcsegment(0, -im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10) # maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_OuterStatorYoke)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 10)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_selectarcsegment(0, im.Radius_Shaft)
femm.mi_setarcsegmentprop(20, "BC:A=0", False, 100)
femm.mi_clearselected()
elif fraction == 4:
femm.mi_addboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_addboundprop('apbc1', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc2', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc3', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc5', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0, 0)
femm.mi_addboundprop('apbc6', 0, 0, 0, 0, 0, 0, 0, 0, 5, 0,
0) # http://www.femm.info/wiki/periodicboundaries
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(0, -R)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("apbc6", 4, False, False, 10)
femm.mi_clearselected()
elif fraction == 2:
femm.mi_addboundprop('BC:A=0', 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0)
X = Y = -(im.Radius_OuterStatorYoke) / 1.4142135623730951
femm.mi_selectarcsegment(X, Y)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 10) # maxseg = 20 deg (only this is found effective)
femm.mi_clearselected()
X = Y = -(im.Radius_Shaft) / 1.4142135623730951
femm.mi_selectarcsegment(0, -im.Radius_Shaft)
femm.mi_setarcsegmentprop(10, "BC:A=0", False, 100)
femm.mi_clearselected()
femm.mi_addboundprop('pbc1', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc2', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc3', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc5', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
femm.mi_addboundprop('pbc6', 0, 0, 0, 0, 0, 0, 0, 0, 4, 0, 0)
R = im.Radius_Shaft + EPS
femm.mi_selectsegment(-R, 0)
femm.mi_selectsegment(+R, 0)
femm.mi_setsegmentprop("pbc1", 4, False, False, 100)
femm.mi_clearselected()
R = im.Location_RotorBarCenter
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc2", 3, False, False, 100)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc3", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterRotor + 0.75 * im.Length_AirGap
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc5", 0.5, False, False, 9)
femm.mi_clearselected()
R = im.Radius_OuterStatorYoke - EPS
femm.mi_selectsegment(+R, 0)
femm.mi_selectsegment(-R, 0)
femm.mi_setsegmentprop("pbc6", 4, False, False, 10)
femm.mi_clearselected()
# Air Gap Boundary for Rotor Motion #3
# inner_angle = 0; outer_angle = 0
# femm.mi_addboundprop('AGB4RM', 0,0,0, 0,0,0,0,0, 6, inner_angle, outer_angle)
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_selectarcsegment(0,-R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# femm.mi_selectarcsegment(0,R)
# femm.mi_setarcsegmentprop(5, "AGB4RM", False, 9)
# femm.mi_clearselected()
# Other arc-segment-specific mesh constraints are already done in draw_model()
def draw_model(self, fraction=1):
from shapely.geometry import LineString
from shapely.geometry import Point
im = self.im
origin = Point(0, 0)
Stator_Sector_Angle = 2 * pi / im.Qs * 0.5
Rotor_Sector_Angle = 2 * pi / im.Qr * 0.5
def mirror_and_copyrotate(Q, Radius, fraction):
# Mirror
femm.mi_selectcircle(0, 0, Radius + EPS,
SELECT_ALL) # this EPS is sometimes necessary to select the arc at Radius.
femm.mi_mirror2(0, 0, -Radius, 0, SELECT_ALL)
# Rotate
femm.mi_selectcircle(0, 0, Radius + EPS, SELECT_ALL)
femm.mi_copyrotate2(0, 0, 360. / Q, int(Q) / fraction, SELECT_ALL)
def create_circle(p, radius):
return p.buffer(radius).boundary
def get_node_at_intersection(c, l): # this works for c and l having one intersection only
i = c.intersection(l)
# femm.mi_addnode(i.coords[0][0], i.coords[0][1])
return i.coords[0][0], i.coords[0][1]
def draw_arc(p1, p2, angle, maxseg=1):
femm.mi_drawarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, maxseg) # [deg]
def add_arc(p1, p2, angle, maxseg=1):
femm.mi_addarc(p1[0], p1[1], p2[0], p2[1], angle / pi * 180, maxseg) # [deg]
def draw_line(p1, p2):
femm.mi_drawline(p1[0], p1[1], p2[0], p2[1])
def add_line(p1, p2):
femm.mi_addsegment(p1[0], p1[1], p2[0], p2[1])
def get_postive_angle(p, origin=(0, 0)):
# using atan loses info about the quadrant
return atan(abs((p[1] - origin[1]) / (p[0] - origin[0])))
''' Part: Stator '''
# Draw Points as direction of CCW
# P1
P1 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
# P2
# Parallel to Line? No they are actually not parallel
P2_angle = Stator_Sector_Angle - im.Angle_StatorSlotOpen * 0.5 / 180 * pi
k = -tan(P2_angle) # slope
l_sector_parallel = LineString([(0, 0), (-im.Radius_OuterStatorYoke, -im.Radius_OuterStatorYoke * k)])
c = create_circle(origin, im.Radius_OuterRotor + im.Length_AirGap)
P2 = get_node_at_intersection(c, l_sector_parallel)
draw_arc(P2, P1, get_postive_angle(P2))
# P3
c = create_circle(origin, im.Radius_OuterRotor + im.Length_AirGap + im.Width_StatorTeethHeadThickness)
P3 = get_node_at_intersection(c, l_sector_parallel)
draw_line(P2, P3)
# P4
c = create_circle(origin,
im.Radius_OuterRotor + im.Length_AirGap + im.Width_StatorTeethHeadThickness + im.Width_StatorTeethNeck)
l = LineString(
[(0, 0.5 * im.Width_StatorTeethBody), (-im.Radius_OuterStatorYoke, 0.5 * im.Width_StatorTeethBody)])
P4 = get_node_at_intersection(c, l)
draw_line(P3, P4)
# P5
c = create_circle(origin, im.Radius_InnerStatorYoke)
P5 = get_node_at_intersection(c, l)
draw_line(P4, P5)
# P6
k = -tan(Stator_Sector_Angle)
l_sector = LineString([(0, 0), (-im.Radius_OuterStatorYoke, -im.Radius_OuterStatorYoke * k)])
P6 = get_node_at_intersection(c, l_sector)
draw_arc(P6, P5, Stator_Sector_Angle - get_postive_angle(P5))
# P7
c = create_circle(origin, im.Radius_OuterStatorYoke)
P7 = get_node_at_intersection(c, l_sector)
# draw_line(P6, P7)
# P8
P8 = (-im.Radius_OuterStatorYoke, 0)
# draw_arc(P7, P8, Stator_Sector_Angle)
# draw_line(P8, P1)
# P_Coil
l = LineString([(P3[0], P3[1]), (P3[0], im.Radius_OuterStatorYoke)])
P_Coil = get_node_at_intersection(l_sector, l)
draw_line(P4, P_Coil)
draw_line(P6, P_Coil)
mirror_and_copyrotate(im.Qs, im.Radius_OuterStatorYoke, fraction)
''' Part: Rotor '''
# Draw Points as direction of CCW
# P1
# femm.mi_addnode(-im.Radius_Shaft, 0)
P1 = (-im.Radius_Shaft, 0)
# P2
c = create_circle(origin, im.Radius_Shaft)
# Line: y = k*x, with k = -tan(2*pi/im.Qr*0.5)
P2_angle = P3_anlge = Rotor_Sector_Angle
k = -tan(P2_angle)
l_sector = LineString([(0, 0), (-im.Radius_OuterStatorYoke, -im.Radius_OuterStatorYoke * k)])
P2 = get_node_at_intersection(c, l_sector)
# draw_arc(P2, P1, P2_angle)
# P3
c = create_circle(origin, im.Radius_OuterRotor)
P3 = get_node_at_intersection(c, l_sector)
# draw_line(P2, P3)
# P4
l = LineString([(-im.Location_RotorBarCenter, 0.5 * im.Width_RotorSlotOpen),
(-im.Radius_OuterRotor, 0.5 * im.Width_RotorSlotOpen)])
P4 = get_node_at_intersection(c, l)
draw_arc(P3, P4, P3_anlge - get_postive_angle(P4))
# P5
p = Point(-im.Location_RotorBarCenter, 0)
c = create_circle(p, im.Radius_of_RotorSlot)
P5 = get_node_at_intersection(c, l)
draw_line(P4, P5)
# P6
# femm.mi_addnode(-im.Location_RotorBarCenter, im.Radius_of_RotorSlot)
P6 = (-im.Location_RotorBarCenter, im.Radius_of_RotorSlot)
draw_arc(P6, P5, 0.5 * pi - get_postive_angle(P5, c.centroid.coords[0]))
# constraint to reduce element number
femm.mi_selectarcsegment(P6[0], P6[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
# P7
# femm.mi_addnode(-im.Location_RotorBarCenter2, im.Radius_of_RotorSlot2)
P7 = (-im.Location_RotorBarCenter2, im.Radius_of_RotorSlot2)
draw_line(P6, P7)
# P8
# femm.mi_addnode(-im.Location_RotorBarCenter2+im.Radius_of_RotorSlot2, 0)
P8 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
draw_arc(P8, P7, 0.5 * pi)
# draw_line(P8, P1)
# constraint to reduce element number
femm.mi_selectarcsegment(P8[0], P8[1])
femm.mi_setarcsegmentprop(8, "<None>", False, 100)
femm.mi_clearselected()
# P_Bar
P_Bar = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
draw_arc(P5, P_Bar, get_postive_angle(P5))
# add_line(P_Bar, P8)
mirror_and_copyrotate(im.Qr, im.Radius_OuterRotor, fraction)
# Boundary
if fraction == 1:
femm.mi_drawarc(im.Radius_Shaft, 0, -im.Radius_Shaft, 0, 180, 20) # do not use too-small segments for the boundary, to avoid an overly fine mesh (this setting has no effect here)
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 20)
femm.mi_drawarc(im.Radius_OuterStatorYoke, 0, -im.Radius_OuterStatorYoke, 0, 180, 20)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 20)
elif fraction == 4:
femm.mi_drawarc(-im.Radius_Shaft, 0, 0, -im.Radius_Shaft, 90, 10)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, 0, -im.Radius_OuterStatorYoke, 90, 10)
femm.mi_selectrectangle(-EPS - im.Radius_Shaft, EPS, EPS - im.Radius_OuterStatorYoke,
im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_selectrectangle(EPS, -EPS - im.Radius_Shaft, im.Radius_OuterStatorYoke,
EPS - im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_deleteselected()
# between the 2nd and 3rd quarters
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
add_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
add_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
add_line(p1, p2)
# between 3rd and 4th quarters
p1 = (0, -im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2)
p2 = (0, -im.Radius_Shaft)
add_line(p1, p2)
p2 = (0, -im.Location_RotorBarCenter - im.Radius_of_RotorSlot)
add_line(p1, p2)
p1 = (0, -im.Radius_OuterRotor - 0.5 * im.Length_AirGap)
draw_line(p1, p2)
p2 = (0, -im.Radius_OuterRotor - im.Length_AirGap)
draw_line(p1, p2)
p1 = (0, -im.Radius_OuterStatorYoke)
add_line(p1, p2)
elif fraction == 2:
femm.mi_drawarc(-im.Radius_Shaft, 0, im.Radius_Shaft, 0, 180, 15)
femm.mi_drawarc(-im.Radius_OuterStatorYoke, 0, im.Radius_OuterStatorYoke, 0, 180, 15)
femm.mi_selectrectangle(EPS - im.Radius_OuterStatorYoke, EPS, -EPS + im.Radius_OuterStatorYoke,
EPS + im.Radius_OuterStatorYoke, SELECT_ALL)
femm.mi_deleteselected()
# between the 2nd and 3rd quarters
p1 = (-im.Location_RotorBarCenter2 + im.Radius_of_RotorSlot2, 0)
p2 = (-im.Radius_Shaft, 0)
add_line(p1, p2)
p2 = (-im.Location_RotorBarCenter - im.Radius_of_RotorSlot, 0)
add_line(p1, p2)
p1 = (-im.Radius_OuterRotor - 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
draw_line(p1, p2)
p2 = (-im.Radius_OuterRotor - im.Length_AirGap, 0)
draw_line(p1, p2)
p1 = (-im.Radius_OuterStatorYoke, 0)
add_line(p1, p2)
# between the 1st and 4th quarters
p1 = (+im.Location_RotorBarCenter2 - im.Radius_of_RotorSlot2, 0)
p2 = (+im.Radius_Shaft, 0)
add_line(p1, p2)
p2 = (+im.Location_RotorBarCenter + im.Radius_of_RotorSlot, 0)
add_line(p1, p2)
p1 = (+im.Radius_OuterRotor + 0.5 * im.Length_AirGap,
0) # for later extending for moverotate with anti-periodic boundary condition
draw_line(p1, p2)
p2 = (+im.Radius_OuterRotor + im.Length_AirGap, 0)
draw_line(p1, p2)
p1 = (+im.Radius_OuterStatorYoke, 0)
add_line(p1, p2)
else:
raise Exception('not supported fraction = %d' % (fraction))
# Air Gap Boundary for Rotor Motion #1
# R = im.Radius_OuterRotor+0.6*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
# R = im.Radius_OuterRotor+0.4*im.Length_AirGap
# femm.mi_drawarc(R,0, -R,0, 180, 5)
# femm.mi_drawarc(-R,0, R,0, 180, 5)
def model_rotor_rotate(self, time):
if self.deg_per_step != 0.0:
# The previous approach was to open the last FEM file and rotate its model by deg_per_step; rotor_position_in_deg was not needed at all!
# We also tried the AirGapBoundary (recommended by <NAME>): rotating the rotor then requires no re-meshing, but the computed force is inaccurate (the torque is accurate).
# Now we open the .fem file at position 0, rotate it, and save-as, so we no longer have to keep re-opening files.
femm.mi_selectgroup(100) # this only select the block labels
femm.mi_selectgroup(101)
femm.mi_selectcircle(0, 0, self.im.Radius_OuterRotor + EPS,
SELECT_ALL) # this selects the nodes, segments, arcs
femm.mi_moverotate(0, 0, self.deg_per_step)
# femm.mi_zoomnatural()
# rotor current
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
femm.mi_modifycircprop(circuit_name, 1, self.dict_rotor_current_function[i](time))
# stator current
femm.mi_modifycircprop('U-GrpAC', 1, self.dict_stator_current_function[0](time))
femm.mi_modifycircprop('V-GrpAC', 1, self.dict_stator_current_function[1](time))
femm.mi_modifycircprop('W-GrpAC', 1, self.dict_stator_current_function[2](time))
femm.mi_modifycircprop('U-GrpBD', 1, self.dict_stator_current_function[3](time))
femm.mi_modifycircprop('V-GrpBD', 1, self.dict_stator_current_function[4](time))
femm.mi_modifycircprop('W-GrpBD', 1, self.dict_stator_current_function[5](time))
def run_rotating_static_FEA(self): # deg_per_step is key parameter for this function
# STATIC_RUN_PERIOD = 180 # deg
# STATIC_RUN_PERIOD = 4*4*900 # deg
STATIC_RUN_PERIOD = 2 * 90 # deg
self.flag_static_solver = True
self.flag_eddycurrent_solver = False
femm.openfemm(True) # bHide # False for debug
femm.newdocument(0) # magnetic
self.freq = 0 # static
self.stack_length = self.im.l_st * 1
self.probdef()
if self.deg_per_step == 0.0:
print('Locked Rotor! Run 40 steps for one slip period.')
self.im.update_mechanical_parameters(syn_freq=0.0)
# read currents from previous ec solve
self.dict_rotor_current_function, self.dict_stator_current_function = self.read_current_from_EC_FEA() # DriveW_Freq and slip_freq_breakdown_torque are used here
# debug current waveform
# from pylab import plt
# t = np.arange(0, 0.0005*STATIC_RUN_PERIOD/90, 0.000005)
# plt.figure()
# for ir in self.dict_rotor_current_function:
# plt.plot(t, [ir(el) for el in t])
# plt.figure()
# plt.plot(t, [self.dict_stator_current_function[0](el) for el in t])
# plt.plot(t, [self.dict_stator_current_function[3](el) for el in t])
# plt.show()
# quit()
# self.time = 0.0
# self.rotor_position_in_deg = 0.0
self.add_material()
# self.draw_model()
self.vangogh.draw_model()
self.add_block_labels_static_solver()
# # debug here
# femm.mi_maximize()
# femm.mi_zoomnatural()
# return
self.output_file_name = self.get_output_file_name()
if self.deg_per_step == 0.0:
time = 0.0 # initialize the accumulated time (it was previously referenced before assignment)
output_file_name = None # so that last_out_file_name is None on the first pass
for i in range(40): # 40 steps over one slip period (locked-rotor test)
time += 1.0 / self.im.DriveW_Freq / 40.
# self.rotor_position_in_deg = i # used in file naming
print(i, time, 's')
last_out_file_name = output_file_name
output_file_name = self.output_file_name + '%04d' % (i)
if os.path.exists(output_file_name + '.ans'):
print('.ans file exists. skip this fem file: %s' % (output_file_name))
continue
if last_out_file_name != None:
femm.opendocument(last_out_file_name + '.fem')
self.model_rotor_rotate(0.0)
femm.mi_saveas(output_file_name + '.fem') # locked-rotor test
else: # rotating static FEA
self.list_rotor_position_in_deg = np.arange(0, STATIC_RUN_PERIOD, self.deg_per_step)
self.list_name = ['%04d' % (10 * el) for el in self.list_rotor_position_in_deg] # with no suffix
femm.mi_saveas(self.output_file_name + self.list_name[0] + '.fem')
for rotor_position_in_deg, name in zip(self.list_rotor_position_in_deg[1:], # skip the intial position
self.list_name[1:]):
fem_file = self.output_file_name + name + '.fem'
time = np.abs(rotor_position_in_deg / 180 * pi / self.im.Omega) # DEBUG: chased this bug for ages -- it was simply the wrong speed being used! The mechanical speed (Omega) must be used here.
self.model_rotor_rotate(time)
femm.mi_saveas(fem_file)
print(time * 1e3, 'ms', rotor_position_in_deg, 'deg', self.im.Omega, 'rad/s', self.im.Omega / 2 / np.pi,
's^-1')
femm.closefemm()
def parallel_solve(self, dir_run=None, number_of_instantces=5, bool_watchdog_postproc=True,
bool_run_in_JMAG_Script_Editor=False):
'''[Parallel solve] This was not thought through at first -- rotating the rotor is, surprisingly, not done in parallel...
Keyword Arguments:
dir_run {[type]} -- [use self.dir_run for the static field, self.dir_run_sweeping for the eddy-current field] (default: {None})
Do not use spaces in dir_run!!!
Do not use spaces in dir_run!!!
Do not use spaces in dir_run!!!
number_of_instantces {number} -- [how many instances?] (default: {5})
bool_watchdog_postproc {bool} -- [sometimes we prefer not to use the watchdog; this becomes clear further below] (default: {True})
'''
if dir_run == None:
dir_run = self.dir_run
if bool_run_in_JMAG_Script_Editor: # for running script in JMAG, we need a .bat file wrapper for the subprocess calls.
# os.system('python "%smethod_parallel_solve_4jmag.py" %s' % (self.dir_codes, dir_run))
with open('temp.bat', 'w') as f:
if '01' in self.im.fea_config_dict['pc_name']: # python is not added to path in Severson01
f.write(
'"C:/Program Files/JMAG-Designer17.1/python2.7/python" "%smethod_parallel_solve_4jmag.py" %s %d' % (
self.dir_codes, dir_run, number_of_instantces))
else:
f.write('python "%smethod_parallel_solve_4jmag.py" %s %d' % (
self.dir_codes, dir_run, number_of_instantces))
os.startfile('temp.bat')
# os.remove('temp.bat')
else: # run script in other platforms such as command prompt
# raise Exception('Please explicitly specify bool_run_in_JMAG_Script_Editor.')
procs = []
for i in range(number_of_instantces):
# proc = subprocess.Popen([sys.executable, 'parasolve.py', '{}in.csv'.format(i), '{}out.csv'.format(i)], bufsize=-1)
proc = subprocess.Popen([sys.executable, 'parasolve.py', str(i), str(number_of_instantces), dir_run],
bufsize=-1)
procs.append(proc)
for proc in procs:
proc.wait() # return exit code
# To collect static results, use a while loop instead; it is more straightforward
if self.flag_static_solver == True:
if self.im.fea_config_dict['flag_optimization'] == False: # for optimization, skip this watchdog-style post-processing and post-process in parallel right after solving
self.keep_collecting_static_results_for_optimization(self.list_name, self.list_rotor_position_in_deg)
return
# TODO: loop for post_process results
# search for python: constantly check for new .ans files
if self.im.fea_config_dict['pc_name'] == 'Y730':
print('Initialize watchdog...')
else:
print('watchdog is not installed on servers, quit.')
return
return 'Testing JMAG with no watchdog'
if bool_watchdog_postproc:
import time
from watchdog.observers import Observer
from watchdog.events import FileSystemEventHandler
class MyHandler(FileSystemEventHandler):
def __init__(self, solver):
self.count_ans = 0
self.bool_stop = False
self.solver = solver
super(FileSystemEventHandler, self).__init__()
def on_created(self, event):
if '.ans' in event.src_path:
self.count_ans += 1
if self.solver.flag_eddycurrent_solver == True:
if self.count_ans == len(self.solver.freq_range):
print('\t[Eddy Current Solver] count_ans matches.')
self.solver.show_results(bool_plot=True)
self.bool_stop = True
if self.solver.flag_static_solver == True:
# write data to file per 10 .ans files
if self.count_ans == self.solver.number_ans or self.count_ans == int(
0.5 * self.solver.number_ans):
print('[Static Solver] count_ans matches for %d' % (self.count_ans))
if self.solver.has_results():
self.solver.show_results(bool_plot=True)
self.bool_stop = True
else:
self.solver.show_results(bool_plot=False)
event_handler = MyHandler(self)
observer = Observer()
observer.schedule(event_handler, path=dir_run, recursive=False)
observer.start()
try:
while not event_handler.bool_stop:
time.sleep(1)
except KeyboardInterrupt:
observer.stop()
else:
print('after viewing the plot, watchdog is killed.')
observer.stop()
observer.join()
def read_current_from_EC_FEA(self):
print('Read rotor current conditions from %s...' % ('JMAG' if self.flag_read_from_jmag else 'FEMM'))
if self.flag_read_from_jmag == True:
return self.read_current_conditions_from_JMAG()
else:
return self.read_current_conditions_from_FEMM()
def read_current_conditions_from_FEMM(self):
self.list_rotor_current_amp = []
# print 'femm_rotor_current_conditions000' # for noBar test (iron loss with only stator current excitation)
# data = np.loadtxt(self.dir_run_sweeping + 'femm_rotor_current_conditions.txt', unpack=True, usecols=(0,1))
data = np.loadtxt(self.dir_run_sweeping + 'ind999Freq.csv', unpack=True, skiprows=4, delimiter=",")
print(data)
# quit()
dict_rotor_current_function = []
print('[FEMM] Rotor Current')
for item in zip(data[0], data[1]):
item = item[0] + 1j * item[1]
item *= -1j # -1j is added to be consistent with JMAG (whose Current Source uses sine function)
amp = np.sqrt(item.imag ** 2 + item.real ** 2)
phase = np.arctan2(item.real, -item.imag) # atan2(y, x), y=a, x=-b
print('\tDEBUG:', amp, self.im.slip_freq_breakdown_torque, phase)
dict_rotor_current_function.append(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.slip_freq_breakdown_torque * t + phase))
print('\t', item, amp, phase / pi * 180)
self.list_rotor_current_amp.append(amp)
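# Worked example of the conversion above (illustrative numbers only): a FEMM phasor a + j*b is
# first rotated by -1j (to match JMAG's sine-based current source), giving b - j*a. For
# item = 10 + 0j the rotation yields 0 - 10j, so amp = 10 and phase = arctan2(0, 10) = 0,
# and the reconstructed current is i(t) = 10*sin(2*pi*slip_freq*t).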
dict_stator_current_function = [None] * 6
# Old way (only valid for p=2, ps=1 full pitch DPNV winding)
# print('[FEMM] Stator Current') # -1j is added to be consistent with JMAG
# self.dict_stator_current_from_EC_FEA = [ ('bU', complex(eval( '-1j*%g' %(self.im.BeariW_CurrentAmp) )) ),
# ('bV', complex(eval( '-1j*%g*(-0.5+1j*0.8660254037844386)'%(self.im.BeariW_CurrentAmp) )) ),
# ('bW', complex(eval( '-1j*%g*(-0.5-1j*0.8660254037844386)'%(self.im.BeariW_CurrentAmp) )) ),
# ('dU', complex(eval( '-1j*%g' %(self.im.DriveW_CurrentAmp) )) ),
# ('dV', complex(eval( '-1j*%g*(-0.5+1j*0.8660254037844386)'%(self.im.DriveW_CurrentAmp) )) ),
# ('dW', complex(eval( '-1j*%g*(-0.5-1j*0.8660254037844386)'%(self.im.DriveW_CurrentAmp) )) )]
# self.dict_stator_current_from_EC_FEA = OrderedDict(self.dict_stator_current_from_EC_FEA)
# dict_stator_current_function = []
# for key, item in self.dict_stator_current_from_EC_FEA.items():
# amp = np.sqrt(item.imag**2 + item.real**2)
# phase = np.arctan2(item.real, -item.imag) # atan2(y, x), y=a, x=-b
# dict_stator_current_function.append(lambda t, amp=amp, phase=phase: amp * sin(2*pi*self.im.DriveW_Freq*t + phase))
# print('\t', key, item, amp, phase/pi*180)
# New way (General DPNV implementation)
npb = self.im.wily.number_parallel_branch
nwl = self.im.wily.number_winding_layer # number of winding layers
if self.im.spec_input_dict['DPNV_or_SEPA'] == False:
# either a separate winding or a (4 pole) DPNV winding implemented as a separate winding
ampD = 0.5 * (self.DriveW_CurrentAmp / npb + self.BeariW_CurrentAmp) # written this way so the code works for both 4-pole and 2-pole machines; substitute values to check
ampB = -0.5 * (
self.DriveW_CurrentAmp / npb - self.BeariW_CurrentAmp) # watch the sign: there is an extra minus sign before ampB where the DriveW circuit is invoked below
if self.im.wily.bool_3PhaseCurrentSource != True:
raise Exception('Logic Error Detected.')
else:
# [B]: DriveW_CurrentAmp is set.
# case: DPNV as an actual two layer winding
ampD = self.im.DriveW_CurrentAmp / npb
ampB = self.im.BeariW_CurrentAmp
if self.im.wily.bool_3PhaseCurrentSource != False:
raise Exception('Logic Error Detected.')
phase_shift_drive = -120 / 180. * np.pi if self.im.wily.CommutatingSequenceD == 1 else 120 / 180. * np.pi
phase_shift_beari = -120 / 180. * np.pi if self.im.wily.CommutatingSequenceB == 1 else 120 / 180. * np.pi
freq = self.im.DriveW_Freq
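# Each coil-group current below is the superposition of a drive (torque) term and a bearing (suspension) term:
# Group A/C coils carry ampD*sin(...) - ampB*sin(...), Group B/D coils carry ampD*sin(...) + ampB*sin(...),
# with each phase shifted by a multiple of +/-120 deg according to the commutating sequence set above.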
# DPNV Group A/C
dict_stator_current_function[0] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=0 * phase_shift_drive, phaseB=0 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) - ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[1] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=1 * phase_shift_drive, phaseB=1 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) - ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[2] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=2 * phase_shift_drive, phaseB=2 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) - ampB * sin(2 * pi * freq * t + phaseB)
# DPNV Group B/D
dict_stator_current_function[3] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=0 * phase_shift_drive, phaseB=0 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) + ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[4] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=1 * phase_shift_drive, phaseB=1 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) + ampB * sin(2 * pi * freq * t + phaseB)
dict_stator_current_function[5] = lambda t, freq=freq, ampD=ampD, ampB=ampB, phaseD=2 * phase_shift_drive, phaseB=2 * phase_shift_beari: ampD * sin(2 * pi * freq * t + phaseD) + ampB * sin(2 * pi * freq * t + phaseB)
return dict_rotor_current_function, dict_stator_current_function
def read_current_conditions_from_JMAG(self):
try:
print('The breakdown torque slip frequency is', self.im.slip_freq_breakdown_torque)
except:
raise Exception('no breakdown torque slip frequency available.')
self.dict_rotor_current_from_EC_FEA = []
self.dict_stator_current_from_EC_FEA = []
rotor_phase_name_list = "ABCDEFGHIJKLMNOPQRSTUVWXYZ"
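# Rotor circuits in the JMAG CSV are labelled "r" + letter + pole digit (rA1, rB1, ...); the letters
# are drawn from the alphabet string above while the sweep results are parsed below.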
with open(self.im.csv_previous_solve, 'r') as f:
read_iterator = csv_reader(f, skipinitialspace=True)
for row in self.whole_row_reader(read_iterator):
try:
float(row[0])
except:
continue
else:
if np.abs(self.im.slip_freq_breakdown_torque - float(row[0])) < 1e-3:
# print row
''' Rotor Current '''
beginning_column = 1 + 2 * 3 * 2 # title + drive/bearing * 3 phase * real/imag
for i in range(0, int(self.im.Qr / self.im.DriveW_poles)):
natural_i = i + 1
current_phase_column = beginning_column + i * int(self.im.DriveW_poles) * 2
for j in range(int(self.im.DriveW_poles)):
natural_j = j + 1
re = float(row[current_phase_column + 2 * j])
im = float(row[current_phase_column + 2 * j + 1])
self.dict_rotor_current_from_EC_FEA.append(
("r%s%d" % (rotor_phase_name_list[i], natural_j), (re, im)))
''' Stator Current '''
beginning_column = 1 # title column is not needed
for i, str_phase in zip(list(range(0, 12, 2)), ['2A', '2B', '2C', '4A', '4B', '4C']): # 3 phase
natural_i = i + 1
current_phase_column = beginning_column + i
re = float(row[current_phase_column])
im = float(row[current_phase_column + 1])
self.dict_stator_current_from_EC_FEA.append((str_phase, (re, im)))
print('[JMAG] Rotor Current')
self.list_rotor_current_amp = []
self.dict_rotor_current_from_EC_FEA = OrderedDict(self.dict_rotor_current_from_EC_FEA)
dict_rotor_current_function = []
for key, item in self.dict_rotor_current_from_EC_FEA.items():
amp = np.sqrt(item[1] ** 2 + item[0] ** 2)
phase = np.arctan2(item[0], -item[1]) # atan2(y, x), y=a, x=-b
if '1' in key:
dict_rotor_current_function.append(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.slip_freq_breakdown_torque * t + phase))
print('\t', key, item, amp, phase / pi * 180)
self.list_rotor_current_amp.append(amp)
print('[JMAG] Stator Current')
self.dict_stator_current_from_EC_FEA = OrderedDict(self.dict_stator_current_from_EC_FEA)
self.dict_stator_current_function_wrong = []
dict_stator_current_function = []
for key, item in self.dict_stator_current_from_EC_FEA.items():
amp = np.sqrt(item[1] ** 2 + item[0] ** 2)
phase = np.arctan2(item[0], -item[1]) # atan2(y, x), y=a, x=-b
self.dict_stator_current_function_wrong.append(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.DriveW_Freq * t + phase))
# dict_stator_current_function.append(lambda t, amp=amp, phase=phase: amp * sin(2*pi*self.im.slip_freq_breakdown_torque*t + phase))
dict_stator_current_function.append(
lambda t, amp=amp, phase=phase: amp * sin(2 * pi * self.im.DriveW_Freq * t + phase))
print('\t', key, item, amp, phase / pi * 180)
# dict_stator_current_function =[self.dict_stator_current_function_wrong[0],
# self.dict_stator_current_function_wrong[2],
# self.dict_stator_current_function_wrong[1],
# self.dict_stator_current_function_wrong[3],
# self.dict_stator_current_function_wrong[5],
# self.dict_stator_current_function_wrong[4],]
# print dict_stator_current_function
if False:
from pylab import show, figure
t = np.arange(0, 0.5, 1e-4)
# t = np.arange(0, 0.5, 1e-3) # down-sampling effect of TranFEAwi2TSS; a step of 5e-2 would be too coarse
ax1 = figure(1).gca()
ax2 = figure(2).gca()
ax = ax1
rotor_current_one_pole = np.zeros(t.__len__())
for ind, func in enumerate(dict_rotor_current_function):
ax.plot(t, [func(el) for el in t], label=ind)
rotor_current_one_pole += np.array([func(el) for el in t])
ax.plot(t, rotor_current_one_pole, label='one pole')
ax = ax2
iabc_wrong = []
for ind, func in enumerate(self.dict_stator_current_function_wrong):
if ind == 3 or ind == 0:
ax.plot(t, [-func(el) for el in t], label=str(ind) + 'wrong-reversed')
iabc_wrong.append(np.array([func(el) for el in t]))
ax = ax1
iabc = []
for ind, func in enumerate(self.dict_stator_current_function):
ax.plot(t, [func(el) for el in t], label=ind)
iabc.append(np.array([func(el) for el in t]))
# amplitude-invariant transform - the alpha-beta frame is actually rotating, because iabs is in rotor ref frame (slip frequency)
ialbe = 2 / 3. * np.dot(np.array([[1, -0.5, -0.5],
[0, sqrt(3) / 2, -sqrt(3) / 2]]), np.array([iabc[3], iabc[4], iabc[5]]))
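# Amplitude-invariant Clarke transform: [i_alpha, i_beta] = 2/3 * [[1, -1/2, -1/2], [0, sqrt(3)/2, -sqrt(3)/2]] . [i_a, i_b, i_c].
# Since iabc is expressed in the rotor reference frame (slip frequency), this alpha-beta frame itself rotates.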
print(np.shape(np.array([iabc[3], iabc[4], iabc[5]])))
print(np.shape(ialbe))
print(self.im.omega / 2 / pi)
ids = []
iqs = []
''' Speed negation is done by ? '''
iabc_stationary_2_tor = []
iabc_stationary_2_sus = []
for i in range(len(t)):
# theta = -self.im.omega * t[i]
# theta = -self.im.DriveW_Freq*2*pi * t[i]
# theta = self.im.omega * t[i]
theta = self.im.DriveW_Freq * 2 * pi * t[i]
# turn into stationary dq frame
temp = np.dot(np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]), np.array([[ialbe[0][i]],
[ialbe[1][i]]]))
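# Standard 2x2 rotation by theta: maps the rotating alpha-beta quantities into the stationary frame.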
ids.append(temp[0])
iqs.append(temp[1])
iabc_stationary_2_tor.append(self.transformed_dict_stator_current_function(3, t[i], theta))
iabc_stationary_2_sus.append(self.transformed_dict_stator_current_function(0, t[i], theta))
ids = np.array(ids).T[0]
iqs = np.array(iqs).T[0]
# print ids
# print iqs
print('idq', np.shape(ids), np.shape(iqs))
# ax_r.plot(t, idq[0], label='i_ds')
# ax_r.plot(t, idq[1], label='i_qs')
# ax_r.plot(t, ialbe[0], label='alpha')
# ax_r.plot(t, ialbe[1], label='beta')
# transform to phase coordinates
ax = ax2
iabc_stationary = 1.5 * np.dot(np.array([[2 / 3., 0],
[-1 / 3., sqrt(3) / 3],
[-1 / 3., -sqrt(3) / 3]]), np.array([ids, iqs]))
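# Inverse Clarke transform back to phase quantities; the 1.5 factor cancels the 2/3 amplitude-invariant
# scaling, i.e. 1.5*[[2/3, 0], [-1/3, sqrt(3)/3], [-1/3, -sqrt(3)/3]] = [[1, 0], [-1/2, sqrt(3)/2], [-1/2, -sqrt(3)/2]].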
ax.plot(t, iabc_stationary[0], label='i_a')
# ax.plot(t, iabc_stationary[1], label='i_b')
# ax.plot(t, iabc_stationary[2], label='i_c')
ax.plot(t, [el[0] for el in iabc_stationary_2_sus], label='i_a_2_sus')
ax.plot(t, [el[0] for el in iabc_stationary_2_tor], label='i_a_2_tor')
# ax.plot(t, [el[1]+5 for el in iabc_stationary_2_tor], label='i_b_2')
# ax.plot(t, [el[2]+5 for el in iabc_stationary_2_tor], label='i_c_2')
ax1.legend()
ax2.legend()
show()
quit()
return dict_rotor_current_function, dict_stator_current_function
def transformed_dict_stator_current_function(self, index_phase_A, time, theta):
ia_slip_freq = self.dict_stator_current_function[index_phase_A](time)
ib_slip_freq = self.dict_stator_current_function[index_phase_A + 1](time)
ic_slip_freq = self.dict_stator_current_function[index_phase_A + 2](time)
iabc_vector_slip_freq = np.array([[ia_slip_freq, ib_slip_freq, ic_slip_freq]]).T
ialbe = 2 / 3. * np.dot(np.array([[1, -0.5, -0.5],
[0, sqrt(3) / 2, -sqrt(3) / 2]]), iabc_vector_slip_freq)
# print 'ialbe', ialbe
# turn into stationary dq frame
idq = np.dot(np.array([[np.cos(theta), -np.sin(theta)],
[np.sin(theta), np.cos(theta)]]), ialbe)
# print 'idq', idq
iabc_stationary = 1.5 * np.dot(np.array([[2 / 3., 0],
[-1 / 3., sqrt(3) / 3],
[-1 / 3., -sqrt(3) / 3]]), idq)
return iabc_stationary[0][0], iabc_stationary[1][0], iabc_stationary[2][0]
def get_air_gap_B(self, number_of_points=360):
im = self.im
femm.opendocument(self.output_file_name + '.fem')
femm.mi_loadsolution()
list_B_magnitude = []
R = im.Radius_OuterRotor + 0.25 * im.Length_AirGap
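# Sample the flux density on a circle a quarter of the air-gap length above the rotor surface,
# one point per degree when number_of_points=360.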
for i in range(number_of_points):
THETA = i / 180.0 * pi
X = R * cos(THETA)
Y = R * sin(THETA)
B_vector_complex = femm.mo_getb(X, Y)
B_X_complex = B_vector_complex[0]
B_Y_complex = B_vector_complex[1]
B_X_real = np.real(B_X_complex)
B_Y_real = np.real(B_Y_complex)
# Assume the magnitude is all due to radial component
B_magnitude = sqrt(B_X_real ** 2 + B_Y_real ** 2)
inner_product = B_X_real * X + B_Y_real * Y
list_B_magnitude.append(B_magnitude * copysign(1, inner_product))
return list_B_magnitude
def probdef(self):
# femm.smartmesh(False) <- This will not work due to a bug in femm.__init__ # let mi_smartmesh decide. You must turn it off in parasolver.py
femm.callfemm_noeval('smartmesh(0)') # call this after probdef
femm.mi_probdef(self.freq, 'millimeters', 'planar', 1e-8, # must < 1e-8
self.stack_length, 18,
1) # The acsolver parameter (default: 0) specifies which solver is to be used for AC problems: 0 for successive approximation, 1 for Newton.
# 1 for 'I intend to try the acsolver of Newton, as this is the default for JMAG@[Nonlinear Calculation] Setting Panel in the [Study Properties] Dialog Box'
# femm.callfemm_noeval('mi_smartmesh(0)') # call this after probdef
self.bool_automesh = False # setting to false gives no effect?
# femm.smartmesh(True) # let mi_smartmesh decide. You must turn it off in parasolver.py
# self.bool_automesh = True # setting to false gives no effect?
def add_material(self):
# mi_addmaterial('matname', mu x, mu y, H c, J, Cduct, Lam d, Phi hmax, lam fill, LamType, Phi hx, Phi hy, nstr, dwire)
femm.mi_getmaterial('Air')
femm.mi_getmaterial('Copper') # for coil
# femm.mi_getmaterial('18 AWG') # for coil
# femm.mi_getmaterial('Aluminum, 1100') # for bar?
# femm.mi_getmaterial('304 Stainless Steel') # for shaft?
# femm.mi_addmaterial('Air', 1, 1, 0, 0, 0, 0, 0, 1, 0, 0, 0);
# femm.mi_addmaterial('Aluminum', 1, 1, 0, 0, 35, 0, 0, 1, 0, 0, 0)
femm.mi_addmaterial('Aluminum', 1, 1, 0, 0, 47619047.61904761 * 1e-6, 0, 0, 1, 0, 0,
0)
# femm.mi_addmaterial('Aluminum', 1, 1, 0, 0, self.im.Bar_Conductivity * 1e-6, 0, 0, 1, 0, 0,
# 0) # [MS/m]
# femm.mi_addmaterial('Aluminum', 1, 1, 0, 0, 1/1.673e-2, 0, 0, 1, 0, 0, 0)
# femm.mi_addmaterial('LinearIron', 2000, 2000, 0, 0, 0, 0, 0, 1, 0, 0, 0);
if self.im.stator_iron_mat['core_material'] == 'M19Gauge29':
# femm.mi_getmaterial('M-19 Steel') # for Stator & Rotor Iron Cores (Nonlinear with B-H curve)
femm.mi_addmaterial('M19Gauge29', 0, 0, 0, 0, 0, 0.3556, 0,
0.95) # no lamination for testing consistency with JMAG
hdata, bdata = np.loadtxt(self.im.stator_iron_mat['core_bh_file'], unpack=True,
usecols=(0, 1))
for n in range(0, len(bdata)):
femm.mi_addbhpoint('M19Gauge29', bdata[n], hdata[n])
elif self.im.spec_input_dict['Steel'] == 'Arnon5':
# Arnon5 is 1/5 thick as M15, which is too thin to use and it is expensive as well
femm.mi_addmaterial('Arnon5-final', 0, 0, 0, 0, 0.0, 0.127, 0, 0.96)
BH = np.loadtxt(self.dir_codes + '../Arnon5/Arnon5-final.txt', unpack=True, usecols=(0, 1))
bdata = BH[1][1:] # if not skip the first point, there will be two (0,0) in FEMM software, reason unknown.
hdata = BH[0][1:] # if not skip the first point, there will be two (0,0) in FEMM software, reason unknown.
for n in range(0, len(bdata)):
femm.mi_addbhpoint('Arnon5-final', bdata[n], hdata[n])
elif self.im.spec_input_dict['Steel'] == 'M15':
femm.mi_addmaterial('My M-15 Steel', 0, 0, 0, 0, 0, 0.635, 0, 0.98)
BH = np.loadtxt(self.dir_codes + '../Arnon5/M-15-Steel-BH-Curve.txt', unpack=True, usecols=(0, 1))
bdata = BH[1]
hdata = BH[0]
for n in range(0, len(bdata)):
femm.mi_addbhpoint('My M-15 Steel', bdata[n], hdata[n])
if False:
# A more interesting material to add is the iron with a nonlinear
# BH curve. First, we create a material in the same way as if we
# were creating a linear material, except the values used for
# permeability are merely placeholders.
femm.mi_addmaterial('Arnon5', 0, 0, 0, 0, 0.0, 0.127, 0, 0.96)
# A set of points defining the BH curve is then specified.
BH = np.loadtxt(self.dir_codes + 'Arnon5_Kang_after_JMAG_Smoothed.txt', unpack=True, usecols=(0, 1))
bdata = BH[1]
hdata = BH[0]
for n in range(0, len(bdata)):
femm.mi_addbhpoint('Arnon5', bdata[n], hdata[n])
def get_output_file_name(self, booL_dir=True):
fname = '%s-%gHz' % (self.im.ID, self.freq)
if booL_dir == True:
self.output_file_name = self.dir_run + fname
return self.output_file_name
else:
return fname
def whole_row_reader(self, reader):
for row in reader:
yield row[:]
def has_results(self, dir_run=None):
# print 'self.freq', self.freq
if dir_run == None:
dir_run = self.dir_run
a = [f for f in os.listdir(dir_run) if '.ans' in f].__len__()
b = [f for f in os.listdir(dir_run) if '.fem' in f].__len__()
if a == 0:
if 'no_duplicates.txt' in os.listdir(dir_run):
return True # copying the FEMM result files over from the server directly also works
else:
return False
print('[FEMM.has_results] ans count: %d. fem count: %d.' % (a, b))
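# Results are complete only when every .fem model file has a matching .ans solution file.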
return a == b
def show_results(self, bool_plot=True):
if self.flag_eddycurrent_solver == True:
print('show results for eddy current solver')
return self.show_results_eddycurrent(bool_plot=bool_plot)
if self.flag_static_solver == True:
print('show results for static solver')
return self.show_results_static(bool_plot=bool_plot)
return None
def show_results_eddycurrent(self, bool_plot):
if self.fraction == 1:
self.fraction = 2 # this fixes a bug calling femm_integrate_4_current without calling run_frequency_sweeping before
raise Exception('You should initialize FEMM Solver with freq!=0')
ans_file_list = os.listdir(self.dir_run_sweeping)
ans_file_list = [f for f in ans_file_list if '.ans' in f]
femm.openfemm(True)
list_ans_file = []
list_torque = []
# write results to a data file
str_results = ''
for ind, f in enumerate(ans_file_list):
# femm.opendocument(self.dir_run_sweeping + f[:-4] + '.fem')
# femm.mi_loadsolution()
femm.opendocument(self.dir_run_sweeping + f)
# physical amount on rotor
femm.mo_groupselectblock(100)
femm.mo_groupselectblock(101)
Fx = femm.mo_blockintegral(18) # -- 18 x (or r) part of steady-state weighted stress tensor force
Fy = femm.mo_blockintegral(19) # --19 y (or z) part of steady-state weighted stress tensor force
torque = femm.mo_blockintegral(22) # -- 22 = Steady-state weighted stress tensor torque
femm.mo_clearblock()
# rotor current
# _ = self.femm_integrate_4_current()
# # TODO: this is for testing phase
# if float(f[3:-6]) == 3:
# print '\n', f[3:-6], 'Hz'
# for el in vals_results_rotor_current:
# print abs(el)
str_results += "%s %g %g %g\n" % (f[3:-6], torque, Fx, Fy)
list_ans_file.append(f)
list_torque.append(torque)
femm.mo_close()
with open(self.dir_run_sweeping + "eddycurrent_results.txt", "w") as stream:
stream.write(str_results)
# find breakdown torque and slip frequency that we are interested
# index, breakdown_torque = utility.get_max_and_index(list_torque)
# slip_freq_breakdown_torque = list_ans_file[index][3:-6]
# print("FEMM's breakdown data: %s Hz, %g Nm" % (slip_freq_breakdown_torque, breakdown_torque))
# self.im.update_mechanical_parameters(float(slip_freq_breakdown_torque))
# if self.im.slip_freq_breakdown_torque != float(slip_freq_breakdown_torque):
# raise Exception('[DEBUG] JMAG disagrees with FEMM (!= 3 Hz).')
# write rotor currents to file (using the .ans file at the breakdown torque, i.e. the maximum torque in the sweep)
index = int(np.argmax(list_torque))
self.femm_integrate_4_current(self.dir_run_sweeping + list_ans_file[index], self.fraction)
femm.closefemm()
def femm_integrate_4_current(self, fname, fraction, dir_output=None, returnData=False):
'''Make sure femm is already opened before calling this.
Returns:
None when returnData is False (rotor currents are written to file); otherwise a tuple of
(list of complex rotor bar currents from FEMM, stator slot area, rotor slot area).
'''
# get corresponding rotor current conditions for later static FEA
femm.opendocument(fname)
if True:
# physical amount of Cage
im = self.im
vals_results_rotor_current = []
# R = 0.5*(im.Location_RotorBarCenter + im.Location_RotorBarCenter2)
R = im.Location_RotorBarCenter # Since 5/23/2019
angle_per_slot = 2 * pi / im.Qr
THETA_BAR = pi - angle_per_slot + EPS # add EPS for the half bar
# print 'number of rotor_slot per partial model', self.rotor_slot_per_pole * int(4/fraction)
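# Walk around the rotor one slot pitch at a time, select the block at each bar centre and integrate
# over it; mo_blockintegral(7) returns the total current of the selected block.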
for i in range(self.rotor_slot_per_pole * int(4 / fraction)):
THETA_BAR += angle_per_slot
THETA = THETA_BAR
X = R * cos(THETA)
Y = R * sin(THETA)
femm.mo_selectblock(X, Y) # or you can select circuit rA rB ...
vals_results_rotor_current.append(femm.mo_blockintegral(7)) # integrate for current
femm.mo_clearblock()
# the other half bar of rA
THETA_BAR += angle_per_slot
THETA = THETA_BAR - 2 * EPS
X = R * cos(THETA)
Y = R * sin(THETA)
femm.mo_selectblock(X, Y)
vals_results_rotor_current.append(femm.mo_blockintegral(7)) # integrate for current
femm.mo_clearblock()
################################################################
# Also collect slot area information for loss evaluation in JMAG optimization
################################################################
if True:
# get stator slot area for copper loss calculation
femm.mo_groupselectblock(11)
stator_slot_area = femm.mo_blockintegral(5) / (
im.Qs / fraction) # unit: m^2 (verified by GUI operation)
femm.mo_clearblock()
# get rotor slot area for copper loss calculation
femm.mo_groupselectblock(101)
rotor_slot_area = femm.mo_blockintegral(5) / (im.Qs / fraction)
femm.mo_clearblock()
femm.mo_close()
# return [-el for el in vals_results_rotor_current[self.rotor_slot_per_pole:2*self.rotor_slot_per_pole]] # use the fourth-quadrant rotor currents, because the third-quadrant bars are cut in half, which is troublesome
# the slice below also takes the fourth-quadrant rotor currents, but later code assumes the third-quadrant ones (rA1, rB1, ...) by default, so the sign is flipped here (-el)
vals_results_rotor_current = [-el for el in vals_results_rotor_current[
self.rotor_slot_per_pole:2 * self.rotor_slot_per_pole]]
# vals_results_rotor_current = self.femm_integrate_4_current(self.fraction)
if dir_output is None:
dir_output = self.dir_run_sweeping
if returnData == False: # no return then write to file
with open(dir_output + "femm_rotor_current_conditions.txt", "w") as stream:
for el in vals_results_rotor_current:
stream.write("%g %g \n" % (el.real, el.imag))
print('done. rotor current conditions written to femm_rotor_current_conditions.txt.')
return None
else:
return vals_results_rotor_current, stator_slot_area, rotor_slot_area
def show_results_static(self, bool_plot=True):
# Collect results from all the .ans file in the dir_run folder of FEMM.
# recall that for static FEA, you call show_results once when half .ans files are generated from watchdog
self.freq = 0 # needed for get_output_file_name(), which embeds self.freq in the file name
# TODO: check that the file exists and is not empty!
# if exists .txt file, then load it
missed_ans_file_list = []
if os.path.exists(self.dir_run + "static_results.txt"):
data = np.loadtxt(self.dir_run + "static_results.txt", unpack=True, usecols=(0, 1, 2, 3))
# use dict to eliminate duplicates
results_dict = {}
for i in range(len(data[0])):
results_dict[data[0][i]] = (data[1][i], data[2][i], data[3][i])
keys_without_duplicates = list(
OrderedDict.fromkeys(data[0])) # remove duplicated item because it is a dict now!
keys_without_duplicates.sort()
# check for missed .ans files
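# If the number of distinct rotor positions on file does not match the expected sweep
# (0 to 180 deg in steps of deg_per_step), collect the missing .ans file names so they can be re-run below.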
if len(np.arange(0, 180, self.deg_per_step)) == len(keys_without_duplicates):
pass
else:
for self.rotor_position_in_deg in np.arange(0, 180, self.deg_per_step):
flag_missed = True
for key in keys_without_duplicates:
if int('%04d' % (10 * self.rotor_position_in_deg)) == key:
flag_missed = False
break
if flag_missed == True:
missed_ans_file_list.append(self.get_output_file_name(booL_dir=False) + '%04d' % (
10 * self.rotor_position_in_deg) + '.ans')
print('missed:', missed_ans_file_list)
# typical print gives: 5 1813 1795 1799.0
# print len(missed_ans_file_list), len(data[0]), len(keys_without_duplicates), keys_without_duplicates[-1]
# quit()
# write data without duplicates to file
with open(self.dir_run + "static_results_no_duplicates.txt", 'w') as f:
for key in keys_without_duplicates:
f.writelines(
'%g %g %g %g\n' % (key, results_dict[key][0], results_dict[key][1], results_dict[key][2]))
print('[FEMM.show_results_static] the last key is', max(keys_without_duplicates),
'[begin from 0]. the length of keys is', len(keys_without_duplicates))
data = np.loadtxt(self.dir_run + "static_results_no_duplicates.txt", unpack=True, usecols=(0, 1, 2, 3))
last_index = len(data[0])
else:
last_index = 0
ans_file_list = os.listdir(self.dir_run)
ans_file_list = [f for f in ans_file_list if '.ans' in f]
# last_index == 0 means this is the first post-processing run
if last_index > 0:
if len(ans_file_list) <= last_index:
if bool_plot == True:
self.plot_results(data)
return data
else:
print('There are new .ans files. Now append them')
# iter ans_file_list and missed_ans_file_list, and write to .txt
femm.openfemm(True) # bHide
print('there are total %d .ans files' % (len(ans_file_list)))
print('I am going to append the rest %d ones.' % (len(ans_file_list) - last_index))
for ind, f in enumerate(ans_file_list[last_index:] + missed_ans_file_list):
if ind >= len(ans_file_list[last_index:]):
print('...open missed .ans files')
if os.path.exists(self.dir_run + f) == False:
print('run mi_analyze for %s' % (f))
femm.opendocument(self.dir_run + f[:-4] + '.fem')
femm.mi_analyze(1)
else:
femm.opendocument(self.dir_run + f[:-4] + '.fem')
else:
print(last_index + ind, end=' ')
femm.opendocument(self.dir_run + f[:-4] + '.fem')
# load solution (if corrupted, re-run)
try:
femm.mi_loadsolution()
except Exception as e:
logger = logging.getLogger(__name__)
logger.error('The .ans file to this .fem file is corrupted. re-run the .fem file %s' % (f),
exc_info=True)
femm.opendocument(self.dir_run + f[:-4] + '.fem')
femm.mi_analyze(1)
femm.mi_loadsolution()
# get the physical amounts on the rotor
try:
femm.mo_groupselectblock(100)
femm.mo_groupselectblock(101)
Fx = femm.mo_blockintegral(18) # -- 18 x (or r) part of steady-state weighted stress tensor force
Fy = femm.mo_blockintegral(19) # --19 y (or z) part of steady-state weighted stress tensor force
torque = femm.mo_blockintegral(22) # -- 22 = Steady-state weighted stress tensor torque
femm.mo_clearblock()
# Air Gap Boundary for Rotor Motion #5
# gap_torque = femm.mo_gapintegral("AGB4RM", 0)
# gap_force = femm.mo_gapintegral("AGB4RM", 1)
# print gap_force, gap_torque, torque, Fx, Fy
# write results to a data file
with open(self.dir_run + "static_results.txt", "a") as stream:
stream.write("%s %g %g %g\n" % (f[-8:-4], torque, Fx, Fy))
except Exception as e:
logger = logging.getLogger(__name__)
logger.error('Encounter error while post-processing (integrating, etc.).', exc_info=True)
raise e
# avoid run out of RAM when there are a thousand of ans files loaded into femm...
# if ind % 10 == 0:
# femm.closefemm()
# femm.openfemm(True)
femm.mo_close() # use mo_ to close .ans file
femm.mi_close() # use mi_ to close .fem file
print('done. append to static_results.txt.')
femm.closefemm()
try:
data
except:
print('call this method again to plot...')
return None
if bool_plot == True:
self.plot_results(data)
return data
def write_physical_data(self, results_list):
with open(self.dir_run + "static_results.txt", "a") as f:
results_rotor = ''
for ind, row in enumerate(results_list):
results_rotor += "%s %g %g %g\n" \
% (row[0][-8:-4], row[1], row[2], row[3])
f.write(results_rotor)
def plot_results(self, data):
from pylab import subplots, legend, show
try:
self.fig
except:
fig, axes = subplots(3, 1, sharex=True)
self.fig = fig
self.axes = axes
else:
fig = self.fig
axes = self.axes
if self.flag_eddycurrent_solver:
ax = axes[0]
ax.plot(data[0], data[1], label='torque')
ax.legend()
ax.grid()
ax = axes[1]
ax.plot(data[0], data[2], label='Fx')
ax.legend()
ax.grid()
ax = axes[2]
ax.plot(data[0], data[3], label='Fy')
ax.legend()
ax.grid()
if self.flag_static_solver:
ax = axes[0]
ax.plot(data[0] * 0.1, data[1], label='torque')
ax.legend()
ax.grid()
ax = axes[1]
ax.plot(data[0] * 0.1, data[2], label='Fx')
ax.legend()
ax.grid()
ax = axes[2]
ax.plot(data[0] * 0.1, data[3], label='Fy')
ax.legend()
ax.grid()
def run_frequency_sweeping(self, freq_range, fraction=2):
if self.has_results(dir_run=self.dir_run_sweeping):
return
self.flag_static_solver = False
self.flag_eddycurrent_solver = True
self.fraction = fraction
for f in os.listdir(self.dir_run_sweeping):
os.remove(self.dir_run_sweeping + f)
femm.openfemm(True) # bHide # False for debug
femm.newdocument(0) # magnetic
self.freq_range = freq_range
self.freq = freq_range[0]
# Alternatively, the length of the machine could be scaled by the number of segments to make this correction automatically.
self.stack_length = self.im.stack_length * fraction
self.probdef()
# is coarse mesh causing biased rotor current?
# femm.smartmesh(True)
# self.bool_automesh = True
self.add_material()
# self.draw_model(fraction=fraction)
self.vangogh.draw_model(fraction=fraction)
self.add_block_labels(fraction=fraction)
# # debug here
# femm.mi_maximize()
# femm.mi_zoomnatural()
# return
list_ans_file = []
for freq in freq_range:
self.freq = freq
temp = self.get_output_file_name(booL_dir=False)
list_ans_file.append(temp + '.ans')
self.output_file_name = self.dir_run_sweeping + temp
print(temp)
if os.path.exists(self.output_file_name + '.ans'):
continue
self.probdef()
femm.mi_saveas(self.output_file_name + '.fem')
self.parallel_solve(dir_run=self.dir_run_sweeping,
number_of_instantces=5) # subprocess will wait for cmd but not the python script
self.wait(list_ans_file)
# flux and current of circuit can be used for parameter identification
if False:
dict_circuits = {}
# i = femm.mo_getprobleminfo()
logging.getLogger().info('Sweeping: %g Hz.' % (self.freq))
femm.mi_analyze(1) # None for inherited. 1 for a minimized window,
femm.mi_loadsolution()
# circuit
# i1_re,i1_im, v1_re,v1_im, flux1_re,flux1_im = femm.mo_getcircuitproperties("dA")
# i2_re,i2_im, v2_re,v2_im, flux2_re,flux2_im = femm.mo_getcircuitproperties("bA")
# i3_re,i3_im, v3_re,v3_im, flux3_re,flux3_im = femm.mo_getcircuitproperties("rA")
dict_circuits['dU'] = femm.mo_getcircuitproperties("dU")
dict_circuits['dV'] = femm.mo_getcircuitproperties("dV")
dict_circuits['dW'] = femm.mo_getcircuitproperties("dW")
dict_circuits['bU'] = femm.mo_getcircuitproperties("bU")
dict_circuits['bV'] = femm.mo_getcircuitproperties("bV")
dict_circuits['bW'] = femm.mo_getcircuitproperties("bW")
for i in range(self.rotor_slot_per_pole):
circuit_name = 'r%s' % (self.rotor_phase_name_list[i])
dict_circuits[circuit_name] = femm.mo_getcircuitproperties(circuit_name)
# write results to a data file, multiplying by ? to get
# the results for all ? poles of the machine. # if only a symmetric fraction of the field was analyzed, remember to scale the result back up for the omitted part.
with open(self.dir_run_sweeping + "results.txt", "a") as f:
results_circuits = "[DW] %g + j%g A. %g + j%g V. %g + j%g Wb. [BW] %g + j%g A. %g + j%g V. %g + j%g Wb. [BAR] %g + j%g A. %g + j%g V. %g + j%g Wb. " \
% (np.real(dict_circuits['dU'][0]), np.imag(dict_circuits['dU'][0]),
np.real(dict_circuits['dU'][1]), np.imag(dict_circuits['dU'][1]),
| np.real(dict_circuits['dU'][2]) | numpy.real |
# coding=utf-8
# Copyright 2021 HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import tempfile
import unittest
from datasets import load_dataset
from transformers.file_utils import cached_property, is_torch_available, is_vision_available
from transformers.testing_utils import require_torch, require_vision, slow, torch_device
from .test_modeling_bert import BertModelTester
from .test_modeling_common import floats_tensor, ids_tensor, random_attention_mask
from .test_modeling_deit import DeiTModelTester
from .test_modeling_trocr import TrOCRStandaloneDecoderModelTester
from .test_modeling_vit import ViTModelTester
if is_torch_available():
import numpy as np
import torch
from transformers import (
AutoTokenizer,
BertLMHeadModel,
DeiTModel,
TrOCRForCausalLM,
VisionEncoderDecoderConfig,
VisionEncoderDecoderModel,
ViTModel,
)
from transformers.modeling_outputs import BaseModelOutput
from transformers.models.vit.modeling_vit import to_2tuple
if is_vision_available():
from PIL import Image
from transformers import TrOCRProcessor, ViTFeatureExtractor
@require_torch
class EncoderDecoderMixin:
def get_encoder_decoder_model(self, config, decoder_config):
pass
def prepare_config_and_inputs(self):
pass
def get_pretrained_model_and_inputs(self):
pass
def check_encoder_decoder_model_from_pretrained_configs(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_decoder_config = VisionEncoderDecoderConfig.from_encoder_decoder_configs(config, decoder_config)
self.assertTrue(encoder_decoder_config.decoder.is_decoder)
enc_dec_model = VisionEncoderDecoderModel(encoder_decoder_config)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
def check_encoder_decoder_model(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
self.assertTrue(enc_dec_model.config.decoder.is_decoder)
self.assertTrue(enc_dec_model.config.decoder.add_cross_attention)
self.assertTrue(enc_dec_model.config.is_encoder_decoder)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_hidden_states=True,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
encoder_outputs = BaseModelOutput(last_hidden_state=outputs_encoder_decoder.encoder_hidden_states[-1])
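# Passing precomputed encoder_outputs instead of pixel_values skips the encoder forward pass;
# the decoder logits shape must be unchanged.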
outputs_encoder_decoder = enc_dec_model(
encoder_outputs=encoder_outputs,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
def check_encoder_decoder_model_from_pretrained(
self,
config,
decoder_config,
decoder_input_ids,
decoder_attention_mask,
return_dict,
pixel_values=None,
**kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
kwargs = {"encoder_model": encoder_model, "decoder_model": decoder_model, "return_dict": return_dict}
enc_dec_model = VisionEncoderDecoderModel.from_encoder_decoder_pretrained(**kwargs)
enc_dec_model.to(torch_device)
outputs_encoder_decoder = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
output_hidden_states=True,
return_dict=True,
)
self.assertEqual(
outputs_encoder_decoder["logits"].shape, (decoder_input_ids.shape + (decoder_config.vocab_size,))
)
def check_save_and_load(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[np.isnan(out_2)] = 0
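# NaNs are zeroed so the element-wise comparison after reloading is not polluted by NaN != NaN.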
with tempfile.TemporaryDirectory() as tmpdirname:
enc_dec_model.save_pretrained(tmpdirname)
enc_dec_model = VisionEncoderDecoderModel.from_pretrained(tmpdirname)
enc_dec_model.to(torch_device)
after_outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_1 = after_outputs[0].cpu().numpy()
out_1[np.isnan(out_1)] = 0
max_diff = np.amax(np.abs(out_1 - out_2))
self.assertLessEqual(max_diff, 1e-5)
def check_save_and_load_encoder_decoder_model(
self, config, decoder_config, decoder_input_ids, decoder_attention_mask, pixel_values=None, **kwargs
):
encoder_model, decoder_model = self.get_encoder_decoder_model(config, decoder_config)
enc_dec_model = VisionEncoderDecoderModel(encoder=encoder_model, decoder=decoder_model)
enc_dec_model.to(torch_device)
enc_dec_model.eval()
with torch.no_grad():
outputs = enc_dec_model(
pixel_values=pixel_values,
decoder_input_ids=decoder_input_ids,
decoder_attention_mask=decoder_attention_mask,
)
out_2 = outputs[0].cpu().numpy()
out_2[ | np.isnan(out_2) | numpy.isnan |
#!/usr/bin/env python
# Copyright 2014-2018 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import unittest
import copy
import numpy
from functools import reduce
from pyscf import gto, lib
from pyscf import scf, dft
from pyscf import mp
from pyscf import cc
from pyscf import ao2mo
from pyscf.cc import uccsd
from pyscf.cc import gccsd
from pyscf.cc import addons
from pyscf.cc import uccsd_rdm
from pyscf.fci import direct_uhf
mol = gto.Mole()
mol.verbose = 7
mol.output = '/dev/null'
mol.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol.basis = '631g'
mol.build()
rhf = scf.RHF(mol)
rhf.conv_tol_grad = 1e-8
rhf.kernel()
mf = scf.addons.convert_to_uhf(rhf)
myucc = cc.UCCSD(mf).run(conv_tol=1e-10)
mol_s2 = gto.Mole()
mol_s2.atom = [
[8 , (0. , 0. , 0.)],
[1 , (0. , -0.757 , 0.587)],
[1 , (0. , 0.757 , 0.587)]]
mol_s2.basis = '631g'
mol_s2.spin = 2
mol_s2.verbose = 5
mol_s2.output = '/dev/null'
mol_s2.build()
mf_s2 = scf.UHF(mol_s2).run()
eris = uccsd.UCCSD(mf_s2).ao2mo()
def tearDownModule():
global mol, rhf, mf, myucc, mol_s2, mf_s2, eris
mol.stdout.close()
mol_s2.stdout.close()
del mol, rhf, mf, myucc, mol_s2, mf_s2, eris
class KnownValues(unittest.TestCase):
# def test_with_df(self):
# mf = scf.UHF(mol).density_fit(auxbasis='weigend').run()
# mycc = cc.UCCSD(mf).run()
# self.assertAlmostEqual(mycc.e_tot, -76.118403942938741, 7)
def test_ERIS(self):
ucc1 = cc.UCCSD(mf)
nao,nmo = mf.mo_coeff[0].shape
numpy.random.seed(1)
mo_coeff = numpy.random.random((2,nao,nmo))
eris = cc.uccsd._make_eris_incore(ucc1, mo_coeff)
self.assertAlmostEqual(lib.finger(eris.oooo), 4.9638849382825754, 11)
self.assertAlmostEqual(lib.finger(eris.ovoo),-1.3623681896983584, 11)
self.assertAlmostEqual(lib.finger(eris.ovov), 125.81550684442163, 11)
self.assertAlmostEqual(lib.finger(eris.oovv), 55.123681017639598, 11)
self.assertAlmostEqual(lib.finger(eris.ovvo), 133.48083527898248, 11)
self.assertAlmostEqual(lib.finger(eris.ovvv), 59.421927525288183, 11)
self.assertAlmostEqual(lib.finger(eris.vvvv), 43.556602622204778, 11)
self.assertAlmostEqual(lib.finger(eris.OOOO),-407.05319440524585, 11)
self.assertAlmostEqual(lib.finger(eris.OVOO), 56.284299937160796, 11)
self.assertAlmostEqual(lib.finger(eris.OVOV),-287.72899895597448, 11)
self.assertAlmostEqual(lib.finger(eris.OOVV),-85.484299959144522, 11)
self.assertAlmostEqual(lib.finger(eris.OVVO),-228.18996145476956, 11)
self.assertAlmostEqual(lib.finger(eris.OVVV),-10.715902258877399, 11)
self.assertAlmostEqual(lib.finger(eris.VVVV),-89.908425473958303, 11)
self.assertAlmostEqual(lib.finger(eris.ooOO),-336.65979260175226, 11)
self.assertAlmostEqual(lib.finger(eris.ovOO),-16.405125847288176, 11)
self.assertAlmostEqual(lib.finger(eris.ovOV), 231.59042209500075, 11)
self.assertAlmostEqual(lib.finger(eris.ooVV), 20.338077193028354, 11)
self.assertAlmostEqual(lib.finger(eris.ovVO), 206.48662856981386, 11)
self.assertAlmostEqual(lib.finger(eris.ovVV),-71.273249852220516, 11)
self.assertAlmostEqual(lib.finger(eris.vvVV), 172.47130671068496, 11)
self.assertAlmostEqual(lib.finger(eris.OVoo),-19.927660309103977, 11)
self.assertAlmostEqual(lib.finger(eris.OOvv),-27.761433381797019, 11)
self.assertAlmostEqual(lib.finger(eris.OVvo),-140.09648311337384, 11)
self.assertAlmostEqual(lib.finger(eris.OVvv), 40.700983950220547, 11)
uccsd.MEMORYMIN, bak = 0, uccsd.MEMORYMIN
ucc1.max_memory = 0
eris1 = ucc1.ao2mo(mo_coeff)
uccsd.MEMORYMIN = bak
self.assertAlmostEqual(abs(numpy.array(eris1.oooo)-eris.oooo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovoo)-eris.ovoo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovov)-eris.ovov).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.oovv)-eris.oovv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovvo)-eris.ovvo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovvv)-eris.ovvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.vvvv)-eris.vvvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOOO)-eris.OOOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVOO)-eris.OVOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVOV)-eris.OVOV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOVV)-eris.OOVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVVO)-eris.OVVO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVVV)-eris.OVVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.VVVV)-eris.VVVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ooOO)-eris.ooOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovOO)-eris.ovOO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovOV)-eris.ovOV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ooVV)-eris.ooVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovVO)-eris.ovVO).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.ovVV)-eris.ovVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.vvVV)-eris.vvVV).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVoo)-eris.OVoo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OOvv)-eris.OOvv).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVvo)-eris.OVvo).max(), 0, 11)
self.assertAlmostEqual(abs(numpy.array(eris1.OVvv)-eris.OVvv).max(), 0, 11)
# Testing the complex MO integrals
def ao2mofn(mos):
if isinstance(mos, numpy.ndarray) and mos.ndim == 2:
mos = [mos]*4
nmos = [mo.shape[1] for mo in mos]
eri_mo = ao2mo.kernel(mf._eri, mos, compact=False).reshape(nmos)
return eri_mo * 1j
eris1 = cc.uccsd._make_eris_incore(ucc1, mo_coeff, ao2mofn=ao2mofn)
self.assertAlmostEqual(abs(eris1.oooo.imag-eris.oooo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovoo.imag-eris.ovoo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovov.imag-eris.ovov).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.oovv.imag-eris.oovv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovvo.imag-eris.ovvo).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.ovvv.imag-eris.ovvv).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.vvvv.imag-eris.vvvv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOOO.imag-eris.OOOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVOO.imag-eris.OVOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVOV.imag-eris.OVOV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOVV.imag-eris.OOVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVVO.imag-eris.OVVO).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.OVVV.imag-eris.OVVV).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.VVVV.imag-eris.VVVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ooOO.imag-eris.ooOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovOO.imag-eris.ovOO).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovOV.imag-eris.ovOV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ooVV.imag-eris.ooVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.ovVO.imag-eris.ovVO).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.ovVV.imag-eris.ovVV).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.vvVV.imag-eris.vvVV).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVoo.imag-eris.OVoo).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OOvv.imag-eris.OOvv).max(), 0, 11)
self.assertAlmostEqual(abs(eris1.OVvo.imag-eris.OVvo).max(), 0, 11)
#self.assertAlmostEqual(abs(eris1.OVvv.imag-eris.OVvv).max(), 0, 11)
def test_amplitudes_from_rccsd(self):
e, t1, t2 = cc.RCCSD(rhf).set(conv_tol=1e-10).kernel()
t1, t2 = myucc.amplitudes_from_rccsd(t1, t2)
self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 6)
self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 6)
self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 6)
def test_uccsd_frozen(self):
ucc1 = copy.copy(myucc)
ucc1.frozen = 1
self.assertEqual(ucc1.nmo, (12,12))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [0,1]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (3,3))
ucc1.frozen = [[0,1], [0,1]]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (3,3))
ucc1.frozen = [1,9]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [[1,9], [1,9]]
self.assertEqual(ucc1.nmo, (11,11))
self.assertEqual(ucc1.nocc, (4,4))
ucc1.frozen = [9,10,12]
self.assertEqual(ucc1.nmo, (10,10))
self.assertEqual(ucc1.nocc, (5,5))
ucc1.nmo = (13,12)
ucc1.nocc = (5,4)
self.assertEqual(ucc1.nmo, (13,12))
self.assertEqual(ucc1.nocc, (5,4))
def test_uccsd_frozen(self):
# Freeze 1s electrons
frozen = [[0,1], [0,1]]
ucc = cc.UCCSD(mf_s2, frozen=frozen)
ucc.diis_start_cycle = 1
ecc, t1, t2 = ucc.kernel()
self.assertAlmostEqual(ecc, -0.07414978284611283, 8)
def test_rdm(self):
nocc = 5
nvir = 7
mol = gto.M()
mf = scf.UHF(mol)
mf.mo_occ = numpy.zeros((2,nocc+nvir))
mf.mo_occ[:,:nocc] = 1
mycc = uccsd.UCCSD(mf)
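# antisym enforces the permutational antisymmetry of same-spin amplitudes:
# t2[i,j,a,b] = -t2[j,i,a,b] = -t2[i,j,b,a].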
def antisym(t2):
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,2,3)
return t2
orbspin = numpy.zeros((nocc+nvir)*2, dtype=int)
orbspin[1::2] = 1
numpy.random.seed(1)
t1 = numpy.random.random((2,nocc,nvir))*.1 - .1
t2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1
t2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
t2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
t2 = (t2aa,t2ab,t2bb)
l1 = numpy.random.random((2,nocc,nvir))*.1 - .1
l2ab = numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1
l2aa = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
l2bb = antisym(numpy.random.random((nocc,nocc,nvir,nvir))*.1 - .1)
l2 = (l2aa,l2ab,l2bb)
dm1a, dm1b = mycc.make_rdm1(t1, t2, l1, l2)
dm2aa, dm2ab, dm2bb = mycc.make_rdm2(t1, t2, l1, l2)
ia = orbspin == 0
ib = orbspin == 1
oa = orbspin[:nocc*2] == 0
ob = orbspin[:nocc*2] == 1
va = orbspin[nocc*2:] == 0
vb = orbspin[nocc*2:] == 1
t1 = addons.spatial2spin(t1, orbspin)
t2 = addons.spatial2spin(t2, orbspin)
l1 = addons.spatial2spin(l1, orbspin)
l2 = addons.spatial2spin(l2, orbspin)
mf1 = scf.GHF(mol)
mf1.mo_occ = numpy.zeros((nocc+nvir)*2)
mf1.mo_occ[:nocc*2] = 1
mycc1 = gccsd.GCCSD(mf1)
dm1 = mycc1.make_rdm1(t1, t2, l1, l2)
dm2 = mycc1.make_rdm2(t1, t2, l1, l2)
self.assertAlmostEqual(abs(dm1[ia][:,ia]-dm1a).max(), 0, 9)
self.assertAlmostEqual(abs(dm1[ib][:,ib]-dm1b).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ia][:,:,:,ia]-dm2aa).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ia][:,ia][:,:,ib][:,:,:,ib]-dm2ab).max(), 0, 9)
self.assertAlmostEqual(abs(dm2[ib][:,ib][:,:,ib][:,:,:,ib]-dm2bb).max(), 0, 9)
def test_h2o_rdm(self):
mol = mol_s2
mf = mf_s2
mycc = uccsd.UCCSD(mf)
mycc.frozen = 2
ecc, t1, t2 = mycc.kernel()
l1, l2 = mycc.solve_lambda()
dm1a,dm1b = mycc.make_rdm1(t1, t2, l1, l2)
dm2aa,dm2ab,dm2bb = mycc.make_rdm2(t1, t2, l1, l2)
mo_a = mf.mo_coeff[0]
mo_b = mf.mo_coeff[1]
nmoa = mo_a.shape[1]
nmob = mo_b.shape[1]
eriaa = ao2mo.kernel(mf._eri, mo_a, compact=False).reshape([nmoa]*4)
eribb = ao2mo.kernel(mf._eri, mo_b, compact=False).reshape([nmob]*4)
eriab = ao2mo.kernel(mf._eri, (mo_a,mo_a,mo_b,mo_b), compact=False)
eriab = eriab.reshape([nmoa,nmoa,nmob,nmob])
hcore = mf.get_hcore()
h1a = reduce(numpy.dot, (mo_a.T.conj(), hcore, mo_a))
h1b = reduce(numpy.dot, (mo_b.T.conj(), hcore, mo_b))
e1 = numpy.einsum('ij,ji', h1a, dm1a)
e1+= numpy.einsum('ij,ji', h1b, dm1b)
e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2aa) * .5
e1+= numpy.einsum('ijkl,ijkl', eriab, dm2ab)
e1+= numpy.einsum('ijkl,ijkl', eribb, dm2bb) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mycc.e_tot, 7)
d1 = uccsd_rdm._gamma1_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2)
mycc.max_memory = 0
d2 = uccsd_rdm._gamma2_intermediates(mycc, mycc.t1, mycc.t2, mycc.l1, mycc.l2, True)
dm2 = uccsd_rdm._make_rdm2(mycc, d1, d2, with_dm1=True, with_frozen=True)
e1 = numpy.einsum('ij,ji', h1a, dm1a)
e1+= numpy.einsum('ij,ji', h1b, dm1b)
e1+= numpy.einsum('ijkl,ijkl', eriaa, dm2[0]) * .5
e1+= numpy.einsum('ijkl,ijkl', eriab, dm2[1])
e1+= numpy.einsum('ijkl,ijkl', eribb, dm2[2]) * .5
e1+= mol.energy_nuc()
self.assertAlmostEqual(e1, mycc.e_tot, 7)
def test_h4_rdm(self):
mol = gto.Mole()
mol.verbose = 0
mol.atom = [
['H', ( 1.,-1. , 0. )],
['H', ( 0.,-1. ,-1. )],
['H', ( 1.,-0.5 , 0. )],
['H', ( 0.,-1. , 1. )],
]
mol.charge = 2
mol.spin = 2
mol.basis = '6-31g'
mol.build()
mf = scf.UHF(mol).set(init_guess='1e').run(conv_tol=1e-14)
ehf0 = mf.e_tot - mol.energy_nuc()
mycc = uccsd.UCCSD(mf).run()
mycc.solve_lambda()
eri_aa = ao2mo.kernel(mf._eri, mf.mo_coeff[0])
eri_bb = ao2mo.kernel(mf._eri, mf.mo_coeff[1])
eri_ab = ao2mo.kernel(mf._eri, [mf.mo_coeff[0], mf.mo_coeff[0],
mf.mo_coeff[1], mf.mo_coeff[1]])
h1a = reduce(numpy.dot, (mf.mo_coeff[0].T, mf.get_hcore(), mf.mo_coeff[0]))
h1b = reduce(numpy.dot, (mf.mo_coeff[1].T, mf.get_hcore(), mf.mo_coeff[1]))
efci, fcivec = direct_uhf.kernel((h1a,h1b), (eri_aa,eri_ab,eri_bb),
h1a.shape[0], mol.nelec)
dm1ref, dm2ref = direct_uhf.make_rdm12s(fcivec, h1a.shape[0], mol.nelec)
t1, t2 = mycc.t1, mycc.t2
l1, l2 = mycc.l1, mycc.l2
rdm1 = mycc.make_rdm1(t1, t2, l1, l2)
rdm2 = mycc.make_rdm2(t1, t2, l1, l2)
self.assertAlmostEqual(abs(dm1ref[0] - rdm1[0]).max(), 0, 6)
self.assertAlmostEqual(abs(dm1ref[1] - rdm1[1]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[0] - rdm2[0]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[1] - rdm2[1]).max(), 0, 6)
self.assertAlmostEqual(abs(dm2ref[2] - rdm2[2]).max(), 0, 6)
def test_eris_contract_vvvv_t2(self):
mol = gto.Mole()
nocca, noccb, nvira, nvirb = 5, 4, 12, 13
nvira_pair = nvira*(nvira+1)//2
nvirb_pair = nvirb*(nvirb+1)//2
numpy.random.seed(9)
t2 = numpy.random.random((nocca,noccb,nvira,nvirb))
eris = uccsd._ChemistsERIs()
eris.vvVV = numpy.random.random((nvira_pair,nvirb_pair))
eris.mol = mol
myucc.max_memory, bak = 0, myucc.max_memory
vt2 = eris._contract_vvVV_t2(myucc, t2, eris.vvVV)
myucc.max_memory = bak
self.assertAlmostEqual(lib.finger(vt2), 12.00904827896089, 11)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
vvVV = eris.vvVV[:,idxb][idxa]
ref = lib.einsum('acbd,ijcd->ijab', vvVV, t2)
self.assertAlmostEqual(abs(vt2 - ref).max(), 0, 11)
# _contract_VVVV_t2, testing complex and real mixed contraction
VVVV =(numpy.random.random((nvirb,nvirb,nvirb,nvirb)) +
numpy.random.random((nvirb,nvirb,nvirb,nvirb))*1j - (.5+.5j))
VVVV = VVVV + VVVV.transpose(1,0,3,2).conj()
VVVV = VVVV + VVVV.transpose(2,3,0,1)
eris.VVVV = VVVV
t2 = numpy.random.random((noccb,noccb,nvirb,nvirb))
t2 = t2 - t2.transpose(0,1,3,2)
t2 = t2 - t2.transpose(1,0,3,2)
myucc.max_memory, bak = 0, myucc.max_memory
vt2 = eris._contract_VVVV_t2(myucc, t2, eris.VVVV)
myucc.max_memory = bak
self.assertAlmostEqual(lib.finger(vt2), 47.903883794299404-50.501573400833429j, 11)
ref = lib.einsum('acbd,ijcd->ijab', eris.VVVV, t2)
self.assertAlmostEqual(abs(vt2 - ref).max(), 0, 11)
def test_update_amps1(self):
mf = scf.UHF(mol_s2)
numpy.random.seed(9)
nmo = mf_s2.mo_occ[0].size
mf.mo_coeff = numpy.random.random((2,nmo,nmo)) - 0.5
mf.mo_occ = numpy.zeros((2,nmo))
mf.mo_occ[0,:6] = 1
mf.mo_occ[1,:5] = 1
mycc = uccsd.UCCSD(mf)
nocca, noccb = 6, 5
nvira, nvirb = nmo-nocca, nmo-noccb
nvira_pair = nvira*(nvira+1)//2
nvirb_pair = nvirb*(nvirb+1)//2
eris = mycc.ao2mo()
fakeris = uccsd._ChemistsERIs()
fakeris.mo_coeff = eris.mo_coeff
fakeris.vvVV = eris.vvVV
fakeris.mol = mol_s2
t2ab = numpy.random.random((nocca,noccb,nvira,nvirb))
t1a = numpy.zeros((nocca,nvira))
t1b = numpy.zeros((noccb,nvirb))
self.assertAlmostEqual(lib.finger(mycc._add_vvVV(None, t2ab, fakeris)), 21.652482203108928, 9)
fakeris.vvVV = None
mycc.direct = True
mycc.max_memory = 0
self.assertAlmostEqual(lib.finger(mycc._add_vvVV(None, t2ab, fakeris)), 21.652482203108928, 9)
t1 = (numpy.random.random((nocca,nvira)), numpy.random.random((noccb,nvirb)))
t2 = (numpy.random.random((nocca,nocca,nvira,nvira)),
numpy.random.random((nocca,noccb,nvira,nvirb)),
numpy.random.random((noccb,noccb,nvirb,nvirb)))
t1, t2 = mycc.vector_to_amplitudes(mycc.amplitudes_to_vector(t1, t2))
t1, t2 = mycc.update_amps(t1, t2, eris)
self.assertAlmostEqual(lib.finger(t1[0]), 49.912690337392938, 10)
self.assertAlmostEqual(lib.finger(t1[1]), 74.596097348134776, 10)
self.assertAlmostEqual(lib.finger(t2[0]), -41.784696524955393, 10)
self.assertAlmostEqual(lib.finger(t2[1]), -9675.7677695314342, 7)
self.assertAlmostEqual(lib.finger(t2[2]), 270.75447826471577, 8)
self.assertAlmostEqual(lib.finger(mycc.amplitudes_to_vector(t1, t2)), 4341.9623137256776, 6)
def test_vector_to_amplitudes(self):
t1, t2 = myucc.vector_to_amplitudes(myucc.amplitudes_to_vector(myucc.t1, myucc.t2))
self.assertAlmostEqual(abs(t1[0]-myucc.t1[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t1[1]-myucc.t1[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[0]-myucc.t2[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[1]-myucc.t2[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2[2]-myucc.t2[2]).max(), 0, 12)
def test_update_amps2(self): # compare to gccsd.update_amps
mol = mol_s2
mf = mf_s2
myucc = uccsd.UCCSD(mf)
nocca, noccb = 6,4
nmo = mol.nao_nr()
nvira,nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.random.random((nocca,nvira))-.9,
numpy.random.random((noccb,nvirb))-.9]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
mo_a = mf.mo_coeff[0] + numpy.sin(mf.mo_coeff[0]) * .01j
mo_b = mf.mo_coeff[1] + numpy.sin(mf.mo_coeff[1]) * .01j
nao = mo_a.shape[0]
eri = ao2mo.restore(1, mf._eri, nao)
eri0aa = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_a.conj(), mo_a)
eri0ab = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_a.conj(), mo_a, mo_b.conj(), mo_b)
eri0bb = lib.einsum('pqrs,pi,qj,rk,sl->ijkl', eri, mo_b.conj(), mo_b, mo_b.conj(), mo_b)
eri0ba = eri0ab.transpose(2,3,0,1)
nvira = nao - nocca
nvirb = nao - noccb
eris = uccsd._ChemistsERIs(mol)
eris.oooo = eri0aa[:nocca,:nocca,:nocca,:nocca].copy()
eris.ovoo = eri0aa[:nocca,nocca:,:nocca,:nocca].copy()
eris.oovv = eri0aa[:nocca,:nocca,nocca:,nocca:].copy()
eris.ovvo = eri0aa[:nocca,nocca:,nocca:,:nocca].copy()
eris.ovov = eri0aa[:nocca,nocca:,:nocca,nocca:].copy()
eris.ovvv = eri0aa[:nocca,nocca:,nocca:,nocca:].copy()
eris.vvvv = eri0aa[nocca:,nocca:,nocca:,nocca:].copy()
eris.OOOO = eri0bb[:noccb,:noccb,:noccb,:noccb].copy()
eris.OVOO = eri0bb[:noccb,noccb:,:noccb,:noccb].copy()
eris.OOVV = eri0bb[:noccb,:noccb,noccb:,noccb:].copy()
eris.OVVO = eri0bb[:noccb,noccb:,noccb:,:noccb].copy()
eris.OVOV = eri0bb[:noccb,noccb:,:noccb,noccb:].copy()
eris.OVVV = eri0bb[:noccb,noccb:,noccb:,noccb:].copy()
eris.VVVV = eri0bb[noccb:,noccb:,noccb:,noccb:].copy()
eris.ooOO = eri0ab[:nocca,:nocca,:noccb,:noccb].copy()
eris.ovOO = eri0ab[:nocca,nocca:,:noccb,:noccb].copy()
eris.ooVV = eri0ab[:nocca,:nocca,noccb:,noccb:].copy()
eris.ovVO = eri0ab[:nocca,nocca:,noccb:,:noccb].copy()
eris.ovOV = eri0ab[:nocca,nocca:,:noccb,noccb:].copy()
eris.ovVV = eri0ab[:nocca,nocca:,noccb:,noccb:].copy()
eris.vvVV = eri0ab[nocca:,nocca:,noccb:,noccb:].copy()
eris.OOoo = eri0ba[:noccb,:noccb,:nocca,:nocca].copy()
eris.OVoo = eri0ba[:noccb,noccb:,:nocca,:nocca].copy()
eris.OOvv = eri0ba[:noccb,:noccb,nocca:,nocca:].copy()
eris.OVvo = eri0ba[:noccb,noccb:,nocca:,:nocca].copy()
eris.OVov = eri0ba[:noccb,noccb:,:nocca,nocca:].copy()
eris.OVvv = eri0ba[:noccb,noccb:,nocca:,nocca:].copy()
eris.VVvv = eri0ba[noccb:,noccb:,nocca:,nocca:].copy()
eris.focka = numpy.diag(mf.mo_energy[0])
eris.fockb = numpy.diag(mf.mo_energy[1])
t1[0] = t1[0] + numpy.sin(t1[0]) * .05j
t1[1] = t1[1] + numpy.sin(t1[1]) * .05j
t2[0] = t2[0] + numpy.sin(t2[0]) * .05j
t2[1] = t2[1] + numpy.sin(t2[1]) * .05j
t2[2] = t2[2] + numpy.sin(t2[2]) * .05j
t1new_ref, t2new_ref = uccsd.update_amps(myucc, t1, t2, eris)
nocc = nocca + noccb
orbspin = numpy.zeros(nao*2, dtype=int)
orbspin[1::2] = 1
orbspin[nocc-1] = 0
orbspin[nocc ] = 1
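# Alternate alpha/beta spin labels, then swap the labels of the two orbitals at the Fermi level so the
# first nocc spin orbitals contain nocca alpha and noccb beta occupied orbitals (here 6 alpha, 4 beta).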
eri1 = numpy.zeros([nao*2]*4, dtype=numpy.complex128)
idxa = numpy.where(orbspin == 0)[0]
idxb = numpy.where(orbspin == 1)[0]
eri1[idxa[:,None,None,None],idxa[:,None,None],idxa[:,None],idxa] = eri0aa
eri1[idxa[:,None,None,None],idxa[:,None,None],idxb[:,None],idxb] = eri0ab
eri1[idxb[:,None,None,None],idxb[:,None,None],idxa[:,None],idxa] = eri0ba
eri1[idxb[:,None,None,None],idxb[:,None,None],idxb[:,None],idxb] = eri0bb
eri1 = eri1.transpose(0,2,1,3) - eri1.transpose(0,2,3,1)
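# Convert chemists' notation (pq|rs) into antisymmetrized physicists' notation <pq||rs> = <pq|rs> - <pq|sr>,
# as expected by the GCCSD _PhysicistsERIs container filled below.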
erig = gccsd._PhysicistsERIs()
erig.oooo = eri1[:nocc,:nocc,:nocc,:nocc].copy()
erig.ooov = eri1[:nocc,:nocc,:nocc,nocc:].copy()
erig.ovov = eri1[:nocc,nocc:,:nocc,nocc:].copy()
erig.ovvo = eri1[:nocc,nocc:,nocc:,:nocc].copy()
erig.oovv = eri1[:nocc,:nocc,nocc:,nocc:].copy()
erig.ovvv = eri1[:nocc,nocc:,nocc:,nocc:].copy()
erig.vvvv = eri1[nocc:,nocc:,nocc:,nocc:].copy()
mo_e = numpy.empty(nao*2)
mo_e[orbspin==0] = mf.mo_energy[0]
mo_e[orbspin==1] = mf.mo_energy[1]
erig.fock = numpy.diag(mo_e)
myccg = gccsd.GCCSD(scf.addons.convert_to_ghf(mf))
t1 = myccg.spatial2spin(t1, orbspin)
t2 = myccg.spatial2spin(t2, orbspin)
t1new, t2new = gccsd.update_amps(myccg, t1, t2, erig)
t1new = myccg.spin2spatial(t1new, orbspin)
t2new = myccg.spin2spatial(t2new, orbspin)
self.assertAlmostEqual(abs(t1new[0] - t1new_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t1new[1] - t1new_ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[0] - t2new_ref[0]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[1] - t2new_ref[1]).max(), 0, 12)
self.assertAlmostEqual(abs(t2new[2] - t2new_ref[2]).max(), 0, 12)
def test_mbpt2(self):
myucc = uccsd.UCCSD(mf)
e = myucc.kernel(mbpt2=True)[0]
self.assertAlmostEqual(e, -0.12886859466216125, 10)
emp2 = mp.MP2(mf).kernel()[0]
self.assertAlmostEqual(e, emp2, 10)
myucc = uccsd.UCCSD(mf_s2)
e = myucc.kernel(mbpt2=True)[0]
self.assertAlmostEqual(e, -0.096257842171487293, 10)
emp2 = mp.MP2(mf_s2).kernel()[0]
self.assertAlmostEqual(e, emp2, 10)
def test_uintermediats(self):
from pyscf.cc import uintermediates
self.assertTrue(eris.get_ovvv().ndim == 4)
self.assertTrue(eris.get_ovVV().ndim == 4)
self.assertTrue(eris.get_OVvv().ndim == 4)
self.assertTrue(eris.get_OVVV().ndim == 4)
self.assertTrue(eris.get_ovvv(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_ovVV(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_OVvv(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(eris.get_OVVV(slice(None), slice(2,4)).ndim == 4)
self.assertTrue(uintermediates._get_vvvv(eris).ndim == 4)
self.assertTrue(uintermediates._get_vvVV(eris).ndim == 4)
self.assertTrue(uintermediates._get_VVVV(eris).ndim == 4)
def test_add_vvvv(self):
myucc = uccsd.UCCSD(mf_s2)
nocca, noccb = 6,4
nmo = mf_s2.mo_occ[0].size
nvira, nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.zeros((nocca,nvira)),
numpy.zeros((noccb,nvirb))]
t2 = [numpy.random.random((nocca,nocca,nvira,nvira))-.9,
numpy.random.random((nocca,noccb,nvira,nvirb))-.9,
numpy.random.random((noccb,noccb,nvirb,nvirb))-.9]
t2[0] = t2[0] - t2[0].transpose(1,0,2,3)
t2[0] = t2[0] - t2[0].transpose(0,1,3,2)
t2[2] = t2[2] - t2[2].transpose(1,0,2,3)
t2[2] = t2[2] - t2[2].transpose(0,1,3,2)
eris1 = copy.copy(eris)
idxa = lib.square_mat_in_trilu_indices(nvira)
idxb = lib.square_mat_in_trilu_indices(nvirb)
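        # eris.vvvv/vvVV/VVVV store the virtual-virtual pairs in packed
        # lower-triangular form; indexing with the trilu index arrays unpacks
        # them into full four-index tensors for the reference contraction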
ref =(lib.einsum('acbd,ijcd->ijab', eris1.vvvv[:,idxa][idxa], t2[0]),
lib.einsum('acbd,ijcd->ijab', eris1.vvVV[:,idxb][idxa], t2[1]),
lib.einsum('acbd,ijcd->ijab', eris1.VVVV[:,idxb][idxb], t2[2]))
t2a = myucc._add_vvvv((t1[0]*0,t1[1]*0), t2, eris, t2sym=False)
self.assertAlmostEqual(abs(ref[0]-t2a[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[1]-t2a[1]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[2]-t2a[2]).max(), 0, 12)
myucc.direct = True
eris1.vvvv = None # == with_ovvv=True in the call below
eris1.VVVV = None
eris1.vvVV = None
t1 = None
myucc.mo_coeff, eris1.mo_coeff = eris1.mo_coeff, None
t2b = myucc._add_vvvv(t1, t2, eris1)
self.assertAlmostEqual(abs(ref[0]-t2b[0]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[1]-t2b[1]).max(), 0, 12)
self.assertAlmostEqual(abs(ref[2]-t2b[2]).max(), 0, 12)
def test_add_vvVV(self):
myucc = uccsd.UCCSD(mf_s2)
nocca, noccb = 6,4
nmo = mf_s2.mo_occ[0].size
nvira, nvirb = nmo-nocca, nmo-noccb
numpy.random.seed(9)
t1 = [numpy.zeros((nocca,nvira)),
              numpy.zeros((noccb,nvirb))]
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
    with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                # compute the elapsed time before printing so it is always defined
                elapsed = (i * 3) / 60
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to collect the data files from the THREDDS catalog.
    The returned list of catalog URLs can then be passed on (e.g. to M2M_Data) for further processing.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of the NetCDF data files in the THREDDS catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist may contain more than one URL; loop over each NetCDF file and append its data
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
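    # seconds since 1900-01-01 -> days, then convert to pandas datetimes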
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
    def __getitem__(self, index):
        """Implement index behavior in the struct: indexing one position past
        the current end appends a new empty var(), so the struct can be grown
        by assigning to successive indices."""
        if index == len(self._data):
            self._data.append(var())
        return self._data[index]
def __len__(self):
return len(self._data)
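
# Illustrative sketch (not part of the original script): one way the helpers above
# can be chained together, assuming the THREDDS catalog URLs returned by M2M_Files
# can be handed straight to M2M_Data. The date range and the regex tag below are
# hypothetical placeholders; the dataset name is the CE02SHSM METBK stream used in
# M2M_URLs further down.
def _example_m2m_workflow():
    dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
    start, end = '2019-01-01T00:00:00.000Z', '2019-01-31T23:59:59.999Z'
    # issue the request and wait for OOINet to stage the NetCDF files
    request_info = M2M_Call(dataset_name, start, end)
    if request_info is None:
        return None
    # collect the staged files; the regex tag filters out ancillary datasets
    nc_files = M2M_Files(request_info, tag=r'.*METBK.*\.nc$')
    # describe the variables to pull out of the files; indexing one position past
    # the end of a structtype appends a fresh var(), so the list grows as it is indexed
    var_list = structtype()
    var_list[0].name = 'time'
    var_list[0].data = np.array([])
    var_list[0].units = 'seconds since 1900-01-01'
    var_list[1].name = 'sea_surface_temperature'
    var_list[1].data = np.array([])
    var_list[1].units = 'degC'
    return M2M_Data(nc_files, var_list)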
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
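# PCO2W: seawater partial pressure of CO2 (pCO2) and thermistor temperature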
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
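# PHSEN: seawater pH and thermistor temperature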
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
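# SPKIR: downwelling spectral irradiance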
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
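# PRESF: seafloor pressure and near-bottom temperature (tide measurements)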
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
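# CTDBP: pumped CTD - temperature, salinity, density, pressure, and conductivity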
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
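# VEL3D: single-point 3-D turbulent velocity on the seafloor (MFN) nodes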
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
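# VEL3DK: single-point 3-D velocity (plus attitude and pressure) on the CE09OSPM wire-following profiler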
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
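# PCO2A: surface-water and atmospheric pCO2 plus the derived air-sea CO2 flux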
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
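# PARAD: photosynthetically active radiation (PAR) on the wire-following profiler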
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
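# OPTAA: optical absorption and attenuation (only the time base is requested here)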
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
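# NUTNR: nitrate concentration from the SUNA nitrate sensor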
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
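# MOPAK: buoy 3-axis motion/acceleration package (only the time base is requested here)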
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
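# METBK: bulk meteorology package on the surface buoy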
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
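# FLORT: fluorometer - chlorophyll-a, CDOM, and optical backscatter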
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
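# FDCHP (direct covariance flux package): the recovered_host branch below only exposes the time coordinate.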
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
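# DOSTA (dissolved oxygen optode): NSIF branches report dissolved_oxygen (umol/kg), estimated and
# temperature-corrected oxygen concentrations (umol/L) and optode_temperature (degC); MFN branches
# report only dissolved_oxygen and dosta_ln_optode_oxygen.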
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
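# ADCP (acoustic Doppler current profiler): each branch fills bin_depths (meters), heading/pitch/roll
# (deci-degrees) and eastward/northward/upward seawater velocities (m/s).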
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
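# WAVSS (surface wave spectra) bulk statistics: zero-crossing counts, wave heights (m), wave periods (sec)
# and mean direction/spread (degrees) from the wavss_a_dcl_statistics_recovered stream.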
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
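# VELPT (single-point velocity meter): velocity components (m/s), attitude in deci-degrees, and raw
# temperature/pressure counts (0.01degC, 0.001dbar) for buoy and NSIF nodes.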
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#Note: this CE06ISSM buoy branch uses the velpt_ab_instrument_recovered stream rather than the
#dcl-recovered stream used by the other VELPT branches (original dcl stream kept below for reference).
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
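# PCO2W (water pCO2): thermistor temperature (degC) and pco2_seawater (uatm).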
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
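# PHSEN (seawater pH): thermistor temperature (degC) and phsen_abcdef_ph_seawater (unitless pH).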
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
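# SPKIR (spectral irradiance): downwelling irradiance vector in uW cm-2 nm-1.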
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
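# PRESF (seafloor pressure): absolute seafloor pressure (dbar) and seawater temperature (degC) from MFN nodes.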
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
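# CTDBP (pumped CTD): temperature (degC), practical salinity, density (kg/m3), pressure (dbar) and
# conductivity (S/m) for buoy, NSIF and MFN nodes.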
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D single-point velocity meters (recovered_host turbulent velocity streams)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A - pCO2 air-sea instruments on the surface buoys (recovered_host streams)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA - optical absorption and attenuation meters (time record only; recovered_host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR - SUNA nitrate concentrations (recovered_host streams)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
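#CTD - recovered instrument streams (moored CTDBP and profiler CTDPF): temperature, salinity, density, pressure, conductivity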
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
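#ADCP - recovered_inst adcp_velocity_earth streams (bin depths, attitude, eastward/northward/upward velocities)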
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
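#ZPLSC - recovered_inst zplsc_echogram_data streams (time record only)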
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
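#VELPT - recovered_inst velpt_ab point velocity streams (velocities, attitude, temperature, pressure)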
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
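#VEL3D - recovered_wfp (profiler) and recovered_inst (MFN) 3-D velocity streams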
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
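#PRESF - recovered_inst presf_abc tide measurement streams (seafloor pressure and temperature)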
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
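#PHSEN - recovered_inst phsen_abcdef streams (thermistor temperature and seawater pH)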
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
        var_list[1].data = np.array([])
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 7 13:43:01 2016
@author: fergal
A series of metrics to quantify the noise in a lightcurve:
Includes:
x sgCdpp
x Marshall's noise estimate
o An FT based estimate of 6 hour artifact strength.
o A per thruster firing estimate of 6 hour artifact strength.
$Id$
$URL$
"""
__version__ = "$Id$"
__URL__ = "$URL$"
from scipy.signal import savgol_filter
import matplotlib.pyplot as mp
import numpy as np
import fft
keplerLongCadence_s = 1765.4679
keplerLongCadence_days = keplerLongCadence_s / float(86400)
def computeRollTweakAmplitude(y, nHarmonics = 3, tweakPeriod_days = .25, \
expTime_days=None, plot=False):
"""Compute strength of roll tweak artifact in K2 data with an FT approach.
Compute FT of lightcurve
Optional Inputs:
-----------------
plot
Show a diagnostic plot
Returns:
--------
float indicating strength of correction. A value of 1 means the
amplitude of the tweak is approx equal to the strength of all other
signals in the FT.
"""
if expTime_days is None:
expTime_days = keplerLongCadence_days
#computes FT with frequencies in cycles per days
ft = fft.computeFft(y, expTime_days)
#Thruster firings every 6 hours
artifactFreq_cd = 1/tweakPeriod_days #cycles per day
if plot:
mp.clf()
mp.plot(ft[:,0], 1e6*ft[:,1], 'b-')
metric = 0
nPtsForMed = 50
for i in range(1, nHarmonics+1):
wh = np.argmin( np.fabs(ft[:,0] - i*artifactFreq_cd))
med = np.median(ft[wh-nPtsForMed:wh+nPtsForMed, 1])
metric += ft[wh, 1] / med
if plot:
mp.axvline(i*artifactFreq_cd, color='m')
return metric / float(nHarmonics)
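#Example (illustrative sketch, not part of the original module): for a K2 long-cadence
#flux array `flux` (1d numpy array), the roll-tweak strength could be estimated as
#   strength = computeRollTweakAmplitude(flux, nHarmonics=3)
#Values well above 1 suggest the 6-hour thruster-firing artifact dominates the FT.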
def computeSgCdpp_ppm(y, transitDuration_cadences=13, plot=False):
"""Estimates 6hr CDPP using <NAME> Cleve's Savitzy-Golay technique
An interesting estimate of the noise in a lightcurve is the scatter
after all long term trends have been removed. This is the kernel of
the idea behind the Combined Differential Photometric Precision (CDPP)
metric used in classic Kepler. <NAME> devised a much simpler
    algorithm for computing CDPP using a Savitzky-Golay detrending, which
    he called Savitzky-Golay CDPP, or SG-CDPP. We implement his algorithm
here.
Inputs:
----------
y
(1d numpy array) normalised flux to calculate noise from. Flux
should have a mean of zero and be in units of fractional amplitude.
Note: Bad data in input will skew result. Some filtering of
outliers is performed, but Nan's or Infs will not be caught.
Optional Inputs:
-----------------
transitDuration_cadences
(int) Adjust the assumed transit width, in cadences. Default is
13, which corresponds to a 6.5 hour transit in K2
plot
Show a diagnostic plot
Returns:
------------
Estimated noise in parts per million.
Notes:
-------------
Taken from
svn+ssh://murzim/repo/so/trunk/Develop/jvc/common/compute_SG_noise.m
by <NAME>
"""
#These 3 values were chosen for the original algorithm, and we don't
#change them here.
window = 101
polyorder=2
noiseNorm = 1.40
#Name change for consistency with original algorithm
cadencesPerTransit = transitDuration_cadences
if cadencesPerTransit < 4:
raise ValueError("Cadences per transit must be >= 4")
if len(y) < window:
raise ValueError("Can't compute CDPP for timeseries with fewer points than defined window (%i points)" %(window))
trend = savgol_filter(y, window_length=window, polyorder=polyorder)
detrend = y-trend
filtered = np.ones(cadencesPerTransit)/float(cadencesPerTransit)
smoothed = np.convolve(detrend, filtered, mode='same')
if plot:
mp.clf()
mp.plot(y, 'ko')
mp.plot(trend, 'r-')
mp.plot(smoothed, 'g.')
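    #NOTE: robustStd is assumed to be provided elsewhere in this codebase (a MAD-based
    #robust standard deviation); it is neither defined nor imported in this file.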
sgCdpp_ppm = noiseNorm*robustStd(smoothed, 1)*1e6
return sgCdpp_ppm
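#Example (illustrative sketch, not part of the original module): assuming `flux` is a
#1d numpy array of zero-mean fractional flux with more than 101 points,
#   cdpp_ppm = computeSgCdpp_ppm(flux, transitDuration_cadences=13)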
def estimateScatterWithMarshallMethod(flux, plot=False):
"""Estimate the typical scatter in a lightcurve.
Uses the same method as Marshall (Mullally et al 2016 submitted)
Inputs:
----------
flux
(np 1d array). Flux to measure scatter of. Need not have
zero mean.
Optional Inputs:
-----------------
plot
Show a diagnostic plot
Returns:
------------
(float) scatter of data in the same units as in the input ``flux``
Notes:
----------
Algorithm is reasonably sensitive to outliers. For best results
    use outlier rejection on your lightcurve before computing scatter.
    Nan's and infs in the lightcurve will propagate to the return value.
"""
diff= np.diff(flux)
#Remove egregious outliers. Shouldn't make much difference
idx = sigmaClip(diff, 5)
diff = diff[~idx]
mean = np.mean(diff)
mad = np.median(np.fabs(diff-mean))
std = 1.4826*mad
if plot:
mp.clf()
mp.plot(flux, 'ko')
mp.plot(diff, 'r.')
mp.figure(2)
mp.clf()
bins = np.linspace(-3000, 3000, 61)
mp.hist(1e6*diff, bins=bins, ec="none")
mp.xlim(-3000, 3000)
mp.axvline(-1e6*float(std/np.sqrt(2)), color='r')
mp.axvline(1e6*float(std/np.sqrt(2)), color='r')
#std is the rms of the diff. std on single point
#is 1/sqrt(2) of that value,
return float(std/np.sqrt(2))
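#Example (illustrative sketch, not part of the original module): assuming `flux` has
#already had outliers rejected,
#   scatter = estimateScatterWithMarshallMethod(flux)
#The returned scatter is in the same units as `flux`.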
def singlePointDifferenceSigmaClip(a, nSigma=4, maxIter=1e4, initialClip=None):
"""Iteratively find and remove outliers in first derivative
If a dataset can be modeled as a constant offset + noise + outliers,
those outliers can be found and rejected with a sigma-clipping approach.
If the data contains some time-varying signal, this signal must be removed
before applying a sigma clip. This function removes the signal by applying
a single point difference.
The function computes a[i+1] - a[i], and sigma clips the result. Slowly
varying trends will have single point differences that are dominated by noise,
but outliers have strong first derivatives and will show up strongly in this
metric.
Inputs:
----------
    a
(1d numpy array) Array to be cleaned
nSigma
(float) Threshold to cut at. 5 is typically a good value for
most arrays found in practice.
Optional Inputs:
-------------------
maxIter
(int) Maximum number of iterations
initialClip
(1d boolean array) If an element of initialClip is set to True,
that value is treated as a bad value in the first iteration, and
not included in the computation of the mean and std.
Returns:
------------
1d numpy array. Where set to True, the corresponding element of y
is an outlier.
"""
    #Scatter in the single point difference is root 2 times larger
#than in initial lightcurve
threshold = nSigma/np.sqrt(2)
diff1 = np.roll(a, -1) - a
diff1[-1] = 0 #Don't trust the last value because a[-1] not necessarily equal to a
idx1 = sigmaClip(diff1, nSigma, maxIter, initialClip)
diff2 = np.roll(a, 1) - a
diff2[0] = 0
idx2 = sigmaClip(diff2, nSigma, maxIter, initialClip)
flags = idx1 & idx2
#This bit of magic ensures only single point outliers are marked,
#not strong trends in the data. It insists that the previous point
#in difference time series is an outlier in the opposite direction, otherwise
#the point is considered unflagged. This prevents marking transits as bad data.
outlierIdx = flags
outlierIdx &= np.roll(idx1, 1)
outlierIdx &= (np.roll(diff1, 1) * diff1 < 0)
return outlierIdx
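#Example (illustrative sketch, not part of the original module): masking single point
#outliers before further analysis, assuming `flux` is a 1d numpy array,
#   bad = singlePointDifferenceSigmaClip(flux, nSigma=4)
#   cleanFlux = flux[~bad]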
def sigmaClip(y, nSigma, maxIter=1e4, initialClip=None):
"""Iteratively find and remove outliers
    Find outliers by identifying all points more than **nSigma** from
    the mean value. Then recalculate the mean and std and repeat until
no more outliers found.
Inputs:
----------
y
(1d numpy array) Array to be cleaned
nSigma
(float) Threshold to cut at. 5 is typically a good value for
most arrays found in practice.
Optional Inputs:
-------------------
maxIter
(int) Maximum number of iterations
initialClip
(1d boolean array) If an element of initialClip is set to True,
that value is treated as a bad value in the first iteration, and
not included in the computation of the mean and std.
Returns:
------------
1d numpy array. Where set to True, the corresponding element of y
is an outlier.
"""
#import matplotlib.pyplot as mp
idx = initialClip
if initialClip is None:
idx = np.zeros( len(y), dtype=bool)
assert(len(idx) == len(y))
#x = np.arange(len(y))
#mp.plot(x, y, 'k.')
oldNumClipped = np.sum(idx)
for i in range(int(maxIter)):
mean = np.nanmean(y[~idx])
std = np.nanstd(y[~idx])
newIdx = np.fabs(y-mean) > nSigma*std
        newIdx = np.logical_or(idx, newIdx)
        #Completion of the truncated loop (assumed from the docstring): stop once an
        #iteration clips no new points
        if np.sum(newIdx) == oldNumClipped:
            break
        oldNumClipped = np.sum(newIdx)
        idx = newIdx
    return idx
#!/usr/bin/env python
"""
This file is part of IMSIS
Licensed under the MIT license:
http://www.opensource.org/licenses/MIT-license
This module contains image processing methods
"""
import os
import sys
import cv2 as cv
import matplotlib.gridspec as gridspec
import numpy as np
import scipy.misc
import scipy.stats as st  # needed by Image.gaussiankernel (st.norm.cdf)
from matplotlib import pyplot as plt
import numpy.random as random
from matplotlib.colors import hsv_to_rgb
from datetime import datetime
class Image(object):
@staticmethod
def load(filename, verbose=True):
"""Load image
Supported file formats: PNG, TIF, BMP
        note: images are loaded unchanged (cv.IMREAD_UNCHANGED), so bit depth and channels are preserved.
        :Parameters: filename, verbose=True
:Returns: image
"""
img = None
if (os.path.isfile(filename)):
img = cv.imread(filename, -1)
if (verbose == True):
print("load file ", filename, img.shape, img.dtype)
else:
print('Error, file does not exist. ', filename)
sys.exit()
try:
q = img.shape
except:
print('Error, File could not be read. ', filename)
sys.exit()
return img
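    # Example (illustrative sketch, not part of the original library), assuming 'input.png' exists on disk:
    #   img = Image.load('input.png')
    #   roi = Image.crop(img, 0, 0, 256, 256)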
@staticmethod
def crop_rectangle(img, rect):
"""Crop an image using rectangle shape as input [(x0,y0),(x1,y1)]
:Parameters: image, rectangle
:Returns: image
"""
if len(rect) > 0:
out = Image.crop(img, rect[0][0], rect[0][1], rect[1][0], rect[1][1])
else:
print("Error: rectangle not defined.")
out = img
return out
@staticmethod
def crop(img, x0, y0, x1, y1):
"""Crop an image using pixels at x0,y0,x1,y1
:Parameters: image, x0, y0, x1, y1
:Returns: image
"""
res = img[y0:y1, x0:x1] # Crop from y0:y1,x0:x1
# print("Cropped region: (" , x0,y0,x1,y1,")")
return res
@staticmethod
def crop_percentage(img, scale=1.0):
"""Crop an image centered
:Parameters: image, scale=1.0
:Returns: image
"""
center_x, center_y = img.shape[1] / 2, img.shape[0] / 2
width_scaled, height_scaled = img.shape[1] * scale, img.shape[0] * scale
left_x, right_x = center_x - width_scaled / 2, center_x + width_scaled / 2
top_y, bottom_y = center_y - height_scaled / 2, center_y + height_scaled / 2
img_cropped = img[int(top_y):int(bottom_y), int(left_x):int(right_x)]
return img_cropped
@staticmethod
def resize(img, factor=0.5):
"""Resize image
:Parameters: image, factor
:Returns: image
"""
small = cv.resize(img, (0, 0), fx=factor, fy=factor)
return small
@staticmethod
def _blur_edge(img, d=31):
"""blur edge
:Parameters: image, d
:Returns: image
"""
h, w = img.shape[:2]
img_pad = cv.copyMakeBorder(img, d, d, d, d, cv.BORDER_WRAP)
img_blur = cv.GaussianBlur(img_pad, (2 * d + 1, 2 * d + 1), -1)[d:-d, d:-d]
y, x = np.indices((h, w))
dist = np.dstack([x, w - x - 1, y, h - y - 1]).min(-1)
w = np.minimum(np.float32(dist) / d, 1.0)
return img * w + img_blur * (1 - w)
@staticmethod
def _motion_kernel(angle, d, sz=65):
"""determine motion kernel value
:Parameters: angle, d, size
:Returns: kernel
"""
kern = np.ones((1, d), np.float32)
c, s = np.cos(angle), np.sin(angle)
A = np.float32([[c, -s, 0], [s, c, 0]])
sz2 = sz // 2
A[:, 2] = (sz2, sz2) - np.dot(A[:, :2], ((d - 1) * 0.5, 0))
        kern = cv.warpAffine(kern, A, (sz, sz), flags=cv.INTER_CUBIC)
return kern
@staticmethod
def _defocus_kernel(d, sz=65):
"""determine defocus kernel value
:Parameters: d, size
:Returns: kernel
"""
kern = np.zeros((sz, sz), np.uint8)
cv.circle(kern, (sz, sz), d, 255, -1, cv.LINE_AA, shift=1)
kern = np.float32(kern) / 255.0
return kern
@staticmethod
def _image_stats(image):
# compute the mean and standard deviation of each channel
(l, a, b) = cv.split(image)
(lMean, lStd) = (l.mean(), l.std())
(aMean, aStd) = (a.mean(), a.std())
(bMean, bStd) = (b.mean(), b.std())
# return the color statistics
return (lMean, lStd, aMean, aStd, bMean, bStd)
@staticmethod
def save(img, fn):
"""Save image (PNG,TIF)
:Parameters: image, filename
"""
try:
if (os.path.dirname(fn)):
os.makedirs(os.path.dirname(fn), exist_ok=True) #mkdir if not empty
cv.imwrite(fn, img)
print("file saved. ", fn)
except:
print("Error: cannot save file {}".format(fn))
@staticmethod
def save_withuniquetimestamp(img):
"""Save PNG image with unique timestamp.
:Parameters: image
"""
path = "./output/"
os.makedirs(os.path.dirname(path), exist_ok=True)
sttime = datetime.now().strftime('Image_%Y%m%d%H%M%S')
fn = path + sttime + '.png'
print("file saved. ", fn)
cv.imwrite(fn, img)
'''
@staticmethod
def PSNR(img1, img2):
"""Return peaksignal to noise ratio
:Parameters: image1, image2
:Returns: float
"""
mse = np.mean((img1 - img2) ** 2)
if mse == 0:
return 100
PIXEL_MAX = 255.0
# print(np.sqrt(mse))
n = np.sqrt(mse)
# n=255/3.525
return 20 * np.log10(PIXEL_MAX / n)
'''
# implemented twice remove the 2nd one
@staticmethod
def cut(img, center=[0, 0], size=[0, 0]):
"""return a image cut out
:Parameters: image, center=[0, 0], size=[0, 0]
:Returns: image
"""
x0 = center[0] - round(size[0] * 0.5)
x1 = center[0] + round(size[0] * 0.5)
y0 = center[1] - round(size[1] * 0.5)
y1 = center[1] + round(size[1] * 0.5)
if x0 < 0:
x0 = 0
if y0 < 0:
y0 = 0
template = Image.crop(img, int(x0), int(y0), int(x1), int(y1))
return template
@staticmethod
def _multipleof2(number):
"""Rounds the given number to the nearest multiple of two."""
remainder = number % 2
if remainder > 1:
number += (2 - remainder)
else:
number -= remainder
return int(number)
@staticmethod
def subtract(img0, img1):
"""subtract 2 images
:Parameters: image1, image2
:Returns: image
"""
out = cv.subtract(img0, img1)
return out
'''
@staticmethod
def add(img0, img1):
"""add 2 images
:Parameters: image1, image2
:Returns: image
"""
out = cv.addWeighted(img0, 0.5, img1, 0.5, 0.0)
return out
'''
@staticmethod
def add(img0, img1, alpha=0.5):
"""add 2 images weighted (default alpha=0.5)
:Parameters: image1, image2, alpha
:Returns: image
"""
a = img0
b = img1
beta = 1 - alpha
        out = cv.addWeighted(a, alpha, b, beta, 0.0)
return out
@staticmethod
def new(height, width):
"""Create a new blank image
:Parameters: height,width
:Returns: image
"""
img = np.zeros((height, width), np.uint8)
return img
@staticmethod
def gaussiankernel(kernlen=21, nsig=3):
"""returns a 2D gaussian kernel
:Parameters: kernelsize, nsig
:Returns: image
"""
x = np.linspace(-nsig, nsig, kernlen + 1)
kern1d = np.diff(st.norm.cdf(x))
kern2d = np.outer(kern1d, kern1d)
return kern2d / kern2d.sum()
@staticmethod
def info(img):
"""get image properties
:Parameters: img
"""
print(img.shape)
print(img.size)
print(img.dtype)
@staticmethod
def unique_colours(image):
"""get number of unique colors in an image
:Parameters: img
"""
print(image.shape)
if (len(image.shape) == 3):
out = len(np.unique(image.reshape(-1, image.shape[2]), axis=0))
# b, g, r = cv.split(image)
# out_in_32U_2D = np.int32(b) << 16 + np.int32(g) << 8 + np.int32(r) # bit wise shift 8 for each channel.
# out_in_32U_1D = out_in_32U_2D.reshape(-1) # convert to 1D
# np.unique(out_in_32U_1D)
# out = len(np.unique(out_in_32U_1D))
else:
out_in_32U_2D = np.int32(image) # bit wise shift 8 for each channel.
out_in_32U_1D = out_in_32U_2D.reshape(-1) # convert to 1D
np.unique(out_in_32U_1D)
out = len(np.unique(out_in_32U_1D))
print(out)
return out
@staticmethod
def video_to_imagesondisk(file_in='video.avi', path_out='images'):
"""video to image
:Parameters: video_filename
:Returns: images
"""
video_file = file_in
output_folder = path_out
vidcap = cv.VideoCapture(video_file)
success, image = vidcap.read()
count = 0
success = True
while success:
fn = output_folder + "/" + "frame%d.png" % count
cv.imwrite(fn, image) # save frame as JPEG file
success, image = vidcap.read()
print('Read a new frame: ', success, fn)
count += 1
print("ready.")
@staticmethod
def imagesfromdisk_to_video(path_in, file_out='video.avi', framerate=15):
"""images from file to video
:Parameters: path with list of frames
:Returns: video
"""
image_folder = path_in
video_name = file_out
output_folder = "output"
fn = image_folder + "/" + output_folder + "/"
print(fn)
os.makedirs(os.path.dirname(fn), exist_ok=True)
images = [img for img in os.listdir(image_folder) if (img.endswith(".tif") or img.endswith(".png"))]
frame = cv.imread(os.path.join(image_folder, images[0]))
height, width, layers = frame.shape
video = cv.VideoWriter(fn + video_name, 0, framerate, (width, height))
for image in images:
video.write(cv.imread(os.path.join(image_folder, image)))
cv.destroyAllWindows()
video.release()
'''
@staticmethod
def zoom(image0, factor=2):
"""
zoom image, resize with factor n, crop in center to same size as original image
:Parameters: image0, zoom factor
:Returns: image
"""
h = image0.shape[0]
w = image0.shape[1]
img = Image.resize(image0,factor)
x0 = int(factor*w/4)
y0 = int(factor*h/4)
x1 = x0+w
y1 = y0+h
print(x0,y0,x1,y1,w,h,img.shape[0],img.shape[1])
img = Image.crop(img,x0,y0,x1,y1)
return img
'''
@staticmethod
def zoom(image0, factor=2, cx=0.5, cy=0.5):
"""
zoom image, resize with factor n, crop in center to same size as original image
:Parameters: image0, zoom factor
:Returns: image
"""
h = image0.shape[0]
w = image0.shape[1]
img = Image.resize(image0, factor)
x0 = int(factor * w * cx * 0.5)
y0 = int(factor * h * cy * 0.5)
x1 = x0 + w
y1 = y0 + h
# print(x0, y0, x1, y1, w, h, img.shape[0], img.shape[1])
img = Image.crop(img, x0, y0, x1, y1)
return img
class Process:
@staticmethod
def directionalsharpness(img, ksize=-1):
"""
DirectionalSharpness
            Measure sharpness in X and Y separately
Note: Negative slopes are missed when converting to unaryint8, therefore convert to float
:Parameters: image, kernel
:Returns: gradientx , gradienty, gradientxy, theta
"""
sobelx64f = cv.Sobel(img, cv.CV_64F, 1, 0, ksize=ksize)
sobely64f = cv.Sobel(img, cv.CV_64F, 0, 1, ksize=ksize)
grad = np.power(np.power(sobelx64f, 2.0) + np.power(sobely64f, 2.0), 0.5)
theta = np.arctan2(sobely64f, sobelx64f)
Gx = np.absolute(sobelx64f)
Gy = np.absolute(sobely64f)
mx = cv.mean(Gx)[0]
my = cv.mean(Gy)[0]
return mx, my, grad, theta
@staticmethod
def gradient_image(img, kx=11, ky=3):
"""Create a gradient image
Method used: gradient by bi-directional sobel filter
:Parameters: image, blurkernelx, blurkernely
:Returns: image
"""
# Calculate gradient
gx = cv.Sobel(img, cv.CV_32F, 1, 0, ksize=1)
gy = cv.Sobel(img, cv.CV_32F, 0, 1, ksize=1)
# mag, angle = cv.cartToPolar(gx, gy, angleInDegrees=True)
blurredgx = cv.GaussianBlur(gx, (kx, ky), 1)
blurredgy = cv.GaussianBlur(gy, (kx, ky), 1)
mag, angle = cv.cartToPolar(blurredgx, blurredgy)
return mag, angle
@staticmethod
def gradient_image_nonmaxsuppressed(img, blur=5, threshold=40):
"""Apply non maximum suppressed gradient filter sequence
threshold not used??
:Parameters: image, blur=5, threshold=40
:Returns: image, angle
"""
def nonmaxsuppression(im, grad):
# Non-maximum suppression
gradSup = grad.copy()
for r in range(im.shape[0]):
for c in range(im.shape[1]):
# Suppress pixels at the image edge
if r == 0 or r == im.shape[0] - 1 or c == 0 or c == im.shape[1] - 1:
gradSup[r, c] = 0
continue
tq = thetaQ[r, c] % 4
if tq == 0: # 0 is E-W (horizontal)
if grad[r, c] <= grad[r, c - 1] or grad[r, c] <= grad[r, c + 1]:
gradSup[r, c] = 0
if tq == 1: # 1 is NE-SW
if grad[r, c] <= grad[r - 1, c + 1] or grad[r, c] <= grad[r + 1, c - 1]:
gradSup[r, c] = 0
if tq == 2: # 2 is N-S (vertical)
if grad[r, c] <= grad[r - 1, c] or grad[r, c] <= grad[r + 1, c]:
gradSup[r, c] = 0
if tq == 3: # 3 is NW-SE
if grad[r, c] <= grad[r - 1, c - 1] or grad[r, c] <= grad[r + 1, c + 1]:
gradSup[r, c] = 0
return gradSup
img = Image.Convert.toGray(img)
im = np.array(img, dtype=float) # Convert to float to prevent clipping values
# Gaussian Blur
im2 = cv.GaussianBlur(im, (blur, blur), 0)
# Find gradients
im3h = cv.filter2D(im2, -1, np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]]))
im3v = cv.filter2D(im2, -1, np.array([[1, 2, 1], [0, 0, 0], [-1, -2, -1]]))
# Get gradient and direction
grad = np.power(np.power(im3h, 2.0) + np.power(im3v, 2.0), 0.5)
theta = np.arctan2(im3v, im3h)
            thetaQ = (np.round(theta * (4.0 / np.pi)) + 4) % 4  # Quantize direction into 4 sectors (E-W, NE-SW, N-S, NW-SE)
gradSup = nonmaxsuppression(im, grad)
return gradSup, thetaQ
@staticmethod
def nonlocalmeans(img, h=10, templatewindowsize=7, searchwindowsize=21):
"""Apply a non-local-means filter with filtering strength (h), template windowsize (blocksize), searchwindowsize
:Parameters: image, h=10, templatewindowsize=7, searchwindowsize=21
:Returns: image
"""
# img = cv.pyrDown(img)
dst = cv.fastNlMeansDenoising(img, None, h, templatewindowsize, searchwindowsize)
return dst
@staticmethod
def deconvolution_wiener(img, d=3, noise=11):
"""Apply Wiener deconvolution
grayscale images only
:Parameters: image, d, noise
:Returns: kernel
"""
img = Image.Convert.toGray(img)
noise = 10 ** (-0.1 * noise)
img = np.float32(img) / 255.0
IMG = cv.dft(img, flags=cv.DFT_COMPLEX_OUTPUT)
psf = Image._defocus_kernel(d)
psf /= psf.sum()
psf_pad = np.zeros_like(img)
kh, kw = psf.shape
psf_pad[:kh, :kw] = psf
PSF = cv.dft(psf_pad, flags=cv.DFT_COMPLEX_OUTPUT, nonzeroRows=kh)
PSF2 = (PSF ** 2).sum(-1)
iPSF = PSF / (PSF2 + noise)[..., np.newaxis]
RES = cv.mulSpectrums(IMG, iPSF, 0)
res = cv.idft(RES, flags=cv.DFT_SCALE | cv.DFT_REAL_OUTPUT)
res = np.roll(res, -kh // 2, 0)
res = np.roll(res, -kw // 2, 1)
return res
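        # Example (illustrative sketch, not part of the original library): a mild Wiener
        # deconvolution of a defocused grayscale image loaded with Image.load:
        #   restored = Image.Process.deconvolution_wiener(img, d=3, noise=11)
        # Note that the result is a float image scaled to roughly [0, 1].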
@staticmethod
def median(image, kernel=5):
"""Apply a median filter
:Parameters: image
:Returns: image
"""
out = cv.medianBlur(image, kernel)
return out
@staticmethod
def cannyedge_auto(image, sigma=0.33):
"""Apply a Canny Edge filter automatically
:Parameters: image, sigma
:Returns: image
"""
# compute the median of the single channel pixel intensities
v = np.median(image)
# apply automatic Canny edge detection using the computed median
lower = int(max(0, (1.0 - sigma) * v))
upper = int(min(255, (1.0 + sigma) * v))
edged = cv.Canny(image, lower, upper)
return edged
# smooth, threshold
@staticmethod
def gaussian_blur(img, smooth=3):
"""Gaussian blur image with kernel n
:Parameters: image, kernel
:Returns: image
"""
# img = cv.pyrDown(img)
imout = cv.GaussianBlur(img, (smooth, smooth), 0)
return imout
@staticmethod
def unsharp_mask(img, kernel_size=5, sigma=1.0, amount=1.0, threshold=0):
"""Unsharp mask filter
:Parameters: image, kernel_size=5, sigma=1.0, amount=1.0, threshold=0
:Returns: image
"""
            blurred = cv.GaussianBlur(img, (kernel_size, kernel_size), sigma)
sharpened = float(amount + 1) * img - float(amount) * blurred
sharpened = np.maximum(sharpened, np.zeros(sharpened.shape))
sharpened = np.minimum(sharpened, 255 * np.ones(sharpened.shape))
sharpened = sharpened.round().astype(np.uint8)
if threshold > 0:
low_contrast_mask = np.absolute(img - blurred) < threshold
np.copyto(sharpened, img, where=low_contrast_mask)
return sharpened
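        # Example (illustrative sketch, not part of the original library): moderate sharpening
        # of a grayscale or color image:
        #   sharp = Image.Process.unsharp_mask(img, kernel_size=5, sigma=1.0, amount=1.0)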
@staticmethod
def FFT(img):
"""Apply a fourier transform
generate a discrete fourier transform shift matrix and a magnitude spectrum image for viewing
:Parameters: image
:Returns: dft_shift, specimage
"""
# img = Image.Convert.toGray(img)
# do dft saving as complex output
dft = np.fft.fft2(img, axes=(0, 1))
# apply shift of origin to center of image
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
# magnitude_spectrum[np.isneginf(magnitude_spectrum)] = 0
return dft_shift, spec
@staticmethod
def IFFT(fft_img):
"""Apply an inverse fourier transform
:Parameters: image_fft
:Returns: image
"""
back_ishift = np.fft.ifftshift(fft_img)
img_back = np.fft.ifft2(back_ishift, axes=(0, 1))
img_back = np.abs(img_back).clip(0, 255).astype(np.uint8)
return img_back
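        # Example (illustrative sketch, not part of the original library): a forward/inverse
        # FFT round trip on a grayscale image:
        #   dft_shift, spectrum = Image.Process.FFT(img)
        #   restored = Image.Process.IFFT(dft_shift)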
@staticmethod
def FD_bandpass_filter(img, D0=5, w=10, bptype=0):
gray = Image.Convert.toGray(img)
kernel = Image.FilterKernels.ideal_bandpass_kernel(gray, D0, w)
if bptype == 1:
kernel = Image.FilterKernels.gaussian_bandpass_kernel(gray, D0, w)
elif bptype == 2:
kernel = Image.FilterKernels.butterworth_bandpass_kernel(gray, D0, w)
gray = np.float64(gray)
gray_fft = np.fft.fft2(gray)
gray_fftshift = np.fft.fftshift(gray_fft)
dst_filtered = np.multiply(kernel, gray_fftshift)
dst_ifftshift = np.fft.ifftshift(dst_filtered)
dst_ifft = np.fft.ifft2(dst_ifftshift)
dst = np.abs(np.real(dst_ifft))
dst = np.clip(dst, 0, 255)
out = np.uint8(dst)
return out, kernel
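        # Example (illustrative sketch, not part of the original library): frequency-domain
        # band-pass filtering; bptype=0 selects the ideal kernel, 1 the Gaussian kernel and
        # 2 the Butterworth kernel:
        #   filtered, kernel = Image.Process.FD_bandpass_filter(img, D0=5, w=10, bptype=1)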
'''
def FFT_highpass(img, maskradius=8, maskblur=19):
dft = np.fft.fft2(img, axes=(0, 1))
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
radius = maskradius
mask = np.zeros_like(img, dtype=np.float32)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv.circle(mask, (cx, cy), radius, (1, 1, 1), -1)[0]
mask = 1 - mask
mask = 1 + 0.5 * mask # high boost filter (sharpening) = 1 + fraction of high pass filter
if maskblur > 0:
mask2 = cv.GaussianBlur(mask, (maskblur, maskblur), 0)
dft_shift_masked2 = np.multiply(dft_shift, mask2)
back_ishift_masked2 = np.fft.ifftshift(dft_shift_masked2)
img_filtered2 = np.fft.ifft2(back_ishift_masked2, axes=(0, 1))
out = np.abs(img_filtered2).clip(0, 255).astype(np.uint8)
else:
dft_shift_masked = np.multiply(dft_shift, mask)
back_ishift_masked = np.fft.ifftshift(dft_shift_masked)
img_filtered = np.fft.ifft2(back_ishift_masked, axes=(0, 1))
out = np.abs(img_filtered).clip(0, 255).astype(np.uint8)
mask2= mask
return out, mask2
def FFT_lowpass(img, maskradius=8, maskblur=19):
dft = np.fft.fft2(img, axes=(0, 1))
dft_shift = np.fft.fftshift(dft)
mag = np.abs(dft_shift)
spec = np.log(mag) / 20
radius = maskradius
mask = np.zeros_like(img, dtype=np.float32)
cy = mask.shape[0] // 2
cx = mask.shape[1] // 2
cv.circle(mask, (cx, cy), radius, (255, 255, 255), -1)[0]
if maskblur > 0:
mask2 = cv.GaussianBlur(mask, (maskblur, maskblur), 0)
dft_shift_masked2 = np.multiply(dft_shift, mask2)/ 255
back_ishift_masked2 = np.fft.ifftshift(dft_shift_masked2)
img_filtered2 = np.fft.ifft2(back_ishift_masked2, axes=(0, 1))
out = np.abs(img_filtered2).clip(0, 255).astype(np.uint8)
else:
dft_shift_masked = np.multiply(dft_shift, mask)/ 255
back_ishift_masked = np.fft.ifftshift(dft_shift_masked)
img_filtered = np.fft.ifft2(back_ishift_masked, axes=(0, 1))
out = np.abs(img_filtered).clip(0, 255).astype(np.uint8)
mask2 = mask
return out,mask2
'''
'''
@staticmethod
def FFT_lowpass(img, radius=16, lpType=2, n=2):
"""Lowpass filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, radius, lptype, n
:Returns: image, mask
"""
def createLPFilter(shape, center, radius, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.power(c, 2.0) + np.power(r, 2.0)
lpFilter_matrix = np.zeros((rows, cols), np.float32)
if lpType == 0: # ideal low-pass filter
lpFilter = np.copy(d)
lpFilter[lpFilter < pow(radius, 2.0)] = 1
lpFilter[lpFilter >= pow(radius, 2.0)] = 0
elif lpType == 1: # Butterworth low-pass filter
lpFilter = 1.0 / (1 + np.power(np.sqrt(d) / radius, 2 * n))
elif lpType == 2: # Gaussian low pass filter
lpFilter = np.exp(-d / (2 * pow(radius, 2.0)))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createLPFilter(dft_shift.shape, (cx, cy), radius=radius, lpType=lpType, n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
@staticmethod
def FFT_highpass(img, radius=16, lpType=2, n=2):
"""Highpass filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, radius, lptype, n
:Returns: image, mask
"""
def createHPFilter(shape, center, radius, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.power(c, 2.0) + np.power(r, 2.0)
lpFilter_matrix = np.zeros((rows, cols), np.float32)
if lpType == 0: # Ideal high pass filter
lpFilter = np.copy(d)
lpFilter[lpFilter < pow(radius, 2.0)] = 0
lpFilter[lpFilter >= pow(radius, 2.0)] = 1
elif lpType == 1: # Butterworth Highpass Filters
lpFilter = 1.0 - 1.0 / (1 + np.power(np.sqrt(d) / radius, 2 * n))
elif lpType == 2: # Gaussian Highpass Filter
lpFilter = 1.0 - np.exp(-d / (2 * pow(radius, 2.0)))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createHPFilter(dft_shift.shape, (cx, cy), radius=radius, lpType=lpType, n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
@staticmethod
def FFT_bandpass(img, bandcenter=32, bandwidth=16, lpType=2, n=2):
"""Bandpass filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, bandcenter, bandwidth, lptype, n
:Returns: image, mask
"""
def createBPFilter(shape, center, bandCenter, bandWidth, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.sqrt(np.power(c, 2.0) + np.power(r, 2.0))
lpFilter_matrix = np.zeros((rows,cols), np.float32)
if lpType == 0: # Ideal bandpass filter
lpFilter = np.copy(d)
lpFilter[:, :] = 1
lpFilter[d > (bandCenter + bandWidth / 2)] = 0
lpFilter[d < (bandCenter - bandWidth / 2)] = 0
elif lpType == 1: # Butterworth bandpass filter
if bandCenter ==0:
bandCenter=1
lpFilter = 1.0 - 1.0 / (1 + np.power(d * bandWidth / (d - pow(bandCenter, 2)), 2 * n))
elif lpType == 2: # Gaussian bandpass filter
if bandWidth ==0:
bandWidth=1
lpFilter = np.exp(-pow((d - pow(bandCenter, 2)) / (d * bandWidth), 2))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createBPFilter(dft_shift.shape, (cx, cy), bandCenter=bandcenter, bandWidth=bandwidth, lpType=lpType,
n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
#print(mask.dtype,dft_shift.dtype)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
@staticmethod
def FFT_bandstop(img, bandcenter=32, bandwidth=16, lpType=2, n=2):
"""Bandstop filter in frequency domain
radius kernel size
lpType: 0-ideal, 1 butterworth, 2 gaussian
:Parameters: image, bandcenter, bandwidth, lptype, n
:Returns: image, mask
"""
def createBRFilter(shape, center, bandCenter, bandWidth, lpType=2, n=2):
rows, cols = shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= center[0]
r -= center[1]
d = np.sqrt(np.power(c, 2.0) + np.power(r, 2.0))
lpFilter_matrix = np.zeros((rows, cols), np.float32)
if lpType == 0: # Ideal band stop filter
lpFilter = np.copy(d)
lpFilter[:, :] = 0
lpFilter[d > (bandCenter + bandWidth / 2)] = 1
lpFilter[d < (bandCenter - bandWidth / 2)] = 1
elif lpType == 1: # Butterworth band stop filter
lpFilter = 1.0 / (1 + np.power(d * bandWidth / (d - pow(bandCenter, 2)), 2 * n))
elif lpType == 2: # Gaussian band stop filter
lpFilter = 1 - np.exp(-pow((d - pow(bandCenter, 2)) / (d * bandWidth), 2))
lpFilter_matrix[:, :] = lpFilter
return lpFilter_matrix
dft_shift, imgfft = Image.Process.FFT(img)
cy = dft_shift.shape[0] // 2
cx = dft_shift.shape[1] // 2
mask = createBRFilter(dft_shift.shape, (cx, cy), bandCenter=bandcenter, bandWidth=bandwidth, lpType=lpType,
n=n)
if len(img.shape) == 3:
mask = Image.Convert.toRGB(mask)
ifft = np.multiply(dft_shift, mask)
out = Image.Process.IFFT(ifft)
return out, mask
'''
        @staticmethod
        def pencilsketch(img):
"""Apply a pencil sketch filter to a grayscale image
:Parameters: image
:Returns: image
"""
def dodgeV2(image, mask):
return cv.divide(image, 255 - mask, scale=256)
def burnV2(image, mask):
return 255 - cv.divide(255 - image, 255 - mask, scale=256)
img_gray_inv = 255 - img
img_blur = cv.GaussianBlur(img_gray_inv, ksize=(21, 21),
sigmaX=0, sigmaY=0)
out = dodgeV2(img, img_blur)
return out
        @staticmethod
        def sepia(img):
"""Apply sepia filter
:Parameters: image
:Returns: image
"""
res = img.copy()
res = cv.cvtColor(res, cv.COLOR_BGR2RGB) # converting to RGB as sepia matrix is for RGB
res = np.array(res, dtype=np.float64)
res = cv.transform(res, np.matrix([[0.393, 0.769, 0.189], [0.349, 0.686, 0.168], [0.272, 0.534, 0.131]]))
res[np.where(res > 255)] = 255 # clipping values greater than 255 to 255
res = np.array(res, dtype=np.uint8)
res = cv.cvtColor(res, cv.COLOR_RGB2BGR)
return res
@staticmethod
def gaussian_noise(img, prob=0.25):
""" Add gaussian noise
:Parameters: image, sigma=0.25
:Returns: image
"""
            noise_img = img.astype(np.float64)
stddev = prob * 100.0
noise = np.random.randn(*img.shape) * stddev
noise_img += noise
noise_img = np.clip(noise_img, 0, 255).astype(np.uint8)
return noise_img
@staticmethod
def salt_and_pepper_noise(image, prob=0.01):
"""Add salt and pepper noise
:Parameters: image, sigma=0.01
:Returns: image
"""
output = np.zeros(image.shape, np.uint8)
thres = 1 - prob
for i in range(image.shape[0]):
for j in range(image.shape[1]):
rdn = random.random()
if rdn < prob:
output[i][j] = 0
elif rdn > thres:
output[i][j] = 255
else:
output[i][j] = image[i][j]
return output
@staticmethod
def poisson_noise(img, prob=0.25):
""" Induce poisson noise
:Parameters: image, lambda=0.25
:Returns: image
"""
# Noise range from 0 to 100
"""
seed = 42
data = np.float32(img / 255) #convert to float to add poisson noise
np.random.seed(seed=seed)
out = np.random.poisson(data * 256) / 256.
out = np.uint8(out*255)
out = np.clip(out, 0, 255).astype(np.uint8) #convert back to UINT8
"""
# data = np.float32(img) #convert to float to add poisson noise
            data = img.astype(np.float64)
noise = prob
# peak = 256.0-noise*(256-32)
peak = 256.0 - noise * (256)
# print(noise,peak)
noise_image = np.random.poisson(data / 255.0 * peak) / peak * 255
out = np.clip(noise_image, 0, 255).astype(np.uint8)
return out
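        # Example (illustrative sketch, not part of the original library): adding synthetic
        # noise, e.g. for testing denoising filters:
        #   noisy = Image.Process.gaussian_noise(img, prob=0.25)
        #   noisy = Image.Process.salt_and_pepper_noise(noisy, prob=0.01)
        #   noisy = Image.Process.poisson_noise(noisy, prob=0.25)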
@staticmethod
def k_means(image, k=3):
""" k_means clustering
:Parameters: image, k=3
:Returns: image
"""
pixel_vals = image.reshape((-1, 3))
pixel_vals = np.float32(pixel_vals)
criteria = (cv.TERM_CRITERIA_EPS + cv.TERM_CRITERIA_MAX_ITER, 100, 0.85)
retval, labels, centers = cv.kmeans(pixel_vals, k, None, criteria, 10, cv.KMEANS_RANDOM_CENTERS)
centers = np.uint8(centers)
segmented_data = centers[labels.flatten()]
segmented_image = segmented_data.reshape((image.shape))
return segmented_image
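        # Example (illustrative sketch, not part of the original library): segmenting a
        # 3-channel (BGR) image into 3 intensity clusters:
        #   segmented = Image.Process.k_means(img, k=3)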
class Falsecolor:
@staticmethod
def falsecolor_jet(img):
"""False color jet
:Parameters: image
:Returns: image
"""
im_color = cv.applyColorMap(img, cv.COLORMAP_JET)
return im_color
@staticmethod
def falsecolor_rainbow(img):
"""False color rainbow
:Parameters: image
:Returns: image
"""
im_color = cv.applyColorMap(img, cv.COLORMAP_RAINBOW)
return im_color
@staticmethod
def falsecolor_transfer(source, target):
""" convert RGB to LAB color space
:Parameters: source_image, target_image
:Returns: image
"""
# convert the images from the RGB to L*ab* color space, being
            # sure to utilize the floating point data type (note: OpenCV
# expects floats to be 32-bit, so use that instead of 64-bit)
source = cv.cvtColor(source, cv.COLOR_GRAY2BGR)
target = cv.cvtColor(target, cv.COLOR_GRAY2BGR)
source = cv.cvtColor(source, cv.COLOR_BGR2LAB).astype("float32")
target = cv.cvtColor(target, cv.COLOR_BGR2LAB).astype("float32")
# compute color statistics for the source and target images
            (lMeanSrc, lStdSrc, aMeanSrc, aStdSrc, bMeanSrc, bStdSrc) = Image._image_stats(source)
            (lMeanTar, lStdTar, aMeanTar, aStdTar, bMeanTar, bStdTar) = Image._image_stats(target)
# subtract the means from the target image
(l, a, b) = cv.split(target)
l -= lMeanTar
a -= aMeanTar
b -= bMeanTar
# scale by the standard deviations
l = (lStdTar / lStdSrc) * l
a = (aStdTar / aStdSrc) * a
b = (bStdTar / bStdSrc) * b
# add in the source mean
l += lMeanSrc
a += aMeanSrc
b += bMeanSrc
# clip the pixel intensities to [0, 255] if they fall outside
# this range
l = np.clip(l, 0, 255)
a = np.clip(a, 0, 255)
b = np.clip(b, 0, 255)
# merge the channels together and convert back to the RGB color
# space, being sure to utilize the 8-bit unsigned integer data
# type
transfer = cv.merge([l, a, b])
transfer = cv.cvtColor(transfer.astype("uint8"), cv.COLOR_LAB2BGR)
# return the color transferred image
return transfer
@staticmethod
def falsecolor_merge2channels(img0, img1):
"""Merge 2 images using 2 colors
:Parameters: image1, image2
:Returns: image
"""
img0 = Image.Convert.toGray(img0)
img1 = Image.Convert.toGray(img1)
img0 = Image.Adjust.histostretch_clahe(img0)
img1 = Image.Adjust.histostretch_clahe(img1)
img0 = cv.cvtColor(img0, cv.COLOR_GRAY2BGR)
img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
r0, g0, b0 = cv.split(img0)
r1, g1, b1 = cv.split(img1)
img3 = cv.merge([b1, g1, r0])
return img3
@staticmethod
def falsecolor_merge3channels(img0, img1, img2):
"""Merge 3 images using 3 colors
:Parameters: image1, image2, image3
:Returns: image
"""
img0 = Image.Adjust.histostretch_clahe(img0)
img1 = Image.Adjust.histostretch_clahe(img1)
img2 = Image.Adjust.histostretch_clahe(img2)
img0 = cv.cvtColor(img0, cv.COLOR_GRAY2BGR)
img1 = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
img2 = cv.cvtColor(img2, cv.COLOR_GRAY2BGR)
r0, g0, b0 = cv.split(img0)
r1, g1, b1 = cv.split(img1)
r2, g2, b2 = cv.split(img2)
img3 = cv.merge([b2, g1, r0])
return img3
class Adjust:
@staticmethod
def invert(img):
"""Invert image
:Parameters: image
:Returns: image
"""
img2 = cv.bitwise_not(img)
return img2
@staticmethod
def squared_and_bin(img):
"""First make image squared followed by binning to 256 pixels
:Parameters: image
:Returns: image
"""
img0 = Image.Tools.squared(img, leadingaxislargest=False)
scale = 256 / img0.shape[1]
img0 = cv.resize(img0, None, None, scale, scale, interpolation=cv.INTER_AREA)
return img0
@staticmethod
def bin(img, shrinkfactor=2):
"""bin image with shrinkfactor (default shrinkfactor= 2)
:Parameters: image, shrinkfactor
:Returns: image
"""
scale = 1 / shrinkfactor
img0 = cv.resize(img, None, None, scale, scale, interpolation=cv.INTER_AREA)
return img0
@staticmethod
def histogram(img):
"""create histogram of an image as an image
:Parameters: image
:Output: histogram image
"""
w = img.shape[1]
h = img.shape[0]
if (img.dtype == np.uint8):
rng = 256
else:
rng = 65535
# bitdepth = img.dtype
hist, bins = np.histogram(img.flatten(), 256, [0, rng])
cdf = hist.cumsum()
cdf_normalized = cdf * hist.max() / cdf.max() # this line not necessary.
fig = plt.figure()
plt.plot(cdf_normalized, color='b')
plt.hist(img.flatten(), 256, [0, rng], color='0.30')
plt.axis("off") # turns off axes
fig.tight_layout()
fig.canvas.draw()
image_from_plot = np.frombuffer(fig.canvas.tostring_rgb(), dtype=np.uint8)
out = image_from_plot.reshape(fig.canvas.get_width_height()[::-1] + (3,))
plt.close()
# cv.imwrite("test.png",out)
return out
@staticmethod
def histostretch_clahe(img):
"""Apply a CLAHE (Contrast Limited Adaptive Histogram Equalization) filter on a grayscale image
supports 8 and 16 bit images.
:Parameters: image
:Returns: image
"""
# img = cv.pyrDown(img)
if (len(img.shape) < 3):
clahe = cv.createCLAHE(clipLimit=2.0, tileGridSize=(8, 8))
cl1 = clahe.apply(img)
img = cl1
else:
clahe = cv.createCLAHE(clipLimit=3., tileGridSize=(8, 8))
lab = cv.cvtColor(img, cv.COLOR_BGR2LAB) # convert from BGR to LAB color space
l, a, b = cv.split(lab) # split on 3 different channels
l2 = clahe.apply(l) # apply CLAHE to the L-channel
lab = cv.merge((l2, a, b)) # merge channels
img = cv.cvtColor(lab, cv.COLOR_LAB2BGR) # convert from LAB to BGR
return img
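        # Example (illustrative sketch, not part of the original library): local contrast
        # enhancement followed by a full histogram stretch:
        #   img2 = Image.Adjust.histostretch_clahe(img)
        #   img2 = Image.Adjust.histostretch_normalize(img2)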
'''
@staticmethod
def histostretch_equalized(img):
"""Apply a equalize histogram filter (8-bit images only!)
:Parameters: image
:Returns: image
"""
# img = cv.pyrDown(img)
equ = cv.equalizeHist(img)
return equ
'''
@staticmethod
def histostretch_equalized(img):
"""Apply a equalize histogram filter
8 and 16 bit
:Parameters: image
:Returns: image
#https://github.com/torywalker/histogram-equalizer/blob/master/HistogramEqualization.ipynb
"""
def get_histogram(image, bins):
# array with size of bins, set to zeros
histogram = np.zeros(bins)
# loop through pixels and sum up counts of pixels
for pixel in image:
histogram[pixel] += 1
# return our final result
return histogram
# create our cumulative sum function
def cumsum(a):
a = iter(a)
b = [next(a)]
for i in a:
b.append(b[-1] + i)
return np.array(b)
if (img.dtype == np.uint16):
flat = img.flatten()
hist = get_histogram(flat, 65536)
# plt.plot(hist)
#
cs = cumsum(hist)
# re-normalize cumsum values to be between 0-255
# numerator & denomenator
nj = (cs - cs.min()) * 65535
N = cs.max() - cs.min()
# re-normalize the cdf
cs = nj / N
cs = cs.astype('uint16')
img_new = cs[flat]
# plt.hist(img_new, bins=65536)
# plt.show(block=True)
img_new = np.reshape(img_new, img.shape)
else:
if len(img.shape) == 2:
img_new = cv.equalizeHist(img)
else:
img_yuv = cv.cvtColor(img, cv.COLOR_BGR2YUV) # equalize the histogram of the Y channel
img_yuv[:, :, 0] = cv.equalizeHist(img_yuv[:, :, 0]) # convert the YUV image back to RGB format
img_new = cv.cvtColor(img_yuv, cv.COLOR_YUV2BGR)
return img_new
@staticmethod
def histostretch_normalize(img):
"""Normalize histogram
8bit between 0 and 255
16bit between 0 and 65535
:Parameters: image
:Returns: image
"""
# img = cv.pyrDown(img)
if (img.dtype == np.uint16):
norm = cv.normalize(img, None, alpha=0, beta=65535, norm_type=cv.NORM_MINMAX, dtype=cv.CV_16U)
else:
norm = cv.normalize(img, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_8U)
return norm
# smooth, threshold
@staticmethod
def threshold(img, thresh=128):
"""Applies a fixed-level threshold to each array element. [0-255]
:Parameters: image, threshold
:Returns: image
"""
ret, imout = cv.threshold(img, thresh, 255, cv.THRESH_BINARY)
return imout
@staticmethod
def normalize(img):
"""Normalize image. [0-255]
:Parameters: image
:Returns: image
"""
imout = cv.normalize(img, None, alpha=0, beta=255, norm_type=cv.NORM_MINMAX, dtype=cv.CV_64F)
return imout
@staticmethod
def thresholdrange(img, threshmin=128, threshmax=255):
"""threshold image between min and max value
:Parameters: image, thresholdmin, thresholdmax
:Returns: image
"""
imout = cv.inRange(img, threshmin, threshmax)
return imout
@staticmethod
def threshold_otsu(img):
"""Applies an automatic threshold using the Otsu method for thresholding
:Parameters: image
:Returns: image
"""
ret, imout = cv.threshold(img, 0, 255, cv.THRESH_OTSU)
return imout
@staticmethod
def adjust_contrast_brightness(img, contrast=0, brightness=0):
"""adjust contrast and brightness
contrast range: -127..127
brightness range: -255..255
:Parameters: image
:Returns: image
"""
            table = np.array([i * (contrast / 127 + 1) - contrast + brightness
                              for i in range(0, 256)]).clip(0, 255).astype('uint8')
# if len(img.shape) == 3:
# out = cv.LUT(img, table)[:, :, np.newaxis]
# else:
out = cv.LUT(img, table)
return out
@staticmethod
def adjust_gamma(image, gamma=1.0):
"""adjust gamma [0..3.0], default = 1
gamma cannot be 0
:Parameters: image, gamma=1.0
:Returns: image
"""
invGamma = 1.0 / gamma
table = np.array([((i / 255.0) ** invGamma) * 255
for i in np.arange(0, 256)]).astype("uint8")
# apply gamma correction using the lookup table
return cv.LUT(image, table)
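        # Example (illustrative sketch, not part of the original library): brightening the
        # mid-tones and adding a little contrast on an 8-bit image:
        #   img2 = Image.Adjust.adjust_gamma(img, gamma=1.5)
        #   img2 = Image.Adjust.adjust_contrast_brightness(img2, contrast=20, brightness=10)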
@staticmethod
def adjust_HSV(img, hval, sval, vval):
"""adjust Hue [0..179], Saturation [-255..255], lightness [-255..255]
:Parameters: image, hue, saturation, lightness
:Returns: image
"""
img = Image.Convert.toRGB(img) # changing channels for nicer image
hsv = Image.Convert.BGRtoHSV(img)
h = hsv[:, :, 0]
s = hsv[:, :, 1]
v = hsv[:, :, 2]
h = np.where(h <= 255.0 - hval, h + hval, 255)
if (sval > 0):
s = np.where(s <= 255.0 - sval, s + sval, 255)
else:
s = (s * ((255.0 + sval) / 255.0))
if (vval > 0):
v = np.where(v <= 255.0 - vval, v + vval, 255)
else:
v = v * ((255.0 + vval) / 255.0)
hsv[:, :, 0] = h
hsv[:, :, 1] = s
hsv[:, :, 2] = v
img1 = Image.Convert.HSVtoBGR(hsv)
return img1
@staticmethod
def adjust_HSL(img, hval, sval, lval):
"""adjust Hue [0..179], Saturation [0..255], lightness [0..255]
            The definition HSL is most commonly used; occasionally this is called HLS
:Parameters: image, hue, saturation, lightness
:Returns: image
"""
img = Image.Convert.toRGB(img) # changing channels for nicer image
hls = cv.cvtColor(img, cv.COLOR_RGB2HLS)
h = hls[:, :, 0]
l = hls[:, :, 1]
s = hls[:, :, 2]
h = np.where(h <= 255.0 - hval, h + hval, 255)
if (sval > 0):
s = np.where(s <= 255.0 - sval, s + sval, 255)
else:
s = (s * ((255.0 + sval) / 255.0))
if (lval > 0):
l = np.where(l <= 255.0 - lval, l + lval, 255)
else:
l = l * ((255.0 + lval) / 255.0)
hls[:, :, 0] = h
hls[:, :, 1] = l
hls[:, :, 2] = s
img1 = cv.cvtColor(hls, cv.COLOR_HLS2RGB)
return img1
@staticmethod
def adjust_auto_whitebalance(img):
"""auto whitebalance
https://stackoverflow.com/questions/46390779/automatic-white-balancing-with-grayworld-assumption
:Parameters: image, temperature
:Returns: image
"""
result = cv.cvtColor(img, cv.COLOR_BGR2LAB)
avg_a = np.average(result[:, :, 1])
avg_b = np.average(result[:, :, 2])
result[:, :, 1] = result[:, :, 1] - ((avg_a - 128) * (result[:, :, 0] / 255.0) * 1.1)
result[:, :, 2] = result[:, :, 2] - ((avg_b - 128) * (result[:, :, 0] / 255.0) * 1.1)
result = cv.cvtColor(result, cv.COLOR_LAB2BGR)
return result
class Transform:
@staticmethod
def flip_horizontal(img):
"""Flip image horizontal
:Parameters: image
:Returns: image
"""
horizontal_img = cv.flip(img, 0)
return horizontal_img
@staticmethod
def flip_vertical(img):
"""Flip image vertical
:Parameters: image
:Returns: image
"""
vertical_img = cv.flip(img, 1)
return vertical_img
@staticmethod
def translate(img, shiftx, shifty):
"""Shift image n x and y pixels
:Parameters: image, shiftx, shifty
:Returns: image
"""
w = img.shape[1]
h = img.shape[0]
M = np.float32([[1, 0, shiftx], [0, 1, shifty]])
img2 = cv.warpAffine(img, M, (w, h))
return img2
@staticmethod
def rotate(image, angle):
"""Rotate image
:Parameters: image, angle
:Returns: image
"""
image_center = tuple(np.array(image.shape[1::-1]) / 2)
rot_mat = cv.getRotationMatrix2D(image_center, angle, 1.0)
result = cv.warpAffine(image, rot_mat, image.shape[1::-1], flags=cv.INTER_LINEAR)
return result
class Binary:
@staticmethod
def skeletonize(img):
"""skeletonize a thresholded image.
:Parameters: image
:Returns: image
"""
size = np.size(img)
skel = np.zeros(img.shape, np.uint8)
element = cv.getStructuringElement(cv.MORPH_CROSS, (3, 3))
done = False
while (not done):
eroded = cv.erode(img, element)
temp = cv.dilate(eroded, element)
temp = cv.subtract(img, temp)
skel = cv.bitwise_or(skel, temp)
img = eroded.copy()
zeros = size - cv.countNonZero(img)
if zeros == size:
done = True
return skel
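        # Example (illustrative sketch, not part of the original library): skeletonizing a
        # thresholded grayscale image:
        #   binary = Image.Adjust.threshold(Image.Convert.toGray(img), thresh=128)
        #   skeleton = Image.Binary.skeletonize(binary)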
# Zhang-Suen Thinning Algorithm - https://github.com/linbojin/Skeletonization-by-Zhang-Suen-Thinning-Algorithm
# note: slow filter
@staticmethod
def thinning(img):
"""Applies the Zhang-Suen thinning algorithm.
:Parameters: image
:Returns: image
"""
def neighbours(x, y, img):
"Return 8-neighbours of image point P1(x,y), in a clockwise order"
img = img
x_1, y_1, x1, y1 = x - 1, y - 1, x + 1, y + 1
return [img[x_1][y], img[x_1][y1], img[x][y1], img[x1][y1], # P2,P3,P4,P5
img[x1][y], img[x1][y_1], img[x][y_1], img[x_1][y_1]] # P6,P7,P8,P9
def transitions(neighbours):
"No. of 0,1 patterns (transitions from 0 to 1) in the ordered sequence"
n = neighbours + neighbours[0:1] # P2, P3, ... , P8, P9, P2
return sum((n1, n2) == (0, 1) for n1, n2 in zip(n, n[1:])) # (P2,P3), (P3,P4), ... , (P8,P9), (P9,P2)
ret, imout = cv.threshold(img, 0, 255, cv.THRESH_OTSU)
img = img < ret # must set object region as 1, background region as 0 !
print("the Zhang-Suen Thinning Algorithm")
img_Thinned = img.copy() # deepcopy to protect the original img
changing1 = changing2 = 1 # the points to be removed (set as 0)
while changing1 or changing2: # iterates until no further changes occur in the img
# Step 1
changing1 = []
rows, columns = img_Thinned.shape # x for rows, y for columns
for x in range(1, rows - 1): # No. of rows
for y in range(1, columns - 1): # No. of columns
P2, P3, P4, P5, P6, P7, P8, P9 = n = neighbours(x, y, img_Thinned)
if (img_Thinned[x][y] == 1 and # Condition 0: Point P1 in the object regions
2 <= sum(n) <= 6 and # Condition 1: 2<= N(P1) <= 6
transitions(n) == 1 and # Condition 2: S(P1)=1
P2 * P4 * P6 == 0 and # Condition 3
P4 * P6 * P8 == 0): # Condition 4
changing1.append((x, y))
for x, y in changing1:
img_Thinned[x][y] = 0
# Step 2
changing2 = []
for x in range(1, rows - 1):
for y in range(1, columns - 1):
P2, P3, P4, P5, P6, P7, P8, P9 = n = neighbours(x, y, img_Thinned)
if (img_Thinned[x][y] == 1 and # Condition 0
2 <= sum(n) <= 6 and # Condition 1
transitions(n) == 1 and # Condition 2
P2 * P4 * P8 == 0 and # Condition 3
P2 * P6 * P8 == 0): # Condition 4
changing2.append((x, y))
for x, y in changing2:
img_Thinned[x][y] = 0
return img_Thinned
@staticmethod
def morphology_erode(img, kernel=5):
"""Morphology filter - erode
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
erosion = cv.erode(img, kerneln, iterations=1)
return erosion
@staticmethod
def morphology_dilate(img, kernel=5):
"""Morphology filter - dilate
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
dilation = cv.dilate(img, kerneln, iterations=1)
return dilation
@staticmethod
def morphology_open(img, kernel=5):
"""Morphology filter - open
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
opening = cv.morphologyEx(img, cv.MORPH_OPEN, kerneln)
return opening
@staticmethod
def morphology_close(img, kernel=5):
"""Morphology filter - close
:Parameters: image, kernel
:Returns: image
"""
kerneln = np.ones((kernel, kernel), np.uint8)
opening = cv.morphologyEx(img, cv.MORPH_CLOSE, kerneln)
return opening
@staticmethod
def morphology_fillholes(im_in):
"""Morphology filter - fillholes
:Parameters: image, kernel
:Returns: image
"""
im_floodfill = im_in.copy()
            # Mask used for flood filling.
            # Notice the size needs to be 2 pixels larger than the image.
h, w = im_in.shape[:2]
mask = np.zeros((h + 2, w + 2), np.uint8)
# Floodfill from point (0, 0)
cv.floodFill(im_floodfill, mask, (0, 0), 255)
# Invert floodfilled image
im_floodfill_inv = cv.bitwise_not(im_floodfill)
# Combine the two images to get the foreground.
im_out = im_in | im_floodfill_inv
return im_in, im_floodfill, im_floodfill_inv, im_out
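        # Example (illustrative sketch, not part of the original library): morphology_fillholes
        # returns a tuple; the hole-filled result is the last element:
        #   _, _, _, filled = Image.Binary.morphology_fillholes(binary)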
@staticmethod
def remove_isolated_pixels(img0):
"""Remove isolated pixels in an image
:Parameters: image
:Returns: image
"""
input_image = cv.threshold(img0, 254, 255, cv.THRESH_BINARY)[1]
input_image_comp = cv.bitwise_not(input_image) # could just use 255-img
kernel1 = np.array([[0, 0, 0],
[0, 1, 0],
[0, 0, 0]], np.uint8)
kernel2 = np.array([[1, 1, 1],
[1, 0, 1],
[1, 1, 1]], np.uint8)
hitormiss1 = cv.morphologyEx(input_image, cv.MORPH_ERODE, kernel1)
hitormiss2 = cv.morphologyEx(input_image_comp, cv.MORPH_ERODE, kernel2)
hitormiss = cv.bitwise_and(hitormiss1, hitormiss2)
hitormiss_comp = cv.bitwise_not(hitormiss) # could just use 255-img
del_isolated = cv.bitwise_and(input_image, input_image, mask=hitormiss_comp)
return del_isolated
@staticmethod
def remove_islands(img0, min_size=150):
"""Remove islands in an image
:Parameters: image, min_size=150
:Returns: image
"""
# find all your connected components (white blobs in your image)
nb_components, output, stats, centroids = cv.connectedComponentsWithStats(img0, connectivity=8)
            # connectedComponentsWithStats yields every separated component with information on each of them, such as size
# the following part is just taking out the background which is also considered a component, but most of the time we don't want that.
sizes = stats[1:, -1]
nb_components = nb_components - 1
# minimum size of features we want to keep (number of pixels)
# here, it's a fixed value, but you can set it as you want, eg the mean of the sizes or whatever
# your answer image
img2 = np.zeros((output.shape))
# for every component in the image, you keep it only if it's above min_size
for i in range(0, nb_components):
if sizes[i] >= min_size:
img2[output == i + 1] = 255
return img2
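        # Example (illustrative sketch, not part of the original library): removing small
        # connected components from a binary image; the result is a float array and may need
        # casting back to uint8:
        #   cleaned = Image.Binary.remove_islands(binary, min_size=150).astype(np.uint8)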
class Convert:
@staticmethod
def to8bit(img):
"""Convert to 8 bit image
:Parameters: image
:Returns: image
"""
if (img.dtype == np.uint16):
img1 = (img / 256).astype('uint8') # updated this one on 20191216 for 16 bit imaging
else:
img1 = (img).astype('uint8')
# img1 = img.astype('uint8') # 16bit to 8bit
return img1
@staticmethod
def to16bit(img):
"""Convert to 16 bit image
:Parameters: image
:Returns: image
"""
if (img.dtype == np.uint8):
img1 = (img * 256).astype('uint16') # updated this one on 20191216 for 16 bit imaging
else:
img1 = (img).astype('uint16')
# img1 = img.astype('uint8') # 16bit to 8bit
return img1
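# Worked example of the scaling above: an 8-bit value v maps to v * 256 in 16-bit
# and comes back via integer division by 256, so 255 (uint8) -> 65280 (uint16)
# -> 255 (uint8); the round trip is lossless for 8-bit data, while converting
# 16-bit data down discards its lower 8 bits.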
@staticmethod
def toRGB(img):
"""Convert grayscale to RGB image
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels != 3):
img1 = cv.cvtColor(img, cv.COLOR_GRAY2BGR)
# print('Image converted from Grayscale to RGB')
return img1
@staticmethod
def toGray(img):
"""Convert RGB to color grayscale image
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels > 2):
img1 = cv.cvtColor(img, cv.COLOR_RGB2GRAY)
# print('Image converted from RGB to Grayscale')
return img1
@staticmethod
def BGRtoRGB(img):
"""Convert BGR to RGB
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels > 2):
b, g, r = cv.split(img) # get b,g,r
img1 = cv.merge([r, g, b]) # switch it to rgb (OpenCV uses BGR)
return img1
@staticmethod
def RGBtoBGR(img):
"""Convert RGB to BGR
:Parameters: image
:Returns: image
"""
img1 = img
channels = len(img.shape)
if (channels > 2):
r, g, b = cv.split(img)  # get r,g,b
img1 = cv.merge([b, g, r])  # switch it to bgr (OpenCV uses BGR)
return img1
@staticmethod
def BGRtoHSV(img):
"""Convert BGR to HSV
:Parameters: image
:Returns: image
"""
img1 = cv.cvtColor(img, cv.COLOR_BGR2HSV)
return img1
@staticmethod
def HSVtoBGR(img):
"""Convert HSV to BGR
:Parameters: image
:Returns: image
"""
img1 = cv.cvtColor(img, cv.COLOR_HSV2BGR)
return img1
@staticmethod
def binarytogray(img):
"""Convert binary image to grayscale (dtype=bool -> dtype=uint8)
:Parameters: image
:Returns: image
"""
img = img.astype('uint8') * 255
return img
class FilterKernels:
@staticmethod
def ideal_lowpass_kernel(img, radius=32):
rows, cols = img.shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= int(cols / 2)
r -= int(rows / 2)
d = np.power(c, 2.0) + np.power(r, 2.0)
kernel_matrix = np.zeros((rows, cols), np.float32)
kernel = np.copy(d)
kernel[kernel < pow(radius, 2.0)] = 1
kernel[kernel >= pow(radius, 2.0)] = 0
kernel_matrix[:, :] = kernel
return kernel_matrix
@staticmethod
def gaussian_lowpass_kernel(img, radius=32):
rows, cols = img.shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= int(cols / 2)
r -= int(rows / 2)
d = np.power(c, 2.0) + np.power(r, 2.0)
kernel_matrix = np.zeros((rows, cols), np.float32)
kernel = np.exp(-d / (2 * pow(radius, 2.0)))
kernel_matrix[:, :] = kernel
return kernel_matrix
@staticmethod
def butterworth_lowpass_kernel(img, radius=32, n=2):
rows, cols = img.shape[:2]
r, c = np.mgrid[0:rows:1, 0:cols:1]
c -= int(cols / 2)
r -= int(rows / 2)
d = np.power(c, 2.0) + np.power(r, 2.0)
kernel_matrix = np.zeros((rows, cols), np.float32)
kernel = 1.0 / (1 + np.power(np.sqrt(d) / radius, 2 * n))
kernel_matrix[:, :] = kernel
return kernel_matrix
@staticmethod
def ideal_bandpass_kernel(img, D0=32, w=9):
rows, cols = img.shape
crow, ccol = int(rows / 2), int(cols / 2)
mask = np.ones((rows, cols), np.uint8)
for i in range(0, rows):
for j in range(0, cols):
d = np.sqrt(pow(i - crow, 2) + pow(j - ccol, 2))
if D0 - w / 2 < d < D0 + w / 2:
mask[i, j] = 1
else:
mask[i, j] = 0
kernel = mask
return kernel
@staticmethod
def ideal_bandstop_kernel(img, D0=32, W=9):
kernel = 1.0 - Image.FilterKernels.ideal_bandpass_kernel(img, D0, W)
return kernel
@staticmethod
def gaussian_bandstop_kernel(img, D0=32, W=9):
r, c = img.shape[1], img.shape[0]
u = np.arange(r)
v = np.arange(c)
u, v = np.meshgrid(u, v)
low_pass = np.sqrt((u - r / 2) ** 2 + (v - c / 2) ** 2)
kernel = 1.0 - np.exp(-0.5 * (((low_pass ** 2 - D0 ** 2) / (low_pass * W + 1.0e-5)) ** 2))
return kernel
@staticmethod
def gaussian_bandpass_kernel(img, D0=32, W=9):
assert img.ndim == 2
# kernel = Image.FilterKernels.gaussian_bandstop_kernel(img, D0, W)
kernel = 1.0 - Image.FilterKernels.gaussian_bandstop_kernel(img, D0, W)
return kernel
@staticmethod
def butterworth_bandstop_kernel(img, D0=32, W=9, n=1):
r, c = img.shape[1], img.shape[0]
u = np.arange(r)
v = np.arange(c)
u, v = np.meshgrid(u, v)
low_pass = np.sqrt((u - r / 2) ** 2 + (v - c / 2) ** 2)
kernel = (1 / (1 + ((low_pass * W) / (low_pass ** 2 - D0 ** 2)) ** (2 * n)))
return kernel
@staticmethod
def butterworth_bandpass_kernel(img, D0=5, W=10):
kernel = 1.0 - Image.FilterKernels.butterworth_bandstop_kernel(img, D0, W)
return kernel
'''
def convert_kernel_to_image(kernel):
out = np.dstack((kernel, np.zeros(kernel.shape[:-1])))
return out
'''
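# Usage sketch (hedged, not part of the original API; `gray` is an assumed 2D
# grayscale image): the kernels above are frequency-domain masks centred on the
# image centre, so a typical application multiplies them with a shifted FFT:
#   kernel = Image.FilterKernels.gaussian_lowpass_kernel(gray, radius=32)
#   spectrum = np.fft.fftshift(np.fft.fft2(gray))
#   filtered = np.real(np.fft.ifft2(np.fft.ifftshift(spectrum * kernel)))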
class Tools:
# combined sequences
@staticmethod
def image_with_2_closeups(img, t_size=[0.2, 0.2], t_center1=[0.3, 0.3], t_center2=[0.6, 0.6]):
"""image with 2 closeups, the output is a color image.
:Parameters: image, t_size=[0.2, 0.2], t_center1=[0.3, 0.3], t_center2=[0.6, 0.6]
:Returns: image
"""
w = img.shape[1]
h = img.shape[0]
rgb = Image.Convert.toRGB(img)
xt0 = Image._multipleof2((t_center1[0] - t_size[0] * 0.5) * w)
yt0 = Image._multipleof2((t_center1[1] - t_size[1] * 0.5) * h)
xt1 = Image._multipleof2((t_center1[0] + t_size[0] * 0.5) * w)
yt1 = Image._multipleof2((t_center1[1] + t_size[1] * 0.5) * h)
# rgb = img
template1 = Image.crop(rgb, xt0, yt0, xt1, yt1)
w3 = np.abs(xt0 - xt1)
h3 = np.abs(yt0 - yt1)
xt0b = Image._multipleof2((t_center2[0] - t_size[0] * 0.5) * w)
yt0b = Image._multipleof2((t_center2[1] - t_size[1] * 0.5) * h)
# rgb = img
template2 = Image.crop(rgb, xt0b, yt0b, xt0b + w3, yt0b + h3)
wt = template1.shape[1]
ht = template1.shape[0]
scalefactor = (w * 0.5) / wt
template1b = Image.resize(template1, scalefactor)
# print(template1b.shape)
wt2 = template1b.shape[1]
ht2 = template1b.shape[0]
template2b = Image.resize(template2, scalefactor)
# print(template2b.shape)
# print(w,h)
# print(wt2,ht2)
output = np.zeros((h + ht2, w, 3), np.uint8)
print(output.shape)
print(rgb.shape)
print(template1b.shape)
print(template2b.shape)
output[0:h, 0:w] = rgb
output[h:h + ht2, 0:wt2] = template1b
output[h:h + ht2, wt2:w] = template2b
output = cv.rectangle(output, (xt0, yt0), (xt1, yt1), (33, 145, 237), 3)
output = cv.rectangle(output, (xt0b, yt0b), (xt0b + w3, yt0b + h3), (240, 167, 41), 3)
output = cv.rectangle(output, (wt2 + 3, h), (w - 2, h + ht2 - 3), (240, 167, 41), 3)
output = cv.rectangle(output, (0 + 2, h), (wt2 - 2, h + ht2 - 3), (33, 145, 237), 3)
return output
@staticmethod
def anaglyph(img0, img1):
"""Create a anaglyph from 2 images (stereo image)
:Parameters: image1, image2
:Returns: image
"""
matrices = {
'true': [[0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 0, 0, 0.299, 0.587, 0.114]],
'mono': [[0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0.299, 0.587, 0.114, 0.299, 0.587, 0.114]],
'color': [[1, 0, 0, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1]],
'halfcolor': [[0.299, 0.587, 0.114, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1]],
'optimized': [[0, 0.7, 0.3, 0, 0, 0, 0, 0, 0], [0, 0, 0, 0, 1, 0, 0, 0, 1]],
}
# img1 = translate_image(img1,8,0)
height = img0.shape[0]
width = img0.shape[1]
leftImage = cv.cvtColor(img0, cv.COLOR_GRAY2BGR)
rightImage = cv.cvtColor(img1, cv.COLOR_GRAY2BGR)
m = matrices['optimized']
result = np.zeros((img0.shape[0], img0.shape[1], 3), np.uint8)
# split the left and right images into separate blue, green and red images
lb, lg, lr = cv.split(np.asarray(leftImage[:, :]))
rb, rg, rr = cv.split(np.asarray(rightImage[:, :]))
resultArray = np.asarray(result[:, :])
resultArray[:, :, 0] = lb * m[0][6] + lg * m[0][7] + lr * m[0][8] + rb * m[1][6] + rg * m[1][7] + rr * m[1][
8]
resultArray[:, :, 1] = lb * m[0][3] + lg * m[0][4] + lr * m[0][5] + rb * m[1][3] + rg * m[1][4] + rr * m[1][
5]
resultArray[:, :, 2] = lb * m[0][0] + lg * m[0][1] + lr * m[0][2] + rb * m[1][0] + rg * m[1][1] + rr * m[1][
2]
return result
@staticmethod
def image2patches(img, patchsize, overlappx=0, verbose=False):
"""
Convert single image to a list of patches.
The size of a patch is determined by patchsize; be aware of rounding in case the image width or height cannot be divided evenly by the patchsize.
Works both for color and grayscale images.
overlap in pixels (default overlappx=0)
:Parameters: image, patchsize, overlappx=0, verbose=False
:Returns: image_list
"""
h0, w0 = img.shape[0], img.shape[1]
# determine the number of steps (rows and columns)
cols = int(np.round(w0 / patchsize, 0))
"""Plotting for manuscript figures."""
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import fit
from models import calc_cs, calc_ffs, calc_ge_gm, calc_rho, dipole_ffs, get_b2, hbarc
matplotlib.rcParams["text.usetex"] = True
matplotlib.rcParams["font.size"] = 13
matplotlib.rcParams["font.family"] = "lmodern"
matplotlib.rcParams["text.latex.preamble"] = r"\usepackage{lmodern}"
matplotlib.rcParams["xtick.labelsize"] = 12
matplotlib.rcParams["ytick.labelsize"] = 12
# Number of samples to use when generating statistical uncertainty bands
N_SAMPLES = 1000
def read_Rosenbluth_data():
"""Read data for G_E and G_M from "Rosenbluth.dat"."""
col_names = ["Q2", "GE", "delta_GE", "GM", "delta_GM"]
data = pd.read_csv("data/Rosenbluth.dat", sep=" ", skiprows=5, names=col_names)
return data
def calc_interval(calc_func, x_range, param_list, order):
"""Calculate 68% ("1 sigma") percentile interval from param sample."""
out = np.array([calc_func(x_range, param, order) for param in param_list])
return np.percentile(out, (15.9, 84.1), 0)
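# Hedged usage sketch: `param_list` is typically a set of parameter vectors drawn
# from the fit covariance (as done in the plotting functions below), e.g.
#   samples = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
#   lower, upper = calc_interval(calc_ffs, Q2_range, samples, order)
# giving the pointwise 15.9th and 84.1st percentiles of the sampled model curves.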
def calc_params(data, order, reg_param):
"""Run fit and get model parameters and covariance."""
params, _, _, _, cov = fit.fit(data, data, order, reg_param)
params = params[fit.N_NORM_PARAMS :]
cov = cov[fit.N_NORM_PARAMS :, fit.N_NORM_PARAMS :]
return params, cov
def calc_sys_bands(calc_func, x_range, data, order, reg_param):
"""Calculate systematic error bands for given quantity."""
params, _ = calc_params(data, order, reg_param)
f1, f2 = calc_func(x_range, params, order)
mincut_params = fit.fit_systematic_variant("cs_mincut", data, order, reg_param)[0]
maxcut_params = fit.fit_systematic_variant("cs_maxcut", data, order, reg_param)[0]
sysup_params = fit.fit_systematic_variant("cs_sysup", data, order, reg_param)[0]
syslow_params = fit.fit_systematic_variant("cs_syslow", data, order, reg_param)[0]
mincut_f1, mincut_f2 = calc_func(x_range, mincut_params, order)
maxcut_f1, maxcut_f2 = calc_func(x_range, maxcut_params, order)
sysup_f1, sysup_f2 = calc_func(x_range, sysup_params, order)
syslow_f1, syslow_f2 = calc_func(x_range, syslow_params, order)
# Calculate upper and lower limits for each of the systematic variations:
f1_cut_up = np.clip(np.max(np.stack([mincut_f1 - f1, maxcut_f1 - f1]), 0), 0, None)
f1_cut_low = np.clip(np.min(np.stack([mincut_f1 - f1, maxcut_f1 - f1]), 0), None, 0)
f1_sys_up = np.clip(np.max(np.stack([sysup_f1 - f1, syslow_f1 - f1]), 0), 0, None)
f1_sys_low = np.clip(np.min(np.stack([sysup_f1 - f1, syslow_f1 - f1]), 0), None, 0)
f2_cut_up = np.clip(np.max(np.stack([mincut_f2 - f2, maxcut_f2 - f2]), 0), 0, None)
f2_cut_low = np.clip(np.min(np.stack([mincut_f2 - f2, maxcut_f2 - f2]), 0), None, 0)
f2_sys_up = np.clip(np.max(np.stack([sysup_f2 - f2, syslow_f2 - f2]), 0), 0, None)
f2_sys_low = np.clip(np.min(np.stack([sysup_f2 - f2, syslow_f2 - f2]), 0), None, 0)
# Add two systematic "errors" in quadrature:
f1_up = np.sqrt(f1_cut_up ** 2 + f1_sys_up ** 2)
f1_low = np.sqrt(f1_cut_low ** 2 + f1_sys_low ** 2)
f2_up = np.sqrt(f2_cut_up ** 2 + f2_sys_up ** 2)
f2_low = np.sqrt(f2_cut_low ** 2 + f2_sys_low ** 2)
return f1_up, f1_low, f2_up, f2_low
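# The cut variations and the sys_up/sys_low variations are combined in quadrature
# above; for instance an upward shift of 0.003 from the cut variation and 0.004
# from the sys variation yields sqrt(0.003**2 + 0.004**2) = 0.005 as the total
# upward systematic band.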
def fill_between(x_range, y_up, y_low, color, hbarc_scale=False):
"""Plot confidence interval."""
if hbarc_scale:
x_range = hbarc * x_range
y_up = y_up / (hbarc * hbarc)
y_low = y_low / (hbarc * hbarc)
plt.fill_between(x_range, y_up, y_low, color=color, lw=0, alpha=0.7)
def plot_f1_f2(data, order, reg_param):
"""Plot the Dirac and Pauli form factors."""
params, cov = calc_params(data, order, reg_param)
Q2_range = np.linspace(0, 1, 100)
F1, F2 = calc_ffs(Q2_range, params, order)
# Transverse charge radius and the slope of F1:
b2, _ = get_b2(params, cov)
slope_x = np.linspace(0, 0.15, 10)
slope_y = 1 - slope_x * b2 / 4
# Plot the form factor slope:
plt.plot(slope_x, slope_y, ls="--", color="black", lw=1)
if fit.covariance_bad(cov):
print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
draw_confidence = False
else:
draw_confidence = True
if draw_confidence:
# Calculate statistical uncertainties:
params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
interval = calc_interval(calc_ffs, Q2_range, params, order)
# Calculate systematic uncertainties:
f1_up, f1_low, f2_up, f2_low = calc_sys_bands(calc_ffs, Q2_range, data, order, reg_param)
# Plot the systematic band for F2:
fill_between(Q2_range, interval[1, 1] + f2_up, interval[1, 1], "blue")
fill_between(Q2_range, interval[0, 1], interval[0, 1] - f2_low, "blue")
# Plot the statistical band for F2:
fill_between(Q2_range, interval[1, 1], interval[0, 1], "#AAAAFF")
# Plot the best-fit line for F2:
plt.plot(Q2_range, F2, color="blue", lw=0.6, alpha=0.7)
# Plot the same things for F1:
if draw_confidence:
fill_between(Q2_range, interval[1, 0] + f1_up, interval[1, 0], "red")
fill_between(Q2_range, interval[0, 0], interval[0, 0] - f1_low, "red")
fill_between(Q2_range, interval[1, 0], interval[0, 0], "#FFAAAA")
plt.plot(Q2_range, F1, color="red", lw=0.6, alpha=0.7)
# Axes and labels:
plt.xlim(0, 1)
plt.xlabel(r"$Q^2~\left(\mathrm{GeV}^2\right)$")
plt.ylabel(r"$F_1, \, F_2$", labelpad=11)
if order == 5:
plt.text(0.45, 0.46, r"$F_1$", color="#FF0000")
plt.text(0.36, 0.31, r"$F_2$", color="#0000FF")
def plot_rhos(data, order, reg_param):
"""Plot the transverse densities rho1 and rho2."""
rho_range = np.linspace(0, 10.1, 100)
params, cov = calc_params(data, order, reg_param)
rho1, rho2 = calc_rho(rho_range, params, order)
if fit.covariance_bad(cov):
print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
draw_confidence = False
else:
draw_confidence = True
if draw_confidence:
# Calculate statistical uncertainties:
params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
interval = calc_interval(calc_rho, rho_range, params, order)
# Calculate systematic uncertainties:
rho1_up, rho1_low, rho2_up, rho2_low = calc_sys_bands(calc_rho, rho_range, data, order, reg_param)
# Plot the systematic band for rho1:
fill_between(rho_range, interval[1, 0] + rho1_up, interval[1, 0], "red", hbarc_scale=True)
fill_between(rho_range, interval[0, 0], interval[0, 0] - rho1_low, "red", hbarc_scale=True)
# Plot the statistical band for rho1:
fill_between(rho_range, interval[1, 0], interval[0, 0], "#FFAAAA", hbarc_scale=True)
# Plot the best-fit line for rho1:
plt.plot(hbarc * rho_range, rho1 / (hbarc * hbarc), color="red", alpha=0.7, lw=0.6)
# Plot the same things for rho2:
if draw_confidence:
fill_between(rho_range, interval[1, 1] + rho2_up, interval[1, 1], "blue", hbarc_scale=True)
fill_between(rho_range, interval[0, 1], interval[0, 1] - rho2_low, "blue", hbarc_scale=True)
fill_between(rho_range, interval[1, 1], interval[0, 1], "#AAAAFF", hbarc_scale=True)
plt.plot(hbarc * rho_range, rho2 / (hbarc * hbarc), color="blue", alpha=0.7, lw=0.6)
# Axes and labels:
plt.xlim(0, 2)
plt.yscale("log")
plt.xlabel(r"$b~(\mathrm{fm})$", labelpad=6)
plt.ylabel(r"$\rho_1, \, \rho_2~\left(\mathrm{fm}^{-2}\right)$")
if order == 5:
plt.text(0.94, 0.013, r"$\rho_1$", color="#FF0000")
plt.text(1.1, 0.079, r"$\rho_2$", color="#0000FF")
def plot_ge_gm(cs_data, R_data, order, reg_param):
"""Plot the Sachs electric and magnetic form factors."""
params, cov = calc_params(cs_data, order, reg_param)
Q2_range = np.linspace(0, 1, 100)
GE, GM = calc_ge_gm(Q2_range, params, order)
if fit.covariance_bad(cov):
print("Warning: Covariance ill-conditioned, will not plot confidence intervals")
draw_confidence = False
else:
draw_confidence = True
# Calculate statistical uncertainties:
if draw_confidence:
params = np.random.multivariate_normal(params, cov, size=N_SAMPLES)
import numpy as np
from rdt.transformers.numerical import GaussianCopulaTransformer, NumericalTransformer
class TestNumericalTransformer:
def test_null_column(self):
data = np.array([1, 2, 1, 2, np.nan, 1])
nt = NumericalTransformer()
transformed = nt.fit_transform(data)
assert isinstance(transformed, np.ndarray)
assert transformed.shape == (6, 2)
assert list(transformed[:, 1]) == [0, 0, 0, 0, 1, 0]
reverse = nt.reverse_transform(transformed)
np.testing.assert_array_almost_equal(reverse, data, decimal=2)
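# Note (hedged): the second output column is a null-indicator flag (1 where the
# input was NaN), while the first column holds the values with the NaN replaced
# by a fill value; the exact fill strategy is the transformer's default and is
# not asserted by this test.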
def test_not_null_column(self):
data = np.array([1, 2, 1, 2, np.nan, 1])
nt = NumericalTransformer(null_column=False)
transformed = nt.fit_transform(data)
assert isinstance(transformed, np.ndarray)
assert transformed.shape == (6, )
reverse = nt.reverse_transform(transformed)
np.testing.assert_array_almost_equal(reverse, data, decimal=2)
def test_int(self):
data = np.array([1, 2, 1, 2, 1])
nt = NumericalTransformer(dtype=int)
transformed = nt.fit_transform(data)
assert isinstance(transformed, np.ndarray)
assert transformed.shape == (5, )
reverse = nt.reverse_transform(transformed)
assert list(reverse) == [1, 2, 1, 2, 1]
def test_int_nan(self):
data = np.array([1, 2, 1, 2, 1, np.nan])
nt = NumericalTransformer(dtype=int)
transformed = nt.fit_transform(data)
assert isinstance(transformed, np.ndarray)
assert transformed.shape == (6, 2)
reverse = nt.reverse_transform(transformed)
np.testing.assert_array_almost_equal(reverse, data, decimal=2)
class TestGaussianCopulaTransformer:
def test_stats(self):
data = np.random.normal(loc=4, scale=4, size=1000)
ct = GaussianCopulaTransformer()
transformed = ct.fit_transform(data)
assert isinstance(transformed, np.ndarray)
assert transformed.shape == (1000, )
np.testing.assert_almost_equal(transformed.mean(), 0, decimal=1)
np.testing.assert_almost_equal(transformed.std(), 1, decimal=1)
reverse = ct.reverse_transform(transformed)
np.testing.assert_array_almost_equal(reverse, data, decimal=1)
def test_null_column(self):
data = np.array([1, 2, 1, 2, np.nan, 1])
import collections
import copy
import os
import kerastuner
import numpy as np
import tensorflow as tf
from tensorflow.python.util import nest
class AutoTuner(kerastuner.engine.multi_execution_tuner.MultiExecutionTuner):
"""A Tuner class based on KerasTuner for AutoKeras.
Different from KerasTuner's Tuner class, AutoTuner not only tunes the
Hypermodel, which can be directly built into a Keras model, but also the
preprocessors. Therefore, a HyperGraph stores the overall search space containing
both the Preprocessors and the Hypermodel. For every trial, the HyperGraph builds the
PreprocessGraph and KerasGraph with the provided HyperParameters.
The AutoTuner uses EarlyStopping to accelerate the search and then fully
trains the model for the full number of epochs with both training and validation data.
The fully trained model is the best model to be used by AutoModel.
# Arguments
**kwargs: The args supported by KerasTuner.
"""
def __init__(self, **kwargs):
super().__init__(**kwargs)
self._finished = False
# Override the function to prevent building the model during initialization.
def _populate_initial_space(self):
pass
def get_best_model(self):
model = super().get_best_models()[0]
model.load_weights(self.best_model_path)
return model
@staticmethod
def _adapt_model(model, dataset):
from tensorflow.keras.layers.experimental import preprocessing
x = dataset.map(lambda x, y: x)
def get_output_layer(tensor):
tensor = nest.flatten(tensor)[0]
for layer in model.layers:
if isinstance(layer, tf.keras.layers.InputLayer):
continue
input_node = nest.flatten(layer.input)[0]
if input_node is tensor:
return layer
return None
for index, input_node in enumerate(nest.flatten(model.input)):
def get_data(*args):
return args[index]
temp_x = x.map(get_data)
layer = get_output_layer(input_node)
while isinstance(layer, preprocessing.PreprocessingLayer):
layer.adapt(temp_x)
layer = get_output_layer(layer.output)
return model
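# Note: PreprocessingLayer.adapt() computes layer state from data before training
# (e.g. the vocabulary of a TextVectorization layer or the mean/variance of a
# Normalization layer), which is why the chain of preprocessing layers attached
# to each model input is walked and adapted here.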
def run_trial(self, trial, x=None, *fit_args, **fit_kwargs):
model_checkpoint = tf.keras.callbacks.ModelCheckpoint(
filepath=self._get_checkpoint_fname(
trial.trial_id, self._reported_step),
monitor=self.oracle.objective.name,
mode=self.oracle.objective.direction,
save_best_only=True,
save_weights_only=True)
original_callbacks = fit_kwargs.pop('callbacks', [])
# Run the training process multiple times.
metrics = collections.defaultdict(list)
for execution in range(self.executions_per_trial):
copied_fit_kwargs = copy.copy(fit_kwargs)
callbacks = self._deepcopy_callbacks(original_callbacks)
self._configure_tensorboard_dir(callbacks, trial.trial_id, execution)
callbacks.append(
kerastuner.engine.tuner_utils.TunerCallback(self, trial))
# Only checkpoint the best epoch across all executions.
callbacks.append(model_checkpoint)
copied_fit_kwargs['callbacks'] = callbacks
model = self.hypermodel.build(trial.hyperparameters)
self._adapt_model(model, x)
history = model.fit(x, *fit_args, **copied_fit_kwargs)
for metric, epoch_values in history.history.items():
if self.oracle.objective.direction == 'min':
best_value = np.min(epoch_values)
else:
best_value = np.max(epoch_values)
metrics[metric].append(best_value)
# Average the results across executions and send to the Oracle.
averaged_metrics = {}
for metric, execution_values in metrics.items():
averaged_metrics[metric] = np.mean(execution_values)