"""Rangeland Production Model."""
import os
import logging
import tempfile
import shutil
from builtins import range
import re
import math
import pickle
import numpy
import pandas
from osgeo import ogr
from osgeo import osr
from osgeo import gdal
import pygeoprocessing
from rangeland_production import utils
from rangeland_production import validation
LOGGER = logging.getLogger('rangeland_production.forage')
# we only have these types of soils
SOIL_TYPE_LIST = ['clay', 'silt', 'sand']
# temporary directory to store intermediate files
PROCESSING_DIR = None
# user-supplied crude protein of vegetation
CRUDE_PROTEIN = None
# state variables and parameters take their names from Century
# _SITE_STATE_VARIABLE_FILES contains state variables that are a
# property of the site, including:
# carbon in each soil compartment
# (structural, metabolic, som1, som2, som3) and layer (1=surface, 2=soil)
# e.g., som2c_2 = carbon in soil som2;
# N and P in each soil layer and compartment (1=N, 2=P)
# e.g., som2e_1_1 = N in surface som2, som2e_1_2 = P in surface som2;
# water in each soil layer, asmos_<layer>
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_SITE_STATE_VARIABLE_FILES = {
'metabc_1_path': 'metabc_1.tif',
'metabc_2_path': 'metabc_2.tif',
'som1c_1_path': 'som1c_1.tif',
'som1c_2_path': 'som1c_2.tif',
'som2c_1_path': 'som2c_1.tif',
'som2c_2_path': 'som2c_2.tif',
'som3c_path': 'som3c.tif',
'strucc_1_path': 'strucc_1.tif',
'strucc_2_path': 'strucc_2.tif',
'strlig_1_path': 'strlig_1.tif',
'strlig_2_path': 'strlig_2.tif',
'metabe_1_1_path': 'metabe_1_1.tif',
'metabe_2_1_path': 'metabe_2_1.tif',
'som1e_1_1_path': 'som1e_1_1.tif',
'som1e_2_1_path': 'som1e_2_1.tif',
'som2e_1_1_path': 'som2e_1_1.tif',
'som2e_2_1_path': 'som2e_2_1.tif',
'som3e_1_path': 'som3e_1.tif',
'struce_1_1_path': 'struce_1_1.tif',
'struce_2_1_path': 'struce_2_1.tif',
'metabe_1_2_path': 'metabe_1_2.tif',
'metabe_2_2_path': 'metabe_2_2.tif',
'plabil_path': 'plabil.tif',
'secndy_2_path': 'secndy_2.tif',
'parent_2_path': 'parent_2.tif',
'occlud_path': 'occlud.tif',
'som1e_1_2_path': 'som1e_1_2.tif',
'som1e_2_2_path': 'som1e_2_2.tif',
'som2e_1_2_path': 'som2e_1_2.tif',
'som2e_2_2_path': 'som2e_2_2.tif',
'som3e_2_path': 'som3e_2.tif',
'struce_1_2_path': 'struce_1_2.tif',
'struce_2_2_path': 'struce_2_2.tif',
'asmos_1_path': 'asmos_1.tif',
'asmos_2_path': 'asmos_2.tif',
'asmos_3_path': 'asmos_3.tif',
'asmos_4_path': 'asmos_4.tif',
'asmos_5_path': 'asmos_5.tif',
'asmos_6_path': 'asmos_6.tif',
'asmos_7_path': 'asmos_7.tif',
'asmos_8_path': 'asmos_8.tif',
'asmos_9_path': 'asmos_9.tif',
'avh2o_3_path': 'avh2o_3.tif',
'minerl_1_1_path': 'minerl_1_1.tif',
'minerl_2_1_path': 'minerl_2_1.tif',
'minerl_3_1_path': 'minerl_3_1.tif',
'minerl_4_1_path': 'minerl_4_1.tif',
'minerl_5_1_path': 'minerl_5_1.tif',
'minerl_6_1_path': 'minerl_6_1.tif',
'minerl_7_1_path': 'minerl_7_1.tif',
'minerl_8_1_path': 'minerl_8_1.tif',
'minerl_9_1_path': 'minerl_9_1.tif',
'minerl_10_1_path': 'minerl_10_1.tif',
'minerl_1_2_path': 'minerl_1_2.tif',
'minerl_2_2_path': 'minerl_2_2.tif',
'minerl_3_2_path': 'minerl_3_2.tif',
'minerl_4_2_path': 'minerl_4_2.tif',
'minerl_5_2_path': 'minerl_5_2.tif',
'minerl_6_2_path': 'minerl_6_2.tif',
'minerl_7_2_path': 'minerl_7_2.tif',
'minerl_8_2_path': 'minerl_8_2.tif',
'minerl_9_2_path': 'minerl_9_2.tif',
'minerl_10_2_path': 'minerl_10_2.tif',
'snow_path': 'snow.tif',
'snlq_path': 'snlq.tif',
}
# _PFT_STATE_VARIABLES contains state variables that are a
# property of a PFT, including:
# carbon, nitrogen, and phosphorus in aboveground live biomass,
# where 1=N, 2=P
# e.g. aglivc = C in aboveground live biomass,
# aglive_1 = N in aboveground live biomass;
# carbon, nitrogen, and phosphorus in aboveground standing dead
# biomass, stdedc and stdede;
# carbon, nitrogen, and phosphorus in belowground live biomass,
# bglivc and bglive
# state variables fully described in this table:
# https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
_PFT_STATE_VARIABLES = [
'aglivc', 'bglivc', 'stdedc', 'aglive_1', 'bglive_1',
'stdede_1', 'aglive_2', 'bglive_2', 'stdede_2', 'avh2o_1',
'crpstg_1', 'crpstg_2',
]
# intermediate parameters that do not change between timesteps,
# including field capacity and wilting point of each soil layer,
# coefficients describing effect of soil texture on decomposition
# rates
_PERSISTENT_PARAMS_FILES = {
'afiel_1_path': 'afiel_1.tif',
'afiel_2_path': 'afiel_2.tif',
'afiel_3_path': 'afiel_3.tif',
'afiel_4_path': 'afiel_4.tif',
'afiel_5_path': 'afiel_5.tif',
'afiel_6_path': 'afiel_6.tif',
'afiel_7_path': 'afiel_7.tif',
'afiel_8_path': 'afiel_8.tif',
'afiel_9_path': 'afiel_9.tif',
'awilt_1_path': 'awilt_1.tif',
'awilt_2_path': 'awilt_2.tif',
'awilt_3_path': 'awilt_3.tif',
'awilt_4_path': 'awilt_4.tif',
'awilt_5_path': 'awilt_5.tif',
'awilt_6_path': 'awilt_6.tif',
'awilt_7_path': 'awilt_7.tif',
'awilt_8_path': 'awilt_8.tif',
'awilt_9_path': 'awilt_9.tif',
'wc_path': 'wc.tif',
'eftext_path': 'eftext.tif',
'p1co2_2_path': 'p1co2_2.tif',
'fps1s3_path': 'fps1s3.tif',
'orglch_path': 'orglch.tif',
'fps2s3_path': 'fps2s3.tif',
'rnewas_1_1_path': 'rnewas_1_1.tif',
'rnewas_2_1_path': 'rnewas_2_1.tif',
'rnewas_1_2_path': 'rnewas_1_2.tif',
'rnewas_2_2_path': 'rnewas_2_2.tif',
'rnewbs_1_1_path': 'rnewbs_1_1.tif',
'rnewbs_1_2_path': 'rnewbs_1_2.tif',
'rnewbs_2_1_path': 'rnewbs_2_1.tif',
'rnewbs_2_2_path': 'rnewbs_2_2.tif',
'vlossg_path': 'vlossg.tif',
}
# site-level values that are updated once per year
_YEARLY_FILES = {
'annual_precip_path': 'annual_precip.tif',
'baseNdep_path': 'baseNdep.tif',
}
# pft-level values that are updated once per year
_YEARLY_PFT_FILES = ['pltlig_above', 'pltlig_below']
# intermediate values for each plant functional type that are shared
# between submodels, but do not need to be saved as output
_PFT_INTERMEDIATE_VALUES = [
'h2ogef_1', 'tgprod_pot_prod',
'cercrp_min_above_1', 'cercrp_min_above_2',
'cercrp_max_above_1', 'cercrp_max_above_2',
'cercrp_min_below_1', 'cercrp_min_below_2',
'cercrp_max_below_1', 'cercrp_max_below_2',
'tgprod', 'rtsh', 'flgrem', 'fdgrem']
# intermediate site-level values that are shared between submodels,
# but do not need to be saved as output
_SITE_INTERMEDIATE_VALUES = [
'amov_1', 'amov_2', 'amov_3', 'amov_4', 'amov_5', 'amov_6', 'amov_7',
'amov_8', 'amov_9', 'amov_10', 'snowmelt', 'bgwfunc', 'diet_sufficiency']
# fixed parameters for each grazing animal type are adapted from the GRAZPLAN
# model as described by Freer et al. 2012, "The GRAZPLAN animal biology model
# for sheep and cattle and the GrazFeed decision support tool"
_FREER_PARAM_DICT = {
'b_indicus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.31,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'b_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.36,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'indicus_x_taurus': {
'CN1': 0.0115,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.025,
'CI2': 1.7,
'CI8': 62,
'CI9': 1.7,
'CI15': 0.5,
'CI19': 0.416,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00078,
'CR5': 0.6,
'CR6': 0.00074,
'CR7': 0.5,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.375,
'CL1': 4,
'CL2': 30,
'CL3': 0.6,
'CL5': 0.94,
'CL6': 3.1,
'CL15': 0.032,
'CM1': 0.09,
'CM2': 0.335,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.0025,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CP1': 285,
'CP4': 0.33,
'CP5': 1.8,
'CP6': 2.42,
'CP7': 1.16,
'CP8': 4.11,
'CP9': 343.5,
'CP10': 0.0164,
'CP15': 0.07,
},
'sheep': {
'CN1': 0.0157,
'CN2': 0.27,
'CN3': 0.4,
'CI1': 0.04,
'CI2': 1.7,
'CI8': 28,
'CI9': 1.4,
'CI12': 0.15,
'CI13': 0.02,
'CI14': 0.002,
'CI20': 1.5,
'CR1': 0.8,
'CR2': 0.17,
'CR3': 1.7,
'CR4': 0.00112,
'CR5': 0.6,
'CR6': 0.00112,
'CR7': 0,
'CR12': 0.8,
'CR13': 0.35,
'CK1': 0.5,
'CK2': 0.02,
'CK3': 0.85,
'CK5': 0.4,
'CK6': 0.02,
'CK8': 0.133,
'CL0': 0.486,
'CL1': 2,
'CL2': 22,
'CL3': 1,
'CL5': 0.94,
'CL6': 4.7,
'CL15': 0.045,
'CM1': 0.09,
'CM2': 0.26,
'CM3': 0.00008,
'CM4': 0.84,
'CM6': 0.02,
'CM7': 0.9,
'CM16': 0.0026,
'CRD1': 0.3,
'CRD2': 0.25,
'CRD4': 0.007,
'CRD5': 0.005,
'CRD6': 0.35,
'CRD7': 0.1,
'CA1': 0.05,
'CA2': 0.85,
'CA3': 5.5,
'CA4': 0.178,
'CA6': 1,
'CA7': 0.6,
'CW1': 24,
'CW2': 0.004,
'CW3': 0.7,
'CW5': 0.25,
'CW6': 0.072,
'CW7': 1.35,
'CW8': 0.016,
'CW9': 1,
'CW12': 0.025,
'CP1': 150,
'CP4': 0.33,
'CP5': 1.43,
'CP6': 3.38,
'CP7': 0.91,
'CP8': 4.33,
'CP9': 4.37,
'CP10': 0.965,
'CP15': 0.1,
},
}
# _TARGET_NODATA is for general rasters whose values are positive;
# _IC_NODATA is for rasters whose values may fall in any range
_TARGET_NODATA = -1.0
_IC_NODATA = float(numpy.finfo('float32').min)
# SV_NODATA is for state variables
_SV_NODATA = -1.0
def execute(args):
"""InVEST Forage Model.
[model description]
Parameters:
args['workspace_dir'] (string): path to target output workspace.
args['results_suffix'] (string): (optional) string to append to any
output file names
        args['starting_month'] (int): the month in which to start the model
            run and reporting, where the range 1..12 corresponds to Jan..Dec.
        args['starting_year'] (int): the year in which to start the model
            run. This value is used to label outputs in the form
            [month_int]_[year].
        args['n_months'] (int): number of months to run the model; the model
            run will start reporting in `args['starting_month']`.
args['aoi_path'] (string): path to polygon vector indicating the
desired spatial extent of the model. This has the effect of
clipping the computational area of the input datasets to be the
area intersected by this polygon.
args['management_threshold'] (float): biomass in kg/ha required to be
left standing at each model step after offtake by grazing animals
args['proportion_legume_path'] (string): path to raster containing
fraction of pasture that is legume, by weight
args['bulk_density_path'] (string): path to bulk density raster.
args['ph_path'] (string): path to soil pH raster.
args['clay_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is clay
args['silt_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is silt
args['sand_proportion_path'] (string): path to raster representing
per-pixel proportion of soil component that is sand
args['precip_dir'] (string): path to a directory containing monthly
precipitation rasters. The model requires at least 12 months of
precipitation and expects to find a precipitation file input for
every month of the simulation, so the number of precipitation
files should be the maximum of 12 and `n_months`. The file name of
each precipitation raster must end with the year, followed by an
underscore, followed by the month number. E.g., Precip_2016_1.tif
for January of 2016.
args['min_temp_dir'] (string): path to a directory containing monthly
minimum temperature rasters. The model requires one minimum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
minimum temperature raster must end with the month number. E.g.,
Min_temperature_1.tif for January.
args['max_temp_dir'] (string): path to a directory containing monthly
maximum temperature rasters. The model requires one maximum
temperature raster for each month of the year, or each month that
the model is run, whichever is smaller. The file name of each
maximum temperature raster must end with the month number. E.g.,
Max_temperature_1.tif for January.
args['site_param_table'] (string): path to csv file giving site
parameters. This file must contain a column named "site" that
contains unique integers. These integer values correspond to site
type identifiers which are values in the site parameter spatial
index raster. Other required fields for this table are site and
"fixed" parameters from the Century model, i.e., the parameters
in the Century input files site.100 and fix.100.
args['site_param_spatial_index_path'] (string): path to a raster file
that indexes site parameters, indicating which set of site
parameter values should apply at each pixel in the raster. The
raster should be composed of integers that correspond to values in
the field "site" in `site_param_table`.
args['veg_trait_path'] (string): path to csv file giving vegetation
traits for each plant functional type available for grazing. This
file must contain a column named "PFT" that contains unique
integers. These integer values correspond to PFT identifiers of
veg spatial composition rasters. Other required fields for this
table are vegetation input parameters from the Century model, for
example maximum intrinsic growth rate, optimum temperature for
production, minimum C/N ratio, etc.
args['veg_spatial_composition_path_pattern'] (string): path to
vegetation rasters, one per plant functional type available for
grazing, where <PFT> can be replaced with an integer that is
indexed in the veg trait csv.
Example: if this value is given as `./vegetation/pft_<PFT>.tif`
and the directory `./vegetation/` contains these files:
"pft_1.tif"
"pft_12.tif"
"pft_50.tif",
then the "PFT" field in the vegetation trait table must contain
the values 1, 12, and 50.
args['animal_trait_path'] (string): path to csv file giving animal
traits for each animal type - number - duration combination. This
table must contain a column named "animal_id" that contains unique
integers. These integer values correspond to features in the
animal management layer.
Other required fields in this table are:
type (allowable values: b_indicus, b_taurus,
indicus_x_taurus, sheep, camelid, hindgut_fermenter)
sex (allowable values: entire_m, castrate, breeding_female,
NA)
age (days)
weight (kg)
SRW (standard reference weight, kg; the weight of a mature
female in median condition)
SFW (standard fleece weight, kg; the average weight of fleece
of a mature adult; for sheep only)
birth_weight (kg)
grz_months (a string of integers, separated by ','; months of
the simulation when animals are present,
relative to `starting_month`. For example, if `n_months`
is 3, and animals are present during the entire simulation
period, `grz_months` should be "1,2,3")
args['animal_grazing_areas_path'] (string): path to animal vector
inputs giving the location of grazing animals. Must have a field
named "animal_id", containing unique integers that correspond to
the values in the "animal_id" column of the animal trait csv, and
a field named "num_animal" giving the number of animals grazing
inside each polygon feature.
args['initial_conditions_dir'] (string): optional input, path to
directory containing initial conditions. If this directory is not
supplied, a site_initial_table and pft_initial_table must be
supplied. If supplied, this directory must contain a series of
rasters with initial values for each PFT and for the site.
Required rasters for each PFT:
initial variables that are a property of PFT in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
e.g., aglivc_<PFT>.tif
Required for the site:
initial variables that are a property of site in the table
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['site_initial_table'] (string): optional input, path to table
containing initial conditions for each site state variable. If an
initial conditions directory is not supplied, this table must be
supplied. This table must contain a value for each site code and
each state variable listed in the following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['pft_initial_table'] (string): optional input, path to table
containing initial conditions for each plant functional type state
variable. If an initial conditions directory is not supplied, this
table must be supplied. This table must contain a value for each
plant functional type index and each state variable listed in the
following table:
https://docs.google.com/spreadsheets/d/1TGCDOJS4nNsJpzTWdiWed390NmbhQFB2uUoMs9oTTYo/edit?usp=sharing
args['save_sv_rasters'] (boolean): optional input, default false.
Should rasters containing all state variables be saved for each
model time step?
        args['animal_density'] (string): optional input, path to raster
            giving the density of grazing animals in animals per hectare.
args['crude_protein'] (float): optional input, crude protein
concentration of forage for the purposes of animal diet selection.
            Should be a value between 0 and 1. If included, this value is
substituted for N content of forage when calculating digestibility
and "ingestibility" of forage, and protein content of the diet, for
grazing animals.
Returns:
None.
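    Example:
        A minimal sketch of an `args` dictionary for a one-year run. All
        file and directory names below are hypothetical placeholders shown
        only to illustrate the expected structure:
            args = {
                'workspace_dir': 'rpm_workspace',
                'starting_month': 1,
                'starting_year': 2016,
                'n_months': 12,
                'aoi_path': 'aoi.shp',
                'management_threshold': 300,
                'proportion_legume_path': 'prop_legume.tif',
                'bulk_density_path': 'bulk_density.tif',
                'ph_path': 'soil_ph.tif',
                'clay_proportion_path': 'clay.tif',
                'silt_proportion_path': 'silt.tif',
                'sand_proportion_path': 'sand.tif',
                'precip_dir': 'precipitation',
                'min_temp_dir': 'min_temperature',
                'max_temp_dir': 'max_temperature',
                'site_param_table': 'site_parameters.csv',
                'site_param_spatial_index_path': 'site_index.tif',
                'veg_trait_path': 'pft_traits.csv',
                'veg_spatial_composition_path_pattern':
                    'vegetation/pft_<PFT>.tif',
                'animal_trait_path': 'animal_traits.csv',
                'animal_grazing_areas_path': 'grazing_areas.shp',
                'site_initial_table': 'site_initial_conditions.csv',
                'pft_initial_table': 'pft_initial_conditions.csv',
            }
            execute(args)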
"""
LOGGER.info("model execute: %s", args)
starting_month = int(args['starting_month'])
starting_year = int(args['starting_year'])
n_months = int(args['n_months'])
try:
delete_sv_folders = not args['save_sv_rasters']
except KeyError:
delete_sv_folders = True
try:
global CRUDE_PROTEIN
CRUDE_PROTEIN = args['crude_protein']
except KeyError:
pass
try:
animal_density_path = args['animal_density']
except KeyError:
args['animal_density'] = None
# this set will build up the integer months that are used so we can index
# them with temperature later
temperature_month_set = set()
# this dict will be used to build the set of input rasters associated with
# a reasonable lookup ID so we can have a nice dataset to align for raster
# stack operations
base_align_raster_path_id_map = {}
precip_dir_list = [
os.path.join(args['precip_dir'], f) for f in
os.listdir(args['precip_dir'])]
for month_index in range(n_months):
month_i = (starting_month + month_index - 1) % 12 + 1
temperature_month_set.add(month_i)
year = starting_year + (starting_month + month_index - 1) // 12
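        # e.g., with starting_month=11, starting_year=2016, and month_index=3,
        # month_i is 2 and year is 2017 (the run wraps into the next calendar
        # year)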
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
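        # e.g., for year 2016 and month 1 this matches 'Precip_2016_1.tif'
        # but not 'Precip_2016_11.tif', because a literal '.' must follow
        # the month number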
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No precipitation data found for year %d, month %d" %
(year, month_i))
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_{}'.format(month_index)] = file_list[0]
# the model requires 12 months of precipitation data to calculate
# atmospheric N deposition and potential production from annual precip
n_precip_months = int(args['n_months'])
if n_precip_months < 12:
m_index = int(args['n_months'])
while n_precip_months < 12:
month_i = (starting_month + m_index - 1) % 12 + 1
year = starting_year + (starting_month + m_index - 1) // 12
year_month_match = re.compile(
r'.*[^\d]%d_%d\.[^.]+$' % (year, month_i))
file_list = [
month_file_path for month_file_path in precip_dir_list if
year_month_match.match(month_file_path)]
if len(file_list) == 0:
break
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for year %d, month %d: %s" %
(year, month_i, file_list))
base_align_raster_path_id_map[
'precip_%d' % m_index] = file_list[0]
n_precip_months = n_precip_months + 1
m_index = m_index + 1
if n_precip_months < 12:
raise ValueError("At least 12 months of precipitation data required")
# collect monthly temperature data
min_temp_dir_list = [
os.path.join(args['min_temp_dir'], f) for f in
os.listdir(args['min_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
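        # e.g., for month 1 this matches 'Min_temperature_1.tif' but not
        # 'Min_temperature_11.tif', because the character before the month
        # number may not be a digit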
file_list = [
month_file_path for month_file_path in min_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No minimum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'min_temp_%d' % month_i] = file_list[0]
max_temp_dir_list = [
os.path.join(args['max_temp_dir'], f) for f in
os.listdir(args['max_temp_dir'])]
for month_i in temperature_month_set:
month_file_match = re.compile(r'.*[^\d]%d\.[^.]+$' % month_i)
file_list = [
month_file_path for month_file_path in max_temp_dir_list if
month_file_match.match(month_file_path)]
if len(file_list) == 0:
raise ValueError(
"No maximum temperature data found for month %d" % month_i)
if len(file_list) > 1:
raise ValueError(
"Ambiguous set of files found for month %d: %s" %
(month_i, file_list))
base_align_raster_path_id_map[
'max_temp_%d' % month_i] = file_list[0]
# lookup to provide path to soil percent given soil type
for soil_type in SOIL_TYPE_LIST:
base_align_raster_path_id_map[soil_type] = (
args['%s_proportion_path' % soil_type])
if not os.path.exists(base_align_raster_path_id_map[soil_type]):
raise ValueError(
"Couldn't find %s for %s" % (
base_align_raster_path_id_map[soil_type], soil_type))
base_align_raster_path_id_map['bulk_d_path'] = args['bulk_density_path']
base_align_raster_path_id_map['ph_path'] = args['ph_path']
# make sure site initial conditions and parameters exist for each site
# identifier
base_align_raster_path_id_map['site_index'] = (
args['site_param_spatial_index_path'])
n_bands = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['n_bands']
if n_bands > 1:
raise ValueError(
'Site spatial index raster must contain only one band')
site_datatype = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['datatype']
if site_datatype not in [1, 2, 3, 4, 5]:
raise ValueError('Site spatial index raster must be integer type')
# get unique values in site param raster
site_index_set = set()
for offset_map, raster_block in pygeoprocessing.iterblocks(
(args['site_param_spatial_index_path'], 1)):
site_index_set.update(numpy.unique(raster_block))
site_nodata = pygeoprocessing.get_raster_info(
args['site_param_spatial_index_path'])['nodata'][0]
if site_nodata in site_index_set:
site_index_set.remove(site_nodata)
site_param_table = utils.build_lookup_from_csv(
args['site_param_table'], 'site')
missing_site_index_list = list(
site_index_set.difference(site_param_table.keys()))
if missing_site_index_list:
raise ValueError(
"Couldn't find parameter values for the following site " +
"indices: %s\n\t" + ", ".join(missing_site_index_list))
# make sure plant functional type parameters exist for each pft raster
pft_dir = os.path.dirname(args['veg_spatial_composition_path_pattern'])
pft_basename = os.path.basename(
args['veg_spatial_composition_path_pattern'])
files = [
f for f in os.listdir(pft_dir) if os.path.isfile(
os.path.join(pft_dir, f))]
pft_regex = re.compile(pft_basename.replace('<PFT>', r'(\d+)'))
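    # e.g., a pattern of 'pft_<PFT>.tif' becomes the regex 'pft_(\d+).tif',
    # so a file named 'pft_12.tif' yields PFT id 12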
pft_matches = [
m for m in [pft_regex.search(f) for f in files] if m is not None]
pft_id_set = set([int(m.group(1)) for m in pft_matches])
for pft_i in pft_id_set:
pft_path = args['veg_spatial_composition_path_pattern'].replace(
'<PFT>', '%d' % pft_i)
base_align_raster_path_id_map['pft_%d' % pft_i] = pft_path
veg_trait_table = utils.build_lookup_from_csv(
args['veg_trait_path'], 'PFT')
missing_pft_trait_list = pft_id_set.difference(veg_trait_table.keys())
if missing_pft_trait_list:
raise ValueError(
"Couldn't find trait values for the following plant functional " +
"types: %s\n\t" + ", ".join(missing_pft_trait_list))
frtcindx_set = set([
pft_i['frtcindx'] for pft_i in veg_trait_table.values()])
if frtcindx_set.difference(set([0, 1])):
raise ValueError("frtcindx parameter contains invalid values")
base_align_raster_path_id_map['proportion_legume_path'] = args[
'proportion_legume_path']
# track separate state variable files for each PFT
pft_sv_dict = {}
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
pft_sv_dict['{}_{}_path'.format(
sv, pft_i)] = '{}_{}.tif'.format(sv, pft_i)
# make sure animal traits exist for each feature in animal management
# layer
anim_id_list = []
driver = ogr.GetDriverByName('ESRI Shapefile')
datasource = driver.Open(args['animal_grazing_areas_path'], 0)
layer = datasource.GetLayer()
for feature in layer:
anim_id_list.append(feature.GetField('animal_id'))
input_animal_trait_table = utils.build_lookup_from_csv(
args['animal_trait_path'], 'animal_id')
missing_animal_trait_list = set(
anim_id_list).difference(input_animal_trait_table.keys())
if missing_animal_trait_list:
raise ValueError(
"Couldn't find trait values for the following animal " +
"ids: %s\n\t" + ", ".join(missing_animal_trait_list))
# if animal density is supplied, align inputs to match its resolution
# otherwise, match resolution of precipitation rasters
if args['animal_density']:
target_pixel_size = pygeoprocessing.get_raster_info(
args['animal_density'])['pixel_size']
base_align_raster_path_id_map['animal_density'] = args[
'animal_density']
else:
target_pixel_size = pygeoprocessing.get_raster_info(
base_align_raster_path_id_map['precip_0'])['pixel_size']
LOGGER.info(
"pixel size of aligned inputs: %s", target_pixel_size)
# temporary directory for intermediate files
global PROCESSING_DIR
PROCESSING_DIR = os.path.join(args['workspace_dir'], "temporary_files")
if not os.path.exists(PROCESSING_DIR):
os.makedirs(PROCESSING_DIR)
# set up a dictionary that uses the same keys as
# 'base_align_raster_path_id_map' to point to the clipped/resampled
# rasters to be used in raster calculations for the model.
aligned_raster_dir = os.path.join(
args['workspace_dir'], 'aligned_inputs')
if os.path.exists(aligned_raster_dir):
shutil.rmtree(aligned_raster_dir)
os.makedirs(aligned_raster_dir)
aligned_inputs = dict([(key, os.path.join(
aligned_raster_dir, 'aligned_%s' % os.path.basename(path)))
for key, path in base_align_raster_path_id_map.items()])
# align all the base inputs to be the minimum known pixel size and to
# only extend over their combined intersections
source_input_path_list = [
base_align_raster_path_id_map[k] for k in sorted(
base_align_raster_path_id_map.keys())]
aligned_input_path_list = [
aligned_inputs[k] for k in sorted(aligned_inputs.keys())]
pygeoprocessing.align_and_resize_raster_stack(
source_input_path_list, aligned_input_path_list,
['near'] * len(source_input_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']],
vector_mask_options={'mask_vector_path': args['aoi_path']})
_check_pft_fractional_cover_sum(aligned_inputs, pft_id_set)
file_suffix = utils.make_suffix_string(args, 'results_suffix')
# create animal trait spatial index raster from management polygon
aligned_inputs['animal_index'] = os.path.join(
aligned_raster_dir, 'animal_spatial_index.tif')
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], aligned_inputs['animal_index'],
gdal.GDT_Int32, [_TARGET_NODATA], fill_value_list=[_TARGET_NODATA])
pygeoprocessing.rasterize(
args['animal_grazing_areas_path'], aligned_inputs['animal_index'],
option_list=["ATTRIBUTE=animal_id"])
# create uniform animal density raster, if not supplied as input
if not args['animal_density']:
aligned_inputs['animal_density'] = os.path.join(
aligned_raster_dir, 'animal_density.tif')
_animal_density(aligned_inputs, args['animal_grazing_areas_path'])
# Initialization
sv_dir = os.path.join(args['workspace_dir'], 'state_variables_m-1')
os.makedirs(sv_dir)
initial_conditions_dir = None
try:
initial_conditions_dir = args['initial_conditions_dir']
except KeyError:
pass
if initial_conditions_dir:
# check that a raster for each required state variable is supplied
missing_initial_values = []
# set _SV_NODATA from initial rasters
state_var_nodata = set([])
# align initial state variables to resampled inputs
resample_initial_path_map = {}
for sv in _SITE_STATE_VARIABLE_FILES:
sv_path = os.path.join(
initial_conditions_dir, _SITE_STATE_VARIABLE_FILES[sv])
            if not os.path.exists(sv_path):
                missing_initial_values.append(sv_path)
                continue
            state_var_nodata.update(
                set([pygeoprocessing.get_raster_info(sv_path)['nodata'][0]]))
            resample_initial_path_map[sv] = sv_path
for pft_i in pft_id_set:
for sv in _PFT_STATE_VARIABLES:
sv_key = '{}_{}_path'.format(sv, pft_i)
sv_path = os.path.join(
initial_conditions_dir, '{}_{}.tif'.format(sv, pft_i))
                if not os.path.exists(sv_path):
                    missing_initial_values.append(sv_path)
                    continue
                state_var_nodata.update(
                    set([pygeoprocessing.get_raster_info(sv_path)['nodata'][0]]))
                resample_initial_path_map[sv_key] = sv_path
if missing_initial_values:
raise ValueError(
"Couldn't find the following required initial values: " +
"\n\t".join(missing_initial_values))
if len(state_var_nodata) > 1:
raise ValueError(
"Initial state variable rasters contain >1 nodata value")
global _SV_NODATA
_SV_NODATA = list(state_var_nodata)[0]
# align initial values with inputs
initial_path_list = (
[aligned_inputs['precip_0']] +
[resample_initial_path_map[key] for key in sorted(
resample_initial_path_map.keys())])
aligned_initial_path_list = (
[os.path.join(PROCESSING_DIR, 'aligned_input_template.tif')] +
[os.path.join(
sv_dir, os.path.basename(resample_initial_path_map[key])) for
key in sorted(resample_initial_path_map.keys())])
pygeoprocessing.align_and_resize_raster_stack(
initial_path_list, aligned_initial_path_list,
['near'] * len(initial_path_list),
target_pixel_size, 'intersection',
base_vector_path_list=[args['aoi_path']], raster_align_index=0,
vector_mask_options={'mask_vector_path': args['aoi_path']})
sv_reg = dict(
[(key, os.path.join(sv_dir, os.path.basename(path)))
for key, path in resample_initial_path_map.items()])
else:
# create initialization rasters from tables
try:
site_initial_conditions_table = utils.build_lookup_from_csv(
args['site_initial_table'], 'site')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
missing_site_index_list = list(
site_index_set.difference(site_initial_conditions_table.keys()))
if missing_site_index_list:
raise ValueError(
"Couldn't find initial conditions values for the following " +
"site indices: %s\n\t" + ", ".join(missing_site_index_list))
try:
pft_initial_conditions_table = utils.build_lookup_from_csv(
args['pft_initial_table'], 'PFT')
except KeyError:
raise ValueError(
"If initial conditions rasters are not supplied, initial " +
"conditions tables must be supplied")
missing_pft_index_list = pft_id_set.difference(
pft_initial_conditions_table.keys())
if missing_pft_index_list:
raise ValueError(
"Couldn't find initial condition values for the following "
"plant functional types: %s\n\t" + ", ".join(
missing_pft_index_list))
sv_reg = initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table)
# calculate persistent intermediate parameters that do not change during
# the simulation
persist_param_dir = os.path.join(
args['workspace_dir'], 'intermediate_parameters')
utils.make_directories([persist_param_dir])
pp_reg = utils.build_file_registry(
[(_PERSISTENT_PARAMS_FILES, persist_param_dir)], file_suffix)
# calculate derived animal traits that do not change during the simulation
freer_parameter_df = pandas.DataFrame.from_dict(
_FREER_PARAM_DICT, orient='index')
freer_parameter_df['type'] = freer_parameter_df.index
animal_trait_table = calc_derived_animal_traits(
input_animal_trait_table, freer_parameter_df)
# calculate maximum potential intake of each animal type
for animal_id in animal_trait_table.keys():
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# calculate field capacity and wilting point
LOGGER.info("Calculating field capacity and wilting point")
_afiel_awilt(
aligned_inputs['site_index'], site_param_table,
sv_reg['som1c_2_path'], sv_reg['som2c_2_path'], sv_reg['som3c_path'],
aligned_inputs['sand'], aligned_inputs['silt'],
aligned_inputs['clay'], aligned_inputs['bulk_d_path'], pp_reg)
# calculate other persistent parameters
LOGGER.info("Calculating persistent parameters")
_persistent_params(
aligned_inputs['site_index'], site_param_table,
aligned_inputs['sand'], aligned_inputs['clay'], pp_reg)
# calculate required ratios for decomposition of structural material
LOGGER.info("Calculating required ratios for structural decomposition")
_structural_ratios(
aligned_inputs['site_index'], site_param_table, sv_reg, pp_reg)
# make yearly directory for values that are updated every twelve months
year_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
year_reg = dict(
[(key, os.path.join(year_dir, path)) for key, path in
_YEARLY_FILES.items()])
for pft_i in pft_id_set:
for file in _YEARLY_PFT_FILES:
year_reg['{}_{}'.format(file, pft_i)] = os.path.join(
year_dir, '{}_{}.tif'.format(file, pft_i))
# make monthly directory for monthly intermediate parameters that are
# shared between submodels, but do not need to be saved as output
month_temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
month_reg = {}
for pft_i in pft_id_set:
for val in _PFT_INTERMEDIATE_VALUES:
month_reg['{}_{}'.format(
val, pft_i)] = os.path.join(
month_temp_dir, '{}_{}.tif'.format(val, pft_i))
for val in _SITE_INTERMEDIATE_VALUES:
month_reg[val] = os.path.join(month_temp_dir, '{}.tif'.format(val))
output_dir = os.path.join(args['workspace_dir'], "output")
if not os.path.exists(output_dir):
os.makedirs(output_dir)
# provisional state variable registry contains provisional biomass in
# absence of grazing
provisional_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
provisional_sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, provisional_sv_dir),
(pft_sv_dict, provisional_sv_dir)], file_suffix)
intermediate_sv_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
# Main simulation loop
# for each step in the simulation
for month_index in range(n_months):
if (month_index % 12) == 0:
# Update yearly quantities
_yearly_tasks(
aligned_inputs, site_param_table, veg_trait_table, month_index,
pft_id_set, year_reg)
current_month = (starting_month + month_index - 1) % 12 + 1
current_year = starting_year + (starting_month + month_index - 1) // 12
# track state variables from previous step
prev_sv_reg = sv_reg
for animal_id in animal_trait_table.keys():
if animal_trait_table[animal_id]['sex'] == 'breeding_female':
revised_animal_trait_dict = update_breeding_female_status(
animal_trait_table[animal_id], month_index)
animal_trait_table[animal_id] = revised_animal_trait_dict
revised_animal_trait_dict = calc_max_intake(
animal_trait_table[animal_id])
animal_trait_table[animal_id] = revised_animal_trait_dict
# enforce absence of grazing as zero biomass removed
for pft_i in pft_id_set:
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['flgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
pygeoprocessing.new_raster_from_base(
aligned_inputs['pft_{}'.format(pft_i)],
month_reg['fdgrem_{}'.format(pft_i)], gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
# populate provisional_sv_reg with provisional biomass in absence of
# grazing
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg,
provisional_sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg,
provisional_sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg,
provisional_sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
provisional_sv_reg)
intermediate_sv_reg = copy_intermediate_sv(
pft_id_set, provisional_sv_reg, intermediate_sv_dir)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, provisional_sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, provisional_sv_reg)
# estimate grazing offtake by animals relative to provisional biomass
# at an intermediate step, after senescence but before new growth
_calc_grazing_offtake(
aligned_inputs, args['aoi_path'], args['management_threshold'],
intermediate_sv_reg, pft_id_set, aligned_inputs['animal_index'],
animal_trait_table, veg_trait_table, current_month, month_reg)
# estimate actual biomass production for this step, integrating impacts
# of grazing
sv_dir = os.path.join(
args['workspace_dir'], 'state_variables_m%d' % month_index)
utils.make_directories([sv_dir])
sv_reg = utils.build_file_registry(
[(_SITE_STATE_VARIABLE_FILES, sv_dir),
(pft_sv_dict, sv_dir)], file_suffix)
_potential_production(
aligned_inputs, site_param_table, current_month, month_index,
pft_id_set, veg_trait_table, prev_sv_reg, pp_reg, month_reg)
_root_shoot_ratio(
aligned_inputs, site_param_table, current_month, pft_id_set,
veg_trait_table, prev_sv_reg, year_reg, month_reg)
_soil_water(
aligned_inputs, site_param_table, veg_trait_table, current_month,
month_index, prev_sv_reg, pp_reg, pft_id_set, month_reg, sv_reg)
_decomposition(
aligned_inputs, current_month, month_index, pft_id_set,
site_param_table, year_reg, month_reg, prev_sv_reg, pp_reg, sv_reg)
_death_and_partition(
'stded', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_death_and_partition(
'bgliv', aligned_inputs, site_param_table, current_month,
year_reg, pft_id_set, veg_trait_table, prev_sv_reg, sv_reg)
_shoot_senescence(
pft_id_set, veg_trait_table, prev_sv_reg, month_reg, current_month,
sv_reg)
delta_agliv_dict = _new_growth(
pft_id_set, aligned_inputs, site_param_table, veg_trait_table,
month_reg, current_month, sv_reg)
_animal_diet_sufficiency(
sv_reg, pft_id_set, aligned_inputs, animal_trait_table,
veg_trait_table, current_month, month_reg)
_grazing(
aligned_inputs, site_param_table, month_reg, animal_trait_table,
pft_id_set, sv_reg)
_apply_new_growth(delta_agliv_dict, pft_id_set, sv_reg)
_leach(aligned_inputs, site_param_table, month_reg, sv_reg)
_write_monthly_outputs(
aligned_inputs, provisional_sv_reg, sv_reg, month_reg, pft_id_set,
current_year, current_month, output_dir, file_suffix)
# summary results
summary_output_dir = os.path.join(output_dir, 'summary_results')
os.makedirs(summary_output_dir)
summary_shp_path = os.path.join(
summary_output_dir,
'grazing_areas_results_rpm{}.shp'.format(file_suffix))
create_vector_copy(
args['animal_grazing_areas_path'], summary_shp_path)
field_pickle_map, field_header_order_list = aggregate_and_pickle_results(
output_dir, summary_shp_path)
_add_fields_to_shapefile(
field_pickle_map, field_header_order_list, summary_shp_path)
# clean up
shutil.rmtree(persist_param_dir)
shutil.rmtree(PROCESSING_DIR)
if delete_sv_folders:
for month_index in range(-1, n_months):
shutil.rmtree(
os.path.join(
args['workspace_dir'],
'state_variables_m%d' % month_index))
def raster_multiplication(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Multiply raster1 by raster2.
Multiply raster1 by raster2 element-wise. In any pixel where raster1 or
raster2 is nodata, the result is nodata. The result is always of float
datatype.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_multiply_op(raster1, raster2):
"""Multiply two rasters."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
result[valid_mask] = raster1[valid_mask] * raster2[valid_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_multiply_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_division(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_path_nodata):
"""Divide raster1 by raster2.
Divide raster1 by raster2 element-wise. In any pixel where raster1 or
raster2 is nodata, the result is nodata. The result is always of float
datatype.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_divide_op(raster1, raster2):
"""Divide raster1 by raster2."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
raster1 = raster1.astype(numpy.float32)
raster2 = raster2.astype(numpy.float32)
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_path_nodata
error_mask = ((raster1 != 0) & (raster2 == 0.) & valid_mask)
zero_mask = ((raster1 == 0.) & (raster2 == 0.) & valid_mask)
nonzero_mask = ((raster2 != 0.) & valid_mask)
result[error_mask] = target_path_nodata
result[zero_mask] = 0.
result[nonzero_mask] = raster1[nonzero_mask] / raster2[nonzero_mask]
return result
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_divide_op, target_path, gdal.GDT_Float32,
target_path_nodata)
def raster_list_sum(
raster_list, input_nodata, target_path, target_nodata,
nodata_remove=False):
"""Calculate the sum per pixel across rasters in a list.
Sum the rasters in `raster_list` element-wise, allowing nodata values
in the rasters to propagate to the result or treating nodata as zero. If
nodata is treated as zero, areas where all inputs are nodata will be nodata
in the output.
Parameters:
raster_list (list): list of paths to rasters to sum
input_nodata (float or int): nodata value in the input rasters
target_path (string): path to location to store the result
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any input
raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(*raster_list):
"""Add the rasters in raster_list without removing nodata values."""
invalid_mask = numpy.any(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
def raster_sum_op_nodata_remove(*raster_list):
"""Add the rasters in raster_list, treating nodata as zero."""
invalid_mask = numpy.all(
numpy.isclose(numpy.array(raster_list), input_nodata), axis=0)
for r in raster_list:
numpy.place(r, numpy.isclose(r, input_nodata), [0])
sum_of_rasters = numpy.sum(raster_list, axis=0)
sum_of_rasters[invalid_mask] = target_nodata
return sum_of_rasters
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op_nodata_remove,
target_path, gdal.GDT_Float32, target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in raster_list], raster_sum_op,
target_path, gdal.GDT_Float32, target_nodata)
def raster_sum(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Add raster 1 and raster2.
Add raster1 and raster2, allowing nodata values in the rasters to
propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to one raster operand
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to second raster operand
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the sum
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the sum in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_sum_op(raster1, raster2):
"""Add raster1 and raster2 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] + raster2[valid_mask]
return result
def raster_sum_op_nodata_remove(raster1, raster2):
"""Add raster1 and raster2, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 + raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_sum_op, target_path, gdal.GDT_Float32,
target_nodata)
def raster_difference(
raster1, raster1_nodata, raster2, raster2_nodata, target_path,
target_nodata, nodata_remove=False):
"""Subtract raster2 from raster1.
Subtract raster2 from raster1 element-wise, allowing nodata values in the
rasters to propagate to the result or treating nodata as zero.
Parameters:
raster1 (string): path to raster from which to subtract raster2
raster1_nodata (float or int): nodata value in raster1
raster2 (string): path to raster which should be subtracted from
raster1
raster2_nodata (float or int): nodata value in raster2
target_path (string): path to location to store the difference
target_nodata (float or int): nodata value for the result raster
nodata_remove (bool): if true, treat nodata values in input
rasters as zero. If false, the difference in a pixel where any
input raster is nodata is nodata.
Side effects:
modifies or creates the raster indicated by `target_path`
Returns:
None
"""
def raster_difference_op(raster1, raster2):
"""Subtract raster2 from raster1 without removing nodata values."""
valid_mask = (
(~numpy.isclose(raster1, raster1_nodata)) &
(~numpy.isclose(raster2, raster2_nodata)))
result = numpy.empty(raster1.shape, dtype=numpy.float32)
result[:] = target_nodata
result[valid_mask] = raster1[valid_mask] - raster2[valid_mask]
return result
def raster_difference_op_nodata_remove(raster1, raster2):
"""Subtract raster2 from raster1, treating nodata as zero."""
numpy.place(raster1, numpy.isclose(raster1, raster1_nodata), [0])
numpy.place(raster2, numpy.isclose(raster2, raster2_nodata), [0])
result = raster1 - raster2
return result
if nodata_remove:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op_nodata_remove, target_path, gdal.GDT_Float32,
target_nodata)
else:
pygeoprocessing.raster_calculator(
[(path, 1) for path in [raster1, raster2]],
raster_difference_op, target_path, gdal.GDT_Float32,
target_nodata)
def reclassify_nodata(target_path, new_nodata_value):
"""Reclassify the nodata value of a raster to a new value.
    Convert all areas of nodata in the target raster to the new nodata
    value.
Parameters:
target_path (string): path to target raster
        new_nodata_value (float or int): new value to set as nodata
Side effects:
modifies the raster indicated by `target_path`
Returns:
None
"""
def reclassify_op(target_raster):
reclassified_raster = numpy.copy(target_raster)
reclassify_mask = (target_raster == previous_nodata_value)
reclassified_raster[reclassify_mask] = new_nodata_value
return reclassified_raster
fd, temp_path = tempfile.mkstemp(dir=PROCESSING_DIR)
shutil.copyfile(target_path, temp_path)
previous_nodata_value = pygeoprocessing.get_raster_info(
target_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(temp_path, 1)], reclassify_op, target_path, gdal.GDT_Float32,
new_nodata_value)
# clean up
os.close(fd)
os.remove(temp_path)
def weighted_state_variable_sum(
sv, sv_reg, aligned_inputs, pft_id_set, weighted_sum_path):
"""Calculate weighted sum of state variable across plant functional types.
To sum a state variable across PFTs within a grid cell, the state variable
must be weighted by the fractional cover of each PFT inside the grid cell.
First multiply the state variable by its fractional cover, and then add up
the weighted products.
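    For example, in a pixel shared by two plant functional types, the
    weighted sum is
        sv_weighted = sv_1 * cover_1 + sv_2 * cover_2
    where cover_i is the fractional cover of PFT i in that pixel.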
Parameters:
sv (string): state variable to be summed across plant functional types
sv_reg (dict): map of key, path pairs giving paths to state variables,
including sv, the state variable to be summed
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
weighted_sum_path (string): path to raster that should contain the
weighted sum across PFTs
Side effects:
modifies or creates the raster indicated by `weighted_sum_path`
Returns:
None
"""
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
temp_val_dict = {}
for pft_i in pft_id_set:
val = '{}_weighted'.format(sv)
temp_val_dict['{}_{}'.format(val, pft_i)] = os.path.join(
temp_dir, '{}_{}.tif'.format(val, pft_i))
weighted_path_list = []
for pft_i in pft_id_set:
target_path = temp_val_dict['{}_weighted_{}'.format(sv, pft_i)]
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_multiplication(
sv_reg['{}_{}_path'.format(sv, pft_i)], _SV_NODATA,
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
target_path, _TARGET_NODATA)
weighted_path_list.append(target_path)
raster_list_sum(
weighted_path_list, _TARGET_NODATA, weighted_sum_path, _TARGET_NODATA,
nodata_remove=True)
# clean up temporary files
shutil.rmtree(temp_dir)
def _check_pft_fractional_cover_sum(aligned_inputs, pft_id_set):
"""Check the sum of fractional cover across plant functional types.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including fractional cover of each plant
functional type
pft_id_set (set): set of integers identifying plant functional types
Raises:
ValueError if the pixel-wise sum of fractional cover values across
plant functional types exceeds 1
Returns:
None
"""
with tempfile.NamedTemporaryFile(
prefix='cover_sum', dir=PROCESSING_DIR) as cover_sum_temp_file:
cover_sum_path = cover_sum_temp_file.name
with tempfile.NamedTemporaryFile(
prefix='operand_temp', dir=PROCESSING_DIR) as operand_temp_file:
operand_temp_path = operand_temp_file.name
# initialize sum to zero
pygeoprocessing.new_raster_from_base(
aligned_inputs['site_index'], cover_sum_path, gdal.GDT_Float32,
[_TARGET_NODATA], fill_value_list=[0])
for pft_i in pft_id_set:
shutil.copyfile(cover_sum_path, operand_temp_path)
pft_nodata = pygeoprocessing.get_raster_info(
aligned_inputs['pft_{}'.format(pft_i)])['nodata'][0]
raster_sum(
aligned_inputs['pft_{}'.format(pft_i)], pft_nodata,
operand_temp_path, _TARGET_NODATA,
cover_sum_path, _TARGET_NODATA)
# get maximum sum of fractional cover
max_cover = 0.
for offset_map, raster_block in pygeoprocessing.iterblocks(
(cover_sum_path, 1)):
valid_mask = (raster_block != _TARGET_NODATA)
if raster_block[valid_mask].size > 0:
max_cover = max(max_cover, numpy.amax(raster_block[valid_mask]))
if max_cover > 1:
raise ValueError(
"Fractional cover across plant functional types exceeds 1")
    # clean up temporary files
    os.remove(cover_sum_path)
    os.remove(operand_temp_path)
def initial_conditions_from_tables(
aligned_inputs, sv_dir, pft_id_set, site_initial_conditions_table,
pft_initial_conditions_table):
"""Generate initial state variable registry from initial conditions tables.
Parameters:
aligned_inputs (dict): map of key, path pairs indicating paths
to aligned model inputs, including site spatial index raster and
fractional cover of each plant functional type
sv_dir (string): path to directory where initial state variable rasters
should be stored
pft_id_set (set): set of integers identifying plant functional types
site_initial_conditions_table (dict): map of site spatial index to
dictionaries that contain initial values for site-level state
variables
pft_initial_conditions_table (dict): map of plant functional type index
to dictionaries that contain initial values for plant functional
type-level state variables
Returns:
initial_sv_reg, map of key, path pairs giving paths to initial state
variable rasters
"""
def full_masked(pft_cover, fill_val):
"""Create a constant raster masked by pft fractional cover.
Parameters:
pft_cover (numpy.ndarray): input, fractional cover of the plant
functional type
fill_val (float): constant value with which to fill raster in areas
where fractional cover > 0
Returns:
full_masked, a raster containing `fill_val` in areas where
`pft_cover` > 0
"""
valid_mask = (
(~numpy.isclose(pft_cover, _SV_NODATA)) &
(pft_cover > 0))
full_masked = numpy.empty(pft_cover.shape, dtype=numpy.float32)
full_masked[:] = _SV_NODATA
full_masked[valid_mask] = fill_val
return full_masked
initial_sv_reg = {}
# site-level state variables
# check for missing state variable values
required_site_state_var = set(
[sv_key[:-5] for sv_key in _SITE_STATE_VARIABLE_FILES.keys()])
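    # the [:-5] above strips the '_path' suffix,
    # e.g. 'metabc_1_path' -> 'metabc_1'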
for site_code in site_initial_conditions_table.keys():
missing_site_state_var = required_site_state_var.difference(
site_initial_conditions_table[site_code].keys())
if missing_site_state_var:
raise ValueError(
"The following state variables were not found in the site " +
"initial conditions table: \n\t" + "\n\t".join(
missing_site_state_var))
for sv_key, basename in _SITE_STATE_VARIABLE_FILES.items():
state_var = sv_key[:-5]
site_to_val = dict(
[(site_code, float(table[state_var])) for (
site_code, table) in
site_initial_conditions_table.items()])
target_path = os.path.join(sv_dir, basename)
initial_sv_reg[sv_key] = target_path
pygeoprocessing.reclassify_raster(
(aligned_inputs['site_index'], 1), site_to_val, target_path,
gdal.GDT_Float32, _SV_NODATA)
# PFT-level state variables
for pft_i in pft_id_set:
# check for missing values
missing_pft_state_var = set(_PFT_STATE_VARIABLES).difference(
pft_initial_conditions_table[pft_i].keys())
if missing_pft_state_var:
raise ValueError(
"The following state variables were not found in the plant " +
"functional type initial conditions table: \n\t" + "\n\t".join(
missing_pft_state_var))
for state_var in _PFT_STATE_VARIABLES:
fill_val = pft_initial_conditions_table[pft_i][state_var]
pft_cover_path = aligned_inputs['pft_{}'.format(pft_i)]
target_path = os.path.join(
sv_dir, '{}_{}.tif'.format(state_var, pft_i))
sv_key = '{}_{}_path'.format(state_var, pft_i)
initial_sv_reg[sv_key] = target_path
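            # the (fill_val, 'raw') tuple passes the scalar value unchanged
            # to `full_masked` for every raster block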
pygeoprocessing.raster_calculator(
[(pft_cover_path, 1), (fill_val, 'raw')],
full_masked, target_path, gdal.GDT_Float32, _SV_NODATA)
return initial_sv_reg
def _calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulkd_path, edepth_path,
ompc_path):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
som1c_2_path (string): path to active organic soil carbon raster
som2c_2_path (string): path to slow organic soil carbon raster
som3c_path (string): path to passive organic soil carbon raster
bulkd_path (string): path to bulk density of soil raster
        edepth_path (string): path to depth of soil raster
ompc_path (string): path to result, total soil organic matter
Side effects:
modifies or creates the raster indicated by `ompc_path`
Returns:
None
"""
def ompc_op(som1c_2, som2c_2, som3c, bulkd, edepth):
"""Estimate total soil organic matter.
Total soil organic matter is the sum of soil carbon across
slow, active, and passive compartments, weighted by bulk
density and total modeled soil depth. Lines 220-222, Prelim.f
Parameters:
som1c_2 (numpy.ndarray): state variable, active organic soil carbon
som2c_2 (numpy.ndarray): state variable, slow organic soil carbon
som3c (numpy.ndarray): state variable, passive organic soil carbon
bulkd (numpy.ndarray): input, bulk density of soil
edepth (numpy.ndarray): parameter, depth of soil for this
calculation
Returns:
ompc, total soil organic matter weighted by bulk
density.
"""
ompc = numpy.empty(som1c_2.shape, dtype=numpy.float32)
ompc[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(som1c_2, _SV_NODATA)) &
(~numpy.isclose(som2c_2, _SV_NODATA)) &
(~numpy.isclose(som3c, _SV_NODATA)) &
(~numpy.isclose(bulkd, bulkd_nodata)) &
(edepth != _IC_NODATA))
ompc[valid_mask] = (
(som1c_2[valid_mask] + som2c_2[valid_mask] +
som3c[valid_mask]) * 1.724 /
(10000. * bulkd[valid_mask] * edepth[valid_mask]))
return ompc
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
som1c_2_path, som2c_2_path, som3c_path,
bulkd_path, edepth_path]],
ompc_op, ompc_path, gdal.GDT_Float32, _TARGET_NODATA)
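# Illustrative sketch only (not part of the model run): the same organic
# matter conversion used in `ompc_op`, applied to scalar values so the
# arithmetic is easy to follow. The carbon, bulk density, and depth values
# below are hypothetical, chosen purely for demonstration.
def _example_ompc_point_estimate():
    """Compute total soil organic matter for one hypothetical pixel."""
    som1c_2 = 50.    # active organic soil carbon (hypothetical)
    som2c_2 = 1500.  # slow organic soil carbon (hypothetical)
    som3c = 1000.    # passive organic soil carbon (hypothetical)
    bulkd = 1.4      # bulk density of soil (hypothetical)
    edepth = 0.2     # modeled soil depth (hypothetical)
    # carbon is converted to organic matter with the conventional 1.724
    # factor, then weighted by bulk density and modeled soil depth
    ompc = (som1c_2 + som2c_2 + som3c) * 1.724 / (10000. * bulkd * edepth)
    return ompc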
def _calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, afiel_path):
"""Calculate field capacity for one soil layer.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
afiel_path (string): path to result raster, field capacity for this
soil layer
Side effects:
creates the raster indicated by `afiel_path`
Returns:
None
"""
def afiel_op(sand, silt, clay, ompc, bulkd):
"""Calculate field capacity for one soil layer.
Field capacity, maximum soil moisture retention capacity,
from Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand (numpy.ndarray): input, proportion sand in soil
silt (numpy.ndarray): input, proportion silt in soil
clay (numpy.ndarray): input, proportion clay in soil
ompc (numpy.ndarray): derived, estimated total soil organic matter
bulkd (numpy.ndarray): input, bulk density of soil
Returns:
afiel, field capacity for this soil layer
"""
afiel = numpy.empty(sand.shape, dtype=numpy.float32)
afiel[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
afiel[valid_mask] = (
0.3075 * sand[valid_mask] + 0.5886 * silt[valid_mask] +
0.8039 * clay[valid_mask] + 2.208E-03 * ompc[valid_mask] +
-0.1434 * bulkd[valid_mask])
return afiel
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
afiel_op, afiel_path, gdal.GDT_Float32, _TARGET_NODATA)
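# Illustrative sketch only: the Gupta and Larson (1979) field capacity
# regression from `afiel_op`, evaluated on scalar values for one
# hypothetical soil. The texture proportions, organic matter, and bulk
# density below are assumptions made for demonstration, not model inputs.
def _example_afiel_point_estimate():
    """Field capacity of a hypothetical loam via the Gupta-Larson regression."""
    sand, silt, clay = 0.4, 0.4, 0.2   # texture proportions (hypothetical)
    ompc = 0.02                        # total soil organic matter (hypothetical)
    bulkd = 1.4                        # bulk density of soil (hypothetical)
    afiel = (
        0.3075 * sand + 0.5886 * silt + 0.8039 * clay +
        2.208E-03 * ompc - 0.1434 * bulkd)
    return afiel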
def _calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulkd_path, awilt_path):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand_path (string): path to proportion sand in soil raster
silt_path (string): path to proportion silt in soil raster
clay_path (string): path to proportion clay in soil raster
ompc_path (string): path to estimated total soil organic matter raster
bulkd_path (string): path to bulk density of soil raster
awilt_path (string): path to result raster, wilting point for this
soil layer
Side effects:
creates the raster indicated by `awilt_path`
Returns:
None
"""
def awilt_op(sand, silt, clay, ompc, bulkd):
"""Calculate wilting point for one soil layer.
Wilting point, minimum soil water required by plants before
wilting, from Gupta and Larson 1979, 'Estimating soil and
water retention characteristics from particle size distribution,
organic matter percent and bulk density'. Water Resources
Research 15:1633.
Parameters:
sand (numpy.ndarray): input, proportion sand in soil
silt (numpy.ndarray): input, proportion silt in soil
clay (numpy.ndarray): input, proportion clay in soil
ompc (numpy.ndarray): derived, estimated total soil organic matter
bulkd (numpy.ndarray): input, bulk density of soil
Returns:
awilt, wilting point for this soil layer
"""
awilt = numpy.empty(sand.shape, dtype=numpy.float32)
awilt[:] = _TARGET_NODATA
valid_mask = (
(~numpy.isclose(sand, sand_nodata)) &
(~numpy.isclose(silt, silt_nodata)) &
(~numpy.isclose(clay, clay_nodata)) &
(ompc != _TARGET_NODATA) &
(~numpy.isclose(bulkd, bulkd_nodata)))
awilt[valid_mask] = (
-0.0059 * sand[valid_mask] + 0.1142 * silt[valid_mask] +
0.5766 * clay[valid_mask] + 2.228E-03 * ompc[valid_mask] +
0.02671 * bulkd[valid_mask])
return awilt
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
silt_nodata = pygeoprocessing.get_raster_info(silt_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
bulkd_nodata = pygeoprocessing.get_raster_info(bulkd_path)['nodata'][0]
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
sand_path, silt_path, clay_path, ompc_path, bulkd_path]],
awilt_op, awilt_path, gdal.GDT_Float32, _TARGET_NODATA)
def _afiel_awilt(
site_index_path, site_param_table, som1c_2_path, som2c_2_path,
som3c_path, sand_path, silt_path, clay_path, bulk_d_path, pp_reg):
"""Calculate field capacity and wilting point for each soil layer.
Computations based on Gupta and Larson 1979, 'Estimating soil and water
retention characteristics from particle size distribution, organic
matter percent and bulk density'. Water Resources Research 15:1633.
Field capacity is calculated for -0.33 bar; wilting point is
calculated for water content at -15 bars.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters including 'edepth' field
som1c_2_path (string): path to the state variable 'som1c_2',
active organic soil carbon
som2c_2_path (string): path to the state variable 'som2c_2',
slow organic soil carbon
som3c_path (string): path to the state variable 'som3c',
passive organic soil carbon
sand_path (string): path to raster containing proportion sand in soil
silt_path (string): path to raster containing proportion silt in soil
clay_path (string): path to raster containing proportion clay in soil
bulk_d_path (string): path to raster containing bulk density of soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation
Modifies the rasters pp_reg['afiel_<layer>'] and pp_reg['awilt_<layer>']
for all soil layers.
Returns:
None
"""
def decrement_ompc(ompc_orig_path, ompc_dec_path):
"""Decrease estimated organic matter to 85% of its value.
In each subsequent soil layer, estimated organic matter is decreased
by 15%, to 85% of its previous value.
Parameters:
ompc_orig_path (string): path to estimated soil organic matter
raster
ompc_dec_path (string): path to result raster, estimated soil
organic matter decreased to 85% of its previous value
Side effects:
modifies or creates the raster indicated by `ompc_dec_path`
Returns:
None
"""
def decrement_op(ompc_orig):
"""Reduce organic matter to 85% of its previous value."""
ompc_dec = numpy.empty(ompc_orig.shape, dtype=numpy.float32)
ompc_dec[:] = _TARGET_NODATA
valid_mask = (ompc_orig != _TARGET_NODATA)
ompc_dec[valid_mask] = ompc_orig[valid_mask] * 0.85
return ompc_dec
pygeoprocessing.raster_calculator(
[(ompc_orig_path, 1)], decrement_op, ompc_dec_path,
gdal.GDT_Float32, _TARGET_NODATA)
# temporary intermediate rasters for calculating field capacity and
# wilting point
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
edepth_path = os.path.join(temp_dir, 'edepth.tif')
ompc_path = os.path.join(temp_dir, 'ompc.tif')
site_to_edepth = dict(
[(site_code, float(table['edepth'])) for
(site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_edepth, edepth_path, gdal.GDT_Float32,
_IC_NODATA)
# estimate total soil organic matter
_calc_ompc(
som1c_2_path, som2c_2_path, som3c_path, bulk_d_path, edepth_path,
ompc_path)
# calculate field capacity and wilting point for each soil layer,
# decreasing organic matter content to 85% of its previous value with each successive layer
for lyr in range(1, 10):
afiel_path = pp_reg['afiel_{}_path'.format(lyr)]
awilt_path = pp_reg['awilt_{}_path'.format(lyr)]
_calc_afiel(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
afiel_path)
_calc_awilt(
sand_path, silt_path, clay_path, ompc_path, bulk_d_path,
awilt_path)
ompc_dec_path = os.path.join(temp_dir, 'ompc{}.tif'.format(lyr))
decrement_ompc(ompc_path, ompc_dec_path)
ompc_path = ompc_dec_path
# clean up temporary files
shutil.rmtree(temp_dir)
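# Illustrative sketch only: because `_afiel_awilt` calls `decrement_ompc`
# once per layer, the organic matter estimate used for layer `lyr` is the
# surface estimate scaled by 0.85 ** (lyr - 1). The helper below reproduces
# that schedule on a scalar, purely to make the loop's behavior explicit.
def _example_ompc_by_layer(surface_ompc, n_layers=9):
    """Return the organic matter estimate applied to each of `n_layers` layers."""
    return [surface_ompc * 0.85 ** (lyr - 1) for lyr in range(1, n_layers + 1)]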
def _persistent_params(
site_index_path, site_param_table, sand_path, clay_path, pp_reg):
"""Calculate persistent parameters.
The calculated values do not change over the course of the simulation.
Parameters:
site_index_path (string): path to site spatial index raster
site_param_table (dict): map of site spatial index to dictionaries
that contain site-level parameters
sand_path (string): path to raster containing proportion sand in soil
clay_path (string): path to raster containing proportion clay in soil
pp_reg (dict): map of key, path pairs giving paths to persistent
intermediate parameters that do not change over the course of
the simulation.
Modifies the persistent parameter rasters indexed by the following
keys:
pp_reg['wc_path']
pp_reg['eftext_path']
pp_reg['p1co2_2_path']
pp_reg['fps1s3_path']
pp_reg['fps2s3_path']
pp_reg['orglch_path']
pp_reg['vlossg_path']
Returns:
None
"""
sand_nodata = pygeoprocessing.get_raster_info(sand_path)['nodata'][0]
clay_nodata = pygeoprocessing.get_raster_info(clay_path)['nodata'][0]
# temporary intermediate rasters for persistent parameters calculation
temp_dir = tempfile.mkdtemp(dir=PROCESSING_DIR)
param_val_dict = {}
for val in [
'peftxa', 'peftxb', 'p1co2a_2', 'p1co2b_2', 'ps1s3_1',
'ps1s3_2', 'ps2s3_1', 'ps2s3_2', 'omlech_1', 'omlech_2', 'vlossg']:
target_path = os.path.join(temp_dir, '{}.tif'.format(val))
param_val_dict[val] = target_path
site_to_val = dict(
[(site_code, float(table[val])) for (
site_code, table) in site_param_table.items()])
pygeoprocessing.reclassify_raster(
(site_index_path, 1), site_to_val, target_path, gdal.GDT_Float32,
_IC_NODATA)
def calc_wc(afiel_1, awilt_1):
"""Calculate water content of soil layer 1."""
return afiel_1 - awilt_1
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
pp_reg['afiel_1_path'], pp_reg['awilt_1_path']]],
calc_wc, pp_reg['wc_path'], gdal.GDT_Float32, _TARGET_NODATA)
def calc_eftext(peftxa, peftxb, sand):
"""Calculate effect of soil texture on microbial decomposition.
Use an empirical regression to estimate the effect of soil
sand content on the microbe decomposition rate. Line 359 Prelim.f
Parameters:
peftxa (numpy.ndarray): parameter, regression intercept
peftxb (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
eftext, coefficient that modifies microbe decomposition rate.
"""
eftext = numpy.empty(sand.shape, dtype=numpy.float32)
eftext[:] = _IC_NODATA
valid_mask = (
(peftxa != _IC_NODATA) &
(peftxb != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
eftext[valid_mask] = (
peftxa[valid_mask] + (peftxb[valid_mask] * sand[valid_mask]))
return eftext
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['peftxa'], param_val_dict['peftxb'], sand_path]],
calc_eftext, pp_reg['eftext_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_p1co2_2(p1co2a_2, p1co2b_2, sand):
"""Calculate the fraction of carbon lost to CO2 from som1c_2.
During decomposition from active organic soil carbon, a fraction
of decomposing material is lost to CO2 as the soil respires.
Line 366 Prelim.f
Parameters:
p1co2a_2 (numpy.ndarray): parameter, intercept of regression
predicting loss to CO2 from active organic soil carbon
p1co2b_2 (numpy.ndarray): parameter, slope of regression
predicting loss to CO2 from active organic soil carbon
sand (numpy.ndarray): input, proportion sand in soil
Returns:
p1co2_2, fraction of carbon that flows to CO2 from active
organic soil carbon
"""
p1co2_2 = numpy.empty(sand.shape, dtype=numpy.float32)
p1co2_2[:] = _IC_NODATA
valid_mask = (
(p1co2a_2 != _IC_NODATA) &
(p1co2b_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
p1co2_2[valid_mask] = (
p1co2a_2[valid_mask] + (p1co2b_2[valid_mask] * sand[valid_mask]))
return p1co2_2
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['p1co2a_2'],
param_val_dict['p1co2b_2'], sand_path]],
calc_p1co2_2, pp_reg['p1co2_2_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps1s3(ps1s3_1, ps1s3_2, clay):
"""Calculate effect of clay content on decomposition from som1c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from soil organic matter with fast turnover to
soil organic matter with slow turnover. Line 370 Prelim.f
Parameters:
ps1s3_1 (numpy.ndarray): parameter, regression intercept
ps1s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps1s3, coefficient that modifies rate of decomposition
from som1c_2
"""
fps1s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps1s3[:] = _IC_NODATA
valid_mask = (
(ps1s3_1 != _IC_NODATA) &
(ps1s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps1s3[valid_mask] = (
ps1s3_1[valid_mask] + (ps1s3_2[valid_mask] * clay[valid_mask]))
return fps1s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps1s3_1'], param_val_dict['ps1s3_2'], clay_path]],
calc_fps1s3, pp_reg['fps1s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_fps2s3(ps2s3_1, ps2s3_2, clay):
"""Calculate effect of clay content on decomposition from som2c_2.
Use an empirical regression to estimate the effect of clay content
of soil on flow from slow soil organic carbon to soil passive organic
carbon. Line 371 Prelim.f
Parameters:
ps2s3_1 (numpy.ndarray): parameter, regression intercept
ps2s3_2 (numpy.ndarray): parameter, regression slope
clay (numpy.ndarray): input, proportion clay in soil
Returns:
fps2s3, coefficient that modifies rate of decomposition from
som2c_2 to som3c
"""
fps2s3 = numpy.empty(clay.shape, dtype=numpy.float32)
fps2s3[:] = _IC_NODATA
valid_mask = (
(ps2s3_1 != _IC_NODATA) &
(ps2s3_2 != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
fps2s3[valid_mask] = (
ps2s3_1[valid_mask] + (ps2s3_2[valid_mask] * clay[valid_mask]))
return fps2s3
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['ps2s3_1'], param_val_dict['ps2s3_2'], clay_path]],
calc_fps2s3, pp_reg['fps2s3_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_orglch(omlech_1, omlech_2, sand):
"""Calculate the effect of sand content on leaching from soil.
Use an empirical regression to estimate the effect of sand content
of soil on rate of organic leaching from soil when there is drainage
of soil water from soil layer 1 to soil layer 2. Line 110 Predec.f
Parameters:
omlech_1 (numpy.ndarray): parameter, regression intercept
omlech_2 (numpy.ndarray): parameter, regression slope
sand (numpy.ndarray): input, proportion sand in soil
Returns:
orglch, the fraction of organic compounds leaching from soil
with drainage from soil layer 1 to layer 2
"""
orglch = numpy.empty(sand.shape, dtype=numpy.float32)
orglch[:] = _IC_NODATA
valid_mask = (
(omlech_1 != _IC_NODATA) &
(omlech_2 != _IC_NODATA) &
(~numpy.isclose(sand, sand_nodata)))
orglch[valid_mask] = (
omlech_1[valid_mask] + (omlech_2[valid_mask] * sand[valid_mask]))
return orglch
pygeoprocessing.raster_calculator(
[(path, 1) for path in [
param_val_dict['omlech_1'], param_val_dict['omlech_2'],
sand_path]],
calc_orglch, pp_reg['orglch_path'], gdal.GDT_Float32, _IC_NODATA)
def calc_vlossg(vlossg_param, clay):
"""Calculate proportion of gross mineralized N that is volatized.
During decomposition, some N is lost to volatilization. This is a
function of the gross mineralized N and is calculated according to this
multiplier, which varies with soil clay content.
Parameters:
vlossg (numpy.ndarray): parameter, volatilization loss multiplier
clay (numpy.ndarray): input, proportion clay in soil
Returns:
vlossg, proportion of gross mineralized N that is volatized
"""
valid_mask = (
(vlossg_param != _IC_NODATA) &
(~numpy.isclose(clay, clay_nodata)))
vlossg = numpy.empty(vlossg_param.shape, dtype=numpy.float32)
vlossg[:] = _IC_NODATA
max_mask = ((clay > 0.3) & valid_mask)
min_mask = ((clay < 0.1) & valid_mask)
vlossg[valid_mask] = -0.1 * (clay[valid_mask] - 0.3) + 0.01
vlossg[max_mask] = 0.01
vlossg[min_mask] = 0.03
vlossg[valid_mask] = vlossg[valid_mask] * vlossg_param[valid_mask]
return vlossg
pygeoprocessing.raster_calculator(
[(path, 1) for path in [param_val_dict['vlossg'], clay_path]],
calc_vlossg, pp_reg['vlossg_path'], gdal.GDT_Float32, _IC_NODATA)
# clean up temporary files
shutil.rmtree(temp_dir)
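# Illustrative sketch only: the clay response used by `calc_vlossg` inside
# `_persistent_params`, reproduced for scalars. Below 10% clay the base
# multiplier is 0.03, above 30% clay it is 0.01, and it declines linearly
# in between; the result is then scaled by the site parameter `vlossg`.
def _example_vlossg_point_estimate(vlossg_param, clay):
    """Volatilization multiplier for one hypothetical site and clay content."""
    if clay < 0.1:
        base = 0.03
    elif clay > 0.3:
        base = 0.01
    else:
        base = -0.1 * (clay - 0.3) + 0.01
    return base * vlossg_param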
def _aboveground_ratio(anps, tca, pcemic_1, pcemic_2, pcemic_3):
"""Calculate C/<iel> ratios of decomposing aboveground material.
This ratio is used to test whether there is sufficient <iel> (N or P)
in aboveground material for the material to decompose. Agdrat.f
Parameters:
anps (numpy.ndarray): state variable, N or P in the donor material
tca (numpy.ndarray): state variable, total C in the donor material
pcemic_1 (numpy.ndarray): parameter, maximum C/<iel> of new material
pcemic_2 (numpy.ndarray): parameter, minimum C/<iel> of new material
pcemic_3 (numpy.ndarray): parameter, minimum <iel> content of
decomposing material that gives minimum C/<iel> of new material
Returns:
agdrat, the C/<iel> ratio of new material
"""
valid_mask = (
(~numpy.isclose(anps, _SV_NODATA)) &
(~numpy.isclose(tca, _SV_NODATA)) &
(pcemic_1 != _IC_NODATA) &
(pcemic_2 != _IC_NODATA) &
(pcemic_3 != _IC_NODATA))
cemicb = numpy.empty(anps.shape, dtype=numpy.float32)
cemicb[:] = _IC_NODATA
cemicb[valid_mask] = (
(pcemic_2[valid_mask] - pcemic_1[valid_mask]) /
pcemic_3[valid_mask])
econt = numpy.empty(anps.shape, dtype=numpy.float32)
import tensorflow as tf
import numpy as np
class estimator_1st_order(object):
"""
This class implements a first-order estimator of the underlying parameters given an extensive set of evaluations
"""
def __init__(self, sims, fiducial_point, offsets, print_params=False, tf_dtype=tf.float32,
tikohnov=0.0):
"""
Initializes the first-order estimator from a given evaluation of a function around a fiducial point
:param sims: prediction of the fiducial point and its perturbations, shape [2*n_params + 1, n_sims, n_output]
:param fiducial_point: the fiducial point of the expansion
:param offsets: the perturbations used
:param print_params: whether to print out the relevant parameters or not
:param tf_dtype: the tensorflow dtype (float32 or float64)
:param tikohnov: add Tikhonov regularization before inverting the Jacobian
"""
self.tf_dtype = tf_dtype
# dimension check
fidu_param = np.atleast_2d(fiducial_point)
n_param = fidu_param.shape[-1]
# get the fidu mean and cov
sims = sims.astype(np.float64)
fidu_sim = sims[0]
# set fidu sims
self.fidu_sim = fidu_sim.copy()
fidu_mean = np.mean(fidu_sim, axis=0)
fidu_cov = np.cov(fidu_sim, rowvar=False)
# repeat the beginning
fidu_sim = sims[0]
fidu_mean = np.mean(fidu_sim, axis=0)
fidu_cov = np.cov(fidu_sim, rowvar=False)
# First we calculate the first order derivatives
mean_derivatives = []
cov_derivatives = []
# to save the means
means = []
covs = []
for i in range(n_param):
# sims
sims_minus = sims[2 * (i + 1) - 1]
sims_plus = sims[2 * (i + 1)]
# means
mean_plus = np.mean(sims_plus, axis=0)
mean_minus = np.mean(sims_minus, axis=0)
# covariance
cov_plus = np.cov(sims_plus, rowvar=False)
cov_minus = np.cov(sims_minus, rowvar=False)
# save
means.append([mean_plus, mean_minus])
covs.append([cov_plus, cov_minus])
mean_derivatives.append((mean_plus - mean_minus) / (2.0 * offsets[i]))
cov_derivatives.append((cov_plus - cov_minus) / (2.0 * offsets[i]))
mean_jacobian = np.stack(mean_derivatives, axis=-1)
cov_jacobian = np.stack(cov_derivatives, axis=-1)
# calculate approximate fisher information
# F = inv(J^-1 cov J^T^-1) = J^T cov^-1 J
try:
inv_cov = np.linalg.inv(fidu_cov)
except np.linalg.LinAlgError:
print("Covariance appears to be singular, using pseudo inverse...")
inv_cov = np.linalg.pinv(fidu_cov)
fisher = np.einsum('ij,jk->ik', inv_cov, mean_jacobian)
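# Illustrative sketch (separate from the class above): the approximate
# Fisher information referenced in the comment "F = J^T cov^-1 J",
# computed directly from a mean jacobian and a fiducial covariance.
# This standalone helper is written only for demonstration and is not part
# of the original estimator.
def _example_fisher_information(mean_jacobian, fidu_cov):
    """Return J^T C^{-1} J for jacobian J (n_output, n_param) and covariance C."""
    inv_cov = np.linalg.pinv(fidu_cov)
    return mean_jacobian.T @ inv_cov @ mean_jacobian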
import copy
import numpy as np
from spysort.functions import convolution, cut_sgl_evt
from spysort.Events import events
class align_events(events.build_events):
""" Alignment of spike forms after clustering using a Brute-Force method"""
def __init__(self, data, positions, goodEvts, clusters, CSize, win=[],
before=14, after=30, thr=3):
""" Performs a PCA-aided k-Means clustering and creates the proper
indexes for further alignment of the raw data.
**Parameters**
data : double
The input normalized data list
positions : int
The positions of spike events as they have been computed by the
spike_detection (becareful the user has to define treat explicitly
the size and the contents of the position array)
clusters : double
The clustered data
CSize : int
The number of the chosen clusters
win : double
The filtering window
before : int
The number of sampling point to keep before the peak
after : int
The number of sampling point to keep after the peak
thr : double
Filtering threshold value
"""
self.win = win
self.thr = thr
self.before = before
self.after = after
self.positions = positions
# Converts input list data to a numpy array
self.data = np.asarray(data)
self.goodEvts = goodEvts
# Events instance
events.build_events.__init__(self, self.data, self.positions, self.win,
self.before, self.after)
# k-Means clustering
self.kmc = clusters
# Construction of the proper cluster indices
self.gcpos = copy.deepcopy([self.positions[self.goodEvts]
[np.array(self.kmc) == i]
for i in range(CSize)])
def classify_and_align_evt(self, evt_pos, centers, abs_jitter_max=3,
otherData=False, x=[]):
""" One step of the Brute-Force method of realignment. It returns the
name of the closest center in terms of Euclidean distance or "?" if
none of the clusters' waveform does better than a uniformly null one,
the new position of the event (the previous position corrected by the
integer part of the estimated jitter), the remaining jitter.
**Parameters**
evt_pos : int
A sampling point at which an event was detected.
centers : dict
A dictionary that contains all the necessary arrays and parameters
in order to perform properly the classification and the alignment
of the raw data
abs_jitter_max : double
The absolute maximum permitted value of the jitter
**Returns**
A list with the following components: the name of the closest center
if it was close enough, or '?'. The nearest sampling point to the
event's peak. The jitter: the difference between the estimated actual
peak position and the nearest sampling point.
"""
if otherData is False:
data = self.data
else:
data = np.asarray(x)
cluster_names = sorted(list(centers))
n_sites = data.shape[0]
centersM = np.array([centers[c_name]["center"]
[np.tile((-self.before <= centers[c_name]
["center_idx"]).__and__(centers[c_name]
["center_idx"] <= self.after), n_sites)]
for c_name in cluster_names])
evt = cut_sgl_evt(data, evt_pos, self.before, self.after)
delta = -(centersM - evt)
cluster_idx = np.argmin(np.sum(delta**2, axis=1))
good_cluster_name = cluster_names[cluster_idx]
good_cluster_idx = np.tile((-self.before <=
centers[good_cluster_name]
["center_idx"]).__and__(
centers[good_cluster_name]
["center_idx"] <= self.after), n_sites)
centerD = centers[good_cluster_name]["centerD"][good_cluster_idx]
centerD_norm2 = np.dot(centerD, centerD)
centerDD = centers[good_cluster_name]["centerDD"][good_cluster_idx]
centerDD_norm2 = np.dot(centerDD, centerDD)
centerD_dot_centerDD = np.dot(centerD, centerDD)
h = delta[cluster_idx, :]
h_order0_norm2 = np.sum(h**2)
h_dot_centerD = np.dot(h, centerD)
jitter0 = h_dot_centerD/centerD_norm2
# print jitter0
h_order1_norm2 = np.sum((h-jitter0*centerD)**2)
if h_order0_norm2 > h_order1_norm2:
h_dot_centerDD = np.dot(h, centerDD)
first = (-2. * h_dot_centerD + 2. * jitter0 *
(centerD_norm2 - h_dot_centerDD) + 3. * jitter0**2 *
centerD_dot_centerDD + jitter0**3 * centerDD_norm2)
second = (2. * (centerD_norm2 - h_dot_centerDD) + 6. * jitter0 *
centerD_dot_centerDD + 3. * jitter0**2 * centerDD_norm2)
jitter1 = jitter0 - first/second
h_order2_norm2 = sum((h-jitter1*centerD-jitter1**2/2*centerDD)**2)
if h_order1_norm2 <= h_order2_norm2:
jitter1 = jitter0
else:
jitter1 = 0
if np.abs(np.round(jitter1)) > 0:
evt_pos -= int(np.round(jitter1))
evt = cut_sgl_evt(data, evt_pos, self.before, self.after)
h = evt - centers[good_cluster_name]["center"][good_cluster_idx]
h_order0_norm2 = np.sum(h**2)
h_dot_centerD = np.dot(h, centerD)
jitter0 = h_dot_centerD/centerD_norm2
h_order1_norm2 = np.sum((h - jitter0 * centerD)**2)
if h_order0_norm2 > h_order1_norm2:
h_dot_centerDD = np.dot(h, centerDD)
import numpy as np
import cv2
import time
import warnings
warnings.filterwarnings('error')
# allow the camera to warmup
time.sleep(0.1)
# class for lane detection
class Lines():
def __init__(self):
# were the lines detected at least once
self.detected_first = False
# were the lines detected in the last iteration?
self.detected = False
# average x values of the fitted lines
self.bestxl = None
self.bestyl = None
self.bestxr = None
self.bestyr = None
# polynomial coefficients averaged over the last iterations
self.best_fit_l = None
self.best_fit_r = None
# polynomial coefficients for the most recent fit
self.current_fit_l = None
self.current_fit_r = None
# radius of curvature of the lines in meters
self.left_curverad = None
self.right_curverad = None
# distance in meters of vehicle center from the line
self.offset = None
# x values for detected line pixels
self.allxl = None
self.allxr = None
# y values for detected line pixels
self.allyl = None
self.allyr = None
# camera calibration parameters
self.cam_mtx = None
self.cam_dst = None
# camera distortion parameters
self.M = None
self.Minv = None
# image shape
self.im_shape = (None, None)
# distance to look ahead in meters
self.look_ahead = 10
self.remove_pixels = 90
# enlarge output image
self.enlarge = 2.5
# warning from numpy polyfit
self.poly_warning = False
# set camera calibration parameters
def set_cam_calib_param(self, mtx, dst):
self.cam_mtx = mtx
self.cam_dst = dst
# undistort image
def undistort(self, img):
return cv2.undistort(img, self.cam_mtx, self.cam_dst, None, self.cam_mtx)
# get binary image based on color thresholding
def color_thresh(self, img, thresh=(0, 255)):
# convert to HLS color space and separate the S channel
hsv = cv2.cvtColor(img, cv2.COLOR_RGB2HLS).astype(float)
s_channel = hsv[:, :, 2]
# threshold color channel
s_binary = np.zeros_like(s_channel)
s_binary[(s_channel >= thresh[0]) & (s_channel <= thresh[1])] = 1
return s_binary
# get binary image based on sobel gradient thresholding
def abs_sobel_thresh(self, sobel, thresh=(0, 255)):
abs_sobel = np.absolute(sobel)
max_s = np.max(abs_sobel)
if max_s == 0:
max_s = 1
scaled_sobel = np.uint8(255 * abs_sobel / max_s)
sbinary = np.zeros_like(scaled_sobel)
sbinary[(scaled_sobel >= thresh[0]) & (scaled_sobel <= thresh[1])] = 1
return sbinary
# get binary image based on sobel magnitude gradient thresholding
def mag_thresh(self, sobelx, sobely, mag_thresh=(0, 255)):
abs_sobel = np.sqrt(sobelx ** 2 + sobely ** 2)
max_s = np.max(abs_sobel)
if max_s == 0:
max_s = 1
scaled_sobel = np.uint8(255 * abs_sobel / max_s)
sbinary = np.zeros_like(scaled_sobel)
sbinary[(scaled_sobel >= mag_thresh[0]) & (scaled_sobel <= mag_thresh[1])] = 1
return sbinary
# get binary image based on directional gradient thresholding
def dir_threshold(self, sobelx, sobely, thresh=(0, np.pi / 2)):
abs_sobelx = np.abs(sobelx)
abs_sobely = np.abs(sobely)
grad_sobel = np.arctan2(abs_sobely, abs_sobelx)
sbinary = np.zeros_like(grad_sobel)
sbinary[(grad_sobel >= thresh[0]) & (grad_sobel <= thresh[1])] = 1
return sbinary
# get binary combining various thresholding methods
def binary_extraction(self, image, ksize=3):
# undistort first
# image = self.undistort(image)
color_bin = self.color_thresh(image, thresh=(90, 150)) # initial values 110, 255
gray = cv2.cvtColor(image, cv2.COLOR_RGB2GRAY)
sobelx = cv2.Sobel(gray, cv2.CV_64F, 1, 0, ksize)
sobely = cv2.Sobel(gray, cv2.CV_64F, 0, 1, ksize)
gradx = self.abs_sobel_thresh(sobelx, thresh=(100, 190)) # initial values 40, 160
grady = self.abs_sobel_thresh(sobely, thresh=(100, 190)) # initial values 40, 160
mag_binary = self.mag_thresh(sobelx, sobely, mag_thresh=(100, 190)) # initial values 40, 160
# dir_binary = self.dir_threshold(sobelx, sobely, thresh=(0.7, 1.3))
combined = np.zeros_like(gradx)
# combined[(((gradx == 1) & (grady == 1)) | ((mag_binary == 1) & (dir_binary == 1))) | (color_bin==1) ] = 1
combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1)) | (color_bin == 1)] = 1
# combined[(((gradx == 1) & (grady == 1)) | (mag_binary == 1)) ] = 1
return combined
# transform perspective
def trans_per(self, image):
image = self.binary_extraction(image)
self.binary_image = image
ysize = image.shape[0]
xsize = image.shape[1]
# define region of interest
left_bottom = (xsize / 10, ysize)
apex_l = (xsize / 2 - 2600 / (self.look_ahead ** 2), ysize - self.look_ahead * 275 / 30)
apex_r = (xsize / 2 + 2600 / (self.look_ahead ** 2), ysize - self.look_ahead * 275 / 30)
right_bottom = (xsize - xsize / 10, ysize)
# define vertices for perspective transformation
src = np.array([[left_bottom], [apex_l], [apex_r], [right_bottom]], dtype=np.float32)
dst = np.float32([[xsize / 3, ysize], [xsize / 4.5, 0], [xsize - xsize / 4.5, 0], [xsize - xsize / 3, ysize]])
self.M = cv2.getPerspectiveTransform(src, dst)
self.Minv = cv2.getPerspectiveTransform(dst, src)
if len(image.shape) > 2:
warped = cv2.warpPerspective(image, self.M, image.shape[-2:None:-1], flags=cv2.INTER_LINEAR)
else:
warped = cv2.warpPerspective(image, self.M, image.shape[-1:None:-1], flags=cv2.INTER_LINEAR)
return warped
# create window mask for lane detection
def window_mask(self, width, height, img_ref, center, level):
output = np.zeros_like(img_ref)
output[int(img_ref.shape[0] - (level + 1) * height):int(img_ref.shape[0] - level * height), \
max(0, int(center - width / 2)):min(int(center + width / 2), img_ref.shape[1])] = 1
return output
# find window centroids of the left and right lanes
def find_window_centroids(self, warped, window_width, window_height, margin):
window_centroids = [] # Store the (left,right) window centroid positions per level
window = np.ones(window_width) # Create our window template that we will use for convolutions
# First find the two starting positions for the left and right lane by using np.sum to get the vertical image slice
# and then np.convolve the vertical image slice with the window template
# Sum quarter bottom of image to get slice, could use a different ratio
l_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, :int(warped.shape[1] / 2)], axis=0)
l_center = np.argmax(np.convolve(window, l_sum)) - window_width / 2
r_sum = np.sum(warped[int(3 * warped.shape[0] / 4):, int(warped.shape[1] / 2):], axis=0)
r_center = np.argmax(np.convolve(window, r_sum)) - window_width / 2 + int(warped.shape[1] / 2)
# Add what we found for the first layer
window_centroids.append((l_center, r_center))
# Go through each layer looking for max pixel locations
for level in range(1, (int)(warped.shape[0] / window_height)):
# convolve the window into the vertical slice of the image
image_layer = np.sum(
warped[int(warped.shape[0] - (level + 1) * window_height):int(warped.shape[0] - level * window_height),
:], axis=0)
conv_signal = np.convolve(window, image_layer)
# Find the best left centroid by using past left center as a reference
# Use window_width/2 as offset because convolution signal reference is at right side of window, not center of window
offset = window_width / 2
l_min_index = int(max(l_center + offset - margin, 0))
l_max_index = int(min(l_center + offset + margin, warped.shape[1]))
l_center = np.argmax(conv_signal[l_min_index:l_max_index]) + l_min_index - offset
# Find the best right centroid by using past right center as a reference
r_min_index = int(max(r_center + offset - margin, 0))
r_max_index = int(min(r_center + offset + margin, warped.shape[1]))
r_center = np.argmax(conv_signal[r_min_index:r_max_index]) + r_min_index - offset
# Add what we found for that layer
window_centroids.append((l_center, r_center))
return window_centroids
# fit polynomials on the extracted left and right lane
def get_fit(self, image):
# check if the lanes were detected in the last iteration, if not search for the lanes
if not self.detected:
# window settings
window_width = 40
window_height = 40  # height of each vertical search layer in pixels
margin = 10 # how much to slide left and right for searching
window_centroids = self.find_window_centroids(image, window_width, window_height, margin)
# if we found any window centers
if len(window_centroids) > 0:
# points used to draw all the left and right windows
l_points = np.zeros_like(image)
r_points = np.zeros_like(image)
# go through each level and draw the windows
for level in range(0, len(window_centroids)):
# Window_mask is a function to draw window areas
l_mask = self.window_mask(window_width, window_height, image, window_centroids[level][0], level)
r_mask = self.window_mask(window_width, window_height, image, window_centroids[level][1], level)
# Add graphic points from window mask here to total pixels found
l_points[(image == 1) & (l_mask == 1)] = 1
r_points[(image == 1) & (r_mask == 1)] = 1
# construct images of the results
template_l = np.array(l_points * 255, np.uint8) # add left window pixels
template_r = np.array(r_points * 255, np.uint8) # add right window pixels
zero_channel = np.zeros_like(template_l) # create a zero color channel
left_right = np.array(cv2.merge((template_l, zero_channel, template_r)),
np.uint8) # make color image left and right lane
# get points for polynomial fit
self.allyl, self.allxl = l_points.nonzero()
self.allyr, self.allxr = r_points.nonzero()
# check if lanes are detected
if (len(self.allxl) > 0) & (len(self.allxr) > 0):
try:
self.current_fit_l = np.polyfit(self.allyl, self.allxl, 2)
self.current_fit_r = np.polyfit(self.allyr, self.allxr, 2)
self.poly_warning = False
except np.RankWarning:
self.poly_warning = True
pass
# check if lanes are detected correctly
if self.check_fit():
self.detected = True
# if this is the first detection initialize the best values
if not self.detected_first:
self.best_fit_l = self.current_fit_l
self.best_fit_r = self.current_fit_r
# if not then average with new
else:
self.best_fit_l = self.best_fit_l * 0.6 + self.current_fit_l * 0.4
self.best_fit_r = self.best_fit_r * 0.6 + self.current_fit_r * 0.4
# assign new best values based on this iteration
self.detected_first = True
self.bestxl = self.allxl
self.bestyl = self.allyl
self.bestxr = self.allxr
self.bestyr = self.allyr
self.left_right = left_right
# set flag if lanes are not detected correctly
else:
self.detected = False
# if lanes were detected in the last frame, search area for current frame
else:
non_zero_y, non_zero_x = image.nonzero()
margin = 10 # search area margin
left_lane_points_indx = ((non_zero_x > (
self.best_fit_l[0] * (non_zero_y ** 2) + self.best_fit_l[1] * non_zero_y + self.best_fit_l[
2] - margin)) & (
non_zero_x < (
self.best_fit_l[0] * (non_zero_y ** 2) + self.best_fit_l[1] * non_zero_y +
self.best_fit_l[2] + margin)))
right_lane_points_indx = ((non_zero_x > (
self.best_fit_r[0] * (non_zero_y ** 2) + self.best_fit_r[1] * non_zero_y + self.best_fit_r[
2] - margin)) & (
non_zero_x < (
self.best_fit_r[0] * (non_zero_y ** 2) + self.best_fit_r[1] * non_zero_y +
self.best_fit_r[2] + margin)))
# extracted lef lane pixels
self.allxl = non_zero_x[left_lane_points_indx]
self.allyl = non_zero_y[left_lane_points_indx]
# extracted rightt lane pixels
self.allxr = non_zero_x[right_lane_points_indx]
self.allyr = non_zero_y[right_lane_points_indx]
# if lines were found
if (len(self.allxl) > 0) & (len(self.allxr) > 0):
try:
self.current_fit_l = np.polyfit(self.allyl, self.allxl, 2)
self.current_fit_r = np.polyfit(self.allyr, self.allxr, 2)
except np.RankWarning:
self.poly_warning = True
pass
# check if lanes are detected correctly
if self.check_fit():
# average out the best fit with new values
self.best_fit_l = self.best_fit_l * 0.6 + self.current_fit_l * 0.4
self.best_fit_r = self.best_fit_r * 0.6 + self.current_fit_r * 0.4
# assign new best values based on this iteration
self.bestxl = self.allxl
self.bestyl = self.allyl
self.bestxr = self.allxr
self.bestyr = self.allyr
# construct images of the results
template_l = np.copy(image).astype(np.uint8)
template_r = np.copy(image).astype(np.uint8)
template_l[non_zero_y[left_lane_points_indx], non_zero_x[
left_lane_points_indx]] = 255 # add left window pixels
template_r[non_zero_y[right_lane_points_indx], non_zero_x[
right_lane_points_indx]] = 255 # add right window pixels
zero_channel = np.zeros_like(template_l) # create a zero color channel
self.left_right = np.array(cv2.merge((template_l, zero_channel, template_r)),
np.uint8) # make color image left and right lane
# set flag if lanes are not detected correctly
else:
self.detected = False
# check if lanes are detected correctly
def check_fit(self):
# Generate x and y values of the fit
ploty = np.linspace(0, self.im_shape[0] - 1, self.im_shape[0])
left_fitx = self.current_fit_l[0] * ploty ** 2 + self.current_fit_l[1] * ploty + self.current_fit_l[2]
right_fitx = self.current_fit_r[0] * ploty ** 2 + self.current_fit_r[1] * ploty + self.current_fit_r[2]
# find max, min and mean distance between the lanes
max_dist = np.amax(np.abs(right_fitx - left_fitx))
min_dist = np.amin(np.abs(right_fitx - left_fitx))
mean_dist = np.mean(np.abs(right_fitx - left_fitx))
# check if the lanes don't have a big deviation from the mean
if (max_dist > 250) | (np.abs(max_dist - mean_dist) > 100) | (np.abs(mean_dist - min_dist) > 100) | (
mean_dist < 50) | self.poly_warning:
return False
else:
return True
def calculate_curvature_offset(self):
if self.detected_first:
# define y value near the car
y_eval = self.im_shape[0]
# define conversions in x and y from pixels space to meters
ym_per_pix = 50 / 250 # meters per pixel in y dimension
xm_per_pix = 3.7 / 75 # meters per pixel in x dimension
# create new polynomials to x,y in world space
try:
left_fit_cr = np.polyfit(self.bestyl * ym_per_pix, self.bestxl * xm_per_pix, 2)
right_fit_cr = np.polyfit(self.bestyr * ym_per_pix, self.bestxr * xm_per_pix, 2)
except np.RankWarning:
self.poly_warning = True
pass
# if the poly fit is ok proceed
if not self.poly_warning:
# calculate the new radii of curvature
left_curverad = ((1 + (
2 * left_fit_cr[0] * y_eval * ym_per_pix + left_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * left_fit_cr[0])
right_curverad = ((1 + (
2 * right_fit_cr[0] * y_eval * ym_per_pix + right_fit_cr[1]) ** 2) ** 1.5) / np.absolute(
2 * right_fit_cr[0])
# now our radius of curvature is in meters
# calculate the offset from the center of the road
y_eval = y_eval * ym_per_pix
midpoint_car = self.im_shape[1] / 2.0
midpoint_lane = (right_fit_cr[0] * (y_eval ** 2) + right_fit_cr[1] * y_eval + right_fit_cr[2]) + \
(left_fit_cr[0] * (y_eval ** 2) + left_fit_cr[1] * y_eval + left_fit_cr[2])
offset = midpoint_car * xm_per_pix - midpoint_lane / 2
# initialize the curvature and offset if this is the first detection
if self.left_curverad is None:
self.left_curverad = left_curverad
self.right_curverad = right_curverad
self.offset = offset
# average out the curvature and offset
else:
self.left_curverad = self.left_curverad * 0.8 + left_curverad * 0.2
self.right_curverad = self.right_curverad * 0.8 + right_curverad * 0.2
self.offset = self.offset * 0.9 + offset * 0.1
# project results on the source image
def project_on_road_debug(self, image_input):
image = image_input[self.remove_pixels:, :]
image = self.trans_per(image)
self.im_shape = image.shape
self.get_fit(image)
if self.detected_first & self.detected:
# create fill image
temp_filler = np.zeros((self.remove_pixels, self.im_shape[1])).astype(np.uint8)
filler = np.dstack((temp_filler, temp_filler, temp_filler))
# create an image to draw the lines on
warp_zero = np.zeros_like(image).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
ploty = np.linspace(0, image_input.shape[0] - 1, image_input.shape[0])
left_fitx = self.best_fit_l[0] * ploty ** 2 + self.best_fit_l[1] * ploty + self.best_fit_l[2]
right_fitx = self.best_fit_r[0] * ploty ** 2 + self.best_fit_r[1] * ploty + self.best_fit_r[2]
# recast the x and y points into usable format for cv2.fillPoly()
pts_left = np.array([np.transpose(np.vstack([left_fitx, ploty]))])
pts_right = np.array([np.flipud(np.transpose(np.vstack([right_fitx, ploty])))])
pts = np.hstack((pts_left, pts_right))
# draw the lane onto the warped blank image
cv2.fillPoly(color_warp, np.int_([pts]), (0, 255, 0))
# warp the blank back to original image space using inverse perspective matrix (Minv)
newwarp = cv2.warpPerspective(color_warp, self.Minv, color_warp.shape[-2:None:-1])
left_right = cv2.warpPerspective(self.left_right, self.Minv, color_warp.shape[-2:None:-1])
# combine the result with the original image
left_right_fill = np.vstack((filler, left_right))
result = cv2.addWeighted(left_right_fill, 1, image_input, 1, 0)
result = cv2.addWeighted(result, 1, np.vstack((filler, newwarp)), 0.3, 0)
# get curvature and offset
# self.calculate_curvature_offset()
# plot text on resulting image
#img_text = "radius of curvature: " + str(round((self.left_curverad + self.right_curverad) / 2, 2)) + ' (m)'
#if self.offset < 0:
# img_text2 = "vehicle is: " + str(round(np.abs(self.offset), 2)) + ' (m) left of center'
#else:
# img_text2 = "vehicle is: " + str(round(np.abs(self.offset), 2)) + ' (m) right of center'
small = cv2.resize(left_right_fill, (0, 0), fx=0.5, fy=0.5)
small2 = cv2.resize(np.vstack((filler, self.left_right)), (0, 0), fx=0.5, fy=0.5)
result2 = cv2.resize(np.hstack((result, np.vstack((small2, small)))), (0, 0), fx=self.enlarge,
fy=self.enlarge)
# result2 = cv2.resize(np.hstack((np.vstack((filler,np.dstack((self.binary_image*255,self.binary_image*255,self.binary_image*255)))), np.vstack((small2,small)))), (0,0), fx=self.enlarge, fy=self.enlarge)
#cv2.putText(result2, img_text, (15, 15), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
#cv2.putText(result2, img_text2, (15, 40), cv2.FONT_HERSHEY_SIMPLEX, 0.6, (255, 255, 255), 1)
return result2
# if lanes were not detected output source image
else:
return_image = cv2.resize(
np.hstack((image_input, cv2.resize(np.zeros_like(image_input), (0, 0), fx=0.5, fy=1.0))), (0, 0),
fx=self.enlarge, fy=self.enlarge)
return return_image
# project results on the source image
def project_on_road(self, image_input):
image = image_input[self.remove_pixels:, :]
image = self.trans_per(image)
self.im_shape = image.shape
self.get_fit(image)
if self.detected_first & self.detected:
# create fill image
temp_filler = np.zeros((self.remove_pixels, self.im_shape[1])).astype(np.uint8)
filler = np.dstack((temp_filler, temp_filler, temp_filler))
# create an image to draw the lines on
warp_zero = np.zeros_like(image).astype(np.uint8)
color_warp = np.dstack((warp_zero, warp_zero, warp_zero))
ploty = np.linspace(0, image_input.shape[0] - 1, image_input.shape[0])
# Copyright (c) 2019 MindAffect B.V.
# Author: <NAME> <<EMAIL>>
# This file is part of pymindaffectBCI <https://github.com/mindaffect/pymindaffectBCI>.
#
# pymindaffectBCI is free software: you can redistribute it and/or modify
# it under the terms of the GNU General Public License as published by
# the Free Software Foundation, either version 3 of the License, or
# (at your option) any later version.
#
# pymindaffectBCI is distributed in the hope that it will be useful,
# but WITHOUT ANY WARRANTY; without even the implied warranty of
# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
# GNU General Public License for more details.
#
# You should have received a copy of the GNU General Public License
# along with pymindaffectBCI. If not, see <http://www.gnu.org/licenses/>
import numpy as np
# time-series tests
def window_axis(a, winsz, axis=0, step=1, prependwindowdim=False):
''' efficient view-based slicing of equal-sized equally-spaced windows along a selected axis of a numpy nd-array '''
if axis < 0: # no negative axis indices
axis = len(a.shape)+axis
# compute the shape/strides for the windowed view of a
if prependwindowdim: # window dim before axis
shape = a.shape[:axis] + (winsz, int((a.shape[axis]-winsz)/step)+1) + a.shape[(axis+1):]
strides = a.strides[:axis] + (a.strides[axis], a.strides[axis]*step) + a.strides[(axis+1):]
else: # window dim after axis
shape = a.shape[:axis] + (int((a.shape[axis]-winsz)/step)+1, winsz) + a.shape[(axis+1):]
strides = a.strides[:axis] + (a.strides[axis]*step, a.strides[axis]) + a.strides[(axis+1):]
#print("a={}".format(a.shape))
#print("shape={} stride={}".format(shape,strides))
# return the computed view
return np.lib.stride_tricks.as_strided(a, shape=shape, strides=strides)
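# Minimal usage sketch for `window_axis`: slice a toy 1-d signal into
# overlapping windows without copying the data. The sizes used here are
# arbitrary and chosen only for illustration.
def _window_axis_example():
    a = np.arange(10)                     # toy signal, 10 samples
    w = window_axis(a, winsz=4, step=2)   # view of shape (4, 4)
    # w[0] is a[0:4], w[1] is a[2:6], and so on; no data is copied, so
    # writing into `w` would also modify `a`.
    return w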
def equals_subarray(a, pat, axis=-1, match=-1):
''' efficiently find matches of a 1-d sub-array along axis within an nd-array '''
if axis < 0: # no negative dims
axis = a.ndim+axis
# reshape to match dims of a
if not isinstance(pat, np.ndarray): pat = np.array(pat) # ensure is numpy
pshape = np.ones(a.ndim+1, dtype=int); pshape[axis+1] = pat.size
pat = np.array(pat.ravel(),dtype=a.dtype).reshape(pshape) # [ ... x l x...]
# window a into pat-len pieces
aw = window_axis(a, pat.size, axis=axis, step=1) # [ ... x t-l x l x ...]
# do the match
F = np.all(np.equal(aw, pat), axis=axis+1) # [... x t-l x ...]
# pad to make the same shape as input
padshape = list(a.shape); padshape[axis] = a.shape[axis]-F.shape[axis]
if match == -1: # match at end of pattern -> pad before
F = np.append(np.zeros(padshape, dtype=F.dtype), F, axis)
else: # match at start of pattern -> pad after
F = np.append(F, np.zeros(padshape, dtype=F.dtype), axis)
return F
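# Minimal usage sketch for `equals_subarray`: flag, along the last axis,
# the positions where a short pattern ends. The toy sequence and pattern
# below are arbitrary demonstration values.
def _equals_subarray_example():
    a = np.array([0, 1, 0, 0, 1, 0, 1, 0])
    F = equals_subarray(a, [1, 0], axis=-1)   # match flagged at pattern end
    # F has a's shape, with True at indices 2, 5, and 7, i.e. at the sample
    # where each [1, 0] pattern finishes.
    return F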
class RingBuffer:
''' time efficient linear ring-buffer for storing packed data, e.g. continguous np-arrays '''
def __init__(self, maxsize, shape, dtype=np.float32):
self.elementshape = shape
self.bufshape = (int(maxsize), )+shape
self.buffer = np.zeros((2*int(maxsize), np.prod(shape)), dtype=dtype) # store as 2d
# position for the -1 element. N.B. start maxsize so pos-maxsize is always valid
self.pos = int(maxsize)
self.n = 0 # count of total number elements added to the buffer
self.copypos = 0 # position of the last element copied to the 1st half
self.copysize = 0 # number entries to copy as a block
def clear(self):
'''empty the ring-buffer and reset to empty'''
self.pos=int(self.bufshape[0])
self.n =0
self.copypos=0
self.copysize=0
def append(self, x):
'''add single element to the ring buffer'''
return self.extend(x[np.newaxis, ...])
def extend(self, x):
'''add a group of elements to the ring buffer'''
# TODO[] : incremental copy to the 1st half, to spread the copy cost?
nx = x.shape[0]
if self.pos+nx >= self.buffer.shape[0]:
flippos = self.buffer.shape[0]//2
# flippos-nx to 1st half
self.buffer[:(flippos-nx), :] = self.buffer[(self.pos-(flippos-nx)):self.pos, :]
# move cursor to end 1st half
self.pos = flippos-nx
# insert in the buffer
self.buffer[self.pos:self.pos+nx, :] = x.reshape((nx, self.buffer.shape[1]))
# move the cursor
self.pos = self.pos+nx
# update the count
self.n = self.n + nx
return self
@property
def shape(self):
return (min(self.n,self.bufshape[0]),)+self.bufshape[1:]
def unwrap(self):
'''get a view on the valid portion of the ring buffer'''
return self.buffer[self.pos-min(self.n,self.bufshape[0]):self.pos, :].reshape(self.shape)
def __getitem__(self, item):
return self.unwrap()[item]
def __iter__(self):
return iter(self.unwrap())
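# Minimal usage sketch for `RingBuffer`: keep the most recent samples of a
# (nsamp, nch) stream. The capacity and channel count are arbitrary
# demonstration choices.
def _ringbuffer_example():
    rb = RingBuffer(maxsize=5, shape=(3,))   # keep at most 5 samples of 3 channels
    rb.extend(np.arange(12, dtype=np.float32).reshape(4, 3))   # add 4 samples
    rb.append(np.array([99., 99., 99.], dtype=np.float32))     # add 1 more
    data = rb.unwrap()   # (5, 3) view over the stored samples, oldest first
    return data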
def extract_ringbuffer_segment(rb, bgn_ts, end_ts=None):
''' extract the data between start/end time stamps, from time-stamps contained in the last channel of a nd matrix'''
# get the data / msgs from the ringbuffers
X = rb.unwrap() # (nsamp,nch+1)
X_ts = X[:, -1] # last channel is timestamps
# TODO: binary-search to make these searches more efficient!
# search backwards for trial-start time-stamp
# TODO[X]: use a bracketing test (better with wrap-around)
bgn_samp = np.flatnonzero(np.logical_and(X_ts[:-1] < bgn_ts, bgn_ts <= X_ts[1:]))
# get the index of this timestamp, guarding for after last sample
if len(bgn_samp) == 0 :
bgn_samp = 0 if bgn_ts <= X_ts[0] else len(X_ts)+1
else:
bgn_samp = bgn_samp[0]
# and just to be sure the trial-end timestamp
if end_ts is not None:
end_samp = np.flatnonzero(np.logical_and(X_ts[:-1] < end_ts, end_ts <= X_ts[1:]))
# get index of this timestamp, guarding for after last data sample
end_samp = end_samp[-1] if len(end_samp) > 0 else len(X_ts)
else: # until now
end_samp = len(X_ts)
# extract the trial data, and make copy (just to be sure)
X = X[bgn_samp:end_samp+1, :].copy()
return X
def unwrap(x,range=None):
''' unwrap a list of numbers to correct for truncation due to limited bit-resolution, e.g. time-stamps stored in 24bit integers'''
if range is None:
range = 1<< int(np.ceil(np.log2(max(x))))
wrap_ind = np.diff(x) < -range/2
unwrap = np.zeros(x.shape)
unwrap[np.flatnonzero(wrap_ind)+1]=range
unwrap=np.cumsum(unwrap)
x = x + unwrap
return x
def unwrap_test():
x = np.cumsum(np.random.rand(6000,1))
xw = x%(1<<10)
xuw = unwrap(xw)
import matplotlib.pyplot as plt
plt.plot(x,label='x')
plt.plot(xw,label='x (wrapped)')
plt.plot(xuw,label='x (unwrapped)')
plt.legend()
def search_directories_for_file(f,*args):
"""search a given set of directories for given filename, return 1st match
Args:
f (str): filename to search for (or a pattern)
*args (): set for directory names to look in
Returns:
f (str): the *first* full path to where f is found, or f if not found.
"""
import os
import glob
f = os.path.expanduser(f)
if os.path.exists(f) or len(glob.glob(f))>0:
return f
for d in args:
#print('Searching dir: {}'.format(d))
df = os.path.join(d,f)
if os.path.exists(df) or len(glob.glob(df))>0:
f = df
break
return f
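# Minimal usage sketch for `search_directories_for_file`; the file name and
# directories below are hypothetical.
def _search_directories_example():
    # looks for 'config.json' in the working directory first, then under
    # '/srv/data' and '/etc/myapp'; returns the first full path that exists,
    # or 'config.json' unchanged if none do
    return search_directories_for_file('config.json', '/srv/data', '/etc/myapp')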
# toy data generation
#@function
def randomSummaryStats(d=10, nE=2, tau=10, nY=1):
import numpy as np
# pure random test-case
Cxx = np.random.standard_normal((d, d))
Cxy = np.random.standard_normal((nY, nE, tau, d))
Cyy = np.random.standard_normal((nY, nE, tau, nE, tau))
return (Cxx, Cxy, Cyy)
def testNoSignal(d=10, nE=2, nY=1, isi=5, tau=None, nSamp=10000, nTrl=1):
# Simple test-problem -- no real signal
if tau is None:
tau = 10*isi
X = np.random.standard_normal((nTrl, nSamp, d))
stimTimes_samp = np.arange(0, X.shape[-2] - tau, isi)
Me = np.random.standard_normal((nTrl, len(stimTimes_samp), nY, nE))>1
Y = np.zeros((nTrl, X.shape[-2], nY, nE))
Y[:, stimTimes_samp, :, :] = Me
return (X, Y, stimTimes_samp)
def testSignal(nTrl=1, d=5, nE=2, nY=30, isi=5, tau=None, offset=0, nSamp=10000, stimthresh=.6, noise2signal=1, irf=None):
#simple test problem, with overlapping response
import numpy as np
if tau is None:
tau = 10 if irf is None else len(irf)
nEp = int((nSamp-tau)/isi)
cb = np.random.standard_normal((nEp, nY, nE))
import numpy as np
import cv2
import torch
from assets.matchers.base import BaseMatcher
from experiments.service.utils import desc_similarity
class KnnMatcher(BaseMatcher):
def __init__(self, matcher_cfg):
super().__init__(matcher_cfg)
self.d_threshold = matcher_cfg.matcher_params.feat_distance_threshold
def match(self, s1: torch.Tensor, s2: torch.Tensor):
sim = desc_similarity(s1, s2)
if sim is None:
self.n_fails += 1
return np.asarray([]), np.asarray([])
# REFS Some (most) of those functions come from the keras library (https://github.com/fchollet/keras)
# Some are modified to add output images and output centerline
# keras.preprocessing.image: flip_axis, random_channel_shift, apply_transform, transform_matrix_offset_center, ApplyRandomTransformations
import time
import numpy as np
import random
import scipy as sp
import scipy.interpolate
import scipy.ndimage
import scipy.ndimage.interpolation
from NnetsX import IS_CHANNELS_FIRST
# from File import SavePickle
INTENSITY_FACTOR = 0.2
VECTOR_FIELD_SIGMA = 5. # in pixel
ROTATION_FACTOR = 10 # degree
TRANSLATION_FACTOR = 0.2 # proportion of the image size
SHEAR_FACTOR = 2*np.pi/180 # in radian
ZOOM_FACTOR = 0.1
def flip_axis(x, axis):
x = np.asarray(x).swapaxes(axis, 0)
x = x[::-1, ...]
x = x.swapaxes(0, axis)
return x
def random_channel_shift(x, intensity, channel_index=0):
x = np.rollaxis(x, channel_index, 0)
min_x, max_x = np.min(x), np.max(x)
shift = np.random.uniform(-intensity, intensity) # TODO add a choice if we want the same shift for all channels
channel_images = [np.clip(x_channel + shift, min_x, max_x)
for x_channel in x]
# channel_images = [np.clip(x_channel + np.random.uniform(-intensity, intensity), min_x, max_x)
# for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def apply_transform(x, transform_matrix, channel_index=0, fill_mode='nearest', cval=0.):
x = np.rollaxis(x, channel_index, 0)
final_affine_matrix = transform_matrix[:2, :2]
final_offset = transform_matrix[:2, 2]
channel_images = [sp.ndimage.interpolation.affine_transform(x_channel, final_affine_matrix,
final_offset, order=0, mode=fill_mode, cval=cval) for x_channel in x]
x = np.stack(channel_images, axis=0)
x = np.rollaxis(x, 0, channel_index+1)
return x
def transform_matrix_offset_center(matrix, x, y):
o_x = float(x) / 2 + 0.5
o_y = float(y) / 2 + 0.5
offset_matrix = np.array([[1, 0, o_x], [0, 1, o_y], [0, 0, 1]])
reset_matrix = np.array([[1, 0, -o_x], [0, 1, -o_y], [0, 0, 1]])
transform_matrix = np.dot(np.dot(offset_matrix, matrix), reset_matrix)
return transform_matrix
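# Illustrative sketch of how the helpers above compose: a small rotation
# matrix is re-centered on the image with `transform_matrix_offset_center`
# and then applied channel-wise with `apply_transform`. The 1 x 32 x 32
# image and the 15 degree angle are arbitrary demonstration values.
def _example_centered_rotation():
    img = np.random.rand(1, 32, 32)           # channels-first toy image
    theta = 15. * np.pi / 180.
    rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
                                [np.sin(theta), np.cos(theta), 0],
                                [0, 0, 1]])
    matrix = transform_matrix_offset_center(rotation_matrix,
                                            img.shape[1], img.shape[2])
    return apply_transform(img, matrix, channel_index=0)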
def ApplyRandomTransformations(_x, _y, _pts, _trans, _rot, _zoom, _shear, _elastix, _row_index=1, _col_index=2, _channel_index=0, _fill_mode='constant', _cval=0.):
if _elastix != 0:
sigma = _elastix # in pixel
kernelSize = 3
sizeAll = kernelSize + 2
imgShape = (_x.shape[1], _x.shape[2])
# create the indices of the 5x5 vector field (fieldPts.shape = (25,2))
fieldPts = np.mgrid[0.:1.:complex(sizeAll), 0.:1.:complex(sizeAll)].swapaxes(0,2).swapaxes(0,1).reshape((sizeAll*sizeAll, 2))
# create the displacement (x and y) of the 5x5 vector field (border have no displacement so it's 0) (displacementX.shape = (25))
displacementX = np.zeros((sizeAll*sizeAll))
displacementY = np.zeros((sizeAll*sizeAll))
for i in range(0, sizeAll*sizeAll):
if fieldPts[i][0] != 0. and fieldPts[i][0] != 1. \
and fieldPts[i][1] != 0. and fieldPts[i][1] != 1.:
displacementX[i] = np.random.normal(0, sigma, 1)
displacementY[i] = np.random.normal(0, sigma, 1)
# transform the indice of the 5x5 vector field in the image coordinate system (TODO WARNING works only with square images)
fieldPts = fieldPts*imgShape[0] # TODO check if it's not imgShape[0] - 1?
# create the indices of all pixels in the image (gridX.shape = (1024,1024))
gridX, gridY = np.mgrid[0.:(imgShape[0] - 1):complex(imgShape[0]), 0.:(imgShape[1] - 1):complex(imgShape[1])]
# interpolate the vector field for every pixels in the image (dxGrid.shape = (1024,1024))
dxGrid = scipy.interpolate.griddata(fieldPts, displacementX, (gridX, gridY), method='cubic')
dyGrid = scipy.interpolate.griddata(fieldPts, displacementY, (gridX, gridY), method='cubic')
# apply the displacement on every pixels (indices = [indices.shape[0] = 1024*1024, indices.shape[1] = 1024*1024])
indices = np.reshape(gridY + dyGrid, (-1, 1)), np.reshape(gridX + dxGrid, (-1, 1))
for chan in range(_x.shape[0]):
_x[chan] = scipy.ndimage.interpolation.map_coordinates(_x[chan], indices, order=2, mode='reflect').reshape(imgShape)
_x[chan] = np.clip(_x[chan], 0., 1.)
if _y is not None:
for chan in range(_y.shape[0]):
_y[chan] = scipy.ndimage.interpolation.map_coordinates(_y[chan], indices, order=2, mode='reflect').reshape(imgShape)
_y[chan] = np.clip(_y[chan], 0., 1.)
#if _pts is not None:
matrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
if _rot != 0:
theta = np.pi/180*np.random.uniform(-_rot, _rot)
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
matrix = np.dot(matrix, rotation_matrix)
if _trans != 0:
ty = np.random.uniform(-_trans, _trans)*_x.shape[_row_index]
tx = np.random.uniform(-_trans, _trans)*_x.shape[_col_index]
translation_matrix = np.array([[1, 0, ty],
[0, 1, tx],
[0, 0, 1]])
matrix = np.dot(matrix, translation_matrix)
if _shear != 0:
shear = np.random.uniform(-_shear, _shear)
shear_matrix = np.array([[1, -np.sin(shear), 0],
[0, np.cos(shear), 0],
[0, 0, 1]])
matrix = np.dot(matrix, shear_matrix)
if _zoom != 0:
zx, zy = np.random.uniform(1 - _zoom, 1 + _zoom, 2)
zoom_matrix = np.array([[zx, 0, 0],
[0, zy, 0],
[0, 0, 1]])
matrix = np.dot(matrix, zoom_matrix)
h, w = _x.shape[_row_index], _x.shape[_col_index]
transformMatrix = transform_matrix_offset_center(matrix, h, w)
_x = apply_transform(_x, transformMatrix, _channel_index, _fill_mode, _cval)
if _y is not None:
_y = apply_transform(_y, transformMatrix, _channel_index, _fill_mode, _cval)
if _pts is not None:
matrix = np.array([[1, 0, 0],
[0, 1, 0],
[0, 0, 1]])
if _rot != 0:
rotation_matrix = np.array([[np.cos(theta), -np.sin(theta), 0],
[np.sin(theta), np.cos(theta), 0],
[0, 0, 1]])
matrix = np.dot(matrix, rotation_matrix)
if _trans != 0:
translation_matrix = np.array([[1, 0, -tx],
[0, 1, -ty],
[0, 0, 1]])
matrix = np.dot(translation_matrix, matrix)
if _shear != 0:
shear_matrix = np.array([[np.cos(shear), 0, 0],
[-np.sin(shear), 1, 0],
[0, 0, 1]])
shear_matrix = np.linalg.inv(shear_matrix) # TODO write the inverse properly without computing it
matrix = np.dot(shear_matrix, matrix)
if _zoom != 0:
zoom_matrix = np.array([[1./zy, 0, 0],
[0, 1./zx, 0],
[0, 0, 1]])
matrix = np.dot(zoom_matrix, matrix)
transformMatrix = transform_matrix_offset_center(matrix, h, w)
_pts = np.dot(_pts, transformMatrix.T)
return _x, _y, _pts
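# Illustrative usage sketch (not called anywhere in this module): applying
# ApplyRandomTransformations to a dummy image/mask pair with the elastic
# deformation disabled. The factor values are arbitrary examples, not the
# module-level defaults used by GenerateImageOnTheFly below.
def _demo_random_transform():
    x = np.random.rand(1, 64, 64)  # channels-first image
    y = np.random.rand(1, 64, 64)  # matching mask
    x_t, y_t, _ = ApplyRandomTransformations(
        x, y, None, _trans=0.1, _rot=15., _zoom=0.1, _shear=0.1, _elastix=0)
    return x_t, y_t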
# if _nbData == -1 then the function creates an unlimited stream of augmented data
def GenerateImageOnTheFly(_createImageXFct, _GetIdFromNeedFct, _X, _previousImages, _Y, _outputTrain, _previousOutput, _setFiles, _needSetX, _needSetY, _batchSize, _epochSize , _nbData, _keepPctOriginal=0.5, _trans=TRANSLATION_FACTOR, _rot=ROTATION_FACTOR, _zoom=ZOOM_FACTOR, _shear=SHEAR_FACTOR, _elastix=VECTOR_FIELD_SIGMA, _intensity=INTENSITY_FACTOR, _hflip=True, _vflip=True, _3Dshape=False):
shapeX = _X.shape
shapeY = _Y.shape
nbChannels = shapeX[1] + _previousImages + _previousOutput
currentBatch = 0
imgX = np.empty((nbChannels, shapeX[2], shapeX[3]), dtype=np.float32)
blackImage = np.zeros((shapeX[2], shapeX[3]), dtype=np.float32)
if _nbData != -1:
seedList = np.random.randint(999999, size=_nbData)
nbSeedPerImage = _nbData//_epochSize
# debug = 0
while 1:
# if _nbData != -1:
# shuffleList = np.arange(_nbData)
# else:
# shuffleList = np.arange(_epochSize)
shuffleList = np.arange(_epochSize)
np.random.shuffle(shuffleList)
for i in range(_epochSize):
if currentBatch == 0:
if _3Dshape == False:
if IS_CHANNELS_FIRST == True:
x = np.empty((_batchSize, nbChannels, shapeX[2], shapeX[3]), dtype=np.float32)
else:
x = np.empty((_batchSize, shapeX[2], shapeX[3], nbChannels), dtype=np.float32)
else:
x = np.empty((_batchSize, 1, nbChannels, shapeX[2], shapeX[3]), dtype=np.float32)
if IS_CHANNELS_FIRST == True:
y = np.empty((_batchSize, shapeY[1], shapeY[2], shapeY[3]), dtype=np.float32)
else:
y = np.empty((_batchSize, shapeY[2], shapeY[3], shapeY[1]), dtype=np.float32)
# if _nbData != -1:
# rndStateNp = np.random.get_state()
# rndState = random.getstate()
# # np.random.seed(int(seedList[shuffleList[i]]))
# # random.seed(int(seedList[shuffleList[i]]))
# np.random.seed(int(seedList[shuffleList[i]*nbSeedPerImage + random.randint(0, nbSeedPerImage - 1)]))
# random.seed(int(seedList[shuffleList[i]*nbSeedPerImage + random.randint(0, nbSeedPerImage - 1)]))
# imgId = shuffleList[i]%len(_setFiles)
imgId = shuffleList[i]
_createImageXFct(imgX, imgId, _X, _previousImages, _outputTrain, _previousOutput, _setFiles, _needSetX, _needSetY, blackImage)
imgY = _Y[_GetIdFromNeedFct(imgId, _setFiles, _needSetY),...]
if random.random() > _keepPctOriginal:
# if False:
if _nbData != -1:
rndStateNp = np.random.get_state()
rndState = random.getstate()
np.random.seed(int(seedList[shuffleList[i]*nbSeedPerImage + random.randint(0, nbSeedPerImage - 1)]))
random.seed(int(seedList[shuffleList[i]*nbSeedPerImage + random.randint(0, nbSeedPerImage - 1)]))
if _intensity != 0:
imgX = random_channel_shift(imgX, _intensity)
imgX, imgY, _ = ApplyRandomTransformations(imgX, imgY, None, _trans, _rot, _zoom, _shear, _elastix)
if _hflip == True and random.random() > 0.5:
imgX = flip_axis(imgX, 1)
imgY = flip_axis(imgY, 1)
if _vflip == True and random.random() > 0.5:
imgX = flip_axis(imgX, 2)
imgY = flip_axis(imgY, 2)
if _nbData != -1:
np.random.set_state(rndStateNp)
random.setstate(rndState)
if IS_CHANNELS_FIRST == True:
x[currentBatch][...] = imgX[...]
y[currentBatch][...] = imgY[...]
else:
                imgXtmp = np.rollaxis(imgX, 0, 3)
import matplotlib.pyplot as plt
import numpy as np
from scipy import ndimage
from scipy.ndimage import binary_fill_holes as fillholes
from skimage import img_as_ubyte
from skimage.util import img_as_float
from skimage.exposure import adjust_sigmoid
from skimage.filters import threshold_otsu, threshold_triangle, rank, laplace, sobel
from skimage.segmentation import clear_border
from skimage.measure import label
from skimage.morphology import closing, square, disk, remove_small_objects, opening, dilation, watershed, erosion
from skimage.color import label2rgb, rgb2gray
from skimage.transform import rescale
import os
from os.path import join
from scipy import ndimage as ndi
from warnings import warn
def frequency_filter(im, mu, sigma, passtype='low'):
'''
This function applies a lowpass or highpass filter to an image.
    Parameters
    ----------
    im : (N, M) ndarray
        Grayscale input image.
    mu : float
        Center of the Gaussian frequency filter, in pixels (e.g. 500).
    sigma : float
        Standard deviation of the Gaussian frequency filter (e.g. 70).
    passtype: string
        Applies a 'high' or 'low' pass filter. Default value is 'low'.
Returns
-------
out : ndarray
Low or high pass filtered output image.
Examples
--------
>>> image = plt.imread('..\C3-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
>>> lowpass = frequency_filter(im, 500, 70, passtype='low')
'''
# define x and y based on image shape
y_length, x_length = np.shape(im)
xi = np.linspace(0, x_length-1, x_length)
yi = np.linspace(0, y_length-1, y_length)
x, y = np.meshgrid(xi, yi)
# define lowpass or highpass filter
if passtype == 'low':
gfilt = np.exp(-((x-mu)**2 + (y-mu)**2)/(2*sigma**2))
if passtype == 'high':
gfilt = 1 - np.exp(-((x-mu)**2 + (y-mu)**2)/(2*sigma**2))
fim = np.fft.fft2(im) # moving to spacial domain
fim_c = np.fft.fftshift(fim) # centering
fim_filt = np.multiply(fim_c, gfilt) # apply the filter
fim_uc = np.fft.ifftshift(fim_filt) # uncenter
im_pass = np.real(np.fft.ifft2(fim_uc)) # perform inverse transform
return im_pass
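# Illustrative usage sketch (not called anywhere in this module): running both
# pass types of frequency_filter on a synthetic image instead of the TIFF named
# in the docstring. The mu/sigma values here are arbitrary examples.
def _demo_frequency_filter():
    im = np.random.rand(256, 256)
    lowpass = frequency_filter(im, 128, 30, passtype='low')
    highpass = frequency_filter(im, 128, 30, passtype='high')
    return lowpass, highpass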
def _check_dtype_supported(ar):
'''
Used in remove_large_objects function and taken from
skimage.morphology package.
'''
# Should use `issubdtype` for bool below, but there's a bug in numpy 1.7
if not (ar.dtype == bool or np.issubdtype(ar.dtype, np.integer)):
raise TypeError("Only bool or integer image types are supported. "
"Got %s." % ar.dtype)
def remove_large_objects(ar, max_size=10000, connectivity=1, in_place=False):
'''
Remove connected components larger than the specified size. (Modified from
skimage.morphology.remove_small_objects)
Parameters
----------
ar : ndarray (arbitrary shape, int or bool type)
The array containing the connected components of interest. If the array
type is int, it is assumed that it contains already-labeled objects.
The ints must be non-negative.
max_size : int, optional (default: 10000)
The largest allowable connected component size.
connectivity : int, {1, 2, ..., ar.ndim}, optional (default: 1)
The connectivity defining the neighborhood of a pixel.
in_place : bool, optional (default: False)
If `True`, remove the connected components in the input array itself.
Otherwise, make a copy.
Raises
------
TypeError
If the input array is of an invalid type, such as float or string.
ValueError
If the input array contains negative values.
Returns
-------
out : ndarray, same shape and type as input `ar`
The input array with small connected components removed.
Examples
--------
>>> from skimage import morphology
>>> a = np.array([[0, 0, 0, 1, 0],
... [1, 1, 1, 0, 0],
... [1, 1, 1, 0, 1]], bool)
>>> b = morphology.remove_large_objects(a, 6)
>>> b
array([[False, False, False, False, False],
[ True, True, True, False, False],
[ True, True, True, False, False]], dtype=bool)
>>> c = morphology.remove_small_objects(a, 7, connectivity=2)
>>> c
array([[False, False, False, True, False],
[ True, True, True, False, False],
[ True, True, True, False, False]], dtype=bool)
>>> d = morphology.remove_large_objects(a, 6, in_place=True)
>>> d is a
True
'''
# Raising type error if not int or bool
_check_dtype_supported(ar)
if in_place:
out = ar
else:
out = ar.copy()
if max_size == 0: # shortcut for efficiency
return out
if out.dtype == bool:
selem = ndi.generate_binary_structure(ar.ndim, connectivity)
ccs = np.zeros_like(ar, dtype=np.int32)
ndi.label(ar, selem, output=ccs)
else:
ccs = out
try:
component_sizes = np.bincount(ccs.ravel())
except ValueError:
raise ValueError("Negative value labels are not supported. Try "
"relabeling the input with `scipy.ndimage.label` or "
"`skimage.morphology.label`.")
if len(component_sizes) == 2:
warn("Only one label was provided to `remove_small_objects`. "
"Did you mean to use a boolean array?")
too_large = component_sizes > max_size
too_large_mask = too_large[ccs]
out[too_large_mask] = 0
return out
def phalloidin_labeled(im, selem=disk(3), mu=500, sigma=70, cutoff=0, gain=100,
min_size=250, max_size=10000, connectivity=1):
"""
Signature: phalloidin_labeled(*args)
Docstring: Segment and label image
Extended Summary
----------------
    This function applies preprocessing filters (sigmoid contrast adjustment
    and a Gaussian frequency filter), then computes an Otsu threshold and
    binarizes the image: pixel intensities above the threshold become
    foreground (white) and those below become background (black). Next, it
    cleans up the image by filling holes within the cell outlines and removing
    small and large objects. It then labels connected pixels with the same
    value and defines them as a region. It displays an RGB overlay with
    color-coded labels and returns the labeled image.
    Parameters
    ----------
im : (N, M) ndarray
Grayscale input image.
selem : numpy.ndarray, optional
Area used for separating cells. Default value is
skimage.morphology.disk(3).
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.
gain : float, optional
The constant multiplier in exponential's power of sigmoid function.
Default value is 100.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
min_size : int, optional
The smallest allowable object size. Default value is 250.
max_size : int, optional
The largest allowable object size. Default value is 10000.
connectivity : int, optional
        The connectivity defining the neighborhood of a pixel. Default value
        is 1.
Returns
-------
out : label_image (ndarray) segmented and object labeled for analysis
Examples
--------
    image = plt.imread('C3-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
    label_image = phalloidin_labeled(image, mu=500, sigma=70,
                                     cutoff=0, gain=100)
"""
# contrast adjustment
im_con = adjust_sigmoid(im, cutoff=cutoff, gain=gain, inv=False)
# contrast + low pass filter
im_lo = frequency_filter(im_con, mu, sigma, passtype='low')
# contrast + low pass + binary
thresh = threshold_otsu(im_lo, nbins=256)
im_bin = im_lo > thresh
# fill holes, separate cells, and remove small/large objects
im_fill = ndimage.binary_fill_holes(im_bin)
im_open = opening(im_fill, selem)
im_clean_i = remove_small_objects(im_open, min_size=min_size,
connectivity=connectivity, in_place=False)
im_clean = remove_large_objects(im_clean_i, max_size=max_size,
connectivity=connectivity, in_place=False)
# labelling regions that are cells
label_image = label(im_clean)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=im, bg_label=0)
print(image_label_overlay.shape)
# plot overlay image
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
return (label_image)
def SMA_segment(im, mu=500, sigma=70, cutoff=0, gain=100,
min_size=100, connectivity=1):
"""
This function binarizes a Smooth muscle actin (SMA) fluorescence microscopy channel
using contrast adjustment, high pass filter, otsu thresholding, and removal
of small objects.
    Parameters
    ----------
im : (N, M) ndarray
Grayscale input image.
cutoff : float, optional
Cutoff of the sigmoid function that shifts the characteristic curve
in horizontal direction. Default value is 0.
gain : float, optional
The constant multiplier in exponential's power of sigmoid function.
Default value is 100.
mu : float, optional
Average for input in low pass filter. Default value is 500.
sigma : float, optional
Standard deviation for input in low pass filter. Default value is 70.
min_size : int, optional
The smallest allowable object size. Default value is 100.
connectivity : int, optional
        The connectivity defining the neighborhood of a pixel. Default value
        is 1.
Returns
-------
out : label_image (ndarray) segmented and object labeled for analysis,
image_label_overlay (ndarray)
Examples
--------
>>> image = plt.imread('..\C4-NTG-CFbs_NTG5ECM_1mMRGD_20x_003.tif')
>>> label, overlay = SMA_segment(image, mu=500, sigma=70,
cutoff=0, gain=100)
"""
# contrast adjustment
im_con = adjust_sigmoid(im, cutoff=cutoff, gain=gain, inv=False)
# contrast + low pass filter
im_lo = frequency_filter(im_con, mu, sigma, passtype='low')
# contrast + low pass + binary
thresh = threshold_otsu(im_lo, nbins=256)
im_bin = im_lo > thresh
# remove small objects
im_bin_clean = remove_small_objects(im_bin, min_size=min_size,
connectivity=connectivity,
in_place=False)
# labelling regions that are cells
label_image = label(im_bin_clean)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=im, bg_label=0)
return label_image, image_label_overlay
def colorize(image, i, x):
"""
Signature: colorize(*args)
Docstring: segment and label image
Extended Summary:
----------------
The colorize function defines the threshold value for the desired image by
the triangle function and then creates a binarized image by setting pixel
intensities above that thresh value to white, and the ones below to black
(background). Next, it closes up the image by filling in random noise
within the cell outlines and smooths/clears out the border. It then labels
adjacent pixels with the same value and defines them as a region. It
returns an RGB image with color-coded labels.
Parameters:
----------
image : 2D array
greyscale image
i : int
dimension of square to be used for binarization
x : float
dimension of image in microns according to imageJ
Returns:
--------
RGB image overlay
int : 2D ndarray
"""
# resizing image
image = rescale(image, x/1024, anti_aliasing=False)
# applying threshold to image
thresh = threshold_triangle(image)
binary = closing(image > thresh, square(i))
binary = ndimage.binary_fill_holes(binary)
# cleaning up boundaries of cells
cleared = clear_border(binary)
# labelling regions that are cells
label_image = label(cleared)
# coloring labels over cells
image_label_overlay = label2rgb(label_image, image=image, bg_label=0)
print(image_label_overlay.shape)
fig, ax = plt.subplots(figsize=(10, 6))
ax.imshow(image_label_overlay)
ax.set_axis_off()
plt.tight_layout()
plt.show()
return (label_image)
def sharpen_nuclei(image, selem=square(8), ksize=10, alpha=0.2, sigma=40,
imshow=True):
"""
    Highlight nuclei in the image.
    Make a sharp contrast between nuclei and background to highlight nuclei
    in the input image, achieved by mean blurring, Laplace sharpening, and a
    Gaussian high-pass filter. The selem, ksize, alpha, and sigma parameters
    have default values and can be customized by the user.
Parameters
----------
image : numpy.ndarray
        grayscale image in which the nuclei are to be enhanced.
selem : numpy.ndarray
area used for scanning in blurring, default to be square(8).
ksize : int
ksize used for laplace transform, default to be 10.
alpha : float
coefficient used in laplace sharpening, default to be 0.2.
sigma : int
        power coefficient in the Gaussian filter, default to be 40.
imshow : bool, str
users choose whether to show the processed images, default to be True.
Returns
----------
    Two processed grayscale images (2-D arrays) with sharpened nuclei,
    produced using two different sharpening styles.
"""
image = img_as_ubyte(image)
def custom(image):
        imin = np.min(image)
import numpy as np
#import bethy_fapar as fapar
class photosynthesis():
def __init__(self,datashape=None):
'''
Class initialisation and setup of parameters
'''
        if datashape is None:
self.data = np.zeros([100])
self.Tc = np.ones([100])*25
self.C3 = np.ones([100]).astype(bool)
self.Ipar = (np.arange(100)/100.) * 2000. * 1e-6
self.Lcarbon = np.ones([100]) * 1
self.Rcarbon = np.ones([100]) * 1
self.Scarbon = np.ones([100]) * 1
self.pft = np.array(['C3 grass']*100)
# zero C in K
self.zeroC = 273.15
# gas constant J mol-1 K-1
self.R_gas = 8.314
# oxygen concentration
self.Ox = 0.21 # mol(O2)mol(air)-1
self.O2 = 0.23 # Atmospheric concentration of oxygen (kg O2/kg air)
# energy content of PAR quanta
self.EPAR = 220. # kJmol-1
# ratio of dark respiration to PVM at 25 C
self.FRDC3 = 0.011
self.FRDC4 = 0.042
# scaling for GammaStar
self.GammaStarScale = 1.7e-6
# Effective quantum efficiency C4
self.ALC4 = 0.04
# Curvature parameter (C4)
self.Theta = 0.83
self.molarMassAir_kg = 28.97e-3
self.molarMassCO2_kg = 44.011e-3
self.co2SpecificGravity = self.molarMassCO2_kg/self.molarMassAir_kg
self.variables()
self.defaults()
self.initialise()
def test1(self):
'''
low light, span a temperature range, normal CO2
'''
self.Ipar = np.ones_like(self.data) * 200. * 1e-6
self.co2_ppmv = 390.
self.Tc = np.arange(100) - 30.
self.initialise()
self.defaults()
self.photosynthesis()
import pylab as plt
plt.clf()
plt.plot(self.Tc,self.Wc * 1e6,label='Wc')
plt.plot(self.Tc,self.Wl * 1e6,label='Wl')
plt.plot(self.Tc,self.We * 1e6,label='We')
plt.plot(self.Tc,self.W * 1e6,label='W')
plt.legend()
def photosynthesis(self):
'''
Uses:
self.Tc : canopy (leaf) temperature (C)
self.C3 : array of True ('C3') or False ('C4')
self.Ipar : incident PAR (mol m-2 s-1)
self.Lcarbon : leaf C pool (kg C m-2)
self.Rcarbon : root C pool (kg C m-2)
self.Scarbon : respiring stem C pool (kg C m-2)
'''
self.leafPhotosynthesis()
self.canopyPhotosynthesis()
def variables(self):
'''
Set some items that might be driven from a control file
Generates:
self.theta : mean soil moisture concentration in the root zone,
self.thetac : Critical volumetric SMC (cubic m per cubic m of soil)
self.thetaw : Volumetric wilting point (cubic m per cubic m of soil)
'''
self.thetaw = 0.136328
self.thetac = 0.242433
self.theta = np.ones_like(self.data)
self.m_air = 28.966
self.co2_ppmv = 383.
def initialise(self):
'''
Initialise some items that might be driven from a control file
Uses:
self.data : data sizing array
Generates:
self.theta : mean soil moisture concentration in the root zone,
self.co2c : Canopy level CO2 concentration (kg CO2/kg air).
self.pstar : Surface pressure (Pa)
self.m_co2 : molecular weight of CO2
self.m_air : molecular weight of dry air
'''
self.m_co2 = self.m_air * self.epco2
self.co2_mmr = self.co2_ppmv * self.m_co2 / self.m_air * 1.0e-6
self.co2c = self.co2_mmr*1.
def defaults(self):
'''
Uses:
self.C3 : array of True ('C3') or False ('C4')
self.Tc : canopy (leaf) temperature (C)
Generates:
self.data : data sizing array
self.epco2 : Ratio of molecular weights of CO2 and dry air.
self.epo2 : Ratio of molecular weights of O2 and dry air.
self.Oa : Partial pressume of O2 in the atmosphere
self.ne : constant for Vcmax (mol CO2 m-2 s-1 kg C (kg N)-1)
self.Q10_leaf: Q10 dependence leaf
self.Q10_rs : Q10 dependence rs
self.Q10_Kc : Q10 dependence Kc: CO2
self.Q10_Ko : Q10 dependence Ko: O2
self.Kc : Michaelis-Menten paramemeter for CO2
self.Ko : Michaelis-Menten paramemeter for O2
self.beta1 : colimitation coefficients
self.beta2 : colimitation coefficients
self.nl : leaf nitrogen
      self.Gamma : CO2 compensation point in the absence of mitochondrial
respiration (Pa)
self.tau : Rubisco specificity for CO2 relative to O2
self.kappao3 : ratio of leaf resistance for O3 to leaf resistance to water vapour
self.Tupp : PFT-specific parameter ranges: upper (C)
self.Tlow : PFT-specific parameter ranges: lower (C)
self.Fo3_crit: critical value of Ozone stress limitation
self.a : Ozone factor
self.k : PAR extinction coefficient
self.alpha : quantum efficiency mol CO2 [mol PAR photons]-1
self.omega : leaf PAR scattering coefficient
self.fdr : dark respiration coefficient
self.rg : growth respiration coefficient
self.n0 : top leaf N concentration (kg N [kg C]-1)
self.nrl : ratio of N conc in roots and leaves
self.nsl : ratio of N conc in stems and leaves
self.Vcmax25 : maximum rate of carboxylation of Rubisco (mol CO2 m-2 s-1)
at 25 C
self.Vcmax : maximum rate of carboxylation of Rubisco (mol CO2 m-2 s-1)
self.fc : temperature factors for Vcmax
self.aws : ratio of total stem C to respiring stem C
self.gamma0 : minimum leaf turnover rate (360 days-1)
self.dm : rate of change of turnover with soil moisture
stress (360 days-1)
self.dt : rate of change of turnover with T (360 days K)-1
      self.moff : threshold soil moisture stress
self.toff : threshold temperature (K)
self.gammap : rate of leaf growth (360 days)-1
self.gammav : disturbance rate (360 days-1)
self.gammar : root biomass turnover rate (360 days-1)
self.gammaw : woody biomass turnover rate (360 days-1)
self.Lmax : maximum LAI
self.Lmin : minimum LAI
self.sigmal : specific leaf density (kg C m-2 per unit LAI)
self.awl : allometric coefficient
self.bwl : allometric exponent
self.etasl : ratio of live stemwood to LAI * height
self.dt : time interval
self.ratio : Ratio of leaf resistance for CO2 to leaf resistance for H2O.
self.glmin : minimum stomatal conductance
'''
self.dt = 1.0
self.data = np.zeros_like(self.C3).astype(float)
self.glmin = 1.0e-10
self.pstar = 101e3
self.epco2 = 1.5194
self.epo2 = 1.106
self.ratio=1.6
#==============Jules/ triffid parameters
# default self.Q10_leaf, self.Q10_rs etc.
self.Q10_leaf = 2.0
self.Q10_rs = 0.57
self.Q10_Kc = 2.1
self.Q10_Ko = 1.2
# leaf nitrogen/Vcmax terms
# default for self.ne mol CO2 m-2 s-1 kg C (kg N)-1
self.n0 = np.zeros_like(self.data) + 0.060
self.n0[self.pft == 'Broadleaf tree'] = 0.046
self.n0[self.pft == 'Needleleaf tree'] = 0.033
self.n0[self.pft == 'C3 grass'] = 0.073
self.ne = 0.0008*np.ones_like(self.data)
self.ne[~self.C3] = 0.0004
self.nl = self.n0*np.ones_like(self.data)
# CO2 compensation point
self.Oa = 0.21 * self.pstar # assuming 21% of atmosphere is O2
self.tau = 2600.*self.Q10_rs**(0.1*(self.Tc-25.))
self.Gamma = (self.Oa/(2.*self.tau))*np.ones_like(self.data)
self.Gamma[~self.C3] = 0.
# colimitation coefficients:
self.beta1 = 0.83
self.beta2 = 0.93
# use larger values here
self.beta1 = 0.999
self.beta2 = 0.999
# ratio of leaf resistance for O3 to leaf resistance to water vapour
self.kappao3 = 1.67
# leaf T limits (C)
self.Tupp = np.zeros_like(self.data) + 36.0
self.Tlow = np.zeros_like(self.data)
self.Tlow[self.pft == 'Needleleaf tree'] = -10.0
self.Tlow[self.pft == 'C4 grass'] = 13.0
self.Tupp[self.pft == 'Needleleaf tree'] = 26.0
self.Tupp[self.pft == 'C4 grass'] = 45.0
self.Vcmax25 = self.ne * self.nl
self.ft = self.Q10_leaf ** (0.1 * (self.Tc-25.))
self.Vcmax = self.Vcmax25 * self.ft / ((1.0+np.exp(0.3*(self.Tc-self.Tupp)))\
*(1.0+np.exp(0.3*(self.Tlow-self.Tc))))
# O3 terms
self.Fo3_crit = np.zeros_like(self.data) + 1.6
self.Fo3_crit[self.pft == 'C3 grass'] = 5.0
self.Fo3_crit[self.pft == 'C4 grass'] = 5.0
self.a = np.zeros_like(self.data) + 0.04
self.a[self.pft == 'Needleleaf tree'] = 0.02
self.a[self.pft == 'C3 grass'] = 0.25
self.a[self.pft == 'C4 grass'] = 0.13
self.a[self.pft == 'Shrub'] = 0.03
self.k = np.zeros_like(self.data) + 0.5
self.alpha = np.zeros_like(self.data) + 0.08
self.alpha[self.pft == 'C3 grass'] = 0.12
self.alpha[self.pft == 'C4 grass'] = 0.06
self.omega = np.zeros_like(self.data) + 0.15
self.omega[self.pft == 'C4 grass'] = 0.17
self.fdr = np.zeros_like(self.data) + 0.015
self.fdr[self.pft == 'C4 grass'] = 0.025
self.rg = np.zeros_like(self.data) + 0.25
self.nrl = np.zeros_like(self.data) + 1.00
self.nsl = np.zeros_like(self.data) + 1.00
self.nsl[self.pft == 'Broadleaf tree'] = 0.10
self.nsl[self.pft == 'Needleleaf tree'] = 0.10
self.aws = np.zeros_like(self.data) + 10.0
self.aws[self.pft == 'C3 grass'] = 1.0
self.aws[self.pft == 'C4 grass'] = 1.0
self.gamma0 = np.zeros_like(self.data) + 0.25
self.dm = np.zeros_like(self.data) + 0.0
self.dt = np.zeros_like(self.data) + 9.0
self.moff = np.zeros_like(self.data) + 0.0
self.toff = np.zeros_like(self.data) + 278.15
self.toff[self.pft == 'Needleleaf tree'] = 233.15
self.toff[self.pft == 'Shrub'] = 233.15
self.gammap = np.zeros_like(self.data) + 20.
self.gammap[self.pft == 'Broadleaf tree'] = 15.0
self.gammav = np.zeros_like(self.data) + 0.2
self.gammav[self.pft == 'Broadleaf tree'] = 0.005
self.gammav[self.pft == 'Needleleaf tree'] = 0.007
self.gammav[self.pft == 'Shrub'] = 0.05
self.gammar = np.zeros_like(self.data) + 0.25
self.gammar[self.pft == 'Needleleaf tree'] = 0.15
self.gammaw = np.zeros_like(self.data) + 0.20
self.gammaw[self.pft == 'Broadleaf tree'] = 0.005
self.gammaw[self.pft == 'Needleleaf tree'] = 0.005
self.gammaw[self.pft == 'Shrub'] = 0.05
self.Lmax = np.zeros_like(self.data) + 4.0
self.Lmax[self.pft == 'Broadleaf tree'] = 9.00
self.Lmax[self.pft == 'Needleleaf tree'] = 5.00
self.Lmax[self.pft == 'Shrub'] = 3.00
self.Lmin = np.zeros_like(self.data) + 1.0
self.awl = np.zeros_like(self.data) + 0.65
self.awl[self.pft == 'C3 grass'] = 0.005
self.awl[self.pft == 'C4 grass'] = 0.005
self.awl[self.pft == 'Shrub'] = 0.10
self.bwl = np.zeros_like(self.data) + 1.667
self.sigmal = np.zeros_like(self.data) + 0.05
self.sigmal[self.pft == 'C3 grass'] = 0.025
self.sigmal[self.pft == 'Needleleaf tree'] = 0.10
self.sigmal[self.pft == 'Broadleaf tree'] = 0.0375
self.etasl = np.zeros_like(self.data) + 0.01
def leafPhotosynthesis(self):
'''
NB:
O3 treatment requires:
self.ra, self.Fo3_crit, self.a, self.kappao3, self.gl, self.O3
which are starred * below. Safe failure if not present
Uses:
self.Tc : canopy (leaf) temperature (C)
self.C3 : array of True ('C3') or False ('C4')
self.Ipar : incident PAR (mol m-2 s-1)
*self.O3 : molar conc. of O3 at reference level (nmol m-3)
*self.ra : aerodynamic and boundary layer resistance between leaf surface
and reference level (s m-1)
*self.gl : leaf conductance for H20 (m s-1)
[set in self.initialise()]
self.thetac : soil moisture critical concentration
self.thetaw : soil moisture critical concentration
[set in self.variables()]
self.theta : mean soil moisture concentration in the root zone,
self.pstar : Surface pressure (Pa)
self.co2c : Canopy level CO2 concentration (kg CO2/kg air).
[set in initialiser]
self.zeroC : 0 C in K
self.R_gas : J mol-1 K-1
self.o2 : Canopy level O2 concentration (kg O2/kg air).
[set in self.defaults()]
self.Oa : Partial pressure of atmos Oxygen (Pa)
self.epco2 : Ratio of molecular weights of CO2 and dry air.
self.epo2 : Ratio of molecular weights of O2 and dry air.
self.Vcmax : maximum rate of carboxylation of Rubisco (mol CO2 m-2 s-1)
      self.Gamma : CO2 compensation point in the absence of mitochondrial
respiration (Pa)
self.beta1 : colimitation coefficients
self.beta2 : colimitation coefficients
self.alpha : quantum efficiency of photosynthesis (mol CO2 mol-1 PAR)
self.omega : leaf scattering coefficient for PAR
*self.kappao3 : ratio of leaf resistance for O3 to leaf resistance to water vapour
*self.Fo3_crit: critical value of Ozone stress limitation
*self.a : Ozone factor
self.ratio : Ratio of leaf resistance for CO2 to leaf resistance for H2O.
Generates:
self.Kc : Michaelis-Menten paramemeter for CO2
self.Ko : Michaelis-Menten paramemeter for O2
self.ci : leaf internal CO2 partial pressure (Pa)
self.Wc : Rubisco-limited rate
self.Wl : Light-limited rate
self.We : Rate of transport of photosynthetic products
self.Wp : Wc/Wl smoothed term
self.W : combined limiting rate
self.Rd : leaf dark respiration
*self.Fo3 : leaf O3 flux
self.Ap : (unstressed) leaf photosynthetic carbon uptake
self.beta : water stress limitation
*self.F : Ozone stress limitation
self.Al : leaf photosynthetic carbon uptake
Updated:
self.gl : leaf stomatal conductance
'''
c3 = np.where(self.C3)
c4 = np.where(~self.C3)
self.ca = np.ones_like(self.data) * self.co2c / self.epco2 * self.pstar
self.oa = np.ones_like(self.data) * self.O2 / self.epo2 * self.pstar
# we need ci here
# we will estimate that here after Knorr, 1988
# for simplicity
self.ci = self.ca * 0.87
        self.ci[~self.C3] = self.ca[~self.C3] * 0.67
self.Kc = 30. * self.Q10_Kc ** (0.1*(self.Tc - 25.))
self.Ko = 3e4 * self.Q10_Ko ** (0.1*(self.Tc - 25.))
self.Wc = self.Vcmax*1.
self.Wc[c3] = self.Vcmax[c3] * ((self.ci-self.Gamma)/(self.ci+self.Kc*(1+self.Oa/self.Ko)))[c3]
self.Wc[self.Wc<0] = 0.
self.Wl = self.alpha*(1-self.omega)*self.Ipar
        self.Wl[c3] = (self.alpha*(1-self.omega)*self.Ipar)[c3]\
                    * ((self.ci-self.Gamma)/(self.ci+2.*self.Gamma))[c3]
self.Wl[self.Wl<0] = 0.
self.We = 0.5 * self.Vcmax
self.We[c4] = (2.e4 * self.Vcmax * self.ci/self.pstar)[c4]
self.We[self.We<0] = 0.
b1 = self.beta1*np.ones_like(self.data)
b2 = -(self.Wc+self.Wl)
b3 = self.Wc*self.Wl
self.Wp = (-b2/(2.*b1) - np.sqrt(b2*b2/(4*b1*b1) - b3/b1))/self.beta1
b1 = self.beta2*np.ones_like(self.data)
b2 = -(self.Wp+self.We)
b3 = self.Wp*self.We
self.W = -b2/(2.*b1) - np.sqrt(b2*b2/(4*b1*b1) - b3/b1)
self.Rd = self.fdr * self.Vcmax
# Calculate the net rate of photosynthesis
self.Ap = self.W - self.Rd
self.beta = 1.0+(self.W*0.)
w = np.where(self.theta <= self.thetac)
self.beta[w] = ((self.theta-self.thetaw)/(self.thetac-self.thetaw))[w]
w = np.where(self.theta <= self.thetaw)
self.beta[w] = 0.0
# water limited net rate of photosynthesis
self.Al = self.Ap * self.beta
# Calculate the factor for converting mol/m3 into Pa (J/m3).
conv = self.R_gas * (self.Tc + self.zeroC)
# Diagnose the leaf conductance
# Leaf conductance for CO2 (m/s)
glco2 = (self.Al * conv) / (self.ca - self.ci)
self.gl = self.ratio * glco2
# Close stomata at points with negative or zero net photosynthesis
# or where the leaf resistance exceeds its maximum value.
w = np.where( ( self.gl <= self.glmin ) * (self.Al <= 0))
self.gl[w] = self.glmin
glco2 = self.gl/self.ratio
self.gl = self.ratio * glco2
        self.Al[w] = (-self.Rd * self.beta)[w]
# quadratic for O3
# requires:
# self.ra, self.Fo3_crit, self.a, self.kappao3, self.gl, self.O3
try:
a = self.gl * self.ra
b = self.a * self.Fo3_crit * self.O3 - self.kappao3 \
- self.gl * self.ra * (self.a * self.Fo3_crit + 1)
c = self.a * self.Fo3_crit * self.kappao3 + self.kappao3
coefs = [a,b,c]
            roots = np.roots(coefs)
self.F = np.min(roots)
gl = self.gl * self.F
self.Fo3 = self.O3/(self.ra - self.kappao3 / gl)
self.F = (self.Fo3-self.Fo3_crit)
self.F[self.F<0.] = 0.
self.F = 1. - self.a * self.F
except:
self.F = 1.0
self.Fo3 = 0.0
self.Al = self.Al * self.F
self.gl = self.gl * self.F
return
def canopyPhotosynthesis(self):
'''
Big leaf (Sellers equivalent)
Uses:
self.Lcarbon : leaf C pool (kg C m-2)
self.Rcarbon : root C pool (kg C m-2)
self.Scarbon : respiring stem C pool (kg C m-2)
[set in self.defaults()]
self.k : canopy geometry term (G function)
self.rg : growth respiration coefficient
self.n0 : top leaf N concentration (kg N (kg C)-1)
self.sigmal : specific leaf density (kg C m-2 per unit of LAI)
self.nrl : proportion of root N to leaf N
self.nsl : proportion of stem N to leaf N
self.aws : ratio of total stem C to respiring stem C
self.etasl : ratio of live stemwood to LAI * height
[set in self.leafPhotosynthesis()]
self.Al : leaf assimilation
self.Rd : leaf dark respiration
      self.beta : water limiting factor
Generates:
self.nm : mean leaf N concentration (kg N (kg C)-1)
self.Ac : canopy assimilation
self.Rdc : canopy dark respiration
self.PiG : GPP
self.Pi : NPP
self.Rp : plant respiration
self.Rpg : growth respiration
self.Rpm : maintenance respiration
self.Nl : leaf N conc.
self.Nr : root N conc.
self.Nw : wood N conc.
self.Lc : leaf area index
'''
self.Lc = self.Lcarbon / self.sigmal
self.Ac = self.Al * (1. - np.exp(-self.k * self.Lc))/self.k
self.Rdc = self.Rd * (1. - np.exp(-self.k * self.Lc))/self.k
self.PiG = self.Ac + self.beta * self.Rdc
self.nm = self.n0*1.
#self.Scarbon = self.etasl * self.h * self.Lc
self.Nl = self.nm * self.sigmal * self.Lc
self.Nr = self.nrl * self.nm * self.Rcarbon
self.Ns = self.nsl * self.nm * self.Scarbon
self.Rpm = 0.012 * self.Rdc * (self.beta + (self.Nr + self.Ns)/(self.Nl))
self.Rpg = self.rg * (self.PiG - self.Rpm)
self.Rp = self.Rpm + self.Rpg
self.Pi = self.PiG - self.Rp
def phenology(self):
'''
Uses:
self.gamma0 : minimum leaf turnover rate (360 days-1)
self.dm : rate of change of turnover with soil moisture
stress (360 days-1)
self.dt : rate of change of turnover with T (360 days K)-1
self.moff : threshold soil mositure stress
self.toff : threshold temperature (K)
self.gammap : rate of leaf growth (360 days)-1
self.Tc : canopy (leaf) temperature (C)
self.Lb : seasonal maximum LAI
self.L : actual LAI
self.dt : time interval(days)
Generates:
self.gammalm : leaf mortality rate
Updates:
self.p : phenological status
'''
self.gammalm = self.gamma0 * (1. + self.dt*(self.toff-self.Tc))
        self.gammalm[self.Tc > self.toff] = self.gamma0[self.Tc > self.toff]
#self.p = self.L / self.Lb
        self.dp_dt = np.zeros_like(self.Tc)
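# Illustrative usage sketch (not called anywhere in this module): the class's
# built-in temperature-sweep test exercises the full leaf/canopy chain on the
# default 100-point C3-grass dataset. It assumes pylab/matplotlib is importable,
# because test1 plots the limiting rates.
def _demo_photosynthesis():
    ph = photosynthesis()
    ph.test1()        # sweeps Tc from -30 to 69 C at 200e-6 mol PAR m-2 s-1
    return ph.W       # combined (co-limited) carboxylation rate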
"""
Basic Equations for solving shallow water problems
#####################
"""
from pysph.sph.equation import Equation
from pysph.sph.integrator_step import IntegratorStep
from pysph.sph.integrator import Integrator
from compyle.api import declare
from pysph.sph.wc.linalg import gj_solve, augmented_matrix
from numpy import sqrt, cos, sin, zeros, pi, exp
import numpy as np
import numpy
M_PI = pi
class CheckForParticlesToSplit(Equation):
r"""Particles are tagged for splitting if the following condition is
satisfied:
    .. math::
        (A_i > A_{max}) \wedge (h_i < h_{max}) \wedge
        (x_{min} < x_i < x_{max}) \wedge (y_{min} < y_i < y_{max})
References
----------
.. [Vacondio2010] <NAME>, <NAME> and <NAME>, "Accurate
particle splitting for smoothed particle hydrodynamics in shallow water
with shock capturing", Int. J. Numer. Meth. Fluids, 69 (2012), pp.
1377-1410
"""
def __init__(self, dest, h_max=1e9, A_max=1e9, x_min=-1e9, x_max=1e9,
y_min=-1e9, y_max=1e9):
r"""
Parameters
----------
h_max : float
maximum smoothing length beyond which splitting is deactivated
A_max : float
maximum area beyond which splitting is activated
x_min : float
minimum distance along x-direction beyond which splitting is
activated
x_max : float
maximum distance along x-direction beyond which splitting is
deactivated
y_min : float
minimum distance along y-direction beyond which splitting is
activated
y_max : float
maximum distance along y-direction beyond which splitting is
deactivated
"""
self.A_max = A_max
self.h_max = h_max
self.x_min = x_min
self.x_max = x_max
self.y_min = y_min
self.y_max = y_max
super(CheckForParticlesToSplit, self).__init__(dest, None)
def initialize(self, d_idx, d_A, d_h, d_x, d_y, d_pa_to_split):
if (d_A[d_idx] > self.A_max and d_h[d_idx] < self.h_max
and (self.x_min < d_x[d_idx] < self.x_max)
and (self.y_min < d_y[d_idx] < self.y_max)):
d_pa_to_split[d_idx] = 1
else:
d_pa_to_split[d_idx] = 0
class ParticleSplit(object):
r"""**Hexagonal particle splitting algorithm**
References
----------
.. [Vacondio2010] <NAME>, <NAME> and <NAME>, "Accurate
particle splitting for smoothed particle hydrodynamics in shallow water
with shock capturing", Int. J. Numer. Meth. Fluids, 69 (2012), pp.
1377-1410
"""
def __init__(self, pa_arr):
r"""
Parameters
----------
pa_arr : pysph.base.particle_array.ParticleArray
particle array of fluid
"""
self.pa_arr = pa_arr
        # Ratio of the mass of the daughter particle located at the center of
        # the hexagon to its parent's mass
        self.center_pa_mass_frac = 0.178705766141917
        # Ratio of the mass of a daughter particle located at a vertex of the
        # hexagon to its parent's mass
        self.vertex_pa_mass_frac = 0.136882287617319
        # Ratio of the smoothing length of a daughter particle to its parent's
        # smoothing length
        self.pa_h_ratio = 0.9
        # Ratio of the distance between the center daughter particle and a
        # vertex daughter particle to the parent's smoothing length
        self.center_and_vertex_pa_separation_frac = 0.4
# Get index of the parent particles to split
self.idx_pa_to_split = self._get_idx_of_particles_to_split()
# Number of daughter particles located at the vertices of hexagon after
# splitting
self.num_vertex_pa_after_single_split = 6
def do_particle_split(self, solver=None):
if not self.idx_pa_to_split.size:
# If no particles to split then return
return
else:
# Properties of parent particles to split
h_parent = self.pa_arr.h[self.idx_pa_to_split]
h0_parent = self.pa_arr.h0[self.idx_pa_to_split]
m_parent = self.pa_arr.m[self.idx_pa_to_split]
x_parent = self.pa_arr.x[self.idx_pa_to_split]
y_parent = self.pa_arr.y[self.idx_pa_to_split]
u_parent = self.pa_arr.u[self.idx_pa_to_split]
v_parent = self.pa_arr.v[self.idx_pa_to_split]
u_prev_step_parent = self.pa_arr.u_prev_step[self.idx_pa_to_split]
v_prev_step_parent = self.pa_arr.v_prev_step[self.idx_pa_to_split]
rho_parent = self.pa_arr.rho[self.idx_pa_to_split]
rho0_parent = self.pa_arr.rho0[self.idx_pa_to_split]
alpha_parent = self.pa_arr.alpha[self.idx_pa_to_split]
# Vertex daughter particle properties update
n = self.num_vertex_pa_after_single_split
h_vertex_pa = self.pa_h_ratio * np.repeat(h_parent, n)
            h0_vertex_pa = self.pa_h_ratio * np.repeat(h0_parent, n)
#######################################################################
# Copyright (C) #
# 2016 - 2017 <NAME>(<EMAIL>) #
# 2016 <NAME>(<EMAIL>) #
# Permission given to modify the code as long as you keep this #
# declaration at the top #
#######################################################################
from __future__ import print_function
import numpy as np
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
# all states: state 0-5 are upper states
STATES = np.arange(0, 7)
# state 6 is lower state
LOWER_STATE = 6
# discount factor
DISCOUNT = 0.99
# each state is represented by a vector of length 8
FEATURE_SIZE = 8
FEATURES = np.zeros((len(STATES), FEATURE_SIZE))
for i in range(LOWER_STATE):
FEATURES[i, i] = 2
FEATURES[i, 7] = 1
FEATURES[LOWER_STATE, 6] = 1
FEATURES[LOWER_STATE, 7] = 2
# all possible actions
DASHED = 0
SOLID = 1
ACTIONS = [DASHED, SOLID]
# reward is always zero
REWARD = 0
# take @action at @state, return the new state
def takeAction(state, action):
if action == SOLID:
return LOWER_STATE
return np.random.choice(STATES[: LOWER_STATE])
# target policy
def targetPolicy(state):
return SOLID
# state distribution for the behavior policy
stateDistribution = np.ones(len(STATES)) / 7
stateDistributionMat = np.matrix(np.diag(stateDistribution))
# projection matrix for minimize MSVE
projectionMatrix = np.matrix(FEATURES) * \
np.linalg.pinv(np.matrix(FEATURES.T) * stateDistributionMat * np.matrix(FEATURES)) * \
np.matrix(FEATURES.T) * \
stateDistributionMat
# behavior policy
BEHAVIOR_SOLID_PROBABILITY = 1.0 / 7
def behaviorPolicy(state):
if np.random.binomial(1, BEHAVIOR_SOLID_PROBABILITY) == 1:
return SOLID
return DASHED
# Semi-gradient off-policy temporal difference
# @state: current state
# @theta: weight for each component of the feature vector
# @alpha: step size
# @return: next state
def semiGradientOffPolicyTD(state, theta, alpha):
action = behaviorPolicy(state)
nextState = takeAction(state, action)
# get the importance ratio
if action == DASHED:
rho = 0.0
else:
rho = 1.0 / BEHAVIOR_SOLID_PROBABILITY
delta = REWARD + DISCOUNT * np.dot(FEATURES[nextState, :], theta) - \
np.dot(FEATURES[state, :], theta)
delta *= rho * alpha
# derivatives happen to be the same matrix due to the linearity
theta += FEATURES[state, :] * delta
return nextState
# Semi-gradient DP
# @theta: weight for each component of the feature vector
# @alpha: step size
def semiGradientDP(theta, alpha):
delta = 0.0
# go through all the states
for currentState in STATES:
expectedReturn = 0.0
# compute bellman error for each state
for nextState in STATES:
if nextState == LOWER_STATE:
expectedReturn += REWARD + DISCOUNT * np.dot(theta, FEATURES[nextState, :])
bellmanError = expectedReturn - np.dot(theta, FEATURES[currentState, :])
# accumulate gradients
delta += bellmanError * FEATURES[currentState, :]
# derivatives happen to be the same matrix due to the linearity
theta += alpha / len(STATES) * delta
# temporal difference with gradient correction
# @state: current state
# @theta: weight of each component of the feature vector
# @weight: auxiliary trace for gradient correction
# @alpha: step size of @theta
# @beta: step size of @weight
def TDC(state, theta, weight, alpha, beta):
action = behaviorPolicy(state)
nextState = takeAction(state, action)
# get the importance ratio
if action == DASHED:
rho = 0.0
else:
rho = 1.0 / BEHAVIOR_SOLID_PROBABILITY
delta = REWARD + DISCOUNT * np.dot(FEATURES[nextState, :], theta) - \
np.dot(FEATURES[state, :], theta)
theta += alpha * rho * (delta * FEATURES[state, :] - DISCOUNT * FEATURES[nextState, :] * np.dot(FEATURES[state, :], weight))
weight += beta * rho * (delta - np.dot(FEATURES[state, :], weight)) * FEATURES[state, :]
return nextState
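# Illustrative driver sketch (not called anywhere in this module): running TDC
# on this counterexample for a fixed number of behavior-policy steps. The step
# sizes and step count are arbitrary examples rather than the values used in
# the book's figures; theta starts from the usual [1, 1, 1, 1, 1, 1, 10, 1]
# initialization.
def _run_tdc_example(steps=1000, alpha=0.005, beta=0.05):
    theta = np.ones(FEATURE_SIZE)
    theta[6] = 10
    weight = np.zeros(FEATURE_SIZE)
    state = np.random.choice(STATES)
    for _ in range(steps):
        state = TDC(state, theta, weight, alpha, beta)
    return theta, weight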
# expected temporal difference with gradient correction
# @theta: weight of each component of the feature vector
# @weight: auxiliary trace for gradient correction
# @alpha: step size of @theta
# @beta: step size of @weight
def expectedTDC(theta, weight, alpha, beta):
for currentState in STATES:
# When computing expected update target, if next state is not lower state, importance ratio will be 0,
# so we can safely ignore this case and assume next state is always lower state
delta = REWARD + DISCOUNT * np.dot(FEATURES[LOWER_STATE, :], theta) - np.dot(FEATURES[currentState, :], theta)
rho = 1 / BEHAVIOR_SOLID_PROBABILITY
# Under behavior policy, state distribution is uniform, so the probability for each state is 1.0 / len(STATES)
expectedUpdateTheta = 1.0 / len(STATES) * BEHAVIOR_SOLID_PROBABILITY * rho * (
delta * FEATURES[currentState, :] - DISCOUNT * FEATURES[LOWER_STATE, :] * np.dot(weight, FEATURES[currentState, :]))
theta += alpha * expectedUpdateTheta
expectedUpdateWeight = 1.0 / len(STATES) * BEHAVIOR_SOLID_PROBABILITY * rho * (
delta - np.dot(weight, FEATURES[currentState, :])) * FEATURES[currentState, :]
weight += beta * expectedUpdateWeight
# if *accumulate* expected update and actually apply update here, then it's synchronous
# theta += alpha * expectedUpdateTheta
# weight += beta * expectedUpdateWeight
# interest is 1 for every state
INTEREST = 1
# expected update of ETD
# @theta: weight of each component of the feature vector
# @emphasis: current emphasis
# @alpha: step size of @theta
# @return: expected next emphasis
def expectedEmphaticTD(theta, emphasis, alpha):
# we perform synchronous update for both theta and emphasis
expectedUpdate = 0
expectedNextEmphasis = 0.0
# go through all the states
for state in STATES:
# compute rho(t-1)
if state == LOWER_STATE:
rho = 1.0 / BEHAVIOR_SOLID_PROBABILITY
else:
rho = 0
# update emphasis
nextEmphasis = DISCOUNT * rho * emphasis + INTEREST
expectedNextEmphasis += nextEmphasis
# When computing expected update target, if next state is not lower state, importance ratio will be 0,
# so we can safely ignore this case and assume next state is always lower state
nextState = LOWER_STATE
        delta = REWARD + DISCOUNT * np.dot(FEATURES[nextState, :], theta) - np.dot(FEATURES[state, :], theta)
# -*- coding: utf-8 -*-
"""
Created on Sun Feb 17 15:18:25 2013
@author: ash
"""
import numpy as np
import scipy as sp
import scipy.linalg as la
import scipy.sparse.linalg as las
import tdvp_common as tm
import matmul as m
import logging
log = logging.getLogger(__name__)
class PinvOp:
def __init__(self, p, A1, A2, l=None, r=None, left=False, pseudo=True):
assert not (pseudo and (l is None or r is None)), 'For pseudo-inverse l and r must be set!'
self.A1 = A1
self.A2 = A2
self.l = l
self.r = r
self.p = p
self.left = left
self.pseudo = pseudo
self.D = A1.shape[1]
self.shape = (self.D**2, self.D**2)
self.dtype = A1.dtype
self.out = np.empty((self.D, self.D), dtype=self.dtype)
def matvec(self, v):
x = v.reshape((self.D, self.D))
if self.left: #Multiplying from the left, but x is a col. vector, so use mat_dagger
Ehx = tm.eps_l_noop_inplace(x, self.A1, self.A2, self.out)
if self.pseudo:
QEQhx = Ehx - self.l * m.adot(self.r, x)
res = x - sp.exp(-1.j * self.p) * QEQhx
else:
res = x - sp.exp(-1.j * self.p) * Ehx
else:
Ex = tm.eps_r_noop_inplace(x, self.A1, self.A2, self.out)
if self.pseudo:
QEQx = Ex - self.r * m.adot(self.l, x)
res = x - sp.exp(1.j * self.p) * QEQx
else:
res = x - sp.exp(1.j * self.p) * Ex
return res.ravel()
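# Illustrative usage sketch (not called anywhere in this module): wrapping
# PinvOp in a scipy sparse LinearOperator and solving (1 - exp(i p) E) x = b
# with an iterative solver. The tensors and right-hand side below are random
# stand-ins, not quantities used elsewhere in this module.
def _demo_pinv_op(D=4, q=2, p=0.0):
    A = np.random.rand(q, D, D) + 1.j * np.random.rand(q, D, D)
    op = PinvOp(p, A, A, left=False, pseudo=False)
    lin_op = las.LinearOperator(op.shape, matvec=op.matvec, dtype=op.dtype)
    b = np.random.rand(D * D) + 1.j * np.random.rand(D * D)
    x, info = las.bicgstab(lin_op, b)
    return x.reshape((D, D)), info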
def pinv_1mE_brute(A1, A2, l, r, p=0, pseudo=True):
D = A1.shape[1]
E = np.zeros((D**2, D**2), dtype=A1.dtype)
for s in xrange(A1.shape[0]):
E += np.kron(A1[s], A2[s].conj())
    l = np.asarray(l)
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
    Use a regex tag combined with the results of the M2M data request to list the data files in the THREDDS catalog.
    The returned list of files can then be passed to M2M_Data to load the variables into numpy arrays.
    :param data: JSON object returned from M2M data request with details on where the data is to be found for download
    :param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
    :return: list of relative URLs for the NetCDF files in the catalog that match the tag
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist,variables):
thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
#nclist is going to contain more than one url eventually
for jj in range(len(nclist)):
url=nclist[jj]
url=url[25:]
dap_url = thredds + url + '#fillmismatch'
openFile = Dataset(dap_url,'r')
for ii in range(len(variables)):
dum = openFile.variables[variables[ii].name]
variables[ii].data = np.append(variables[ii].data, dum[:].data)
tmp = variables[0].data/60/60/24
time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
return variables, time_converted
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
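# Illustrative workflow sketch (not called anywhere in this module): the
# intended sequence is M2M_URLs -> M2M_Call -> M2M_Files -> M2M_Data. It
# assumes, as the body of M2M_URLs below suggests, that the function returns
# the uframe dataset name together with its populated var_list; the platform,
# date range, and file tag are examples only.
def _demo_m2m_workflow():
    uframe_dataset_name, variables = M2M_URLs('CE01ISSM', 'BUOY', 'MOPAK', 'Telemetered')
    data = M2M_Call(uframe_dataset_name, '2017-01-01T00:00:00.000Z', '2017-02-01T00:00:00.000Z')
    files = M2M_Files(data, '.*MOPAK.*\\.nc$')
    variables, time_converted = M2M_Data(files, variables)
    return variables, time_converted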
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
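# Note: the telemetered METBK (bulk meteorology) blocks above share an identical
# variable/unit list across these surface moorings. Illustrative only, not executed
# here: once the corrected wind components are filled, scalar wind speed follows as
#   wind_speed = np.sqrt(var_list[4].data**2 + var_list[5].data**2)   # m/s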
#FLORT
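# FLORT: three-channel fluorometer returning chlorophyll-a (ug/L), CDOM (ppb),
# optical backscatter (m-1), and total volume scattering (m-1 sr-1).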
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
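# FDCHP: direct-covariance air-sea flux package; only the time coordinate is
# requested from this telemetered stream.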
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
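# DOSTA: dissolved oxygen optode. On the ISSM moorings the optode reports through a
# CTDBP-hosted stream (dosta_ln_optode_oxygen); on the SHSM/OSSM NSIF it reports
# through its own DCL stream, and the CE09OSPM profiler carries a DOFST instead
# (its raw output is a sensor frequency, hence the 'Hz' units below).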
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
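# ADCP: acoustic Doppler current profiler, earth-referenced velocity bins.
# Illustrative only: per-bin horizontal current speed and direction (toward) are
#   speed = np.sqrt(u**2 + v**2)                      # m/s
#   direction = np.degrees(np.arctan2(u, v)) % 360.0  # degrees, direction of flow
# with u = eastward_seawater_velocity and v = northward_seawater_velocity.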
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
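# ZPLSC: bio-acoustic sonar; only the time coordinate is requested from these
# telemetered and recovered_host streams.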
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
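# WAVSS: surface wave spectra package; bulk wave statistics (significant height,
# characteristic periods, mean direction and spread) from the buoy-mounted sensor.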
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
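# VELPT: single-point acoustic velocity meter. Heading/pitch/roll are reported in
# deci-degrees, temperature in hundredths of a degC, and pressure in units of
# 0.001 dbar, per the unit strings below.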
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
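# PCO2W: seawater pCO2 sensor; thermistor temperature (degC) plus derived
# pCO2 of seawater (uatm).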
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
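# PHSEN: seawater pH sensor; thermistor temperature (degC) plus derived pH (unitless).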
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
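# Spectral irradiance radiometer on the NSIF: only the multispectral downwelling irradiance vector and time are mapped.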
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
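# Seafloor pressure recorder on the MFN: absolute bottom pressure and in situ seawater temperature.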
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
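# Moored pumped CTDs: temperature, practical salinity, density, pressure, and conductivity.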
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
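# Single-point velocity meter on the MFN: east/north/up turbulent velocity components plus instrument pressure.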
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
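# Profiler-mounted point velocity meter (vel3d_k): velocities, attitude (heading/pitch/roll), and co-located CTD pressure.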
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
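# Air-sea pCO2 system on the buoy: surface-water and atmospheric CO2 partial pressures and the derived air-sea flux.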
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
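# Photosynthetically available radiation (PAR) from the wire-following profiler, with co-located CTD pressure.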
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
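# Optical absorption/attenuation meter: only the time base is mapped here; spectral channels are not requested.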
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
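# Nitrate (SUNA): raw and salinity-corrected nitrate concentrations from the NSIF.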
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
##
#MOPAK
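# Buoy motion package: only the time base is mapped; accelerometer channels are not requested.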
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
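# Bulk meteorology package on the buoy: sea-surface T/C/S, corrected winds, air pressure and temperature,
# humidity, radiation, precipitation, derived minute fluxes, near-surface currents, and specific humidity.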
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
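# Three-channel fluorometer: scattering, chlorophyll-a, CDOM, volume scattering, and optical backscatter.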
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
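# Direct-covariance flux package: only the time base is mapped here.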
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
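# Dissolved oxygen optodes: oxygen concentrations (plus optode temperature on the NSIF variants).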
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP
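# Acoustic Doppler current profilers: bin depths, attitude, and earth-referenced velocity components.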
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS
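# Bulk surface wave statistics from the buoy wave sensor: zero-crossing counts, wave
# heights (m), wave periods (sec), and mean direction/spread (degrees).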
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
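# VELPT single-point velocity meter: east/north/up velocities (m/s) plus attitude,
# temperature, and pressure fields reported in instrument counts (deci-degrees,
# 0.01 degC, 0.001 dbar).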
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
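# note: for this branch the non-DCL stream below is used; the DCL stream name is
# retained above as a comment for reference.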
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
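# PCO2W pCO2-water sensor: thermistor temperature (degC) and seawater pCO2 (uatm).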
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
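# PHSEN seawater pH sensor: thermistor temperature (degC) and pH (unitless).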
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
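# SPKIR spectral irradiance: downwelling irradiance vector in uW cm-2 nm-1
# (one value per wavelength channel).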
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
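# PRESF seafloor pressure: absolute seafloor pressure (dbar) and seawater
# temperature (degC) from the tide-measurement streams.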
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
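# CTDBP pumped CTD (RecoveredHost): temperature (degC), practical salinity, density
# (kg/m3), pressure (dbar), and conductivity (S/m).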
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
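# VEL3D single-point velocity meter: east/north/up turbulent velocities (m/s) and
# seawater pressure reported in 0.001 dbar.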
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
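# PCO2A air-sea pCO2: surface-seawater and atmospheric pCO2 (uatm) and the derived
# air-sea CO2 flux (mol m-2 s-1).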
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
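# OPTAA absorption/attenuation spectrophotometer: only the time coordinate is
# populated here; the multi-wavelength optical channels are not requested in these
# branches.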
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
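# NUTNR nitrate sensor (SUNA streams): raw and salinity-corrected nitrate
# concentrations in umol/L.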
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
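#CTD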
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
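#ADCP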
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
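#ZPLSC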
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
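#VELPT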
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
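#VEL3D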
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
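#PRESF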
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
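#PHSEN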
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
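#PCO2W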
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
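#PARAD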
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
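#NUTNR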
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
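#FDCHP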
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
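#FLORT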
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
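#DOSTA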
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
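#For the moored DOSTA RecoveredInst cases below, dissolved oxygen is recovered through the co-located CTDBP stream (dosta_abcdjm_ctdbp_instrument_recovered), which also carries the CTD seawater temperature.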
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
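#WAVSS_Stats on the CE01ISSM/CE06ISSM MFNs is mapped to the ADCPT-M wave-statistics log (adcpt_m_instrument_log9_recovered).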
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
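#Cabled Benthic Experiment Package (BEP) streamed data below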
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
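#Streamed dissolved oxygen on the cabled BEPs is served from the same ctdbp_no_sample stream as the CTD above.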
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
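#Streamed OPTAA datasets: only the time coordinate is defined in this mapping.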
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#CSPP (Coastal Surface Piercing Profiler) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
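#CSPP nitrate (NUTNR, RecoveredCSPP): salinity-corrected and raw nitrate concentrations, both reported in umol/L.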
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
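#Shelf surface-piercing profiler (CE02SHSP, CE07SHSP) RecoveredCSPP data below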
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
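#Coastal glider data below: CEGL* platform names map to CE05MOAS glider streams.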
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
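# CE05MOAS glider DOSTA (dissolved oxygen) streams: sci_oxy4_oxygen (umol/L),
# sci_abs_oxygen (umol/kg), interpolated CTD pressure, and position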
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
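# CE05MOAS glider FLORT (fluorometer / optical backscatter) streams: scattering
# coefficient, chlorophyll, CDOM, backscatter, interpolated CTD pressure, and position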
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/telemetered/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/02-FLORTM000/recovered_host/flort_m_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'sci_flbbcd_chlor_units'
var_list[3].name = 'sci_flbbcd_cdom_units'
var_list[4].name = 'sci_flbbcd_bb_units'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[7].name = 'lat'
var_list[8].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
var_list[7].units = 'degree_north'
var_list[8].units = 'degree_east'
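# --- Glider PARAD branches (CE05MOAS fleet) ---
# The branches below select the telemetered (parad_m_glider_instrument) or
# recovered_host (parad_m_glider_recovered) PAR stream for each GL### glider and
# pre-allocate slots for time, parad_m_par, interpolated CTD pressure, lat, and lon.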
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/telemetered/parad_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'PARAD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/01-PARADM000/recovered_host/parad_m_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_m_par'
var_list[2].name = 'int_ctd_pressure'
var_list[3].name = 'lat'
var_list[4].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
var_list[3].units = 'degree_north'
var_list[4].units = 'degree_east'
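# --- Glider-mounted ADCP branches (CE05MOAS fleet, recovered_host only) ---
# Each branch selects the adcp_velocity_glider stream and pre-allocates slots for
# time, bin_depths, heading/pitch/roll, eastward/northward/upward seawater velocity,
# interpolated CTD pressure, lat, and lon.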
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/03-ADCPAM000/recovered_host/adcp_velocity_glider'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[8].name = 'int_ctd_pressure'
var_list[9].name = 'lat'
var_list[10].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
var_list[8].units = 'dbar'
var_list[9].units = 'degree_north'
var_list[10].units = 'degree_east'
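# --- Surface-mooring buoy METBK hourly branches (metbk_hourly) ---
# Each branch pre-allocates the met_timeflx time base plus the hourly met_* products
# visible below (rain rate, buoyancy/heat/latent/momentum/longwave/rain/sensible fluxes,
# specific humidity, stability, air and skin temperature, 10 m wind, net shortwave),
# with units taken directly from the stream metadata.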
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
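# --- Buoy WAVSS mean-directional wave-spectra branches (wavss_a_dcl_mean_directional) ---
# Each branch pre-allocates time, mean_direction, the band layout (number_bands,
# initial_frequency, frequency_spacing), the directional PSD and spread arrays, and the
# corrected mean/directional wave-direction products.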
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
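#WAVSS Non-Directional Wave Spectra Streams (CE02SHSM, CE04OSSM, CE09OSSM, CE07SHSM; telemetered and recovered_host)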
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
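#WAVSS Buoy Motion Streams (heave/north/east offset arrays and magnetically corrected buoy displacements)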
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
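#WAVSS Fourier Coefficient Streams (directional Fourier coefficient 2-D arrays)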
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
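#Coastal Endurance Offshore Profiler Streams (CE04OSPS streamed, CE04OSPD recovered_inst/recovered_wfp)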
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_inst/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/01-CTDPFL105/recovered_wfp/dpc_ctd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'dpc_ctd_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2A-CTDPFA107/streamed/ctdpf_sbe43_sample'
var_list[0].name = 'time'
var_list[1].name = 'corrected_dissolved_oxygen'
var_list[2].name = 'seawater_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_inst/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/06-DOSTAD105/recovered_wfp/dpc_optode_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3A-FLORTD104/streamed/flort_d_data_record'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/04-FLNTUA103/recovered_inst/dpc_flnturtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/03-FLCDRA103/recovered_wfp/dpc_flcdrtd_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'flntu_x_mmp_cds_fluorometric_chlorophyll_a'
var_list[2].name = 'flntu_x_mmp_cds_total_volume_scattering_coefficient'
var_list[3].name = 'flntu_x_mmp_cds_bback_total'
var_list[4].name = 'flcdr_x_mmp_cds_fluorometric_cdom'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'ug/L'
var_list[2].units = 'm-1 sr-1'
var_list[3].units = 'm-1'
var_list[4].units = 'ppb'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/2B-PHSENA108/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3C-PARADA102/streamed/parad_sa_sample'
var_list[0].name = 'time'
var_list[1].name = 'par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/3D-SPKIRA102/streamed/spkir_data_record'
var_list[0].name = 'time'
var_list[1].name = 'spkir_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4A-NUTNRA102/streamed/nutnr_a_sample'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4F-PCO2WA102/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
var_list[3].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/SF01B/4B-VELPTD106/streamed/velpt_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'velpt_d_eastward_velocity'
var_list[2].name = 'velpt_d_northward_velocity'
var_list[3].name = 'velpt_d_upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[9].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
var_list[9].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_inst/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
elif platform_name == 'CE04OSPD' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE04OSPD/DP01B/02-VEL3DA105/recovered_wfp/dpc_acm_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_a_eastward_velocity'
var_list[2].name = 'vel3d_a_northward_velocity'
var_list[3].name = 'vel3d_a_upward_velocity_ascending'
var_list[4].name = 'vel3d_a_upward_velocity_descending'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'm/s'
var_list[5].units = 'dbar'
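#CE04OSPS 200m Platform (PC01B) Streams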
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'seawater_pressure'
var_list[5].name = 'seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'DOSTA' and method == 'Streamed':
#uframe_dataset_name = 'CE04OSPS/PC01B/4A-DOSTAD109/streamed/ctdpf_optode_sample'
uframe_dataset_name = 'CE04OSPS/PC01B/4A-CTDPFA109/streamed/ctdpf_optode_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'seawater_pressure' #also use this for the '4A-DOSTAD109/streamed/ctdpf_optode_sample' stream
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'dbar'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4B-PHSENA106/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSPS' and node == 'PLATFORM200M' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSPS/PC01B/4D-PCO2WA105/streamed/pco2w_a_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#Coastal Pioneer CSM Data Streams
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK2' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
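# Note on the pattern above and below: each (platform_name, node, instrument_class,
# method) combination selects one UFrame/M2M stream path (uframe_dataset_name) and
# declares the variables of interest for that stream. The .data fields are
# intentionally empty placeholders -- they are presumed to be filled later by the
# download/parsing step, which is not part of this excerpt -- and .units are
# descriptive labels only.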
#WAVSS
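# The WAVSS branches below cover the surface-buoy wave sensor streams: bulk wave
# statistics (WAVSS_Stats), mean directional and non-directional spectra
# (WAVSS_MeanDir, WAVSS_NonDir), buoy motion time series (WAVSS_Motion), and the
# directional Fourier coefficients (WAVSS_Fourier).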
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_mean_directional'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_MeanDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_mean_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'mean_direction'
var_list[2].name = 'number_bands'
var_list[3].name = 'initial_frequency'
var_list[4].name = 'frequency_spacing'
var_list[5].name = 'psd_mean_directional'
var_list[6].name = 'mean_direction_array'
var_list[7].name = 'directional_spread_array'
var_list[8].name = 'spread_direction'
var_list[9].name = 'wavss_a_directional_frequency'
var_list[10].name = 'wavss_a_corrected_mean_wave_direction'
var_list[11].name = 'wavss_a_corrected_directional_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degrees'
var_list[2].units = '1'
var_list[3].units = 'Hz'
var_list[4].units = 'Hz'
var_list[5].units = 'm2 Hz-1'
var_list[6].units = 'degrees'
var_list[7].units = 'degrees'
var_list[8].units = 'degrees'
var_list[9].units = 'Hz'
var_list[10].units = 'deg'
var_list[11].units = 'deg'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_non_directional'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_NonDir' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_non_directional_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'psd_non_directional'
var_list[5].name = 'wavss_a_non_directional_frequency'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = 'm2 Hz-1'
var_list[5].units = 'Hz'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_motion'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Motion' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_motion_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_time_samples'
var_list[2].name = 'initial_time'
var_list[3].name = 'time_spacing'
var_list[4].name = 'solution_found'
var_list[5].name = 'heave_offset_array'
var_list[6].name = 'north_offset_array'
var_list[7].name = 'east_offset_array'
var_list[8].name = 'wavss_a_buoymotion_time'
var_list[9].name = 'wavss_a_magcor_buoymotion_x'
var_list[10].name = 'wavss_a_magcor_buoymotion_y'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'sec'
var_list[3].units = 'sec'
var_list[4].units = '1'
var_list[5].units = 'm'
var_list[6].units = 'm'
var_list[7].units = 'm'
var_list[8].units = 'seconds since 1900-01-01'
var_list[9].units = 'm'
var_list[10].units = 'm'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_fourier'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'WAVSS_Fourier' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_fourier_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_bands'
var_list[2].name = 'initial_frequency'
var_list[3].name = 'frequency_spacing'
var_list[4].name = 'number_directional_bands'
var_list[5].name = 'initial_directional_frequency'
var_list[6].name = 'directional_frequency_spacing'
var_list[7].name = 'fourier_coefficient_2d_array'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = '1'
var_list[2].units = 'Hz'
var_list[3].units = 'Hz'
var_list[4].units = '1'
var_list[5].units = 'Hz'
var_list[6].units = 'Hz'
var_list[7].units = '1'
#PCO2A
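# The PCO2A branches expose the partial pressure of CO2 in surface seawater and in
# the overlying air (both in uatm), together with the derived air-sea CO2 flux
# (pco2_co2flux, mol m-2 s-1).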
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PCO2A
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CP04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#FDCHP
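# For the FDCHP (direct-covariance flux package) streams, only the time coordinate
# is mapped here; no science variables are requested in this script.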
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
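# The METBK1-hr branches below request the hourly bulk-flux products derived from
# the METBK measurements (e.g. latent, sensible and net longwave heat fluxes,
# momentum flux, rain rate). These are computed quantities rather than raw sensor
# readings, hence the separate 'metbk_hourly' stream.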
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP01CNSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP01CNSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP03ISSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'RecoveredHost':
uframe_dataset_name = 'CP03ISSM/SBD11/06-METBKA000/recovered_host/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'mm/hr'
var_list[2].units = 'W/m2'
var_list[3].units = 'W/m2'
var_list[4].units = 'mm/hr'
var_list[5].units = 'W/m2'
var_list[6].units = 'W/m2'
var_list[7].units = 'N/m2'
var_list[8].units = 'W/m2'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'g/kg'
var_list[12].units = 'unitless'
var_list[13].units = 'degC'
var_list[14].units = 'degC'
var_list[15].units = 'm/s'
var_list[16].units = 'W/m2'
elif platform_name == 'CP04OSSM' and node == 'BUOY' and instrument_class == 'METBK1-hr' and method == 'Telemetered':
uframe_dataset_name = 'CP04OSSM/SBD11/06-METBKA000/telemetered/metbk_hourly'
var_list[0].name = 'met_timeflx'
var_list[1].name = 'met_rainrte'
var_list[2].name = 'met_buoyfls'
var_list[3].name = 'met_buoyflx'
var_list[4].name = 'met_frshflx'
var_list[5].name = 'met_heatflx'
var_list[6].name = 'met_latnflx'
var_list[7].name = 'met_mommflx'
var_list[8].name = 'met_netlirr'
var_list[9].name = 'met_rainflx'
var_list[10].name = 'met_sensflx'
var_list[11].name = 'met_sphum2m'
var_list[12].name = 'met_stablty'
var_list[13].name = 'met_tempa2m'
var_list[14].name = 'met_tempskn'
var_list[15].name = 'met_wind10m'
var_list[16].name = 'met_netsirr_hourly'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
# Copyright 2021 The Trieste Contributors
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
In this module, we test the *behaviour* of trieste models against reference GPflow models (thus
implicitly assuming the latter are correct).
*NOTE:* Where GPflow models are used as the underlying model in a trieste model, we should
*not* test that the underlying model is used in any particular way. To do so would break
encapsulation. For example, we should *not* test that methods on the GPflow models are called
(except in the rare case that such behaviour is an explicitly documented behaviour of the
trieste model).
"""
from __future__ import annotations
import unittest.mock
from typing import Any, cast
import gpflow
import numpy as np
import numpy.testing as npt
import pytest
import tensorflow as tf
import tensorflow_probability as tfp
from gpflow.models import SGPR, SVGP, VGP
from tests.util.misc import random_seed
from tests.util.models.gpflow.models import (
ModelFactoryType,
gpr_model,
mock_data,
sgpr_model,
svgp_model,
two_output_svgp_model,
vgp_matern_model,
vgp_model,
)
from tests.util.models.models import fnc_2sin_x_over_3, fnc_3x_plus_10
from trieste.data import Dataset
from trieste.logging import step_number, tensorboard_writer
from trieste.models import TrainableProbabilisticModel
from trieste.models.config import create_model
from trieste.models.gpflow import (
GaussianProcessRegression,
SparseVariational,
VariationalGaussianProcess,
)
from trieste.models.gpflow.models import NumDataPropertyMixin
from trieste.models.gpflow.sampler import RandomFourierFeatureTrajectorySampler
from trieste.models.optimizer import BatchOptimizer, DatasetTransformer, Optimizer
def _3x_plus_gaussian_noise(x: tf.Tensor) -> tf.Tensor:
return 3.0 * x + np.random.normal(scale=0.01, size=x.shape)
#!/usr/bin/env python
from __future__ import division, absolute_import, print_function
import numpy as np
import scipy.optimize as opt # curve_fit, fmin, fmin_tnc
import jams.functions as functions # from jams
from jams.mad import mad # from jams
import warnings
# import pdb
# ----------------------------------------------------------------------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
method='reichstein', shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data.
It uses either
1. a fit of Reco vs. temperature to all nighttime data, or
2. several fits over the season of Reco vs. temperature as in Reichstein et al. (2005), or
3. the daytime method of Lasslop et al. (2010),
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
method='reichstein', shape=False, masked=False, nogppnight=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Optional Input
--------------
If method = 'day' | 'lasslop', extra inputs are
rg global radiation, i.e. shortwave down [W m-2]
vpd vapour pressure deficit [Pa]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
method if 'global' | 'falge': fit of Reco vs. temperature to all nighttime data
if 'local' | 'reichstein': method of Reichstein et al. (2005)
if 'day' | 'lasslop': method of Lasslop et al. (2010)
shape if False then outputs are 1D arrays;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef
if True: return masked arrays where outputs would be undef
If method = 'night' | 'reichstein', extra parameters are
nogppnight if True: Resp=NEE, GPP=0 at night, GPP always positive
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Ouput
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
Negative respiration possible at night when gpp is forced to 0 with nogppnight=True
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual sums of net ecosystem exchange
Agricultural and Forest Meteorology 107, 43-69
Lasslop et al. (2010)
Separation of net ecosystem exchange into assimilation and respiration using
a light response curve approach: critical issues and global evaluation
Global Change Biology 16, 187-208
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(np.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
>>> VPD = np.squeeze(dat[8,:])
>>> vpd = np.where(VPD == undef, undef, VPD*100.)
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, rg, vpd, undef=undef, method='day')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 2.78457540e+00
6.63212545e+00 8.88902165e+00 6.74243873e+00 9.51364527e+00]
>>> print(Reco[1120:1128])
[0.28786696 0.34594516 0.43893276 0.5495954 0.70029545 0.90849165
1.15074873 1.46137527]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2014 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - wrapper for individual routines nee2gpp_reichstein etc.
MC, Feb 2013 - ported to Python 3
MC, May 2013 - replaced cost functions by general cost function cost_abs if possible
AP, Aug 2014 - replaced fmin with fmin_tnc to permit params<0,
permit gpp<0 at any time if nogppnight=True
"""
# Global relationship in Reichstein et al. (2005)
if ((method.lower() == 'global') | (method.lower() == 'falge')):
return nee2gpp_falge(dates, nee, t, isday, undef=undef, shape=shape, masked=masked)
# Local relationship = Reichstein et al. (2005)
elif ((method.lower() == 'local') | (method.lower() == 'reichstein')):
return nee2gpp_reichstein(dates, nee, t, isday, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Lasslop et al. (2010) method
elif ((method.lower() == 'day') | (method.lower() == 'lasslop')):
return nee2gpp_lasslop(dates, nee, t, isday, rg, vpd, undef=undef, shape=shape, masked=masked, nogppnight=nogppnight)
# Include new methods here
else:
raise ValueError('Error nee2gpp: method not implemented yet.')
# ----------------------------------------------------------------------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan,
shape=False, masked=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using a fit of Reco vs. temperature to all nighttime data,
in order to calculate Reco and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_falge(dates, nee, t, isday, undef=np.nan, shape=False, masked=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
shape if False then outputs are 1D arrays;
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef
if True: return masked arrays where outputs would be undef
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Falge et al. (2001)
Gap filling strategies for defensible annual sums of net ecosystem exchange
Agricultural and Forest Meteorology 107, 43-69
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='global')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.33166157e+00
8.18228013e+00 1.04092252e+01 8.19395317e+00 1.08427448e+01]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any
inshape = nee.shape
dates = np.squeeze(dates)
nee = np.squeeze(nee)
t = np.squeeze(t)
isday = np.squeeze(isday)
# Check squeezed shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed dates must be 1D array.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed nee must be 1D array.')
if t.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed t must be 1D array.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_falge: squeezed isday must be 1D array.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_falge: inputs must have the same size.')
# Transform to masked array with 1D mask
nee = np.ma.array(nee, mask=False)
t = np.ma.array(t, mask=False)
isday = np.ma.array(isday, mask=False)
# mask also undef
if np.isnan(undef):
if np.ma.any(np.isnan(nee)): nee[np.isnan(nee)] = np.ma.masked
if np.ma.any(np.isnan(t)): t[np.isnan(t)] = np.ma.masked
if np.ma.any(np.isnan(isday)): isday[np.isnan(isday)] = np.ma.masked
else:
if np.ma.any(nee==undef): nee[nee==undef] = np.ma.masked
if np.ma.any(t==undef): t[t==undef] = np.ma.masked
if np.ma.any(isday==undef): isday[isday==undef] = np.ma.masked
# Partition - Global relationship as in Falge et al. (2001)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = np.where(~mask)[0]
tt = np.ma.compressed(t[ii])
net = np.ma.compressed(nee[ii])
# p, c = opt.curve_fit(functions.lloyd_fix, tt, net, p0=[2.,200.]) # global parameter, global cov matrix
#p = opt.fmin(functions.cost_lloyd_fix, [2.,200.], args=(tt, net), disp=False)
p = opt.fmin(functions.cost_abs, [2.,200.], args=(functions.lloyd_fix_p, tt, net), disp=False)
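# The fitted function is the Lloyd & Taylor (1994) respiration model, which
# functions.lloyd_fix is assumed to implement (a sketch, not verified against jams):
#   Reco(T) = p[0] * exp(p[1] * (1/(Tref - T0) - 1/(T - T0)))
# with Tref = 283.15 K and T0 = 227.13 K, so p[0] is the respiration rate at 10 degC
# and p[1] is the temperature sensitivity E0 [K].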
Reco = np.ones(ndata)*undef
ii = np.where(~t.mask)[0]
Reco[ii] = functions.lloyd_fix(t[ii], p[0], p[1])
# GPP
GPP = np.ones(ndata)*undef
ii = np.where(~(t.mask | nee.mask))[0]
GPP[ii] = Reco[ii] - nee[ii]
# Return
if masked:
if np.isnan(undef):
GPP = np.ma.array(GPP, mask=np.isnan(GPP))
Reco = np.ma.array(Reco, mask=np.isnan(Reco))
else:
GPP = np.ma.array(GPP, mask=(GPP == undef))
Reco = np.ma.array(Reco, mask=(Reco == undef))
if shape != False:
if shape != True:
return np.reshape(GPP,shape), np.reshape(Reco,shape)
else:
return np.reshape(GPP,inshape), np.reshape(Reco,inshape)
else:
return GPP, Reco
# ----------------------------------------------------------------------
def nee2gpp_reichstein(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan,
shape=False, masked=False, nogppnight=False):
"""
Calculate photosynthesis (GPP) and ecosystem respiration (Reco) from original
Eddy flux data, using several fits of Reco vs. temperature of nighttime data
over the season, as in Reichstein et al. (2005), in order to calculate Reco
and then GPP = Reco - NEE.
Definition
----------
def nee2gpp_reichstein(dates, nee, t, isday, rg=False, vpd=False, undef=np.nan, shape=False, masked=False, nogppnight=False):
Input
-----
Inputs are 1D arrays that can be masked or not.
dates julian days
nee net ecosystem exchange (uptake is <0) [umol m-2 s-1]
t temperature [K]
Parameters
----------
undef undefined values in data (default: np.nan)
Input arrays will be masked at undef, keeping the original mask
shape if False then outputs are 1D arrays (default)
if True, output have the same shape as datain
if a shape tuple is given, then this tuple is used to reshape
masked if False: outputs are undef where nee and t are masked or undef (default)
if True: return masked arrays where outputs would be undef
nogppnight if True: Resp=NEE, GPP=0 at night
if False: Resp=lloyd_taylor, GPP=Resp-NEE at night (default)
Output
-----
GPP, Reco photosynthesis, ecosystem respiration
Restrictions
------------
None.
Literature
----------
Reichstein et al. (2005)
On the separation of net ecosystem exchange into assimilation and ecosystem
respiration: review and improved algorithm.
Global Change Biology 11, 1424-1439
Examples
--------
>>> from jams.fread import fread # from jams
>>> from jams.date2dec import date2dec # from jams
>>> dat = fread('test_nee2gpp.csv', skip=2, transpose=True)
>>> dates = date2dec(dy=dat[0,:], mo=dat[1,:], yr=dat[2,:], hr=dat[3,:], mi=dat[4,:])
>>> NEE = np.squeeze(dat[5,:])
>>> rg = np.squeeze(dat[6,:])
>>> tair = np.squeeze(dat[7,:])
>>> undef = -9999.
>>> isday = np.where(rg > 10., True, False)
>>> tt = np.where(tair == undef, undef, tair+273.15)
>>> # partition
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> print(Reco[1120:1128])
[1.68311981 1.81012431 1.9874173 2.17108871 2.38759152 2.64372415
2.90076664 3.18592735]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='local')
>>> print(GPP[1120:1128])
[-9.99900000e+03 -9.99900000e+03 -9.99900000e+03 4.40606871e+00
8.31942152e+00 1.06242542e+01 8.49245664e+00 1.12381973e+01]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='Reichstein', masked=True)
>>> print(GPP[1120:1128])
[-- -- -- 4.406068706013192 8.319421516040766 10.624254150217764
8.492456637225963 11.238197347837367]
>>> GPP, Reco = nee2gpp(dates, NEE, tt, isday, undef=undef, method='reichstein', shape=(np.size(NEE),1))
>>> print(GPP[1120:1128])
[[-9.99900000e+03]
[-9.99900000e+03]
[-9.99900000e+03]
[ 4.40606871e+00]
[ 8.31942152e+00]
[ 1.06242542e+01]
[ 8.49245664e+00]
[ 1.12381973e+01]]
License
-------
This file is part of the JAMS Python package, distributed under the MIT
License. The JAMS Python package originates from the former UFZ Python library,
Department of Computational Hydrosystems, Helmholtz Centre for Environmental
Research - UFZ, Leipzig, Germany.
Copyright (c) 2012-2013 <NAME>, <NAME> - mc (at) macu (dot) de
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
History
-------
Written MC, Mar 2012
Modified AP, Mar 2012 - undef=np.nan
MC, Nov 2012 - individual routine
MC, Feb 2013 - ported to Python 3
"""
# Checks
# remember shape if any
if shape != False:
if shape != True:
inshape = shape
else:
inshape = nee.shape
dates = np.squeeze(dates)
nee = np.squeeze(nee)
t = np.squeeze(t)
isday = np.squeeze(isday)
if shape == False: inshape = nee.shape
# Check squeezed shape
if dates.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed dates must be 1D array.')
if nee.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed nee must be 1D array.')
if t.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed t must be 1D array.')
if isday.ndim != 1: raise ValueError('Error nee2gpp_reichstein: squeezed isday must be 1D array.')
ndata = dates.size
if ((nee.size != ndata) | (t.size != ndata) | (isday.size != ndata)):
raise ValueError('Error nee2gpp_reichstein: inputs must have the same size.')
# Transform to masked array with 1D mask
nee = np.ma.array(nee, mask=False)
t = np.ma.array(t, mask=False)
isday = np.ma.array(isday, mask=False)
# mask also undef
if np.isnan(undef):
if np.ma.any(np.isnan(nee)): nee[np.isnan(nee)] = np.ma.masked
if np.ma.any(np.isnan(t)): t[np.isnan(t)] = np.ma.masked
if np.ma.any(np.isnan(isday)): isday[np.isnan(isday)] = np.ma.masked
else:
if np.ma.any(nee==undef): nee[nee==undef] = np.ma.masked
if np.ma.any(t==undef): t[t==undef] = np.ma.masked
if np.ma.any(isday==undef): isday[isday==undef] = np.ma.masked
# Partition - Local relationship = Reichstein et al. (2005)
# Select valid nighttime
mask = isday | nee.mask | t.mask | isday.mask
ii = np.where(~mask)[0]
if (ii.size==0):
print('Warning nee2gpp_reichstein: no valid nighttime data.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
jul = dates[ii]
tt = np.ma.compressed(t[ii])
net = np.ma.compressed(nee[ii])
# 1. each 5 days, in 15 day period, fit if range of T > 5
locp = [] # local param
locs = [] # local err
dmin = np.floor(np.amin(jul)).astype(int) # be aware that julian days starts at noon, i.e. 1.0 is 12h
dmax = np.ceil(np.amax(jul)).astype(int) # so the search will be from noon to noon and thus includes all nights
for i in range(dmin,dmax,5):
iii = np.where((jul>=i) & (jul<(i+14)))[0]
niii = iii.size
if niii > 6:
tt1 = tt[iii]
net1 = net[iii]
mm = ~mad(net1, z=4.5) # make fit more robust by removing outliers
if (np.ptp(tt[iii]) >= 5.) & (np.sum(mm) > 6):
# print(i)
#p = opt.fmin(functions.cost_lloyd_fix, [2.,200.], args=(tt1[mm], net1[mm]), disp=False) # robust params
p, temp1, temp2 = opt.fmin_tnc(functions.cost_lloyd_fix, [2.,200.], bounds=[[0.,None],[0.,None]],
args=(tt1[mm], net1[mm]),
approx_grad=True, disp=False)
try:
p1, c = opt.curve_fit(functions.lloyd_fix, tt1[mm], net1[mm], p0=p, maxfev=10000) # params, covariance
if np.all(np.isfinite(c)): # possible return of curvefit: c=inf
s = np.sqrt(np.diag(c))
else:
s = 10.*np.abs(p)
except:
s = 10.*np.abs(p)
locp += [p]
locs += [s]
# if ((s[1]/p[1])<0.5) & (p[1] > 0.): pdb.set_trace()
if len(locp) == 0:
raise ValueError('Error nee2gpp_reichstein: No local relationship found.')
print('Warning nee2gpp_reichstein: No local relationship found.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
locp = np.squeeze(np.array(locp).astype(float))
locs = np.squeeze(np.array(locs).astype(float))
# 2. E0 = avg of best 3
# Reichstein et al. (2005), p. 1430, 1st paragraph.
with warnings.catch_warnings():
warnings.simplefilter("ignore")
iii = np.where((locp[:,1] > 0.) & (locp[:,1] < 450.) & (np.abs(locs[:,1]/locp[:,1]) < 0.5))[0]
niii = iii.size
if niii==0:
# raise ValueError('Error nee2gpp_reichstein: No good local relationship found.')
# loosen the criteria: take the best three estimates anyway
iii = np.where((locp[:,1] > 0.))[0]
niii = iii.size
if niii<1:
raise ValueError('Error nee2gpp_reichstein: No E0>0 found.')
print('Warning nee2gpp_reichstein: No E0>0 found.')
if masked:
GPP = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
Reco = np.ma.array(np.reshape(nee,inshape), mask=np.ones(inshape, dtype=bool))
else:
GPP = np.ones(inshape)*undef
Reco = np.ones(inshape)*undef
return GPP, Reco
lp = locp[iii,:]
ls = locs[iii,:]
iis = np.argsort(ls[:,1])
bestp = np.mean(lp[iis[0:np.minimum(3,niii)],:],axis=0)
bests = np.mean(ls[iis[0:np.minimum(3,niii)],:],axis=0)
elif niii==1:
bestp = np.squeeze(locp[iii,:])
bests = np.squeeze(locs[iii,:])
elif niii==2:
bestp = np.mean(locp[iii,:],axis=0)
bests = np.mean(locs[iii,:],axis=0)
# ls = locs[iii,:]
# iis = np.argsort(ls[:,1])
else:
lp = locp[iii,:]
ls = locs[iii,:]
iis = np.argsort(ls[:,1])
bestp = np.mean(lp[iis[0:3],:],axis=0)
bests = np.mean(ls[iis[0:3],:],axis=0)
#!/usr/bin/python2.5
#
# Copyright 2014 <NAME>.
#
# Author: <NAME> (<EMAIL>)
#
# Permission is hereby granted, free of charge, to any person obtaining a copy
# of this software and associated documentation files (the "Software"), to deal
# in the Software without restriction, including without limitation the rights
# to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
# copies of the Software, and to permit persons to whom the Software is
# furnished to do so, subject to the following conditions:
#
# The above copyright notice and this permission notice shall be included in
# all copies or substantial portions of the Software.
#
# THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
# IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
# FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
# AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
# LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
# OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN
# THE SOFTWARE.
#
# See http://creativecommons.org/licenses/MIT/ for more information.
#
# -----------------------------------------------------------------------------
#
# Waveform definitions.
import numpy
waveforms = []
"""----------------------------------------------------------------------------
Waveshaper for audio rate
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 1024
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
linear = x
sin = (1.0 - numpy.cos(numpy.pi * x)) / 2.0
tan = numpy.arctan(8 * numpy.cos(numpy.pi * x))
scale = tan.max()
tan = (1.0 - tan / scale) / 2.0
inverse_sin = numpy.arccos(1 - 2 * x) / numpy.pi
inverse_tan = numpy.arccos(numpy.tan(scale * (1.0 - 2.0 * x)) / 8.0) / numpy.pi
def audio_rate_flip(x):
x = numpy.array(list(-x[WAVESHAPER_SIZE:0:-1]) + list(x))
return numpy.round((x * 32767.0)).astype(int)
audio_rate_tables = []
audio_rate_tables.append(('inverse_tan_audio', audio_rate_flip(inverse_tan)))
audio_rate_tables.append(('inverse_sin_audio', audio_rate_flip(inverse_sin)))
audio_rate_tables.append(('linear_audio', audio_rate_flip(linear)))
audio_rate_tables.append(('sin_audio', audio_rate_flip(sin)))
audio_rate_tables.append(('tan_audio', audio_rate_flip(tan)))
waveforms.extend(audio_rate_tables)
"""----------------------------------------------------------------------------
Waveshaper for control rate
----------------------------------------------------------------------------"""
WAVESHAPER_SIZE = 1024
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / float(WAVESHAPER_SIZE)
linear = x
sin = (1.0 - numpy.cos(numpy.pi * x)) / 2.0
inverse_sin = numpy.arccos(1 - 2 * x) / numpy.pi
expo = 1.0 - numpy.exp(-3 * x)
expo_max = expo.max()
expo /= expo_max
expo_flipped = (1.0 - numpy.exp(-3 * (1 - x))) / expo_max
log = numpy.log(1.0 - x * expo_max) / -3.0
log_flipped = numpy.log(1.0 - (1 - x) * expo_max) / -3.0
def control_rate_flip(x, y):
x = numpy.array(list(x) + list(y[1:]))
return numpy.round((x * 32767.0)).astype(int)
control_rate_tables = []
control_rate_tables.append(
('reversed_control', control_rate_flip(log, 1.0 - log)))
control_rate_tables.append(
('spiky_exp_control', control_rate_flip(log, log_flipped)))
control_rate_tables.append(
('spiky_control', control_rate_flip(inverse_sin, 1.0 - inverse_sin)))
control_rate_tables.append(
('linear_control', control_rate_flip(linear, 1.0 - linear)))
control_rate_tables.append(
('bump_control', control_rate_flip(sin, 1.0 - sin)))
control_rate_tables.append(
('bump_exp_control', control_rate_flip(expo, expo_flipped)))
control_rate_tables.append(
('normal_control', control_rate_flip(expo, 1.0 - expo)))
waveforms.extend(control_rate_tables)
"""----------------------------------------------------------------------------
Post waveshaper
----------------------------------------------------------------------------"""
x = numpy.arange(0, WAVESHAPER_SIZE + 1) / (WAVESHAPER_SIZE / 2.0) - 1.0
x[-1] = x[-2]
sine = numpy.sin(8 * numpy.pi * x)
window = numpy.exp(-x * x * 4) ** 2
bipolar_fold = sine * window + numpy.arctan(3 * x) * (1 - window)
bipolar_fold /= numpy.abs(bipolar_fold).max()
waveforms.append(('bipolar_fold', numpy.round(32767 * bipolar_fold)))
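
# Illustrative sanity check (an addition, not part of the original resource script):
# every lookup table above is scaled to the signed 16-bit sample range, so the
# largest magnitude in any table should not exceed 32767.
if __name__ == '__main__':
    for _name, _table in waveforms:
        assert abs(numpy.asarray(_table)).max() <= 32767, _name
        print(_name, len(_table))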
import numpy as np
from highway_env.road.road import Road, RoadNetwork
from highway_env.vehicle.controller import ControlledVehicle
from highway_env.vehicle.kinematics import Vehicle
from highway_env.road.lane import LineType, StraightLane, SineLane, CircularLane, AbstractLane
from highway_env.road.regulation import RegulatedRoad
from highway_env.vehicle.objects import Obstacle
from highway_env.vehicle.behavior import CustomVehicle
from highway_env import utils
from highway_env.road.lane import CircularLane
from highway_env.utils import near_split
from gym.utils import seeding
import random
import copy
class Scenario:
def __init__(self, env, scenario_number=0):
self.env = env
self.env.default_config = copy.deepcopy(env.config)
self.road = None
self.controlled_vehicles = None
# self.road_types = ["intersection", "roundabout", "highway","twoway","uturn","road_merge","road_exit"]
self.road_types = self.env.config['scenario']['road_types']
# self.road_types = ["road_exit"]
self.complex = self.env.config['scenario']['complex']
self.simple = self.env.config['scenario']['simple']
self.road_types_idx = -1
# self.road_missions = ["merging","exit"]
if scenario_number != 0:
if scenario_number == 2:
self.env.config.update(self.default_config_merge())
if scenario_number == 3:
self.env.config.update(self.default_config_exit())
self.random_scenario = self.env.config['scenario']['random_scenario']
if self.random_scenario:
# idx = np.random.randint(0, len(self.road_types))
self.road_types_idx = idx =self.env.episode%len(self.road_types)
self.road_type = self.road_types[idx]
self.env.config['scenario']['road_type'] = self.road_type
if self.road_type == "road_merge":
self.mission_type ="merging"
self.env.config['screen_width'] = 2900
self.env.config['screen_height'] = 300
elif self.road_type == "road_exit":
self.mission_type = "exit"
self.env.config['screen_width'] = 2900
self.env.config['screen_height'] = 300
elif self.road_type == "intersection":
self.env.config['screen_width'] =900
self.env.config['screen_height'] = 900
self.env.config['controlled_vehicle']['controlled_vehicle_speed'] = 15
self.mission_type = "none"
elif self.road_type == "roundabout":
self.env.config['screen_width'] = 900
self.env.config['screen_height'] = 900
self.mission_type = "none"
elif self.road_type == "uturn":
self.env.config['screen_width'] = 1000
self.env.config['screen_height'] = 500
self.mission_type = "none"
else:
self.env.config['screen_width'] = 2900
self.env.config['screen_height'] = 300
self.mission_type = "none"
self.env.config['scenario']['mission_type'] = self.mission_type
else:
self.road_type = self.env.config['scenario']['road_type']
random_offset = copy.deepcopy(self.env.config['scenario']['random_offset'])
delta_before, delta_converging, delta_merge = (0, 0, 0)
if self.env.config['scenario']['randomize_before']:
            delta_before = np.random.randint(low=random_offset[0], high=random_offset[1])
import numpy as np
import scipy.sparse as sp
from sklearn import preprocessing
class NMFADMM:
r"""An implementation of `"NMF-ADMM" <http://statweb.stanford.edu/~dlsun/papers/nmf_admm.pdf>`_
from the ICASSP '14 paper "Alternating Direction Method of Multipliers for
Non-Negative Matrix Factorization with the Beta-Divergence". The procedure
learns an embedding of the normalized adjacency matrix with by using the alternating
direction method of multipliers to solve a non negative matrix factorization problem.
"""
def __init__(self, dimensions: int = 32, iterations: int = 100, rho: float = 1.0, seed: int = None):
self.dimensions = dimensions
self.iterations = iterations
self.rho = rho
self.seed = seed
def _init_weights(self):
"""
Initializing model weights.
"""
self._W = np.random.uniform(-0.1, 0.1, (self._V.shape[0], self.dimensions))
self._H = np.random.uniform(-0.1, 0.1, (self.dimensions, self._V.shape[1]))
X_i, Y_i = np.nonzero(self._V)
scores = self._W[X_i] * self._H[:, Y_i].T + np.random.uniform(0, 1, (self.dimensions, ))
values = np.sum(scores, axis=-1)
self._X = sp.coo_matrix((values, (X_i, Y_i)), shape=self._V.shape)
self._W_plus = np.random.uniform(0, 0.1, (self._V.shape[0], self.dimensions))
self._H_plus = np.random.uniform(0, 0.1, (self.dimensions, self._V.shape[1]))
        self._alpha_X = sp.coo_matrix((np.zeros(values.shape[0]), (X_i, Y_i)), shape=self._V.shape)
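
# Minimal illustrative exercise of the initialization above. Assumption (not shown in
# this fragment): in the full implementation _V, the normalized adjacency matrix, is
# built inside a fit() method; here a small random non-negative matrix is assigned
# directly just to demonstrate the resulting factor shapes.
if __name__ == '__main__':
    _demo = NMFADMM(dimensions=4)
    _demo._V = sp.coo_matrix(np.random.uniform(0.0, 1.0, (10, 10)))
    _demo._init_weights()
    print(_demo._W.shape, _demo._H.shape)  # expected: (10, 4) (4, 10)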
from tsfresh.examples.robot_execution_failures import download_robot_execution_failures, \
load_robot_execution_failures
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
from glob import glob
import numpy as np
import pandas as pd
from sklearn.preprocessing import StandardScaler
from sklearn.model_selection import train_test_split
from sklearn.ensemble import RandomForestClassifier
from sklearn.ensemble import AdaBoostClassifier
from sklearn.tree import DecisionTreeClassifier
from sklearn.metrics import classification_report
from sklearn.model_selection import StratifiedKFold
from tsfresh.transformers import RelevantFeatureAugmenter
from tsfresh.utilities.dataframe_functions import impute
from tsfresh.feature_extraction import ComprehensiveFCParameters
settings = ComprehensiveFCParameters()
from tsfresh import extract_features
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from tsfresh.feature_selection.relevance import calculate_relevance_table
from pca import PCAForPandas
from dtwnn import KnnDtw
from boruta import BorutaPy
import copy
import time
import sys
import csv
import matplotlib.colors as mcolors
# adjust for testing, but the full run requires 10 stratified sample folds
num_folds = 10
# tell pandas to consider infinity as a missing value (for filtering)
pd.options.mode.use_inf_as_na = True
# record our overall start time for time delta display in log messages
mark = time.time()
# return value to indicate that the test for a fold failed and should be ignored
ignore_this_fold = {
'rfc': -1,
'ada': -1,
'rfc_count': -1,
'ada_count': -1,
}
# read both the TEST and TRAIN files for a particular
# dataset into a single set, then partition the data
# and label into X and y DataFrames
def get_combined_raw_dataset(root_path: str):
name = root_path.split('/')[2]
raw_train = pd.read_csv(root_path + name + '_TRAIN.tsv', delimiter='\t', header=None)
raw_test = pd.read_csv(root_path + name + '_TEST.tsv', delimiter='\t', header=None)
combined = raw_train.append(raw_test)
v = combined.reset_index().drop(['index'], axis=1)
X = v.iloc[:,1:]
y = v.iloc[:,:1]
return (X, y)
# convert a raw dataframe into the vertically oriented
# format that tsfresh requires for feature extraction
def raw_to_tsfresh(X, y):
ids = []
values = []
ys = []
indices = []
for id, row in X.iterrows():
c = (y.loc[[id], :]).iloc[0][0]
ys.append(int(c))
indices.append(id)
first = True
for v in row:
if (not first):
ids.append(id)
values.append(float(v))
first = False
d = { 'id': ids, 'value': values }
return (pd.DataFrame(data=d), pd.Series(data=ys, index=indices))
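# Illustrative example with hypothetical toy data (not from the experiments): a wide
# 2-sample, 3-point frame becomes the long id/value format that tsfresh expects.
# Note that the loop above skips the first value of every row.
#   X_toy = pd.DataFrame([[1.0, 2.0, 3.0], [4.0, 5.0, 6.0]])
#   y_toy = pd.DataFrame([[0], [1]])
#   long_df, labels = raw_to_tsfresh(X_toy, y_toy)
#   # long_df -> id: [0, 0, 1, 1], value: [2.0, 3.0, 5.0, 6.0]
#   # labels  -> pd.Series([0, 1], index=[0, 1])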
# helper function to filter features out of a dataframe given
# a calculated tsfresh relevance table (R)
def filter_features(df, R):
for id, row in R.iterrows():
if (row['relevant'] == False):
df = df.drop([row['feature']], axis=1)
return df
# calculate the accuracy rate of a prediction
def accuracy_rate(predicted, actual):
correct = 0
for p, a in zip(predicted, actual):
if (p == a):
correct += 1
return correct / len(predicted)
# a single place to configure our RFC and ADA classifiers:
def build_rfc():
return RandomForestClassifier()
def build_ada():
return AdaBoostClassifier()
# Perform the standard FRESH algorithm
def perform_fresh(X_train, y_train, X_test, y_test):
log('Processing fresh')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
extracted_train = extracted_train.dropna(axis='columns')
# We run FRESH and its variants first at the default fdr level of 0.05,
# but if it returns 0 features (why?) then we lower the value and try
# again.
filtered_train = None
for fdr in [0.05, 0.01, 0.005, 0.001, 0.00001]:
log('Using ' + str(fdr))
R = calculate_relevance_table(extracted_train, y_train.squeeze(), fdr_level=fdr)
filtered_train = filter_features(extracted_train, R)
if (filtered_train.shape[1] > 0):
break
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = filter_features(extracted_test, R)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Safely executes a feature-based fold run, catching any
# exceptions so that we simply ignore this failed fold. This
# was added to make FRESH and its variants more robust, as
# sometimes a single fold out of 10 in FRESH would fail as
# the algorithm (even at low fdr settings) would report zero
# relevant features
def run_safely(f, X_train, y_train, X_test, y_test):
try:
return f(X_train, y_train, X_test, y_test)
except:
return ignore_this_fold
# FRESH variant with PCA run on the extracted relevant features
def perform_fresh_pca_after(X_train, y_train, X_test, y_test):
log('Processing fresh_pca_after')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
# For some reason, tsfresh is extracting features that contain Nan,
# Infinity or None. This breaks the PCA step. To avoid this, we
# drop columns that contain these values. I know of nothing else to do here.
extracted_train = extracted_train.dropna(axis='columns')
filtered_train = None
# execute at different fdr levels to try to make FRESH more robust
for fdr in [0.05, 0.01, 0.005, 0.001]:
R = calculate_relevance_table(extracted_train, y_train.squeeze(), fdr_level=fdr)
filtered_train = filter_features(extracted_train, R)
if (filtered_train.shape[1] > 0):
break
# Perform PCA on the filtered set of features
pca_train = PCAForPandas(n_components=0.95, svd_solver='full')
filtered_train = pca_train.fit_transform(filtered_train)
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = filter_features(extracted_test, R)
filtered_test = pca_train.transform(filtered_test)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# FRESH variant that runs PCA before the filtering step
def perform_fresh_pca_before(X_train, y_train, X_test, y_test):
log('Processing fresh_pca_before')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction and relevance tests ONLY on the train
# data set.
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
# For some reason, tsfresh is extracting features that contain Nan,
# Infinity or None. This breaks the PCA step. To avoid this, we
# drop columns that contain these values.
extracted_train = extracted_train.dropna(axis='columns')
# Perform PCA on the complete set of extracted features
pca_train = PCAForPandas(n_components=0.95, svd_solver='full')
extracted_train = pca_train.fit_transform(extracted_train)
filtered_train = extracted_train.reset_index(drop=True)
y_train = y_train.reset_index(drop=True)
# Extract features from the test set, but then apply the same relevant
# features that we used from the train set
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_test = extracted_test.dropna(axis='columns')
filtered_test = pca_train.transform(extracted_test)
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(filtered_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(filtered_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(filtered_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(filtered_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# The Borunta based feature-extraction algorithm
def perform_boruta(X_train, y_train, X_test, y_test):
log('Processing boruta')
rf = build_rfc()
feat_selector = BorutaPy(rf, n_estimators='auto', perc=90, verbose=2, random_state=0)
feat_selector.fit(X_train.values, y_train.values)
X_filtered = feat_selector.transform(X_train.values)
X_test_filtered = feat_selector.transform(X_test.values)
trained_model = rf.fit(X_filtered, y_train.squeeze().values)
rfc_predicted = list(map(lambda v: int(v), rf.predict(X_test_filtered)))
actual = y_test.squeeze().tolist()
bdt = build_ada()
trained_model = bdt.fit(X_filtered, y_train.squeeze().values)
ada_predicted = list(map(lambda v: int(v), bdt.predict(X_test_filtered)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(rf.estimators_),
'ada_count': len(bdt.estimators_),
}
# LDA
def perform_lda(X_train, y_train, X_test, y_test):
log('Processing lda')
X_train = X_train.values
y_train = y_train.values
X_test = X_test.values
y_test = y_test.values
sc = StandardScaler()
X_train = sc.fit_transform(X_train)
X_test = sc.transform(X_test)
lda = LDA()
X_train = lda.fit_transform(X_train, y_train)
X_test = lda.transform(X_test)
rf = build_rfc()
trained_model = rf.fit(X_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), rf.predict(X_test)))
actual = y_test.squeeze().tolist()
bdt = build_ada()
trained_model = bdt.fit(X_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(X_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(rf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Take the extracted features from FRESH and use them unfiltered
# to make a prediction
def perform_unfiltered(X_train, y_train, X_test, y_test):
log('Processing unfiltered')
fresh_train_X, fresh_train_y = raw_to_tsfresh(X_train, y_train)
fresh_test_X, fresh_test_y = raw_to_tsfresh(X_test, y_test)
# Run the feature extraction only
extracted_train = extract_features(fresh_train_X, column_id='id', column_value='value')
extracted_test = extract_features(fresh_test_X, column_id='id', column_value='value')
extracted_train = extracted_train.dropna(axis='columns')
extracted_test = extracted_test.dropna(axis='columns')
# Train classifiers on the train set
clf = build_rfc()
trained_model = clf.fit(extracted_train, y_train.squeeze())
rfc_predicted = list(map(lambda v: int(v), clf.predict(extracted_test)))
actual = y_test.squeeze().tolist()
# Create and fit an AdaBoosted decision tree
bdt = build_ada()
trained_model = bdt.fit(extracted_train, y_train.squeeze())
ada_predicted = list(map(lambda v: int(v), bdt.predict(extracted_test)))
return {
'rfc': accuracy_rate(rfc_predicted, actual),
'ada': accuracy_rate(ada_predicted, actual),
'rfc_count': len(clf.estimators_),
'ada_count': len(bdt.estimators_),
}
# Nearest Neighbors with Dynamic Time Warping
def perform_dtw_nn(X_train, y_train, X_test, y_test):
log('Processing dtw_nn')
m = KnnDtw(n_neighbors=1, max_warping_window=10)
m.fit(X_train.values, y_train.values)
predicted, proba = m.predict(X_test.values)
actual = y_test.squeeze().tolist()
return accuracy_rate(predicted, actual), 0
# A simple majority vote classifier
def perform_trivial(X_train, y_train, X_test, y_test):
log('Processing trivial')
counts = {}
    for v in y_train.squeeze():  # iterate over the label values, not the DataFrame columns
if v not in counts:
counts[v] = 1
else:
counts[v] = counts.get(v) + 1
m = -1
majority = None
for k in counts:
v = counts.get(k)
if (v > m):
m = v
majority = k
    predicted = np.full(len(y_test.squeeze().values), majority)
actual = y_test.squeeze().tolist()
return accuracy_rate(predicted, actual)
# Process a single test/train fold
def process_fold(X_train, y_train, X_test, y_test):
# Fresh and it's variants
fresh = run_safely(perform_fresh, X_train, y_train, X_test, y_test)
fresh_b = run_safely(perform_fresh_pca_before, X_train, y_train, X_test, y_test)
fresh_a = run_safely(perform_fresh_pca_after, X_train, y_train, X_test, y_test)
unfiltered = run_safely(perform_unfiltered, X_train, y_train, X_test, y_test)
# The other two feature-based approaches
boruta = run_safely(perform_boruta, X_train, y_train, X_test, y_test)
lda = run_safely(perform_lda, X_train, y_train, X_test, y_test)
# Shape based DTW_NN and the majority vote classifier
dtw = perform_dtw_nn(X_train, y_train, X_test, y_test)
trivial = perform_trivial(X_train, y_train, X_test, y_test)
return ({
'Boruta_ada': boruta.get('ada'),
'Boruta_rfc': boruta.get('rfc'),
'DTW_NN': dtw[0],
'FRESH_PCAa_ada': fresh_a.get('ada'),
'FRESH_PCAa_rfc': fresh_a.get('rfc'),
'FRESH_PCAb_ada': fresh_b.get('ada'),
'FRESH_PCAb_rfc': fresh_b.get('rfc'),
'FRESH_ada': fresh.get('ada'),
'FRESH_rfc': fresh.get('rfc'),
'LDA_ada': lda.get('ada'),
'LDA_rfc': lda.get('rfc'),
'ada': unfiltered.get('ada'),
'rfc': unfiltered.get('rfc'),
'trivial': trivial,
}, {
'Boruta_ada': boruta.get('ada_count'),
'Boruta_rfc': boruta.get('rfc_count'),
'DTW_NN': dtw[1],
'FRESH_PCAa_ada': fresh_a.get('ada_count'),
'FRESH_PCAa_rfc': fresh_a.get('rfc_count'),
'FRESH_PCAb_ada': fresh_b.get('ada_count'),
        'FRESH_PCAb_rfc': fresh_b.get('rfc_count'),
'FRESH_ada': fresh.get('ada_count'),
'FRESH_rfc': fresh.get('rfc_count'),
'LDA_ada': lda.get('ada_count'),
'LDA_rfc': lda.get('rfc_count'),
'ada': unfiltered.get('ada_count'),
'rfc': unfiltered.get('rfc_count'),
'trivial': 0,
})
# Complete processing of one data set. Does 10-fold cross-validation
# extraction and classification
def process_data_set(root_path: str):
combined_X, combined_y = get_combined_raw_dataset(root_path)
skf = StratifiedKFold(n_splits=num_folds)
skf.get_n_splits(combined_X, combined_y)
total_acc = 0
results = []
fold = 1
for train_index, test_index in skf.split(combined_X, combined_y):
log('Processing fold ' + str(fold))
X_train, X_test = combined_X.iloc[train_index], combined_X.iloc[test_index]
y_train, y_test = combined_y.iloc[train_index], combined_y.iloc[test_index]
results.append(process_fold(X_train, y_train, X_test, y_test))
fold += 1
# For this dataset, averages is a map from the name of the
# pipeline (e.g. Boruta_rfc) to the average of all folds,
# similar for std_devs
averages, std_devs, counts = calc_statistics(results)
return averages, std_devs, counts
# Calculates the mean, std_dev and average counts of the
# results
def calc_statistics(results):
averages = {}
std_devs = {}
counts = {}
for k in results[0][0]:
values = []
for r in results:
f = r[0]
if (f.get(k) != -1):
values.append(f.get(k))
averages[k] = np.mean(values)
std_devs[k] = np.std(values)
for k in results[0][1]:
values = []
for r in results:
f = r[1]
if (f.get(k) != -1):
values.append(f.get(k))
counts[k] = np.mean(values)
return averages, std_devs, counts
# dump contents of array of strings to a file
def out_to_file(file: str, lines):
f = open(file, 'w')
for line in lines:
f.write(line + '\n')
f.close()
# log our progress.
def log(message):
elapsed = str(round(time.time() - mark, 0))
    f = open('./log.txt', 'a')  # append so earlier progress messages are kept
f.write('[' + elapsed.rjust(15, '0') + '] ' + message + '\n')
f.close()
# Output the captured results to the various tsv output files
def output_results(results):
header = 'dataset'
first = results.get(next(iter(results)))[0]
for k in first:
header = header + '\t' + k
# averages
lines = [header]
for r in results:
line = r
aves = results.get(r)[0]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./averages.tsv', lines)
# std_devs
lines = [header]
for r in results:
line = r
aves = results.get(r)[1]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./std_devs.tsv', lines)
# counts
lines = [header]
for r in results:
line = r
aves = results.get(r)[2]
for k in aves:
line = line + '\t' + str(aves.get(k))
lines.append(line)
out_to_file('./counts.tsv', lines)
def get_dataset_dirs():
return glob("./data/*/")
# builds a (X, y) DataFrame pair of a random time series with
# a binary label and specified number of samples and length
def build_random_ts(num_samples, length_of_ts):
data = {}
labels = []
for s in range (0, num_samples):
labels.append(np.random.choice([1, 2]))
data['y'] = labels
for col in range(0, length_of_ts):
key = 'feature_' + str(col + 1)
values = []
for s in range (0, num_samples):
values.append(np.random.normal())
data[key] = values
df = pd.DataFrame.from_dict(data)
X = df.iloc[:,1:]
y = df.iloc[:,:1]
return (X, y)
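# Illustrative call (hypothetical sizes): build_random_ts(5, 3) returns X of shape
# (5, 3) with columns feature_1..feature_3 holding standard-normal values, and y of
# shape (5, 1) with labels drawn uniformly from {1, 2}.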
# Dump the current snapshot of results to a given output filename
def capture_timing_result(f, results):
lines = []
for r in results:
values = results.get(r)
line = r
for v in values:
line = line + '\t' + str(v)
lines.append(line)
out_to_file(f, lines)
# Perform the full timing test first for fixed number of
# samples and then a fixed length of time series
def perform_timing_test():
log('performing timing test')
# The collection of tests that we run
tests = [
('Boruta', perform_boruta),
('DTW_NN', perform_dtw_nn),
('FRESH', perform_fresh),
('FRESH_PCAa', perform_fresh_pca_after),
('FRESH_PCAb', perform_fresh_pca_before),
('LDA', perform_lda),
('Full_X', perform_unfiltered)
]
# keep the number of samples constant
constant_samples_results = {}
for test in tests:
constant_samples_results[test[0]] = []
for length in [100, 1000, 2000]:
log('running 1000 samples and ' + str(length) + ' length')
X, y = build_random_ts(1000, length)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
train_index, test_index = next(skf.split(X, y))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
for test in tests:
mark = time.time()
try:
test[1](X_train, y_train, X_test, y_test)
except:
log(test[0] + ' ERROR')
constant_samples_results.get(test[0]).append(time.time() - mark)
capture_timing_result('./fixed_samples.tsv', constant_samples_results)
# keep the length constant
constant_length_results = {}
for test in tests:
constant_length_results[test[0]] = []
for num_samples in [100, 1000, 2000]:
        log('running 1000 length and ' + str(num_samples) + ' samples')
X, y = build_random_ts(num_samples, 1000)
skf = StratifiedKFold(n_splits=10)
skf.get_n_splits(X, y)
train_index, test_index = next(skf.split(X, y))
X_train, X_test = X.iloc[train_index], X.iloc[test_index]
y_train, y_test = y.iloc[train_index], y.iloc[test_index]
for test in tests:
mark = time.time()
try:
test[1](X_train, y_train, X_test, y_test)
except:
log(test[0] + ' ERROR')
constant_length_results.get(test[0]).append(time.time() - mark)
capture_timing_result('./fixed_length.tsv', constant_length_results)
def load_and_plot(filename, out, title, colormap, vmax):
df = pd.read_csv(filename, delimiter='\t')
datasets = df['dataset'].tolist()
algorithms = list(df.columns.values)[1:]
data = df.iloc[:,1:].values
create_heatmap(out, data, datasets, algorithms, title, colormap, vmax)
def make_colormap(seq):
"""Return a LinearSegmentedColormap
seq: a sequence of floats and RGB-tuples. The floats should be increasing
and in the interval (0,1).
"""
seq = [(None,) * 3, 0.0] + list(seq) + [1.0, (None,) * 3]
cdict = {'red': [], 'green': [], 'blue': []}
for i, item in enumerate(seq):
if isinstance(item, float):
r1, g1, b1 = seq[i - 1]
r2, g2, b2 = seq[i + 1]
cdict['red'].append([item, r1, r2])
cdict['green'].append([item, g1, g2])
cdict['blue'].append([item, b1, b2])
return mcolors.LinearSegmentedColormap('CustomMap', cdict)
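# Illustrative use of make_colormap (an added example, not part of the experiment
# pipeline): a red -> violet -> blue map with anchor points at 0.33 and 0.66.
_c = mcolors.ColorConverter().to_rgb
example_rvb = make_colormap(
    [_c('red'), _c('violet'), 0.33, _c('violet'), _c('blue'), 0.66, _c('blue')])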
def create_boxplot(data, algorithms):
fig = plt.figure(1, figsize=(9, 6))
# Create an axes instance
ax = fig.add_subplot(111)
# rectangular box plot
bplot1 = ax.boxplot(data,
vert=True, # vertical box alignment
patch_artist=True, # fill with color
labels=algorithms) # will be used to label x-ticks
ax.set_title('Used Features')
# fill with colors
colors = ['pink', 'orange', 'darkgoldenrod', 'olive', 'green', 'lightseagreen', 'seagreen', 'lightgreen', 'deepskyblue', 'orchid', 'hotpink', 'palevioletred']
for patch, color in zip(bplot1['boxes'], colors):
patch.set_facecolor(color)
# adding horizontal grid lines
ax.yaxis.grid(True)
plt.setp(ax.get_xticklabels(), rotation=90, ha="right")
ax.set_xlabel('Algorithm')
ax.set_ylabel('Used feature counts')
plt.savefig('./results/counts.png')
def create_heatmap(out, data, row_labels, col_labels, title, colormap, vmax, ax=None,
cbar_kw={}, cbarlabel="", **kwargs):
"""
Create a heatmap from a numpy array and two lists of labels.
Arguments:
data : A 2D numpy array of shape (N,M)
row_labels : A list or array of length N with the labels
for the rows
col_labels : A list or array of length M with the labels
for the columns
Optional arguments:
ax : A matplotlib.axes.Axes instance to which the heatmap
is plotted. If not provided, use current axes or
create a new one.
cbar_kw : A dictionary with arguments to
:meth:`matplotlib.Figure.colorbar`.
cbarlabel : The label for the colorbar
All other arguments are directly passed on to the imshow call.
"""
if not ax:
ax = plt.gca()
# Plot the heatmap
im = ax.imshow(data, cmap=colormap, vmin=0, vmax=vmax, **kwargs)
# Create colorbar
cbar = ax.figure.colorbar(im, ax=ax, **cbar_kw)
cbar.ax.set_ylabel(cbarlabel, rotation=-90, va="bottom")
plt.gcf().subplots_adjust(bottom=0.25)
# We want to show all ticks...
ax.set_xticks(np.arange(data.shape[1]))
    ax.set_yticks(np.arange(data.shape[0]))
import unittest
import numpy as np
from sklearn.datasets import (
load_breast_cancer,
load_iris
)
from msitrees._core import (
gini_impurity,
gini_information_gain,
entropy,
get_class_and_proba,
classif_best_split
)
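# Worked reference for the tests below (standard definitions, not code under test):
# Gini impurity of labels y is 1 - sum_k p_k**2 over the class proportions p_k, so
# y = [1, 0, 1, 0] gives 1 - (0.5**2 + 0.5**2) = 0.5 (see test_binary_max_impurity).
# Shannon entropy is -sum_k p_k * log2(p_k), so four equally likely classes give
# 2.0 bits (see TestEntropy.test_multiclass_max_impurity).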
class TestGiniImpurity(unittest.TestCase):
def test_input_type_list(self):
try:
gini_impurity([0, 0])
except TypeError:
self.fail('Exception on allowed input type - list')
def test_input_type_tuple(self):
try:
gini_impurity((0, 0))
except TypeError:
self.fail('Exception on allowed input type - tuple')
def test_input_type_numpy(self):
try:
gini_impurity(np.array([0, 0]))
except TypeError:
self.fail('Exception on allowed input type - np.ndarray')
def test_input_int(self):
with self.assertRaises(ValueError):
gini_impurity(0)
def test_input_other(self):
with self.assertRaises(TypeError):
gini_impurity('foo')
with self.assertRaises(TypeError):
gini_impurity({'foo': 1})
def test_input_wrong_shape(self):
with self.assertRaises(ValueError):
gini_impurity(np.array([[1, 0], [1, 0]]))
def test_input_empty_list(self):
with self.assertRaises(ValueError):
gini_impurity([])
def test_input_empty_array(self):
with self.assertRaises(ValueError):
gini_impurity(np.array([]))
def test_binary_max_impurity(self):
arr = np.array([1, 0, 1, 0])
gini = gini_impurity(arr)
self.assertAlmostEqual(gini, 0.5)
def test_binary_min_impurity(self):
arr = np.array([0, 0, 0, 0])
gini = gini_impurity(arr)
self.assertAlmostEqual(gini, 0.0)
def test_multiclass_max_impurity(self):
arr = np.array(list(range(5)))
max_impurity = 1 - (1 / arr.shape[0])
gini = gini_impurity(arr)
self.assertAlmostEqual(gini, max_impurity)
class TestEntropy(unittest.TestCase):
def test_input_type_list(self):
try:
entropy([0, 0])
except TypeError:
self.fail('Exception on allowed input type - list')
def test_input_type_tuple(self):
try:
entropy((0, 0))
except TypeError:
self.fail('Exception on allowed input type - tuple')
def test_input_type_numpy(self):
try:
entropy(np.array([0, 0]))
except TypeError:
self.fail('Exception on allowed input type - np.ndarray')
def test_input_int(self):
with self.assertRaises(ValueError):
entropy(0)
def test_input_other(self):
with self.assertRaises(TypeError):
entropy('foo')
with self.assertRaises(TypeError):
entropy({'foo': 1})
def test_input_wrong_shape(self):
with self.assertRaises(ValueError):
entropy(np.array([[1, 0], [1, 0]]))
def test_input_empty_list(self):
with self.assertRaises(ValueError):
entropy([])
def test_input_empty_array(self):
with self.assertRaises(ValueError):
entropy(np.array([]))
def test_binary_max_impurity(self):
arr = np.array([1, 0, 1, 0])
hs = entropy(arr)
self.assertAlmostEqual(hs, 1.)
def test_binary_min_impurity(self):
arr = np.array([0, 0, 0, 0])
hs = entropy(arr)
self.assertAlmostEqual(hs, 0.)
def test_multiclass_max_impurity(self):
arr = np.array([1, 2, 3, 4])
hs = entropy(arr)
self.assertAlmostEqual(hs, 2.)
class TestGiniInformationGain(unittest.TestCase):
def test_input_type_list(self):
yl = [0, 0, 0]
yr = [1, 1, 1]
yall = [0, 0, 0, 1, 1, 1]
try:
gini_information_gain(yl, yr, yall)
except TypeError:
self.fail('Exception on allowed input type - list')
def test_input_type_tuple(self):
yl = (0, 0, 0)
yr = (1, 1, 1)
yall = (0, 0, 0, 1, 1, 1)
try:
gini_information_gain(yl, yr, yall)
except TypeError:
self.fail('Exception on allowed input type - tuple')
def test_input_type_numpy(self):
yl = np.array([0, 0, 0])
yr = np.array([1, 1, 1])
yall = np.array([0, 0, 0, 1, 1, 1])
try:
gini_information_gain(yl, yr, yall)
except TypeError:
self.fail('Exception on allowed input type - np.ndarray')
def test_input_int(self):
yl = np.array([0, 0, 0])
yr = np.array([1, 1, 1])
yall = np.array([0, 0, 0, 1, 1, 1])
with self.assertRaises(ValueError):
gini_information_gain(0, yr, yall)
with self.assertRaises(ValueError):
gini_information_gain(yl, 0, yall)
with self.assertRaises(ValueError):
gini_information_gain(yl, yr, 0)
def test_input_other(self):
yl = np.array([0, 0, 0])
yr = np.array([1, 1, 1])
yall = np.array([0, 0, 0, 1, 1, 1])
with self.assertRaises(TypeError):
gini_information_gain('foo', yr, yall)
with self.assertRaises(TypeError):
gini_information_gain(yl, 'foo', yr)
with self.assertRaises(TypeError):
gini_information_gain(yl, yr, 'foo11')
def test_input_wrong_shape(self):
badshape = np.array([[1], [1], [1]])
yl = np.array([0, 0, 0])
yr = np.array([1, 1, 1])
yall = np.array([0, 0, 0, 1, 1, 1])
with self.assertRaises(ValueError):
gini_information_gain(badshape, yr, yall)
with self.assertRaises(ValueError):
gini_information_gain(yl, badshape, yall)
with self.assertRaises(ValueError):
gini_information_gain(yl, yr, badshape)
def test_input_empty_array(self):
yl = np.array([0, 0, 0])
yr = np.array([1, 1, 1])
yall = np.array([0, 0, 0, 1, 1, 1])
with self.assertRaises(ValueError):
gini_information_gain([], yr, yall)
with self.assertRaises(ValueError):
gini_information_gain(yl, [], yall)
with self.assertRaises(ValueError):
gini_information_gain(yl, yr, [])
def test_binary_perfect_split(self):
yl = np.array([0, 0])
yr = np.array([1, 1])
yall = np.array([0, 0, 1, 1])
gain = gini_information_gain(yl, yr, yall)
self.assertAlmostEqual(gain, 0.5)
def test_binary_noisy_split(self):
yl = np.array([0, 1])
yr = np.array([1, 0])
yall = np.array([0, 0, 1, 1])
gain = gini_information_gain(yl, yr, yall)
self.assertAlmostEqual(gain, 0.0)
def test_binary_uneven_split(self):
yl = np.array([0, 0])
yr = np.array([1, 1, 1])
yall = np.array([0, 0, 1, 1, 1])
gain = gini_information_gain(yl, yr, yall)
self.assertAlmostEqual(gain, 0.48)
def test_multiclass_perfect_split(self):
yl = np.array([1, 1])
yr = np.array([2, 2])
yall = np.array([2, 2, 1, 1])
gain = gini_information_gain(yl, yr, yall)
self.assertAlmostEqual(gain, 0.5)
def test_multiclass_noisy_split(self):
yl = np.array([2, 1])
yr = np.array([1, 2])
yall = np.array([2, 2, 1, 1])
gain = gini_information_gain(yl, yr, yall)
self.assertAlmostEqual(gain, 0.0)
def test_multiclass_uneven_split(self):
yl = np.array([1, 1])
yr = np.array([2, 2, 3])
yall = np.array([2, 2, 1, 1, 3])
gain = gini_information_gain(yl, yr, yall)
self.assertAlmostEqual(gain, 0.3733, places=4)
class TestGetClassProba(unittest.TestCase):
def test_input_type_list(self):
y = [1, 1, 0, 0]
try:
get_class_and_proba(y, 2)
except TypeError:
self.fail('Exception on allowed input type - list')
def test_input_type_tuple(self):
y = (1, 1, 0, 0)
try:
get_class_and_proba(y, 2)
except TypeError:
self.fail('Exception on allowed input type - tuple')
def test_input_type_numpy(self):
y = np.array([1, 1, 0, 0])
try:
get_class_and_proba(y, 2)
except TypeError:
self.fail('Exception on allowed input type - np.ndarray')
def test_input_int(self):
with self.assertRaises(ValueError):
get_class_and_proba(0, 0)
def test_input_other(self):
with self.assertRaises(TypeError):
get_class_and_proba('foo', 0)
def test_input_wrong_shape(self):
badshape = np.array([[1], [1], [1]])
with self.assertRaises(ValueError):
get_class_and_proba(badshape, 2)
def test_input_empty_array(self):
with self.assertRaises(ValueError):
get_class_and_proba([], 0)
def test_binary_class_major(self):
y = np.array([0, 0, 1, 1, 1])
label, _ = get_class_and_proba(y, 2)
self.assertEqual(label, 1)
def test_binary_class_draw(self):
y = np.array([0, 0, 1, 1])
label, _ = get_class_and_proba(y, 2)
self.assertEqual(label, 0)
def test_multiclass_class_major(self):
y = np.array([0, 1, 2, 2])
label, _ = get_class_and_proba(y, 3)
self.assertEqual(label, 2)
def test_multiclass_class_draw(self):
y = np.array([0, 0, 1, 1, 2, 2])
label, _ = get_class_and_proba(y, 3)
self.assertEqual(label, 0)
def test_binary_proba_major(self):
y = np.array([0, 0, 1, 1, 1])
label, proba = get_class_and_proba(y, 2)
self.assertAlmostEqual(proba[label], 0.6)
self.assertAlmostEqual(proba[0], 1 - 0.6)
def test_binary_proba_draw(self):
y = np.array([0, 0, 1, 1])
label, proba = get_class_and_proba(y, 2)
self.assertAlmostEqual(proba[label], 0.5)
self.assertAlmostEqual(proba[1], 1 - 0.5)
def test_multiclass_proba_major(self):
y = np.array([0, 1, 2, 2])
label, proba = get_class_and_proba(y, 3)
self.assertAlmostEqual(proba[label], 0.5)
self.assertAlmostEqual(proba[0], 0.25)
self.assertAlmostEqual(proba[1], 0.25)
def test_multiclass_proba_draw(self):
y = np.array([0, 0, 1, 1, 2, 2])
label, proba = get_class_and_proba(y, 3)
self.assertAlmostEqual(proba[label], 0.33333, places=5)
self.assertAlmostEqual(proba[1], 0.33333, places=5)
self.assertAlmostEqual(proba[2], 0.33333, places=5)
def test_padding_left(self):
# binary classification, but
# leaf only has class 1 - test
# if 0 is represented with proba 0.
y = np.array([1, 1])
_, proba = get_class_and_proba(y, 2)
self.assertEqual(len(proba), 2)
self.assertEqual(proba[0], 0.0)
self.assertEqual(proba[1], 1.0)
def test_padding_inner(self):
# multiclass classification
# with classes 0, 1, 2 but leaf
# only has class 1 and 2. check
# if class 1 is represented with proba 0.
        y = np.array([0, 0, 2, 2])
# Script to perform decoding analyses on the trained layer activations and the recurrent flow
# Requires tensorflow 1.13, python 3.7, scikit-learn, and pytorch 1.6.0
############################# IMPORTING MODULES ##################################
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
from torch.utils.data import Dataset, DataLoader
from torch.utils.data.sampler import SubsetRandomSampler
import numpy as np
import tensorflow as tf
import scipy
from scipy.ndimage.interpolation import zoom
from scipy.ndimage.interpolation import rotate
from random import shuffle
from sklearn import svm
from scipy import ndimage
from tensorflow.examples.tutorials.mnist import input_data
mnist = input_data.read_data_sets('MNIST_data', one_hot=True)
fmnist = input_data.read_data_sets('fMNIST_data', one_hot=True)
############################# FUNCTIONS DEFINED ##################################
# A function to scramble image chunks
def im_scram(im,parts_h): # scramble parts_h*parts_h equal parts of the given image
win_prop = parts_h
dimsh = np.shape(im)
im_new = np.zeros(dimsh)
dimsh_win = np.floor(dimsh[0]/win_prop)
n_cells = np.square(np.int(dimsh[0]/dimsh_win))
cell_c = np.int(dimsh[0]/dimsh_win)
ind_new = np.linspace(0,n_cells-1,n_cells).astype('int32')
while np.mean(ind_new == np.linspace(0,n_cells-1,n_cells).astype('int32')) == 1:
shuffle(ind_new)
for i in range(n_cells):
j = ind_new[i]
im_new[np.int(np.mod(i,cell_c)*dimsh_win):np.int(np.mod(i,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(i*1./cell_c*1.)*dimsh_win):np.int(np.floor(i*1./cell_c*1.)*dimsh_win+dimsh_win)] = im[
np.int(np.mod(j,cell_c)*dimsh_win):np.int(np.mod(j,cell_c)*dimsh_win+dimsh_win),
np.int(np.floor(j*1./cell_c*1.)*dimsh_win):np.int(np.floor(j*1./cell_c*1.)*dimsh_win+dimsh_win)]
return im_new
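# Illustrative call (hypothetical input): scramble a random 100x100 image into a
# 4x4 grid of 25x25 blocks whose positions are shuffled:
#   scrambled = im_scram(np.random.rand(100, 100), 4)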
# A function to generate images and the respective labels for training and testing
def gen_images(n_imgs,n_set): # n_imgs required, set used (0 train, 1 val, 2 test) 8 objects in image (1 is intact), 2 levels of zoom, rotation and x/y pos for each object
imgs_h = np.zeros([n_imgs,1,100,100])
imgs_h1 = np.zeros([n_imgs,1,100,100])
labs_h = np.zeros([n_imgs,20])
pos_x_h = np.zeros([n_imgs,2])
pos_y_h = np.zeros([n_imgs,2])
size_h = np.zeros([n_imgs,2])
rot_h = np.zeros([n_imgs,2])
n_objs = 8
    for n_im in np.arange(n_imgs):
import os
import sys
from casadi import *
import numpy as np
import scipy.linalg as scipylinalg
csfp = os.path.abspath(os.path.dirname(__file__))
if csfp not in sys.path:
sys.path.insert(0, csfp)
from utils.OrthogonalCollocation import construct_polynomials_basis
import Criteria
class MBDoE:
def __init__(self, Model_Def, horizon, thetas, S_thetas, collocation_degree = 4, penalize_u=False,
ukf=False, theta_unc=None, S_exp = None):
self.NoModels = len(Model_Def)
self.Model_def = []
self.ukf = ukf
for i in range(self.NoModels):
self.Model_def += [Model_Def[i]()] # Take class of the dynamic system
self.dc = collocation_degree # Define the degree of collocation
self.N = horizon # Define the Horizon of the problem
# FIXME Add different horizon for control and prediction
dt, x0, Lsolver, c_code, self.shrinking_horizon = self.Model_def[0].specifications()
self.dt = dt
self.f = []
self.hmeas = []
self.S_theta = []
# FIXME Change the DAE to be for all the models
if ukf:
for i in range(self.NoModels):
xd, _, u, uncertainty, ODEeq, _, self.u_min, self.u_max, self.x_min, self.x_max, _, \
_, _, self.nd, _, self.nu, self.n_ref, self.ntheta, _, self.ng, self.gfcn, \
self.Obj_M, self.Obj_L, self.Obj_D, self.R = self.Model_def[i].DAE_system(uncertain_parameters=True) # Define the System
self.f += [Function('f1', [xd, u, uncertainty], [vertcat(*ODEeq)])]
self.hmeas += [Function('h1', [xd, u], [xd])]
# self.S_theta += [theta_unc[i]]
else:
for i in range(self.NoModels):
xd, _, u, uncertainty, ODEeq, _, self.u_min, self.u_max, self.x_min, self.x_max, _, \
_, _, self.nd, _, self.nu, self.n_ref, self.ntheta, _, self.ng, self.gfcn, \
self.Obj_M, self.Obj_L, self.Obj_D, self.R = self.Model_def[i].DAE_system() # Define the System
self.f += [Function('f1', [xd, u,uncertainty], [vertcat(*ODEeq)])]
"""
Define noise and disturbances for the system
"""
self.Q = 1e-7 * np.eye(self.nd)
if S_exp == None:
self.S_exp = 1e-4 * np.eye(self.nd)
self.penalize_u = penalize_u
# Define options for solver
opts = {}
opts["expand"] = True
opts["ipopt.print_level"] = 5
opts["ipopt.max_iter"] = 1000
opts["ipopt.tol"] = 1e-8
opts["calc_lam_p"] = False
opts["calc_multipliers"] = False
opts["ipopt.print_timing_statistics"] = "no"
opts["print_time"] = False
self.opts = opts
if not(ukf):
self.MPC_construct()
else:
self.MPC_construct_ukf_thetas(thetas, S_thetas)
def MPC_construct(self):
"""
        ODEeq : the system of ODEs describing the dynamics
        gfcn  : the inequality (path) constraints
        Obj_M : the Mayer term of the dynamic-optimization objective
        Obj_L : the Lagrange term of the dynamic-optimization objective
        Obj_D : a discretized objective evaluated at each time step
        :return: solver, trajectories, w0, lbw, ubw, lbg, ubg
"""
N = self.N
dc = self.dc
dt = self.dt
NoModels = self.NoModels
C, D, B = construct_polynomials_basis(dc, 'radau')
w = []
w0 = []
lbw = []
ubw = []
J = 0
g = []
lbg = []
ubg = []
Ts = []
t = 0
# "Lift" initial conditions
x_plot = []
u_plot = []
X_models= []
X_0 = SX.sym('p_x' , self.nd) # This is a parameter that defines the Initial Conditions
shrink = SX.sym('p_shrink', self.N)
x_ref = SX.sym('p_ref' , self.n_ref)
thetas = SX.sym('p_thetas', self.ntheta * NoModels)
if self.penalize_u:
U_past = SX.sym('p_u', self.nu) #This is a parameter that defines the Initial Conditions
prev = U_past
u_apply = []
for m in range(NoModels):
Xk = SX.sym('X0', self.nd)
w += [Xk]
lbw += [*self.x_min]
ubw += [*self.x_max]
w0 += [*self.x_min]
g += [Xk - X_0]
lbg += [*np.zeros([self.nd])]
ubg += [*np.zeros([self.nd])]
x_plot += [Xk]
X_his = []
theta = SX.sym('theta', self.ntheta)
w += [theta]
lbw += [*(0*np.ones([self.ntheta]))]
#[*(0.8*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(0*np.ones([self.ntheta]))]
ubw += [*(1000*np.ones([self.ntheta]))]
#[*(1.1*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(500*np.ones([self.ntheta]))]
w0 += [*(100*np.ones([self.ntheta]))]
#[*(np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]
g += [theta - thetas[m*self.ntheta:(m+1)*(self.ntheta)]]
lbg += [*np.zeros([self.ntheta])]
ubg += [*np.zeros([self.ntheta])]
for i in range(N):
# Formulate the NLP
# New NLP variable for the control
if m ==0:
Uk = SX.sym('U_' + str(i), self.nu)
if self.penalize_u:
J += (Uk-prev).T @ self.R @ (Uk - prev) * shrink[i]
prev = Uk
w += [Uk]
lbw += [*self.u_min]
ubw += [*self.u_max]
w0 += [*(self.u_min)]
u_plot += [Uk]
u_apply += [Uk]
# Integrate till the end of the interval
w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot, _ = self.perform_orthogonal_collocation(dc, self.nd, w, lbw, ubw, w0,
self.x_min, self.x_max,
D, Xk, i, C, self.f[m], u_apply[i], dt,
g, lbg, ubg, shrink[i], x_plot, B, J, x_ref,theta)#F1(x0=Xk, p=Uk, y=yk)#, DT=DTk)
for ig in range(self.ng):
g += [self.gfcn(Xk, x_ref, u_apply[i])[ig]*shrink[i]]
lbg += [-inf]
ubg += [0.]
X_his = vertcat(X_his,Xk.T)
X_models += [X_his]
J += -Criteria.HR(X_models)
# J+= self.Obj_D(Xk, x_ref, Uk) * shrink[i]
# J += self.Obj_M(Xk, x_ref, Uk)
if self.penalize_u:
p = []
p += [X_0]
p += [x_ref]
p += [U_past]
p += [shrink]
p += [thetas]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
else:
p = []
p += [X_0]
p += [x_ref]
p += [shrink]
p += [thetas]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
trajectories = Function('trajectories', [vertcat(*w)]
, [horzcat(*x_plot), horzcat(*u_plot)], ['w'], ['x','u'])
solver = nlpsol('solver', 'ipopt', prob, self.opts) # 'bonmin', prob, {"discrete": discrete})#'ipopt', prob, {'ipopt.output_file': 'error_on_fail'+str(ind)+'.txt'})#
self.solver, self.trajectories, self.w0, self.lbw, self.ubw, self.lbg, self.ubg = \
solver, trajectories, w0, lbw, ubw, lbg, ubg
return solver, trajectories, w0, lbw, ubw, lbg, ubg
def MPC_construct_ukf(self):
"""
        ODEeq : the system of ODEs describing the dynamics
        gfcn  : the inequality (path) constraints
        Obj_M : the Mayer term of the dynamic-optimization objective
        Obj_L : the Lagrange term of the dynamic-optimization objective
        Obj_D : a discretized objective evaluated at each time step
        :return: solver, trajectories, w0, lbw, ubw, lbg, ubg
"""
N = self.N
dc = self.dc
dt = self.dt
NoModels = self.NoModels
C, D, B = construct_polynomials_basis(dc, 'radau')
w = []
w0 = []
lbw = []
ubw = []
J = 0
g = []
lbg = []
ubg = []
Ts = []
t = 0
# "Lift" initial conditions
x_plot = []
u_plot = []
X_models = []
X_0 = SX.sym('p_x', self.nd) # This is a parameter that defines the Initial Conditions
shrink = SX.sym('p_shrink', self.N)
x_ref = SX.sym('p_ref', self.n_ref)
thetas = SX.sym('p_thetas', self.ntheta * NoModels)
S_thetas = []
for m in range(NoModels):
S_thetas += [SX.sym('p_S_thetas_'+str(m), self.ntheta * self.ntheta)]
if self.penalize_u:
U_past = SX.sym('p_u', self.nu) # This is a parameter that defines the Initial Conditions
prev = U_past
u_apply = []
for m in range(NoModels):
# Create a square matrix for the S_theta
S_theta = SX.sym('S_theta', self.ntheta**2)
w += [S_theta]
lbw += [*(0 * np.ones([self.ntheta**2]))]
# [*(0.8*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(0*np.ones([self.ntheta]))]
ubw += [*(1 * np.ones([self.ntheta**2]))]
# [*(1.1*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(500*np.ones([self.ntheta]))]
w0 += [*(0 * np.ones([self.ntheta**2]))]
# [*(np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]
g += [S_theta - S_thetas[m]]
lbg += [*np.zeros([self.ntheta**2])]
ubg += [*np.zeros([self.ntheta**2])]
S_theta_reshaped = S_theta.reshape((self.ntheta, self.ntheta))
Xk = SX.sym('X0', self.nd)
w += [Xk]
lbw += [*self.x_min]
ubw += [*self.x_max]
w0 += [*self.x_min]
g += [Xk - X_0]
lbg += [*np.zeros([self.nd])]
ubg += [*np.zeros([self.nd])]
x_plot += [Xk]
X_his = []
theta = SX.sym('theta', self.ntheta)
w += [theta]
lbw += [*(0 * np.ones([self.ntheta]))]
# [*(0.8*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(0*np.ones([self.ntheta]))]
ubw += [*(1000 * np.ones([self.ntheta]))]
# [*(1.1*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(500*np.ones([self.ntheta]))]
w0 += [*(100 * np.ones([self.ntheta]))]
# [*(np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]
g += [theta - thetas[m * self.ntheta:(m + 1) * (self.ntheta)]]
lbg += [*np.zeros([self.ntheta])]
ubg += [*np.zeros([self.ntheta])]
S = SX(0.001*np.eye(self.nd))
for i in range(N):
# Formulate the NLP
# New NLP variable for the control
if m == 0:
Uk = SX.sym('U_' + str(i), self.nu)
if self.penalize_u:
J += (Uk - prev).T @ self.R @ (Uk - prev) * shrink[i]
prev = Uk
w += [Uk]
lbw += [*self.u_min]
ubw += [*self.u_max]
w0 += [*(self.u_min)]
u_plot += [Uk]
u_apply += [Uk]
# Integrate till the end of the interval
auxiliary_vars = dc, self.nd, w, lbw, ubw, w0, \
self.x_min, self.x_max, \
D, i, C, dt, g, lbg, ubg, \
shrink[i], x_plot, B, x_ref
if N<1:
Xk, S, w, lbw, ubw, w0, g, lbg, ubg, _, x_plot = self.ukf1(self.f[m], Xk, S, theta, S_theta_reshaped,
self.hmeas[m], self.hmeas[m](Xk, u_apply[i]), self.Q, self.S_exp, u_apply[i], auxiliary_vars)
else:
Xk, _, w, lbw, ubw, w0, g, lbg, ubg, _, x_plot = self.ukf1(self.f[m], Xk, S, theta, S_theta_reshaped,
self.hmeas[m], self.hmeas[m](Xk, u_apply[i]), self.Q, self.S_exp, u_apply[i], auxiliary_vars)
for ig in range(self.ng):
g += [self.gfcn(Xk, x_ref, u_apply[i])[ig]*shrink[i]]
lbg += [-inf]
ubg += [0.]
X_his = vertcat(X_his,Xk.T)
X_models += [X_his]
J += -Criteria.HR(X_models)
# J+= self.Obj_D(Xk, x_ref, Uk) * shrink[i]
# J += self.Obj_M(Xk, x_ref, Uk)
if self.penalize_u:
p = []
p += [X_0]
p += [x_ref]
p += [U_past]
p += [shrink]
p += [thetas]
for i in range(self.NoModels):
p += [S_thetas[i]]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
else:
p = []
p += [X_0]
p += [x_ref]
p += [shrink]
p += [thetas]
for i in range(self.NoModels):
p += [S_thetas[i]]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
trajectories = Function('trajectories', [vertcat(*w)]
, [horzcat(*x_plot), horzcat(*u_plot)], ['w'], ['x','u'])
solver = nlpsol('solver', 'ipopt', prob, self.opts) # 'bonmin', prob, {"discrete": discrete})#'ipopt', prob, {'ipopt.output_file': 'error_on_fail'+str(ind)+'.txt'})#
self.solver, self.trajectories, self.w0, self.lbw, self.ubw, self.lbg, self.ubg = \
solver, trajectories, w0, lbw, ubw, lbg, ubg
return solver, trajectories, w0, lbw, ubw, lbg, ubg
def MPC_construct_ukf_no_thetas(self, thetas, S_thetas):
"""
        ODEeq : the system of ODEs describing the dynamics
        gfcn  : the inequality (path) constraints
        Obj_M : the Mayer term of the dynamic-optimization objective
        Obj_L : the Lagrange term of the dynamic-optimization objective
        Obj_D : a discretized objective evaluated at each time step
        :return: solver, trajectories, w0, lbw, ubw, lbg, ubg
"""
N = self.N
dc = self.dc
dt = self.dt
NoModels = self.NoModels
C, D, B = construct_polynomials_basis(dc, 'radau')
w = []
w0 = []
lbw = []
ubw = []
J = 0
g = []
lbg = []
ubg = []
Ts = []
t = 0
# "Lift" initial conditions
x_plot = []
u_plot = []
X_models = []
S_models = []
X_0 = SX.sym('p_x', self.nd) # This is a parameter that defines the Initial Conditions
shrink = SX.sym('p_shrink', self.N)
x_ref = SX.sym('p_ref', self.n_ref)
# thetas = SX.sym('p_thetas', self.ntheta * NoModels)
# S_thetas = []
# for m in range(NoModels):
# S_thetas += [SX.sym('p_S_thetas_'+str(m), self.ntheta * self.ntheta)]
if self.penalize_u:
U_past = SX.sym('p_u', self.nu) # This is a parameter that defines the Initial Conditions
prev = U_past
u_apply = []
for m in range(NoModels):
# Create a square matrix for the S_theta
S_theta = S_thetas[m]
S_theta_reshaped = SX(S_theta.reshape((self.ntheta, self.ntheta)))
Xk = SX.sym('X0', self.nd)
w += [Xk]
lbw += [*self.x_min]
ubw += [*self.x_max]
w0 += [*(self.x_min)]
g += [Xk - X_0]
lbg += [*np.zeros([self.nd])]
ubg += [*np.zeros([self.nd])]
x_plot += [Xk]
X_his = []
S_his = []
# theta = SX.sym('theta', self.ntheta)
# w += [theta]
# lbw += [*(0 * np.ones([self.ntheta]))]
# # [*(0.8*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# # 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(0*np.ones([self.ntheta]))]
# ubw += [*(1000 * np.ones([self.ntheta]))]
# # [*(1.1*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# # 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(500*np.ones([self.ntheta]))]
# w0 += [*(100 * np.ones([self.ntheta]))]
# # [*(np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# # 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]
theta = SX(thetas[m])# * self.ntheta:(m + 1) * (self.ntheta)])
# lbg += [*np.zeros([self.ntheta])]
# ubg += [*np.zeros([self.ntheta])]
S = SX(0.0000*np.eye(self.nd))
for i in range(N):
# Formulate the NLP
# New NLP variable for the control
if m == 0:
Uk = SX.sym('U_' + str(i), self.nu)
if self.penalize_u:
J += (Uk - prev).T @ self.R @ (Uk - prev) * shrink[i]
prev = Uk
w += [Uk]
lbw += [*self.u_min]
ubw += [*self.u_max]
w0 += [*((self.u_min+self.u_max)/2)]
u_plot += [Uk]
u_apply += [Uk]
# Integrate till the end of the interval
auxiliary_vars = dc, self.nd, w, lbw, ubw, w0, \
self.x_min, self.x_max, \
D, i, C, dt, g, lbg, ubg, \
shrink[i], [], B, x_ref
if i<4:
Xk, S, w, lbw, ubw, w0, g, lbg, ubg, _, _ = self.ukf1_regular(self.f[m], Xk, S, theta, S_theta_reshaped,
self.hmeas[m], self.hmeas[m](Xk, u_apply[i]), self.Q, self.S_exp, u_apply[i], auxiliary_vars)
else:
Xk, _, w, lbw, ubw, w0, g, lbg, ubg, _, _ = self.ukf1_regular(self.f[m], Xk, S, theta, S_theta_reshaped,
self.hmeas[m], self.hmeas[m](Xk, u_apply[i]), self.Q, self.S_exp, u_apply[i], auxiliary_vars)
x_plot += [Xk]
for ig in range(self.ng):
g += [self.gfcn(Xk, x_ref, u_apply[i])[ig]*shrink[i]]
lbg += [-inf]
ubg += [0.]
X_his = vertcat(X_his,Xk.T)
S_his += [S]
X_models += [X_his]
S_models += [S_his]
J += -Criteria.BF(X_models, S_models, 0.000001*np.eye(self.nd))
# J+= self.Obj_D(Xk, x_ref, Uk) * shrink[i]
# J += self.Obj_M(Xk, x_ref, Uk)
if self.penalize_u:
p = []
p += [X_0]
p += [x_ref]
p += [U_past]
p += [shrink]
# p += [thetas]
# for i in range(self.NoModels):
# p += [S_thetas[i]]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
else:
p = []
p += [X_0]
p += [x_ref]
p += [shrink]
# p += [thetas]
# for i in range(self.NoModels):
# p += [S_thetas[i]]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
trajectories = Function('trajectories', [vertcat(*w)]
, [horzcat(*x_plot), horzcat(*u_plot)], ['w'], ['x','u'])
solver = nlpsol('solver', 'ipopt', prob, self.opts) # 'bonmin', prob, {"discrete": discrete})#'ipopt', prob, {'ipopt.output_file': 'error_on_fail'+str(ind)+'.txt'})#
self.solver, self.trajectories, self.w0, self.lbw, self.ubw, self.lbg, self.ubg = \
solver, trajectories, w0, lbw, ubw, lbg, ubg
return solver, trajectories, w0, lbw, ubw, lbg, ubg
def MPC_construct_ukf_thetas(self, thetas, S_thetas):
"""
        ODEeq : the system of ODEs describing the dynamics
        gfcn  : the inequality (path) constraints
        Obj_M : the Mayer term of the dynamic-optimization objective
        Obj_L : the Lagrange term of the dynamic-optimization objective
        Obj_D : a discretized objective evaluated at each time step
        :return: solver, trajectories, w0, lbw, ubw, lbg, ubg
"""
N = self.N
dc = self.dc
dt = self.dt
NoModels = self.NoModels
C, D, B = construct_polynomials_basis(dc, 'radau')
w = []
w0 = []
lbw = []
ubw = []
J = 0
g = []
lbg = []
ubg = []
Ts = []
t = 0
# "Lift" initial conditions
x_plot = []
u_plot = []
X_models = []
S_models = []
X_0 = SX.sym('p_x', self.nd) # This is a parameter that defines the Initial Conditions
shrink = np.ones(self.N)#SX.sym('p_shrink', self.N)
x_ref = SX.sym('p_ref', self.n_ref)
# thetas = SX.sym('p_thetas', self.ntheta * NoModels)
# S_thetas = []
# for m in range(NoModels):
# S_thetas += [SX.sym('p_S_thetas_'+str(m), self.ntheta * self.ntheta)]
if self.penalize_u:
U_past = SX.sym('p_u', self.nu) # This is a parameter that defines the Initial Conditions
prev = U_past
u_apply = []
S_plot = []
# K = SX.sym('K_a_', self.nu*self.nd)
# w += [K]
# lbw += [*(-1000*np.ones(self.nu*self.nd))]
# ubw += [*(1000*np.ones(self.nu*self.nd))]
# w0 += [*(np.zeros(self.nu*self.nd))]
# K_sq = K.reshape((self.nu,self.nd))
for m in range(NoModels):
# Create a square matrix for the S_theta
S_theta = S_thetas[m]
S_theta_reshaped = SX(S_theta.reshape((self.ntheta, self.ntheta)))
Xk = SX.sym('X0', self.nd)
w += [Xk]
lbw += [*self.x_min]
ubw += [*self.x_max]
w0 += [*(self.x_min)]
g += [Xk - X_0]
lbg += [*np.zeros([self.nd])]
ubg += [*np.zeros([self.nd])]
x_plot += [Xk]
X_his = []
S_his = []
# theta = SX.sym('theta', self.ntheta)
# w += [theta]
# lbw += [*(0 * np.ones([self.ntheta]))]
# # [*(0.8*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# # 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(0*np.ones([self.ntheta]))]
# ubw += [*(1000 * np.ones([self.ntheta]))]
# # [*(1.1*np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# # 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]#[*(500*np.ones([self.ntheta]))]
# w0 += [*(100 * np.ones([self.ntheta]))]
# # [*(np.array( [0.0923*0.62, 178.85, 447.12, 393.10, 0.001, 504.49,
# # 2.544*0.62*1e-4, 23.51, 800.0, 0.281, 16.89]))]
theta = SX(thetas[m])# * self.ntheta:(m + 1) * (self.ntheta)])
# lbg += [*np.zeros([self.ntheta])]
# ubg += [*np.zeros([self.ntheta])]
S = SX(0.0000*np.eye(self.nd))
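# The state covariance starts at (numerically) zero; predictive uncertainty enters
# through the parameter covariance S_theta and grows as the UKF propagates it.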
for i in range(N):
# Formulate the NLP
# New NLP variable for the control
if m == 0:
Uk = SX.sym('U_' + str(i), self.nu)
if self.penalize_u:
J += (Uk - prev).T @ self.R @ (Uk - prev) * shrink[i]
prev = Uk
w += [Uk]
lbw += [*self.u_min]
ubw += [*self.u_max]
w0 += [*((self.u_min+self.u_max)/2)]
u_plot += [Uk]
u_apply += [Uk]# + K_sq @ (Xk)]
# Integrate till the end of the interval
auxiliary_vars = dc, self.nd, w, lbw, ubw, w0, \
self.x_min, self.x_max, \
D, i, C, dt, g, lbg, ubg, \
shrink[i], [], B, x_ref
if i<N:
Xk, S, w, lbw, ubw, w0, g, lbg, ubg, _, _ = self.ukf1_regular(self.f[m], Xk, S, theta, S_theta_reshaped,
self.hmeas[m], self.hmeas[m](Xk, u_apply[i]), self.Q, self.S_exp, u_apply[i], auxiliary_vars)
else:
Xk, _, w, lbw, ubw, w0, g, lbg, ubg, _, _ = self.ukf1_regular(self.f[m], Xk, S, theta, S_theta_reshaped,
self.hmeas[m], self.hmeas[m](Xk, u_apply[i]), self.Q, self.S_exp, u_apply[i], auxiliary_vars)
x_plot += [Xk]
for ig in range(self.ng):
g += [self.gfcn(Xk, x_ref, u_apply[i])[ig]]# + 4.35*sqrt(S[1,1])]
lbg += [-inf]
ubg += [0.]
X_his = vertcat(X_his,(Xk).T)#/[14,800,1]
S_his += [S]
if m ==0:
S_plot+= [S.reshape((self.nd**2,1))]
X_models += [X_his]
S_models += [S_his]
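# Model-discrimination objective: maximize the Bayes-factor-type criterion between the
# predicted trajectories of the candidate models (via minimizing its negative log).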
J += -log(Criteria.BF(X_models, S_models, self.S_exp)+1e-7)#(Criteria.AW(X_models, S_models, self.S_exp)+1e-7)
# J+= self.Obj_D(Xk, x_ref, Uk) * shrink[i]
# J += self.Obj_M(Xk, x_ref, Uk)
if self.penalize_u:
p = []
p += [X_0]
p += [x_ref]
p += [U_past]
# p += [thetas]
# for i in range(self.NoModels):
# p += [S_thetas[i]]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
else:
p = []
p += [X_0]
p += [x_ref]
# p += [thetas]
# for i in range(self.NoModels):
# p += [S_thetas[i]]
prob = {'f': J, 'x': vertcat(*w),'p': vertcat(*p), 'g': vertcat(*g)}
trajectories = Function('trajectories', [vertcat(*w)]
, [horzcat(*x_plot), horzcat(*u_plot), horzcat(*S_plot)], ['w'], ['x','u','S'])
solver = nlpsol('solver', 'ipopt', prob, self.opts) # 'bonmin', prob, {"discrete": discrete})#'ipopt', prob, {'ipopt.output_file': 'error_on_fail'+str(ind)+'.txt'})#
self.solver, self.trajectories, self.w0, self.lbw, self.ubw, self.lbg, self.ubg = \
solver, trajectories, w0, lbw, ubw, lbg, ubg
return solver, trajectories, w0, lbw, ubw, lbg, ubg
def solve_MPC(self, x, thetas, ref=None, u=None, t=0., S_theta=None):
if self.n_ref>0:
p0 = np.concatenate((x, np.array([ref]).reshape((-1,))))
else:
p0 = x
if self.shrinking_horizon:
if t==0.:
shrink = np.ones([self.N])
self.steps = self.N
else:
shrink = np.concatenate((np.ones([self.steps]), np.zeros([self.N-self.steps])))
else:
shrink = np.ones([self.N])
if self.penalize_u:
p0 = np.concatenate((p0,u))
theta = np.array(thetas)
theta_reshaped = np.reshape(theta, self.ntheta*self.NoModels)
p0 = np.concatenate((p0, shrink, theta_reshaped))
#
# # Add the parametric unc in the problem
if self.ukf:
for i in range(self.NoModels):
S_theta_single = S_theta[i].reshape((self.ntheta**2))
p0 = np.concatenate((p0, S_theta_single))
sol = self.solver(x0=self.w0, lbx=self.lbw, ubx=self.ubw, lbg=self.lbg, ubg=self.ubg,
p=p0)
w_opt = sol['x'].full().flatten()
x_opt, u_opt = self.trajectories(sol['x'])
if self.solver.stats()['return_status'] != 'Solve_Succeeded':
print('Opt failed')
if self.shrinking_horizon:
self.steps += - 1
self.obj = sol['f'].full().flatten()
return u_opt, x_opt, w_opt
def solve_MPC_unc(self, x, ref=None, u=None, t=0.):
if self.n_ref>0:
p0 = np.concatenate((x, np.array([ref]).reshape((-1,))))
else:
p0 = x
# if self.shrinking_horizon:
# if t==0.:
# shrink = np.ones([self.N])
# self.steps = self.N
# else:
# shrink = np.concatenate((np.ones([self.steps]), np.zeros([self.N-self.steps])))
# else:
# shrink = np.ones([self.N])
if self.penalize_u:
p0 = np.concatenate((p0,u))
# theta = np.array(thetas)
# theta_reshaped = np.reshape(theta, self.ntheta*self.NoModels)
#p0 = np.concatenate((p0))#, theta_reshaped))
#
# # Add the parametric unc in the problem
# if self.ukf:
# for i in range(self.NoModels):
# S_theta_single = S_theta[i].reshape((self.ntheta**2))
# p0 = np.concatenate((p0, S_theta_single))
sol = self.solver(x0=self.w0, lbx=self.lbw, ubx=self.ubw, lbg=self.lbg, ubg=self.ubg,
p=p0)
w_opt = sol['x'].full().flatten()
x_opt, u_opt, S_opt = self.trajectories(sol['x'])
if self.solver.stats()['return_status'] != 'Solve_Succeeded':
print('Opt failed')
# if self.shrinking_horizon:
# self.steps += - 1
self.obj = sol['f'].full().flatten()
return u_opt, x_opt, w_opt, S_opt
def perform_orthogonal_collocation(self,d, nx, w, lbw, ubw, w0, lbx, ubx, D, Xk, s, C, f, Uk,
h, g, lbg, ubg, shrink, x_plot, B, J, x_ref,unc_theta):
"""
Apply one finite element of orthogonal collocation (Radau points): introduce the
collocation states Xc, enforce h * f(Xc, Uk, theta) = xp at every collocation point,
optionally add the path constraints, and create the new state variable Xk at the
end of the interval.
:return: w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot, J
"""
Xc = []
for j in range(d):
Xkj = SX.sym('X_' + str(s) + '_' + str(j), nx)
Xc += [Xkj]
w += [Xkj]
lbw.extend(lbx)
ubw.extend(ubx)
w0.extend((self.x_min*1.2))
x_plot+= [Xkj]
# Loop over collocation points
Xk_end = D[0] * Xk
for j in range(1, d + 1):
# Expression for the state derivative at the collocation point
xp = C[0, j] * Xk
for r in range(d):
xp = xp + C[r + 1, j] * Xc[r]
# Append collocation equations
fj = f(Xc[j - 1], Uk, unc_theta) * shrink #
g += [(h * fj - xp)]
lbg.extend([-1e-8] * nx)
ubg.extend([1e-8] * nx)
if not(self.ukf):
for ig in range(self.ng):
g += [self.gfcn(Xc[j-1], x_ref, Uk)[ig]*shrink]
lbg += [-inf]
ubg += [0.]
# Add contribution to the end state
Xk_end = Xk_end + D[j] * Xc[j - 1]
# if int(j1) < np.shape(t_meas)[0]:
# if np.real(k * T / N) == t_meas[j1]:
# count[k] = 1
# j1 += 1
# Add contribution to quadrature function
qj = 0.#self.Obj_L(Xc[j - 1], x_ref,Uk) * shrink #
J += B[j]*qj*h
# New NLP variable for state at end of interval
Xk = SX.sym('X_' + str(s + 1), nx)
w += [Xk]
lbw.extend(lbx)
ubw.extend(ubx)
w0.extend((self.x_min*1.2))
# Add equality constraint
g += [Xk_end - Xk]
lbg.extend([0.] * nx)
ubg.extend([0.] * nx)
return w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot, J
def ukf1(self, fstate, x, S, theta, S_theta, hmeas, z, Q, R, u, auxiliary_vars):
dc, nd, w, lbw, ubw, w0,\
x_min, x_max,\
D, i, C, dt,g, lbg, ubg, \
shrink, x_plot, B, x_ref = auxiliary_vars
x_aug = vertcat(x, theta)
S_aug = diagcat(S, S_theta)
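# Augment the state with the uncertain parameters so the parameter covariance S_theta
# is propagated jointly with the state covariance through the sigma points.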
L = max(np.shape(x_aug)) # 2*len(x)+1
m = z.shape[0]
alpha = 1e-3
ki = 0
beta = 2
lambda1 = alpha ** 2 * (L + ki) - L
c = L + lambda1
Wm = np.zeros(1 + 2 * L)
Wm[0] = lambda1 / c
Wm[1:] = 0.5 / c + np.zeros([1, 2 * L])
Wc = Wm.copy()
Wc[0] = Wc[0] + (1 - alpha ** 2 + beta)
c = np.sqrt(c)
# S[-4:,-4:]= 0.999**0.5 * S[-4:,-4:]
X = self.sigmas(x_aug, S_aug, c)
x1, X1, S1, X2, w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot = self.ut_with_orthogonal_collocation(
fstate, X[:self.nd,:], X[self.nd:,:], Wm, Wc, nd, Q, u, auxiliary_vars)
z1, Z1, S2, Z2 = self.ut(hmeas, X1, Wm, Wc, m, R, u)
P12 = X2 @ np.diagflat(Wc) @ Z2.T
# P12 = mtimes(mtimes(X2,np.diagflat(Wc)),Z2.T)
K = mtimes(mtimes(P12, pinv(S2)), pinv(S2).T) # .full()##P12 @np.linalg.pinv(S2)**2
# K = np.dot(np.dot(P12, np.linalg.pinv(S2.T)),np.linalg.pinv(S2)) #np.linalg.lstsq(S2.T,np.linalg.lstsq(S2, P12.T)[0].T)[0]
# K1 = np.linalg.lstsq(S2.T, np.linalg.lstsq(S2, P12.T)[0].T)[0]
x = x1 + K @ (z - z1)
U = K @ S2.T
for i in range(np.shape(z)[0]):
S1 = self.cholupdate(S1, U[:, i], '-')
S = S1
return x, S, w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot
def ukf1_regular(self, fstate, x, S, theta, S_theta, hmeas, z, Q, R, u, auxiliary_vars):
dc, nd, w, lbw, ubw, w0,\
x_min, x_max,\
D, i, C, dt,g, lbg, ubg, \
shrink, x_plot, B, x_ref = auxiliary_vars
x_aug = vertcat(x, theta)
S_aug = diagcat(S, S_theta)
L = max(np.shape(x_aug)) # 2*len(x)+1
m = z.shape[0]
alpha = 1e-3
ki = 0
beta = 2
lambda1 = 3 - L # L*(alpha**2-1)#alpha**2*(L+ki)-L
c = L + lambda1
Wm = np.zeros(1 + 2 * L)
Wm[0] = lambda1 / c
Wm[1:] = 0.5 / c + np.zeros([1, 2 * L])
Wc = Wm.copy()
Wc[0] = Wc[0]# + (1 - alpha ** 2 + beta)
#c = np.sqrt(c)
# S[-4:,-4:]= 0.999**0.5 * S[-4:,-4:]
X = self.sigmas_regular(x_aug, S_aug, c)
x1, X1, S1, X2, w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot = self.ut_with_orthogonal_collocation_regular(
fstate, X[:self.nd,:], X[self.nd:,:], Wm, Wc, nd, Q, u, auxiliary_vars)
return x1, S1, w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot
def ut_regular(self,f, X, Wm, Wc, n, R, u):
L = X.shape[1]
y = SX(np.zeros([n, ]))
Y = SX(np.zeros([n, L]))
for k in range(L):
Y[:, k] = (f((X[:, k]), (u)))
y += Wm[k] * Y[:, k]
Sum_mean_matrix_m = []
for i in range(L):
Sum_mean_matrix_m = horzcat(Sum_mean_matrix_m, y)
Y1 = (Y - Sum_mean_matrix_m)
res = Y1 @ np.sqrt(np.diagflat(abs(Wc)))
a = horzcat((Y1 @ sqrt(np.diagflat(abs(Wc))))[:, 1:L], SX(R)).T
_, S = qr(a)
if Wc[0] < 0:
S1 = self.cholupdate(S, res[:, 0], '-')
else:
S1 = self.cholupdate(S, res[:, 0], '+')
S = S1
# [email protected](Wc) @ Y1.T+R
return y, Y, S, Y1
def ut_with_orthogonal_collocation_regular(self, f, X, theta, Wm, Wc, n, R, u, auxiliary_vars):
dc, nd, w, lbw, ubw, w0,\
x_min, x_max,\
D, i, C, dt,g, lbg, ubg, \
shrink, x_plot, B, x_ref = auxiliary_vars
L = X.shape[1]
y = SX(np.zeros([n, ]))
Y = SX(np.zeros([n, L]))
for k in range(L):
w, lbw, ubw, w0, g, lbg, ubg, Xk, _, _ = self.perform_orthogonal_collocation(
dc, self.nd, w, lbw, ubw, w0,
self.x_min, self.x_max,
D, X[:,k], i, C, f, u, dt,
g, lbg, ubg, shrink, x_plot, B, 0, x_ref, theta[:,k]) # F1(x0=Xk, p=Uk, y=yk)#, DT=DTk)
Y[:, k] = Xk
y += Wm[k] * Y[:, k]
Sum_mean_matrix_m = []
for i in range(L):
Sum_mean_matrix_m = horzcat(Sum_mean_matrix_m, y)
Y1 = (Y - Sum_mean_matrix_m)
# res = Y1 @ np.sqrt(np.diagflat(abs(Wc)))
S = Wc[0]*(Y[:,[0]]-y)@(Y[:,[0]]-y).T#Y1[:,[0]] @ Y1[:,[0]].T
for i in range(1,L):
S += Wc[i]*(Y[:,[i]]-y)@(Y[:,[i]]-y).T#Wc[i]*Y1[:,[i]] @ Y1[:,[i]].T
S +=1e-7*SX(np.eye(self.nd))
# Sk = SX.sym('X0', self.nd**2)
# w += [Sk]
# lbS = -20*np.ones([self.nd, self.nd])+20*np.eye(self.nd)
# lbw += [*lbS.reshape((self.nd**2,1))]
# ubw += [*(np.zeros([self.nd**2])+20)]
# w0 += [*((1e-7)*np.eye(self.nd).reshape((self.nd**2,1)))]
# g += [Sk - S.reshape((self.nd**2,1))]
# lbg += [*np.zeros([self.nd**2])]
# ubg += [*np.zeros([self.nd**2])]
#Sk.reshape((self.nd, self.nd))
return y, Y, S, Y1, w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot
def ut(self,f, X, Wm, Wc, n, R, u):
L = X.shape[1]
y = SX(np.zeros([n, ]))
Y = SX(np.zeros([n, L]))
for k in range(L):
Y[:, k] = (f((X[:, k]), (u)))
y += Wm[k] * Y[:, k]
Sum_mean_matrix_m = []
for i in range(L):
Sum_mean_matrix_m = horzcat(Sum_mean_matrix_m, y)
Y1 = (Y - Sum_mean_matrix_m)
res = Y1 @ np.sqrt(np.diagflat(abs(Wc)))
a = horzcat((Y1 @ sqrt(np.diagflat(abs(Wc))))[:, 1:L], SX(R)).T
_, S = qr(a)
if Wc[0] < 0:
S1 = self.cholupdate(S, res[:, 0], '-')
else:
S1 = self.cholupdate(S, res[:, 0], '+')
S = S1
# [email protected](Wc) @ Y1.T+R
return y, Y, S, Y1
def ut_with_orthogonal_collocation(self, f, X, theta, Wm, Wc, n, R, u, auxiliary_vars):
dc, nd, w, lbw, ubw, w0,\
x_min, x_max,\
D, i, C, dt,g, lbg, ubg, \
shrink, x_plot, B, x_ref = auxiliary_vars
L = X.shape[1]
y = SX(np.zeros([n, ]))
Y = SX(np.zeros([n, L]))
for k in range(L):
w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot, _ = self.perform_orthogonal_collocation(
dc, self.nd, w, lbw, ubw, w0,
self.x_min, self.x_max,
D, X[:,k], i, C, f, u, dt,
g, lbg, ubg, shrink, x_plot, B, 0, x_ref, theta[:,k]) # F1(x0=Xk, p=Uk, y=yk)#, DT=DTk)
Y[:, k] = Xk
y += Wm[k] * Y[:, k]
Sum_mean_matrix_m = []
for i in range(L):
Sum_mean_matrix_m = horzcat(Sum_mean_matrix_m, y)
Y1 = (Y - Sum_mean_matrix_m)
res = Y1 @ np.sqrt(np.diagflat(abs(Wc)))
a = horzcat((Y1 @ sqrt(np.diagflat(abs(Wc))))[:, 1:L], SX(R)).T
_, S = qr(a)
if Wc[0] < 0:
S1 = self.cholupdate(S, res[:, 0], '-')
else:
S1 = self.cholupdate(S, res[:, 0], '+')
S = S1
# [email protected](Wc) @ Y1.T+R
return y, Y, S, Y1, w, lbw, ubw, w0, g, lbg, ubg, Xk, x_plot
def cholupdate(self,R, x, sign):
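# Rank-one Cholesky update ('+') or downdate ('-') of the triangular factor R,
# used by the square-root UKF to fold each measurement column into the covariance factor.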
p = max(np.shape(x))
x = x.T
for k in range(p):
if sign == '+':
r = sqrt(R[k, k] ** 2 + x[k] ** 2)
elif sign == '-':
r = sqrt(R[k, k] ** 2 - x[k] ** 2)
c = r / R[k, k]
s = x[k] / R[k, k]
R[k, k] = r
if k < p - 1:
if sign == '+':
R[k, k + 1:p] = (R[k, k + 1:p] + s * x[k + 1:p]) / c
elif sign == '-':
R[k, k + 1:p] = (R[k, k + 1:p] - s * x[k + 1:p]) / c
x[k + 1:p] = c * x[k + 1:p] - s * R[k, k + 1:p]
return R
def sigmas(self,x, S, c):
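# Generate the 2L+1 sigma points X = [x, x + A, x - A], where A is the scaled
# Cholesky factor of the covariance S.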
A = chol(c * S.T).T
# Y = x[:,np.ones([1,len(x)])]
n = x.shape[0]
X = horzcat(x.reshape((n, 1)), x.reshape((n, 1)) + A, x.reshape((n, 1)) - A)
return X
def sigmas_regular(self,x, S, c):
A = chol(c * S.T).T
# Y = x[:,np.ones([1,len(x)])]
n = x.shape[0]
X = horzcat(x.reshape((n, 1)), x.reshape((n, 1)) + A, x.reshape((n, 1)) - A)
return X
class cosntract_history:
def __init__(self, model, N, store_u = True, set_point0 = 0.):
#Define self vars
dt, x0, Lsolver, c_code, specifications = model.specifications()
xd, xa, u, uncertainty, ODEeq, Aeq, u_min, u_max, x_min, x_max, states, \
algebraics, inputs, nd, na, nu, n_ref, nmp, modparval, ng, gfcn, Obj_M, \
Obj_L, Obj_D, R = model.DAE_system()
self.model = model # The model defined in terms of casadi
self.N = N # Number of past data
self.store_u = store_u
self.nx = nd
self.nu = nu
set_point0 = np.atleast_1d(set_point0)  # accept scalar or array set points
self.nsp = len(set_point0)
self.u_min = u_min
self.u_max = u_max
state_0, e_sp0 = x0, x0-set_point0#model.reset(set_point0)
# initialize history
history_x = np.array([*state_0]*N).reshape((-1,1))
history_sp = np.array([*e_sp0]*N).reshape((-1,1))
if store_u: # If u are stored as history (simple RNN structure)
history_u = np.array([0]*N*self.nu).reshape((-1,1))
self.history = np.vstack((history_x,history_sp,history_u))
self.size_states = N * (self.nu + self.nx + self.nsp)
else:
self.history = np.vstack((history_x,history_sp))
self.size_states = N * (self.nx+self.nsp)
self.history = self.history.reshape((-1,))
# start counting the past values
self.past = 1
def append_history(self, new_state, u, e_sp):
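# The history acts as a fixed-length FIFO buffer: shift the stored blocks back by one
# slot and write the newest (state, set-point error[, input]) block at the front.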
if self.store_u:
n = self.nx+self.nu + self.nsp
self.history[n:] = self.history[:n*(self.N-1)]
aug_states = np.concatenate((new_state, e_sp, u))
self.history[:n] = aug_states
else:
n = self.nx+ self.nsp
self.history[n:] = self.history[:n*(self.N-1)]
aug_states = np.concatenate((new_state, e_sp))
self.history[:n] = aug_states
self.past +=1
return self.history
class Uncertainty_module:
def __init__(self, Model_def, sensitivity=False):
self.sensitivity = sensitivity
dt, _, _, _, _ = Model_def().specifications()
x, _, u, theta, ODEeq, _, u_min, u_max, x_min, x_max, _, \
_, _, nx, _, nu, n_ref, ntheta, _, ng, gfcn, \
Obj_M, Obj_L, Obj_D, R = Model_def().DAE_system(
uncertain_parameters=True) # Define the System
xdot = vertcat(*ODEeq)
x_p = SX.sym('xp', nx * ntheta)
if sensitivity:
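# Forward sensitivity equations: d/dt(dx/dtheta_i) = (df/dx) (dx/dtheta_i) + df/dtheta_i,
# stacked over all ntheta parameters so they can be integrated together with the states.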
xpdot = []
for i in range(ntheta):
xpdot = vertcat(xpdot, jacobian(xdot, x) @ (x_p[nx * i: nx * i + nx])
+ jacobian(xdot, theta)[nx * i: nx * i + nx])
f = Function('f', [x, u, theta, x_p], [xdot, xpdot],
['x', 'u', 'theta', 'xp'], ['xdot', 'xpdot'])
else:
f = Function('f', [x, u, theta], [xdot],
['x', 'u', 'theta'], ['xdot'])
self.f = f
self.nu = nu
self.nx = nx
self.ntheta = ntheta
self.dt = dt
def integrator_model(self, embedded=True, sensitivity=True):
"""
This function constructs the integrator to be suitable with casadi environment, for the equations of the model
and the objective function with variable time step.
inputs: model, sizes
outputs: F: Function([x, u, dt]--> [xf, obj])
"""
f = self.f
nu = self.nu
nx = self.nx
ntheta = self.ntheta
dt = self.dt
M = 4 # RK4 steps per interval
DT = dt#.sym('DT')
DT1 = DT / M
X0 = SX.sym('X0', nx)
U = SX.sym('U', nu)
theta = SX.sym('theta', ntheta)
xp0 = SX.sym('xp', np.shape(X0)[0] * np.shape(theta)[0])
X = X0
Q = 0
G = 0
S = xp0
if embedded:
if sensitivity:
xdot, xpdot = f(X, U, theta, xp0)
dae = {'x': vertcat(X, xp0), 'p': vertcat(U, theta), 'ode': vertcat(xdot, xpdot)}
opts = {'tf': dt} # interval length
F = integrator('F', 'cvodes', dae, opts)
else:
xdot = f(X, U, theta)
dae = {'x': vertcat(X), 'p': vertcat(U, theta), 'ode': vertcat(xdot)}
opts = {'tf': dt} # interval length
F = integrator('F', 'cvodes', dae, opts)
else:
if sensitivity:
for j in range(M):
# self.f returns (xdot, xpdot), so only state and sensitivity slopes are available;
# the quadrature output G is kept at zero for interface compatibility.
k1, k1_p = f(X, U, theta, S)
k2, k2_p = f(X + DT1 / 2 * k1, U, theta, S + DT1 / 2 * k1_p)
k3, k3_p = f(X + DT1 / 2 * k2, U, theta, S + DT1 / 2 * k2_p)
k4, k4_p = f(X + DT1 * k3, U, theta, S + DT1 * k3_p)
X = X + DT1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
S = S + DT1 / 6 * (k1_p + 2 * k2_p + 2 * k3_p + k4_p)
F = Function('F', [X0, U, theta, xp0], [X, G, S], ['x0', 'p', 'theta', 'xp0'], ['xf', 'g', 'xp'])
else:
for j in range(M):
k1 = f(X, U, theta)
k2 = f(X + DT1 / 2 * k1, U, theta)
k3 = f(X + DT1 / 2 * k2, U, theta)
k4 = f(X + DT1 * k3, U, theta)
X = X + DT1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
F = Function('F', [X0, vertcat(U, theta)], [X], ['x0', 'p'], ['xf'])
self.F = F
return F
def simulate_single_step(self,x0, u, theta, xp0):
self.integrator_model(sensitivity=self.sensitivity)
Fk = self.F(x0=vertcat(x0, xp0), p=vertcat(u, theta))
x11 = Fk['xf'][0:self.nx]
xp1 = Fk['xf'][self.nx:]
return np.array(x11), np.array(xp1)
def compute_FIM(self, x_initial, u_apply, theta, S_exp, criterion='D', prior = None):
if prior is None:
prior = 0
steps = u_apply.shape[1]
N_mc = 1
Sum_of_FIM = 0
for k in range(N_mc):
x0 = x_initial
xp0 = np.zeros(self.ntheta * self.nx)
xp_reshaped = xp0.reshape((self.nx, self.ntheta))
FIM = xp_reshaped.T @ S_exp @ xp_reshaped + prior
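# Accumulate the Fisher information along the trajectory: with sensitivities
# J_k = dx_k/dtheta, FIM ~= prior + sum_k J_k^T pinv(S_exp) J_k.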
for i in range(steps):
x11, xp1 = self.simulate_single_step(x0, u_apply[:,i], theta, xp0)
x0 = x11
xp0 = xp1
xp_reshaped = xp0.reshape((self.nx, self.ntheta))
FIM += xp_reshaped.T @ np.linalg.pinv(S_exp) @ xp_reshaped
if criterion == 'D':
metric_FIM = log(det(FIM + 1e-8 * np.eye(self.ntheta)))
elif criterion == 'A':
metric_FIM = trace(FIM)
else:
raise Exception("Sorry, criterion " + criterion + " to be implemented")
Sum_of_FIM += metric_FIM
mean_FIM = Sum_of_FIM/N_mc
return mean_FIM
def compute_full_path(self, u_opt, N, x0, S0,
theta, S_theta, z, Q, R):
u_apply = u_opt.reshape((self.nu, N))
x_his = np.array([])
S_his = []
if (S_theta == 0).all():
for i in range(N):
# if self.FIM_included:
# x1, xp1 = self.simulate_single_step(x0[:self.nx], u_apply[:,i], theta, x0[self.nx:])
#
# x0 = np.hstack((x1, xp1))
# else:
x1, _ = self.simulate_single_step(x0, u_apply[:,i], theta, x0[self.nx:])
x0 = x1
if i == 0:
x_his = x1.T
else:
x_his = np.vstack((x_his, x1.T))
return x_his
else:
for i in range(N):
x1, S1 = self.ukf1_regular(x0, S0, theta, S_theta, z, Q, R, u_apply[:,i])
x0 = x1
S0 = S1
if i == 0:
x_his =x1.T
else:
x_his = np.vstack((x_his,x1.T))
S_his += [S1]
return x_his, S_his
def ut_regular(self, X, theta, Wm, Wc, n, u):
L = X.shape[1]
y = (np.zeros([n, ]))
Y = (np.zeros([n, L]))
for k in range(L):
if self.FIM_included:
x11, xp1 = self.simulate_single_step(X[:self.nx,k], u, theta[:,k], X[self.nx:,k])
Xk = np.hstack((x11, xp1))
else:
Xk, _ = self.simulate_single_step(X[:self.nx,k], u, theta[:,k], X[self.nx:,k])
Y[:, k] = Xk.reshape((-1,))
y += Wm[k] * Y[:, k]
y = y.reshape((-1,1))
Sum_mean_matrix_m = []
for i in range(L):
Sum_mean_matrix_m = horzcat(Sum_mean_matrix_m, y)
Y1 = (Y - Sum_mean_matrix_m)
S = Wc[0]*(Y[:,[0]]-y)@(Y[:,[0]]-y).T#Y1[:,[0]] @ Y1[:,[0]].T
for i in range(1,L):
S += Wc[i]*(Y[:,[i]]-y)@(Y[:,[i]]-y).T#Wc[i]*Y1[:,[i]] @ Y1[:,[i]].T
S +=1e-7*(np.eye(self.nx))
return y, Y, S, Y1
def ukf1_regular(self, x, S, theta, S_theta, z, Q, R, u,FIM_included=False):
self.FIM_included = FIM_included
x = x.reshape((-1,1))
theta = theta.reshape((-1,1))
x_aug = np.vstack((x, theta))
"""
Greedy Word Swap with Word Importance Ranking
===================================================
When WIR method is set to ``unk``, this is a reimplementation of the search
method from the paper: Is BERT Really Robust?
A Strong Baseline for Natural Language Attack on Text Classification and
Entailment by Jin et. al, 2019. See https://arxiv.org/abs/1907.11932 and
https://github.com/jind11/TextFooler.
"""
import numpy as np
import torch
from torch.nn.functional import softmax
from textattack.goal_function_results import GoalFunctionResultStatus
from textattack.search_methods import SearchMethod
from textattack.shared.validators import (
transformation_consists_of_word_swaps_and_deletions,
)
class GreedyWordSwapWIR(SearchMethod):
"""An attack that greedily chooses from a list of possible perturbations in
order of index, after ranking indices by importance.
Args:
wir_method: method for ranking most important words
"""
def __init__(self, wir_method="unk"):
self.wir_method = wir_method
def _get_index_order(self, initial_text):
"""Returns word indices of ``initial_text`` in descending order of
importance."""
len_text = len(initial_text.words)
if self.wir_method == "unk":
leave_one_texts = [
initial_text.replace_word_at_index(i, "[UNK]") for i in range(len_text)
]
leave_one_results, search_over = self.get_goal_results(leave_one_texts)
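# The importance of word i is the goal-function score of the text with word i replaced
# by [UNK]; a larger score means the prediction relies more heavily on that word.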
index_scores = np.array([result.score for result in leave_one_results])
from SemiSupervisedLearning import visualisations
import matplotlib.pyplot as plt
import numpy as np
import torch
import pathlib
def to_cuda(elements):
"""
Transfers every object in elements to GPU VRAM if available.
elements can be a object or list/tuple of objects
"""
if torch.cuda.is_available():
if type(elements) == tuple or type(elements) == list:
return [x.cuda() for x in elements]
return elements.cuda()
return elements
def get_data_to_tensors(data, batch_size):
train_data_subsample, test_data = [], []
for (images, classes) in data.batch_generator(training=True, batch_size=batch_size):
images = normalize_images(images)
images, classes = torch.from_numpy(images).float(), torch.from_numpy(classes).float()
images = images.permute(0, 3, 1, 2) # change axis from NHWC to NCHW
batch = (images, classes)
train_data_subsample.append(batch)
for (images, classes) in data.batch_generator(training=False, batch_size=batch_size):
images = normalize_images(images)
images, classes = torch.from_numpy(images).float(), torch.from_numpy(classes).float()
images = images.permute(0, 3, 1, 2) # change axis from NHWC to NCHW
batch = (images, classes)
test_data.append(batch)
return (train_data_subsample, test_data)
def normalize_images(images):
# Assuming pixel values are more or less the same for all images
# We pick the first image of the batch
image = images[0]
pixels = np.asarray(image)
means = pixels.mean(axis=(0, 1), dtype='float64')
stds = pixels.std(axis=(0, 1), dtype='float64')
pixels = (pixels - means) / stds
# Apply normalization to all images in the batch
norm_images = []
for i in range(len(images)):
norm_images.append((images[i] - means) / stds)
norm_images = np.array(norm_images)
return norm_images
def make_reconstructions(autoencoder, vis_data, num_images, batch_size, image_dimensions, title):
# Extremely inefficient way of doing this
# Forward all images, then selecting the ones i want to visualize
images = []
reconstructions = []
labels = []
for image_batch, label in vis_data:
#Make reconstruction
image_batch = to_cuda(image_batch)
reconstruction_batch, aux = autoencoder(image_batch)
# Convert from tensor to numpy
image_batch = image_batch.reshape(
image_batch.shape[0],
image_batch.shape[2],
image_batch.shape[3],
image_batch.shape[1]
)
image_batch = image_batch.cpu().detach().numpy()
label = label.cpu().detach().numpy()
reconstruction_batch = reconstruction_batch.reshape(
reconstruction_batch.shape[0],
reconstruction_batch.shape[2],
reconstruction_batch.shape[3],
reconstruction_batch.shape[1]
)
reconstruction_batch = reconstruction_batch.cpu().detach().numpy()
images.extend(image_batch)
labels.extend(label)
reconstructions.extend(reconstruction_batch)
vis_images = images[1000: 1000 + num_images]
vis_reconstructions = reconstructions[1000: 1000 +num_images]
vis_labels = labels[1000: 1000 + num_images]
#
visualisations.show_images_and_reconstructions(np.array(vis_images), title, vis_labels)
visualisations.show_images_and_reconstructions(np.array(vis_reconstructions), title, vis_labels)
import numpy as np
import scipy.sparse as sp
from sklearn.utils.fixes import loguniform
from sklearn.utils.validation import check_random_state
import pytest
from cca_zoo.data import generate_covariance_data
from cca_zoo.model_selection import GridSearchCV, RandomizedSearchCV
from cca_zoo.models import (
rCCA,
CCA,
PLS,
CCA_ALS,
SCCA,
PMD,
ElasticCCA,
KCCA,
KTCCA,
MCCA,
GCCA,
TCCA,
SpanCCA,
SWCCA,
PLS_ALS,
KGCCA,
NCCA,
ParkhomenkoCCA,
SCCA_ADMM,
PartialCCA,
)
from cca_zoo.utils.plotting import cv_plot
n = 50
rng = check_random_state(0)
X = rng.rand(n, 4)
Y = rng.rand(n, 5)
Z = rng.rand(n, 6)
X_sp = sp.random(n, 4, density=0.5, random_state=rng)
Y_sp = sp.random(n, 5, density=0.5, random_state=rng)
def test_unregularized_methods():
# Tests unregularized CCA methods. The idea is that all of these should give the same result.
latent_dims = 2
cca = CCA(latent_dims=latent_dims).fit([X, Y])
iter = CCA_ALS(
latent_dims=latent_dims, tol=1e-9, stochastic=False, random_state=rng
).fit([X, Y])
gcca = GCCA(latent_dims=latent_dims).fit([X, Y])
mcca = MCCA(latent_dims=latent_dims, eps=1e-9).fit([X, Y])
kcca = KCCA(latent_dims=latent_dims).fit([X, Y])
kgcca = KGCCA(latent_dims=latent_dims).fit([X, Y])
tcca = TCCA(latent_dims=latent_dims).fit([X, Y])
corr_cca = cca.score((X, Y))
corr_iter = iter.score((X, Y))
corr_gcca = gcca.score((X, Y))
corr_mcca = mcca.score((X, Y))
corr_kcca = kcca.score((X, Y))
corr_kgcca = kgcca.score((X, Y))
corr_tcca = tcca.score((X, Y))
assert np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_mcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_gcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_kcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_tcca, decimal=1) is None
assert (
np.testing.assert_array_almost_equal(corr_kgcca, corr_gcca, decimal=1) is None
)
# Check standardized models have standard outputs
assert (
np.testing.assert_allclose(
np.linalg.norm(iter.transform((X, Y))[0], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(cca.transform((X, Y))[0], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(mcca.transform((X, Y))[0], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(kcca.transform((X, Y))[0], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(iter.transform((X, Y))[1], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(cca.transform((X, Y))[1], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(mcca.transform((X, Y))[1], axis=0) ** 2, n, rtol=0.2
)
is None
)
assert (
np.testing.assert_allclose(
np.linalg.norm(kcca.transform((X, Y))[1], axis=0) ** 2, n, rtol=0.2
)
is None
)
def test_sparse_input():
# Tests unregularized CCA methods. The idea is that all of these should give the same result.
latent_dims = 2
cca = CCA(latent_dims=latent_dims, centre=False).fit((X_sp, Y_sp))
iter = CCA_ALS(
latent_dims=latent_dims,
tol=1e-9,
stochastic=False,
centre=False,
random_state=rng,
).fit((X_sp, Y_sp))
iter_pls = PLS_ALS(latent_dims=latent_dims, tol=1e-9, centre=False).fit(
(X_sp, Y_sp)
)
gcca = GCCA(latent_dims=latent_dims, centre=False).fit((X_sp, Y_sp))
mcca = MCCA(latent_dims=latent_dims, centre=False).fit((X_sp, Y_sp))
kcca = KCCA(latent_dims=latent_dims, centre=False).fit((X_sp, Y_sp))
scca = SCCA(latent_dims=latent_dims, centre=False, c=0.001).fit((X_sp, Y_sp))
corr_cca = cca.score((X, Y))
corr_iter = iter.score((X, Y))
corr_gcca = gcca.score((X, Y))
corr_mcca = mcca.score((X, Y))
corr_kcca = kcca.score((X, Y))
# Check the correlations from each unregularized method are the same
assert np.testing.assert_array_almost_equal(corr_iter, corr_mcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_iter, corr_gcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_iter, corr_kcca, decimal=1) is None
def test_unregularized_multi():
# Tests unregularized CCA methods for more than 2 views. The idea is that all of these should give the same result.
latent_dims = 2
cca = rCCA(latent_dims=latent_dims).fit((X, Y, Z))
iter = CCA_ALS(
latent_dims=latent_dims, stochastic=False, tol=1e-12, random_state=rng
).fit((X, Y, Z))
gcca = GCCA(latent_dims=latent_dims).fit((X, Y, Z))
mcca = MCCA(latent_dims=latent_dims).fit((X, Y, Z))
kcca = KCCA(latent_dims=latent_dims).fit((X, Y, Z))
corr_cca = cca.score((X, Y, Z))
corr_iter = iter.score((X, Y, Z))
corr_gcca = gcca.score((X, Y, Z))
corr_mcca = mcca.score((X, Y, Z))
corr_kcca = kcca.score((X, Y, Z))
# Check the correlations from each unregularized method are the same
assert np.testing.assert_array_almost_equal(corr_cca, corr_iter, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_mcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_gcca, decimal=1) is None
assert np.testing.assert_array_almost_equal(corr_cca, corr_kcca, decimal=1) is None
def test_regularized_methods():
# Test that linear regularized methods match PLS solution when using maximum regularisation.
latent_dims = 2
c = 1
kernel = KCCA(latent_dims=latent_dims, c=[c, c], kernel=["linear", "linear"]).fit(
(X, Y)
)
pls = PLS(latent_dims=latent_dims).fit([X, Y])
gcca = GCCA(latent_dims=latent_dims, c=[c, c]).fit([X, Y])
mcca = MCCA(latent_dims=latent_dims, c=[c, c]).fit([X, Y])
rcca = rCCA(latent_dims=latent_dims, c=[c, c]).fit([X, Y])
corr_gcca = gcca.score((X, Y))
corr_mcca = mcca.score((X, Y))
corr_kernel = kernel.score((X, Y))
corr_pls = pls.score((X, Y))
corr_rcca = rcca.score((X, Y))
# Check the correlations from each unregularized method are the same
assert np.testing.assert_array_almost_equal(corr_pls, corr_mcca, decimal=1) is None
assert (
np.testing.assert_array_almost_equal(corr_pls, corr_kernel, decimal=1) is None
)
assert np.testing.assert_array_almost_equal(corr_pls, corr_rcca, decimal=1) is None
r"""
Routines for displacing a molecular geometry by translation or
proper/improper rotation.
::
1
|
4
/ \
2 3
Example axes for displacements:
1. X1X4 stretch: :math:`r_{14}`
2. X1X4 torsion: :math:`r_{14}` (for motion of 2, 3)
3. X1X4X2 bend: :math:`r_{14} \times r_{24}`
4. X1 out-of-plane: :math:`r_{24} - r_{34}` or
:math:`(r_{24} \times r_{34}) \times r_{14}`
Each internal coordinate measurement has the option of changing the units
(see the constants module) or taking the absolute value.
"""
import operator as op
import pyparsing as pp
import numpy as np
import gimbal.constants as con
class VectorParser(object):
"""An object for defining and evaluating vector operations on
a cartesian geometry.
A new VectorParser instance takes a cartesian geometry as an
optional input. The instance can be called with a vector (no
action), 3x3 array (cross product) or string, parsed according
to the syntax in :func:`~VectorParser.generate_parser`.
Attributes
----------
xyz : (N, 3) array_like, optional
The cartesian geometry which defines the indices in parsed
expressions. If None, only expressions without indices can
be parsed.
unop : dict
A dictionary which defines unary operations.
bnadd : dict
A dictionary which defines binary addition operations.
bnmul : dict
A dictionary which defines binary multiplication operations.
bnop : dict
A dictionary which defines all binary operations.
axes : dict
A dictionary which defines cartesian axis labels.
expr : pyparsing.Forward
A pyparsing grammar used to evaluate expressions. Automatically
generated when xyz is set.
"""
def __init__(self, xyz=None):
self.unop = {'+': op.pos, '-': op.neg}
self.bnadd = {'+': op.add, '-': op.sub}
self.bnmul = {'*': op.mul, '/': op.truediv, 'o': np.dot, 'x': np.cross}
self.bnop = self.bnadd.copy()
self.bnop.update(self.bnmul)
self.axes = dict(X = np.array([1., 0., 0.]),
Y = np.array([0., 1., 0.]),
Z = np.array([0., 0., 1.]))
self.xyz = xyz
def __call__(self, inp, unit=False):
"""Evaluates an expression based on a string.
Parameters
----------
inp : str or array_like
A string or array used to specify an axis.
unit : bool, optional
Specifies if the axis is converted to a unit vector.
Returns
-------
float or ndarray
The result of the vector operation.
Raises
------
ValueError
If input is not a string, 3-vector or 3x3 array.
"""
if isinstance(inp, str):
u = self.expr.parseString(inp, parseAll=True)[0]
elif len(inp) == 3:
u = np.array(inp, dtype=float)
if u.size == 9:
u = np.cross(u[0] - u[1], u[2] - u[1])
else:
raise ValueError('Axis specification not recognized')
if unit:
return con.unit_vec(u)
else:
return u
@property
def xyz(self):
"""Gets the value of xyz."""
return self._xyz
@xyz.setter
def xyz(self, val):
"""Sets the value of xyz and generates the parser."""
self.expr = self.generate_parser(val)
self._xyz = val
def _eval_unary(self, tokens):
"""Evaluates unary operations.
Parameters
----------
tokens : list
A list of pyparsing tokens from a matching unary expression.
Returns
-------
float or ndarray
The expression after unary operation.
"""
vals = tokens[0]
return self.unop[vals[0]](vals[1])
def _eval_binary(self, tokens):
"""Evaluates binary operations.
Parameters
----------
tokens : list
A list of pyparsing tokens from a matching binary expression.
Returns
-------
float or ndarray
The expression after binary operation.
"""
vals = tokens[0]
newval = vals[0]
it = iter(vals[1:])
for oper in it:
newval = self.bnop[oper](newval, next(it))
return newval
def _eval_power(self, tokens):
"""Evaluates power operations.
Parameters
----------
tokens : list
A list of pyparsing tokens from a matching power expression.
Returns
-------
float or ndarray
The expression after power operation.
"""
vals = tokens[0]
newval = vals[-1]
for v in vals[-3::-2]:
newval = v**newval
return newval
def generate_parser(self, xyz=None):
"""Creates the pyparsing expression based on geometry.
The syntax is as follows:
- ``i+`` are indices of xyz and return vectors.
- ``i+.j`` are floating point numbers (j optional).
- ``i[j]`` is the j-th (scalar) element of xyz[i].
- ``X, Y, Z`` are unit vectors along x, y and z axes (uppercase only).
- ``+`` and ``-`` are addition/subtraction of vectors or scalars.
- ``*`` and ``/`` are multiplication/division of vectors and scalars
(elementwise).
- ``o`` and ``x`` are scalar/vector products of vectors only.
- ``^`` is the power of a vector/scalar by a scalar (elementwise).
- ``(`` and ``)`` specify order of operation.
- ``[i, j, k]`` gives a vector with scalar elements i, j and k.
Parameters
----------
xyz : (N, 3), array_like, optional
The cartesian geometry used in index expressions. If not
provided, strings containing indices will raise an error.
Returns
-------
pyparsing.Forward
A pyparsing grammar definition.
"""
expr = pp.Forward()
# operand types: int, int with index, float, axis or delimited list
intnum = pp.Word(pp.nums)
fltind = pp.Word(pp.nums) + '[' + pp.Word(pp.nums) + ']'
fltnum = pp.Combine(pp.Word(pp.nums) + '.' + pp.Optional(pp.Word(pp.nums)))
alphax = pp.oneOf(' '.join(self.axes))
dllist = pp.Suppress('[') + pp.delimitedList(expr) + pp.Suppress(']')
intnum.setParseAction(lambda t: xyz[int(t[0])])
fltind.setParseAction(lambda t: xyz[int(t[0])][int(t[2])])
fltnum.setParseAction(lambda t: float(t[0]))
alphax.setParseAction(lambda t: self.axes[t[0]])
dllist.setParseAction(lambda t: np.array(t[:]))
operand = dllist | alphax | fltnum | fltind | intnum
# operators: unary, power, binary multiplication/division,
# binary addition/subtraction
sgnop = pp.oneOf(' '.join(self.unop))
expop = pp.Literal('^')
mulop = pp.oneOf(' '.join(self.bnmul))
addop = pp.oneOf(' '.join(self.bnadd))
# set operator precedence
prec = [(sgnop, 1, pp.opAssoc.RIGHT, self._eval_unary),
(expop, 2, pp.opAssoc.LEFT, self._eval_power),
(mulop, 2, pp.opAssoc.LEFT, self._eval_binary),
(addop, 2, pp.opAssoc.LEFT, self._eval_binary)]
return expr << pp.infixNotation(operand, prec)
def translate(xyz, amp, axis, ind=None, units='ang'):
"""Translates a set of atoms along a given vector.
Parameters
----------
xyz : (N, 3) array_like
The atomic cartesian coordinates.
amp : float
The distance for translation.
axis : array_like or str
The axis of translation, parsed by :class:`VectorParser`.
ind : array_like, optional
List of atomic indices to specify which atoms are displaced. If
ind is None (default) then all atoms are displaced.
units : str, optional
The units of length for displacement. Default is angstroms.
Returns
-------
(N, 3) ndarray
The atomic cartesian coordinates of the displaced molecule.
"""
if ind is None:
ind = range(len(xyz))
vp = VectorParser(xyz)
u = vp(axis, unit=True)
amp *= con.conv(units, 'ang')
newxyz = np.copy(xyz)
newxyz[ind] += amp * u
return newxyz
def rotmat(ang, u, det=1, units='rad', xyz=None):
r"""Returns the rotational matrix based on an angle and axis.
A general rotational matrix in 3D can be formed given an angle and
an axis by
.. math::
\mathbf{R} = \cos(a) \mathbf{I} + (\det(\mathbf{R}) -
\cos(a)) \mathbf{u} \otimes \mathbf{u} + \sin(a) [\mathbf{u}]_\times
for identity matrix **I**, angle *a*, axis **u**,
outer product :math:`\otimes` and cross-product matrix
:math:`[\mathbf{u}]_\times`. Determinants of +1 and -1 give proper and
improper rotation, respectively. Thus, :math:`\det(\mathbf{R}) = -1` and
:math:`a = 0` is a reflection along the axis. Action of the rotational
matrix occurs about the origin. See en.wikipedia.org/wiki/Rotation_matrix
and http://scipp.ucsc.edu/~haber/ph251/rotreflect_17.pdf
Parameters
----------
ang : float
The angle of rotation.
u : array_like or str
The axis of rotation, converted to a unit vector.
det : int, optional
The determinant of the matrix (1 or -1) used to specify proper
and improper rotations. Default is 1.
units : str, optional
The units of angle for the rotation. Default is radians.
xyz : (N, 3) array_like, optional
The cartesian coordinates used in axis specification.
Returns
-------
(3, 3) ndarray
The rotational matrix of the given angle and axis.
Raises
------
ValueError
When the absolute value of the determinant is not equal to 1.
"""
if not np.isclose(np.abs(det), 1):
raise ValueError('Determinant of a rotational matrix must be +/- 1')
u /= np.linalg.norm(u)
amp = ang * con.conv(units, 'rad')
ucross = np.array([[0, u[2], -u[1]], [-u[2], 0, u[0]], [u[1], -u[0], 0]])
return (np.cos(amp) * np.eye(3) + np.sin(amp) * ucross +
(det - np.cos(amp)) * np.outer(u, u))
def angax(rotmat, units='rad'):
r"""Returns the angle, axis of rotation and determinant of a
rotational matrix.
Based on the form of **R**, it can be separated into symmetric
and antisymmetric components with :math:`(r_{ij} + r_{ji})/2` and
:math:`(r_{ij} - r_{ji})/2`, respectively. Then,
.. math::
r_{ii} = \cos(a) + u_i^2 (\det(\mathbf{R}) - \cos(a)),
\cos(a) = (-\det(\mathbf{R}) + \sum_j r_{jj}) / 2 =
(\mathrm{tr}(\mathbf{R}) - \det(\mathbf{R})) / 2.
From the expression for :math:`r_{ii}`, the magnitude of :math:`u_i`
can be found
.. math::
|u_i| = \sqrt{\frac{1 + \det(\mathbf{R}) [2 r_{ii} -
\mathrm{tr}(\mathbf{R})])}{3 - \det(\mathbf{R}) \mathrm{tr}(\mathbf{R})}},
which satisfies :math:`u \cdot u = 1`. Note that if
:math:`\det(\mathbf{R}) \mathrm{tr}(\mathbf{R}) = 3`, the axis is arbitrary
(identity or inversion). Otherwise, the sign can be found from the
antisymmetric component of **R**.
.. math::
u_i \sin(a) = (r_{jk} - r_{kj}) / 2, \quad i \neq j \neq k,
\mathrm{sign}(u_i) = \mathrm{sign}(r_{jk} - r_{kj}),
since :math:`\sin(a)` is positive in the range 0 to :math:`\pi`. :math:`i`,
:math:`j` and :math:`k` obey the cyclic relation 3 -> 2 -> 1 -> 3 -> ...
This fails when :math:`det(\mathbf{R}) \mathrm{tr}(\mathbf{R}) = -1`, in
which case the symmetric component of **R** is used
.. math::
u_i u_j (\det(\mathbf{R}) - \cos(a)) = (r_{ij} + r_{ji}) / 2,
\mathrm{sign}(u_i) \mathrm{sign}(u_j) = \det(\mathbf{R}) \mathrm{sign}(r_{ij} + r_{ji}).
The signs can then be found by letting :math:`\mathrm{sign}(u_3) = +1`,
since a rotation of :math:`pi` or a reflection are equivalent for
antiparallel axes. See
http://scipp.ucsc.edu/~haber/ph251/rotreflect_17.pdf
Parameters
----------
rotmat : (3, 3) array_like
The rotational matrix.
units : str, optional
The output units for the angle. Default is radians.
Returns
-------
ang : float
The angle of rotation.
u : (3,) ndarray
The axis of rotation as a 3D vector.
det : int
The determinant of the rotation matrix.
Raises
------
ValueError
When the absolute value of the determinant is not equal to 1.
"""
det = np.linalg.det(rotmat)
if not np.isclose(np.abs(det), 1):
raise ValueError('Determinant of a rotational matrix must be +/- 1')
tr = np.trace(rotmat)
ang = con.arccos((tr - det) / 2) * con.conv('rad', units)
if np.isclose(det*tr, 3):
u = np.array([0, 0, 1])
else:
u = np.sqrt((1 + det*(2*np.diag(rotmat) - tr)) / (3 - det*tr))
if np.isclose(det*tr, -1):
sgn = np.ones(3)
sgn[1] = det * _nonzero_sign(rotmat[1,2] + rotmat[2,1])
sgn[0] = det * sgn[1] * _nonzero_sign(rotmat[0,1] + rotmat[1,0])
u *= sgn
else:
u[0] *= _nonzero_sign(rotmat[1,2] - rotmat[2,1])
u[1] *= _nonzero_sign(rotmat[2,0] - rotmat[0,2])
u[2] *= _nonzero_sign(rotmat[0,1] - rotmat[1,0])
return ang, u, det
def rotate(xyz, ang, axis, ind=None, origin=np.zeros(3), det=1, units='rad'):
"""Rotates a set of atoms about a given vector.
An origin can be specified for rotation about a specific point. If
no indices are specified, all atoms are displaced. Setting det=-1
leads to an improper rotation.
Parameters
----------
xyz : (N, 3) array_like
The atomic cartesian coordinates.
ang : float
The angle of rotation.
axis : array_like or str
The axis of rotation, parsed by :class:`VectorParser`.
ind : array_like, optional
List of atomic indices to specify which atoms are displaced. If
ind is None (default) then all atoms are displaced.
origin : (3,) array_like, optional
The origin of rotation, parsed by :class:`VectorParser`. Default
is the cartesian origin.
det : float, optional
The determinant of the rotation. 1 (default) is a proper rotation
and -1 is an improper rotation (rotation + reflection).
units : str, optional
The units of length for displacement. Default is angstroms.
Returns
-------
(N, 3) ndarray
The atomic cartesian coordinates of the displaced molecule.
"""
if ind is None:
ind = range(len(xyz))
vp = VectorParser(xyz)
ax = vp(axis, unit=True)
org = vp(origin)
newxyz = xyz - org
newxyz[ind] = newxyz[ind].dot(rotmat(ang, ax, det=det, units=units))
return newxyz + org
def align_pos(xyz, test_crd, ref_crd, ind=None):
"""Translates a set of atoms such that two positions are coincident.
Parameters
----------
xyz : (N, 3) array_like
The atomic cartesian coordinates.
test_crd : (3,) array_like
Cartesian coordinates of the original position.
ref_crd : (3,) array_like
Cartesian coordinates of the final position.
ind : array_like, optional
List of atomic indices to specify which atoms are displaced. If
`ind == None` (default) then all atoms are displaced.
Returns
-------
(N, 3) ndarray
The atomic cartesian coordinates of the displaced molecule.
"""
transax = ref_crd - test_crd
dist = np.linalg.norm(transax)
return translate(xyz, dist, transax, ind=ind)
def align_axis(xyz, test_ax, ref_ax, ind=None, origin=np.zeros(3)):
import numpy as np
from prml.dimreduction.pca import PCA
class BayesianPCA(PCA):
def fit(self, X, iter_max=100, initial="random"):
"""
empirical bayes estimation of pca parameters
Parameters
----------
X : (sample_size, n_features) ndarray
input data
iter_max : int
maximum number of em steps
Returns
-------
mean : (n_features,) ndarray
sample mean fo the input data
W : (n_features, n_components) ndarray
projection matrix
var : float
variance of observation noise
"""
initial_list = ["random", "eigen"]
self.mean = np.mean(X, axis=0)
self.I = np.eye(self.n_components)
if initial not in initial_list:
print("available initializations are {}".format(initial_list))
if initial == "random":
self.W = np.eye(np.size(X, 1), self.n_components)
#self.W = np.random.randn(np.size(X, 1), self.n_components)
self.var = 1.
elif initial == "eigen":
self.eigen(X)
self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
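# ARD-style precision per component: alpha_j = n_features / ||w_j||^2, so components
# with negligible projection weights receive a large alpha and are effectively pruned.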
for i in range(iter_max):
W = np.copy(self.W)
stats = self._expectation(X - self.mean)
self._maximization(X - self.mean, *stats)
#self.alpha = len(self.mean) / np.sum(self.W ** 2, axis=0).clip(min=1e-10)
#if np.allclose(W, self.W):
# break
self.n_iter = i + 1
self.C = self.W @ self.W.T + self.var * np.eye(np.size(X, 1))
self.Cinv = np.linalg.inv(self.C)
from matplotlib import pyplot as plt
from os import path
import numpy as np
import cv2 as cv
# Our local modules
from ds_tools.shared import util
def find_sift_correspondence(src_img, dest_img, visualize=False):
src_img = cv.equalizeHist(src_img)
dest_img = cv.equalizeHist(dest_img)
sift = cv.xfeatures2d.SIFT_create()
src_keypoints, src_descs = sift.detectAndCompute(src_img, None)
dest_keypoints, dest_descs = sift.detectAndCompute(dest_img, None)
FLANN_INDEX_KDTREE = 0
flann = cv.FlannBasedMatcher({'algorithm': FLANN_INDEX_KDTREE, 'trees': 5}, {'checks': 50})
all_matches = flann.knnMatch(src_descs, dest_descs, k=2)
matches = []
for a, b in all_matches:
if a.distance < 0.7 * b.distance:
matches.append(a)
print('Found {} matches!'.format(len(matches)))
if visualize:
src_points = np.float32([src_keypoints[m.queryIdx].pt for m in matches]).reshape(-1, 1, 2)
dst_points = np.float32([dest_keypoints[m.trainIdx].pt for m in matches])
import os
import yaml
import logging
import numpy as np
import pandas as pd
from time import time
from tqdm import trange
from typing import List
from datetime import datetime
import matplotlib.pyplot as plt
from ortools.graph import pywrapgraph
plt.style.use('seaborn')
class Worker:
"""
Worker class to store all the variables associated with a
given worker.
"""
def __init__(self, id: int,
learning_rate: float,
forgetting_rate: float,
initial_skill: List[int],
station_id: int):
"""
Initialise the variables associated with the current worker
:param id (int): Worker ID
:param learning_rate (float): Learning rate of the worker
:param forgetting_rate (float): Forgetting rate of the worker
:param initial_skill (list of ints): Skill levels for each station; length
must be equal to the number of stations
:param station_id (int): Initial station assignment
"""
self.id = id
self.beta = np.log2(learning_rate)
self.gamma = np.log2(forgetting_rate)
self.initial_skill = initial_skill
# Remaining skills of the worker for each station
self.rem_skill = initial_skill
self.assign_history = [station_id]
class Station:
"""
Station class to store all the variables associated with a
given station.
"""
def __init__(self, id: int,
S_max: float,
S_min: float,
delta: float,
eps: float):
"""
Initialise the variables associated with the current station.
:param id (int): ID of the current worker
:param S_max (float): Max theoretical skill level attainable
:param S_min (float): Min theoretical skill level attainable
:param delta (float): Practical max skill threshold
:param eps (float): Practical min skill threshold
"""
self.id = id
self.S_max = S_max
self.S_min = S_min
# Practically achievable skill levels
self.S_UB = int(S_max - delta)
self.S_LB = int(S_min + eps)
self.delta = delta
self.eps = eps
class WorkerStationProblem:
def __init__(self, num_workers: int,
num_stations: int,
num_time_steps: int,
demands: List[int], experiment_name: str):
"""
Initial setup for the Worker-Station problem
:param num_workers: Number of workers in the problem
:param num_stations: Number of stations in the problem;
Must be equal to num_workers
:param num_time_steps: Number of time steps to optimize
:param demands: List of ints containing demands at each time step
"""
assert num_workers == num_stations, "Number of workers must be equal to " \
"the number of work stations"
assert num_time_steps == len(demands), "Invalid number of demands"
self.num_workers = num_workers
self.num_stations = num_stations
self.num_time_steps = num_time_steps
self.demands = demands
self.source_id = 0
self.worker_id = list(range(1, self.num_workers + 1))
self.station_id = list(range(self.num_workers + 1, self.num_workers + self.num_stations + 1))
self.sink_id = self.num_stations + self.num_workers + 1
self.WIP_inventory = [0]*self.num_stations
self.output_inventory = [0]*self.num_time_steps
self.inventory_log = np.empty((0, 3))
self.mean_units = 0
self.actual_units = [0] * self.num_stations
self.final_num_units = []
self.experiment_time = 0
self.Q = []
# List to store the Worker and Station class objects
self.workers = []
self.stations = []
# Handle for the MinCostFlow solver
self.solver = pywrapgraph.SimpleMinCostFlow()
self.SAVE_PATH = experiment_name + '/'
os.makedirs(self.SAVE_PATH, exist_ok=True)
if os.path.exists(self.SAVE_PATH + experiment_name+'.log'):
os.remove(self.SAVE_PATH + experiment_name+'.log')
logging.basicConfig(filename=self.SAVE_PATH + experiment_name+'.log', level=logging.INFO, format='%(message)s')
logging.info("Experiment Date: {} \n".format(datetime.now().strftime("%d/%m/%Y %H:%M:%S")))
logging.info("Demands: {}".format(self.demands))
cols = []
for i in range(self.num_time_steps):
cols.append('Time Step {}'.format(i+1))
self.pd_file = pd.DataFrame(columns=cols)
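# Typical usage (illustrative sketch only; the numbers below are placeholders):
# wsp = WorkerStationProblem(num_workers=2, num_stations=2, num_time_steps=2,
# demands=[5, 6], experiment_name='demo')
# wsp.add_worker(lr=0.8, fr=1.2, initial_skill=[4, 6], station_id=1) # once per worker
# wsp.add_station(S_max=10, S_min=1, delta=1, eps=1) # once per station
# wsp.Solve()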
def _build_graph(self, cost_fn):
"""
Function to convert the Worker-Station assignment problem
into a Min-cost flow problem. Based on the above given data,
this function builds a graph, which can then be passed to the
solver.
Reference -
[1] https://developers.google.com/optimization/assignment/assignment_min_cost_flow
"""
"""
Graph Setup -
ID | Meaning
---------------------
0 | sink
1,2,..., N | workers
N+1, ...,2N | stations
2N+1 | sink
----------------------
"""
self.start_nodes = [0]*self.num_workers + \
sum([[i+1]*self.num_stations for i in range(self.num_workers)], []) + \
list(range(self.num_workers+1, self.num_workers + self.num_stations+1))
self.end_nodes = list(range(1, self.num_workers+1)) +\
list(range(self.num_stations+1, 2*self.num_stations+1))*self.num_workers + \
[2*self.num_stations+1]*self.num_stations
"""
Set the capacities all to one as this is just a simple mapping problem
and we do not have any cost associated with the mapping itself.
"""
self.capacities = [1]*self.num_workers
self.capacities += ([1]*self.num_workers)*self.num_stations
self.capacities += [1]*self.num_stations
"""
Computation of costs
- Zero costs for the links from the Source -> Worker
- User-defined cost for Worker -> Station
- Zero costs for links from Worker -> Sink
"""
self.costs = [0]*self.num_workers
# Compute the Worker - Station costs
for worker_id in range(self.num_workers):
for station_id in range(self.num_stations):
cost = cost_fn(worker_id, station_id)
self.costs.append(cost)
self.costs += [0]*self.num_stations
"""
Computation of Supplies
- Source supply = number of workers
- Zero supplies from Worker -> Station
- Sink supply = - number of stations
[Law of conservation of resources]
"""
self.supplies = [self.num_workers]
self.supplies += [0]*(self.num_workers+self.num_stations)
self.supplies += [-self.num_stations]
# Add each link(arc) to the solver graph
for i in range(len(self.start_nodes)):
self.solver.AddArcWithCapacityAndUnitCost(self.start_nodes[i], self.end_nodes[i],
self.capacities[i], self.costs[i])
# Add node supplies to the solver graph
for i in range(len(self.supplies)):
self.solver.SetNodeSupply(i, self.supplies[i])
def run_optimizer(self, log_history):
"""
Given a graph, this function runs the Mincostflow algorithm
:log_history: Boolean to control when to log the history of the
assignment to station. This is helpful during the
second phase of optimization, which is actually
the final assignment, and hence needs to be logged.
:return: None
"""
# Solve the min-cost flow problem once and branch on the returned status.
status = self.solver.Solve()
if status == self.solver.OPTIMAL:
logging.info('Total Minimum cost = {}'.format(self.solver.OptimalCost()))
for arc in range(self.solver.NumArcs()):
# Can ignore arcs leading out of source or into sink.
if self.solver.Tail(arc) != self.source_id and self.solver.Head(arc) != self.sink_id:
# Arcs in the solution have a flow value of 1. Their start and end nodes
# give an assignment of worker to task.
if self.solver.Flow(arc) > 0:
worker_id = self.solver.Tail(arc)
station_id = self.solver.Head(arc) - self.num_workers
self.theoretical_units_produced[station_id - 1] = \
self.workers[worker_id - 1].rem_skill[station_id - 1]
units_produced = self.workers[worker_id - 1].rem_skill[station_id - 1]
if log_history:
self.workers[worker_id - 1].assign_history.append(station_id)
logging.info('Worker {:d} assigned to Station {:d}, capable of {:d} units; Deficit = {:d}'.format(
worker_id,
station_id,
units_produced,
self.solver.UnitCost(arc)))
elif status == self.solver.INFEASIBLE:
raise RuntimeError("Infeasible problem input. Terminating code.")
else:
raise RuntimeError("Bad Result! Terminating code.")
def _compute_units_produced(self, compute_WIP):
# Compute the actual units produced & Inventory
"""
This calculation is simple - the current station cannot produce
more than its previous station (assuming empty inventory)
"""
self.actual_units = [0] * self.num_stations
for i in range(self.num_stations):
self.actual_units[i] = self.theoretical_units_produced[i]
if i > 0:
"""
If the current station can produce more than the previous station,
then max number of units produced by the current station is equal
to its previous station.
"""
if self.actual_units[i] > self.actual_units[i - 1]:
self.actual_units[i] = min(self.theoretical_units_produced[i],
self.actual_units[i - 1] + self.WIP_inventory[i])
if compute_WIP:
self.WIP_inventory[i] += self.actual_units[i - 1] - self.actual_units[i]
logging.info('Station {:d} practically produces {:d} units'.
format(i + 1, self.actual_units[i]))
if compute_WIP:
logging.info("WIP Inventory {}".format(self.WIP_inventory))
self.final_num_units.append(self.actual_units[-1])
self.output_inventory[self.current_time_step] = self.final_num_units[-1] - \
self.demands[self.current_time_step]
if self.current_time_step > 0:
self.output_inventory[self.current_time_step] += \
self.output_inventory[self.current_time_step-1]
def Solve(self):
"""
Function to solve for the optimum assignment using the
"Cost-scaling push-relabel algorithm"
Reference -
[1] https://developers.google.com/optimization/reference/graph/min_cost_flow/
"""
# Check if the worker and station objects have been added
assert len(self.workers) == self.num_workers, "Number of given workers " \
"less than num_workers"
assert len(self.stations) == self.num_stations, "Number of given stations " \
"less than num_stations"
self.current_time_step = 0
# Display the skills for each worker
logging.info("Initial Skill Levels:--------------------------------------------------")
for worker_id in range(self.num_workers):
logging.info("worker ID: {} Skill Levels: {}".format(worker_id + 1,
self.workers[worker_id].rem_skill))
for _ in trange(self.num_time_steps, desc="Time Step"):
start = time()
self._build_graph(cost_fn= self._compute_lost_sales_cost)
build_time = time() - start
logging.info("================== Solving for Time Step {} ==========================" .format(
self.current_time_step + 1))
logging.info("Graph Info:-----------------------------------------------------------")
logging.info("Worker IDs : {}".format(self.worker_id))
logging.info("Station IDs : {}".format(self.station_id))
logging.info("Start Nodes: {}".format(self.start_nodes))
logging.info("End Nodes: {}".format(self.end_nodes))
logging.info("Capacities: {}".format(self.capacities))
logging.info("Costs: {}".format(self.costs))
logging.info("Supplies: {}".format(self.supplies))
logging.info("------------------- Minimizing Lost Sales ----------------------------")
self.mean_units = 0
# Reset for each iteration
self.theoretical_units_produced = [0] * self.num_stations
start = time()
self.run_optimizer(log_history=False) # DO NOT log history
solve_time = time() - start
logging.info("---------------------------------------------------------------------")
self._compute_units_produced(compute_WIP=False)
del self.solver
self.solver = pywrapgraph.SimpleMinCostFlow()
logging.info("------------------- Minimizing Standard Deviation --------------------")
start = time()
self._build_graph(cost_fn= self._compute_std_cost)
build_time += time() - start
start = time()
self.run_optimizer(log_history=True) # Log final assignment
solve_time += time() - start
self.experiment_time += build_time + solve_time
logging.info("---------------------------------------------------------------------")
self._compute_units_produced(compute_WIP=True)
self.inventory_log = np.append(self.inventory_log, [self.WIP_inventory], axis=0)
logging.info("---------------------------------------------------------------------")
logging.info('Graph building time: {:.5f}s'.format(build_time))
logging.info('Solving time: {:.5f}s'.format(solve_time))
logging.info('Total time: {:.5f}s'.format(build_time + solve_time))
logging.info("======================================================================")
logging.info('\n')
self.current_time_step += 1
# Update the skills for each worker
logging.info("Updated Skill Levels:--------------------------------------------------")
for worker_id in range(self.num_workers):
for station_id in range(self.num_stations):
skill_level = self.update_skills(worker_id, station_id)
self.workers[worker_id].rem_skill[station_id] = skill_level
logging.info("worker ID: {} Skill Levels: {}".format(worker_id+1,
self.workers[worker_id].rem_skill))
del self.solver
self.solver = pywrapgraph.SimpleMinCostFlow()
logging.info("======================================================================")
logging.info('Total experiment time: {:.5f}s'.format(self.experiment_time))
# Save output to CSV
for i, worker in enumerate(self.workers):
self.pd_file = self.pd_file.append(pd.Series(worker.assign_history[1:],
index=self.pd_file.columns,
name='Worker {}'.format(i+1)))
self.pd_file.to_csv(self.SAVE_PATH + "Assignment Result.csv")
def add_worker(self, lr: float,
fr: float,
initial_skill: List[int],
station_id: int):
"""
Helper function to add a worker object to the problem setup
:param lr: Learning rate of the worker
:param fr: Forgetting rate of the worker
:param initial_skill: Initial skill levels for each station
:param station_id: Initial station assignment
:return:
"""
id = len(self.workers)
        assert len(initial_skill) == self.num_stations, \
            "len(initial_skill) must be equal to the number of stations"
"""
NOTE: station_id for assign_history is only for display purposes.
Therefore, we use 1-based indexing.
"""
self.workers.append(Worker(id, lr, fr, initial_skill, station_id))
def add_station(self, S_max: float,
S_min: float,
delta: float,
eps: float):
"""
Helper function to add Station object to the problem setup
:param S_max (float): Max theoretical skill level attainable
:param S_min (float): Min theoretical skill level attainable
:param delta (float): Practical max skill threshold
:param eps (float): Practical min skill threshold
"""
id = len(self.stations)
self.stations.append(Station(id, S_max, S_min, delta, eps))
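    # Hypothetical wiring of a 3-station line (object name and values illustrative):
    #   problem.add_station(S_max=60, S_min=10, delta=0.9, eps=0.1)   # repeat per station
    #   problem.add_worker(lr=0.3, fr=0.1, initial_skill=[20, 25, 15], station_id=1)
    #   problem.Solve()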
def update_skills(self, worker_id: int, station_id: int):
"""
Function to update the skill levels of a given
worker and a station at the end of each time step.
The skill is improved if the station id is the same
as the given worker's station id; else the skill
is deteriorated.
:param worker_id: Worker ID
:param station_id: Station ID
"""
if self.workers[worker_id].assign_history[-1] == station_id + 1:
S = self._skill_improvement(worker_id, station_id)
else:
S = self._skill_deterioration(worker_id, station_id)
return S
def _skill_improvement(self, worker_id, station_id) -> int:
"""
The skill improvement formula for a given worker_id k and
station_id j at time step l is given as follows -
S_jkl = S_max_j - (S_max_j - S_rem_jk)e^(beta_k*l)
:return: Improved skill level (int)
"""
l = 1 if self.current_time_step > 0 else 0
S_max = self.stations[station_id].S_max
S_rem = self.workers[worker_id].rem_skill[station_id]
beta = self.workers[worker_id].beta
exp_term = np.exp(beta*l)
S = (1 - exp_term)*S_max + S_rem*exp_term #S_max - (S_max - S_rem)*exp_term
S = int(round(abs(S)))
return min(S, self.stations[station_id].S_UB)
def _skill_deterioration(self, worker_id, station_id) -> int:
"""
The skill deterioration formula for a given worker_id k and
station_id j at time step l is given as follows -
S_rem_jkl = S_min_j + (S_jk - S_min_j)e^(gamma_k*l)
:return: Deteriorated skill level (int)
"""
l = 1 if self.current_time_step > 0 else 0
S_min = self.stations[station_id].S_min
S_curr = self.workers[worker_id].rem_skill[station_id]
gamma = self.workers[worker_id].gamma
exp_term = np.exp(gamma*l)
S = (1 - exp_term)*S_min + S_curr*exp_term #S_min + (S_curr - S_min)*exp_term
S = int(round(abs(S)))
return max(S, self.stations[station_id].S_LB)
def _compute_lost_sales_cost(self, worker_id, station_id) -> int:
S = self.workers[worker_id].rem_skill[station_id]
# if worker_id > 0:
# th = max([x if x < S else 0 for x in self.workers[worker_id - 1].rem_skill])
# else:
# th = 0
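        # Shortfall and surplus are penalized symmetrically, so this cost is
        # simply |demand_t - S| for the current time step t.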
current_loss = max(0, self.demands[self.current_time_step] - S) + \
max(0, S - self.demands[self.current_time_step])
return current_loss
def _compute_std_cost(self, worker_id, station_id):
self.mean_units = sum(self.actual_units)/len(self.actual_units)
S = self.workers[worker_id].rem_skill[station_id]
future_loss = 0
demand_erraticity = abs(np.diff(self.demands)).sum() # Total variation Distance
future_loss_weight = 3 #int(round(np.exp(-0.00405058 *demand_erraticity + 1.54692256)))
# print(future_loss_weight, demand_erraticity)
for i in range(self.current_time_step + 1, self.num_time_steps):
future_loss += max(0, self.demands[i] - S) + \
future_loss_weight*max(0, S - self.demands[i])
return int(round(abs((S - self.mean_units)))) + future_loss
def get_plots(self, is_show = False):
"""
Function to display all required plots.
:return:
"""
"""
Plot 1: Worker Station Assignment
"""
plt.figure()
for i, worker in enumerate(self.workers):
plt.plot(worker.assign_history, lw=2, ls='-', marker='o',
alpha=0.7, ms=10, label="Worker {}".format(i+1))
plt.xticks(np.arange(1, self.num_time_steps + 1, 1))
        plt.yticks(np.arange(1, self.num_stations + 1, 1))
plt.legend(loc='center left', bbox_to_anchor=(1, 0.5),
fontsize=15)
plt.xlabel('Time Steps', fontsize=15)
plt.ylabel('Station ID', fontsize=15)
plt.title('Worker Station Assignment', fontsize=15)
plt.tight_layout()
plt.savefig(self.SAVE_PATH + "Worker Station Assignment.pdf", dpi=300)
"""
Plot 2: Demand Vs Output
"""
plt.figure()
inds = np.arange(1, self.num_time_steps + 1)
width = 0.35
plt.bar(inds - width/2, self.demands,
width, label="Demands")
plt.bar(inds + width / 2, self.final_num_units,
width, label="Output")
        plt.xticks(np.arange(1, self.num_time_steps + 1, 1))
bl_info = {
"name": "Import Planar Code",
"author": "<NAME>",
"version": (1, 0),
"blender": (2, 80, 0),
"location": "File > Import > Planar Code",
"description": "Import planar code and construct mesh by assigning vertex positions.",
"warning": "",
"support": "TESTING",
"wiki_url": "",
"tracker_url": "",
"category": "Import-Export",
}
import bpy
import bmesh
import numpy as np
import mathutils as mu
from bpy.props import StringProperty, IntProperty, BoolProperty
import struct
import collections
import os
import random
class PlanarCodeReader:
def __init__(self, filename, index, embed2d, embed3d):
self.faceCounters = []
verts_loc, faces = self.read(filename, index)
if (not verts_loc):
return
if (len(verts_loc) <= 0):
return
# create new mesh
name = os.path.basename(filename) + "_" + str(index)
mesh = bpy.data.meshes.new(name)
mesh.from_pydata(verts_loc,[],faces)
mesh.update(calc_edges=True)
# create new bmesh
bm = bmesh.new()
bm.from_mesh(mesh)
# enable lookup
bm.verts.ensure_lookup_table()
bm.edges.ensure_lookup_table()
bm.faces.ensure_lookup_table()
if (embed2d):
pv = self.embed(bm)
print(pv)
if (embed3d):
self.liftup(bm, pv)
bm.to_mesh(mesh)
# create new object
obj = bpy.data.objects.new(name, mesh)
# set object location
obj.location = bpy.context.scene.cursor.location
# link the object to collection
bpy.context.scene.collection.objects.link(obj)
def read(self, filename, index):
self.f = open(filename, "rb")
verts = []
faces = []
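        # planar_code format (as written by plantri): after the ">>planar_code<<"
        # header, each graph is one byte giving the vertex count, followed, for
        # every vertex, by its neighbours in clockwise order (1-based indices)
        # terminated by a 0 byte.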
try:
DEFAULT_HEADER = b">>planar_code<<"
header = self.f.read(len(DEFAULT_HEADER))
if (header == DEFAULT_HEADER):
print(index)
self.skip(index)
# create verts
num_vert = struct.unpack('b', self.f.read(1))
i = 0
while i < num_vert[0]:
# create vertex
verts.append((0, 0, 0))
                    # read adjacent vertices
adj = []
while True:
tmp = struct.unpack('b', self.f.read(1))
if (tmp[0] <= 0): # 0 means separator
break
adj.append(tmp[0])
# add face counter
lastIndex = len(adj)-1
for j in range(lastIndex):
self.addIfAbsent(collections.Counter([i, adj[j]-1, adj[j+1]-1]))
self.addIfAbsent(collections.Counter([i, adj[0]-1, adj[lastIndex]-1]))
i += 1
for counter in self.faceCounters:
faces.append(tuple(counter))
        except Exception as e:
            print(f"Error in reading {filename}: {e}")
            self.f.close()
            return [], []
self.f.close()
del self.f
return verts, faces
def skip(self, index):
# skip to target index
for i in range(index):
num_vert = struct.unpack('b', self.f.read(1))
n = num_vert[0]
while n > 0:
d = struct.unpack('b', self.f.read(1))
if (d[0] == 0):
n -= 1
def addIfAbsent(self, fc):
for counter in self.faceCounters:
if (counter == fc):
break
else:
self.faceCounters.append(fc)
def embed(self, bm):
# randomly pick up a face
outerFace = bm.faces[random.randint(0, len(bm.faces)-1)]
# embed an outer face to form a regular polygon inscribed into a circle
n = len(outerFace.verts)
inv_sqrt = 1.0 / np.sqrt(n)
angle = 360.0 / n
for i, v in enumerate(outerFace.verts):
rad = (i * angle / 180.0) * np.pi
x = inv_sqrt * np.cos(rad)
y = inv_sqrt * np.sin(rad)
v.co.x = x
v.co.y = y
rests = []
for v in bm.verts:
if (not v in outerFace.verts):
rests.append(v)
# variables for the force F_uv on a Edge(u,v)
fuv = np.zeros((len(bm.edges), 3))
# force F_v on a Vertex(v)
fv = np.zeros((len(bm.verts), 3))
# Constant value
n_pi = np.sqrt(len(bm.verts) / np.pi)
# final double A = 2.5;
avg_area = np.pi / len(bm.verts)
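        # Force-directed (spring) embedding: the outer face stays pinned on the
        # circle set above while the remaining vertices relax under the edge
        # forces F_uv and vertex forces F_v until the iteration budget is spent.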
loop = 0
# iterations
while (loop < 500):
# Set F_v to zero
fv[:] = 0
# Calculate F_uv for Edges
for j, e in enumerate(bm.edges):
v = e.verts[0]
u = e.verts[1]
C = n_pi
                x = C * np.power(v.co.x - u.co.x, 3)
#! /usr/bin/env python
"""
Forward model matched filter relying on either KLIP (Soummer et al. 2012;
Pueyo 2016) or LOCI (Lafreniere et al. 2007b) for the PSF reference
approximation. The original concept of matched filter applied to KLIP has been
first proposed in Ruffio et al. (2019) and then adapted in Dahlqvist et al.
(2021) to use the LOCI framework. For both PSF-subtraction techniques, a
forward model of the PSF is computed for each pixel contained in the field of
view and each frame to account for the over-subtraction and self-subtraction
of potential planetary signal due to the reference PSF subtraction. The
obtained model is then compared to the pixels intensities within each frame of
the residual cube. The SNR associated to each pixel contained in the field of
view, as well as its estimated contrast is thn obtained via a
Gaussian maximum likelihood approach.
"""
__author__ = '<NAME>'
__all__ = ['fmmf']
import numpy as np
import numpy.linalg as la
from skimage.draw import disk
from ..var import get_annulus_segments, frame_center
from ..preproc import frame_crop,cube_crop_frames,cube_derotate
from ..config.utils_conf import pool_map, iterable
from ..fm import cube_inject_companions
from ..preproc.derotation import _find_indices_adi
def fmmf(cube, pa, psf, fwhm, min_r=None,max_r=None, model='KLIP',var='FR',
param={'ncomp': 20, 'tolerance': 5e-3, 'delta_rot':0.5}, crop=5,
imlib='opencv', interpolation='lanczos4',ncore=1,verbose=True):
"""
Forward model matched filter generating SNR map and contrast map, using
either KLIP or LOCI as PSF subtraction techniques.
Parameters
----------
cube : numpy ndarray, 3d
Input cube (ADI sequences), Dim 1 = temporal axis, Dim 2-3 =
spatial axis
pa : numpy ndarray, 1d
Parallactic angles for each frame of the ADI sequences.
psf : numpy ndarray 2d
2d array with the normalized PSF template, with an odd shape.
The PSF image must be centered wrt to the array! Therefore, it is
recommended to run the function ``normalize_psf`` to generate a
centered and flux-normalized PSF template.
fwhm: int
Full width at half maximum for the instrument PSF
min_r : int,optional
Center radius of the first annulus considered in the FMMF detection
map estimation. The radius should be larger than half
        the value of the 'crop' parameter. Default is None, which
corresponds to one FWHM.
max_r : int
Center radius of the last annulus considered in the FMMF detection
map estimation. The radius should be smaller or equal to half the
size of the image minus half the value of the 'crop' parameter.
Default is None which corresponds to half the size of the image
minus half the value of the 'crop' parameter.
model: string, optional
Selected PSF-subtraction technique for the computation of the FMMF
detection map. FMMF work either with KLIP or LOCI. Default is 'KLIP'.
var: str, optional
Model used for the residual noise variance estimation used in the
matched filtering (maximum likelihood estimation of the flux and SNR).
Three different approaches are proposed: 'FR', 'FM', and 'TE'.
'FR': consider the pixels in the selected annulus with a width equal
to asize but separately for every frame.
'FM': consider the pixels in the selected annulus with a width
        equal to asize but separately for every frame. Apply a mask of one
        FWHM on the selected pixel and its surroundings.
        'TE': rely on the method developed in PACO to estimate the
        residual noise variance (take the pixels in a region of one FWHM
        around the selected pixel, considering every frame in the
derotated cube of residuals except for the selected frame)
param: dict, optional
        Dictionary grouping the parameters used by the KLIP (ncomp and
delta_rot) or LOCI (tolerance and delta_rot) PSF-subtraction
technique.
ncomp : int, optional
Number of components used for the low-rank approximation of the
speckle field. Default is 20.
tolerance: float, optional
Tolerance level for the approximation of the speckle field via
a linear combination of the reference images in the LOCI
algorithm. Default is 5e-3.
delta_rot : float, optional
            Factor for tuning the parallactic angle threshold, expressed
in FWHM. Default is 0.5 (excludes 0.5xFHWM on each side of the
considered frame).
crop: int, optional
        Part of the PSF template considered in the estimation of the FMMF
detection map. Default is 5.
imlib : str, optional
Parameter used for the derotation of the residual cube. See the
documentation of the ``vip_hci.preproc.frame_rotate`` function.
interpolation : str, optional
Parameter used for the derotation of the residual cube. See the
documentation of the ``vip_hci.preproc.frame_rotate`` function.
ncore : int, optional
Number of processes for parallel computing. By default ('ncore=1')
the algorithm works in single-process mode.
verbose: bool, optional
If True provide a message each time an annulus has been treated.
Default True.
Returns
-------
flux_matrix : 2d ndarray
Maximum likelihood estimate of the contrast for each pixel in the field
of view
snr_matrix : 2d ndarray
Signal to noise ratio map (defined as the estimated contrast divided by
the estimated standard deviation of the contrast).
"""
if crop>=2*round(fwhm)+1:
raise ValueError("Maximum cropsize should be lower or equal to two"+
" FWHM,please change accordingly the value of 'crop'")
if min_r is None:
min_r=int(round(fwhm))
if max_r is None:
max_r= cube.shape[-1]//2-(crop//2+1)
res_full = pool_map(ncore, snr_contrast_esti, iterable(range(min_r,max_r)),
cube, pa, psf, fwhm, model,var,param, crop, imlib,
interpolation,verbose)
flux_matrix=np.zeros((cube.shape[1],cube.shape[2]))
snr_matrix=np.zeros((cube.shape[1],cube.shape[2]))
for res_temp in res_full:
indices=get_annulus_segments(cube[0], res_temp[2],1)
flux_matrix[indices[0][0],indices[0][1]]=res_temp[0]
snr_matrix[indices[0][0],indices[0][1]]=res_temp[1]
return flux_matrix,snr_matrix
def var_esti(mcube,pa,var,crop,ann_center):
"""
Computation of the residual noise variance
"""
n,y,x=mcube.shape
if var=='FR':
var_f=np.zeros(n)
indices = get_annulus_segments(mcube[0], ann_center-int(crop/2),crop,1)
poscentx=indices[0][1]
poscenty=indices[0][0]
for a in range(n):
var_f[a]=np.var(mcube[a,poscenty,poscentx])
elif var=='FM' :
indices=get_annulus_segments(mcube[0], ann_center,1,1)
indicesy=indices[0][0]
indicesx=indices[0][1]
var_f=np.zeros((len(indicesy),n))
indices = get_annulus_segments(mcube[0], ann_center-int(crop/2),crop,1)
for a in range(len(indicesy)):
indc=disk((indicesy[a], indicesx[a]),3)
positionx=[]
positiony=[]
for k in range(0,len(indices[0][1])):
cond1=set(np.where(indices[0][1][k]==indc[1])[0])
cond2=set(np.where(indices[0][0][k]==indc[0])[0])
if len(cond1 & cond2)==0:
positionx.append(indices[0][1][k])
positiony.append(indices[0][0][k])
for b in range((n)):
var_f[a,b]=np.var(mcube[b,positiony,positionx])
elif var=='TE' :
indices=get_annulus_segments(mcube[0], ann_center,1,1)
indicesy=indices[0][0]
indicesx=indices[0][1]
var_f=np.zeros((len(indicesy),n))
mcube_derot=cube_derotate(mcube,-pa)
for a in range(0,len(indicesy)):
radist=np.sqrt((indicesx[a]-int(x/2))**2+(indicesy[a]-int(y/2))**2)
if (indicesy[a]-int(y/2))>=0:
ang_s= np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
else:
ang_s= 360-np.arccos((indicesx[a]-int(x/2))/radist)/np.pi*180
for b in range(n):
twopi=2*np.pi
sigposy=int(y/2 + np.sin((ang_s-pa[b])/360*twopi)*radist)
sigposx=int(x/2+ np.cos((ang_s-pa[b])/360*twopi)*radist)
y0 = int(sigposy - int(crop/2))
y1 = int(sigposy + int(crop/2)+1) # +1 cause endpoint is
#excluded when slicing
x0 = int(sigposx - int(crop/2))
x1 = int(sigposx + int(crop/2)+1)
mask = np.ones(mcube_derot.shape[0],dtype=bool)
mask[b]=False
mcube_sel=mcube_derot[mask,y0:y1,x0:x1]
var_f[a,b]=np.var(np.asarray(mcube_sel))
return var_f
def snr_contrast_esti(ann_center,cube, pa, psf, fwhm, model,var,param, crop
, imlib, interpolation,verbose):
"""
Computation of the SNR and contrast associated to the pixels contained
in a given annulus via the foward model matched filter
"""
n,y,x=cube.shape
evals_matrix=[]
evecs_matrix=[]
KL_basis_matrix=[]
refs_mean_sub_matrix=[]
sci_mean_sub_matrix=[]
resicube_klip=None
ind_ref_list=None
coef_list=None
ncomp=param['ncomp']
tolerance=param['tolerance']
delta_rot=param['delta_rot']
# Computation of the reference PSF, and the matrices
# required for the computation of the PSF forward models
pa_threshold = np.rad2deg(2 * np.arctan(delta_rot * fwhm / (2 * (ann_center))))
mid_range = np.abs(np.amax(pa) - np.amin(pa)) / 2
if pa_threshold >= mid_range - mid_range * 0.1:
pa_threshold = float(mid_range - mid_range * 0.1)
if model=='KLIP':
resicube_klip=np.zeros_like(cube)
indices = get_annulus_segments(cube[0],
ann_center-int(round(fwhm)/2),int(round(fwhm)),1)
for k in range(0,cube.shape[0]):
res_temp=KLIP_patch(k,cube[:, indices[0][0], indices[0][1]],
ncomp,pa, int(round(fwhm)), pa_threshold, ann_center)
evals_temp=res_temp[0]
evecs_temp=res_temp[1]
KL_basis_temp=res_temp[2]
sub_img_rows_temp=res_temp[3]
refs_mean_sub_temp=res_temp[4]
sci_mean_sub_temp=res_temp[5]
resicube_klip[k,indices[0][0], indices[0][1]] = sub_img_rows_temp
evals_matrix.append(evals_temp)
evecs_matrix.append(evecs_temp)
KL_basis_matrix.append(KL_basis_temp)
refs_mean_sub_matrix.append(refs_mean_sub_temp)
sci_mean_sub_matrix.append(sci_mean_sub_temp)
mcube=cube_derotate(resicube_klip,pa,imlib=imlib,
interpolation=interpolation)
elif model=='LOCI':
resicube, ind_ref_list,coef_list=LOCI_FM(cube, psf, ann_center, pa,
int(round(fwhm)), fwhm, tolerance,delta_rot,pa_threshold)
mcube=cube_derotate(resicube,pa,imlib=imlib,
interpolation=interpolation)
ceny, cenx = frame_center(cube[0])
indices=get_annulus_segments(mcube[0], ann_center,1,1)
indicesy=indices[0][0]
indicesx=indices[0][1]
flux_esti=np.zeros_like(indicesy)
prob_esti=np.zeros_like(indicesy)
var_f=var_esti(mcube,pa,var,crop,ann_center)
for i in range(0,len(indicesy)):
psfm_temp=None
poscenty=indicesy[i]
poscentx=indicesx[i]
indices = get_annulus_segments(cube[0],
ann_center-int(round(fwhm)/2),int(round(fwhm)),1)
an_dist = np.sqrt((poscenty-ceny)**2 + (poscentx-cenx)**2)
theta = np.degrees(np.arctan2(poscenty-ceny, poscentx-cenx))
model_matrix=cube_inject_companions(np.zeros_like(cube), psf, pa,
flevel=1, plsc=0.1,
rad_dists=an_dist, theta=theta, n_branches=1,verbose=False)
#PSF forward model computation for KLIP
if model=='KLIP':
psf_map=np.zeros_like(model_matrix)
for b in range(0,n):
psf_map_temp = perturb(b,
model_matrix[:, indices[0][0], indices[0][1]],
ncomp,evals_matrix, evecs_matrix,KL_basis_matrix,
sci_mean_sub_matrix,refs_mean_sub_matrix, pa, fwhm,
pa_threshold, ann_center)
psf_map[b,indices[0][0], indices[0][1]]=psf_map_temp
psf_map[b,indices[0][0], indices[0][1]]-=np.mean(psf_map_temp)
psf_map_der = cube_derotate(psf_map, pa, imlib=imlib,
interpolation=interpolation)
psfm_temp=cube_crop_frames(psf_map_der,int(2*round(fwhm)+1),
xy=(poscentx,poscenty),verbose=False)
#PSF forward model computation for LOCI
if model=='LOCI':
values_fc = model_matrix[:, indices[0][0], indices[0][1]]
cube_res_fc=np.zeros_like(model_matrix)
matrix_res_fc = np.zeros((values_fc.shape[0],
indices[0][0].shape[0]))
for e in range(values_fc.shape[0]):
recon_fc = np.dot(coef_list[e], values_fc[ind_ref_list[e]])
matrix_res_fc[e] = values_fc[e] - recon_fc
cube_res_fc[:, indices[0][0], indices[0][1]] = matrix_res_fc
cube_der_fc = cube_derotate(cube_res_fc-np.mean(cube_res_fc),
pa, imlib=imlib, interpolation=interpolation)
psfm_temp=cube_crop_frames(cube_der_fc,int(2*round(fwhm)+1),
xy=(poscentx,poscenty),verbose=False)
num=[]
denom=[]
# Matched Filter
for j in range(n):
if var=='FR':
svar=var_f[j]
elif var=='FM' :
svar=var_f[i,j]
elif var=='TE':
svar=var_f[i,j]
if psfm_temp.shape[1]==crop:
psfm=psfm_temp[j]
else:
psfm=frame_crop(psfm_temp[j],
crop,cenxy=[int(psfm_temp.shape[-1]/2),
int(psfm_temp.shape[-1]/2)],verbose=False)
num.append(np.multiply(frame_crop(mcube[j],crop,
cenxy=[poscentx,poscenty],verbose=False),psfm).sum()/svar)
denom.append(np.multiply(psfm,psfm).sum()/svar)
flux_esti[i]=sum(num)/np.sqrt(sum(denom))
prob_esti[i]=sum(num)/sum(denom)
if verbose==True:
print("Radial distance "+"{}".format(ann_center)+" done!")
return prob_esti,flux_esti,ann_center
def perturb(frame,model_matrix,numbasis,evals_matrix, evecs_matrix,
KL_basis_matrix,sci_mean_sub_matrix,refs_mean_sub_matrix,
angle_list, fwhm, pa_threshold, ann_center):
"""
Function allowing the estimation of the PSF forward model when relying on
KLIP for the computation of the speckle field. The code is based on the
    PyKLIP library considering only the ADI case with a single number of
principal components considered. For more details about the code, consider
the PyKLIP library or the original articles (Pueyo, L. 2016, ApJ, 824, 117
or <NAME>., <NAME>., <NAME>., & Pueyo, L. 2017, ApJ, 842)
"""
    #Selection of the reference library based on the given parallactic angle threshold
if pa_threshold != 0:
indices_left = _find_indices_adi(angle_list, frame,
pa_threshold, truncate=False)
models_ref = model_matrix[indices_left]
else:
models_ref = model_matrix
#Computation of the self-subtraction and over-subtraction for the current frame
model_sci = model_matrix[frame]
KL_basis=KL_basis_matrix[frame]
sci_mean_sub=sci_mean_sub_matrix[frame]
refs_mean_sub=refs_mean_sub_matrix[frame]
evals=evals_matrix[frame]
evecs=evecs_matrix[frame]
max_basis = KL_basis.shape[0]
N_pix = KL_basis.shape[1]
models_mean_sub = models_ref - np.nanmean(models_ref, axis=1)[:,None]
models_mean_sub[np.where(np.isnan(models_mean_sub))] = 0
model_sci_mean_sub = model_sci- np.nanmean(model_sci)
model_sci_mean_sub[np.where(np.isnan(model_sci_mean_sub))] = 0
model_sci_mean_sub_rows = np.reshape(model_sci_mean_sub,(1,N_pix))
sci_mean_sub_rows = np.reshape(sci_mean_sub,(1,N_pix))
delta_KL = np.zeros([max_basis, N_pix])
models_mean_sub_X_refs_mean_sub_T = models_mean_sub.dot(refs_mean_sub.transpose())
for k in range(max_basis):
Zk = np.reshape(KL_basis[k,:],(1,KL_basis[k,:].size))
Vk = (evecs[:,k])[:,None]
diagVk_X_models_mean_sub_X_refs_mean_sub_T = (Vk.T).dot(models_mean_sub_X_refs_mean_sub_T)
models_mean_sub_X_refs_mean_sub_T_X_Vk = models_mean_sub_X_refs_mean_sub_T.dot(Vk)
        # The tail of this line was truncated in the source; it is reconstructed
        # below following the standard KLIP perturbation expression (Pueyo 2016)
        # that the two intermediate products above feed into.
        DeltaZk = -(1 / (2 * np.sqrt(evals[k]))) * (
            diagVk_X_models_mean_sub_X_refs_mean_sub_T.dot(Vk)
            + (Vk.T).dot(models_mean_sub_X_refs_mean_sub_T_X_Vk)) * Zk \
            + (Vk.T).dot(models_mean_sub)
######################
## Version 0.10 #######
######################
"""
**********************************************************************
Copyright(C) 2020 <NAME>
email: <EMAIL>
<EMAIL>
This file contains the code for Polynomial regression via latent
tensor reconstruction (PRLTR).
PRLTR is free software: you can redistribute it and/or modify
it under the terms of the GNU Lesser General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
PRLTR is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Lesser General Public License for more details.
You should have received a copy of the GNU General Public License
along with PRLTR. If not, see <http://www.gnu.org/licenses/>.
***********************************************************************/
"""
"""
Polynomial regression via latent tensor reconstruction
Activation function a(X\lambda_u U)V^{T}
Version 0.10 (15.03.2021)
"""
"""
Vector output, one view, ranks range processed as in one step
polynomial encoding, a(X\lambda_{u}U)V^{T}
"""
## #####################################################
import sys
import time
import pickle
import numpy as np
## #####################################################
## import matplotlib
## import matplotlib.pyplot as plt
## import matplotlib.cm as cmplot
## ## import matplotlib.animation as animation
## import mpl_toolkits.mplot3d.axes3d as p3
## import mpl_toolkits.mplot3d.art3d as art3d
## ## from matplotlib.collections import Line3DCollection
## from matplotlib.colors import colorConverter
## ###################################################
## ################################################################
## ################################################################
## ################################################################
class store_cls:
def __init__(self):
self.lPstore=None
return
## ################################################################
class tensor_latent_vector_cls:
"""
Task: to implement the latent tensor reconstruction based
polynomial regression
"""
## -------------------------------------------------
def __init__(self,norder=1,nordery=1,rank=10,rankuv=None):
"""
Task: to initialize the ltr object
Input: norder input degree(oreder) of the polynomial
nordery output degree, in the current version it has to be 1 !!!
rank the number of rank-one layers
rankuv common rank in UV^{T} U ndimx,rankuv, V nrank,nrankuv
"""
self.ifirstrun=1 ## =1 first fit run
## =0 next run for new data or
self.norder=norder ## input degree(order)
self.nordery=nordery ## output degree
## -----------------------------------------
self.nrank0=rank ## initial saved maximum rank
## initial maximum rank, in the rank extension self.nrank it is changed
## if in function fit irank_add=1
self.nrank=rank
## ------------------------------------------
self.nrankuv=rankuv ## P=UV^{T}, U ndimx,nrankuv V nrank,nrankuv
self.nminrank=0 ## initial minimum rank
self.lranks=[] ## accumulated (nminrank,nrank) in the rank blocks
self.rankcount=0 ## number of extensions
self.ndimx=0 ## view wise input dimensions
self.ndimy=0 ## output dimension
self.iter=0 ## iteration counter used in the ADAM update
self.cregular=0.000005
self.cP=self.cregular ## regularization penalty on U,V
self.cQ=self.cregular ## regularization penalty on Q
self.clambda=self.cregular ## lambda regularization constant
## parameter initialization
# self.xP=None ## input poly parameters (order,rank,ndimx)
# self.xPnext=None ## input next poly parameters in NAG
# self.xGrad=None ## the gardients of xP vectors
# self.xV=None ## aggregated gradients of xP
# self.xnG=None ## aggregated gradient lenght^2
self.xU=None ## input poly parameters (order,rankuv,ndimx)
self.xUnext=None ## input next poly parameters in NAG
self.xGradU=None ## the gardients of xU vectors
self.xAU=None ## aggregated gradients of xU
self.xnAU=None ## aggregated gradient lenght^2
self.xV=None ## input poly parameters (order,rank,nrankuv)
self.xVnext=None ## input next poly parameters in NAG
self.xGradV=None ## the gardients of xP vectors
self.xAV=None ## aggregated gradients of xP
self.xnAV=None ## aggregated gradient lenght^2
self.xQ=None ## output poly parameter (rank,dimy)
self.xQnext=None ## output next in NAG
self.xGradQ=None ## the gradients of xQ
self.xAQ=None ## the aggregated gradients of xQ
self.xnAQ=None ## the aggregated gradient length^2
self.xlambda=None ## lambda factors
self.xlambdanext=0 ## next for NAG
self.xlambdagrad=0 ## lambda gradient
self.xAlambda=0 ## aggregated lambda gradients
self.xnAlambda=0 ## aggrageted lambda gadient length
self.xlambdaU=None ## lambda factors
self.xlambdanextU=0 ## next for NAG
self.xlambdagradU=0 ## lambda gradient
self.xAlambdaU=0 ## aggregated lambda gradients
self.xnAlambdaU=0 ## aggrageted lambda gadient length
self.ilambda=1 ## xlambda is updated by gradient
self.ilambdamean=0 ## =1 mean =0 independent components
self.f=None ## computed function value
self.yerr=[] ## rmse block error
self.irandom=1 ## =1 random block =0 order preserving blocks
self.iscale=1 ## =1 error average =0 no
self.dscale=2 ## x-1/dscale *x^2 stepsize update
## ADAM + NAG
self.sigma0=0.1 ## initial learning speed
self.sigma=0.1 ## updated learning speed
self.sigmabase=1 ## sigma scale
self.gamma=1.0 ## discount factor
self.gammanag=0.99 ## Nesterov accelerated gradient factor
self.gammanag2=0.99 ## Nesterov accelerated gradient factor
self.ngeps=0.00001 ## ADAM correction to avoid 0 division
self.nsigma=1 ## range without sigma update
self.sigmamax=1 ## maximum sigma*len_grad
self.iyscale=1 ## =1 output vectors are scaled
self.yscale=1 ## the scaling value
self.mblock=10 ## data block size, number of examples
self.mblock_gap=None ## shift of blocks
self.ibias=1 ## =1 bias is computed =0 otherwise
self.pbias=None ## bias vectors, matrix with size nrank,ndimy
self.lbias=[]
self.inormalize=1 ## force normalization in each iteration
## test environment
self.istore=0
self.cstore_numerator=store_cls()
self.cstore_denominator=store_cls()
self.cstore_output=store_cls()
self.store_bias=None
self.store_yscale=None
self.store_lambda=None
self.store_grad=[]
self.store_acc=[]
self.max_grad_norm=0 ## maximum gradient norm
self.maxngrad=0 ## to signal of long gradients
self.Ytrain=None ## deflated output vectors
## parameter variables which can be stored after training
## and reloaded in test
self.lsave=['norder','nrank','iyscale','yscale','ndimx','ndimy', \
'xU','xV','xQ','xlambda','pbias']
## activation function
self.iactfunc=0 ## =0 identity, =1 arcsinh =2 2*sigmoid-1 =3 tanh =4 relu
## loss degree
self.lossdegree=0 ## =0 L_2^2, =1 L_2, =0.5 L_2^{0.5}, ...L_2^{z}
## Kolmogorov mean
self.ikolmogorov=1 ## L_1 norm approximation log(cosh(tx))
self.kolm_t=1 ## t factor in cosh
## power regularization
self.regdegree=1 ## degree of regularization
## penalty constant to enforce the orthogonality all P[order,rank,:] vectors
self.iortho=0 ## =1 orthogonality forced =0 not
self.cortho=0.0 ## penalty constant
## nonlinear regularization \dfrac{\partial f}{\partial x \partial P}
self.iregular_nonlinear=0 ## =1 regularize =0 not
self.cregularnl=0.0005 ## penalty constant
self.report_freq=100 ## state report frequency relative to the number of minibatches.
return
## ------------------------------------------------
def init_lp(self):
"""
Task: to initialize the parameter vectors U,V,Q, bias,lambda lambdaU
Input:
Output:
Modifies: self.xU, self.xV, self,xQ, self.xlambda, self. xlambdaU, self.pbias
"""
nrank=self.nrank
nrankuv=self.nrankuv
self.xU=np.random.randn(self.norder,self.ndimx,nrankuv)
self.xV=np.random.randn(self.norder,nrank,self.nrankuv)
self.xQ=np.random.randn(nrank,self.ndimy)
self.xlambda=np.ones(nrank)
self.xlambdaU=np.ones(self.ndimx)
self.pbias=np.zeros((1,self.ndimy))
return
# ## ------------------------------------------------
# def volume(self,irank):
# """
# Task: to compute of the volume spanned by vectors
# xP[0][irank],...,xP[norder-1][irank]
# Output: vol scalar volume
# """
# P=np.array([ self.xP[t,irank] for t in range(self.norder)])
# PP=np.dot(P,P.T)
# vol=np.linalg.det(PP)
# if vol<0:
# vol=0
# vol=np.sqrt(vol)
# return(vol)
# ## ------------------------------------------------
def init_grad(self):
"""
Task: to initialize the gradients
Input:
Output:
Modifies:
self.xGradU the gradient of the xU
self.xAU the accumulated gradient of xU
self.xnAU the accumulated gradient norms of xU
self.xUnext the pushforward xU ( Nesterov accelerated gradient)
self.xGradV the gradient of the xV
self.xAV the accumulated gradient of xV
self.xnAV the accumulated gradient norms of xV
self.xVnext the pushforward xU ( Nesterov accelerated gradient)
self.xGradQ the gradient of the xQ
self.xAQ the accumulated gradient of xQ
self.xnAQ the accumulated gradient norms of xQ
self.xQnext the pushforward xQ ( Nesterov accelerated gradient)
"""
drank=self.nrank-self.nminrank
ndimx=self.ndimx
ndimy=self.ndimy
norder=self.norder
nrankuv=self.nrankuv
self.xGradU=np.zeros((norder,ndimx,nrankuv))
self.xAU=np.zeros((norder,ndimx,nrankuv))
self.xnAU=np.zeros((norder,ndimx))
self.xUnext=np.zeros((norder,ndimx,nrankuv))
self.xGradV=np.zeros((norder,drank,nrankuv))
self.xAV=np.zeros((norder,drank,nrankuv))
self.xnAV=np.zeros((norder,drank))
self.xVnext=np.zeros((norder,drank,nrankuv))
self.xGradQ=np.zeros((drank,ndimy))
self.xAQ=np.zeros((drank,ndimy))
self.xnAQ=np.zeros((drank))
self.xQnext=np.zeros((drank,ndimy))
self.xlambdagrad=np.zeros(drank)
self.xAlambda=np.zeros(drank)
self.xnAlambda=0
self.xlambdanext=np.zeros(drank)
self.xlambdagradU=np.zeros(ndimx)
self.xAlambdaU=np.zeros(ndimx)
self.xnAlambdaU=0
self.xlambdanextU=np.zeros(ndimx)
return
## -------------------------------------------------
def extend_poly_parameters_rank(self,nextrank):
"""
Task: extedn the parameter lists to the next rank
Input: nextrank the extended rank beyond self.nrank
Output:
Modifies: self.xV, self.xQ, self.xlambda, self.pbias
"""
nrank=self.nrank
nrankuv=self.nrankuv
ndimx=self.ndimx
ndimy=self.ndimy
if nextrank>nrank:
drank=nextrank-nrank
xprev=np.copy(self.xV)
self.xV=np.zeros((self.norder,nextrank,nrankuv))
for d in range(self.norder):
self.xV[d]=np.vstack((xprev[d],np.random.randn(drank,nrankuv)))
xprev=np.copy(self.xQ)
self.xQ=np.vstack((xprev,np.random.randn(drank,ndimy)))
self.xlambda=np.concatenate((self.xlambda,np.ones(drank)))
self.pbias=np.vstack((self.pbias,np.zeros(ndimy)))
return
## -------------------------------------------------
def update_parameters(self,**dparams):
"""
Task: to update the initialized parameters
Input: dprams dictionary { parameter name : value }
Output:
Modifies: corresponding parameters
"""
for key,value in dparams.items():
if key in self.__dict__:
self.__dict__[key]=value
if self.mblock_gap is None:
self.mblock_gap=self.mblock
return
## ------------------------------------------------
def normalize_lp(self,ilambda=1):
"""
        Task: to project, normalize by L2 norm, the polynomial parameters
              xU, xV, xQ and the variable weights xlambdaU
Input:
ilambda =1 xlambda[irank], vlambda nglambda is updated
with the product of lenght of the parameter
vectors before normalization
Output: xnormlambda the product of lenght of the parameter
vectors before normalization
Modifies: xP, xQ
or
xP[irank], xQ[irank], xlambda[irank], vlambda nglambda
"""
nrank=self.nrank
nrankuv=self.nrankuv
nminrank=self.nminrank
drank=nrank-nminrank
xnormlambda=np.ones(drank)
xnormlambdaU=np.ones(self.ndimx)
for d in range(self.norder):
xnorm=np.sqrt(np.sum(self.xU[d]**2,1))
xnorm=xnorm+(xnorm==0)
## xnorm/=np.sqrt(nrankuv)
self.xU[d]=self.xU[d] \
/np.outer(xnorm,np.ones(nrankuv))
xnormlambdaU*=xnorm
for d in range(self.norder):
xnorm=np.sqrt(np.sum(self.xV[d,nminrank:nrank]**2,1))
xnorm=xnorm+(xnorm==0)
## xnorm/=np.sqrt(nrankuv)
self.xV[d,nminrank:nrank]=self.xV[d,nminrank:nrank] \
/np.outer(xnorm,np.ones(nrankuv))
## xnormlambda*=xnorm
xnorm=np.sqrt(np.sum(self.xQ[nminrank:nrank]**2,1))
xnorm=xnorm+(xnorm==0)
## xnorm/=np.sqrt(self.ndimy)
self.xQ[nminrank:nrank]=self.xQ[nminrank:nrank] \
/np.outer(xnorm,np.ones(self.ndimy))
## xnormlambda*=xnorm
self.xlambdaU*=xnormlambdaU
xnorm=np.sqrt(np.sum(self.xlambdaU**2))
xnorm=xnorm+(xnorm==0)
xnorm/=np.sqrt(self.ndimx)
self.xlambdaU/=xnorm
## xnormlambda*=xnorm
self.xlambda[nminrank:nrank]*=xnormlambda
self.xAlambda*=np.power(xnormlambda,0.5)
## to avoid overflow
## self.xnAlambda*=np.prod(xnormlambda)
xscale=np.mean(xnormlambda)
if np.min(xscale)>0:
## self.xnAlambda*=np.prod(np.power(xnormlambda,0.1))
self.xnAlambda/=xscale
return(xnormlambda)
## --------------------------------------------
def update_lambda_matrix_bias(self,X,Y):
"""
Task: to compute the initial estimate of xlambda and the bias
Input: X list of 2d arrays, the arrays of input block views
Y 2d array of output block
Output: xlambda real, the estimation of xlambda
bias vector of bias estimation
"""
nrank=self.nrank
nminrank=self.nminrank
norder=self.norder
drank=nrank-nminrank
nrankuv=self.nrankuv
m=len(X)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
Fr=np.array([None for _ in range(norder)])
for d in range(norder):
XU=np.dot(X,self.xU[d]*np.outer(self.xlambdaU,np.ones(nrankuv)))
AXU=self.activation_func(XU,self.iactfunc)
Fr[d]=np.dot(AXU,self.xV[d].T)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
if len(Fr)>1:
F=np.prod(Fr,0)
else:
F=np.copy(Fr[0])
Q=self.xQ[nminrank:nrank]
nt,ny=Q.shape
m=Y.shape[0]
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
em=np.ones(m)
YQT=np.dot(Y,Q.T)
QQT=np.dot(Q,Q.T)
FTF=np.dot(F.T,F)
f1=np.dot(em,F)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
xright=np.dot(em,F*YQT)-np.dot(em,F*np.outer(em,np.dot(em,YQT)))/m
xleft=QQT*(FTF-np.outer(f1,f1)/m)
xlambda=np.dot(xright,np.linalg.pinv(xleft))
bias=np.dot(em,(Y-np.dot(F,np.outer(xlambda,np.ones(ny))*Q)))/m
return(xlambda,bias)
## --------------------------------------------
def nag_next(self,gamma=0,psign=-1):
"""
Task: to compute the next point for Nesterov Acclerated gradient
Input: gamma =0 no change , gradient scale otherwise
psign =0 no change, -1 pull back, +1 push forward
"""
## pull back or push forward
psign=-1 ## -1 pull back, +1 push forward
pgamma=psign*gamma
nrank=self.nrank
nminrank=self.nminrank
for d in range(self.norder):
self.xVnext[d]=self.xV[d,nminrank:nrank]+pgamma*self.xAV[d]
self.xUnext[d]=self.xU[d]+pgamma*self.xAU[d]
self.xQnext=self.xQ[nminrank:nrank]+pgamma*self.xAQ
self.xlambdanext=self.xlambda[nminrank:nrank]+pgamma*self.xAlambda
self.xlambdanextU=self.xlambdaU+pgamma*self.xAlambdaU
return
# ## -----------------------------------------------------
# def update_parameters_nag(self):
# """
# Task: to update the parameters of a polynomial, cpoly,
# based on the Nesterov accelerated gradient
# Input: irank rank index in xP,xQ, xlambda
# Modify: xV, xP, xQ, xVQ, xlambda, vlambda
# """
# norder=self.norder
# nrank=self.nrank
# nminrank=self.nminrank
# xnorm=np.zeros(norder)
# for d in range(norder):
# xnorm[d]=np.linalg.norm(self.xGrad[d])
# ## self.store_grad.apprnd(xnorm)
# xmax=np.max(xnorm)
# if xmax>self.max_grad_norm:
# self.max_grad_norm=xmax
# ## print('Grad norm max:',xmax)
# if self.sigma*xmax>self.sigmamax:
# sigmacorrect=self.sigmamax/(self.sigma*xmax)
# ## print('>>>',self.sigma*xmax,sigmacorrect)
# else:
# sigmacorrect=1
# for d in range(norder):
# self.xV[d]=self.gammanag*self.xV[d] \
# -sigmacorrect*self.sigmabase*self.sigma*self.xGrad[d]
# for d in range(norder):
# self.xP[d,nminrank:nrank]=self.gamma*self.xP[d,nminrank:nrank]+self.xV[d]
# self.xVQ=self.gammanag*self.xVQ \
# -sigmacorrect*self.sigmabase*self.sigma*self.xGradQ
# self.xQ[nminrank:nrank]=self.gamma*self.xQ[nminrank:nrank]+self.xVQ
# self.vlambda=self.gammanag*self.vlambda \
# -sigmacorrect*self.sigmabase*self.sigma \
# *self.xlambdagrad
# self.xlambda[nminrank:nrank]=self.gamma*self.xlambda[nminrank:nrank] \
# +self.vlambda
# return
# ## -----------------------------------------------------
def update_parameters_adam(self):
"""
Task: to update the parameters of a polynomial, cpoly,
based on the ADAM additive update
Input:
Modify: xU,XV,xQ, xAU, xAV,xAQ, xnAU, xnAV,xnAQ,
xlambda, xAlambda, xnAlambda,
xlambdaU, xAlambdaU, xnAlambdaU,
"""
norder=self.norder
nrank=self.nrank
nrankuv=self.nrankuv
nminrank=self.nminrank
xnormU=np.zeros(norder)
xnormV=np.zeros(norder)
for d in range(norder):
xnormU[d]=np.linalg.norm(self.xGradU[d])
xnormV[d]=np.linalg.norm(self.xGradV[d])
## self.store_grad.apprnd(xnorm)
xmax=np.max(np.vstack((xnormU,xnormV)))
if xmax>self.max_grad_norm:
self.max_grad_norm=xmax
## print('Grad norm max:',xmax)
if self.sigma*xmax>self.sigmamax:
sigmacorrect=self.sigmamax/(self.sigma*xmax)
## print('>>>',self.sigma*xmax,sigmacorrect)
else:
sigmacorrect=1
gammanag=self.gammanag
gammanag2=self.gammanag2
for d in range(norder):
## xU ---------------------------
ngrad=np.sum((sigmacorrect*self.xGradU[d])**2,1)
self.xAU[d]=gammanag*self.xAU[d]+(1-gammanag) \
*sigmacorrect*self.xGradU[d]
vhat=self.xAU[d]/(1-gammanag**self.iter)
self.xnAU[d]=gammanag2*self.xnAU[d]+(1-gammanag2)*ngrad
ngradhat=self.xnAU[d]/(1-gammanag2**self.iter)
self.xU[d]=self.gamma*self.xU[d] \
-self.sigmabase*self.sigma \
*vhat/(np.outer(np.sqrt(ngradhat),np.ones(self.nrankuv))+self.ngeps)
## xV -----------------------
ngrad=np.sum((sigmacorrect*self.xGradV[d])**2,1)
self.xAV[d]=gammanag*self.xAV[d]+(1-gammanag) \
*sigmacorrect*self.xGradV[d]
vhat=self.xAV[d]/(1-gammanag**self.iter)
self.xnAV[d]=gammanag2*self.xnAV[d]+(1-gammanag2)*ngrad
ngradhat=self.xnAV[d]/(1-gammanag2**self.iter)
self.xV[d,nminrank:nrank]=self.gamma*self.xV[d,nminrank:nrank] \
-self.sigmabase*self.sigma \
*vhat/(np.outer(np.sqrt(ngradhat),np.ones(self.nrankuv))+self.ngeps)
## xQ -----------------------------------------
ngrad=np.sum((sigmacorrect*self.xGradQ)**2,1)
self.xAQ=gammanag*self.xAQ \
+(1-gammanag)*sigmacorrect*self.xGradQ
vhat=self.xAQ/(1-gammanag**self.iter)
## print('Qgrad:',ngrad)
if self.maxngrad<np.sum(ngrad):
self.maxngrad=np.sum(ngrad)
if self.maxngrad>1.0:
print('Max Qgrad:',self.maxngrad)
self.xnAQ=gammanag2*self.xnAQ \
+(1-gammanag2)*ngrad
ngradhat=self.xnAQ/(1-gammanag2**self.iter)
self.xQ[nminrank:nrank]=self.gamma*self.xQ[nminrank:nrank] \
-self.sigmabase*self.sigma \
*vhat/(np.outer(np.sqrt(ngradhat),np.ones(self.ndimy))+self.ngeps)
## lambda ------------------------------------------------
ngrad=np.sum((sigmacorrect*self.xlambdagrad)**2)
self.xAlambda=gammanag*self.xAlambda \
+(1-gammanag)*sigmacorrect*self.xlambdagrad
vhat=self.xAlambda/(1-gammanag**self.iter)
self.xnAlambda=gammanag2*self.xnAlambda \
+(1-gammanag2)*ngrad
ngradhat=self.xnAlambda/(1-gammanag2**self.iter)
self.xlambda[nminrank:nrank]=self.gamma*self.xlambda[nminrank:nrank] \
-self.sigmabase*self.sigma \
*vhat/(np.sqrt(ngradhat)+self.ngeps)
## lambdaU ------------------------------------------------
ngrad=np.sum((sigmacorrect*self.xlambdagradU)**2)
self.xAlambdaU=gammanag*self.xAlambdaU \
+(1-gammanag)*sigmacorrect*self.xlambdagradU
vhat=self.xAlambdaU/(1-gammanag**self.iter)
self.xnAlambdaU=gammanag2*self.xnAlambdaU \
+(1-gammanag2)*ngrad
ngradhat=self.xnAlambdaU/(1-gammanag2**self.iter)
self.xlambdaU=self.gamma*self.xlambdaU \
-self.sigmabase*self.sigma \
*vhat/(np.sqrt(ngradhat)+self.ngeps)
return
## ------------------------------------------------
def activation_func(self,f,ifunc=0):
"""
Task: to compute the value of activation function
Input: f array of input
ifunc =0 identity
=1 arcsinh ln(f+(f^2+1)^{1/2})
=2 sigmoid 2e^x/(e^x+1)-1
                      =3 hyperbolic tangent (tanh)
Output: F array of activation values
"""
if ifunc==0:
F=f ## identity
elif ifunc==1:
F=np.log(f+(f**2+1)**0.5) ## arcsinh
elif ifunc==2:
F=2/(1+np.exp(-f))-1 ## sigmoid
elif ifunc==3:
F=np.tanh(f) ## tangent hyperbolic
elif ifunc==4: ## relu
F=f*(f>0)
return(F)
## ------------------------------------------------
def activation_func_diff(self,f,ifunc=0,ipar=1):
"""
Task: to compute the value of the pointwise derivative of
activation function
Input: f array of input
ifunc =0 identity
=1 arcsinh ln(f+(f^2+1)^{1/2})
                      =2 sigmoid (derivative of 2/(1+e^{-x})-1)
                      =3 hyperbolic tangent (derivative 1/cosh(x)^2)
Output: DF array of pointwise drivative of the activation function
"""
m,n=f.shape
if ifunc==0:
DF=np.ones((m,n)) ## identity
elif ifunc==1:
DF=1/(f**2+1)**0.5 ## arcsinh
elif ifunc==2: ## sigmoid
DF=2*np.exp(-f)/(1+np.exp(-f))**2
elif ifunc==3:
DF=1/np.cosh(f)**2 ## tangent hyperbolic
elif ifunc==4: ## relu
DF=ipar*(f>0)
return(DF)
## ------------------------------------------------
def function_value(self,X,rankcount,xU=None,xV=None,xQ=None, \
xlambda=None,xlambdaU=None,bias=None, \
ifunc=None,irange=0):
"""
Task: to compute the rank related function value
f=\lambda \circ_r Xp_r q^T +bias
Input: X list of 2d array of input data views
rankcount index of rank-block
               xU         tensor of input parameter arrays, (norder,ndimx,nrankuv)
               xV         tensor of input parameter arrays, (norder,nrank,nrankuv)
xQ matrix of output parameter arrays, (nrank,ndimy)
xlambda singular values (nrank)
xlambdaU data variable weight values (ndimx)
bias vector of bias (ndimy)
ifunc None => 0
=0 identity
=1 arcsinh ln(f+(f^2+1)^{1/2})
=2 sigmoid e^x/(e^x+1)
Output: f 2d array =\sum \circ_t XP^(t)T M_{\lambda} Q +bias
"""
nminrank,nrank=self.lranks[rankcount]
drank=nrank-nminrank
nrankuv=self.nrankuv
m,n=X.shape
## temporal case
ifunc=0
if ifunc is None:
ifunc=0
f=np.zeros((m,self.ndimy))
if xU is None:
xU=self.xU
if xV is None:
xV=self.xV[:,nminrank:nrank]
if xQ is None:
xQ=self.xQ[nminrank:nrank]
if xlambda is None:
xlambda=self.xlambda[nminrank:nrank]
if xlambdaU is None:
xlambdaU=self.xlambdaU
if bias is None:
bias=self.pbias[rankcount]
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@
F0=np.ones((m,drank))
for d in range(self.norder):
            XU=np.dot(X,xU[d]*np.outer(xlambdaU,np.ones(nrankuv)))
            AXU=self.activation_func(XU,self.iactfunc)
            F0*=np.dot(AXU,xV[d].T)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@
F=np.dot(F0,np.outer(xlambda,np.ones(self.ndimy))*xQ)
F+=np.outer(np.ones(m),bias)
return(F)
## ------------------------------------------------
def gradient(self,X,y,rankcount,xU=None,xV=None,xQ=None, \
xlambda=None,xlambdaU=None,bias=None,icount=None):
"""
Task: to compute the gradients for xP, xQ, xlambda
Input: X 2d array of input data block
y 2d array of output block
rankcount index of rank-block
xU tensor of parameters (norder,ndimx,nrankuv)
xV tensor of parameters (norder,nrank,nrankuv)
xQ 2d array (nrank,ndimy)
xlambda vector (nrank)
xlambdaU vector (ndimx)
bias vector (ndimy)
Output:
Modifies: self.xGradU, self.xGradV, self.xGradQ,
self.xlambdagrad, self.xlambdagradU
"""
norder=self.norder
nrankuv=self.nrankuv
ndimx=self.ndimx
ndimy=self.ndimy
nminrank,nrank=self.lranks[rankcount]
drank=nrank-nminrank
m=X.shape[0]
if xU is None:
xU=self.xU
if xV is None:
xV=self.xV[:,nminrank:nrank]
if xQ is None:
xQ=self.xQ[nminrank:nrank]
if bias is None:
bias=self.pbias[self.rankcount]
if xlambda is None:
xlambda=self.xlambda[nminrank:nrank]
if xlambdaU is None:
xlambdaU=self.xlambdaU
## setting the regularization constants
self.cP=self.cregular ## regularization penalty on P
self.cQ=self.cregular ## regularization penalty on Q
self.clambda=self.cregular ## lambda regularization constant
## scaling the loss and the regularization
if self.iscale==1:
scale_loss=1/(m*ndimy)
scale_lambda=1/drank
else:
scale_loss=1
scale_lambda=1
self.xGradU=np.zeros((norder,ndimx,nrankuv))
self.xGradV=np.zeros((norder,drank,nrankuv))
self.xGradQ=np.zeros((drank,ndimy))
xXUV=np.array([None for _ in range(norder)])
xXU=np.array([None for _ in range(norder)])
xActD=np.array([None for _ in range(norder)])
## Compute the transformations of X by P_d
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
for d in range(norder):
XU=np.dot(X,self.xU[d]*np.outer(self.xlambdaU,np.ones(nrankuv)))
AXU=self.activation_func(XU,self.iactfunc)
xXU[d]=AXU
xActD[d]=self.activation_func_diff(XU,self.iactfunc)
xXUV[d]=np.dot(AXU,self.xV[d].T)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
F0=np.prod(xXUV,0) ## \circ_d XM_{lambdaU}U^((d)}V^{(d)}T}M_{\lambda}
## entire predictor function values
H=np.dot(F0,np.outer(xlambda,np.ones(ndimy))*xQ)
## error
ferr=H+np.outer(np.ones(m),bias)-y ## the loss, error
## if the loss is not least square change it
## ikolmogorov=1 ## =1 kolmogorov mean variant =0 not
## kolm_t=1 ## kolmogorov mean scale factor
if self.ikolmogorov==0:
if self.lossdegree>0:
dferr=np.linalg.norm(ferr)
if dferr==0:
dferr=1
dferr=dferr**self.lossdegree
ferr/=dferr
elif self.ikolmogorov==1:
ferr=np.tanh(self.kolm_t*ferr)
elif self.ikolmogorov==2:
dferr=np.sqrt(np.sum(ferr**2,1))
dferr=dferr+(dferr==0)
ferr=ferr*np.outer(np.tanh(self.kolm_t*dferr)/dferr,np.ones(ndimy))
## averaging the loss on the min-batch
ferr=ferr*scale_loss
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@2
## computation the gradients
## =================================================
## computing more than ones occuring terms and factors to save time
ferrQT=np.dot(ferr,xQ.T)
## =================================================
## gradient not depending on degree
self.xGradQ=np.dot(F0.T,ferr)*np.outer(xlambda,np.ones(ndimy))
self.xlambdagrad=np.sum(F0*ferrQT,0)
## ================================================
## compute F_{\subsetminus d}
self.xlambdagradU=np.zeros(ndimx)
if norder>1:
for d in range(norder):
ipx=np.arange(norder-1)
if d<norder-1:
ipx[d:]+=1
Zd=np.prod(xXUV[ipx],0) ## Z^{(d)}
dEQF=Zd*ferrQT
dEQFV=np.dot(dEQF,xV[d]*np.outer(xlambda,np.ones(nrankuv)))
dEQFVH=dEQFV*xActD[d]
dXEQFVH=np.dot(X.T,dEQFVH)
##Gd=np.dot(X.T,Zd*xActD[d]*ferrQT)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
self.xGradV[d]=np.outer(xlambda,np.ones(nrankuv))*np.dot(dEQF.T,xXU[d])
self.xGradU[d]=dXEQFVH*np.outer(xlambdaU,np.ones(nrankuv))
self.xlambdagradU+=np.sum(dXEQFVH*xU[d],1)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
        else:
            ## norder == 1: Zd is empty (equivalently ones(m,nrank)); only degree d=0 contributes
            d = 0
            dEQF=ferrQT
dEQFV=np.dot(dEQF,xV[d]*np.outer(xlambda,np.ones(nrankuv)))
dEQFVH=dEQFV*xActD[d]
dXEQFVH=np.dot(X.T,dEQFVH)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
self.xGradV[d]=np.outer(xlambda,np.ones(nrankuv))*np.dot(dEQF.T,xXU[d])
self.xGradU[d]=dXEQFVH*np.outer(xlambdaU,np.ones(nrankuv))
self.xlambdagradU+=np.sum(dXEQFVH*xU[d],1)
## @@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@@
## regularization terms
if self.ilambda==1:
self.xlambdagrad-=self.clambda*scale_lambda \
*np.sign(xlambda)*np.abs(xlambda)**(self.regdegree-1)
return
## ------------------------------------------------
def incremental_step(self,X,y,icount):
"""
Task: to compute one step of the iteration
Input: X 2d array of input block
y 2d array of output block
icount block index
Modifies: via the called functions, xP,xQ and the gradient related variables
"""
m,n=X.shape
if self.inormalize==1:
self.normalize_lp()
## if first parameter =0 then no Nesterov push forward step
self.nag_next(gamma=self.gammanag,psign=-1)
xlambdanext=self.xlambdanext
xlambdanextU=self.xlambdanextU
## xlambdanext=None
self.gradient(X,y,self.rankcount,xU=self.xUnext,xV=self.xVnext,xQ=self.xQnext, \
xlambda=xlambdanext, xlambdaU=xlambdanextU, \
bias=self.pbias[self.rankcount],icount=icount)
## self.update_parameters_nag()
self.update_parameters_adam()
## self.xlambda=self.normalize_lp(ilambda=0)
## self.normalize_lp(ilambda=0)
f=self.function_value(X,self.rankcount,xU=None,xV=None,xQ=None, \
xlambda=None,xlambdaU=None,bias=None, \
ifunc=None,irange=1)
## bias is averaged on all processed block and data
if self.ibias==1:
prev_bias=self.pbias[self.rankcount]
self.pbias[self.rankcount]= | np.mean(y-f,0) | numpy.mean |
import numpy as np
import scipy as sp
import scipy.stats
def rle(inarray):
""" run length encoding. Partial credit to R rle function.
Multi datatype arrays catered for including non Numpy
returns: tuple (runlengths, startpositions, values) """
ia = np.array(inarray) # force numpy
n = len(ia)
if n == 0:
return (None, None, None)
else:
y = np.array(ia[1:] != ia[:-1]) # pairwise unequal (string safe)
        i = np.append(np.where(y), n - 1)   # must include last element position
z = np.diff(np.append(-1, i)) # run lengths
p = np.cumsum(np.append(0, z))[:-1] # positions
return(z, p, ia[i])
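# e.g. rle([1, 1, 2, 2, 2, 3]) -> (array([2, 3, 1]), array([0, 2, 5]), array([1, 2, 3]))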
def split_list_by_lengths(values, lengths):
"""
>>> split_list_by_lengths([0,0,0,1,1,1,2,2,2], [2,2,5])
[[0, 0], [0, 1], [1, 1, 2, 2, 2]]
"""
assert np.sum(lengths) == len(values)
    idxs = np.cumsum(lengths)
    # The remainder of this function was truncated in the source; the return
    # below is a reconstruction chosen to satisfy the doctest above.
    return [chunk.tolist() for chunk in np.split(np.asarray(values), idxs[:-1])]
# Author: <NAME> at 10/11/2021 <<EMAIL>>
# Licence: MIT License
# Copyright: <NAME> (2018) <<EMAIL>>
import numpy as np
import pytest
from numpy.testing import assert_array_equal
from reservoirpy.nodes.io import Input
from ..model import Model
from ..ops import merge
from .dummy_nodes import *
def test_node_link(plus_node, minus_node):
clean_registry(Model)
model1 = plus_node >> minus_node
model2 = minus_node >> plus_node
assert model1.name == "Model-0"
assert model1.params["PlusNode-0"]["c"] is None
assert model1.hypers["PlusNode-0"]["h"] == 1
assert model1["PlusNode-0"].input_dim is None
assert model2.name == "Model-1"
assert model2.params["PlusNode-0"]["c"] is None
assert model2.hypers["PlusNode-0"]["h"] == 1
assert model2["PlusNode-0"].input_dim is None
assert model1.edges == [(plus_node, minus_node)]
assert model2.edges == [(minus_node, plus_node)]
assert set(model1.nodes) == set(model2.nodes)
with pytest.raises(RuntimeError):
model1 & model2
with pytest.raises(RuntimeError):
plus_node >> minus_node >> plus_node
with pytest.raises(RuntimeError):
plus_node >> plus_node
def test_empty_model_init():
model = Model()
assert model.is_empty
def test_model_call(plus_node, minus_node):
model = plus_node >> minus_node
data = np.zeros((1, 5))
res = model(data)
assert_array_equal(res, data)
input_node = Input()
branch1 = input_node >> plus_node
branch2 = input_node >> minus_node
model = branch1 & branch2
res = model(data)
for name, arr in res.items():
assert name in [out.name for out in model.output_nodes]
if name == "PlusNode-0":
assert_array_equal(arr, data + 2)
else:
assert_array_equal(arr, data - 2)
res = model(data)
for name, arr in res.items():
assert name in [out.name for out in model.output_nodes]
if name == "PlusNode-0":
assert_array_equal(arr, data + 4)
else:
assert_array_equal(arr, data)
res = model(data, reset=True)
for name, arr in res.items():
assert name in [out.name for out in model.output_nodes]
if name == "PlusNode-0":
assert_array_equal(arr, data + 2)
else:
assert_array_equal(arr, data - 2)
res = model(data, stateful=False)
for name, arr in res.items():
assert name in [out.name for out in model.output_nodes]
if name == "PlusNode-0":
assert_array_equal(arr, data + 4)
else:
assert_array_equal(arr, data)
for node in model.output_nodes:
if node.name == "PlusNode-0":
assert_array_equal(node.state(), data + 2)
else:
assert_array_equal(node.state(), data - 2)
def test_model_with_state(plus_node, minus_node):
model = plus_node >> minus_node
data = np.zeros((1, 5))
res = model(data)
assert_array_equal(res, data)
input_node = Input()
branch1 = input_node >> plus_node
branch2 = input_node >> minus_node
model = branch1 & branch2
res = model(data)
with model.with_state(state={plus_node.name: np.zeros_like(plus_node.state())}):
assert_array_equal(plus_node.state(), np.zeros_like(plus_node.state()))
with pytest.raises(TypeError):
with model.with_state(state=np.zeros_like(plus_node.state())):
pass
def test_model_run(plus_node, minus_node):
input_node = Input()
branch1 = input_node >> plus_node
branch2 = input_node >> minus_node
model = merge(branch1, branch2)
data = np.zeros((3, 5))
res = model.run(data)
expected_plus = np.array([[2] * 5, [4] * 5, [6] * 5])
expected_minus = np.array([[-2] * 5, [0] * 5, [-2] * 5])
for name, arr in res.items():
assert name in [out.name for out in model.output_nodes]
if name == "PlusNode-0":
assert_array_equal(arr, expected_plus)
assert_array_equal(arr[-1][np.newaxis, :], plus_node.state())
else:
assert_array_equal(arr, expected_minus)
assert_array_equal(arr[-1][np.newaxis, :], minus_node.state())
res = model.run(data, reset=True)
expected_plus = np.array([[2] * 5, [4] * 5, [6] * 5])
expected_minus = np.array([[-2] * 5, [0] * 5, [-2] * 5])
for name, arr in res.items():
assert name in [out.name for out in model.output_nodes]
if name == "PlusNode-0":
assert_array_equal(arr, expected_plus)
assert_array_equal(arr[-1][np.newaxis, :], plus_node.state())
else:
assert_array_equal(arr, expected_minus)
assert_array_equal(arr[-1][np.newaxis, :], minus_node.state())
res = model.run(data, stateful=False)
    expected_plus2 = np.array([[8] * 5, [10] * 5, [12] * 5])
## activation_functions.py
##
## Definitions of ReLU, leaky-ReLU and sigmoid family
## activation functions and their upper and lower bounds
##
## Copyright (C) 2018, <NAME> <<EMAIL>> and contributors
##
## This program is licenced under the BSD 2-Clause License,
## contained in the LICENCE file in this directory.
## See CREDITS for a list of contributors.
##
import numpy as np
from numba import jit
@jit(nopython=True)
def relu_ub_pn(u, l):
a = u / (u - l)
return a, -l * a
# upper bound, unsure
@jit(nopython=True)
def leaky_relu_ub_pn(u, l, k):
a = (u - k * l) / (u - l)
b = l * u * (k - 1.0) / (u - l)
return a, b
@jit(nopython=True)
def relu_lb_pn(u, l):
# adaptive bound
intercept = np.zeros_like(u)
slope = np.zeros_like(u)
mask = np.abs(u) > np.abs(l)
slope[mask] = 1.0
return slope, intercept
# lower bound, unsure (adaptive)
@jit(nopython=True)
def leaky_relu_lb_pn(u, l, k):
# adaptive bound
intercept = np.zeros_like(u)
slope = np.full(len(u), k, dtype=u.dtype)
mask = np.abs(u) > np.abs(l)
slope[mask] = 1.0
return slope, intercept
@jit(nopython=True)
def relu_ub_p(u, l):
    return np.ones_like(u), np.zeros_like(u)
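# Illustrative sketch (not part of the original module): for an "unsure" neuron
# whose pre-activation bounds straddle zero (l < 0 < u), the *_pn helpers above
# return a (slope, intercept) pair describing a linear bound on the activation.
# Assuming u = [2.0] and l = [-1.0]:
# >>> relu_ub_pn(np.array([2.0]), np.array([-1.0]))
# (array([0.66666667]), array([0.66666667]))   # upper bound y <= (2/3)*x + 2/3
# >>> relu_lb_pn(np.array([2.0]), np.array([-1.0]))
# (array([1.]), array([0.]))                   # adaptive lower bound y >= x, since |u| > |l|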
import os
import sys
cwd = os.getcwd()
sys.path.append(cwd)
sys.path.append(cwd + '/utils/python_utils')
sys.path.append(cwd + '/simulator/pybullet')
sys.path.append(cwd + '/build/lib')
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import time, math
from collections import OrderedDict
import copy
import signal
import shutil
from pnc.robot_system.pinocchio_robot_system import PinocchioRobotSystem
from pinocchio.visualize import MeshcatVisualizer
import pinocchio as pin
import cv2
import pybullet as p
import numpy as np
np.set_printoptions(precision=5)
from util import pybullet_util
from util import util
DT = 0.001
jac_i = np.array([[1, -1]])
s_a = np.array([[0, 1]])
def IsClose(a, b, threshold=0.0001):
if np.abs(a - b) < threshold:
return True
else:
return False
def UpdateRobotSystem(robot, q, qdot):
robot.update_system(np.zeros(3), np.zeros(4), np.zeros(3), np.zeros(3),
                        np.zeros(3), np.zeros(4)
# Original Author: <NAME> 2002
# Bug-fixes in 2006 by <NAME>
# The content of this file is based on scipy.optimize's anneal.py file,
# which is distributed under the BSD license.
from __future__ import print_function, division
import numpy
from numpy import asarray, tan, exp, ones, squeeze, sign, \
all, log, sqrt, pi, shape, array, minimum, where
from numpy import random
import numpy as np
import collections
from PyAstronomy.pyaC import pyaErrors as PE
_double_min = numpy.finfo(float).min
_double_max = numpy.finfo(float).max
class base_schedule(object):
def __init__(self):
self.dwell = 20
self.learn_rate = 0.5
self.lower = -10
self.upper = 10
self.Ninit = 50
self.accepted = 0
self.tests = 0
self.feval = 0
self.k = 0
self.T = None
def init(self, **options):
self.__dict__.update(options)
self.lower = asarray(self.lower)
self.lower = where(self.lower == numpy.NINF, -_double_max, self.lower)
self.upper = asarray(self.upper)
self.upper = where(self.upper == numpy.PINF, _double_max, self.upper)
self.k = 0
self.accepted = 0
self.feval = 0
self.tests = 0
def getstart_temp(self, best_state):
""" Find a matching starting temperature and starting parameters vector
i.e. find x0 such that func(x0) = T0.
Parameters
----------
best_state : _state
A _state object to store the function value and x0 found.
Returns
-------
x0 : array
The starting parameters vector.
"""
assert(not self.dims is None)
lrange = self.lower
urange = self.upper
fmax = _double_min
fmin = _double_max
for _ in range(self.Ninit):
x0 = random.uniform(size=self.dims)*(urange-lrange) + lrange
fval = self.func(x0, *self.args)
self.feval += 1
if fval > fmax:
fmax = fval
if fval < fmin:
fmin = fval
best_state.cost = fval
best_state.x = array(x0)
self.T0 = (fmax-fmin)*1.5
return best_state.x
def accept_test(self, dE):
T = self.T
self.tests += 1
if dE < 0:
self.accepted += 1
return 1
p = exp(-dE*1.0/self.boltzmann/T)
if (p > random.uniform(0.0, 1.0)):
self.accepted += 1
return 1
return 0
def update_guess(self, x0):
pass
def update_temp(self, x0):
pass
# A schedule due to Lester Ingber
class fast_sa(base_schedule):
def init(self, **options):
self.__dict__.update(options)
if self.m is None:
self.m = 1.0
if self.n is None:
self.n = 1.0
self.c = self.m * exp(-self.n * self.quench)
def update_guess(self, x0):
x0 = np.asarray(x0)
u = np.squeeze(np.random.uniform(0.0, 1.0, size=self.dims))
T = self.T
        y = np.sign(u-0.5)
# Copyright 2020-2021 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from closed_shape_2D import ClosedShape2D
from line import Line
class Polygon(ClosedShape2D):
"""
Polygon Class comprised of a list of ordered vertices.
A Polygon is considered a closed region, so the first and last vertices are connected by an edge.
No need to duplicate the first vertex in order to close it.
"""
def __init__(self, vertices=None, compute_bounding_box=True, opacity=1.0):
"""
Constructor.
:param vertices: (list|ndarray) Ordered list of [x, y] vertices comprising a closed polygon.
Last edge connects the first and last vertices automatically,
so no need to duplicate either of them.
:param compute_bounding_box: (bool) Determines whether to compute a bounding rectangular box for this polygon.
Used only because bounding boxes are themselves polygons, and must not get
their own bounding box, as then they would infinitely recurse.
:param opacity: (float: [0.0, 1.0]) Opacity level of the polygon. 0.0 means that it is totally transparent,
while 1.0 is totally opaque. Used for line_intersect() to randomly determine
if a line intersects it or not when opacity is set to a value other than 1.
"""
super(Polygon, self).__init__()
self.vertices = None
self.boundingBox = compute_bounding_box
self.opacity = opacity
self.min = None
self.max = None
if vertices is not None:
self.set_vertices(vertices, compute_bounding_box)
def set_vertices(self, vertices, compute_bounding_box=True):
"""
Sets the vertices of a polygon after being created and recomputes its bounding box if specified.
:param vertices: (list|ndarray) Ordered list of vertices comprising a closed polygon. Last edge connects the
first and last vertices automatically, so no need to duplicate either of them.
:param compute_bounding_box: (bool) Determines whether to compute a bounding rectangular box for this polygon.
Used only because bounding boxes are themselves polygons, and must not get
their own bounding box, as then they would infinitely recurse.
:return: None
"""
if not isinstance(vertices, np.ndarray):
vertices = np.array(vertices)
self.vertices = vertices
if compute_bounding_box:
self._set_bounding_box()
def _set_bounding_box(self):
"""
Sets the polygon's bounding box from its minimum and maximum x and y values.
:return: (Polygon) A polygon representing a bounding box rectangle completely enclosing its parent Polygon.
"""
x_s = self.vertices[:, 0]
y_s = self.vertices[:, 1]
self.min_x = np.min(x_s)
self.min_y = np.min(y_s)
        self.max_x = np.max(x_s)
import configparser
import os
import time
from datetime import datetime
import imageio as imageio
import matplotlib.pyplot as plt
import numpy as np
import pandas as pd
import torch
from torch.utils.tensorboard import SummaryWriter
from torch.utils.tensorboard.summary import hparams
from DQN_Conv import AgentDQN
from SQN import AgentSQN, CoordinatorSQN
from grid_generators.random_start_goal import random_start_goal
from gridworld import GridWorld
# Model training parameters
DEVICE = torch.device("cuda" if torch.cuda.is_available() else "cpu")  # Use GPU if available
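# The SummaryWriter subclass below overrides add_hparams so that hyperparameters
# and their metrics are written into the current run directory (via self.file_writer)
# instead of the separate timestamped sub-run the stock PyTorch implementation creates.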
class SummaryWriter(SummaryWriter):
def add_hparams(
self, hparam_dict, metric_dict, hparam_domain_discrete=None, run_name=None
):
torch._C._log_api_usage_once("tensorboard.logging.add_hparams")
if type(hparam_dict) is not dict or type(metric_dict) is not dict:
raise TypeError('hparam_dict and metric_dict should be dictionary.')
exp, ssi, sei = hparams(hparam_dict, metric_dict, hparam_domain_discrete)
self.file_writer.add_summary(exp)
self.file_writer.add_summary(ssi)
self.file_writer.add_summary(sei)
for k, v in metric_dict.items():
self.add_scalar(k, v)
def read_config(config):
rl_parameters = config['RL Parameters']
steps = rl_parameters.getint("steps")
episodes = rl_parameters.getint("episodes")
train_period = rl_parameters.getint("train_period")
start_goal_reset_period = rl_parameters.getint("start_goal_reset_period")
grid_reset_period = rl_parameters.getint("grid_reset_period")
effects = rl_parameters.getboolean("effects")
return steps, episodes, train_period, start_goal_reset_period, grid_reset_period, effects
def read_grid_config(config):
return config.getint("Grid Parameters", "grid_size")
def read_agents_config(config, env):
n = config.getint("Agents Parameters", "n")
model = config.get("Agents Parameters", "model")
grid_size = config.getint("Grid Parameters", "grid_size")
coordinator = None
if model == "SQN":
coordinator = AgentSQN(0, obs_shape=env.observation_space.shape, device=DEVICE, config=config)
else:
raise NotImplementedError
return n, coordinator, model
def read_env_config(config):
# Environmental conditions to be considered
env_parameters = config['Environmental Effects']
col_wind = env_parameters.getint("col_wind")
range_random_wind = env_parameters.getint("range_random_wind")
probabilities = env_parameters.getint("probabilities")
return col_wind, range_random_wind, probabilities
def create_gif(filename, env, agents, reset_start_goal=True, reset_grid=True, steps=100, episodes=1):
""" render result and create gif of the result"""
filenames = []
fig = plt.figure()
plt.ion()
grid_size = len(env.grid)
for episode in range(episodes):
obs = env.reset(reset_start_goal, reset_grid)
for step in range(steps):
env.render()
plt.savefig(f'images/gif_frame/E{episode:03}S{step:05}.png')
plt.cla()
filenames.append(f'images/gif_frame/E{episode:03}S{step:05}.png')
actions = [agents[i].select_action(obs[i]) for i in range(len(agents))]
obs, rewards, done, info = env.step(actions)
if done:
break
print(f"Episode finished after {step + 1} time steps")
env.render()
plt.savefig(f'images/gif_frame/final.png')
plt.cla()
filenames.append(f'images/gif_frame/final.png')
plt.ioff()
with imageio.get_writer(f'images/gif/{filename}.gif', mode='I') as writer:
for filename in filenames:
image = imageio.imread(filename)
writer.append_data(image)
# TODO : try to save fig as numpy array to preserve data and speed up the rendering
for filename in set(filenames):
os.remove(filename)
def create_comment(config):
steps = config["RL Parameters"]["steps"]
episodes = config["RL Parameters"]["episodes"]
train_period = config["RL Parameters"]["train_period"]
start_goal_reset_period = config["RL Parameters"]["start_goal_reset_period"]
grid_reset_period = config["RL Parameters"]["grid_reset_period"]
alpha = config["SQN Parameters"]["alpha"]
update_period = config["SQN Parameters"]["update_period"]
batch_size = config["SQN Parameters"]["batch_size"]
free = config["Gridworld Parameters"]["FREE"]
goal = config["Gridworld Parameters"]["GOAL"]
out_of_b = config["Gridworld Parameters"]["OUT_OF_BOUNDS"]
obstacle = config["Gridworld Parameters"]["OBSTACLE"]
return f" steps={steps} episodes={episodes} train_period={train_period} start_goal_reset_period={start_goal_reset_period}" \
f" grid_reset_period={grid_reset_period} alpha={alpha} update_period={update_period} batch_size={batch_size}" \
f" rwd_free={free} rwd_goal={goal} rwd_out_of_b={out_of_b} rwd_obstacle={obstacle}"
def create_hparams(config):
hyperparameters = {
"steps": config["RL Parameters"]["steps"],
"episodes": config["RL Parameters"]["episodes"],
"train_period": config["RL Parameters"]["train_period"],
"start_goal_reset_period": config["RL Parameters"]["start_goal_reset_period"],
"grid_reset_period": config["RL Parameters"]["grid_reset_period"],
"alpha": config["SQN Parameters"]["alpha"],
"update_period": config["SQN Parameters"]["update_period"],
"batch_size": config["SQN Parameters"]["batch_size"],
# rewards:
"FREE": config["Gridworld Parameters"]["FREE"],
"GOAL": config["Gridworld Parameters"]["GOAL"],
"OUT_OF_BOUNDS": config["Gridworld Parameters"]["OUT_OF_BOUNDS"],
"OBSTACLES": config["Gridworld Parameters"]["OBSTACLE"]
}
return hyperparameters
def main(config: configparser.ConfigParser):
print(DEVICE)
steps, episodes, train_period, start_goal_reset_period, grid_reset_period, effects = read_config(config)
step_done = 0
#################
# GRID CREATION #
#################
grid_size = read_grid_config(config) # Square grid size
# init_grid = np.genfromtxt('sample_grid/init_grid.csv', delimiter=',') # generate a grid from a csv file
# init_grid = random_maze(start, goal, width=grid_size, height=grid_size, complexity=0.5, density=0.5)
init_grid = np.zeros((grid_size, grid_size)) # empty grid
####################
# STARTS AND GOALS #
####################
# start = (0, 0)
# goal = (grid_size - 1, grid_size - 1) # place goal in bottom-right corner
start, goal = random_start_goal(width=grid_size, start_bounds=((0, 1), (0, grid_size)),
goal_bounds=((grid_size - 7, grid_size), (0, grid_size)))
#######################
# GRIDWORLDMARLS CREATION #
#######################
n = config.getint("Agents Parameters", "n")
if not effects:
# Ordinary gridworldMARL
env = GridWorld(n_agents=n, grid=init_grid)
else:
# Stochastic windy gridworldMARL
col_wind, range_random_wind, probabilities = read_env_config(config)
env = GridWorld(n_agents=n, grid=init_grid, col_wind=col_wind,
range_random_wind=range_random_wind, probabilities=probabilities)
env.read_reward_config(config)
###################
# AGENTS CREATION #
###################
# create Agent : giving a start or goal position fix it, otherwise it is randomly generated at each reset
# agent_1 = AgentSQN(2, window_size=init_grid.shape[0], device=device, start=start, goal=goal)
# agent_1 = AgentSQN(2, window_size=grid_size, device=DEVICE)
# agent_1 = AgentSQN(2, window_size=5, device=device, start=start_position)
# agent_1 = AgentSQN(2, window_size=5, device=device, goal=goal)
n_agents, coordinator, model = read_agents_config(config, env)
########
# MAIN #
########
# verification
print("Hyperparameters :")
print("steps", steps)
print("episodes", episodes)
print("train_period", train_period)
print("start_goal_reset_period", start_goal_reset_period)
print("grid_reset_period", grid_reset_period)
print("alpha", coordinator.alpha)
print("update_period", coordinator.update_steps)
print("batch_size", coordinator.batch_size)
print("reward free", env.rewards["free"])
print("reward goal", env.rewards["goal"])
print("reward obstacle", env.rewards["obstacles"])
now = datetime.now().strftime('%Y%m%d%H%M%S')
# Tensorboard initialisation (for logging values)
log_dir = os.path.join("/content/runs", now)
comment = create_comment(config)
tb = SummaryWriter(log_dir=log_dir, comment=comment)
plt.ion()
total_reward_s = []
cumulated_reward = np.zeros(n_agents)
cumulated_rewards = []
total_steps = []
total_loss_s = []
times = []
starts = []
goals = []
# save config file in logs with good datetime
with open(f"logs/configs/{now}.ini", "w") as configfile:
config.write(configfile)
print("Start")
try:
obs = env.reset(reset_starts_goals=True, reset_grid=True) # put reset_grid=True if you want a random init grid
env.render()
plt.savefig(f"images/{now}.png")
plt.savefig(f"images/{now}.svg", format="svg")
plt.show()
train_period_s = np.linspace(start=2000, stop=train_period, num=50, endpoint=True, dtype=int)
train_period_delay_s = np.linspace(start=0, stop=8000, num=50, endpoint=True, dtype=int)
radius_s = np.linspace(start=2, stop=10, num=9, endpoint=True, dtype=int)
radius_delays = np.linspace(start=0, stop=16000, num=9, endpoint=True, dtype=int)
print(radius_delays)
radius_index = 0
radius = radius_s[0]
# train_period_index = 0
# train_period = train_period_s[0]
reset_start_goal = True
reset_grid = True
start_time = time.time()
for episode in range(episodes):
# if episode <=8000 and episode == train_period_delay_s[train_period_index]:
# train_period = train_period_s[train_period_index]
# train_period_index += 1
# print(f"New training period is {train_period} steps")
if episode <= 16000 and episode == radius_delays[radius_index]:
radius = radius_s[radius_index]
radius_index += 1
print(f"Radius increased to {radius} cells")
# if start_goal_period elapsed: change start and goal
reset_start_goal = episode > 0 and episode % start_goal_reset_period == 0
# if reset_grid_period elapsed: change grid
reset_grid = episode > 0 and episode % grid_reset_period == 0
obs = env.reset(reset_starts_goals=reset_start_goal, radius=3, reset_grid=reset_grid)
starts.append([agent.init_pos for agent in env.agents])
goals.append([agent.init_goal for agent in env.agents])
working_agents = set(range(n_agents)) # set of agents with on-going mission
total_reward = np.zeros(n_agents)
total_loss = 0
for step in range(steps):
# env.render()
# create new observations using the local obs of each agent and adding the pos and goal of other agents
obs = [torch.cat([obs[i]] + [obs[j][:, -2:] for j in range(n_agents) if j != i], dim=1)
for i in range(n_agents)]
# select and perform actions
actions = [coordinator.select_action(obs[i]) for i in range(n_agents)]
new_obs, rewards, done, info = env.step(actions)
# store the transition in memory
for i in range(n_agents):
if i in working_agents:
# only add relevant transitions: if agent mission was already done, don't add transition in replay memory
action = actions[i]
reward = torch.tensor([rewards[i]], device=DEVICE)
total_reward[i] += reward.item()
# compute new_observation the same way as before
new_observation = torch.cat([new_obs[i]] + [new_obs[j][:, -2:] for j in range(n_agents) if j != i],
dim=1)
coordinator.add_to_buffer(obs[i], action, reward, new_observation, done[i])
if done[i]:
# if agent mission is done, remove it from working agents (to prevent adding future transitions)
reason = ""
if reward == env.rewards["goal"]:
reason = "REACHED GOAL"
elif reward == env.rewards["battery_depleted"]:
reason = "BATTERY DEPLETED"
print(f"Agent {i} is done after {step + 1} time steps ({reason})")
working_agents.remove(i)
# move to the next state
obs = new_obs
step_done += 1
# Perform one step of the optimisation
if step_done > 0 and step_done % train_period == 0:
loss = coordinator.train()
total_loss += loss * coordinator.batch_size
                    step_done = 0
if all(done):
break
# # Perform one step of the optimisation
# if episode > 0 and episode % train_period == 0:
# for i in range(n_agents):
# loss = coordinator.train()
# total_loss += loss * coordinator.batch_size
print(f"Episode {episode} finished after {step + 1} time steps")
total_reward_s.append(total_reward)
total_steps.append(step + 1)
total_loss_s.append(total_loss)
cumulated_reward += np.array(total_reward)
cumulated_rewards.append(cumulated_reward.copy())
times.append(time.time() - start_time)
# Tensorboard logging
# for agent in agents:
# suffix = f"agent_{agent.id}"
# # agent target network parameters
# for name, weight in agent.target_model.named_parameters():
# tb.add_histogram(f"{suffix}.{name}", weight, episode)
# # tb.add_histogram(f"{suffix}.{name}.grad", weight.grad, episode)
tb.add_scalar("Loss", total_loss, episode)
tb.add_scalar("Total Reward", total_reward[0], episode)
tb.add_scalar("Cumulated Reward", cumulated_reward[0], episode)
tb.add_scalar("Total steps", step + 1, episode)
if episode > 0 and episode % 10000 == 0:
t_rwd_s = np.array(total_reward_s)
for i in range(n_agents):
coordinator.save(f"logs/models/{now}_{i}_{episode}.pt")
data = {"Loss": total_loss_s,
"Total Steps": total_steps,
"Times": times}
for i in range(n_agents):
data[f"Total Reward per episode {i}"] = t_rwd_s[:, i]
training_data = pd.DataFrame(data=data)
training_data.to_csv(f"logs/csv/{now}.csv")
# save cumulated reward plot
plt.clf()
fig, ax = plt.subplots(nrows=n_agents, ncols=1, sharex=True, sharey=True, squeeze=0)
fig.suptitle(("Cumulated Reward per episode for " + model))
for i in range(n_agents):
ax[i, 0].set_title(f"Agent {i}")
ax[i, 0].plot(t_rwd_s[:,i])
plt.xlabel("Epochs")
plt.savefig(f"logs/cumulated_rewards/{now}.png")
tb.add_figure("Cumulated Reward Plot", plt.gcf())
plt.clf()
print("Complete")
print("--- %s seconds ---" % (time.time() - start_time))
except Exception as e:
raise e
finally:
results_path = "logs/results/open_grid_random_gcs"
total_reward_s = np.array(total_reward_s)
cumulated_rewards = np.array(cumulated_rewards)
        starts = np.array(starts, dtype='int,int')
        goals = np.array(goals, dtype='int,int')
# save models
for i in range(n_agents):
coordinator.save(f"{results_path}/models/{now}_{i}.pt")
# save config file in logs with good datetime
with open(f"{results_path}/configs/{now}.ini", "w") as configfile:
config.write(configfile)
data = {"Loss": total_loss_s,
"Total Steps": total_steps,
"Times": times}
for i in range(n_agents):
data[f"Total Reward per episode {i}"] = total_reward_s[:, i]
data[f"Starts {i}"] = starts[:,i]
data[f"Goals {i}"] = goals[:,i]
training_data = pd.DataFrame(data=data)
training_data.to_csv(f"{results_path}/csv/{now}.csv")
# save cumulated reward plot
plt.clf()
fig, ax = plt.subplots(nrows=n_agents, ncols=1, sharex=True, sharey=True, squeeze=0)
fig.suptitle(("Cumulated Reward per episode for " + model))
for i in range(n_agents):
ax[i, 0].set_title(f"Agent {i}")
ax[i, 0].plot(total_reward_s[:,i])
plt.xlabel("Epochs")
plt.savefig(f"{results_path}/cumulated_rewards/{now}.png")
tb.add_figure("Cumulated Reward Plot", plt.gcf())
plt.clf()
# save total steps plot
plt.title(("Total steps for " + model))
plt.xlabel("Epochs")
plt.plot(total_steps)
plt.savefig(f"{results_path}/total_steps/{now}.png")
tb.add_figure("Total steps Plot", plt.gcf())
plt.clf()
# Calculate improvement over training duration
# Equivalent to the # of times the agent reaches the goal within the designated number of timesteps
        improvement = np.zeros_like(total_steps)
# Setting up all folders we can import from by adding them to python path
import sys, os, pdb
curr_path = os.getcwd();
sys.path.append(curr_path+'/..');
# Importing stuff from all folders in python path
import numpy as np
from focusfun import *
from refocus import *
from KSpaceFunctions import *
# TESTING CODE FOR FOCUS_DATA Below
import scipy.io as sio
from scipy.signal import hilbert, gausspulse
from scipy.interpolate import RectBivariateSpline
import matplotlib.pyplot as plt
# Methods of Recovery
#method = 'Adjoint';
method = 'Tikhonov';
# Pulse Definition
fc = 5.0e6; # Hz
fracBW = 0.7;
fs = 20e6; # Hz
# Create Pulse in Both Time and Frequency Domain
Nf = 1024; t = np.arange(-Nf,Nf+1)/fs; # (s) Time Vector centered about t=0
impResp = gausspulse(t, fc=fc, bw=fracBW); # Calculate Transmit Pulse
n = impResp.size; P_f = np.fft.fftshift(np.fft.fft(impResp));
f = np.mod(np.fft.fftshift(np.arange(n)*fs/n)+fs/2,fs)-fs/2;
P_f = (f/(f+fc/10))*np.abs(P_f);
P_f = P_f[f>0]; f = f[f>0];
# Aperture Definition
c = 1540; # m/usec
LAMBDA = c/fc;
elemSpace = 0.15e-3; # m
Nelem = 96;
xpos = np.arange(-(Nelem-1)/2, 1+(Nelem-1)/2)*elemSpace;
tx_origin_x = np.arange(-0.00365, 0.00370, 0.00005); # Transmit Origin in [m]
focDepth = 0.020; # m
# Transmit Apodization
X_XDCR, TX_ORIGIN_X = np.meshgrid(xpos, tx_origin_x);
rect = lambda x: np.heaviside(x+1/2,1/2)-np.heaviside(x-1/2,1/2);
sigma_rect = 0.008; # [m]
tx_apod = rect((X_XDCR-TX_ORIGIN_X)/sigma_rect);
# Simulation Space and Time
Nx0 = 256; m = 2; n = 2; dov = 0.060; # m
x = np.arange(-(Nx0*m-1)/2,1+(Nx0*m-1)/2)*(elemSpace/m);
Nu1 = np.round(dov/(elemSpace/n));
z = (np.arange(Nu1))*(elemSpace/n);
t = np.arange(0,2,0.05)*np.abs(focDepth)/c;
## Ground-Truth Multistatic-Transmit Synthetic Aperture
# Calculate [K-Space, Wavefield, etc.] for Each Individual Transmit Element
multistatic_pwResp = np.zeros((x.size, f.size, Nelem), dtype=np.complex); # Pulse-Wave Frequency Response
multistatic_kspace = np.zeros((z.size, x.size, Nelem), dtype=np.complex); # K-Space Response
for elem_idx in np.arange(Nelem):
single_element = np.zeros(Nelem);
single_element[elem_idx] = 1; # Single Element Apodization
# Pulse-Wave Frequency Response
kx, multistatic_pwResp[:,:,elem_idx] = \
pwResp(x, elemSpace, single_element, np.zeros(Nelem), P_f, f, c);
# K-Space Response
kz, multistatic_kspace[:,:,elem_idx] = \
pwResp2kSpace(kx, f, multistatic_pwResp[:,:,elem_idx], z, c);
Kx, Kz = np.meshgrid(kx, kz); # K-Space Grid
K = np.sqrt(Kx**2 + Kz**2); # Radius in K-Space
## Transmit Pulse-Wave Frequency Response for Each Transmit Beam
# Pulse-Wave Frequency Response for Each Transmit Beam
tx_pwResp = np.zeros((x.size, f.size, tx_origin_x.size), dtype=np.complex);
tx_delays = np.zeros((tx_origin_x.size, Nelem), dtype=np.complex);
for tx_origin_x_idx in np.arange(tx_origin_x.size):
# Calculating Transmit Delays for Each Transmit Beam
if np.isinf(focDepth):
tx_delays[tx_origin_x_idx, :] = np.zeros(xpos.shape);
else:
tx_delays[tx_origin_x_idx, :] = (np.sign(focDepth) * \
np.sqrt((xpos-tx_origin_x[tx_origin_x_idx])**2+focDepth**2)-focDepth)/c;
# Pulse-Wave Frequency Response for Each Transmit Beam
kx, tx_pwResp[:,:,tx_origin_x_idx] = pwResp(x, elemSpace, \
tx_apod[tx_origin_x_idx, :], tx_delays[tx_origin_x_idx, :], P_f, f, c);
# Calculate K-Space Response For Each Transmit Beam
tx_kspace = np.zeros((z.size, x.size, tx_origin_x.size), dtype=np.complex); # K-Space Response
for tx_origin_x_idx in np.arange(tx_origin_x.size): # K-Space Response
_, tx_kspace[:,:,tx_origin_x_idx] = \
pwResp2kSpace(kx, f, tx_pwResp[:,:,tx_origin_x_idx], z, c);
# Reconstruct Transmit Wavefield for Transmit Beam
tx_origin_x_idx = 74;
_, _, psf_t = kspace2wavefield(kx, kz, (Kz>0)*tx_kspace[:,:,tx_origin_x_idx], c, t);
# K-Space of a Single Transmit Beam
plt.figure(); imagesc(kx, kz, np.abs(tx_kspace[:,:,tx_origin_x_idx]), \
(0, np.max(np.abs(tx_kspace[:,:,tx_origin_x_idx]))) );
plt.xlabel('lateral frequency [1/m]');
plt.ylabel('axial frequency [1/m]');
plt.title('K-Space of Selected Transmit Beam');
## Simulate Multistatic Synthetic Aperture Recovery Techniques
# Decode Multistatic data Using REFoCUS
if method == 'Adjoint':
multistatic_recov_pwResp = \
multistatic_recov(kx, f, tx_pwResp, tx_apod, tx_delays, Hinv_adjoint, lambda f: 1);
elif method == 'Tikhonov':
multistatic_recov_pwResp = \
multistatic_recov(kx, f, tx_pwResp, tx_apod, tx_delays, Hinv_tikhonov, 1e-3);
# Calculate K-Space Responses For Each Recovered Element
multistatic_recov_kspace = np.zeros((z.size, x.size, Nelem), dtype=np.complex); # K-Space Response
for elem_idx in np.arange(Nelem): # K-Space Response
_, multistatic_recov_kspace[:,:,elem_idx] = \
pwResp2kSpace(kx, f, multistatic_recov_pwResp[:,:,elem_idx], z, c);
## K-Space and Wavefield for Single Element Transmits
# K-Space of the Adjoint-Based Transmit Response
plt.figure(); plt.subplot(1,2,1);
imagesc(kx, kz, np.mean(np.abs(multistatic_kspace)
#Copyright (c) 2014, <NAME>
#All rights reserved.
#
#Redistribution and use in source and binary forms, with or without modification, are permitted provided that the following conditions are met:
#
#1. Redistributions of source code must retain the above copyright notice, this list of conditions and the following disclaimer.#
#
#2. Redistributions in binary form must reproduce the above copyright notice, this list of conditions and the following disclaimer in the documentation and/or other materials provided with the distribution.
#
#THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE
#DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR
#SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
#THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
import numpy as np
#select based on maximum distance to dot product=0 hyperplane.
#this version does not normalize to get actual euclidean distance
def lwta_select_func(self):
nodes_per_group = self.nodes_per_group
    num_groups = self.weighted_sums.shape[0]//nodes_per_group  # integer division so the reshape below gets an int
batch_size = self.weighted_sums.shape[1]
#print('weighted_sums shape: ' + str(self.weighted_sums.shape))
#print('num_groups: ' + str(num_groups) + ' nodes_per_group: ' + str(nodes_per_group) + ' batch_size: ' + str(batch_size))
#note we remove the bias node for this grouping
activations_grouped = np.reshape(self.weighted_sums[0:-1,:],(num_groups,nodes_per_group,batch_size))
#need to swap axes for the broadcasting to work properly
activations_grouped = np.swapaxes(activations_grouped,0,1);
#we want "True" neurons that are selected FOR REMOVAL
activations_selected = (activations_grouped != np.max(activations_grouped,axis=0))
#swap axes back and reshape back to 2d matrix
activations_selected = np.swapaxes(activations_selected,0,1)
activations_selected = np.reshape(activations_selected,(num_groups*nodes_per_group,batch_size))
#append the bias back
#print('selected neurons dtype: ' + str(activations_selected.dtype))
self.selected_neurons = np.append(activations_selected,np.ones((1,activations_selected.shape[1]),dtype=np.bool),axis=0)
#print('selected neurons dtype: ' + str(self.selected_neurons.dtype))
self.output[self.selected_neurons] = 0;
def maxout_select_func(self):
nodes_per_group = self.nodes_per_group
    num_groups = self.weighted_sums.shape[0]//nodes_per_group  # integer division so the reshape below gets an int
batch_size = self.weighted_sums.shape[1]
#print('weighted_sums shape: ' + str(self.weighted_sums.shape))
#print('num_groups: ' + str(num_groups) + ' nodes_per_group: ' + str(nodes_per_group) + ' batch_size: ' + str(batch_size))
#note we remove the bias node for this grouping
activations_grouped = np.reshape(self.weighted_sums[0:-1,:],(num_groups,nodes_per_group,batch_size))
#need to swap axes for the broadcasting to work properly
    activations_grouped = np.swapaxes(activations_grouped,0,1)
# Copyright 2019-2021 ETH Zurich and the DaCe authors. All rights reserved.
""" Tests dace.program as class methods """
import dace
import numpy as np
import sys
import time
class MyTestClass:
""" Test class with various values, lifetimes, and call types. """
classvalue = 2
def __init__(self, n=5) -> None:
self.n = n
@dace.method
def method_jit(self, A):
return A + self.n
@dace.method
def method(self, A: dace.float64[20]):
return A + self.n
@dace.method
def __call__(self, A: dace.float64[20]):
return A * self.n
@dace.method
def other_method_caller(self, A: dace.float64[20]):
return self.method(A) + 2 + self(A)
@staticmethod
@dace.program
def static(A: dace.float64[20]):
return A + A
@staticmethod
@dace.program
def static_withclass(A: dace.float64[20]):
return A + MyTestClass.classvalue
@classmethod
@dace.method
def clsmethod(cls, A):
return A + cls.classvalue
class MyTestCallAttributesClass:
class SDFGMethodTestClass:
def __sdfg__(self, *args, **kwargs):
@dace.program
def call(A):
A[:] = 7.0
return call.__sdfg__(*args)
def __sdfg_signature__(self):
return ['A'], []
def __init__(self, n=5) -> None:
self.n = n
self.call_me = MyTestCallAttributesClass.SDFGMethodTestClass()
@dace.method
def method_jit(self, A):
self.call_me(A)
return A + self.n
@dace.method
def __call__(self, A):
self.call_me(A)
return A * self.n
@dace.method
def method(self, A: dace.float64[20]):
self.call_me(A)
return A + self.n
@dace.method
def method_jit_with_scalar_arg(self, A, b):
self.call_me(A)
return A + b
def test_method_jit():
A = np.random.rand(20)
cls = MyTestClass(10)
assert np.allclose(cls.method_jit(A), A + 10)
def test_method():
A = np.random.rand(20)
cls = MyTestClass(10)
assert np.allclose(cls.method(A), A + 10)
def test_method_cache():
A = np.random.rand(20)
cls1 = MyTestClass(10)
cls2 = MyTestClass(11)
assert np.allclose(cls1.method(A), A + 10)
assert np.allclose(cls1.method(A), A + 10)
assert np.allclose(cls2.method(A), A + 11)
def test_callable():
A = np.random.rand(20)
cls = MyTestClass(12)
assert np.allclose(cls(A), A * 12)
def test_static():
A = np.random.rand(20)
assert np.allclose(MyTestClass.static(A), A + A)
def test_static_withclass():
A = np.random.rand(20)
# TODO(later): Make cache strict w.r.t. globals and locals used in program
# assert np.allclose(MyTestClass.static_withclass(A), A + 2)
# Modify value
MyTestClass.classvalue = 3
assert np.allclose(MyTestClass.static_withclass(A), A + 3)
def test_classmethod():
# Only available in Python 3.9+
if sys.version_info >= (3, 9):
A = np.random.rand(20)
# Modify value first
MyTestClass.classvalue = 4
assert np.allclose(MyTestClass.clsmethod(A), A + 4)
def test_nested_methods():
A = np.random.rand(20)
cls = MyTestClass()
assert np.allclose(cls.other_method_caller(A), (A * 5) + (A + 5) + 2)
def mydec(a):
def mutator(func):
dp = dace.program(func)
@dace.program
def mmm(A: dace.float64[20]):
res = dp(A, a)
return res
sdfg = mmm.to_sdfg()
return sdfg
return mutator
def someprog(A: dace.float64[20], a: dace.float64):
res = A + a
return res
def someprog_indirection(a):
return mydec(a)(someprog)
def test_decorator():
@dace.program(constant_functions=True)
def otherprog(A: dace.float64[20]):
res = np.empty_like(A)
someprog_indirection(3)(A=A, __return=res)
return res
sdfg = otherprog.to_sdfg()
A = np.random.rand(20)
assert np.allclose(sdfg(A), A + 3)
def test_sdfgattr_method_jit():
A = np.random.rand(20)
cls = MyTestCallAttributesClass(10)
assert np.allclose(cls.method_jit(A), 17)
def test_sdfgattr_callable_jit():
A = np.random.rand(20)
cls = MyTestCallAttributesClass(12)
assert np.allclose(cls(A), 84)
def test_sdfgattr_method_annotated_jit():
A = np.random.rand(20)
cls = MyTestCallAttributesClass(14)
assert np.allclose(cls.method(A), 21)
def test_sdfgattr_method_jit_with_scalar():
A = np.random.rand(20)
cls = MyTestCallAttributesClass(10)
assert np.allclose(cls.method_jit_with_scalar_arg(A, 2.0), 9.0)
def test_nested_field_in_map():
class B:
def __init__(self) -> None:
self.field = np.random.rand(10, 10)
@dace.method
def callee(self):
return self.field[1, 1]
class A:
def __init__(self, nested: B):
self.nested = nested
@dace.method
def tester(self):
            val = np.ndarray([2], np.float64)
import numpy as np
import numpy.ma as ma
#import sys
import os
from scipy import linalg
from scipy.signal import detrend, butter, lfilter
from scipy import signal
from scipy.interpolate import griddata
from joblib import Parallel, delayed
#from joblib import load, dump
#from netCDF4 import Dataset
import tempfile
import shutil
import xarray as xr
#import dist
#import math
import datetime
from numpy.linalg import eig, inv
import scipy.spatial.qhull as qhull
#
def distance(origin,destination,radius=6371):
'''
# Haversine formula
# Author: <NAME>
#
# INPUT DATA
# origin :: (lat1, lon1)
# destination :: (lat2, lon2)
#
# RETURNS
#
# d :: distance in km
'''
#
lat1, lon1 = origin
lat2, lon2 = destination
#radius = 6371 # km
#
dlat = np.radians(lat2-lat1)
dlon = np.radians(lon2-lon1)
#
a = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(np.radians(lat1))* np.cos(np.radians(lat2)) * np.sin(dlon/2) * np.sin(dlon/2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
d = radius * c
#
return d
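# Illustrative example (not part of the original module): great-circle distance
# between Helsinki (60.17 N, 24.94 E) and Stockholm (59.33 N, 18.07 E)
# >>> distance((60.17, 24.94), (59.33, 18.07))
# roughly 400 km with the default Earth radius of 6371 km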
def rot_m(theta):
'''
# Create a rotation matrix for a given angle (rotations counter-clokwise)
'''
#
c,s = np.cos(theta), np.sin(theta)
#
return np.array(((c,-s), (s, c)))
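# Illustrative example (not part of the original module): a 90-degree
# counter-clockwise rotation maps the x unit vector onto the y unit vector
# >>> rot_m(np.pi/2) @ np.array([1.0, 0.0])   # ~ (0, 1) up to floating-point error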
def create_A(angles=range(90)):
'''
# Create a counter-clockwise rotation matrix A in the matrix equation k=A*K
# note that this *counter-clockwise* so make sure the angle makes sense
# for your case. For example if your data is at 10 deg rotation from x-y plane
# you should call the function with angles=np.array([360-10])
# -> this will rotate a full circle back to x-y plane
#
# A[angle0,:]=[cos(angle0)**2, sin(angle0)**2, sin(2*angle0)]
# A[angle0,:]=[sin(angle0)**2, cos(angle0)**2, -sin(2*angle0)]
# .
# .
# .
# A[angleN,:]=[cos(angleN)**2, sin(angleN)**2, sin(2*angleN)]
# A[angleN,:]=[sin(angleN)**2, cos(angleN)**2, -sin(2*angleN)]
#
# the input variable is a list (or an array) of angles
'''
#
A=np.zeros((len(angles)*2,3))
c=0
for ang in angles:
A[c,0]=np.cos(np.radians(ang))**2
A[c,1]=np.sin(np.radians(ang))**2
A[c,2]=np.sin(np.radians(2*ang))
A[c+1,0]=np.sin(np.radians(ang))**2
A[c+1,1]=np.cos(np.radians(ang))**2
A[c+1,2]=-np.sin(np.radians(2*ang))
c=c+2
#
return A
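# Illustrative example (not part of the original module): two angles give a
# 4 x 3 design matrix for the least-squares problem k = A*K
# >>> A = create_A(angles=[0, 45])
# >>> A.shape
# (4, 3)
# >>> A[0]   # the 0-degree rows reduce to the identity mapping
# array([1., 0., 0.])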
def griddata_interp_weights(in_points, out_points, d=2):
'''
# This function returns the triangulations weights used by scipy.griddata
# the weights can then be used with the griddata_interpolation below to
# produce the same results as griddata, but without the need to re-calculate the weights
# -> overall much faster than looping over griddata calls
#
# * This is direct copy from https://stackoverflow.com/questions/20915502/speedup-scipy-griddata-for-multiple-interpolations-between-two-irregular-grids
# Big thanks to Jaime/unutbu for saving my day
'''
tri = qhull.Delaunay(in_points)
simplex = tri.find_simplex(out_points)
vertices = np.take(tri.simplices, simplex, axis=0)
temp = np.take(tri.transform, simplex, axis=0)
delta = out_points - temp[:, d]
bary = np.einsum('njk,nk->nj', temp[:, :d, :], delta)
return vertices, np.hstack((bary, 1 - bary.sum(axis=1, keepdims=True)))
def griddata_interpolation(values, vtx, wts):
'''
# This is essentially the interpolation part of griddata
# Use griddata_interp_weights to get the vtx, wts (vertices and weights)
# and then call this function to do the interpolation
'''
return np.einsum('nj,nj->n', np.take(values, vtx), wts)
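# Illustrative usage sketch (assumed variable names, not part of the original module):
# triangulate once, then reuse the same weights for many fields on the same points
# >>> in_pts, out_pts = np.random.rand(100, 2), np.random.rand(500, 2)
# >>> vtx, wts = griddata_interp_weights(in_pts, out_pts)
# >>> field1_out = griddata_interpolation(field1, vtx, wts)   # field1.shape == (100,)
# >>> field2_out = griddata_interpolation(field2, vtx, wts)   # no re-triangulation needed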
def smooth2D_loop(k2,h2,n,ymax,xmax,jind,iind,lat,lon,datain,data_out,weights_out,use_weights,weights_only,use_median,use_dist,xscaling):
"""This is the loop to be run paralllel by smooth2D_parallel. Should not be called directly. """
for k in range(k2,min([k2+h2,len(jind)])):
j = jind[k]
i = iind[k]
nx = xscaling*n
jind2 = []
iind2 = []
dxy = []
c = 0
for ib in range(-nx,nx+1):
for jb in range(-n,n+1):
if ((j+jb)>=ymax or (j+jb)<0):
jind2.append(j)
else:
jind2.append(j+jb)
if (i+ib)>=xmax: #note that xmin case is automatically covered thanks to python indexing
iind2.append((i+ib)-xmax)
elif (i+ib)<0:
iind2.append(xmax+(i+ib))
else:
iind2.append(i+ib)
if datain.mask[jind2[-1],iind2[-1]]:
jind2[-1]=j
iind2[-1]=i
if use_weights and use_dist:
if len(lon.shape)==1:
dxy.append(distance([lat[j],lon[i]],[lat[jind2[c]],lon[iind2[c]]]))
else:
dxy.append(distance([lat[j,i],lon[j,i]],[lat[jind2[c],iind2[c]],lon[jind2[c],iind2[c]]]))
c=c+1
if k%10000.==0:
print(k, c, j, i)
if use_weights:
if use_dist:
dxy=np.array(dxy)
else:
if len(lon.shape)==1:
lon2,lat2=np.meshgrid(lon,lat)
else:
lon2=lon
lat2=lat
dxy=np.cos(lat2[jind2,iind2]*np.pi/180.)
if ma.sum(dxy)==0:
weights=np.ones(len(dxy))
diind=np.argsort(dxy)
else:
diind=np.argsort(dxy)
weights=(float(ma.sum(np.sort(dxy)))-np.sort(dxy))/ma.sum(float(ma.sum(np.sort(dxy)))-np.sort(dxy))
weights_out[k,:,0]=weights
weights_out[k,:,1]=np.array(jind2)[diind]
weights_out[k,:,2]=np.array(iind2)[diind]
else:
weights_out[k,:,0]=0
weights_out[k,:,1]=np.array(jind2)
weights_out[k,:,2]=np.array(iind2)
if not weights_only:
if use_weights:
data_out[j,i]=ma.sum(datain[jind2[diind],iind2[diind]]*weights)/ma.sum(weights)
elif use_median:
data_out[j,i]=ma.median(datain[jind2,iind2])
else:
data_out[j,i]=ma.mean(datain[jind2,iind2])
def smooth2D_parallel(lon,lat,datain,n=1,num_cores=30,use_weights=False,weights_only=False,use_median=False,save_weights=False,save_path='', use_dist=False, xscaling=2):
"""
    2D smoothing of a (preferably masked) array datain (shape (lat,lon)) using a halo of n; if n=1 (default) each point becomes a 9-point average. Option to use distance weights.
Parameters
----------
lon : longitudes of the input data (1D or 2D array)
lat : latitudes of the input data (1D or 2D array)
    datain : input data (should be shape (lat,lon)) and preferably masked
    n : Size of the halo over which the smoothing is applied.
        If n=1 (default) then each point will be a 9-point average
Use xscaling to use a different halo in x direction
xscaling : Scale the halo in x-direction (default 2), this is reasonable if data is on lat, lon grid
num_cores : number of cores to use (default 30)
use_weights : Controls if specific weights will be calculated (default is False)
If False then will return the indices of the grid cells that should be used for smoothing
with equal weights (set to 0). If True then weights will be calculated (see below for different options)
use_dist : If true then the weights will be calculated based on distance (in km) from the central cell.
Default is False in which case distance in degrees will be used.
weights_only : If True only calculate weights, do not apply to the data (dataout will be empty).
Default is False i.e. weights will be applied!
use_median : Only used if weights_only=False and use_weights=False
In this case one has an option to smooth either by calculating the median (use_median=True)
or by using the mean of the surrounding points (use_median=False)
save_weights : If True the weights will be saved to npz file (default is False).
This is usefull if the domain is large and the smoothing will be applied often
save_path : Location in which the weights will be saved. Default is to save in the work directory
"""
#dataout=ma.zeros(datain.shape)
ymax,xmax=datain.shape
if ma.is_masked(datain):
jind,iind=ma.where(1-datain.mask)
else:
jind,iind=ma.where(np.ones(datain.shape))
#
    h2 = len(jind)//num_cores  # integer division: h2 is used as a range() step below
folder1 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum1.mmap')
data_out = np.memmap(path1, dtype=float, shape=(datain.shape), mode='w+')
#
folder2 = tempfile.mkdtemp()
path2 = os.path.join(folder2, 'dum2.mmap')
weights_out = np.memmap(path2, dtype=float, shape=((len(jind),len(range(-n,n+1))*len(range(-2*n,2*n+1)),3)), mode='w+')
#weights_out=np.memmap(path2, dtype=float, shape=((len(jind),len(range(-n,n+1))**2,3)), mode='w+')
#
Parallel(n_jobs=num_cores)(delayed(smooth2D_loop)(k2,h2,n,ymax,xmax,jind,iind,lat,lon,datain,data_out,weights_out,use_weights,weights_only,use_median,use_dist,xscaling) for k2 in range(0,len(jind),h2))
data_out=ma.masked_array(np.asarray(data_out),mask=datain.mask)
weights_out=np.asarray(weights_out)
if save_weights:
np.savez(save_path+str(n)+'_degree_smoothing_weights_coslat_y'+str(n)+'_x'+str(xscaling*n)+'.npz',weights_out=weights_out,jind=jind,iind=iind)
try:
shutil.rmtree(folder1)
except OSError:
pass
#
try:
shutil.rmtree(folder2)
except OSError:
pass
#
return data_out,weights_out
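# Illustrative usage sketch (assumed variable names, not part of the original module):
# smooth a masked 2D field with a 1-cell halo (2 cells in x by default) using
# distance-based weights, and save the weights for later reuse
# >>> sst_smooth, w = smooth2D_parallel(lon, lat, ma.masked_invalid(sst), n=1,
# ... num_cores=4, use_weights=True, use_dist=True, save_weights=True)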
def smooth_with_weights_loop(k2,h2,datain,data_out,weights,jind,iind,use_weights,use_median,loop=False):
if loop:
for k in range(k2,min([k2+h2,len(jind)])):
if k%10000.==0:
print(k)
j=jind[k]
i=iind[k]
c=0
if use_weights:
data_out[j,i]=ma.sum(datain[weights[k,:,1].astype('int'),weights[k,:,2].astype('int')]*weights[k,:,0])/ma.sum(weights[k,:,0])
elif use_median:
data_out[j,i]=ma.median(datain[weights[k,:,1].astype('int'),weights[k,:,2].astype('int')])
else:
data_out[j,i]=ma.mean(datain[weights[k,:,1].astype('int'),weights[k,:,2].astype('int')])
else:
k3=min([k2+h2,len(jind)])
if use_weights:
data_out[k2:k2+h2]=ma.sum(datain[weights[k2:k3,:,1].astype('int'),weights[k2:k3,:,2].astype('int')]*weights[k2:k3,:,0],-1)/ma.sum(weights[k2:k3,:,0])
elif use_median:
data_out[k2:k3]=ma.median(datain[weights[k2:k3,:,1].astype('int'),weights[k2:k3,:,2].astype('int')],-1)
else:
data_out[k2:k3]=ma.mean(datain[weights[k2:k3,:,1].astype('int'),weights[k2:k3,:,2].astype('int')],-1)
def smooth_with_weights_parallel(datain,n=1,num_cores=30,weights=None,jind=None,iind=None,use_weights=False,use_median=False,loop=False,save_path=''):
"""
Given that one has already calculated and saved smoothing weights/indices with smooth2D_parallel one can simply apply them with this script
    Note: in practice this is fastest to do in serial, so consider doing that instead.
"""
# load the data if needed - don't use this if you're smoothing a timeseries
if weights is None:
data=np.load(save_path+str(n)+'_degree_smoothing_weights_new.npz')
weights=data['weights_out'][:]
jind=data['jind'][:]
iind=data['iind'][:]
    # prepare for the parallel loop
    h2 = len(jind)//num_cores  # integer division: h2 is used as a range() step below
folder1 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum1.mmap')
if loop:
data_out=np.memmap(path1, dtype=float, shape=(datain.shape), mode='w+')
# Parallel(n_jobs=num_cores)(delayed(smooth_with_weights_loop)(k2,h2,datain,data_out,weights,jind,iind,use_weights,use_median,loop) for k2 in range(0,len(jind),h2))
else:
data_out=np.memmap(path1, dtype=float, shape=(len(jind)), mode='w+')
# Parallel(n_jobs=num_cores)(delayed(smooth_with_weights_loop)(k2,h2,datain.flatten(),data_out,weights,jind,iind,use_weights,use_median,loop) for k2 in range(0,len(jind),h2))
# this should work but seemps to be slow
#
Parallel(n_jobs=num_cores)(delayed(smooth_with_weights_loop)(k2,h2,datain,data_out,weights,jind,iind,use_weights,use_median,loop) for k2 in range(0,len(jind),h2))
# mask output
if loop:
data_out=ma.masked_array(np.asarray(data_out),mask=datain.mask)
else:
data_out2=np.zeros(datain.shape)
data_out2[jind,iind]=data_out
data_out=ma.masked_array(data_out2,mask=datain.mask)
# close temp file
try:
shutil.rmtree(folder1)
except OSError:
pass
#
return data_out
def butter_bandstop(lowcut, highcut, fs, btype, order=5):
nyq = 0.5 * fs
low = lowcut / nyq
high = highcut / nyq
b, a = butter(order, [low, high], btype=btype)
return b, a
def butter_bandstop_filter(data, lowcut, highcut, fs, order=5, ax=-1, btype='bandstop'):
"""
bandstop filter, usage:
x_grid = MicroInverse_utils.butter_bandstop_filter((x_grid-np.nanmean(x_grid,0)), 7./375., 7/355., 1, order=3,ax=0)
"""
b, a = butter_bandstop(lowcut, highcut, fs, btype, order=order)
y = signal.filtfilt(b, a, data, axis=ax)
return y
def Implement_Notch_Filter(data, lowcut, highcut, fs=1, order=3, ripple=20, atten=20, filter_type='butter', ax=-1, btype='bandstop'):
"""
Required input defintions are as follows
Parameters
----------
fs : Sampling frequency
lowcut,highcut : The bandwidth bounds you wish to filter
ripple : The maximum passband ripple that is allowed in db
order : The filter order. For FIR notch filters this is best set to 2 or 3,
IIR filters are best suited for high values of order. This algorithm
is hard coded to FIR filters
filter_type : 'butter', 'bessel', 'cheby1', 'cheby2', 'ellip'
data : the data to be filtered
"""
nyq = 0.5 * fs
# low = freq - band/2.0
# high = freq + band/2.0
low = lowcut/nyq
high = highcut/nyq
b, a = signal.iirfilter(order, [low, high], rp=ripple, rs=atten, btype=btype,analog=False, ftype=filter_type)
filtered_data = signal.filtfilt(b, a, data,axis=ax)
#
return filtered_data
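# Illustrative usage sketch (assumed values, not part of the original module):
# notch out a narrow band around the annual cycle from daily data (fs = 1/day)
# >>> x_filt = Implement_Notch_Filter(x, lowcut=1/370., highcut=1/360., fs=1, order=3, ax=0)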
def remove_climatology_loop(jj,h2,dum,dum_out,dt,rem6month):
"""
Remove climatology, i.e. 12 month and optionally 6 month
(rem6month=True, default setting) from the data
"""
print(jj, 'removing climatology...')
dum1=dum[:,jj:jj+h2] # .data
f1=1*dt/365.
f2=2*dt/365.
t=np.arange(dum1.shape[0])
#
x1 = np.ones((len(t),3))
x1[:,1] = np.cos((2*np.pi)*f1*t)
x1[:,2] = np.sin((2*np.pi)*f1*t)
#
if rem6month:
x2 = np.ones((len(t),3))
x2[:,1] = np.cos((2*np.pi)*f2*t)
x2[:,2] = np.sin((2*np.pi)*f2*t)
#
inds=np.where(np.isfinite(dum1[0,:]))[0]
#
if len(inds)>0: # do nothing if only land points otherwise enter the loop
for j in inds:
y = dum1[:,j]
# fit one year signal
beta = np.linalg.lstsq(x1, y, rcond=None)[0]
y12mo = beta[0]+beta[1]*np.cos((2*np.pi)*f1*t)+beta[2]*np.sin((2*np.pi)*f1*t)
#
if rem6month:
# fit 6 month signal
beta=np.linalg.lstsq(x2, y, rcond=None)[0]
y6mo = beta[0]+beta[1]*np.cos((2*np.pi)*f2*t)+beta[2]*np.sin((2*np.pi)*f2*t)
dum_out[:,jj+j]=y-y12mo-y6mo
else:
dum_out[:,jj+j]=y-y12mo
def remove_climatology(var,dt,num_cores=18,rem6month=True):
"""
Remove annual cycle (fitted sine curve) from a numpy.array which has dimensions (nt,nx*ny)
Parameters
----------
var : numpy.array
Data from which annual cycle is to be removed.
Dimensions (nt,nx*ny), no nan's allowed!
dt : int
timestep in days
num_cores : int, optional
Number of cores for multiprocessing (default 18)
rem6month : bool, optional
If True (default) also 6 month (180 day) signal is removed
Returns
-------
output : numpy.array
The output array from which the annual cycle has been removed
"""
# num_cores=20
h2=var.shape[-1]//num_cores
#
var=var-np.nanmean(var,0)
#
folder1 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum1.mmap')
dum=np.memmap(path1, dtype=float, shape=(var.shape), mode='w+')
dum[:]=var[:]
#
folder2 = tempfile.mkdtemp()
path2 = os.path.join(folder2, 'dum2.mmap')
X_par=np.memmap(path2, dtype=float, shape=(var.shape), mode='w+')
#
# Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum1,X_par) for jj in range(0,var.shape[-1],h2))
# Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum[:,jj:jj+h2],X_par[:,jj:jj+h2]) for jj in range(0,var.shape[-1],h2))
Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum,X_par,dt,rem6month) for jj in range(0,var.shape[-1],h2))
# Parallel(n_jobs=num_cores)(delayed(remove_climatology_loop)(jj,h2,dum1,X_par) for jj in range(0,block_num_lons))
#
output=np.asarray(X_par)
try:
shutil.rmtree(folder1)
except OSError:
pass
try:
shutil.rmtree(folder2)
except OSError:
pass
#
return output
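# Illustrative usage sketch (assumed shapes, not part of the original module):
# var has shape (nt, nx*ny), contains no NaNs, and is at daily resolution (dt=1)
# >>> anomalies = remove_climatology(var, dt=1, num_cores=4)
# >>> anomalies.shape == var.shape
# True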
def remove_climatology2(dum,rem6month=True):
"""
Remove climatology, serial code
"""
print('removing climatology...')
f1=1/365.
f2=2/365.
t=np.arange(dum.shape[0])
dum=dum-np.nanmean(dum,0)
dum2=np.zeros(dum.shape)
for j in range(dum.shape[-1]):
y = dum[:,j].data
# fit one year signal
x = np.ones((len(y),3))
x[:,1] = np.cos((2*np.pi)*f1*t)
x[:,2] = np.sin((2*np.pi)*f1*t)
beta, resid, rank, sigma = np.linalg.lstsq(x, y)
y12mo = beta[0]+beta[1]*np.cos((2*np.pi)*f1*t)+beta[2]*np.sin((2*np.pi)*f1*t)
#
# fit 6 month signal
if rem6month:
x = np.ones((len(y),3))
x[:,1] = np.cos((2*np.pi)*f2*t)
x[:,2] = np.sin((2*np.pi)*f2*t)
beta, resid, rank, sigma = np.linalg.lstsq(x, y)
y6mo = beta[0]+beta[1]*np.cos((2*np.pi)*f2*t)+beta[2]*np.sin((2*np.pi)*f2*t)
dum2[:,j]=y-y12mo-y6mo
else:
dum2[:,j]=y-y12mo
return dum2
def read_files(j,nts,jinds,iinds,filepath,fnames2,var_par,varname,sum_over_depth, depth_lim, depth_lim0, model_data=False):
"""
    Read files in parallel. This function should not be called directly, but via the load_data() function.
var_par should be of shape (len(filenames), time_steps_per_file, ny, nx)
"""
#
fname=fnames2[j]
print(fname)
ds = xr.open_dataset(filepath+fname,decode_times=False)
ds = ds.squeeze() #just in case depth etc.
#
# reading a file with a timeseries of 2D field (i.e. 3D matrix)
if len(var_par.shape)==3 and sum_over_depth==False:
nt,ny,nx=ds[varname].shape
nts[j]=nt
# this is a quick fix without a need to change upstream calls - supposedly faster?
if False:
jlen=np.unique(jinds).shape[0]
ilen=np.unique(iinds).shape[0]
j1=np.reshape(jinds,(jlen,ilen))[:,0]
i1=np.reshape(iinds,(jlen,ilen))[1,:]
exec('dum=ds.'+varname+'[:,j1,i1].values')
dum=ds[varname][:,j1,i1].values
dum=np.reshape(dum,(nt,-1))
else:
# old version - very slow!
dum=ds[varname].values[:,jinds,iinds]
dum[np.where(dum>1E30)]=np.nan
#
var_par[j,:nt,:]=dum
var_par[j,nt:,:]=np.nan # in order to calculate the climatology
# reading a model data file, with a timeseries of 3D field (i.e. 4D matrix) and calculating the volume mean over depth)
elif len(var_par.shape)==3 and sum_over_depth==True and model_data==True:
nt,nz,ny,nx=ds[varname].shape
zaxis=ds['st_edges_ocean'].values
dz=np.diff(zaxis)[depth_lim0:depth_lim]
nts[j]=nt
var_par[j,:nt,:]=np.sum(np.swapaxes(ds[varname].values[:,depth_lim0:depth_lim,jinds,iinds],1,0).T*dz,-1).T/np.sum(dz)
# reading a file with only one 2D field in one file
elif len(var_par.shape)==2 and sum_over_depth==False:
ny,nx=ds[varname].squeeze().shape
var_par[j,:]=ds[varname].squeeze().values[jinds,iinds]
var_par[np.where(var_par>1E30)]=np.nan
# reading a file with only one 3D field in one file, and calculating the volume mean over depth
elif len(var_par.shape)==2 and sum_over_depth==True:
# this is for sum(dz*T)/sum(dz) so weighted mean temperature - areas where depth is less than depth_lim are nan
# var_par[j,:]=np.sum(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T/np.sum(np.diff(ds['depth'].values)[depth_lim0:depth_lim])
# this is for dz*T
# var_par[j,:]=np.nansum(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T
#
# this is for nansum(dz*T)/nansum(dz) so weighted mean temperature - areas where depth is less than depth_lim will have a temperature
var_par[j,:]=np.nansum(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T/np.nansum(abs(np.sign(ds[varname].values[depth_lim0:depth_lim,jinds,iinds].T))*np.diff(ds['depth'].values)[depth_lim0:depth_lim],-1).T
# consider making another one here which is heat content ds[varname]*density where density=gsw.density(ds[varname],ds['salinity'],p=0)
#
print('closing the file')
ds.close()
def load_data(filepath,fnames,jinds,iinds,varname,num_cores=20,dim4D=True, sum_over_depth=False, depth_lim=13, model_data=False, remove_clim=False,dt=1, depth_lim0=0):
"""
Load a timeseries of a 2D field (where possibly summing over depth if a 3D variable) in parallel
Parameters
----------
filepath : str
Directory path pointing to the data folder
Can be empty string if path is included in fnames
fnames : list
List of file names
jinds : list
List of non-nan indices in y-direction.
iinds : list
List of non-nan indices in x-direction
Note that one should create jinds and iinds as follows
1) create a 2D mask: 1 where nan, else 0
usually landmask for ocean data
2) then do the following
jinds,iinds = np.where(mask)
jinds,iinds = np.meshgrid(jinds,iinds)
jinds = jinds.flatten()
iinds = iinds.flatten()
varname : str
Name of the variable of interest in the data file
num_cores : int
Number of cores to use (default 20)
dim4D : bool
True (default) if a file has more than one timestep
sum_over_depth : bool
False (default) if the data has a depth axis
and one wants a sum over a depth range.
    depth_lim0 : integer
        Upper limit for the depth average
    depth_lim : integer
        Lower limit for the depth average
remove_clim : boolean
If True a daily climatology will be removed.
Best used only if the data is at daily time resolution
dt : integer
Time resolution of the input data in days
Returns
-------
var : numpy.array
Timeseries of the requested variable (varname).
Has the shape (time,jinds,iinds).
var_clim : numpy.array
Climatology of the requested variable (varname).
None if remove_clim=False (default)
"""
# create temp files to host the shared memory variables
folder1 = tempfile.mkdtemp()
folder2 = tempfile.mkdtemp()
path1 = os.path.join(folder1, 'dum0.mmap')
path2 = os.path.join(folder2, 'dum1.mmap')
    if dim4D: # in case the files have more than one timestep in each file
vshape=(len(fnames),366,len(jinds))
var_par=np.memmap(path1, dtype=float, shape=vshape, mode='w+')
    else: # in case there is only one timestep in a file
vshape=(len(fnames),len(jinds))
var_par=np.memmap(path1, dtype=float, shape=vshape, mode='w+')
# nts will keep track of number of days in a year
nts=np.memmap(path2, dtype=float, shape=(len(fnames)), mode='w+')
    path3 = os.path.join(folder2, 'dum3.mmap')  # separate backing file so fnames2 does not overwrite nts
    fnames2=np.memmap(path3, dtype='U'+str(len(fnames[0])+1), shape=(len(fnames)), mode='w+')
fnames2[:]=fnames #np.asarray(fnames[:])
# launch the parallel reading
Parallel(n_jobs=num_cores)(delayed(read_files)(j,nts,jinds,iinds,filepath,fnames2,var_par,varname,sum_over_depth, depth_lim, depth_lim0, model_data=model_data) for j,fname in enumerate(fnames))
if dim4D:
        print('calculating climatology')
var_clim=np.nanmean(var_par,0)
if remove_clim:
print('removing climatology')
# smooth the daily climatology with monthly filter, as the climatology will be still noisy at daily scales
var_clim=np.concatenate([var_clim[-120//dt:,],var_clim,var_clim[:120//dt,]],axis=0)
b,a=signal.butter(3,2./(30/dt))
jnonan=np.where(np.isfinite(np.sum(var_clim,0)))
var_clim[:,jnonan]=signal.filtfilt(b,a,var_clim[:,jnonan],axis=0)
var_clim=var_clim[120//dt:120//dt+366//dt,]
#
# this is the on off switch for removing the climatology
var_clim=var_clim*int(remove_clim)
var=var_par[0,:int(nts[0]),:]-var_clim[:int(nts[0]),:]
# concatenate the data - note that here nts is used to strip down the 366th day when it's not a leap year
# and include the 366th day when it is a leap year
for j in range(1,len(fnames)):
print(j)
var=np.concatenate([var,var_par[j,:int(nts[j]),:]-var_clim[:int(nts[j]),:]],axis=0)
#
else:
# if only one timestep per file
var=np.asarray(var_par)
var[np.where(var==0)]=np.nan
if remove_clim:
print('removing climatology')
year0=datetime.date(int(fnames[0][-20:-16]),int(fnames[0][-16:-14]),int(fnames[0][-14:-12])).isocalendar()[0]
year1=datetime.date(int(fnames[-1][-20:-16]),int(fnames[-1][-16:-14]),int(fnames[-1][-14:-12])).isocalendar()[0]
var2=np.ones((year1-year0+1,int(np.ceil(366./dt)),var.shape[1]))*np.nan
#
for j, fname in enumerate(fnames):
year = int(fname[-20:-16])
month = int(fname[-16:-14])
day = int(fname[-14:-12])
c,c1 = datetime.date(year,month,day).isocalendar()[:2]
c = c-year0
c1 = c1-1
var2[c,c1,:] = var[j,:]
#
var_clim=np.nanmean(var2,0)
ind=np.where(np.nansum(var2,-1)[0,:]>0)[0]
var=var2[0,ind,:]-var_clim[ind,:]
for j in range(1,var2.shape[0]):
ind=np.where(np.nansum(var2,-1)[j,:]>0)[0]
var=np.concatenate([var,var2[j,ind,:]-var_clim[ind,:]],axis=0)
else:
var_clim=None
#
print('close files')
#
try:
shutil.rmtree(folder1)
except OSError:
pass
try:
shutil.rmtree(folder2)
except OSError:
pass
#
return var, var_clim
#
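#
# Example usage of load_data (a minimal sketch; the file list, variable name and
# mask construction below are illustrative assumptions, not part of the original code):
def _example_load_sst_anomalies(datapath, fnames, valid_mask):
    """Load a daily SST anomaly timeseries with load_data; valid_mask is 1 where data exist."""
    import numpy as np
    # build the flattened index lists following the convention in the docstring above
    jinds, iinds = np.where(valid_mask)
    jinds, iinds = np.meshgrid(jinds, iinds)
    jinds = jinds.flatten()
    iinds = iinds.flatten()
    var, var_clim = load_data(datapath, fnames, jinds, iinds, 'sst',
                              num_cores=8, dim4D=True, remove_clim=True, dt=1)
    return var, var_clim
#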
def parallel_inversion_9point(j,x_grid,block_vars,Stencil_center,Stencil_size,block_num_samp,block_num_lats,block_num_lons,block_lat,block_lon,Tau,Dt_secs,inversion_method='integral',dx_const=None, dy_const=None, DistType='interp', radius=6371, nn=4):
"""
"""
for i in range(2,block_num_lats-2):
if np.isfinite(np.sum(x_grid[i,j,:])):
xn = np.zeros((Stencil_size,block_num_samp))
# count non-border neighbors of grid point
numnebs = 0
#
for inn in range(i-1,i+2):
for jnn in range(j-1,j+2):
if np.isfinite(x_grid[inn,jnn,0]):
numnebs=numnebs+1
# only invert if point has 9 non-border neighbors
if numnebs==9:
ib = i
jb = j
#
sads = [-1,+1,-2,+2,-3,+3,-4,+4] # indices for ds - Stencil_center will be the central point - these are spiraling out
jads = [-1,+1, 0, 0,-1,+1,+1,-1] # left,right,down,up,down-left,up-right,right-down,left-up
iads = [ 0, 0,-1,+1,-1,+1,-1,+1]
#
s_ads = [-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads = [-1,+1, 0, 0,-1,+1,+1,-1,-2,-2,-2,+2,+2,+2,-1, 0,+1,+1, 0, -1, -2, +2, +2, -2]
i_ads = [ 0, 0,-1,+1,-1,+1,-1,+1,+1, 0,-1,+1, 0,-1,-2,-2,-2,+2, +2, +2, -2, +2, -2, +2]
#
ds = np.zeros(len(s_ads)+1) # distance to the Stencil_center
dx = np.zeros(len(s_ads)+1)
dy = np.zeros(len(s_ads)+1)
ang2 = [180,0,270,90,225,45,315,135] # left,right,down,up, down-left,up-right,right-down,left-up
ds2 = np.zeros((len(ang2),len(ds)))
cent = len(s_ads)//2
#
# CALCULATE THE DISTANCE BETWEEN THE CENTRAL AND SURROUNDING POINTS
for s,ss in enumerate(s_ads):
#
ds[cent+ss] = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib+i_ads[s],jb+j_ads[s]]], radius=radius)*1000
dx[cent+ss] = np.sign(j_ads[s])*distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib,jb],block_lon[ib+i_ads[s],jb+j_ads[s]]], radius=radius)*1000
dy[cent+ss] = np.sign(i_ads[s])*distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib,jb]], radius=radius)*1000
#
ang=np.arctan2(dy,dx)*180/np.pi
ang[np.where(ang<0)]=ang[np.where(ang<0)]+360
#
if DistType in ['interp'] and np.any(dx_const==None) and np.any(dy_const==None):
# we need to interpolate x_grid values to be at the same distance from the central point - this is because the inversion doesn't know about the distance.
#
# CHOOSE A DISTANCE TO USE - HERE 1km off from the median of the 4 closest cells
dr = np.nanmedian(ds[[cent-2,cent-1,cent+1,cent+2]])+1E3
ds2[:,Stencil_center] = dr
# find out how far each point is from the unit circle point facing each grid cell.
# axis=0 loops over each point of interest, and axis=1 loops over all the surrounding points
for s,a2 in enumerate(ang2):
for s2,ss2 in enumerate(s_ads):
ds2[s,cent+ss2]=np.sqrt(ds[cent+ss2]**2+dr**2-2*dr*ds[cent+ss2]*np.cos((ang[cent+ss2]-a2)*np.pi/180.))
#
# calculate weighted mean of the surrounding cells (linear interpolation)
ds2[:,cent] = dr
winds = np.argsort(ds2,axis=1) #
ds2_sort = np.sort(ds2,axis=1)
                    weigths = ((1/ds2_sort[:,:nn]).T/(np.sum(1/ds2_sort[:,:nn],1))).T # inverse-distance weights for the nn closest points
weigths[np.where(np.isnan(weigths))] = 1
#
xn[Stencil_center+np.array(sads),:] = np.sum(x_grid[ib+np.array(i_ads),jb+np.array(j_ads),:][winds[:,:nn],:].T*weigths.T,1).T
xn[Stencil_center,:] = x_grid[ib,jb,:]
else:
#
dr = ds
xn[Stencil_center+np.array(sads),:] = x_grid[ib+np.array(iads),jb+np.array(jads),:]
xn[Stencil_center,:] = x_grid[ib,jb,:]
#
# use only those stencil members that are finite - setting others to zero
fin_inds=np.isfinite(xn[:,0])
xn[np.where(~fin_inds)[0],:]=0
Stencil_center2=Stencil_center
# integral method
if inversion_method in ['integral']:
xnlag = np.concatenate((xn[:,Tau:], np.zeros((xn.shape[0],Tau))),axis=1)
a=np.dot(xnlag,xn.T)
b=np.dot(xn,xn.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a, np.linalg.pinv(b)) #pseudo-inverse
# tmp = np.dot(a.data, np.linalg.inv(b.data))
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
#
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10:
try:
bb = (1./(Tau*Dt_secs))*linalg.logm(tmp)
except (ValueError,ZeroDivisionError,OverflowError):
bn = np.zeros(Stencil_size)
else:
bn = np.real(bb[Stencil_center,:])
else:
bn=np.zeros(Stencil_size)
bn[~np.isfinite(bn)] = 0
# inverse by derivative method
elif inversion_method in ['derivative']:
xnfut = np.concatenate((xn[:,1:], np.zeros((xn.shape[0],1))),axis=1)
xnlag = np.concatenate((np.zeros((xn.shape[0],Tau)), xn[:,1:xn.shape[1]-Tau+1]),axis=1)
a=np.dot((xnfut-xn),xnlag.T)
b=np.dot(xn,xnlag.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a, np.linalg.pinv(b))
bn_matrix = (1./Dt_secs)*tmp
bn = np.real(bn_matrix[Stencil_center2,:])
bn[np.isnan(bn)] = 0
bn[np.isinf(bn)] = 0
# Alternative integral method
elif inversion_method in ['integral_2']:
xnfut = np.concatenate((xn[:,1:], np.zeros((Stencil_size,1))),axis=1)
xnlag = np.concatenate((np.zeros((Stencil_size,Tau)), xn[:,1:xn.shape[1]-Tau+1]),axis=1)
a=np.dot(xnfut,xnlag.T)
b=np.dot(xn,xnlag.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
# tmp = np.linalg.lstsq(b.T, a.T)[0] #one way to do it
tmp = np.dot(a, np.linalg.pinv(b)) #another way
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10: #check that not all the values are the same
try:
                            bb = (1./(Dt_secs))*linalg.logm(tmp) # this is not working for some reason
except (ValueError,ZeroDivisionError,OverflowError):
bn = np.zeros(Stencil_size)
else:
bn = np.real(bb[Stencil_center,:])
else:
bn=np.zeros(Stencil_size)
bn[~np.isfinite(bn)] = 0
else:
                    bn = np.zeros(Stencil_size)
############################################
# -- solve for U K and R from row of bn -- #
############################################
# actually just save bn - calculate the rest later
block_vars[0,:,i,j]=bn
block_vars[1,:,i,j]=dr
block_vars[2,:,i,j]=fin_inds
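#
# Helper sketch (not part of the original code): parallel_inversion_9point above only
# stores the stencil row bn ("calculate the rest later"); the transport parameters can
# be recovered from it with the same finite-difference relations used in
# parallel_monte_carlo_inversion below. dx and dy are the grid spacings in metres and
# Stencil_center is the index of the central point, as elsewhere in this file.
def _bn_to_transport(bn, dx, dy, Stencil_center):
    """Convert one stencil row bn into velocities (u, v), diffusivities (Kx, Ky) and decay (R)."""
    import numpy as np
    u = -dx * (bn[Stencil_center+1] - bn[Stencil_center-1]) # zonal velocity
    v = -dy * (bn[Stencil_center+2] - bn[Stencil_center-2]) # meridional velocity
    kx = 0.5 * dx**2 * (bn[Stencil_center+1] + bn[Stencil_center-1]) # zonal diffusivity
    ky = 0.5 * dy**2 * (bn[Stencil_center+2] + bn[Stencil_center-2]) # meridional diffusivity
    r = -1. / np.nansum(bn) # decay timescale
    return u, v, kx, ky, r
#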
def parallel_monte_carlo_inversion(j,x_grid,block_vars,Stencil_center,Stencil_size,block_num_samp,block_num_lats,block_num_lons,block_lat,block_lon,Tau,Dt_secs,block_vars2=None,inversion_method='integral',dx_const=None,dy_const=None, DistType='mean',radius=6371,dt_min=365*5, ens=30, percentiles=[25,50,75]):
"""
    Invert 2D data using a 5 point stencil. This function should not be called directly; instead call the inversion() function.
"""
sads=[-1,+1,-2,+2,-1,+1,-2,+2][:4]
jads=[-1,+1, 0, 0,-1,+1,+1,-1][:4]
iads=[ 0, 0,-1,+1,-1,+1,-1,+1][:4]
#
s_ads=[-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads=[-1,+1, 0, 0,-1,+1,+1,-1,-2,-2,-2,+2,+2,+2,-1, 0,+1,+1, 0, -1, -2, +2, +2, -2]
i_ads=[ 0, 0,-1,+1,-1,+1,-1,+1,+1, 0,-1,+1, 0,-1,-2,-2,-2,+2, +2, +2, -2, +2, -2, +2]
#
tstep = (block_num_samp-dt_min)//ens
#
for i in range(1,block_num_lats-1):
numnebs=np.sum(np.isfinite(x_grid[i+np.array(iads),j+np.array(jads),0]))
if numnebs==len(sads):
xn = np.zeros((Stencil_size,block_num_samp))
ib = i
jb = j
if DistType in ['mean'] and np.any(dx_const==None) and np.any(dy_const==None):
# USING MEAN DISTANCE
ds=np.zeros(Stencil_size)
for s,ss in enumerate(sads):
ds[Stencil_center+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+iads[s],jb+jads[s]],block_lon[ib+iads[s],jb+jads[s]]],radius=radius)*1000
#
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
# calculate the mean dx,dy along two major axes
dx = np.mean(ds[Stencil_center+np.array(sads[:2])])
dy = np.mean(ds[Stencil_center+np.array(sads[2:])])
elif DistType in ['interp'] and np.any(dx_const==None) and np.any(dy_const==None):
# INTERPOLATED VERSION
# Interpolate x_grid values to be at the same distance from the central point - this is because the inversion doesn't know about the distance.
# first find the minimum distance - we will interpolate all the other points to be at this distance
                cent = len(s_ads)//2
ds=np.zeros(len(s_ads)+1)
ang=np.zeros(len(s_ads)+1)
for s,ss in enumerate(s_ads):
ds[cent+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib+i_ads[s],jb+j_ads[s]]],radius=radius)*1000
ang[cent+np.array(s_ads)]=np.arctan2(i_ads,j_ads)*180/np.pi
ang[np.where(ang<0)]=ang[np.where(ang<0)]+360
#
dr=np.median(ds[np.where(ds>0)])
ds2=np.zeros((5,len(ds)))
# find out how far each point is from the unit circle point facing each grid cell.
for s,ss in enumerate(sads):
for s2,ss2 in enumerate(s_ads):
ds2[2+ss,cent+ss2]=np.sqrt(ds[cent+ss2]**2+dr**2-2*dr*ds[cent+ss2]*np.cos((ang[cent+ss2]-ang[cent+ss])*np.pi/180.))
#
ds2=np.delete(ds2,2,axis=0) # remove the central point from the points of interest - we know the value already
ds2=np.delete(ds2,cent,axis=1) # remove the central point from the points that affect interpolation - we don't want to transform any information outside
winds=np.argsort(ds2,axis=1) #
ds2_sort=np.sort(ds2,axis=1) #
weigths=((1/ds2_sort[:,:3]).T/(np.sum(1/ds2_sort[:,:3],1))).T #
weigths[np.where(np.isnan(weigths))]=1
# interpolate the surrounding points to the new unit circle
xn[Stencil_center+np.array(sads),:]=np.sum(x_grid[i+np.array(i_ads),j+np.array(j_ads),:][winds[:,:3],:].T*weigths.T,1).T
xn[Stencil_center,:] = x_grid[i,j,:]
# distance is the same to each direction
dx=dy=dr
#
elif np.any(dx_const!=None) and np.any(dy_const!=None):
                # constant grid spacing was given - use it directly
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
dx=dx_const
dy=dy_const
else:
# ORIGINAL VERSION
# calc distances
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# correct negative distances due to blocks spanning meridian
if (block_lon[ib,jb]*block_lon[ib,jb+1]<0):
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
#
if (block_lat[ib,jb]*block_lat[ib+1,jb]<0):
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# fill xn with timeseries of center point and neighbors
for ci in range(Stencil_center):
if ci==0:
xn[Stencil_center-1,:] = x_grid[i,j-1,:]
xn[Stencil_center+1,:] = x_grid[i,j+1,:]
elif ci==1:
xn[Stencil_center-2,:] = x_grid[i+1,j,:]
xn[Stencil_center+2,:] = x_grid[i-1,j,:]
xn[Stencil_center,:] = x_grid[i,j,:]
#
if inversion_method in ['integral']:
#
tstep = (block_num_samp-dt_min)//ens
bn = np.zeros((Stencil_size,ens))
res = np.zeros((Stencil_size,ens))
#
for e in range(ens):
t0=e*tstep
t1=e*tstep+dt_min
if block_num_samp-(t1+Tau)>=0:
xnlag = xn[:,t0+Tau:t1+Tau]
else:
xnlag=np.concatenate([xn[:,t0+Tau:t1+Tau], np.zeros((Stencil_size,(t1+Tau)-block_num_samp))],axis=1)
#
a=np.dot(xnlag,xn[:,t0:t1].T)
b=np.dot(xn[:,t0:t1],xn[:,t0:t1].T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a.data, np.linalg.pinv(b.data))
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
# this is very robust - time variability is perhaps more interesting
res[:,e] = abs((a-np.dot(tmp,b))[Stencil_center,:]/a[Stencil_center,:])
#
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10: #check that not all the values are the same
try:
bb, bb_err = linalg.logm(tmp,disp=False)
bb = (1./(Tau*Dt_secs))*bb
bb_err = (1./(Tau*Dt_secs))*bb_err
#bb, bb_err = (1./(Tau*Dt_secs))*linalg.logm(tmp,disp=False)
except (ValueError,ZeroDivisionError,OverflowError):
bn[:,e] = np.zeros(Stencil_size)
else:
bn[:,e] = np.real(bb[Stencil_center,:])
#res[:,e] = np.real(bb_err[])
else:
bn[:,e]=np.zeros(Stencil_size)
#
#bb = (1./(Tau*Dt_secs))*linalg.logm(tmp)
#bn[:,e] = np.real(bb[Stencil_center,:])
#
############################################
# -- solve for U K and R from row of bn -- #
############################################
#
block_vars[:,0,i,j] = -dx*(bn[Stencil_center+1,:]-bn[Stencil_center-1,:]) #-dx*np.nanpercentile(bn[Stencil_center+1,:]-bn[Stencil_center-1,:],percentiles) # u
block_vars[:,1,i,j] = -dy*(bn[Stencil_center+2,:]-bn[Stencil_center-2,:]) #-dy*np.nanpercentile(bn[Stencil_center+2,:]-bn[Stencil_center-2,:],percentiles) # v
block_vars[:,2,i,j] = 1./2*dx**2*(bn[Stencil_center+1,:]+bn[Stencil_center-1,:]) #1./2*dx**2*np.nanpercentile(bn[Stencil_center+1,:]+bn[Stencil_center-1,:],percentiles) # Kx
block_vars[:,3,i,j] = 1./2*dy**2*(bn[Stencil_center+2,:]+bn[Stencil_center-2,:]) #1./2*dy**2*np.nanpercentile(bn[Stencil_center+2,:]+bn[Stencil_center-2,:],percentiles) # Ky
block_vars[:,4,i,j] = -1./np.nansum(bn,0) #np.nanpercentile(-1./np.nansum(bn,0),percentiles) # R
if not (block_vars2 is None):
block_vars2[:,i,j] = np.nanmean(res,0)
def parallel_inversion(j,x_grid,block_vars,Stencil_center,Stencil_size,block_num_samp,block_num_lats,block_num_lons,block_lat,block_lon,Tau,Dt_secs,rot=False,block_vars2=None,inversion_method='integral',dx_const=None,dy_const=None, DistType='mean',radius=6371):
"""
    Invert 2D data using a 5 point stencil. This function should not be called directly; instead call the inversion() function.
    Possibility to use either the 'classic' north-south, east-west stencil (rot=False, default), or a stencil rotated 45 degrees counter-clockwise.
"""
#
#
if not rot:
# indices for the surrounding 8 points
sads=[-1,+1,-2,+2,-1,+1,-2,+2][:4] # indices for ds - Stencil_center will be the central point - these are spiraling out
jads=[-1,+1, 0, 0,-1,+1,+1,-1][:4] # left,right,down,up,down-left,up-right,right-down,left-up
iads=[ 0, 0,-1,+1,-1,+1,-1,+1][:4]
# indices for the surrounding 24 points -important to have the same first 4 points (the rest don't matter)
s_ads=[-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads=[-1,+1, 0, 0,-1,+1,+1,-1,-2,-2,-2,+2,+2,+2,-1, 0,+1,+1, 0, -1, -2, +2, +2, -2]
i_ads=[ 0, 0,-1,+1,-1,+1,-1,+1,+1, 0,-1,+1, 0,-1,-2,-2,-2,+2, +2, +2, -2, +2, -2, +2]
else:
# x and y axis are rotated 45 to the left
# indices for the surrounding 8 points
sads=[-1,+1,-2,+2,-1,+1,-2,+2][4:] # indices for ds - Stencil_center will be the central point - these are spiraling out
jads=[-1,+1, 0, 0,-1,+1,+1,-1][4:] # left,right,down,up,down-left,up-right,right-down,left-up
iads=[ 0, 0,-1,+1,-1,+1,-1,+1][4:]
        # indices for the surrounding 24 points
s_ads=[-1,+1,-2,+2,-3,+3,-4,+4,-5,+5,-6,+6,-7,+7,-8,+8,-9,+9,-10,+10,-11,+11,-12,+12]
j_ads=[-1,+1,+1,-1,-1,+1, 0, 0,-2,-2,+2,+2,+2,+2,-2,-2,-2,+2, 0, 0, +1, +1, -1, -1]
i_ads=[-1,+1,-1,+1, 0, 0,-1,+1,-2,-1,+2,+1,-2,-1,+2,+1, 0, 0, -2, +2, +2, -2, +2, -2]
    for i in range(1,block_num_lats-1): # change this back if no interpolation is used
# for i in range(2,block_num_lats-2): #if interpolation is used
numnebs=np.sum(np.isfinite(x_grid[i+np.array(iads),j+np.array(jads),0]))
        # only invert if all the points in the stencil are finite
if numnebs==len(sads):
xn = np.zeros((Stencil_size,block_num_samp))
ib = i
jb = j
# calculate the dx and dy and fill the stencil
if DistType in ['mean'] and np.any(dx_const==None) and np.any(dy_const==None):
# USING MEAN DISTANCE
ds=np.zeros(Stencil_size)
for s,ss in enumerate(sads):
ds[Stencil_center+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+iads[s],jb+jads[s]],block_lon[ib+iads[s],jb+jads[s]]],radius=radius)*1000
#
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
# calculate the mean dx,dy along two major axes
dx = np.mean(ds[Stencil_center+np.array(sads[:2])])
dy = np.mean(ds[Stencil_center+np.array(sads[2:])])
elif DistType in ['interp'] and np.any(dx_const==None) and np.any(dy_const==None):
# INTERPOLATED VERSION
# Interpolate x_grid values to be at the same distance from the central point - this is because the inversion doesn't know about the distance.
# first find the minimum distance - we will interpolate all the other points to be at this distance
                cent = len(s_ads)//2
ds=np.zeros(len(s_ads)+1)
ang=np.zeros(len(s_ads)+1)
for s,ss in enumerate(s_ads):
ds[cent+ss]=distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib+i_ads[s],jb+j_ads[s]],block_lon[ib+i_ads[s],jb+j_ads[s]]],radius=radius)*1000
ang[cent+np.array(s_ads)]=np.arctan2(i_ads,j_ads)*180/np.pi
ang[np.where(ang<0)]=ang[np.where(ang<0)]+360
#
dr=np.median(ds[np.where(ds>0)])
ds2=np.zeros((5,len(ds)))
# find out how far each point is from the unit circle point facing each grid cell.
for s,ss in enumerate(sads):
for s2,ss2 in enumerate(s_ads):
ds2[2+ss,cent+ss2]=np.sqrt(ds[cent+ss2]**2+dr**2-2*dr*ds[cent+ss2]*np.cos((ang[cent+ss2]-ang[cent+ss])*np.pi/180.))
#
ds2=np.delete(ds2,2,axis=0) # remove the central point from the points of interest - we know the value already
ds2=np.delete(ds2,cent,axis=1) # remove the central point from the points that affect interpolation - we don't want to transform any information outside
winds=np.argsort(ds2,axis=1) #
ds2_sort=np.sort(ds2,axis=1) #
weigths=((1/ds2_sort[:,:3]).T/(np.sum(1/ds2_sort[:,:3],1))).T #
weigths[np.where(np.isnan(weigths))]=1
# interpolate the surrounding points to the new unit circle
xn[Stencil_center+np.array(sads),:]=np.sum(x_grid[i+np.array(i_ads),j+np.array(j_ads),:][winds[:,:3],:].T*weigths.T,1).T
xn[Stencil_center,:] = x_grid[i,j,:]
# distance is the same to each direction
dx=dy=dr
#
elif np.any(dx_const!=None) and np.any(dy_const!=None):
                # constant grid spacing was given - use it directly
xn[Stencil_center+np.array(sads),:]=x_grid[i+np.array(iads),j+np.array(jads),:]
xn[Stencil_center,:] = x_grid[i,j,:]
dx=dx_const
dy=dy_const
else:
# ORIGINAL VERSION
# calc distances
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# correct negative distances due to blocks spanning meridian
if (block_lon[ib,jb]*block_lon[ib,jb+1]<0):
dx = distance([block_lat[ib,jb],block_lon[ib,jb-1]],[block_lat[ib,jb],block_lon[ib,jb]],radius=radius)*1000
#
if (block_lat[ib,jb]*block_lat[ib+1,jb]<0):
dy = distance([block_lat[ib,jb],block_lon[ib,jb]],[block_lat[ib-1,jb],block_lon[ib,jb]],radius=radius)*1000
# fill xn with timeseries of center point and neighbors
for ci in range(Stencil_center):
if ci==0:
xn[Stencil_center-1,:] = x_grid[i,j-1,:]
xn[Stencil_center+1,:] = x_grid[i,j+1,:]
elif ci==1:
xn[Stencil_center-2,:] = x_grid[i+1,j,:]
xn[Stencil_center+2,:] = x_grid[i-1,j,:]
xn[Stencil_center,:] = x_grid[i,j,:]
# TODO : HERE IS AN OPTION TO RUN ALL THE TAUS AT ONCE
            if False: # disabled (originally: inversion_method in ['integral']) - scans all lags 1..Tau, see TODO above
bns=np.zeros((Stencil_size,Tau-1))
for tau in range(1,Tau):
xnlag = np.concatenate((xn[:,tau:], np.zeros((Stencil_size,tau))),axis=1)
a=np.dot(xnlag,xn.T)
b=np.dot(xn,xn.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
tmp = np.dot(a, np.linalg.pinv(b))
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10:
try:
bb = (1./(tau*Dt_secs))*linalg.logm(tmp)
except (ValueError,ZeroDivisionError,OverflowError):
continue
else:
bns[:,tau-1] = np.real(bb[Stencil_center,:])
#
bns[~np.isfinite(bns)] = 0
# select the case when the central cell is most negative
b_ind=np.where(bns[Stencil_center,:].squeeze()==np.min(bns[Stencil_center,:],0))[0]
                # if several lags give the same minimum, take the first one
                b_ind = b_ind[0]
                bn = bns[:,b_ind]
#
elif inversion_method in ['integral']:
# inverse by integral method
xnlag = np.concatenate((xn[:,Tau:], np.zeros((Stencil_size,Tau))),axis=1)
# tmp = (np.dot(xnlag,xn.T))/(np.dot(xn,xn.T))
# in matlab: tmp = (xnlag*xn')/(xn*xn') let's take a=xnlag*xn' and b=xn*xn'
# this line in matlab basically means solving for xb=a
# what we can do in python is # xb = a: solve b.T x.T = a.T
# see http://stackoverflow.com/questions/1007442/mrdivide-function-in-matlab-what-is-it-doing-and-how-can-i-do-it-in-python
#
a=np.dot(xnlag,xn.T)
b=np.dot(xn,xn.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
# tmp = np.linalg.lstsq(b.data.T, a.data.T)[0] # one way to do it
tmp = np.dot(a, np.linalg.pinv(b)) # another way
tmp[np.isnan(tmp)] = 0
tmp[np.isinf(tmp)] = 0
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10: #check that not all the values are the same
try:
bb = (1./(Tau*Dt_secs))*linalg.logm(tmp)
except (ValueError,ZeroDivisionError,OverflowError):
bn = np.zeros(Stencil_size)
else:
bn = np.real(bb[Stencil_center,:])
else:
bn=np.zeros(Stencil_size)
#
bn[~np.isfinite(bn)] = 0
#
# inverse by derivative method
elif inversion_method in ['derivative']:
# central differential
xnfut = np.concatenate((xn[:,1:], np.zeros((Stencil_size,1))),axis=1)
xnpast = np.concatenate((np.zeros((Stencil_size,1)), xn[:,:-1]),axis=1)
xnlag = np.concatenate((np.zeros((Stencil_size,Tau)), xn[:,1:xn.shape[1]-Tau+1]),axis=1)
a=np.dot((xnfut-xnpast),xnlag.T)
b=np.dot(xn,xnlag.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
#tmp = np.linalg.lstsq(b.data.T, a.data.T)[0] #one way to do it
tmp = np.dot(a.data, np.linalg.pinv(b.data))
if np.isfinite(np.sum(tmp)) and np.sum(abs(tmp-tmp[0]))>1E-10:
bn_matrix = (0.5/Dt_secs)*tmp
bn = np.real(bn_matrix[Stencil_center,:])
bn[~np.isfinite(bn)] = 0
else:
bn = np.zeros(Stencil_size)
elif inversion_method in ['integral_2']:
                # alternative integral method - but now with a backward time difference
xnfut = np.concatenate((xn[:,1:], np.zeros((Stencil_size,1))),axis=1)
xnlag = np.concatenate((np.zeros((Stencil_size,Tau)), xn[:,1:xn.shape[1]-Tau+1]),axis=1)
a=np.dot(xnfut,xnlag.T)
b=np.dot(xn,xnlag.T)
a[np.where(np.isnan(a))]=0
b[np.where(np.isnan(b))]=0
#tmp = np.linalg.lstsq(b.data.T, a.data.T)[0] #one way to do it
tmp = np.dot(a.data, np.linalg.pinv(b.data)) #another way
tmp[np.isnan(tmp)] = 0
                tmp[np.isinf(tmp)] = 0
import numpy as np
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import tensorflow as tf
import sys
import os
import pickle as pickle
from six.moves import urllib
import tarfile
import scipy.stats.mstats
from load_cifar10 import load_data10
# training parameters
initial_learning_rate = 0.001
training_epochs = 200
batch_size = 128
# architecture parameters
n_labels = 10
crop_length = 32
n_channels = 3
image_width = 32
n_input = 32 * 32
mode = 'normal' # 'normal', 'mix', or 'fast'
nonlinearity_name = 'relu'
try:
num_to_make = int(sys.argv[1])
print('Number of foolers to generate:', num_to_make)
except:
print('Defaulted to making one fooling image')
num_to_make = 1
try:
mode = sys.argv[2] # 'normal', 'mix', or 'fast'
print('Chosen mode:', mode)
except:
print('Defaulted to normal mode since no mode given through command line')
mode = 'normal'
def str2bool(v):
return v.lower() in ("yes", "true", "t", "1")
graph = tf.Graph()
with graph.as_default():
x = tf.placeholder(dtype=tf.float32, shape=[None, crop_length, crop_length, n_channels])
y = tf.placeholder(dtype=tf.int64, shape=[None])
is_training = tf.constant(False) # tf.placeholder(tf.bool)
W = {}
bn = {}
params = pickle.load(open("./r32.pkl", "rb"), encoding='latin1')
bn['beta0'] = tf.Variable(params[0])
bn['gamma0'] = tf.Variable(params[1])
bn['mu0'] = tf.constant(params[2])
bn['inv_std0'] = tf.constant(params[3])
for layer in range(1, 32):
# awkward offset because of bn for input
l_str = str(layer)
W['filter' + l_str] = tf.Variable(np.moveaxis(params[layer * 5 - 1], [0, 1, 2, 3], [3, 2, 0, 1]))
bn['beta' + l_str] = tf.Variable(params[layer * 5 + 0])
bn['gamma' + l_str] = tf.Variable(params[layer * 5 + 1])
bn['mu' + l_str] = tf.constant(params[layer * 5 + 2])
bn['inv_std' + l_str] = tf.constant(params[layer * 5 + 3])
W['w_out'] = tf.Variable(params[159])
W['b_out'] = tf.Variable(params[160])
def feedforward(_x, n=5):
rho = tf.nn.relu
def residual_block(h, layer_number=1, input_num_filters=32, increase_dim=False):
l_num = str(layer_number)
if increase_dim:
first_stride = [1, 2, 2, 1]
out_num_filters = input_num_filters * 2
else:
first_stride = [1, 1, 1, 1]
out_num_filters = input_num_filters
stack1 = rho((tf.nn.conv2d(h, W['filter' + l_num], strides=first_stride, padding='SAME') -
bn['mu' + l_num]) * bn['inv_std' + l_num] * bn['gamma' + l_num] + bn['beta' + l_num])
l_num = str(layer_number + 1)
stack2 = (tf.nn.conv2d(stack1, W['filter' + l_num], strides=[1, 1, 1, 1], padding='SAME') -
bn['mu' + l_num]) * bn['inv_std' + l_num] * bn['gamma' + l_num] + bn['beta' + l_num]
if increase_dim:
            # with a newer tensorflow this could be written as h[:, ::2, ::2, :]
# array_ops.strided_slice(h, [0,0,0,0], [2000,-1,-1,input_num_filters], [1,2,2,1])
h_squished = tf.nn.max_pool(h, ksize=[1, 3, 3, 1], strides=[1, 2, 2, 1], padding='SAME')
padded = tf.pad(h_squished, [[0, 0], [0, 0], [0, 0], [out_num_filters // 4, out_num_filters // 4]])
block = rho(stack2 + padded)
else:
block = rho(stack2 + h)
return block
x_input = (_x - bn['mu0']) * bn['inv_std0'] * bn['gamma0'] + bn['beta0']
# bsize x 32 x 32 x 16
l = rho((tf.nn.conv2d(x_input, W['filter1'], strides=[1, 1, 1, 1], padding='SAME') -
bn['mu1']) * bn['inv_std1'] * bn['gamma1'] + bn['beta1'])
# bsize x 32 x 32 x 16
for i in range(n):
l = residual_block(l, layer_number=2 * i + 2)
# bsize x 16 x 16 x 32
l = residual_block(l, increase_dim=True, layer_number=2 * n + 2, input_num_filters=16)
for i in range(1, n):
l = residual_block(l, layer_number=2 * n + 2 * i + 2)
# bsize x 8 x 8 x 64
l = residual_block(l, increase_dim=True, layer_number=4 * n + 2, input_num_filters=32)
for i in range(1, n):
l = residual_block(l, layer_number=4 * n + 2 * i + 2)
l = tf.reduce_mean(l, reduction_indices=[1, 2])
return tf.matmul(l, W['w_out']) + W['b_out']
def normal(_x):
return feedforward(_x)
def energy_blur(_x):
_x = tf.reshape(_x, [-1, image_width, image_width, 3])
# 5x5, sigma = 0.7
filter = tf.reshape(tf.constant([[0.000252, 0.00352, 0.008344, 0.00352, 0.000252],
[0.00352, 0.049081, 0.11634, 0.049081, 0.00352],
[0.008344, 0.11634, 0.275768, 0.11634, 0.008344],
[0.00352, 0.049081, 0.11634, 0.049081, 0.00352],
[0.000252, 0.00352, 0.008344, 0.00352, 0.000252]],
dtype=tf.float32), [5, 5, 1, 1])
h, s, v = tf.split(3, 3, _x)
h = tf.nn.conv2d(tf.square(h), filter, strides=[1, 1, 1, 1], padding='SAME')
h = tf.sqrt(tf.reshape(h, [-1, 32, 32, 1]) + 1e-12)
s = tf.nn.conv2d(tf.square(s), filter, strides=[1, 1, 1, 1], padding='SAME')
s = tf.sqrt(tf.reshape(s, [-1, 32, 32, 1]) + 1e-12)
v = tf.nn.conv2d(tf.square(v), filter, strides=[1, 1, 1, 1], padding='SAME')
v = tf.sqrt(tf.reshape(v, [-1, 32, 32, 1]) + 1e-12)
_x = tf.concat(3, [h, s, v])
return feedforward(_x)
pred_normal = normal(x)
loss_normal = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(pred_normal, y))
pred_energy_blur = energy_blur(x)
loss_energy_blur = tf.reduce_mean(tf.nn.sparse_softmax_cross_entropy_with_logits(pred_energy_blur, y))
if mode == 'normal' or mode == 'fast':
pred = pred_normal
loss = loss_normal
elif mode == 'mix':
pred = (pred_normal + pred_energy_blur) / 2.
loss = loss_normal + loss_energy_blur
sess = tf.InteractiveSession(graph=graph)
tf.initialize_all_variables().run()
train_dataset, train_labels, test_dataset, test_labels = load_data10(randomize=False)
# mean_img = np.reshape(np.mean(train_dataset, 0), (32, 32, 3))
train_dataset = train_dataset.astype(np.float32)
test_dataset = test_dataset.astype(np.float32)
# pred = sess.run(pred, feed_dict={x: train_dataset[0:3000,:,:,:]})
# error = np.argmax(pred, 1) != np.argmax(train_labels[0:3000, :], 1)
# print(np.mean(error))
class_names = ['airplane', 'auto', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck']
def show_image(image, rescale=False, add_mean=False):
img = image.copy()
img = img.reshape(32,32,3)
# if add_mean:
# img += mean_img
# if rescale:
# low, high = np.min(img), np.max(img)
# img = (img - low) / (high - low)
plt.imshow(img)
plt.gca().axis('off')
def make_fooling_image(image, target, reg=1e-3, step=1/255., max_iters=100, confidence_thresh=0.5):
# NOTE: we clip as a consequence of our discussion about improperly plotted images
orig_image = image.copy() # paranoia
fooling_image = image.copy()
for _ in range(max_iters):
dFool, predictions = sess.run([tf.gradients(loss, x)[0], pred], feed_dict={x: fooling_image, y: [target]})
fooling_image[0] -= step * (np.squeeze(dFool[0]) + reg * (fooling_image[0] - orig_image[0]))
fooling_image[0] = np.clip(fooling_image[0], 0, 1)
fool_prob = sess.run(tf.nn.softmax(predictions)[0, target])
if fool_prob > confidence_thresh:
break
return fooling_image
def make_fooling_image_fast(image, target, reg=1e-3, step=10/255.):
# NOTE: we clip as a consequence of our discussion about improperly plotted images
orig_image = image.copy() # paranoia
fooling_image = image.copy()
dFool = sess.run(tf.gradients(loss, x)[0], feed_dict={x: fooling_image, y: [target]})
    fooling_image[0] -= step * np.sign(np.squeeze(dFool[0]))
    # clip and return, mirroring make_fooling_image above
    fooling_image[0] = np.clip(fooling_image[0], 0, 1)
    return fooling_image
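# Example usage (a minimal sketch; the image index and target class below are arbitrary
# illustrations, not part of the original script; images are assumed to be scaled to
# [0, 1], as the clipping in make_fooling_image suggests):
def _example_generate_fooler():
    """Turn one test image into a fooling image targeted at the 'cat' class."""
    image = test_dataset[0:1].copy() # batch containing a single image
    target = class_names.index('cat') # class we want the network to predict
    if mode == 'fast':
        return make_fooling_image_fast(image, target)
    return make_fooling_image(image, target, reg=1e-3, step=1/255., max_iters=100)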
#!/usr/bin/env python2
import rospy
from tf import TransformBroadcaster, TransformerROS, transformations as tfs
import tf
from geometry_msgs.msg import Transform
import numpy as np
rospy.init_node('handeye_calibration_publisher')
print("Publishing handeye matrix!")
while rospy.get_time() == 0.0:
pass
d = np.load("calibration_data.npz")
observed_pts = d['arr_0']
measured_pts = d['arr_1']
def get_rigid_transform(A, B):
assert len(A) == len(B)
    N = A.shape[0] # total number of points
centroid_A = np.mean(A, axis=0)
centroid_B = np.mean(B, axis=0)
AA = A - np.tile(centroid_A, (N, 1)) # Centre the points
BB = B - np.tile(centroid_B, (N, 1))
H = np.dot(np.transpose(AA), BB) # Dot is matrix multiplication for array
U, S, Vt = np.linalg.svd(H)
    R = np.dot(Vt.T, U.T)
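    # NOTE: the original function is cut off at this point; the lines below are the
    # standard completion of this rigid-transform (Kabsch) solve and are an assumed
    # reconstruction, not recovered from the source.
    if np.linalg.det(R) < 0: # correct for a reflection so that R is a proper rotation
        Vt[2, :] *= -1
        R = np.dot(Vt.T, U.T)
    t = np.dot(-R, centroid_A.T) + centroid_B.T # translation mapping A onto B
    return R, t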
# Copyright 2020 The FastEstimator Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
import os
import tempfile
import unittest
from unittest.mock import patch
import numpy as np
import fastestimator as fe
from fastestimator.dataset import NumpyDataset
from fastestimator.op.tensorop.model import ModelOp
from fastestimator.test.unittest_util import OneLayerTorchModel, one_layer_tf_model
from fastestimator.trace import Trace
from fastestimator.trace.io.test_report import TestCase, TestReport
from fastestimator.util import to_number
class SampleTrace(Trace):
""" custom trace that gets average of all samples
"""
def on_begin(self, data):
self.buffer = []
def on_batch_end(self, data):
self.buffer.append(to_number(data[self.inputs[0]]))
def on_epoch_end(self, data):
data.write_without_log(self.outputs[0], np.mean(np.concatenate(self.buffer)))
class TestTestReport(unittest.TestCase):
@classmethod
def setUpClass(cls):
dataset = NumpyDataset({
"x": np.array([[1, 1, 1], [1, -1, -0.5]], dtype=np.float32), "id": | np.array([0, 1], dtype=np.int32) | numpy.array |
# This module has been generated automatically from space group information
# obtained from the Computational Crystallography Toolbox
#
"""
Space groups
This module contains a list of all the 230 space groups that can occur in
a crystal. The variable space_groups contains a dictionary that maps
space group numbers and space group names to the corresponding space
group objects.
.. moduleauthor:: <NAME> <<EMAIL>>
"""
#-----------------------------------------------------------------------------
# Copyright (C) 2013 The Mosaic Development Team
#
# Distributed under the terms of the BSD License. The full license is in
# the file LICENSE.txt, distributed as part of this software.
#-----------------------------------------------------------------------------
import numpy as N
class SpaceGroup(object):
"""
Space group
All possible space group objects are created in this module. Other
modules should access these objects through the dictionary
space_groups rather than create their own space group objects.
"""
def __init__(self, number, symbol, transformations):
"""
:param number: the number assigned to the space group by
international convention
:type number: int
:param symbol: the Hermann-Mauguin space-group symbol as used
in PDB and mmCIF files
:type symbol: str
:param transformations: a list of space group transformations,
each consisting of a tuple of three
integer arrays (rot, tn, td), where
rot is the rotation matrix and tn/td
are the numerator and denominator of the
translation vector. The transformations
are defined in fractional coordinates.
:type transformations: list
"""
self.number = number
self.symbol = symbol
self.transformations = transformations
self.transposed_rotations = N.array([N.transpose(t[0])
for t in transformations])
self.phase_factors = N.exp(N.array([(-2j*N.pi*t[1])/t[2]
for t in transformations]))
def __repr__(self):
return "SpaceGroup(%d, %s)" % (self.number, repr(self.symbol))
def __len__(self):
"""
:return: the number of space group transformations
:rtype: int
"""
return len(self.transformations)
def symmetryEquivalentMillerIndices(self, hkl):
"""
:param hkl: a set of Miller indices
:type hkl: Scientific.N.array_type
:return: a tuple (miller_indices, phase_factor) of two arrays
of length equal to the number of space group
transformations. miller_indices contains the Miller
indices of each reflection equivalent by symmetry to the
reflection hkl (including hkl itself as the first element).
phase_factor contains the phase factors that must be applied
to the structure factor of reflection hkl to obtain the
structure factor of the symmetry equivalent reflection.
:rtype: tuple
"""
hkls = N.dot(self.transposed_rotations, hkl)
p = N.multiply.reduce(self.phase_factors**hkl, -1)
return hkls, p
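# Example usage (a minimal sketch; the space group and reflection chosen here are
# arbitrary illustrations, not part of the generated data below):
def _example_equivalent_reflections():
    """Return the reflections equivalent to (1, 2, 3) in space group P 21 21 21 and their phase factors."""
    sg = space_groups['P 21 21 21']
    hkls, phases = sg.symmetryEquivalentMillerIndices(N.array([1, 2, 3]))
    return hkls, phases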
space_groups = {}
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(1, 'P 1', transformations)
space_groups[1] = sg
space_groups['P 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(2, 'P -1', transformations)
space_groups[2] = sg
space_groups['P -1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(3, 'P 1 2 1', transformations)
space_groups[3] = sg
space_groups['P 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(4, 'P 1 21 1', transformations)
space_groups[4] = sg
space_groups['P 1 21 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(5, 'C 1 2 1', transformations)
space_groups[5] = sg
space_groups['C 1 2 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(6, 'P 1 m 1', transformations)
space_groups[6] = sg
space_groups['P 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(7, 'P 1 c 1', transformations)
space_groups[7] = sg
space_groups['P 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(8, 'C 1 m 1', transformations)
space_groups[8] = sg
space_groups['C 1 m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(9, 'C 1 c 1', transformations)
space_groups[9] = sg
space_groups['C 1 c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(10, 'P 1 2/m 1', transformations)
space_groups[10] = sg
space_groups['P 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(11, 'P 1 21/m 1', transformations)
space_groups[11] = sg
space_groups['P 1 21/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(12, 'C 1 2/m 1', transformations)
space_groups[12] = sg
space_groups['C 1 2/m 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(13, 'P 1 2/c 1', transformations)
space_groups[13] = sg
space_groups['P 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(14, 'P 1 21/c 1', transformations)
space_groups[14] = sg
space_groups['P 1 21/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(15, 'C 1 2/c 1', transformations)
space_groups[15] = sg
space_groups['C 1 2/c 1'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(16, 'P 2 2 2', transformations)
space_groups[16] = sg
space_groups['P 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(17, 'P 2 2 21', transformations)
space_groups[17] = sg
space_groups['P 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(18, 'P 21 21 2', transformations)
space_groups[18] = sg
space_groups['P 21 21 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(19, 'P 21 21 21', transformations)
space_groups[19] = sg
space_groups['P 21 21 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(20, 'C 2 2 21', transformations)
space_groups[20] = sg
space_groups['C 2 2 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(21, 'C 2 2 2', transformations)
space_groups[21] = sg
space_groups['C 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(22, 'F 2 2 2', transformations)
space_groups[22] = sg
space_groups['F 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(23, 'I 2 2 2', transformations)
space_groups[23] = sg
space_groups['I 2 2 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(24, 'I 21 21 21', transformations)
space_groups[24] = sg
space_groups['I 21 21 21'] = sg
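# Space groups 25-46 below share the polar orthorhombic point group m m 2:
# each block lists the identity, a 2-fold rotation or 2_1 screw along z, and
# two mirror or glide reflections, followed by any centering translates.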
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(25, 'P m m 2', transformations)
space_groups[25] = sg
space_groups['P m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(26, 'P m c 21', transformations)
space_groups[26] = sg
space_groups['P m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(27, 'P c c 2', transformations)
space_groups[27] = sg
space_groups['P c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(28, 'P m a 2', transformations)
space_groups[28] = sg
space_groups['P m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(29, 'P c a 21', transformations)
space_groups[29] = sg
space_groups['P c a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(30, 'P n c 2', transformations)
space_groups[30] = sg
space_groups['P n c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(31, 'P m n 21', transformations)
space_groups[31] = sg
space_groups['P m n 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(32, 'P b a 2', transformations)
space_groups[32] = sg
space_groups['P b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(33, 'P n a 21', transformations)
space_groups[33] = sg
space_groups['P n a 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(34, 'P n n 2', transformations)
space_groups[34] = sg
space_groups['P n n 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(35, 'C m m 2', transformations)
space_groups[35] = sg
space_groups['C m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(36, 'C m c 21', transformations)
space_groups[36] = sg
space_groups['C m c 21'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(37, 'C c c 2', transformations)
space_groups[37] = sg
space_groups['C c c 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(38, 'A m m 2', transformations)
space_groups[38] = sg
space_groups['A m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(39, 'A b m 2', transformations)
space_groups[39] = sg
space_groups['A b m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(40, 'A m a 2', transformations)
space_groups[40] = sg
space_groups['A m a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(41, 'A b a 2', transformations)
space_groups[41] = sg
space_groups['A b a 2'] = sg
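# Space group 42 (F m m 2) is face-centered: the four m m 2 operators are
# combined with each of the centering translations (0, 0, 0), (0, 1/2, 1/2),
# (1/2, 0, 1/2) and (1/2, 1/2, 0), giving the 16 entries below.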
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(42, 'F m m 2', transformations)
space_groups[42] = sg
space_groups['F m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,3])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,1])
trans_den = N.array([4,4,4])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(43, 'F d d 2', transformations)
space_groups[43] = sg
space_groups['F d d 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(44, 'I m m 2', transformations)
space_groups[44] = sg
space_groups['I m m 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(45, 'I b a 2', transformations)
space_groups[45] = sg
space_groups['I b a 2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(46, 'I m a 2', transformations)
space_groups[46] = sg
space_groups['I m a 2'] = sg
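# From space group 47 onward the point group is the centrosymmetric
# holohedry m m m: every block pairs each operator with its composition
# with the inversion -x, -y, -z.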
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(47, 'P m m m', transformations)
space_groups[47] = sg
space_groups['P m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(48, 'P n n n :2', transformations)
space_groups[48] = sg
space_groups['P n n n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(49, 'P c c m', transformations)
space_groups[49] = sg
space_groups['P c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(50, 'P b a n :2', transformations)
space_groups[50] = sg
space_groups['P b a n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(51, 'P m m a', transformations)
space_groups[51] = sg
space_groups['P m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(52, 'P n n a', transformations)
space_groups[52] = sg
space_groups['P n n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(53, 'P m n a', transformations)
space_groups[53] = sg
space_groups['P m n a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(54, 'P c c a', transformations)
space_groups[54] = sg
space_groups['P c c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(55, 'P b a m', transformations)
space_groups[55] = sg
space_groups['P b a m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(56, 'P c c n', transformations)
space_groups[56] = sg
space_groups['P c c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(57, 'P b c m', transformations)
space_groups[57] = sg
space_groups['P b c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(58, 'P n n m', transformations)
space_groups[58] = sg
space_groups['P n n m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(59, 'P m m n :2', transformations)
space_groups[59] = sg
space_groups['P m m n :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(60, 'P b c n', transformations)
space_groups[60] = sg
space_groups['P b c n'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(61, 'P b c a', transformations)
space_groups[61] = sg
space_groups['P b c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(62, 'P n m a', transformations)
space_groups[62] = sg
space_groups['P n m a'] = sg
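# Space groups 63-65 (C m c m, C m c a, C m m m) are C-centered: each block
# repeats its eight m m m operators with the (1/2, 1/2, 0) centering
# translation added.  Composite translations are left unreduced, so a
# component of 1 is equivalent to 0.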
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(63, 'C m c m', transformations)
space_groups[63] = sg
space_groups['C m c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(64, 'C m c a', transformations)
space_groups[64] = sg
space_groups['C m c a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(65, 'C m m m', transformations)
space_groups[65] = sg
space_groups['C m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(66, 'C c c m', transformations)
space_groups[66] = sg
space_groups['C c c m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(67, 'C m m a', transformations)
space_groups[67] = sg
space_groups['C m m a'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,-1])
trans_den = N.array([1,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,0])
trans_den = N.array([2,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,-1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,0])
trans_den = N.array([1,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(68, 'C c c a :2', transformations)
space_groups[68] = sg
space_groups['C c c a :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(69, 'F m m m', transformations)
space_groups[69] = sg
space_groups['F m m m'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,0,0])
trans_den = N.array([1,1,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,-1,-1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,0,-1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,-1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,3,3])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,2,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([0,1,1])
trans_den = N.array([1,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([-1,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,3])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,0,3])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([2,1,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,0,1])
trans_den = N.array([4,1,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,-1,1])
trans_den = N.array([4,4,2])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,3,1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([3,1,1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([3,3,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,-1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([2,2,1])
transformations.append((rot, trans_num, trans_den))
rot = N.array([-1,0,0,0,1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([2,4,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,-1,0,0,0,1])
rot.shape = (3, 3)
trans_num = N.array([1,1,-1])
trans_den = N.array([4,2,4])
transformations.append((rot, trans_num, trans_den))
rot = N.array([1,0,0,0,1,0,0,0,-1])
rot.shape = (3, 3)
trans_num = N.array([1,1,0])
trans_den = N.array([4,4,1])
transformations.append((rot, trans_num, trans_den))
sg = SpaceGroup(70, 'F d d d :2', transformations)
space_groups[70] = sg
space_groups['F d d d :2'] = sg
transformations = []
rot = N.array([1,0,0,0,1,0,0,0,1])
import copy
import glob
import os
import time
from collections import deque
import gym
import numpy as np
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torchvision
import torchvision.transforms as transforms
# from torch.utils.tensorboard import SummaryWriter
from a2c_ppo_acktr import algo, utils
from a2c_ppo_acktr.algo import gail, gail_lipschitz, ppo_lipschitz
import a2c_ppo_acktr.arguments as arguments #import get_args, get_init, get_init_Lipschitz
from a2c_ppo_acktr.envs import make_vec_envs
from a2c_ppo_acktr.model import Policy
from a2c_ppo_acktr.storage import RolloutStorage
from evaluation import evaluate
from a2c_ppo_acktr.envs import VecPyTorch
from baselines.common.atari_wrappers import FrameStack, ClipRewardEnv, WarpFrame
from procgen import ProcgenEnv
import argparse
import sys
sys.path.append(os.getcwd())
import pickle
from a2c_ppo_acktr.utils import init
from utils import myutils
import mujoco_py
from os import listdir
from os.path import isfile, join
import math
import psutil
import tracemalloc
import linecache
# from memory_profiler import profile
# breakpoint()
# ------------ REWARD NETWORKS ------------
class net_MLP(nn.Module):
def __init__(self,
input_size,
rew_sign,
rew_mag,
FC1_dim = 256,
FC2_dim = 256,
FC3_dim = 256,
out_dim=1):
super().__init__()
# an affine operation: y = Wx + b
self.n_dim = input_size
self.fc1 = nn.Linear(self.n_dim, FC1_dim)
self.fc2 = nn.Linear(FC1_dim, FC2_dim)
self.fc3 = nn.Linear(FC2_dim, FC3_dim)
self.fc4 = nn.Linear(FC3_dim, out_dim)
self.rew_sign = rew_sign
self.rew_mag = rew_mag
def forward(self, x):
x = F.relu(self.fc1(x))
x = F.relu(self.fc2(x))
x = F.relu(self.fc3(x))
if self.rew_sign == "free":
x = self.fc4(x)
elif self.rew_sign == "neg":
x = - F.relu(self.fc4(x))
elif self.rew_sign == "pos":
x = F.relu(self.fc4(x))
elif self.rew_sign == "pos_sigmoid":
x = self.rew_mag*torch.sigmoid(self.fc4(x))
elif self.rew_sign == "neg_sigmoid":
x = - self.rew_mag*torch.sigmoid(self.fc4(x))
elif self.rew_sign == "tanh":
x = self.rew_mag*torch.tanh(self.fc4(x))
return x
def _num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
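# A minimal usage sketch for net_MLP (hypothetical sizes; any flat observation
# vector works). With rew_sign="tanh" the output is squashed to [-rew_mag, rew_mag]:
#   rew_net = net_MLP(input_size=11, rew_sign="tanh", rew_mag=10.0)
#   r = rew_net(torch.zeros(1, 11))   # tensor of shape (1, 1)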
class net_CNN(nn.Module):
def __init__(self,
observation_space_shape,
rew_sign,
rew_mag,
final_conv_channels=10):
super().__init__()
depth = observation_space_shape[0]
n_dim = observation_space_shape[1]
init_ = lambda m: init(m, nn.init.orthogonal_, lambda x: nn.init.
constant_(x, 0), nn.init.calculate_gain('relu'))
self.rew_sign = rew_sign
self.rew_mag = rew_mag
def conv2d_size_out(size, kernel_size, stride):
return (size - (kernel_size - 1) - 1) // stride + 1
conv1_output_width = conv2d_size_out(n_dim, 8,4)
conv2_output_width = conv2d_size_out(conv1_output_width, 4,2)
conv3_output_width = conv2d_size_out(conv2_output_width, 3,1)
conv4_output_width = conv2d_size_out(conv3_output_width, 7,1)
FC_input_size = conv4_output_width * conv4_output_width * final_conv_channels
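# Worked example of the size arithmetic above, assuming a (hypothetical) 84x84
# input frame, a common Atari resolution:
#   conv1: (84 - 7 - 1) // 4 + 1 = 20
#   conv2: (20 - 3 - 1) // 2 + 1 = 9
#   conv3: ( 9 - 2 - 1) // 1 + 1 = 7
#   conv4: ( 7 - 6 - 1) // 1 + 1 = 1
# so FC_input_size = 1 * 1 * final_conv_channels = 10 with the default of 10.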
self.conv1 = nn.Conv2d(depth, 32, 8, stride=4)
self.conv2 = nn.Conv2d(32, 64, 4, stride=2)
self.conv3 = nn.Conv2d(64, 64, 3, stride=1)
self.conv4 = nn.Conv2d(64, final_conv_channels, 7, stride=1)
self.FC = nn.Linear(FC_input_size, 1)
# self.main = nn.Sequential(
# nn.Conv2d(depth, 32, 8, stride=4), nn.ReLU(),
# nn.Conv2d(32, 64, 4, stride=2), nn.ReLU(),
# nn.Conv2d(64, 64, 3, stride=1), nn.ReLU(),
# nn.Conv2d(64, final_conv_channels, 7, stride=1), nn.ReLU(),
# Flatten(),
# nn.Linear(FC_input_size, 1)
# )
def forward(self, x):
x = F.relu(self.conv1(x))
x = F.relu(self.conv2(x))
x = F.relu(self.conv3(x))
x = F.relu(self.conv4(x))
x = x.view(x.size(0), -1) # flatten
if self.rew_sign == "free":
x = self.FC(x)
elif self.rew_sign == "neg":
x = - F.relu(self.FC(x))
elif self.rew_sign == "pos":
x = F.relu(self.FC(x))
elif self.rew_sign == "pos_sigmoid":
x = self.rew_mag*torch.sigmoid(self.FC(x))
elif self.rew_sign == "neg_sigmoid":
x = - self.rew_mag*torch.sigmoid(self.FC(x))
elif self.rew_sign == "tanh":
x = self.rew_mag*torch.tanh(self.FC(x))
return x
def num_flat_features(self, x):
size = x.size()[1:] # all dimensions except the batch dimension
num_features = 1
for s in size:
num_features *= s
return num_features
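# A minimal usage sketch for net_CNN (hypothetical shapes): the observation
# space shape is (channels, height, width), e.g. 4 stacked 84x84 frames;
# rew_sign="neg" constrains the predicted reward to be <= 0:
#   rew_net = net_CNN((4, 84, 84), rew_sign="neg", rew_mag=1.0)
#   r = rew_net(torch.zeros(1, 4, 84, 84))   # tensor of shape (1, 1)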
# Auxiliary functions
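# reward_cl wraps a single learned reward network (an MLP for flat observations,
# a CNN for image observations) together with its Adam optimizer and helpers for
# flattening its parameters and gradients into one theta vector.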
class reward_cl():
def __init__(self, device, observation_space_shape, lr, rew_sign, rew_mag, rew_kwargs):
# new networks
self.device = device
if len(observation_space_shape) == 1:
# num_stacked_obs = 4
self.reward_net = net_MLP(observation_space_shape[0], rew_sign, rew_mag, FC1_dim=rew_kwargs['FC_dim'], FC2_dim=rew_kwargs['FC_dim'], out_dim=1).to(self.device)
elif len(observation_space_shape) == 3:
self.reward_net = net_CNN(observation_space_shape, rew_sign, rew_mag).to(self.device)
# NOTE: with the following lines commented out, we rely on PyTorch's default initialization.
# PyTorch uses Kaiming initialization, which is well suited to linear layers with ReLU activations.
# self.init_weights_var = 0.05
# self._init_weights(self.reward_net)
self.lr = lr
# create the optimizer
self.optimizer = optim.Adam(self.reward_net.parameters(), lr=self.lr, betas=(0.9, 0.999), eps=1e-06, weight_decay=0.001, amsgrad=False)
# theta based variables
self.reward_input_batch = None
self.theta_size = self._get_size_theta()
# self.grad_R_theta = np.zeros((self.MDP.n_dim, self.theta_size))
def _get_size_theta(self):
size = 0
for f in self.reward_net.parameters():
# SHOULD BE DOUBLE CHECKED 1234567891011
dims = f.size()
layer_size = 1
for dim in dims:
layer_size *= dim
size += layer_size
return size
def _init_weights(self, reward_net):
with torch.no_grad():
for layer_w in reward_net.parameters():
torch.nn.init.normal_(layer_w, mean=0.0, std=self.init_weights_var)
# torch.nn.init.xavier_normal_(layer_w, gain=1.0)
def reward_net_input_method(self,obs):
return obs
def reward_net_input_batch_traj_method(self, traj):
reward_input_batch = torch.cat([torch.unsqueeze(trans, dim=0) for trans in traj], dim=0)
# reward_input_batch.requires_grad_(True)
return reward_input_batch
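# Builds a 4-frame stacked input (newest frame first) for every step of a
# trajectory, padding the start of the episode by repeating the first frame
# (steps 0-2 reuse traj[0]).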
def reward_net_input_batch_traj_method_stacked(self, traj):
stacked_obs_list = []
for idx in range(len(traj)):
if idx == 0:
stacked_obs = torch.cat([traj[0][0], traj[0][0], traj[0][0], traj[0][0]], dim=1)
elif idx == 1:
stacked_obs = torch.cat([traj[1][0], traj[0][0], traj[0][0], traj[0][0]], dim=1)
elif idx == 2:
stacked_obs = torch.cat([traj[2][0], traj[1][0], traj[0][0], traj[0][0]], dim=1)
else:
stacked_obs = torch.cat([traj[idx][0], traj[idx-1][0], traj[idx-2][0], traj[idx-3][0]], dim=1)
stacked_obs_list.append(stacked_obs)
return torch.cat(stacked_obs_list, dim=0)
def _get_flat_grad(self):
# this part is to get the thetas to be used for l2 regularization
# grads_flat = torch.zeros(self.theta_size)
grads_flat_list = []
start_pos = 0
for idx, weights in enumerate(self.reward_net.parameters()):
# SHOULD BE DOUBLE CHECKED 1234567891011
num_flat_features = self._num_flat_features(weights)
try:
grads = copy.deepcopy(weights.grad.view(-1, num_flat_features))
except Exception as e:
print("No gradient error")
# grads_flat[start_pos:start_pos+num_flat_features] = grads[:]
# start_pos += num_flat_features
grads_flat_list.append(grads)
grads_flat = torch.unsqueeze(torch.cat(grads_flat_list, dim=1), dim=0)
return grads_flat
def get_flat_weights(self):
# this part is to get the thetas to be used for l2 regularization
weights_flat = torch.zeros(self.theta_size)
start_pos = 0
for weights in self.reward_net.parameters():
# SHOULD BE DOUBLE CHECKED 1234567891011
num_flat_features = self._num_flat_features(weights)
weights = copy.deepcopy(weights.view(-1, num_flat_features).detach())
weights_flat[start_pos:start_pos+num_flat_features] = weights[:]
start_pos += num_flat_features
return weights_flat
def _num_flat_features(self, x):
size = x.size() # full shape of a parameter tensor; unlike activations, parameters have no batch dimension to skip
num_features = 1
for s in size:
num_features *= s
return num_features
class train_sparse_rank():
def __init__(self, kwargs, myargs, init_params):
if not myargs.seed == -12345:
# seed is provided as command line argument and nothing needs to be done
pass
else:
if os.getenv('SEED'):
myargs.seed = int(os.getenv('SEED'))
else:
raise ValueError('SEED not provided as a command line argument or as an environment variable')
if myargs.save_name:
add_to_save_name = f"-{myargs.save_name}"
else:
add_to_save_name = ""
if myargs.shaping:
shaping_str = "-shaping"
else:
shaping_str = ""
if myargs.sparse_cntr:
sparse_cntr_str = f"-sparseCntr"
else:
sparse_cntr_str = ""
if myargs.sparseness:
sparseness_str = f"-sp{myargs.sparseness}"
else:
sparseness_str = ""
myargs.save_name = f"RLfRD-{myargs.env_name}{shaping_str}-{myargs.sparse_rew_type}-{myargs.rew_sign}{sparseness_str}{sparse_cntr_str}" + add_to_save_name + f"-s{myargs.seed}"
self.kwargs = kwargs
self.myargs = myargs
self.init_params = init_params
self.device = myutils.assign_gpu_device(self.myargs)
self.log_dir = myargs.log_dir + "/" + myargs.save_name
eval_log_dir = self.log_dir + "_eval"
self.log_file_name = myargs.env_name
# utils.cleanup_log_dir(log_dir)
# utils.cleanup_log_dir(eval_log_dir)
if not myargs.continue_:
utils.create_dir(self.log_dir)
# utils.create_dir(eval_log_dir)
# self.save_path_trained_models is for storing the trained model
self.save_path_trained_models = os.path.join(myargs.save_dir, myargs.algo, myargs.save_name)
if self.myargs.train_from_given_ranked_demos:
self.save_path_new_trajs = self.myargs.ranked_demos_address + "/train"
self.save_path_new_trajs_val = self.myargs.ranked_demos_address + "/val"
else:
self.save_path_new_trajs = os.path.join(myargs.save_dir, myargs.algo, myargs.save_name, "new_trajs")
if not myargs.continue_:
utils.create_dir(self.save_path_trained_models)
if not self.myargs.train_from_given_ranked_demos:
utils.create_dir(self.save_path_new_trajs)
# # Create forlder for tensorboard
# self.writer = SummaryWriter(f'runs/visualization')
torch.manual_seed(myargs.seed)
torch.cuda.manual_seed_all(myargs.seed)
np.random.seed(myargs.seed)
if myargs.cuda and torch.cuda.is_available() and myargs.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
self.envs = make_vec_envs(myargs.env_name, myargs.seed, myargs.num_processes,
myargs.gamma, self.log_dir, self.device, allow_early_resets=True, num_frame_stack=2, **kwargs)
# envs = ProcgenEnv(num_envs=myargs.env_name, env_name="heistpp", **kwargs)
# envs = gym.make(myargs.env_name, **kwargs)
self.is_atari = myargs.is_atari
if myargs.env_name in ["MountainCar-v0", "Reacher-v2", "Acrobot-v1", "Thrower-v2"]:
hidden_size_policy = 10
else:
hidden_size_policy = 64
if self.myargs.continue_:
# Load the pretrained policy
print(f'Loading policy for continuing run {self.myargs.save_name} .....')
model_save_address_policy = os.path.join(self.save_path_trained_models, self.myargs.save_name + f"_policy.pt")
self.actor_critic, ob_rms = torch.load(model_save_address_policy, map_location=self.device)
else:
self.actor_critic = Policy(
self.envs.observation_space.shape,
self.envs.action_space,
self.device,
base_kwargs={'recurrent': myargs.recurrent_policy, 'hidden_size': hidden_size_policy})
self.actor_critic.to(self.device)
if myargs.algo == 'a2c':
self.agent = algo.A2C_ACKTR(
self.actor_critic,
myargs.value_loss_coef,
myargs.entropy_coef,
lr=myargs.lr,
eps=myargs.eps,
alpha=myargs.alpha,
max_grad_norm=myargs.max_grad_norm)
elif myargs.algo == 'ppo':
self.agent = algo.PPO(
self.actor_critic,
myargs.clip_param,
myargs.ppo_epoch,
myargs.num_mini_batch,
myargs.value_loss_coef,
myargs.entropy_coef,
lr=myargs.lr,
eps=myargs.eps,
max_grad_norm=myargs.max_grad_norm)
elif myargs.algo == 'acktr':
self.agent = algo.A2C_ACKTR(
self.actor_critic, myargs.value_loss_coef, myargs.entropy_coef, acktr=True)
# Initialize the reward function
# self.reward_obj = reward_cl(myargs.num_processes, self.device, self.envs.observation_space.shape, myargs.rew_lr, myargs.rew_sign, myargs.rew_mag)
self.num_rew_nets = myargs.num_rew_nets
if self.myargs.env_name in ["MountainCar-v0", "Reacher-v2", "Acrobot-v1", "Thrower-v2"]:
FC_dim_rew = 10
else:
FC_dim_rew = 60
self.reward_objs = [reward_cl(self.device, self.envs.observation_space.shape, myargs.rew_lr, myargs.rew_sign, myargs.rew_mag, rew_kwargs={'FC_dim':FC_dim_rew}) for i in range(self.num_rew_nets)]
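# An ensemble of num_rew_nets independent reward networks is maintained; at
# rollout time their per-step predictions are averaged (see rews_nets_step in
# update_policy) before being combined with the sparse and control rewards.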
if self.myargs.continue_:
# Load the pretrained reward function
print(f'Loading the reward function for continuing run {self.myargs.save_name} .....')
for reward_idx, reward_obj in enumerate(self.reward_objs):
model_save_address = os.path.join(self.save_path_trained_models, self.myargs.save_name + f"_reward_{reward_idx}.pt")
checkpoint = torch.load(model_save_address, map_location=self.device)
self.reward_objs[reward_idx].reward_net.load_state_dict(checkpoint['model_state_dict'])
self.reward_objs[reward_idx].optimizer.load_state_dict(checkpoint['optimizer_state_dict'])
self.reward_objs[reward_idx].reward_net.train()
self.rollouts = RolloutStorage(myargs.num_steps, myargs.num_processes,
self.envs.observation_space.shape, self.envs.action_space,
self.actor_critic.recurrent_hidden_state_size)
obs = self.envs.reset()
self.rollouts.obs[0].copy_(obs)
self.rollouts.to(self.device)
with torch.no_grad():
rew_nets_step_list = [reward_obj.reward_net(obs) for reward_obj in self.reward_objs]
self.old_rews_nets_step = torch.mean(torch.cat(rew_nets_step_list, dim=1), dim=1)
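# Cache the ensemble-mean reward of the initial observation; when shaping is
# enabled this apparently serves as the previous-step potential in the
# gamma * R_hat(s') - R_hat(s) shaping term computed in update_policy.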
if not myargs.continue_:
with open(self.log_dir + f"/policy_stats.txt", "w") as file:
file.write("overal_tr_iter_idx, updates , num timesteps , FPS, number of Last training episodes, dist_entropy, value_loss, action_loss, mean reward, median reward, min reward, max reward \n")
with open(self.log_dir + f"/rew_weights_stats.txt", "w") as file:
file.write("reward_mean, reward_std \n")
with open(self.log_dir + f"/rew_losses.txt", "w") as file:
file.write("g_value \n")
with open(self.log_dir + f"/rew_losses_val.txt", "w") as file:
file.write("g_value \n")
with open(self.log_dir + "/buffer_stats.txt", "w") as file:
file.write("mean, range, std, mean_new, range_new, std_new \n")
# if not myargs.run_type == "pretrain_only":
# self.new_trajs_list = deque(maxlen= self.init_params['size_of_new_trajs_list']) # deque for storing new trajs produced by the policy
# self.new_trajs_returns_list = deque(maxlen= self.init_params['size_of_new_trajs_list'])
self.new_trajs_list = []
self.new_trajs_list_val = []
if self.myargs.train_from_given_ranked_demos:
self.sample_trajs_from_memory = True
if "devv" in self.myargs.save_name:
max_train_load = 50
else:
max_train_load = -1
# *************************************************
# Load new_trajs_returns_list
with open(f"{self.save_path_new_trajs}/trajs_returns_all.pkl", "rb") as file:
new_trajs_returns_list_temp = pickle.load(file)
self.new_trajs_returns_list = new_trajs_returns_list_temp[:max_train_load]
print("loading training ranked trajs ....")
start_loading_trajs = time.time()
for idx in range(len(self.new_trajs_returns_list[0:max_train_load])):
# print(f"loading traj {idx}th out of {len(self.new_trajs_returns_list)} trajs ....")
traj = torch.load(self.save_path_new_trajs+f"/traj_{idx}.pt", map_location=self.device)
self.new_trajs_list.append(traj)
print(f"Total time loading training ranked trajs: {time.time()-start_loading_trajs}")
# *************************************************
# Load new_trajs_returns_list_val
with open(f"{self.save_path_new_trajs_val}/trajs_returns_all.pkl", "rb") as file:
new_trajs_returns_list_val_temp = pickle.load(file)
self.new_trajs_returns_list_val = new_trajs_returns_list_val_temp[:max_train_load]
print("loading val ranked trajs ....")
start_loading_trajs = time.time()
for idx in range(len(self.new_trajs_returns_list_val[0:max_train_load])):
# print(f"loading traj {idx}th out of {len(self.new_trajs_returns_list)} trajs ....")
traj = torch.load(self.save_path_new_trajs_val+f"/traj_{idx}.pt", map_location=self.device)
self.new_trajs_list_val.append(traj)
print(f"Total time loading val ranked trajs: {time.time()-start_loading_trajs}")
else:
self.new_trajs_returns_list = []
self.sample_trajs_from_memory = False
self.new_trajs_last_idx = 0
self.size_of_new_trajs_list = init_params['size_of_new_trajs_list']
if self.myargs.skip_rew_eval:
print(f"Skip loading validation trajectories ..... {self.myargs.save_name}")
elif self.myargs.run_type == "main_opt_demos":
# print(f"Loading policies and producing validation trajectories ..... {self.myargs.save_name}")
# save_path_policy = os.path.join("./trained_models", self.myargs.algo, f"{self.myargs.algo}_GT_{self.myargs.env_name}-stacked")
# names = listdir(save_path_policy)
# eval_trajs_list = []
# eval_trajs_return_list = []
# if "devv" in self.myargs.save_name:
# skip = 50
# else:
# skip = 5
# for name in names[0:-1:skip]:
# address = os.path.join(save_path_policy,name)
# actor_critic, ob_rms = torch.load(address, map_location=self.device)
# actor_critic.to(self.device)
# produced_traj = self.produce_trajs_from_policy_sparsified_reward(actor_critic, 1, self.myargs.sparseness, self.init_params['produced_traj_length'], self.myargs.env_name, is_random=False)
# produced_traj, produced_traj_return = myutils.trajs_calc_return_no_device(produced_traj, self.myargs.discounted_rew, self.myargs.gamma)
# eval_trajs_list.append(produced_traj[0])
# eval_trajs_return_list.append(produced_traj_return[0])
# # for i in range(68):
# # print (f'np.mean: { torch.mean( torch.tensor([item[0][0,i] for item in produced_traj[0] ])) }')
self.val_pairs, self.val_pairs_returns =[], []
if self.myargs.train_from_given_ranked_demos:
try:
self.val_pairs, self.val_pairs_returns, _ = myutils.create_pairs_distance_apart_device_memory(self.new_trajs_list_val, self.new_trajs_returns_list_val, self.init_params['num_eval_pairs'], self.myargs.priority_sampling, self.device, self.init_params["difference_factor"])
# # pairs, returns = myutils.create_pairs_no_step_no_subsample(ranked_traj_list, traj_returns, batch_size, self.myargs.priority_sampling)
# # if any pair is returned, the returns should be different as this is guaranteed in myutils.create_pairs_no_step
# # pairs.extend(pairs_raw), returns.extend(returns_raw)
except Exception as e:
print("********************************************** \n \
********************************* \n \
problems with create_pairs for evaluation ......")
# del eval_trajs_list
# del eval_trajs_return_list
# if myargs.num_opt_demo > 0 and not myargs.continue_:
# print(f"Loading policies and producing demo trajectories ..... {self.myargs.save_name}")
# # names = [myargs.saved_models_name+"_"+str(item)+".pt" for item in suffix_list]
# self.demos_opt, self.demos_opt_returns = [], []
# produced_trajs = []
# for name in names[-myargs.num_opt_demo:]:
# address = os.path.join(save_path_policy,name)
# actor_critic, ob_rms = torch.load(address, map_location=self.device)
# actor_critic.to(self.device)
# produced_traj = self.produce_trajs_from_policy_sparsified_reward(actor_critic, 1, self.myargs.sparseness, self.init_params['produced_traj_length'], self.myargs.env_name, is_random=False)
# produced_trajs.append(produced_traj[0])
# self._add_trajs_to_new_trajs_list_hardDrive(produced_trajs, 0, self.init_params['limited_buffer_length'], self.log_dir, add_only_non_zero_trajs=False, address=self.save_path_new_trajs)
# else:
# pass
# if self.myargs.num_overal_updates > 0: # if a non-zero value is provided for myargs.num_overal_updates,
# # the value of self.init_params["num_overal_updates"] will be overwritten
# self.init_params["num_overal_updates"] = self.myargs.num_overal_updates
# print(f"Loading training trajectories ..... {self.myargs.save_name}")
# with open(trajs_address + '/all_trajectories', 'rb') as f:
# trajs_init = torch.load(f, map_location=self.device)
# trajs_total_num = len(trajs_init)
# traj_idxs_tr = np.arange(0, trajs_total_num, init_params['training_trajectory_skip'])
# traj_idxs_val = traj_idxs_tr[:-3] + 1
# demos_train_init = [trajs_init[idx] for idx in traj_idxs_tr]
# demos_val_init = [trajs_init[idx] for idx in traj_idxs_val]
# self.demos_train, self.demos_train_returns = myutils.trajs_calc_return_no_device(demos_train_init)
# self.demos_val, self.demos_val_returns = myutils.trajs_calc_return_no_device(demos_val_init)
# self.ranked_trajs_list, self.returns = myutils.cut_trajs_calc_return_no_device(trajs, self.init_params['demo_horizon'])
# FOLLOWING 4 LINES ARE USED IF WE USE OLD METHOD OF SAMPLLING SUM-TRAJECTORIES FROM INITIAL DEMONSTRATIONS
# self.demos_subsampled_list, self.demos_returns_subsampled = myutils.subsample_demos(trajs, self.init_params['subsample_length'], self.init_params['subsample_increment'], self.init_params['demo_horizon'])
# self.demos_subsampled_list_val, self.demos_returns_subsampled_val = myutils.subsample_demos(trajs_val, self.init_params['subsample_length'], self.init_params['subsample_increment'], self.init_params['demo_horizon'])
# self.demos_subsampled_list, self.demos_returns_subsampled = myutils.subsample_demos_true_return(trajs, self.init_params['subsample_length'], self.init_params['subsample_increment'], self.init_params['demo_horizon'])
# self.demos_subsampled_list_val, self.demos_returns_subsampled_val = myutils.subsample_demos_true_return(trajs_val, self.init_params['subsample_length'], self.init_params['subsample_increment'], self.init_params['demo_horizon'])
if not myargs.continue_:
self._save_attributes_and_hyperparameters()
def train(self):
# if self.myargs.pretrain in ["yes"]:
# print(f"pretraining the reward function ....{self.myargs.save_name}")
# # UPDATE REWARD based on initial set of demonstrations *****************************************************
# # Pre-train the policy based on the pre-trained reward
# if self.myargs.run_type == "pretrain_only":
# print(f"***************** Stopped right after pretraining *****************")
# return
# MAIN TRAINING LOOP
start_time = time.time()
# num_pol_updates_at_each_overal_update = int(self.myargs.num_env_steps_tr) // self.myargs.num_steps // self.myargs.num_processes // self.init_params["num_overal_updates"]
self.init_params["num_overal_updates"] = int(self.myargs.num_env_steps_tr) // self.myargs.num_steps // self.myargs.num_processes // self.init_params["num_pol_updates_at_each_overal_update"]
num_pol_updates_at_each_overal_update = self.init_params["num_pol_updates_at_each_overal_update"]
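# Example with hypothetical settings: num_env_steps_tr=1e7, num_steps=128,
# num_processes=16, num_pol_updates_at_each_overal_update=10 gives
# 10_000_000 // 128 // 16 // 10 = 488 overall update rounds.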
if self.myargs.continue_:
start_rew_updates = True
start_shaping = True
add_only_non_zero_trajs = False # This is true until set to False
start_pol_updates = True
# read the last overal_rew_tr_iter_idx
rew_weights_stats_loaded = np.loadtxt(f"{self.log_dir}/rew_weights_stats.txt", skiprows=1)
overal_rew_tr_iter_idx = int(rew_weights_stats_loaded[-1,-1])
policy_stats_loaded = np.loadtxt(f"{self.log_dir}/policy_stats.txt", skiprows=1)
overal_pol_tr_iter_idx = int(policy_stats_loaded[-1,0])
del rew_weights_stats_loaded
del policy_stats_loaded
# Load new_trajs_returns_list
trajs_return_save_address = f"{self.save_path_new_trajs}/trajs_returns_all.pkl"
with open(trajs_return_save_address, "rb") as file:
self.new_trajs_returns_list = pickle.load(file)
else:
start_rew_updates = False
start_shaping = False
add_only_non_zero_trajs = True # This is true until set to False
overal_rew_tr_iter_idx = 0
overal_pol_tr_iter_idx = 0
if self.myargs.shaping:
start_pol_updates = True
else:
start_pol_updates = False
previous_overal_rew_tr_iter_idx = overal_rew_tr_iter_idx
overal_rew_pretrain_iter_idx = 0 # this includes pre-training iterations
total_time_policy = 0
total_time_reward = 0
while overal_rew_tr_iter_idx < self.init_params["num_overal_updates"] + previous_overal_rew_tr_iter_idx:
print(f"Training iter number: {overal_rew_tr_iter_idx}")
# UPDATE POLICY *****************************************************
if "devv" in self.myargs.save_name:
num_pol_updates_at_each_overal_update = 1
# pass
# note, the policy will not actually be updated if start_rew_updates == False
if start_pol_updates:
overal_pol_tr_iter_idx += 1
self.update_policy(overal_pol_tr_iter_idx, overal_rew_tr_iter_idx, num_pol_updates_at_each_overal_update,
start_pol_updates, start_shaping, add_only_non_zero_trajs, dont_collect_trajs=False)
# print(f"Memory usage in MiB line 576: {self.memory_usage_psutil()}")
# print("memory usage in line 576")
# print(torch.cuda.memory_summary(abbreviated=True))
# UPDATE REWARD *****************************************************
if not start_rew_updates and np.count_nonzero(self.new_trajs_returns_list)>=self.myargs.min_good_trajs:
# This if statement should only be executed once
# the following line is to add some bad trajectories from the untrained policy to increase the variance in the buffer
self.update_policy(overal_pol_tr_iter_idx, overal_rew_tr_iter_idx, num_pol_updates_at_each_overal_update,
start_pol_updates, start_shaping, add_only_non_zero_trajs=False, dont_collect_trajs=False)
if np.std(self.new_trajs_returns_list) > 0.0:
# we can't start reward learning unless there is a positive variance in the buffer
start_rew_updates = True
start_shaping = True
start_pol_updates = True
add_only_non_zero_trajs = False
if start_rew_updates:
overal_rew_tr_iter_idx += 1
_, _ = self.update_reward(overal_rew_tr_iter_idx)
# print(f"Memory usage in MiB line 599: {self.memory_usage_psutil()}")
else:
print(f"No reward update for this iteration")
# save both the policy and the reward every self.init_params['save_reward_int'] overal iterations
if overal_rew_tr_iter_idx % self.init_params['save_reward_int'] == 0 and overal_rew_tr_iter_idx != 0 and start_rew_updates:
# SAVE REWARD NETWORKS
for reward_idx, reward_obj in enumerate(self.reward_objs):
# model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_reward_{reward_idx}_iter_{overal_rew_tr_iter_idx}.pt")
model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_reward_{reward_idx}.pt")
torch.save({'stage': 'train',
'model_state_dict': reward_obj.reward_net.state_dict(),
'optimizer_state_dict': reward_obj.optimizer.state_dict()},
model_save_address)
# SAVE POLICY
# model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_policy_iter_{overal_rew_tr_iter_idx}.pt")
model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_policy.pt")
torch.save([
self.actor_critic,
getattr(utils.get_vec_normalize(self.envs), 'ob_rms', None)
], model_save_address)
if overal_rew_tr_iter_idx == 0:
overal_rew_pretrain_iter_idx += 1
print(f"overal_rew_pretrain_iter_idx: {overal_rew_pretrain_iter_idx}")
if overal_rew_pretrain_iter_idx > self.init_params["num_overal_updates"]/2:
print("Pretrain took too long and training did not start. Breaking the program now ...")
break # this breaks the while loop
# # sample trajectories from the new policy and add to the buffer
# produced_trajs = self.produce_trajs_from_policy_sparsified_reward(self.actor_critic, self.init_params['num_trajs_produced_each_iter'], self.myargs.sparseness, self.init_params['produced_traj_length'], is_random=False)
# rew_mean, rew_range, rew_std, rew_mean_new, rew_range_new, rew_std_new = self._add_trajs_to_new_trajs_list_hardDrive(produced_trajs)
# with open(self.log_dir + '/buffer_stats.txt', 'a') as f:
# f.write(f"{rew_mean:.10f} {rew_range:.10f} {rew_std:.10f} {rew_mean_new:.10f} {rew_range_new:.10f} {rew_std_new:.10f} \n")
total_training_time = time.time() - start_time
print(f"Total training time: {total_training_time}")
if not self.myargs.dont_remove_buffer:
# REMOVE THE REPLAY BUFFER FROM THE HARD-DRIVE TO AVOID TAKING TOO MUCH SPACE
save_path_new_trajs_rel = os.path.relpath(self.save_path_new_trajs, start = os.curdir)
print ("REMOVE THE REPLAY BUFFER FROM THE HARD DRIVE TO AVOID TAKING TOO MUCH SPACE ...")
os.system(f'rm -rf {save_path_new_trajs_rel}')
def train_from_given_ranked_demos(self):
# if self.myargs.pretrain in ["yes"]:
# print(f"pretraining the reward function ....{self.myargs.save_name}")
# # UPDATE REWARD based on initial set of demonstrations *****************************************************
# # Pre-train the policy based on the pre-trained reward
# if self.myargs.run_type == "pretrain_only":
# print(f"***************** Stopped right after pretraining *****************")
# return
# MAIN TRAINING LOOP
start_time = time.time()
# num_pol_updates_at_each_overal_update = int(self.myargs.num_env_steps_tr) // self.myargs.num_steps // self.myargs.num_processes
num_policy_updates = int(self.myargs.num_env_steps_tr) // self.myargs.num_steps // self.myargs.num_processes // self.init_params["num_pol_updates_at_each_overal_update"]
start_rew_updates = True
start_shaping = True
overal_rew_tr_iter_idx = 0
overal_pol_tr_iter_idx = 0
start_pol_updates = True
add_only_non_zero_trajs = False
total_time_policy = 0
total_time_reward = 0
while overal_rew_tr_iter_idx < self.init_params['num_rew_updates']:
print(f"Training iter number for reward: {overal_rew_tr_iter_idx}")
self.update_reward(overal_rew_tr_iter_idx)
overal_rew_tr_iter_idx += 1
# SAVE REWARD NETWORKS
for reward_idx, reward_obj in enumerate(self.reward_objs):
# model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_reward_{reward_idx}_iter_{overal_rew_tr_iter_idx}.pt")
model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_reward_{reward_idx}.pt")
torch.save({'stage': 'train',
'model_state_dict': reward_obj.reward_net.state_dict(),
'optimizer_state_dict': reward_obj.optimizer.state_dict()},
model_save_address)
while overal_pol_tr_iter_idx < num_policy_updates:
print(f"Training iter number for policy: {overal_pol_tr_iter_idx}")
self.update_policy(overal_pol_tr_iter_idx, overal_rew_tr_iter_idx, self.init_params["num_pol_updates_at_each_overal_update"],
start_pol_updates, start_shaping, add_only_non_zero_trajs, dont_collect_trajs=True)
overal_pol_tr_iter_idx += 1
# SAVE POLICY
# model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_policy_iter_{overal_rew_tr_iter_idx}.pt")
model_save_address = os.path.join(self.save_path_trained_models, f"{self.myargs.save_name}_policy.pt")
torch.save([
self.actor_critic,
getattr(utils.get_vec_normalize(self.envs), 'ob_rms', None)
], model_save_address)
def update_reward(self, overal_tr_iter_idx):
start = time.time()
if self.myargs.use_linear_lr_decay_rew:
# decrease learning rate linearly
for reward_obj in self.reward_objs:
utils.update_linear_schedule(
reward_obj.optimizer, overal_tr_iter_idx, self.init_params["num_overal_updates"],
self.myargs.rew_lr)
elif self.myargs.use_increase_decrease_lr_rew:
for reward_obj in self.reward_objs:
utils.update_linear_schedule_increase_decrease(
reward_obj.optimizer, overal_tr_iter_idx, self.init_params["num_overal_updates"],
self.myargs.rew_lr)
# Update g according to produced trajectories
pairs, returns, pair_select_time_total, rew_update_time_total, load_trajs_time_total = self.grad_g_theta_update(overal_tr_iter_idx, self.init_params['num_rew_training_batches'], self.num_rew_nets,
self.init_params['batch_size'], demos_or_policy='demos', pretrain_or_train="pretrain", discounted_rew=self.myargs.discounted_rew)
# # save reward after each self.init_params['save_reward_int'] overal iterations
# if overal_tr_iter_idx % self.init_params['save_reward_int'] == 0 and overal_tr_iter_idx != 0:
# for reward_idx, reward_obj in enumerate(self.reward_objs):
# model_save_address = os.path.join(self.save_path_trained_models, self.myargs.save_name + f"_reward_{reward_idx}_" + str(overal_tr_iter_idx) + ".pt")
# torch.save(reward_obj.reward_net, model_save_address)
rew_time_total = time.time()-start
print(f"rew_time_total: {rew_time_total}, pair_select_time_total: {pair_select_time_total}, load_trajs_time_total: {load_trajs_time_total}, in one training_iter \n")
return pairs, returns
def update_policy(self, overal_tr_iter_idx, overal_rew_tr_iter_idx, num_updates, start_pol_updates, start_shaping,
add_only_non_zero_trajs, dont_collect_trajs):
kwargs, myargs = self.kwargs, self.myargs
episode_rews_from_info = deque(maxlen=myargs.num_processes)
episode_rew_net_return = deque(maxlen=myargs.num_processes)
start = time.time()
total_update_time = 0
total_step_time = 0
total_dense_rew_calc_time = 0
if myargs.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(
self.agent.optimizer, overal_tr_iter_idx, self.init_params["num_overal_updates"],
self.agent.optimizer.lr if myargs.algo == "acktr" else myargs.lr)
elif myargs.use_increase_decrease_lr_pol:
utils.update_linear_schedule_increase_decrease(
self.agent.optimizer, overal_tr_iter_idx, self.init_params["num_overal_updates"],
self.agent.optimizer.lr if myargs.algo == "acktr" else myargs.lr)
unrolled_trajs_all = deque(maxlen=self.init_params["size_of_new_trajs_list"])
no_run, no_run_no_cntr = self._specify_env_rew_type(self.is_atari)
for j in range(num_updates):
# At each j, each of the policy will be unrolled on each of the myargs.num_processes environments
# for myargs.num_steps steps
return_nets_episode = torch.zeros((myargs.num_rew_nets, myargs.num_processes), device=self.device)
return_GT_episode_cntr = np.zeros(myargs.num_processes)
return_GT_episode_run = np.zeros(myargs.num_processes)
num_succ_run_forward = np.zeros(myargs.num_processes)
displacement_forward_till_rew = np.zeros(myargs.num_processes)
steps_till_rew = np.zeros(myargs.num_processes)
displacement_forward_episode_total = np.zeros(myargs.num_processes)
num_succ_not_done = np.zeros(myargs.num_processes)
return_sparse_episode = np.zeros(myargs.num_processes)
return_dense_plus_cntr_episode = np.zeros(myargs.num_processes)
num_succ_run_forward_avg_steps = np.zeros(myargs.num_processes)
num_succ_not_done_avg_steps = np.zeros(myargs.num_processes)
num_steps_taken_to_rew = np.zeros(myargs.num_processes)
displacement_total_from_infos = np.zeros(myargs.num_processes)
unrolled_trajs = [[] for _ in range(myargs.num_processes)]
for step in range(myargs.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = self.actor_critic.act(
self.rollouts.obs[step], self.rollouts.recurrent_hidden_states[step],
self.rollouts.masks[step])
if myargs.env_name in ["CartPole-v0", "MountainCar-v0", "Acrobot-v1"]: # Added by <NAME>
# action = action[0]
action_fed = torch.squeeze(action)
else:
action_fed = action
step_time_start = time.time()
obs, reward_GT, done, infos = self.envs.step(action_fed)
total_step_time += time.time() - step_time_start
# reward for current state action pair and next obs
rews_run_step, rews_cntr_step = self._calc_rews_run_cntr_step(no_run_no_cntr, no_run, infos)
if self.is_atari or myargs.sparse_rew_type == "GT":
reward_GT = torch.squeeze(reward_GT)
reward_sparse = reward_GT
else:
(reward_sparse, displacement_forward_till_rew, steps_till_rew,
displacement_forward_episode_total, num_succ_run_forward,
num_steps_taken_to_rew, num_succ_not_done) = self.calc_sparse_reward(done, infos, displacement_forward_till_rew,
steps_till_rew, displacement_forward_episode_total, num_succ_run_forward,
num_steps_taken_to_rew, num_succ_not_done, reward_GT, myargs.num_processes, myargs)
num_succ_run_forward_avg_steps += num_succ_run_forward
num_succ_not_done_avg_steps += num_succ_not_done
rew_calc_start_time = time.time()
with torch.no_grad():
rew_nets_step_list = [reward_obj.reward_net(obs) for reward_obj in self.reward_objs]
total_dense_rew_calc_time += time.time() - rew_calc_start_time
for rew_idx in range(len(self.reward_objs)):
return_nets_episode[rew_idx, :] += rew_nets_step_list[rew_idx].reshape(myargs.num_processes)
# add rewards of the networks to the network calculated returns
for info in infos:
if 'episode' in info.keys():
episode_rews_from_info.append(info['episode']['r'])
# info stores the undiscounted return of each trajectory
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
rews_nets_step = torch.mean(torch.cat(rew_nets_step_list, dim=1), dim=1)
if self.myargs.sparse_cntr:
cntr_coeff = torch.zeros(len(rews_cntr_step), device=self.device)
non_zero_idxs = torch.nonzero(reward_sparse)
if non_zero_idxs.size()[0] > 0:
for idx in non_zero_idxs:
cntr_coeff[idx] = self.myargs.cntr_coeff
else:
cntr_coeff = torch.ones(len(rews_cntr_step), device=self.device) * self.myargs.cntr_coeff
if self.myargs.shaping:
if start_shaping:
# Whether we use discounting in learning a reward or not, we use discounting when applying shaping
total_rews_step = reward_sparse.to(self.device) + cntr_coeff * torch.tensor(rews_cntr_step, device=self.device) + self.myargs.gamma * rews_nets_step - self.old_rews_nets_step
else:
total_rews_step = reward_sparse.to(self.device) + cntr_coeff * torch.tensor(rews_cntr_step, device=self.device)
else:
total_rews_step = self.myargs.rew_coeff * rews_nets_step + cntr_coeff * torch.tensor(rews_cntr_step, device=self.device)
total_rews_GT_step = reward_sparse.to(self.device) + cntr_coeff * torch.tensor(rews_cntr_step, device=self.device)
total_rews_step_torch = torch.unsqueeze(total_rews_step, dim=1)
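# Note on the shaping branch above: when start_shaping is enabled, the mean
# learned reward acts as a potential function Phi(s), and the step reward becomes
#   r_total = r_sparse + c * r_cntr + gamma * Phi(s') - Phi(s),
# i.e. potential-based reward shaping (Ng et al., 1999), which in the idealized,
# fully observed setting leaves the optimal policy of the sparse task unchanged.
# Without shaping, the learned reward is simply scaled by rew_coeff and added to
# the control cost.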
for idx_proc, _done in enumerate(done):
return_GT_episode_cntr[idx_proc] += rews_cntr_step[idx_proc] # * myargs.gamma**step
return_GT_episode_run[idx_proc] += rews_run_step[idx_proc] # * myargs.gamma**step
return_sparse_episode[idx_proc] += reward_sparse[idx_proc]
return_dense_plus_cntr_episode[idx_proc] += total_rews_step[idx_proc].item()
if step < self.init_params["produced_traj_length"] and not dont_collect_trajs:
for idx, obs_item in enumerate(obs):
unrolled_trajs[idx].append([obs_item.clone().detach().cpu(), reward_sparse[idx] ])
if start_pol_updates:
self.rollouts.insert(obs, recurrent_hidden_states, action,
action_log_prob, value, total_rews_step_torch, masks, bad_masks)
self.old_rews_nets_step = rews_nets_step.clone()
if not dont_collect_trajs:
self._add_trajs_to_new_trajs_list_hardDrive(unrolled_trajs, overal_rew_tr_iter_idx,
self.init_params['limited_buffer_length'], self.log_dir, add_only_non_zero_trajs,
address=self.save_path_new_trajs, is_val=False)
# for traj in unrolled_trajs:
# unrolled_trajs_all.append(traj)
return_GT_episode_total_my_calc = return_GT_episode_cntr + return_GT_episode_run
# END OF EPISODE OR MAXIMUM NUMBER OF STEPS
for idx in range(myargs.num_processes):
episode_rew_net_return.append(torch.mean(return_nets_episode[:, idx]).item())
if start_pol_updates:
start_update = time.time()
with torch.no_grad():
next_value = self.actor_critic.get_value(
self.rollouts.obs[-1], self.rollouts.recurrent_hidden_states[-1],
self.rollouts.masks[-1]).detach()
self.rollouts.compute_returns(next_value, myargs.use_gae, myargs.gamma,
myargs.gae_lambda, myargs.use_proper_time_limits)
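# compute_returns fills rollouts.returns; with use_gae the targets follow the
# generalized advantage estimator, roughly
#   delta_t = r_t + gamma * V(s_{t+1}) * mask_{t+1} - V(s_t)
#   A_t     = sum_l (gamma * lambda)^l * delta_{t+l},   return_t = A_t + V(s_t)
# (standard GAE recursion; the exact masking details live in RolloutStorage).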
value_loss, action_loss, dist_entropy = self.agent.update(self.rollouts)
num_succ_run_forward_avg_steps /= myargs.num_steps
num_succ_not_done_avg_steps /= myargs.num_steps
self.rollouts.after_update()
total_update_time += time.time() - start_update
if len(episode_rews_from_info) > 1:
total_num_steps = (j + 1) * myargs.num_processes * myargs.num_steps
end = time.time()
with open(self.log_dir + f"/policy_stats.txt", "a") as file:
file.write(
f'{overal_tr_iter_idx:>5} {j:>5} \
{total_num_steps:>8} {int(total_num_steps / (end - start)):.10f} \
{len(episode_rews_from_info):>4} {dist_entropy:.10} \
{value_loss:.10} {action_loss:.10}\
{np.mean(episode_rews_from_info):.10} {np.median(episode_rews_from_info):.10} \
{np.min(episode_rews_from_info):.10} {np.max(episode_rews_from_info):.10} {np.std(episode_rews_from_info):.10}\
{np.mean(episode_rew_net_return):.10} {np.std(episode_rew_net_return):.10} \
{np.mean(return_GT_episode_cntr):.10} {np.std(return_GT_episode_cntr):.10} \
{np.mean(return_GT_episode_run):.10} {np.std(return_GT_episode_run):.10} \
{np.mean(return_sparse_episode):.10} {np.std(return_sparse_episode):.10} \
{np.mean(return_dense_plus_cntr_episode):.10} {np.std(return_dense_plus_cntr_episode):.10} \
{np.mean(num_succ_run_forward_avg_steps):.10} {np.std(num_succ_run_forward_avg_steps):.10} \
{np.mean(num_succ_not_done_avg_steps):.10} {np.std(num_succ_not_done_avg_steps):.10} \
{np.mean(displacement_forward_episode_total):.10} {np.std(displacement_forward_episode_total):.10} \n' )
total_time = time.time()-start
print(f"total policy_time in one training_iter: {total_time}")
print(f"total policy update time in one overal training iter: {total_update_time}")
print(f"total step time: {total_step_time}")
print(f"total_dense_rew_calc_time: {total_dense_rew_calc_time}")
def _add_trajs_to_new_trajs_list_hardDrive(self, produced_trajs, overal_tr_iter_idx, limited_buffer_length, log_dir, add_only_non_zero_trajs, address, is_val=False):
"""
Stores new trajs into the hard drive to save memory
In the pretraining phase, it only adds trajectories with non-zero return to the buffer
"""
# print("Saving produced trajs on hard drive ....")
produced_trajs_list_all, produced_trajs_returns_all = myutils.trajs_calc_return_no_device(produced_trajs, self.myargs.discounted_rew, self.myargs.gamma)
if add_only_non_zero_trajs:
produced_trajs_list, produced_trajs_returns = [], []
for traj, ret in zip(produced_trajs_list_all, produced_trajs_returns_all):
if ret > 0:
produced_trajs_list.append(traj)
produced_trajs_returns.append(ret)
else:
produced_trajs_list, produced_trajs_returns = produced_trajs_list_all, produced_trajs_returns_all
if limited_buffer_length:
start_idx, end_idx = self._calc_start_stop_and_update_new_trajs_last_idx(len(produced_trajs_list))
for idx, total_idx in enumerate(np.arange(start_idx, end_idx+1)):
traj_save_address = f"{address}/traj_{total_idx}.pt"
torch.save(produced_trajs_list[idx], traj_save_address)
if not is_val:
self.new_trajs_returns_list[start_idx:end_idx+1] = produced_trajs_returns[:]
elif is_val:
self.new_trajs_returns_list_val[start_idx:end_idx+1] = produced_trajs_returns[:]
else:
if not is_val:
last_idx = len(self.new_trajs_returns_list)
for idx, traj in enumerate(produced_trajs_list):
total_idx = idx + last_idx
traj_save_address = f"{address}/traj_{total_idx}.pt"
torch.save(produced_trajs_list[idx], traj_save_address)
self.new_trajs_returns_list.extend(produced_trajs_returns)
elif is_val:
last_idx = len(self.new_trajs_returns_list_val)
for idx, traj in enumerate(produced_trajs_list):
total_idx = idx + last_idx
traj_save_address = f"{address}/traj_{total_idx}.pt"
torch.save(produced_trajs_list[idx], traj_save_address)
self.new_trajs_returns_list_val.extend(produced_trajs_returns)
trajs_return_save_address = f"{address}/trajs_returns_all.pkl"
with open(trajs_return_save_address, "wb") as file:
if not is_val:
pickle.dump(self.new_trajs_returns_list, file)
else:
pickle.dump(self.new_trajs_returns_list_val, file)
# Get statistics of the buffer and the new trajs
if len(produced_trajs_returns) > 0:
rew_mean = np.mean(self.new_trajs_returns_list)
rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
rew_std = np.std(self.new_trajs_returns_list)
rew_mean_new = np.mean(produced_trajs_returns)
rew_range_new = np.abs(np.max(produced_trajs_returns) - np.min(produced_trajs_returns))
rew_std_new = np.std(produced_trajs_returns)
else:
rew_mean_new, rew_range_new, rew_std_new = 0, 0, 0
if len(self.new_trajs_returns_list) > 0:
rew_mean = np.mean(self.new_trajs_returns_list)
rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
rew_std = np.std(self.new_trajs_returns_list)
else:
rew_mean, rew_range, rew_std = 0, 0, 0
if log_dir != "NONE":
with open( f"{log_dir}/buffer_stats.txt", 'a') as file:
file.write(f"{rew_mean:.10f} {rew_range:.10f} {rew_std:.10f} {rew_mean_new:.10f} {rew_range_new:.10f} {rew_std_new:.10f} {overal_tr_iter_idx:>5}\n")
def _add_trajs_to_new_trajs_list_memory_FIFO(self, produced_trajs, start_rew_updates):
"""
first in first out
"""
produced_trajs_list_all, produced_trajs_returns_all = myutils.trajs_calc_return_no_device(produced_trajs, self.myargs.discounted_rew, self.myargs.gamma)
if not start_rew_updates:
produced_trajs_list, produced_trajs_returns = [], []
for traj, rew in zip(produced_trajs_list_all, produced_trajs_returns_all):
if rew > 0:
produced_trajs_list.append(traj)
produced_trajs_returns.append(rew)
else:
produced_trajs_list, produced_trajs_returns = produced_trajs_list_all, produced_trajs_returns_all
start_idx, end_idx = self._calc_start_stop_and_update_new_trajs_last_idx(len(produced_trajs_list))
self.new_trajs_list[start_idx:end_idx] = produced_trajs_list[:]
self.new_trajs_returns_list[start_idx:end_idx] = produced_trajs_returns[:]
if len(produced_trajs_returns) > 0:
rew_mean = np.mean(self.new_trajs_returns_list)
rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
rew_std = np.std(self.new_trajs_returns_list)
rew_mean_new = np.mean(produced_trajs_returns)
rew_range_new = np.abs(np.max(produced_trajs_returns) - np.min(produced_trajs_returns))
rew_std_new = np.std(produced_trajs_returns)
else:
rew_mean_new, rew_range_new, rew_std_new = 0, 0, 0
if len(self.new_trajs_returns_list) > 0:
rew_mean = np.mean(self.new_trajs_returns_list)
rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
rew_std = np.std(self.new_trajs_returns_list)
else:
rew_mean, rew_range, rew_std = 0, 0, 0
return rew_mean, rew_range, rew_std, rew_mean_new, rew_range_new, rew_std_new
def _add_trajs_to_new_trajs_list_memory_RW(self, produced_trajs, start_rew_updates):
"""
Only replaces the worst trajectories; trajectories are stored in RAM.
RW stands for "replace worst".
"""
# poduced_trajs = myutils.produce_trajs_from_policy(self.actor_critic, self.init_params['num_trajs_produced_each_iter'], self.init_params['produced_traj_length'], self.kwargs, self.myargs)
produced_trajs_list_all, produced_trajs_returns_all = myutils.trajs_calc_return_no_device(produced_trajs, self.myargs.discounted_rew, self.myargs.gamma)
if not start_rew_updates:
produced_trajs_list, produced_trajs_returns = [], []
for traj, rew in zip(produced_trajs_list_all, produced_trajs_returns_all):
if rew > 0:
produced_trajs_list.append(traj)
produced_trajs_returns.append(rew)
else:
produced_trajs_list, produced_trajs_returns = produced_trajs_list_all, produced_trajs_returns_all
start_idx, end_idx = self._calc_start_stop_and_update_new_trajs_last_idx_v1(len(produced_trajs_list))
if len(self.new_trajs_returns_list) > 1:
self.new_trajs_returns_list, self.new_trajs_list = (list(t) for t in zip(*sorted(zip(self.new_trajs_returns_list, self.new_trajs_list))))
self.new_trajs_list[start_idx:end_idx] = produced_trajs_list[:]
self.new_trajs_returns_list[start_idx:end_idx] = produced_trajs_returns[:]
if len(produced_trajs_returns) > 0:
rew_mean = np.mean(self.new_trajs_returns_list)
rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
rew_std = np.std(self.new_trajs_returns_list)
rew_mean_new = np.mean(produced_trajs_returns)
rew_range_new = np.abs(np.max(produced_trajs_returns) - np.min(produced_trajs_returns))
rew_std_new = np.std(produced_trajs_returns)
else:
rew_mean_new, rew_range_new, rew_std_new = 0, 0, 0
if len(self.new_trajs_returns_list) > 0:
rew_mean = np.mean(self.new_trajs_returns_list)
rew_range = np.abs(np.max(self.new_trajs_returns_list) - np.min(self.new_trajs_returns_list))
rew_std = np.std(self.new_trajs_returns_list)
else:
rew_mean, rew_range, rew_std = 0, 0, 0
return rew_mean, rew_range, rew_std, rew_mean_new, rew_range_new, rew_std_new
def _calc_start_stop_and_update_new_trajs_last_idx(self, num_trajs):
# first in first out
if self.new_trajs_last_idx+num_trajs >= self.init_params["size_of_new_trajs_list"]:
self.new_trajs_last_idx = self.myargs.num_opt_demo
start_idx, end_idx = self.new_trajs_last_idx, self.new_trajs_last_idx+num_trajs-1
self.new_trajs_last_idx += num_trajs
return start_idx, end_idx
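# Worked example (illustrative numbers): with size_of_new_trajs_list = 100,
# num_opt_demo = 5, new_trajs_last_idx = 98 and num_trajs = 4, the wrap check
# 98 + 4 >= 100 resets new_trajs_last_idx to 5, so this call returns
# (start_idx, end_idx) = (5, 8) and leaves new_trajs_last_idx at 9. The slots
# [0, num_opt_demo) holding the optimal demos are therefore never overwritten.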
def _calc_start_stop_and_update_new_trajs_last_idx_v1(self, num_trajs):
# only replaces the last elements (of the sorted buffer)
if self.new_trajs_last_idx+num_trajs <= self.init_params["size_of_new_trajs_list"]:
start_idx, end_idx = self.new_trajs_last_idx, self.new_trajs_last_idx+num_trajs
self.new_trajs_last_idx += num_trajs
else:
start_idx, end_idx = self.init_params["size_of_new_trajs_list"]-1-num_trajs, self.init_params["size_of_new_trajs_list"]-1
return start_idx, end_idx
def grad_g_theta_update(self, overal_tr_iter_idx, num_batches, num_rew_nets, batch_size, demos_or_policy, pretrain_or_train, discounted_rew):
"""
this function should only be called when it's possible to produce trajectory pairs from the buffer
"""
# zero the gradient buffer for all the reward networks
criterion = torch.nn.CrossEntropyLoss(weight=None, reduction='mean')
losses_all_nets = []
accuracies_all_nets = []
# print(f"***************** Updating reward_obj: {rew_obj_idx} \n")
loss_per_rew_net = []
accuracy_per_rew_net = []
pairs_all = []
returns_all = []
pair_select_time_total = 0
load_trajs_time_total = 0
rew_update_time_total = 0
for batch_counter in range(num_batches):
# Iterate over all reward networks for training
for rew_obj_idx, reward_obj in enumerate(self.reward_objs):
loss_item, accuracy, pairs, returns, pair_select_time, rew_update_time, time_loading_trajs = self._grad_individual_rew_obj(batch_size, reward_obj, criterion, discounted_rew)
pair_select_time_total += pair_select_time
rew_update_time_total += rew_update_time
load_trajs_time_total += time_loading_trajs
pairs_all.extend(pairs)
returns_all.extend(returns)
if loss_item != "no pair":
loss_per_rew_net.append(loss_item)
accuracy_per_rew_net.append(accuracy)
# Here, after all updates, we write onto the rew_losses
assert len(loss_per_rew_net) > 0
mean_g = np.mean(loss_per_rew_net)
std_g = np.std(loss_per_rew_net)
mean_accuracy = np.mean(accuracy_per_rew_net)
std_accuracy = np.std(accuracy_per_rew_net)
with open(self.log_dir + f"/rew_losses.txt", "a") as file:
file.write(f" {mean_g:.10f} {std_g:.10f} {mean_accuracy:.10f} {std_accuracy:.10f} {pair_select_time_total:>5} {rew_update_time_total:>5} {overal_tr_iter_idx:>5} \n")
# log magnitute of reward weights
reward_weights_list = [torch.norm(reward_obj.get_flat_weights()) for reward_obj in self.reward_objs]
reward_weights_mean = np.mean([item.item() for item in reward_weights_list])
reward_weights_std = np.std([item.item() for item in reward_weights_list])
reward_weights_min = np.min([item.item() for item in reward_weights_list])
reward_weights_max = np.max([item.item() for item in reward_weights_list])
with open(self.log_dir + f"/rew_weights_stats.txt", "a") as file:
file.write(f" {reward_weights_mean:.10f} {reward_weights_std:.10f} {reward_weights_min:.10f} {reward_weights_max:.10f} {overal_tr_iter_idx:>5} \n")
# validation is performed after all batches are used for training
if not self.myargs.skip_rew_eval:
# Iterate over all reward networks for validation
start = time.time()
loss_per_rew_net = []
accuracy_per_rew_net = []
for rew_obj_idx, reward_obj in enumerate(self.reward_objs):
if self.val_pairs:
loss_item, accuracy = self._individual_rew_obj_validation(self.val_pairs, self.val_pairs_returns, reward_obj, criterion, self.myargs.discounted_rew)
loss_per_rew_net.append(loss_item)
accuracy_per_rew_net.append(accuracy)
mean_g = np.mean(loss_per_rew_net)
std_g = np.std(loss_per_rew_net)
mean_accuracy = np.mean(accuracy_per_rew_net)
std_accuracy = np.std(accuracy_per_rew_net)
end = time.time()
total_time_rew_eval = end - start
with open(self.log_dir + f"/rew_losses_val.txt", "a") as file:
file.write(f" {mean_g:.10f} {std_g:.10f} {mean_accuracy:.10f} {std_accuracy:.10f} {total_time_rew_eval:>5} {overal_tr_iter_idx:>5} \n")
return pairs_all, returns_all, pair_select_time_total, rew_update_time_total, load_trajs_time_total
def _individual_rew_obj_validation(self, pairs, returns, reward_obj, criterion, discounted_rew):
"""
uses the validation pairs and returns to compute the validaion accuracy of the reward networks
"""
# ***********************************
with torch.no_grad():
return_traj_preds_list = []
for (traj_i, traj_j), (rank_i, rank_j) in zip(pairs, returns):
# return_traj = torch.zeros((num_pairs,1), requires_grad=True, device=self.device)
# grad_theta = torch.zeros(agent.theta_size)
# return_theta_traj_j = self.return_theta_traj_calc(traj_j return_traj_j, idx)
# return_theta_traj_i = self.return_theta_traj_calc(traj_j return_traj_i, idx)
assert rank_i != rank_j
reward_input_batch_j = reward_obj.reward_net_input_batch_traj_method(traj_j)
reward_input_batch_i = reward_obj.reward_net_input_batch_traj_method(traj_i)
reward_output_batch_j = reward_obj.reward_net(reward_input_batch_j)
reward_output_batch_i = reward_obj.reward_net(reward_input_batch_i)
if discounted_rew:
num_rows = reward_output_batch_j.size()[0]
weights = torch.tensor([self.myargs.gamma**idx for idx in range(num_rows)], device=self.device)
weights = torch.unsqueeze(weights, dim=1)
reward_sum_j = torch.unsqueeze(torch.sum(weights * reward_output_batch_j, dim=0), dim=0) # element-wise multiplication
reward_sum_i = torch.unsqueeze(torch.sum(weights * reward_output_batch_i, dim=0), dim=0)
else:
reward_sum_j = torch.unsqueeze(torch.sum(reward_output_batch_j, dim=0), dim=0)
reward_sum_i = torch.unsqueeze(torch.sum(reward_output_batch_i, dim=0), dim=0)
if rank_j > rank_i:
return_sum_pair = torch.cat([reward_sum_j, reward_sum_i], dim=1)
return_traj_preds_list.append(return_sum_pair)
elif rank_j < rank_i:
return_sum_pair = torch.cat([reward_sum_i, reward_sum_j], dim=1)
return_traj_preds_list.append(return_sum_pair)
if len(return_traj_preds_list) > 0:
# update the reward function after every batch_size number of pairs
return_traj_preds = torch.cat(return_traj_preds_list, dim=0)
high_return_idx = torch.zeros((len(return_traj_preds_list)), dtype=torch.long, requires_grad=False, device=self.device)
accuracy = self.calc_accuracy(return_traj_preds)
loss = criterion(return_traj_preds, high_return_idx)
return loss.item(), accuracy
else:
return "no pair", "no pair"
def _grad_individual_rew_obj(self, batch_size, reward_obj, criterion, discounted_rew):
"""
reads collected returns from self.new_trajs_returns_list and uses self.save_path_new_trajs
"""
start = time.time()
if self.sample_trajs_from_memory:
pairs, returns, time_loading_trajs = myutils.create_pairs_distance_apart_device_memory(self.new_trajs_list, self.new_trajs_returns_list, batch_size, self.myargs.priority_sampling, self.device, self.init_params["difference_factor"])
else:
pairs, returns, time_loading_trajs = myutils.create_pairs_distance_apart_device_hardDrive(self.save_path_new_trajs, self.new_trajs_returns_list, batch_size, self.myargs.priority_sampling, self.device)
pair_select_time = time.time() - start
# pairs, returns = myutils.create_pairs_no_step_no_subsample(ranked_traj_list, traj_returns, batch_size, self.myargs.priority_sampling)
# if any pair is returned, the returns should be different as this is guaranteed in myutils.create_pairs_no_step
# pairs.extend(pairs_raw), returns.extend(returns_raw)
# ***********************************
start = time.time()
reward_obj.reward_net.zero_grad()
return_traj_preds_list = []
if pairs:
for (traj_i, traj_j), (rank_i, rank_j) in zip(pairs, returns):
# return_traj = torch.zeros((num_pairs,1), requires_grad=True, device=self.device)
# grad_theta = torch.zeros(agent.theta_size)
# return_theta_traj_j = self.return_theta_traj_calc(traj_j return_traj_j, idx)
# return_theta_traj_i = self.return_theta_traj_calc(traj_j return_traj_i, idx)
assert rank_i != rank_j
reward_input_batch_j = reward_obj.reward_net_input_batch_traj_method(traj_j)
reward_input_batch_i = reward_obj.reward_net_input_batch_traj_method(traj_i)
reward_output_batch_j = reward_obj.reward_net(reward_input_batch_j)
reward_output_batch_i = reward_obj.reward_net(reward_input_batch_i)
if discounted_rew:
num_rows = reward_output_batch_j.size()[0]
weights = torch.tensor([self.myargs.gamma**idx for idx in range(num_rows)], device=self.device)
weights = torch.unsqueeze(weights, dim=1)
reward_sum_j = torch.unsqueeze(torch.sum(weights * reward_output_batch_j, dim=0), dim=0) # element-wise multiplication
reward_sum_i = torch.unsqueeze(torch.sum(weights * reward_output_batch_i, dim=0), dim=0)
else:
reward_sum_j = torch.unsqueeze(torch.sum(reward_output_batch_j, dim=0), dim=0)
reward_sum_i = torch.unsqueeze(torch.sum(reward_output_batch_i, dim=0), dim=0)
if rank_j > rank_i:
return_sum_pair = torch.cat([reward_sum_j, reward_sum_i], dim=1)
return_traj_preds_list.append(return_sum_pair)
elif rank_j < rank_i:
return_sum_pair = torch.cat([reward_sum_i, reward_sum_j], dim=1)
return_traj_preds_list.append(return_sum_pair)
# update the reward function after every batch_size number of pairs
return_traj_preds = torch.cat(return_traj_preds_list, dim=0)
high_return_idx = torch.zeros((len(return_traj_preds_list)), dtype=torch.long, requires_grad=False, device=self.device)
accuracy = self.calc_accuracy(return_traj_preds)
loss = criterion(return_traj_preds, high_return_idx)
loss.backward()
reward_obj.optimizer.step()
rew_update_time = time.time() - start
return loss.item(), accuracy, pairs, returns, pair_select_time, rew_update_time, time_loading_trajs
else:
return "no pair", "no pair", "no pair", "no pair", pair_select_time, 0, 0
def calc_accuracy(self, return_traj_preds):
num_total = return_traj_preds.size()[0]
num_correct = 0
for i in range(num_total):
if return_traj_preds[i,0] > return_traj_preds[i,1]:
num_correct += 1
return num_correct / num_total
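# Equivalent vectorized form (illustrative alternative, not used above):
#   accuracy = (return_traj_preds[:, 0] > return_traj_preds[:, 1]).float().mean().item()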
def calc_sparse_reward(self, done, infos, displacement_forward_till_rew, steps_till_rew, displacement_forward_episode_total,
num_succ_run_forward, num_steps_taken_to_rew, num_succ_not_done, reward_GT, num_envs, myargs):
"""
unitsV2 uses myargs.num_steps as the maximum possible number of steps
"""
sparseness = myargs.sparseness
vel_thresh = 0
reward_sparse = torch.zeros(num_envs)
displacement_forward_step = np.zeros(num_envs)
if myargs.sparse_rew_type == "steps":
if myargs.env_name in ["InvertedPendulum-v2", "CartPole-v0"]:
for idx, done_proc in enumerate(done):
if not done_proc:
num_succ_not_done[idx] += 1
else:
num_succ_not_done[idx] = 0
if num_succ_not_done[idx] >= sparseness:
reward_sparse[idx] = 1
else:
reward_sparse[idx] = 0
elif myargs.env_name == "InvertedDoublePendulum-v2":
for idx, info in enumerate(infos):
angle_range = sparseness
angles = info["angles"]
if abs(angles[1]) < angle_range*math.pi/180:
reward_sparse[idx] = 1
else:
reward_sparse[idx] = 0
else:
for idx, info in enumerate(infos):
if info['reward_run'] > vel_thresh:
num_succ_run_forward[idx] += 1
else:
num_succ_run_forward[idx] = 0
if num_succ_run_forward[idx] >= sparseness:
reward_sparse[idx] = 1
else:
reward_sparse[idx] = 0
elif myargs.sparse_rew_type in ["unitsV2", "units"]:
for idx, info in enumerate(infos):
displacement_forward_step[idx] = info["x_position"] - info["x_position_before"]
displacement_forward_till_rew += displacement_forward_step
displacement_forward_episode_total += displacement_forward_step
steps_till_rew += np.ones(num_envs)
for idx in range(np.shape(displacement_forward_till_rew)[0]):
# reward_sparse[idx] = myargs.num_steps * (displacement_forward_till_rew[idx] // sparseness) / steps_till_rew[idx]
if displacement_forward_till_rew[idx] > sparseness:
if myargs.sparse_rew_type == "unitsV2":
reward_sparse[idx] = (2 - (steps_till_rew[idx] / myargs.num_steps)) * (displacement_forward_till_rew[idx] // sparseness)
elif myargs.sparse_rew_type == "units":
reward_sparse[idx] = displacement_forward_till_rew[idx] // sparseness
displacement_forward_till_rew[idx] = 0
steps_till_rew[idx] = 0
else:
reward_sparse[idx] = 0
# elif myargs.sparse_rew_type in ["episodic"]:
# steps_till_rew += np.ones(num_envs)
# if myargs.env_name in ["Reacher-v2", "MountainCar-v0"]:
# done = [item['done_dist'] for item in infos]
# for idx, done_proc in enumerate(done):
# if done_proc:
# reward_sparse[idx] = 1000/steps_till_rew[idx]
# steps_till_rew[idx] = 0
# else:
# reward_sparse[idx] = 0 # This is redundant, reward_sparse is zero by default
elif myargs.sparse_rew_type in ["episodic"]:
if myargs.env_name in ["MountainCar-v0", "Reacher-v2", "Acrobot-v1", "Thrower-v2"] or myargs.env_name in ["HalfCheetah-v3", "Hopper-v3", "Walker2d-v3", "Swimmer-v3"]:
done = [item['done_dist'] for item in infos]
for idx, done_proc in enumerate(done):
if done_proc:
reward_sparse[idx] = 1000
else:
raise Exception("Issues with myargs.sparse_rew_type")
# If done is reached, several per-process counters need to be reset to zero,
# whether done was triggered by the end of the episode, by failure,
# or by reaching a goal state
for idx, done_ in enumerate(done):
if done_:
displacement_forward_till_rew[idx] = 0
steps_till_rew[idx] = 0
num_succ_run_forward[idx] = 0
num_steps_taken_to_rew[idx] = 0
# reward_sparse[idx] = 0 # Farzan: We don't need to manually set the sparse reward to zero, the masks will take care of this
return reward_sparse, displacement_forward_till_rew, steps_till_rew, displacement_forward_episode_total, num_succ_run_forward, num_steps_taken_to_rew, num_succ_not_done
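# Worked example for the "units" type (illustrative numbers): with
# sparseness = 1.0 and a forward displacement of 0.3 per step, the accumulator
# reaches 1.2 after four steps; 1.2 > 1.0, so the agent receives
# reward_sparse = 1.2 // 1.0 = 1 and the accumulator and step counter reset.
# "unitsV2" additionally scales that reward by (2 - steps_till_rew / num_steps),
# rewarding faster progress toward the next unit.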
def produce_trajs_from_policy_sparsified_reward(self, actor_critic, number_trajs, sparseness, traj_length, env_name, is_random):
'''
This function attaches the sparsified reward to the produced states.
'''
kwargs, myargs = self.kwargs, self.myargs
torch.set_num_threads(1)
# CREATE AN ENVIRONMENT
num_envs = 1
log_dir = None
seed = np.random.randint(0, 10000)
env = make_vec_envs(myargs.env_name, seed, num_envs,
myargs.gamma, log_dir, self.device, allow_early_resets=False, num_frame_stack=2, **kwargs)
# We need to use the same statistics for normalization as used in training
# vec_norm = utils.get_vec_normalize(env)
vel_thresh = 0
all_trajs = []
for traj_idx in range(number_trajs):
traj = []
recurrent_hidden_states = torch.zeros(1,actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
obs = env.reset()
# LOOP FOR ONE TRAJECTORY
transition_counter = 0
# num_succ_run_forward = 0
# num_succ_not_done = 0
num_succ_run_forward = np.zeros(num_envs)
num_succ_not_done = np.zeros(num_envs)
displacement_forward_till_rew = np.zeros(num_envs)
steps_till_rew = np.zeros(num_envs)
displacement_forward_episode_total = np.zeros(num_envs)
num_steps_taken_to_rew = np.zeros(num_envs)
while transition_counter <= traj_length:
transition_counter += 1
if not is_random:
with torch.no_grad():
value, action, _, recurrent_hidden_states = actor_critic.act(
obs, recurrent_hidden_states, masks, deterministic=False)
else:
action = env.action_space.sample()
action = torch.tensor(action, device=self.device)
if env_name in ["CartPole-v0", "MountainCar-v0", "Acrobot-v1"]: # Added by <NAME>
# action = action[0]
action_fed = torch.squeeze(action)
else:
action_fed = action
obs, reward_dense, done, infos = env.step(action_fed)
(reward_sparse, displacement_forward_till_rew, steps_till_rew, displacement_forward_episode_total,
num_succ_run_forward, num_steps_taken_to_rew, num_succ_not_done) = self.calc_sparse_reward(done, infos, displacement_forward_till_rew,
steps_till_rew, displacement_forward_episode_total, num_succ_run_forward, num_steps_taken_to_rew, num_succ_not_done, reward_dense, num_envs, myargs)
# if myargs.rew_cntr == "True":
# reward_cntr = torch.tensor(infos[0]['reward_cntr'], device=self.device)
# total_rews_step_sparse = reward_sparse + self.myargs.cntr_coeff * reward_cntr
# elif myargs.rew_cntr == "False":
traj.append([obs[0].clone().detach().cpu(), reward_sparse])
# ALL TRAJS ARE PRODUCED AT THIS POINT
all_trajs.append(traj)
env.close()
return all_trajs
def produce_trajs_from_policy(self, actor_critic, number_trajs, traj_length, kwargs, myargs):
# torch.set_num_threads(1)
# print (f"******************** myargs.cuda_num: {myargs.cuda_num}")
# print (f"******************** myargs.cuda: {myargs.cuda}")
# CUDA_VISIBLE_DEVICES_raw_str = os.environ["CUDA_VISIBLE_DEVICES"]
# print(f"******************** CUDA_VISIBLE_DEVICES: {CUDA_VISIBLE_DEVICES_raw_str} ********************")
# if not myargs.cuda_num:
# CUDA_VISIBLE_DEVICES_raw_str_splitted = CUDA_VISIBLE_DEVICES_raw_str.split(",")
# CUDA_VISIBLE_DEVICES_raw_str_splitted_list_int = [int(item) for item in CUDA_VISIBLE_DEVICES_raw_str_splitted]
# cuda_num = f"cuda" # :{CUDA_VISIBLE_DEVICES_raw_str_splitted_list_int[0]}
# print(f"******************** No cuda_num provided, cuda_num set to what slurm sets NVIDIA_VISIBLE_DEVICES")
# print(f"******************** cuda_num: {cuda_num} ********************")
# self.device = torch.device(cuda_num if myargs.cuda else "cpu")
# else:
# print(f"******************** cuda-num provided")
# self.device = torch.device(myargs.cuda_num if myargs.cuda else "cpu")
# print(f"******************** self.device: {self.device} \n******************** type(self.device): {type(self.device)} ")
# print(f"torch.cuda.is_available(): {torch.cuda.is_available()}")
# CREATE AN ENVIRONMENT
log_dir = None
seed = np.random.randint(0, 10000)
num_envs = 1
env = make_vec_envs(myargs.env_name, seed, num_envs,
myargs.gamma, log_dir, self.device, allow_early_resets=True, num_frame_stack=2, **kwargs)
# We need to use the same statistics for normalization as used in training
vec_norm = utils.get_vec_normalize(env)
all_trajs = []
for traj_idx in range(number_trajs):
traj = []
recurrent_hidden_states = torch.zeros(1,actor_critic.recurrent_hidden_state_size)
masks = torch.zeros(1, 1)
obs = env.reset()
# traj.append([obs,0])
# LOOP FOR ONE TRAJECTORY
transition_counter = 0
while transition_counter <= traj_length:
transition_counter += 1
with torch.no_grad():
value, action, _, recurrent_hidden_states = actor_critic.act(
obs, recurrent_hidden_states, masks, deterministic=False)
# Observe reward and next obs
obs, reward, done, _ = env.step(action)
# traj.append([copy.deepcopy(obs),copy.deepcopy(reward)])
traj.append([obs[0].clone().detach().cpu(), copy.deepcopy(reward)])
# ALL TRAJS ARE PRODUCED AT THIS POINT
all_trajs.append(traj)
env.close()
return all_trajs
def _save_attributes_and_hyperparameters(self):
print ("saving attributes_and_hyperparameters .....")
with open(self.log_dir + "/init_params.txt", "w") as file:
for key in self.init_params:
file.write(f"{key} : {self.init_params[key]} \n" )
args_dict = vars(self.myargs)
with open(self.log_dir +"/args_dict.pkl", "wb") as f:
pickle.dump(args_dict, f)
with open(self.log_dir + "/myargs.txt", "w") as file:
for key in args_dict:
file.write(f"{key} : {args_dict[key]} \n" )
with open(self.log_dir +"/kwargs.pkl", "wb") as f:
pickle.dump(self.kwargs, f)
with open(self.log_dir + "/kwargs.txt", "w") as file:
for key in self.kwargs:
file.write(f"{key} : {self.kwargs[key]} \n" )
def _specify_env_rew_type(self, is_atari):
no_run_no_cntr = False
no_run = False
if is_atari:
no_run_no_cntr, no_run = True, True
elif self.myargs.env_name in ["MountainCar-v0", "Acrobot-v1", "CartPole-v0", "InvertedPendulum-v2", "InvertedDoublePendulum-v2"]:
no_run_no_cntr = True
elif self.myargs.env_name in ["Reacher-v2", "Thrower-v2", "MountainCarContinuous-v0"]:
no_run = True
else:
pass
return no_run, no_run_no_cntr
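# Flag summary (as consumed by _calc_rews_run_cntr_step below):
#   no_run_no_cntr = True -> env infos provide neither 'reward_run' nor
#                            'reward_cntr' (Atari and the classic-control /
#                            pendulum tasks); both are treated as zero.
#   no_run = True         -> env infos provide 'reward_cntr' but no
#                            'reward_run' (e.g. Reacher, Thrower).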
def _calc_rews_run_cntr_step(self, no_run_no_cntr, no_run, infos):
if no_run_no_cntr:
rews_cntr_step = [0 for item in infos]
rews_run_step = [0 for item in infos]
elif no_run:
rews_cntr_step = [item['reward_cntr'] for item in infos]
rews_run_step = [0 for item in infos]
else:
rews_cntr_step = [item['reward_cntr'] for item in infos]
rews_run_step = [item['reward_run'] for item in infos]
return rews_run_step, rews_cntr_step
def memory_usage_psutil(self):
# return the memory usage in MB
import psutil
process = psutil.Process(os.getpid())
mem = process.memory_info().rss / float(2 ** 20)
return mem
class produce_ranked_trajs(train_sparse_rank):
# ADDED BY <NAME>
# *****************************
"""
Purpose: Reads checkpointed policies and produces a given number of rollouts for each policy
"""
def __init__(self, kwargs, myargs):
if not myargs.seed == -12345:
# seed is provided as command line argument and nothing needs to be done
pass
else:
if os.getenv('SEED'):
myargs.seed = int(os.getenv('SEED'))
else:
raise ValueError('SEED not provided as command line argument or as an environment variable')
myargs.save_name = myargs.save_name + f"-s{myargs.seed}"
myargs.saved_models_name = myargs.saved_models_name + f"-s{myargs.seed}"
self.kwargs, self.myargs = kwargs, myargs
# creating the folders
self.saved_models_name = myargs.saved_models_name
trajs_address = os.path.join("./ranked_trajs", myargs.save_name)
self.save_path_new_trajs = os.path.join(trajs_address, "train") # overwrites the self.save_path_new_trajs in the parent class
self.save_path_new_trajs_val = os.path.join(trajs_address, "val")
self.new_trajs_list = []
self.new_trajs_returns_list = []
self.new_trajs_returns_list_val = []
self.new_trajs_last_idx = 0
utils.create_dir(trajs_address)
utils.create_dir(self.save_path_new_trajs)
utils.create_dir(self.save_path_new_trajs_val)
# num_updates = int(myargs.num_env_steps_tr) // myargs.num_steps // myargs.num_processes
# suffix_list = np.arange(0, num_updates, myargs.save_interval)
# names = [myargs.saved_models_name+"_"+str(item)+".pt" for item in suffix_list]
from os import listdir
from os.path import isfile, join
# names.append(myargs.saved_models_name+"_"+str(num_updates-1)+".pt")
policies = []
log_dir = "NONE"
torch.set_num_threads(1)
self.device = myutils.assign_gpu_device(myargs)
np.random.seed(myargs.seed)
saved_models_address = os.path.join("./trained_models", myargs.algo, self.saved_models_name)
names = listdir(saved_models_address)
for name in names:
address = os.path.join(saved_models_address,name)
actor_critic, _ = torch.load(address, map_location=self.device)
policies.append(actor_critic)
# ***************************** END
# if "devv" in myargs.save_name:
# num_skip_policy = 20 # for dev put this 10
# else:
# num_skip_policy = 2
validation_ratio = 10
produced_trajs = []
num_pol_samples = 1
print(f"Loading policies and producing demo trajectories ..... {myargs.save_name}")
for pol_counter, actor_critic in enumerate(policies[0:-1]):
print(f"policy number: {pol_counter}")
actor_critic.to(self.device)
produced_traj = self.produce_trajs_from_policy_sparsified_reward(actor_critic, 1, myargs.sparseness, myargs.traj_length, myargs.env_name, is_random=False)
if pol_counter % validation_ratio == 0 and pol_counter > 0:
self._add_trajs_to_new_trajs_list_hardDrive([produced_traj[0]], 0, False, log_dir, add_only_non_zero_trajs=False, address=self.save_path_new_trajs_val, is_val=True)
else:
self._add_trajs_to_new_trajs_list_hardDrive([produced_traj[0]], 0, False, log_dir, add_only_non_zero_trajs=False, address=self.save_path_new_trajs, is_val=False)
# save the returns
with open(self.save_path_new_trajs + "/new_trajs_returns_list.pkl", "wb") as f:
pickle.dump(self.new_trajs_returns_list, f)
# produced_trajs = []
# print(f"Loading policies and producing validation trajectories ..... {myargs.save_name}")
# for counter, actor_critic in enumerate(policies[1:-1:num_skip_policy]):
# print(f"policy number: {counter}")
# actor_critic.to(self.device)
# produced_traj = self.produce_trajs_from_policy(actor_critic, 1, myargs.traj_length, kwargs, myargs)
# produced_trajs.append(produced_traj[0])
# rew_mean, rew_range, rew_std, rew_mean_new, rew_range_new, rew_std_new = self._add_trajs_to_new_trajs_list_hardDrive(produced_trajs, add_only_non_zero_trajs=False, address=self.save_path_new_trajs_val, is_val=True)
# # save the returns
# with open(self.save_path_new_trajs_val + "/new_trajs_returns_list.pkl", "wb") as f:
# pickle.dump(self.new_trajs_returns_list_val, f)
# for policy_counter, policy in enumerate(policies):
# if policy_counter % num_skip_policy == 0:
# print(f"policy number: {policy_counter}")
# for _ in range(num_pol_samples):
# actor_critic = policy
# actor_critic = actor_critic.to(device)
# traj = []
# # CREATE AN ENVIRONMENT
# seed = np.random.randint(1,100000)
# env = make_vec_envs(myargs.env_name, seed, 1,
# myargs.gamma, log_dir, device, False, num_frame_stack=2, **kwargs)
# # We need to use the same statistics for normalization as used in training
# vec_norm = utils.get_vec_normalize(env)
# if vec_norm is not None:
# vec_norm.eval()
# vec_norm.ob_rms = ob_rms
# recurrent_hidden_states = torch.zeros(1,actor_critic.recurrent_hidden_state_size)
# masks = torch.zeros(1, 1)
# obs = env.reset()
# # LOOP FOR ONE TRAJECTORY
# done = False
# done_penalty = torch.tensor(-1000) # TODO: this should be investigated
# episode_reward = 0
# counter = 0
# while counter <= myargs.traj_length:
# counter += 1
# with torch.no_grad():
# value, action, _, recurrent_hidden_states = actor_critic.act(
# obs, recurrent_hidden_states, masks, deterministic=False)
# # env.render()
# # Obser reward and next obs
# obs, reward, done, info = env.step(action)
# # if done:
# # reward = done_penalty
# traj.append([copy.deepcopy(obs),reward])
# # if 'episode' in info[0].keys():
# # episode_reward = info['episode']['r']
# # ALL TRAJS ARE PRODUCED AT THIS POINT
# all_trajs.append(traj)
# with open(trajs_address + '/all_trajectories', 'wb') as f:
# torch.save(all_trajs, f)
class train_GT_policy(train_sparse_rank):
def __init__(self, kwargs, myargs):
# ADDED BY <NAME>
# ***************************** START
# log_dir = os.path.expanduser(myargs.log_dir) + "/" + myargs.save_name
self.kwargs, self.myargs = kwargs, myargs
self.init_params = []
self.log_dir = myargs.log_dir + "/" + myargs.save_name
self.eval_log_dir = self.log_dir + "_eval"
log_file_name = myargs.env_name
# utils.cleanup_log_dir(log_dir)
# utils.cleanup_log_dir(eval_log_dir)
utils.create_dir(self.log_dir)
utils.create_dir(self.eval_log_dir)
# save_path_policy is for storing the trained model
save_path_policy = os.path.join(myargs.save_dir, myargs.algo, myargs.save_name)
utils.create_dir(save_path_policy)
# ***************************** END
self._save_attributes_and_hyperparameters()
torch.manual_seed(myargs.seed)
torch.cuda.manual_seed_all(myargs.seed)
np.random.seed(myargs.seed)
if myargs.cuda and torch.cuda.is_available() and myargs.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
device = myutils.assign_gpu_device(myargs)
envs = make_vec_envs(myargs.env_name, myargs.seed, myargs.num_processes,
myargs.gamma, self.log_dir, device, allow_early_resets=False, num_frame_stack=2, **kwargs)
# envs = gym.make(myargs.env_name, **kwargs)
# envs = VecPyTorch(envs, device)
# The following block added by <NAME>
# envs = ProcgenEnv(num_envs=myargs.num_processes, env_name="heistpp", **kwargs)
# if len(envs.observation_space.shape) == 3:
# envs = WarpFrame(envs, dict_space_key="rgb")
# if len(envs.observation_space.shape) == 1 and myargs.do_normalize == "True":
# if gamma is None:
# envs = VecNormalize(envs, ret=False)
# else:
# envs = VecNormalize(envs, gamma=gamma)
# envs = VecPyTorch(envs, device, myargs)
if self.myargs.env_name in ["MountainCar-v0", "Reacher-v2", "Acrobot-v1", "Thrower-v2"]:
hidden_size_policy = 10
else:
hidden_size_policy = 64
actor_critic = Policy(
envs.observation_space.shape,
envs.action_space,
device,
base_kwargs={'recurrent': myargs.recurrent_policy, 'hidden_size': hidden_size_policy})
actor_critic.to(device)
if myargs.algo == 'a2c':
agent = algo.A2C_ACKTR(
actor_critic,
myargs.value_loss_coef,
myargs.entropy_coef,
lr=myargs.lr,
eps=myargs.eps,
alpha=myargs.alpha,
max_grad_norm=myargs.max_grad_norm)
elif myargs.algo == 'ppo':
agent = algo.PPO(
actor_critic,
myargs.clip_param,
myargs.ppo_epoch,
myargs.num_mini_batch,
myargs.value_loss_coef,
myargs.entropy_coef,
lr=myargs.lr,
eps=myargs.eps,
max_grad_norm=myargs.max_grad_norm)
elif myargs.algo == 'acktr':
agent = algo.A2C_ACKTR(
actor_critic, myargs.value_loss_coef, myargs.entropy_coef, acktr=True)
if myargs.gail:
assert len(envs.observation_space.shape) == 1
discr = gail.Discriminator(
envs.observation_space.shape[0] + envs.action_space.shape[0], 100,
device)
file_name = os.path.join(
myargs.gail_experts_dir, "trajs_{}.pt".format(
myargs.env_name.split('-')[0].lower()))
expert_dataset = gail.ExpertDataset(
file_name, num_trajectories=4, subsample_frequency=20)
drop_last = len(expert_dataset) > myargs.gail_batch_size
gail_train_loader = torch.utils.data.DataLoader(
dataset=expert_dataset,
batch_size=myargs.gail_batch_size,
shuffle=True,
drop_last=drop_last)
rollouts = RolloutStorage(myargs.num_steps, myargs.num_processes,
envs.observation_space.shape, envs.action_space,
actor_critic.recurrent_hidden_state_size)
obs = envs.reset()
rollouts.obs[0].copy_(obs)
rollouts.to(device)
episode_rews_from_info = deque(maxlen=10)
start = time.time()
num_updates = int(myargs.num_env_steps_tr) // myargs.num_steps // myargs.num_processes
with open(self.log_dir + "/" + log_file_name + ".txt", "w") as file:
file.write("Updates , num timesteps , FPS, number of Last training episodes, dist_entropy, value_loss, action_loss, mean reward, median reward, min reward, max reward \n")
with open(self.eval_log_dir + "/" + log_file_name + "_eval.txt", "w") as file:
file.write("num_episodes, median_reward, max_reward \n")
# UPDATE POLICY *****************************************************
for j in range(num_updates):
if j % 5 == 0 and j != 0:
print (f'update number {j}, ...... {myargs.env_name}, total_time: {time.time()-start}')
if myargs.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(
agent.optimizer, j, num_updates,
agent.optimizer.lr if myargs.algo == "acktr" else myargs.lr)
for step in range(myargs.num_steps):
# Sample actions
with torch.no_grad():
value, action, action_log_prob, recurrent_hidden_states = actor_critic.act(
rollouts.obs[step], rollouts.recurrent_hidden_states[step],
rollouts.masks[step])
# Observe reward and next obs
if myargs.env_name in ["CartPole-v0", "MountainCar-v0", "Acrobot-v1"]: # Added by <NAME>
# action = action[0]
action_fed = torch.squeeze(action)
else:
action_fed = action
obs, reward, done, infos = envs.step(action_fed)
# reward = torch.zeros((8,1))
# envs.render()
for info in infos:
if 'episode' in info.keys():
episode_rews_from_info.append(info['episode']['r'])
# If done then clean the history of observations.
masks = torch.FloatTensor(
[[0.0] if done_ else [1.0] for done_ in done])
bad_masks = torch.FloatTensor(
[[0.0] if 'bad_transition' in info.keys() else [1.0]
for info in infos])
rollouts.insert(copy.deepcopy(obs), recurrent_hidden_states, action,
action_log_prob, value, reward, masks, bad_masks)
# Update value function at the end of episode
with torch.no_grad():
next_value = actor_critic.get_value(
rollouts.obs[-1], rollouts.recurrent_hidden_states[-1],
rollouts.masks[-1]).detach()
if myargs.gail:
if j >= 10:
envs.venv.eval()
gail_epoch = myargs.gail_epoch
if j < 10:
gail_epoch = 100 # Warm up
for _ in range(gail_epoch):
discr.update(gail_train_loader, rollouts,
utils.get_vec_normalize(envs)._obfilt)
for step in range(myargs.num_steps):
rollouts.rewards[step] = discr.predict_reward(
rollouts.obs[step], rollouts.actions[step], myargs.gamma,
rollouts.masks[step])
rollouts.compute_returns(next_value, myargs.use_gae, myargs.gamma,
myargs.gae_lambda, myargs.use_proper_time_limits)
value_loss, action_loss, dist_entropy = agent.update(rollouts)
rollouts.after_update()
# save policy
if (j % myargs.save_interval == 0 or j == num_updates - 1) and myargs.save_dir != "":
model_save_address = os.path.join(save_path_policy, myargs.save_name + "_" + str(j) + ".pt")
torch.save([
actor_critic,
getattr(utils.get_vec_normalize(envs), 'ob_rms', None)
], model_save_address)
if j % myargs.log_interval == 0 and len(episode_rews_from_info) > 1:
total_num_steps = (j + 1) * myargs.num_processes * myargs.num_steps
end = time.time()
# print(
# "Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n"
# .format(j, total_num_steps,
# int(total_num_steps / (end - start)),
# len(episode_rews_from_info), np.mean(episode_rews_from_info),
# np.median(episode_rews_from_info), np.min(episode_rews_from_info),
# np.max(episode_rews_from_info), dist_entropy, value_loss,
# action_loss))
# with open(log_dir + "/" + log_file_name + ".txt", "a") as file2:
# file2.write("Updates {}, num timesteps {}, FPS {} \n Last {} training episodes: mean/median reward {:.1f}/{:.1f}, min/max reward {:.1f}/{:.1f}\n"
# .format(j, total_num_steps,
# int(total_num_steps / (end - start)),
# len(episode_rews_from_info), np.mean(episode_rews_from_info),
# np.median(episode_rews_from_info), np.min(episode_rews_from_info),
# np.max(episode_rews_from_info), dist_entropy, value_loss,
# action_loss))
with open(self.log_dir + "/" + log_file_name + ".txt", "a") as file:
file.write(f'{j:>5} {total_num_steps:>8} {int(total_num_steps / (end - start)):>7} {len(episode_rews_from_info):>4}\
{dist_entropy:.10} {value_loss:.10} {action_loss:.10} {np.mean(episode_rews_from_info):.10} \
{np.median(episode_rews_from_info):.10} {np.min(episode_rews_from_info):.10}\
{np.max(episode_rews_from_info):.10} \n')
# if (myargs.eval_interval is not None and len(episode_rewards) > 1
# and j % myargs.eval_interval == 0):
# ob_rms = utils.get_vec_normalize(envs).ob_rms
# evaluate(actor_critic, ob_rms, myargs.env_name, myargs.seed,
# myargs.num_processes, self.eval_log_dir, device)
class train_baseline_sparse_rew(train_sparse_rank):
def __init__(self, kwargs, myargs, init_params):
if myargs.seed == -12345:  # seed not provided on the command line; read it from an environment variable
if os.getenv('SEED'):
myargs.seed = int(os.getenv('SEED'))
else:
raise ValueError('SEED not provided as command line argument or as an environment variable')
else:
# seed is provided as command line argument and nothing needs to be done
pass
myargs.save_name = myargs.save_name + f"-s{myargs.seed}"
self.kwargs = kwargs
self.myargs = myargs
self.init_params = init_params
self.device = myutils.assign_gpu_device(self.myargs)
# Read ranked trajs
self.log_dir = myargs.log_dir + "/" + myargs.save_name
eval_log_dir = self.log_dir + "_eval"
self.log_file_name = myargs.env_name
# utils.cleanup_log_dir(log_dir)
# utils.cleanup_log_dir(eval_log_dir)
if not myargs.continue_:
utils.create_dir(self.log_dir)
# utils.create_dir(eval_log_dir)
# self.save_path_trained_models is for storing the trained model
self.save_path_trained_models = os.path.join(myargs.save_dir, myargs.algo, myargs.save_name)
if not myargs.continue_:
utils.create_dir(self.save_path_trained_models)
# # Create forlder for tensorboard
# self.writer = SummaryWriter(f'runs/visualization')
torch.manual_seed(myargs.seed)
torch.cuda.manual_seed_all(myargs.seed)
np.random.seed(myargs.seed)
if myargs.cuda and torch.cuda.is_available() and myargs.cuda_deterministic:
torch.backends.cudnn.benchmark = False
torch.backends.cudnn.deterministic = True
torch.set_num_threads(1)
self.envs = make_vec_envs(myargs.env_name, myargs.seed, myargs.num_processes,
myargs.gamma, self.log_dir, self.device, allow_early_resets=False, num_frame_stack=2, **kwargs)
# envs = ProcgenEnv(num_envs=myargs.env_name, env_name="heistpp", **kwargs)
self.is_atari = self.myargs.is_atari
# envs = gym.make(myargs.env_name, **kwargs)
if self.myargs.env_name in ["MountainCar-v0", "Reacher-v2", "Acrobot-v1", "Thrower-v2"]:
hidden_size_policy = 10
else:
hidden_size_policy = 64
if self.myargs.continue_:
# Load the pretrained policy
print(f'Loading policy for continuing run {self.myargs.save_name} .....')
model_save_address_policy = os.path.join(self.save_path_trained_models, self.myargs.save_name + ".pt")
self.actor_critic, ob_rms = torch.load(model_save_address_policy, map_location=self.device)
else:
self.actor_critic = Policy(
self.envs.observation_space.shape,
self.envs.action_space,
self.device,
base_kwargs={'recurrent': myargs.recurrent_policy, 'hidden_size': hidden_size_policy})
self.actor_critic.to(self.device)
if myargs.algo == 'a2c':
self.agent = algo.A2C_ACKTR(
self.actor_critic,
myargs.value_loss_coef,
myargs.entropy_coef,
lr=myargs.lr,
eps=myargs.eps,
alpha=myargs.alpha,
max_grad_norm=myargs.max_grad_norm)
elif myargs.algo == 'ppo':
self.agent = algo.PPO(
self.actor_critic,
myargs.clip_param,
myargs.ppo_epoch,
myargs.num_mini_batch,
myargs.value_loss_coef,
myargs.entropy_coef,
lr=myargs.lr,
eps=myargs.eps,
max_grad_norm=myargs.max_grad_norm)
elif myargs.algo == 'acktr':
self.agent = algo.A2C_ACKTR(
self.actor_critic, myargs.value_loss_coef, myargs.entropy_coef, acktr=True)
self.rollouts = RolloutStorage(myargs.num_steps, myargs.num_processes,
self.envs.observation_space.shape, self.envs.action_space,
self.actor_critic.recurrent_hidden_state_size)
obs = self.envs.reset()
self.rollouts.obs[0].copy_(obs)
self.rollouts.to(self.device)
if not myargs.continue_:
with open(self.log_dir + f"/policy.txt", "w") as file:
file.write("overal_tr_iter_idx, updates , num timesteps , FPS, number of Last training episodes, dist_entropy, value_loss, action_loss, mean reward, median reward, min reward, max reward \n")
self._save_attributes_and_hyperparameters()
def train(self):
# UPDATE POLICY *****************************************************
self.update_policy(pretrain_or_train="train")
def update_policy(self, pretrain_or_train):
if self.myargs.continue_:
# read the index of the last logged policy update from policy.txt
policy_stats_loaded = np.loadtxt(f"{self.log_dir}/policy.txt", skiprows=1)
last_j = int(policy_stats_loaded[-1,1])
del policy_stats_loaded
first_time_min_good_trajs = True
else:
last_j = 0
first_time_min_good_trajs = False
kwargs, myargs = self.kwargs, self.myargs
episode_rews_from_info = deque(maxlen=myargs.num_processes)
self.unrolled_trajs_all = []
start = time.time()
sparseness = myargs.sparseness
num_updates = int(myargs.num_env_steps_tr) // myargs.num_steps // myargs.num_processes
vel_thresh = 0
no_run, no_run_no_cntr = self._specify_env_rew_type(self.is_atari)
for j in range(last_j+1, last_j+1+num_updates):
# At each j, the policy is unrolled for up to myargs.num_steps steps, or until it reaches an
# absorbing state (e.g. failure)
if j % 5 == 0:
total_time = time.time() - start
print(f"Policy update {myargs.save_name}: {j}, total_time: {total_time}")
if myargs.use_linear_lr_decay:
# decrease learning rate linearly
utils.update_linear_schedule(
self.agent.optimizer, j, num_updates,
self.agent.optimizer.lr if myargs.algo == "acktr" else myargs.lr)
num_succ_run_forward = np.zeros(myargs.num_processes)
num_succ_not_done = np.zeros(myargs.num_processes)
return_GT_episode_cntr = np.zeros(myargs.num_processes)
return_GT_episode_run = np.zeros(myargs.num_processes)
return_sparse_episode = np.zeros(myargs.num_processes)
return_sparse_plus_cntr_episode = np.zeros(myargs.num_processes)
return_GT_episode_total = np.zeros(myargs.num_processes)
displacement_forward_till_rew = np.zeros(myargs.num_processes)
steps_till_rew = np.zeros(myargs.num_processes)
#!/usr/bin/env python2
"""
Script for running an FCIQMC calculation.
For help, run python fri.py -h
"""
import numpy
import argparse
from resipy import fci_utils
from resipy import fci_c_utils
from resipy import sparse_vector
from resipy import compress_utils
from resipy import io_utils
from resipy import near_uniform
def main():
args = _parse_args()
_describe_args(args)
h_core, eris, symm, hf_en = io_utils.read_in_hf(args.hf_path, args.frozen)
# Generate lookup tables for later use
byte_nums, byte_idx = fci_utils.gen_byte_table()
symm_lookup = fci_utils.gen_symm_lookup(8, symm)
n_orb = symm.shape[0]
hf_det = fci_utils.gen_hf_bitstring(n_orb, args.n_elec - args.frozen)
rngen_ptrs = near_uniform.initialize_mt(args.procs)
# Initialize solution vector
if args.restart:
ini_idx = numpy.load(args.restart + 'vec_idx.npy')
ini_val = numpy.load(args.restart + 'vec_val.npy').astype(numpy.float64)
en_shift = numpy.genfromtxt(args.restart + 'S.txt')[-1]
cmp_idx, cmp_vals = compress_utils.fri_1D(ini_val, args.sparsity)
ini_idx = ini_idx[cmp_idx]
ini_val = cmp_vals
last_norm = numpy.abs(cmp_vals).sum()
else:
ini_idx = numpy.array([hf_det], dtype=numpy.int64)
ini_val = numpy.array([1.])
# energy shift for controlling normalization
en_shift = args.initial_shift
last_norm = 1.
one_norm = last_norm
sol_vec = sparse_vector.SparseVector(ini_idx, ini_val)
occ_orbs = fci_c_utils.gen_orb_lists(sol_vec.indices, args.n_elec - args.frozen,
byte_nums, byte_idx)
results = io_utils.setup_results(args.result_int, args.result_dir,
args.rayleigh, args.interval, args.sampl_mode)
if args.sampl_mode != "all":
n_doub_ref = fci_utils.count_doubex(occ_orbs[0], symm, symm_lookup)
n_sing_ref = fci_utils.count_singex(
hf_det, occ_orbs[0], symm, symm_lookup)
num_hf = n_sing_ref + n_doub_ref
p_doub = n_doub_ref * 1.0 / num_hf
if args.sampl_mode != "all" and args.dist == "heat-bath_PP":
from resipy import heat_bath
occ1_probs, occ2_probs, exch_probs = heat_bath.set_up(args.frozen, eris)
if (args.sampl_mode == "fri" or args.sampl_mode == "fri_strat") and args.dist == "near_uniform":
from resipy import fri_near_uni
# Elements in the HF column of FCI matrix
hf_col = fci_utils.gen_hf_ex(
hf_det, occ_orbs[0], n_orb, symm, eris, args.frozen)
for iterat in range(args.max_iter):
mat_eval = 0
if args.sampl_mode == "all":
# Choose all double excitations
doub_orbs, doub_idx = fci_c_utils.all_doub_ex(
sol_vec.indices, occ_orbs, symm)
doub_probs = numpy.ones_like(doub_idx, dtype=numpy.float64)
# Choose all single excitations
sing_orbs, sing_idx = fci_c_utils.all_sing_ex(
sol_vec.indices, occ_orbs, symm)
sing_probs = numpy.ones_like(sing_idx, dtype=numpy.float64)
mat_eval = doub_probs.shape[0] + sing_probs.shape[0]
elif args.sampl_mode == "multinomial":
n_col, = compress_utils.sys_resample(numpy.abs(sol_vec.values) / one_norm, args.H_sample -
sol_vec.values.shape[0], ret_counts=True)
n_col += 1
n_doub_col, n_sing_col = near_uniform.bin_n_sing_doub(
n_col, p_doub)
if args.sampl_mode == "multinomial":
# Sample single excitations
sing_orbs, sing_probs, sing_idx = near_uniform.sing_multin(
sol_vec.indices, occ_orbs, symm, symm_lookup, n_sing_col, rngen_ptrs)
sing_probs *= (1 - p_doub) * n_col[sing_idx]
if args.dist == "near_uniform" and args.sampl_mode == "multinomial":
# Sample double excitations
doub_orbs, doub_probs, doub_idx = near_uniform.doub_multin(
sol_vec.indices, occ_orbs, symm, symm_lookup, n_doub_col, rngen_ptrs)
doub_probs *= p_doub * n_col[doub_idx]
elif args.dist == "near_uniform" and args.sampl_mode == "fri":
# Compress both excitations
doub_orbs, doub_probs, doub_idx, sing_orbs, sing_probs, sing_idx = fri_near_uni.cmp_hier(sol_vec, args.H_sample, p_doub,
occ_orbs, symm, symm_lookup)
elif args.dist == "near_uniform" and args.sampl_mode == "fri_strat":
# Compress both excitations
doub_orbs, doub_probs, doub_idx, sing_orbs, sing_probs, sing_idx = fri_near_uni.cmp_hier_strat(sol_vec, args.H_sample, p_doub,
occ_orbs, symm, symm_lookup,
num_hf, rngen_ptrs)
elif args.dist == "heat-bath_PP" and args.sampl_mode == "multinomial":
# Sample double excitations
doub_orbs, doub_probs, doub_idx = heat_bath.doub_multin(
occ1_probs, occ2_probs, exch_probs, sol_vec.indices, occ_orbs, symm, symm_lookup, n_doub_col, rngen_ptrs)
doub_probs *= p_doub * n_col[doub_idx]
elif args.dist == "heat-bath_PP" and args.sampl_mode == "fri":
doub_orbs, doub_probs, doub_idx, sing_orbs, sing_probs, sing_idx = heat_bath.fri_comp(
sol_vec, args.H_sample, occ1_probs, occ2_probs, exch_probs, p_doub, occ_orbs, symm, symm_lookup)
doub_matrel = fci_c_utils.doub_matr_el_nosgn(
doub_orbs, eris, args.frozen)
# Retain nonzero elements
doub_nonz = doub_matrel != 0
doub_idx = doub_idx[doub_nonz]
doub_orbs = doub_orbs[doub_nonz]
doub_matrel = doub_matrel[doub_nonz]
doub_probs = doub_probs[doub_nonz]
# Calculate determinants and matrix elements
doub_dets, doub_signs = fci_utils.doub_dets_parity(
sol_vec.indices[doub_idx], doub_orbs)
# origin_idx = numpy.searchsorted(deter_dets, sol_vec.indices[doub_idx])
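    # The spawned values formed below appear to be importance-sampled off-diagonal
    # contributions, -epsilon * H_ij * v_j / p_ij, with the fermionic sign applied via
    # doub_signs (the matrix elements were computed without signs); dividing by the
    # proposal probability keeps the stochastic estimate unbiased.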
doub_matrel *= args.epsilon / doub_probs * \
doub_signs * -sol_vec.values[doub_idx]
# Start forming next iterate
spawn_dets = doub_dets
spawn_vals = doub_matrel
sing_dets, sing_matrel = fci_c_utils.single_dets_matrel_nosgn(
sol_vec.indices[sing_idx], sing_orbs, eris, h_core, occ_orbs[sing_idx], args.frozen)
# Retain nonzero elements
sing_nonz = sing_matrel != 0
sing_idx = sing_idx[sing_nonz]
sing_dets = sing_dets[sing_nonz]
sing_matrel = sing_matrel[sing_nonz]
sing_probs = sing_probs[sing_nonz]
sing_orbs = sing_orbs[sing_nonz]
# Calculate determinants and matrix elements
sing_signs = fci_c_utils.excite_signs(sing_orbs[:, 1], sing_orbs[:, 0], sing_dets)
sing_matrel *= args.epsilon / sing_probs * -sol_vec.values[sing_idx] * sing_signs
# Add to next iterate
spawn_dets = numpy.append(spawn_dets, sing_dets)
spawn_vals = numpy.append(spawn_vals, sing_matrel)
# Diagonal matrix elements
diag_matrel = fci_c_utils.diag_matrel(
occ_orbs, h_core, eris, args.frozen) - hf_en
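    # Diagonal (death/cloning) part of the power-method-like update: each retained
    # amplitude is scaled below by 1 - epsilon * (H_ii - E_HF - S), where S is the
    # adjustable energy shift (en_shift).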
diag_vals = 1 - args.epsilon * (diag_matrel - en_shift)
diag_vals *= sol_vec.values
next_vec = sparse_vector.SparseVector(sol_vec.indices, diag_vals)
# Add vectors in sparse format
next_vec.add(spawn_dets, spawn_vals)
one_norm = next_vec.one_norm()
if (iterat + 1) % args.interval == 0:
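        # Shift update in the usual projector-Monte-Carlo form (assumed reading):
        # S <- S - damping / (interval * epsilon) * ln(||v_now||_1 / ||v_prev||_1),
        # where last_norm is taken to hold the one-norm from the previous update interval;
        # this steers the one-norm of the iterate toward a constant value.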
        en_shift -= args.damping / args.interval / args.epsilon * numpy.log(one_norm / last_norm)
"""
Tools for creating and manipulating 1,2, and 3D meshes.
.. inheritance-diagram:: proteus.MeshTools
:parts: 1
"""
from __future__ import print_function
from __future__ import absolute_import
from __future__ import division
from builtins import input
from builtins import zip
from builtins import next
from builtins import str
from builtins import range
from past.utils import old_div
from builtins import object
from .EGeometry import *
import numpy as np
import array
import h5py
import os
from xml.etree import ElementTree as ET
from .Archiver import *
from .LinearAlgebraTools import ParVec_petsc4py
from .Profiling import logEvent,memory
from . import Domain
from . import Comm
from subprocess import check_call, check_output
class Node(object):
"""A numbered point in 3D Euclidean space
:ivar N: node number
:ivar p: Euclidean coordinates
Comparison operators and a hash value are defined using the 3-tuple of
coordinates. This allows using Node objects and tuples of node objects as
dictionary keys, but in that use case one should be careful not to modify
the node coordinates.
>>> n0 = Node(nodeNumber=0,x=0.0,y=0.0,z=0.0)
>>> n1 = Node(nodeNumber=1,x=1.0,y=1.0,z=1.0)
>>> n1 >= n0
True
"""
xUnitVector = EVec(1.0,0.0,0.0)
yUnitVector = EVec(0.0,1.0,0.0)
zUnitVector = EVec(0.0,0.0,1.0)
def __init__(self,nodeNumber=0,x=0.0,y=0.0,z=0.0):
self.N=nodeNumber
self.p=EVec(x,y,z)
self.basis = [Node.xUnitVector,
Node.yUnitVector,
Node.zUnitVector]
self.elementBoundaries=[]
self.barycenter = self.p
self.length = 1.0
self.diameter=self.length
self.innerDiameter=self.length
self.hasGeometricInfo = True
self.unitNormal = Node.xUnitVector
self.nodes=(self,)
def computeGeometricInfo(self):
pass
def __str__(self):
return str(self.N)+":"+str(self.p)
def __hash__(self):
return hash((self.p[X],self.p[Y],self.p[Z]))
def __lt__(self,other):
return (self.p[X],self.p[Y],self.p[Z]) < \
(other.p[X],other.p[Y],other.p[Z])
def __le__(self,other):
return (self.p[X],self.p[Y],self.p[Z]) <= \
(other.p[X],other.p[Y],other.p[Z])
def __eq__(self,other):
return (self.p[X],self.p[Y],self.p[Z]) == \
(other.p[X],other.p[Y],other.p[Z])
def __ne__(self,other):
return (self.p[X],self.p[Y],self.p[Z]) != \
(other.p[X],other.p[Y],other.p[Z])
def __gt__(self,other):
return (self.p[X],self.p[Y],self.p[Z]) > \
(other.p[X],other.p[Y],other.p[Z])
def __ge__(self,other):
return (self.p[X],self.p[Y],self.p[Z]) >= \
(other.p[X],other.p[Y],other.p[Z])
class Element(object):
"""An numbered polytope in R^n
:ivar N: element number
:ivar nodes: sorted tuple of nodes defining the polytope
The nodes data member can be used as a dictionary key for the polytope as
long as the nodes aren't later modified.
"""
def __init__(self,elementNumber=0,nodes=[]):
self.N = elementNumber
nodeList = nodes
nodeList.sort()
self.nodes = tuple(nodeList)
self.elementBoundaries=[]
class Edge(Element):
xUnitVector = EVec(1.0,1.0,0.0)
yUnitVector = EVec(0.0,1.0,0.0)
zUnitVector = EVec(0.0,0.0,1.0)
"""
1D Element--a line connecting two Nodes
The nodes are stored as a lexicographically sorted node list.
"""
def __init__(self,edgeNumber=0,nodes=[]):
#Element.__init__(self,edgeNumber,nodes)
#inline Element.__init__
self.N = edgeNumber
nodeList = nodes
nodeList.sort()
self.nodes = tuple(nodeList)
#self.nodes=nodes
#self.nodes=nodes[:]
#self.nodes.sort()
self.elementBoundaries = [self.nodes[1],self.nodes[0]]
self.hasGeometricInfo = False
def computeGeometricInfo(self):
if not self.hasGeometricInfo:
self.basis = [self.nodes[1].p - self.nodes[0].p,
Edge.yUnitVector,
Edge.zUnitVector]
self.barycenter = old_div((self.nodes[0].p + self.nodes[1].p),2.0)
self.length = enorm(self.basis[0])
self.normal = EVec(-self.basis[0][Y], self.basis[0][X],0.0)
norm = enorm(self.normal)
if norm:
self.unitNormal = old_div(self.normal,norm)
else:
#in 3D edge normals don't make sense in general so above
#may divide by zero if edge has zero projection onto x-y plane
self.normal = EVec(0.0, -self.basis[0][Z], self.basis[0][Y])
self.unitNormal = old_div(self.normal,enorm(self.normal))
self.diameter=self.length
self.innerDiameter = self.length
self.hasGeometricInfo = True
self.nodeUnitNormalList=[]
self.nodeUnitNormalList.append(old_div(-self.basis[0],self.length))
self.nodeUnitNormalList.append(old_div(self.basis[0],self.length))
self.elementBoundaryUnitNormalList=self.nodeUnitNormalList
self.elementBoundaryJacobianList=[Edge.xUnitVector,Edge.xUnitVector]
def getNodesFromEdges(edges):
"""Extract the subset of nodes from a list of edges."""
nodes={}
for e in edges:
for n in e.nodes:
nodes[n]=n
return list(nodes.values())
class Polygon(Element):
"""An abstract 2D element--a closed set of Edges connecting a set of Nodes.
The nodes and edges are stored as lexicographically sorted lists."""
def __init__(self,polygonNumber=0,nodes=[]):
Element.__init__(self,polygonNumber,nodes)
#the edges have to be set up by the specific polygon
self.edges=[]
def getEdgesFromPolygons(polygons):
"""Extract the subset of edges from a list of polygons"""
edges={}
for p in polygons:
for e in p.edges:
edges[e.nodes] = e
return list(edges.values())
class Triangle(Polygon):
"""A 2D triangular element"""
edgeMap = {(1,2):0,(0,2):1,(0,1):2}
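    # edgeMap appears to map a (sorted) pair of local node indices to the local edge
    # number; local edge eN is the edge opposite local node eN (it connects the other
    # two nodes of the triangle).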
zUnitVector = EVec(0.0,0.0,1.0)
def __init__(self,triangleNumber=0,nodes=[],edgeDict=None):
#Polygon.__init__(self,triangleNumber,nodes)
#inline
self.edges=[]
#Element.__init__
#inline
self.N = triangleNumber
nodeList = nodes
nodeList.sort()
self.nodes = tuple(nodeList)
#self.nodes=nodes[:]
#self.nodes.sort()
#
edgeNodeList = [(self.nodes[1],self.nodes[2]),
(self.nodes[0],self.nodes[2]),
(self.nodes[0],self.nodes[1])]
if edgeDict is None:
self.edges = [Edge(eN,list(edgeNodes)) for \
eN,edgeNodes in enumerate(edgeNodeList)]
else:
self.edges = [edgeDict[edgeNodes] for edgeNodes in edgeNodeList]
self.hasGeometricInfo=False
self.elementBoundaries=self.edges
def computeGeometricInfo(self):
if not self.hasGeometricInfo:
self.barycenter = old_div((self.nodes[0].p +
self.nodes[1].p +
self.nodes[2].p),3.0)
self.basis = [ n.p - self.nodes[0].p for n in self.nodes[1:]]
self.basis.append(Triangle.zUnitVector)
self.linearMap = ETen(self.basis[0],self.basis[1],self.basis[2])
self.normal = ecross(self.basis[0],self.basis[1])
normNormal = enorm(self.normal)
self.unitNormal = old_div(self.normal,normNormal)
self.area = 0.5*normNormal
for e in self.edges: e.computeGeometricInfo()
self.diameter = max([e.length for e in self.edges])
self.innerDiameter = 4.0*self.area/sum(
[e.length for e in self.edges])
self.edgeUnitNormalList=[]
for nNt,eN in Triangle.edgeMap.items():
unitNormal = self.edges[eN].unitNormal
if edot(unitNormal,self.nodes[nNt[0]].p - self.nodes[eN].p) < 0:
unitNormal*=-1.0
self.edgeUnitNormalList.append(unitNormal)
self.elementBoundaryUnitNormalList = self.edgeUnitNormalList
self.hasGeometricInfo=True
class Quadrilateral(Polygon):
"""A 2D quadrilateral element"""
def __init__(self,quadrilateralNumber=0,edges=[],simple=True):
Polygon.__init__(self,quadrilateralNumber)
self.edges = edges
nodeList = getNodesFromEdges(self.edges)
nodeList = self.sortNodes(nodeList)
self.nodes = tuple(nodeList)
self.hasGeometricInfo = False
self.elementBoundaries = self.edges
# This boolean flags whether the quadrilateral is simple
# (eg. a rectangle). Certain features are more difficult
# to implement if this is not the case.
        self.simple = simple
def sortNodes(self,nodeList):
newList = [None] * 4
coordinate_list = [1,1,1]
# initialize coordinate mins and maxs
xMin = nodeList[0].p[X]
xMax = nodeList[0].p[X]
yMin = nodeList[0].p[Y]
yMax = nodeList[0].p[Y]
zMin = nodeList[0].p[Z]
zMax = nodeList[0].p[Z]
for node in nodeList:
if xMin > node.p[X]:
xMin = node.p[X]
if xMax < node.p[X]:
xMax = node.p[X]
if yMin > node.p[Y]:
yMin = node.p[Y]
if yMax < node.p[Y]:
yMax = node.p[Y]
if zMin > node.p[Z]:
zMin = node.p[Z]
if zMax < node.p[Z]:
zMax = node.p[Z]
        # identify the degenerate coordinate direction.
        # NOTE - this is not entirely accurate, but assumes
        # 2D quadrilateral objects are orthogonal to one of
        # the canonical coordinate axes
if xMin==xMax:
coordinate_list[0] = 0
if yMin==yMax:
coordinate_list[1] = 0
if zMin==zMax:
coordinate_list[2] = 0
if sum(coordinate_list) !=2:
assert 0, 'Invalid 2D quadrilateral object'
for i, t in enumerate(coordinate_list):
if t == 0:
case = i
# x is degenerate variable
if case == 0:
var1 = 1 # y marked as first node
var2 = 2 # z marked as second
var1_min = yMin
var1_max = yMax
var2_min = zMin
var2_max = zMax
# y is degenerate variable
elif case == 1:
var1 = 0 # x marked as first node
var2 = 2 # z marked as second
var1_min = xMin
var1_max = xMax
var2_min = zMin
var2_max = zMax
# z is degenerate variable
elif case == 2:
var1 = 0 # x marked as first node
var2 = 1 # y marked as second
var1_min = xMin
var1_max = xMax
var2_min = yMin
var2_max = yMax
else:
            assert 0, 'Invalid quadrilateral mesh case'
for node in nodeList:
if node.p[var1]==var1_min and node.p[var2]==var2_min:
newList[0] = node
elif node.p[var1]==var1_min and node.p[var2]==var2_max:
newList[1] = node
elif node.p[var1]==var1_max and node.p[var2]==var2_max:
newList[2] = node
elif node.p[var1]==var1_max and node.p[var2]==var2_min:
newList[3] = node
for i,item in enumerate(newList):
if not newList[i]:
                assert 0,'Quadrilateral Mesh Generation Error '+str(newList)+" i = "+str(i)
return newList
def computeGeometricInfo(self):
if not self.hasGeometricInfo:
for e in self.edges: e.computeGeometricInfo()
#the nodes must lie in a plane
#use triangles to compute area
#grab one triangle
t0 = Triangle(0,list(self.nodes[0:3]))
t0.computeGeometricInfo()
#find the nodes that lie on the new edge,diagonal0
for et in t0.edges:
edgeIsNew=True
for e in self.edges:
if e.nodes == et.nodes:
edgeIsNew=False
if edgeIsNew:
break
diagonal0=et
t1 = Triangle(0,[self.nodes[3],
diagonal0.nodes[0],
diagonal0.nodes[1]])
t1.computeGeometricInfo()
#get normal from one of the triangles
self.unitNormal = t0.unitNormal
self.area = t0.area + t1.area
#find the long diagonal
diagonalNode=0
for n in self.nodes[0:3]:
if n != diagonal0.nodes[0] and n != diagonal0.nodes[1]:
diagonalNode=n
                    break
            diagonal1 = Edge(0,[diagonalNode,self.nodes[3]])
diagonal1.computeGeometricInfo()
self.diameter = max(diagonal1.length,diagonal0.length)
self.innerDiameter = 4.0*self.area/sum(
[e.length for e in self.edges])
# Calculate the coordinate of a simple quad
if self.simple==True:
self.xmin = self.nodes[0].p[X]
self.ymin = self.nodes[0].p[Y]
self.xmax = self.nodes[0].p[X]
self.ymax = self.nodes[0].p[Y]
for node in self.nodes:
if node.p[X] < self.xmin:
self.xmin = node.p[X]
elif node.p[X] > self.xmax:
self.xmax = node.p[X]
else:
pass
if node.p[Y] < self.ymin:
self.ymin = node.p[Y]
elif node.p[Y] > self.ymax:
self.ymax = node.p[Y]
else:
pass
self.xmid = old_div((self.xmin+self.xmax),2.)
self.ymid = old_div((self.ymin+self.ymax),2.)
self.zmid = 0.
class Polyhedron(Element):
"""
An abstract 3D Element--a closed set of Polygons connecting a set
of Edges.
The nodes and edges are stored as lexicographically sorted lists.
"""
def __init__(self,polyhedronNumber=0,nodes=[]):
Element.__init__(self,polyhedronNumber,nodes)
self.edges=[]
self.polygons=[]
def __cmp__(self,other):
return compareNodes(self.nodes,other.nodes)
class Tetrahedron(Polyhedron):
"""A 3D tetrahedral element"""
triangleMap = {(1,2,3):0,(0,2,3):1,(0,1,3):2,(0,1,2):3}
edgeMap = {(0,1): 0,
(0,2): 1,
(0,3): 2,
(1,2): 3,
(1,3): 4,
(2,3): 5}
def __init__(self,tetrahedronNumber,nodes,edgeDict=None,triangleDict=None):
#Polyhedron.__init__(self,tetrahedronNumber,nodes)
#inline
#Element.__init__
#inline
self.N = tetrahedronNumber
nodeList = nodes
nodeList.sort()
self.nodes = tuple(nodeList)
#self.nodes=nodes[:]
#self.nodes.sort()
#
triangleNodeList = [(self.nodes[1],
self.nodes[2],
self.nodes[3]),
(self.nodes[0],
self.nodes[2],
self.nodes[3]),
(self.nodes[0],
self.nodes[1],
self.nodes[3]),
(self.nodes[0],
self.nodes[1],
self.nodes[2])]
if triangleDict is None:
self.triangles = [Triangle(triangleNumber=tN,
nodes=list(triangleNodes))
for tN,triangleNodes in
enumerate(triangleNodeList)]
else:
self.triangles = [triangleDict[triangleNodes] for triangleNodes in
triangleNodeList]
self.polygons=self.triangles
edgeNodeList = [(self.nodes[0],self.nodes[1]),
(self.nodes[0],self.nodes[2]),
(self.nodes[0],self.nodes[3]),
(self.nodes[1],self.nodes[2]),
(self.nodes[1],self.nodes[3]),
(self.nodes[2],self.nodes[3])]
if edgeDict is None:
self.edges = [Edge(edgeNumber=eN,nodes=list(edgeNodes)) for
eN,edgeNodes in enumerate(edgeNodeList)]
else:
self.edges = [edgeDict[edgeNodes] for edgeNodes in edgeNodeList]
self.hasGeometricInfo=False
self.elementBoundaries = self.triangles
def computeGeometricInfo(self):
if not self.hasGeometricInfo:
for t in self.triangles: t.computeGeometricInfo()
self.barycenter =old_div((self.nodes[0].p +
self.nodes[1].p +
self.nodes[2].p +
self.nodes[3].p),4.0)
self.basis = [n.p - self.nodes[0].p for n in self.nodes[1:]]
self.linearMap = ETen(self.basis[0],self.basis[1],self.basis[2])
self.volume = old_div(abs(edet(self.linearMap)),6.0)
self.diameter = max([t.diameter for t in self.triangles])
#Zhang's formula for rho=innerDiameter of a simplex
self.innerDiameter = 6.0*self.volume/sum([t.area for t in
self.triangles])
self.triangleUnitNormalList=[]
for nNt,tN in Tetrahedron.triangleMap.items():
unitNormal = self.triangles[tN].unitNormal
if edot(unitNormal,self.nodes[nNt[0]].p - self.nodes[tN].p) < 0:
unitNormal *= -1.0
self.triangleUnitNormalList.append(unitNormal)
self.elementBoundaryUnitNormalList = self.triangleUnitNormalList
self.hasGeometricInfo=True
class Hexahedron(Polyhedron):
"""A 3D hexahedral element"""
def __init__(self,HN,quadrilaterals):
Polyhedron.__init__(self,HN)
self.N = HN
self.quadrilaterals = quadrilaterals
self.polygons = self.quadrilaterals
self.edges = getEdgesFromPolygons(quadrilaterals)
#self.nodes = getNodesFromEdges(self.edges)
#self.nodes.sort()
nodeList = getNodesFromEdges(self.edges)
nodeList.sort()
self.nodes = tuple(nodeList)
self.hasGeometricInfo=False
self.elementBoundaries = self.quadrilaterals
#todo add enum34 and replace with real Python enum
class MeshParallelPartitioningTypes(object):
"""
fake an enum for parallel partitioning options
"""
element = 0 ; node = 1
class Mesh(object):
"""A partition of a domain in R^n into elements.
This is the base class for meshes. Contains routines for
plotting the edges of the mesh in Matlab
Attributes
----------
elementBoundariesArray : array type
This array lists the global edge number associated with every
edge or face of an element.
"""
#cek adding parallel support
def __init__(self):
#array interface
self.nSubdomains_global=1
self.sN = 0
#node coordinates indexed by node number
self.nNodes_global=0
self.nNodes_subdomain=0
self.nodeArray=None
self.nodeVelocityArray=None
self.nNodes_element=0
#element node numbers, indexed by element number
self.nElements_global=0
self.nElements_proc=0
self.elementNodesArray=None
self.max_nElements_node=0
self.nElements_node=None #mwf warning not calculated in buildPythonFromC
self.nodeElementsArray=None
self.nodeElementOffsets=None
#element boundary numbers, indexed by element number
self.nElementBoundaries_element=0
self.elementBoundariesArray=None
#element numbers, indexed by element boundary number and left(0) and right(1) element
self.nElementBoundaries_global=0
self.elementBoundaryElementsArray=None
#local element boundary numbers, indexed by element boundary number and left(0) and right(1) element
self.elementBoundaryLocalElementBoundariesArray=None
#neighboring element numbers, indexed by local element boundary number
self.elementNeighborsArray=None
#node numbers, indexed by element boundary number
self.elementBoundaryNodesArray=None
#element boundary numbers, indexed by interior/exterior
#element boundary number
self.interiorElementBoundariesArray=None
self.nInteriorElementBoundaries_global=0
self.exteriorElementBoundariesArray=None
self.nExteriorElementBoundaries_global=0
#edge node numbers, indexed by edge number
self.nEdges_global=0
self.edgeNodesArray=None
self.nodeStarArray=None
self.nodeStarOffsets=None
self.h=0.0
self.hMin=0.0
self.hasGeometricInfo=False
self.boundaryMesh=None
#physical coordinates of element barycenters and elementBoundary barycenters
self.elementBarycentersArray=None
self.elementBoundaryBarycentersArray=None
self.nodeDiametersArray=None
self.nodeSupportArray=None
#unique labels for classes of elements, elementBoundaries, nodes,
self.elementMaterialTypes=None
self.elementBoundaryMaterialTypes=None
self.nodeMaterialTypes=None
#parallel stuff
self.nElements_owned=self.nElements_global
self.nNodes_owned=self.nNodes_global
self.nElementBoundaries_owned=self.nElementBoundaries_global
self.nEdges_owned=self.nEdges_global
self.elementOffsets_subdomain_owned=[0,self.nElements_global]
self.elementNumbering_subdomain2global=np.arange(self.nElements_global,dtype='i')
self.nodeOffsets_subdomain_owned=[0,self.nNodes_global]
self.nodeNumbering_subdomain2global=np.arange(self.nNodes_global,dtype='i')
self.elementBoundaryOffsets_subdomain_owned=[0,self.nElementBoundaries_global]
self.elementBoundaryNumbering_subdomain2global=np.arange(self.nElementBoundaries_global,dtype='i')
self.edgeOffsets_subdomain_owned=[0,self.nEdges_global]
self.edgeNumbering_subdomain2global=np.arange(self.nEdges_global,dtype='i')
self.subdomainMesh=self
self.globalMesh = None
self.arGridCollection=None
self.arGrid=None
self.nLayersOfOverlap = None
self.parallelPartitioningType = MeshParallelPartitioningTypes.element
def partitionMesh(self,nLayersOfOverlap=1,parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
from . import Comm
from . import cpartitioning
comm = Comm.get()
self.comm=comm
logEvent(memory("partitionMesh 1","MeshTools"),level=4)
logEvent("Partitioning mesh among %d processors using partitioningType = %d" % (comm.size(),parallelPartitioningType))
self.subdomainMesh=self.__class__()
self.subdomainMesh.globalMesh = self
self.subdomainMesh.cmesh=cmeshTools.CMesh()
self.nLayersOfOverlap = nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
logEvent(memory("partitionMesh 2","MeshTools"),level=4)
if parallelPartitioningType == MeshParallelPartitioningTypes.node:
#mwf for now always gives 1 layer of overlap
(self.elementOffsets_subdomain_owned,
self.elementNumbering_subdomain2global,
self.nodeOffsets_subdomain_owned,
self.nodeNumbering_subdomain2global,
self.elementBoundaryOffsets_subdomain_owned,
self.elementBoundaryNumbering_subdomain2global,
self.edgeOffsets_subdomain_owned,
self.edgeNumbering_subdomain2global) = cpartitioning.partitionNodes(comm.comm.tompi4py(),
nLayersOfOverlap,
self.cmesh,
self.subdomainMesh.cmesh)
else:
(self.elementOffsets_subdomain_owned,
self.elementNumbering_subdomain2global,
self.nodeOffsets_subdomain_owned,
self.nodeNumbering_subdomain2global,
self.elementBoundaryOffsets_subdomain_owned,
self.elementBoundaryNumbering_subdomain2global,
self.edgeOffsets_subdomain_owned,
self.edgeNumbering_subdomain2global) = cpartitioning.partitionElements(comm.comm.tompi4py(),
nLayersOfOverlap,
self.cmesh,
self.subdomainMesh.cmesh)
#
logEvent(memory("partitionMesh 3","MeshTools"),level=4)
self.subdomainMesh.buildFromC(self.subdomainMesh.cmesh)
self.subdomainMesh.nElements_owned = self.elementOffsets_subdomain_owned[comm.rank()+1] - self.elementOffsets_subdomain_owned[comm.rank()]
self.subdomainMesh.nNodes_owned = self.nodeOffsets_subdomain_owned[comm.rank()+1] - self.nodeOffsets_subdomain_owned[comm.rank()]
self.subdomainMesh.nElementBoundaries_owned = self.elementBoundaryOffsets_subdomain_owned[comm.rank()+1] - self.elementBoundaryOffsets_subdomain_owned[comm.rank()]
self.subdomainMesh.nEdges_owned = self.edgeOffsets_subdomain_owned[comm.rank()+1] - self.edgeOffsets_subdomain_owned[comm.rank()]
comm.barrier()
logEvent(memory("partitionMesh 4","MeshTools"),level=4)
logEvent("Number of Subdomain Elements Owned= "+str(self.subdomainMesh.nElements_owned))
logEvent("Number of Subdomain Elements = "+str(self.subdomainMesh.nElements_global))
logEvent("Number of Subdomain Nodes Owned= "+str(self.subdomainMesh.nNodes_owned))
logEvent("Number of Subdomain Nodes = "+str(self.subdomainMesh.nNodes_global))
logEvent("Number of Subdomain elementBoundaries Owned= "+str(self.subdomainMesh.nElementBoundaries_owned))
logEvent("Number of Subdomain elementBoundaries = "+str(self.subdomainMesh.nElementBoundaries_global))
logEvent("Number of Subdomain Edges Owned= "+str(self.subdomainMesh.nEdges_owned))
logEvent("Number of Subdomain Edges = "+str(self.subdomainMesh.nEdges_global))
comm.barrier()
logEvent("Finished partitioning")
par_nodeDiametersArray = ParVec_petsc4py(self.subdomainMesh.nodeDiametersArray,
bs=1,
n=self.subdomainMesh.nNodes_owned,
N=self.nNodes_global,
nghosts=self.subdomainMesh.nNodes_global - self.subdomainMesh.nNodes_owned,
subdomain2global=self.nodeNumbering_subdomain2global)
par_nodeDiametersArray.scatter_forward_insert()
# comm.beginSequential()
# from Profiling import memory
# memory()
# logEvent(memory("Partitioning Mesh","Mesh"),level=1)
# del self.cmesh
# #cmeshTools.deleteMeshDataStructures(self.cmesh)
# logEvent(memory("Without global mesh","Mesh"),level=1)
# comm.endSequential()
def partitionMeshFromFiles(self,filebase,base,nLayersOfOverlap=1,parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
from . import Comm
from . import cpartitioning
comm = Comm.get()
self.comm=comm
logEvent(memory("partitionMesh 1","MeshTools"),level=4)
logEvent("Partitioning mesh among %d processors using partitioningType = %d" % (comm.size(),parallelPartitioningType))
self.subdomainMesh=self.__class__()
self.subdomainMesh.globalMesh = self
self.subdomainMesh.cmesh=cmeshTools.CMesh()
self.nLayersOfOverlap = nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
logEvent(memory("partitionMesh 2","MeshTools"),level=4)
if parallelPartitioningType == MeshParallelPartitioningTypes.node:
logEvent("Starting nodal partitioning")#mwf for now always gives 1 layer of overlap
logEvent("filebase {0:s}".format(filebase))
logEvent("base {0:d}".format(base))
logEvent("nLayersOfOverlap {0:d}".format(nLayersOfOverlap))
logEvent("parallelPartitioningType {0:d}".format(parallelPartitioningType))
if isinstance(self,TetrahedralMesh):
(self.elementOffsets_subdomain_owned,
self.elementNumbering_subdomain2global,
self.nodeOffsets_subdomain_owned,
self.nodeNumbering_subdomain2global,
self.elementBoundaryOffsets_subdomain_owned,
self.elementBoundaryNumbering_subdomain2global,
self.edgeOffsets_subdomain_owned,
self.edgeNumbering_subdomain2global) = cpartitioning.partitionNodesFromTetgenFiles(comm.comm.tompi4py(),
filebase,
base,
nLayersOfOverlap,
self.cmesh,
self.subdomainMesh.cmesh)
elif isinstance(self,TriangularMesh):
(self.elementOffsets_subdomain_owned,
self.elementNumbering_subdomain2global,
self.nodeOffsets_subdomain_owned,
self.nodeNumbering_subdomain2global,
self.elementBoundaryOffsets_subdomain_owned,
self.elementBoundaryNumbering_subdomain2global,
self.edgeOffsets_subdomain_owned,
self.edgeNumbering_subdomain2global) = cpartitioning.partitionNodesFromTriangleFiles(comm.comm.tompi4py(),
filebase,
base,
nLayersOfOverlap,
self.cmesh,
self.subdomainMesh.cmesh)
else:
assert 0,"can't partition non-simplex mesh"
else:
logEvent("Starting element partitioning")
(self.elementOffsets_subdomain_owned,
self.elementNumbering_subdomain2global,
self.nodeOffsets_subdomain_owned,
self.nodeNumbering_subdomain2global,
self.elementBoundaryOffsets_subdomain_owned,
self.elementBoundaryNumbering_subdomain2global,
self.edgeOffsets_subdomain_owned,
self.edgeNumbering_subdomain2global) = cpartitioning.partitionElementsFromTetgenFiles(comm.comm.tompi4py(),
filebase,
base,
nLayersOfOverlap,
self.cmesh,
self.subdomainMesh.cmesh)
#
logEvent(memory("partitionMesh 3","MeshTools"),level=4)
self.buildFromCNoArrays(self.cmesh)
self.subdomainMesh.buildFromC(self.subdomainMesh.cmesh)
self.subdomainMesh.nElements_owned = self.elementOffsets_subdomain_owned[comm.rank()+1] - self.elementOffsets_subdomain_owned[comm.rank()]
self.subdomainMesh.nNodes_owned = self.nodeOffsets_subdomain_owned[comm.rank()+1] - self.nodeOffsets_subdomain_owned[comm.rank()]
self.subdomainMesh.nElementBoundaries_owned = self.elementBoundaryOffsets_subdomain_owned[comm.rank()+1] - self.elementBoundaryOffsets_subdomain_owned[comm.rank()]
self.subdomainMesh.nEdges_owned = self.edgeOffsets_subdomain_owned[comm.rank()+1] - self.edgeOffsets_subdomain_owned[comm.rank()]
comm.barrier()
logEvent(memory("partitionMesh 4","MeshTools"),level=4)
logEvent("Number of Subdomain Elements Owned= "+str(self.subdomainMesh.nElements_owned))
logEvent("Number of Subdomain Elements = "+str(self.subdomainMesh.nElements_global))
logEvent("Number of Subdomain Nodes Owned= "+str(self.subdomainMesh.nNodes_owned))
logEvent("Number of Subdomain Nodes = "+str(self.subdomainMesh.nNodes_global))
logEvent("Number of Subdomain elementBoundaries Owned= "+str(self.subdomainMesh.nElementBoundaries_owned))
logEvent("Number of Subdomain elementBoundaries = "+str(self.subdomainMesh.nElementBoundaries_global))
logEvent("Number of Subdomain Edges Owned= "+str(self.subdomainMesh.nEdges_owned))
logEvent("Number of Subdomain Edges = "+str(self.subdomainMesh.nEdges_global))
comm.barrier()
logEvent("Finished partitioning")
par_nodeDiametersArray = ParVec_petsc4py(self.subdomainMesh.nodeDiametersArray,
bs=1,
n=self.subdomainMesh.nNodes_owned,
N=self.nNodes_global,
nghosts=self.subdomainMesh.nNodes_global - self.subdomainMesh.nNodes_owned,
subdomain2global=self.nodeNumbering_subdomain2global)
par_nodeDiametersArray.scatter_forward_insert()
# comm.beginSequential()
# from Profiling import memory
# memory()
# logEvent(memory("Partitioning Mesh","Mesh"),level=1)
# del self.cmesh
# #cmeshTools.deleteMeshDataStructures(self.cmesh)
# logEvent(memory("Without global mesh","Mesh"),level=1)
# comm.endSequential()
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,Xdmf_ElementTopology="Triangle",tCount=0, EB=False):
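        # Two output layouts are used below (as the branches suggest): with ar.global_sync,
        # one globally indexed HDF5 dataset per field is written collectively using the
        # subdomain offsets; otherwise each rank writes datasets suffixed with its MPI
        # rank, or falls back to text "heavy data" via savetxt and xi:include.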
if self.arGridCollection is not None:
init = False
elif not init:
grids = ar.domain.findall("Grid")
self.arGridCollection = grids[0]
if EB:
assert(len(grids) > 1)
self.arEBGridCollection = grids[1]
if init:
self.arGridCollection = SubElement(ar.domain,"Grid",{"Name":"Mesh "+name,
"GridType":"Collection",
"CollectionType":"Temporal"})
if EB:
self.arEBGridCollection = SubElement(ar.domain,"Grid",{"Name":"EBMesh "+name,
"GridType":"Collection",
"CollectionType":"Temporal"})
if self.arGrid is None or self.arTime.get('Value') != "{0:e}".format(t):
#
#topology and geometry
#
if ar.global_sync:
self.arGrid = SubElement(self.arGridCollection,"Grid",{"GridType":"Uniform"})
self.arTime = SubElement(self.arGrid,"Time",{"Value":"%e" % (t,),"Name":"%i" % (tCount,)})
topology = SubElement(self.arGrid,"Topology",
{"Type":Xdmf_ElementTopology,
"NumberOfElements":"%i" % (self.globalMesh.nElements_global,)})
elements = SubElement(topology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.globalMesh.nElements_global,self.nNodes_element)})
geometry = SubElement(self.arGrid,"Geometry",{"Type":"XYZ"})
nodes = SubElement(geometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.globalMesh.nNodes_global,3)})
if ar.hdfFile is not None:
elements.text = ar.hdfFilename+":/elements"+name+str(tCount)
nodes.text = ar.hdfFilename+":/nodes"+name+str(tCount)
if init or meshChanged:
ar.create_dataset_sync('elements'+name+str(tCount),
offsets=self.globalMesh.elementOffsets_subdomain_owned,
data=self.globalMesh.nodeNumbering_subdomain2global[self.elementNodesArray[:self.nElements_owned]])
ar.create_dataset_sync('nodes'+name+str(tCount),
offsets=self.globalMesh.nodeOffsets_subdomain_owned,
data=self.nodeArray[:self.nNodes_owned])
else:
assert False, "global_sync not supported with text heavy data"
else:
self.arGrid = SubElement(self.arGridCollection,"Grid",{"GridType":"Uniform"})
self.arTime = SubElement(self.arGrid,"Time",{"Value":"%e" % (t,),"Name":"%i" % (tCount,)})
topology = SubElement(self.arGrid,"Topology",
{"Type":Xdmf_ElementTopology,
"NumberOfElements":"%i" % (self.nElements_owned,)})
elements = SubElement(topology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.nElements_owned,self.nNodes_element)})
geometry = SubElement(self.arGrid,"Geometry",{"Type":"XYZ"})
nodes = SubElement(geometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.nNodes_global,3)})
if ar.hdfFile is not None:
elements.text = ar.hdfFilename+":/elements"+str(ar.comm.rank())+name+str(tCount)
nodes.text = ar.hdfFilename+":/nodes"+str(ar.comm.rank())+name+str(tCount)
if init or meshChanged:
ar.create_dataset_async('elements'+str(ar.comm.rank())+name+str(tCount),data=self.elementNodesArray[:self.nElements_owned])
ar.create_dataset_async('nodes'+str(ar.comm.rank())+name+str(tCount),data=self.nodeArray)
else:
SubElement(elements,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/elements"+name+".txt"})
SubElement(nodes,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/nodes"+name+".txt"})
if init or meshChanged:
numpy.savetxt(ar.textDataDir+"/elements"+name+".txt",self.elementNodesArray[:self.nElements_owned],fmt='%d')
numpy.savetxt(ar.textDataDir+"/nodes"+name+".txt",self.nodeArray)
#
#element boundary topology and geometry
#
if EB:
self.arEBGrid = SubElement(self.arEBGridCollection,"Grid",{"GridType":"Uniform"})
self.arEBTime = SubElement(self.arEBGrid,"Time",{"Value":"%e" % (t,),"Name":"%i" % (tCount,)})
Xdmf_ElementEBTopology = "Triangle" #cek hack
ebtopology = SubElement(self.arEBGrid,"Topology",
{"Type":Xdmf_ElementEBTopology,
"NumberOfElements":"%i" % (self.nElementBoundaries_global,)})
ebelements = SubElement(ebtopology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.nElementBoundaries_global,self.nNodes_elementBoundary)})
ebgeometry = SubElement(self.arEBGrid,"Geometry",{"Type":"XYZ"})
ebnodes = SubElement(ebgeometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.nNodes_global,3)})
if ar.hdfFile is not None:
ebelements.text = ar.hdfFilename+":/elementBoundaries"+str(ar.comm.rank())+name+str(tCount)
ebnodes.text = ar.hdfFilename+":/nodes"+str(ar.comm.rank())+name+str(tCount)
if init or meshChanged:
ar.create_dataset_async('elementBoundaries'+str(ar.comm.rank())+name+str(tCount), data = self.elementBoundaryNodesArray)
#ar.create_dataset_async('nodes'+`ar.comm.rank()`+name+`tCount`, data = self.nodeArray)
else:
SubElement(ebelements,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/elementBoundaries"+name+".txt"})
SubElement(ebnodes,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/nodes"+name+".txt"})
if init or meshChanged:
np.savetxt(ar.textDataDir+"/elementBoundaries"+name+".txt",self.elementBoundaryNodesArray,fmt='%d')
# Add the local->global index maps for collect.py and for
# reverse mapping in hotstarts from a global XDMF file.
if self.globalMesh is not None and not ar.global_sync:
nodeMapAtt = SubElement(self.arGrid,"Attribute",
{"Name":"NodeMapL2G",
"AttributeType":"Scalar",
"Center":"Node"})
nodeMap = SubElement(nodeMapAtt,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Precision":"4",
"Dimensions":"%i" % (self.nNodes_global,)})
elemMapAtt = SubElement(self.arGrid,"Attribute",
{"Name":"CellMapL2G",
"AttributeType":"Scalar",
"Center":"Cell"})
elemMap = SubElement(elemMapAtt,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Precision":"4",
"Dimensions":"%i" % (self.nElements_owned,)})
if ar.hdfFile is not None:
nodeMap.text = ar.hdfFilename+":/nodeMapL2G"+str(ar.comm.rank())+name+str(tCount)
elemMap.text = ar.hdfFilename+":/cellMapL2G"+str(ar.comm.rank())+name+str(tCount)
if init or meshChanged:
ar.create_dataset_async('nodeMapL2G'+str(ar.comm.rank())+name+str(tCount), data=self.globalMesh.nodeNumbering_subdomain2global)
ar.create_dataset_async('cellMapL2G'+str(ar.comm.rank())+name+str(tCount), data=self.globalMesh.elementNumbering_subdomain2global[:self.nElements_owned])
else:
SubElement(nodeMap,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/nodeMapL2G"+name+".txt"})
                        SubElement(elemMap,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/cellMapL2G"+name+".txt"})
if init or meshChanged:
np.savetxt(ar.textDataDir+"/nodeMapL2G"+name+".txt",self.globalMesh.nodeNumbering_subdomain2global)
np.savetxt(ar.textDataDir+"/cellMapL2G"+name+".txt",self.globalMesh.elementNumbering_subdomain2global[:self.nElements_owned])
#
#material types
#
if ar.global_sync:
nodeMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"nodeMaterialTypes",
"AttributeType":"Scalar",
"Center":"Node"})
nodeMaterialTypesValues = SubElement(nodeMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.globalMesh.nNodes_global,)})
elementMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"elementMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementMaterialTypesValues = SubElement(elementMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.globalMesh.nElements_global,)})
if EB:
ebnodeMaterialTypes = SubElement(self.arEBGrid,"Attribute",{"Name":"ebnodeMaterialTypes",
"AttributeType":"Scalar",
"Center":"Node"})
ebnodeMaterialTypesValues = SubElement(ebnodeMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.globalMesh.nNodes_global,)})
elementBoundaryMaterialTypes = SubElement(self.arEBGrid,"Attribute",{"Name":"elementBoundaryMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementBoundaryMaterialTypesValues = SubElement(elementBoundaryMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.globalMesh.nElementBoundaries_global,)})
if ar.hdfFile is not None:
nodeMaterialTypesValues.text = ar.hdfFilename+":/"+"nodeMaterialTypes"+"_t"+str(tCount)
ar.create_dataset_sync("nodeMaterialTypes"+"_t"+str(tCount), offsets=self.globalMesh.nodeOffsets_subdomain_owned, data=self.nodeMaterialTypes[:self.nNodes_owned])
elementMaterialTypesValues.text = ar.hdfFilename+":/"+"elementMaterialTypes"+"_t"+str(tCount)
ar.create_dataset_sync("elementMaterialTypes"+"_t"+str(tCount), offsets=self.globalMesh.elementOffsets_subdomain_owned, data=self.elementMaterialTypes[:self.nElements_owned])
if EB:
ebnodeMaterialTypesValues.text = ar.hdfFilename+":/"+"nodeMaterialTypes"+"_t"+str(tCount)
elementBoundaryMaterialTypesValues.text = ar.hdfFilename+":/"+"elementBoundaryMaterialTypes"+"_t"+str(tCount)
ar.create_dataset_sync("elementBoundaryMaterialTypes"+"_t"+str(tCount), offsets = self.globalMesh.elementBoundaryOffsets_subdomain_owned, data=self.elementBoundaryMaterialTypes[:self.nElementBoundaries_owned])
else:
assert False, "global_sync not supported with text heavy data"
else:
nodeMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"nodeMaterialTypes",
"AttributeType":"Scalar",
"Center":"Node"})
nodeMaterialTypesValues = SubElement(nodeMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.nNodes_global,)})
elementMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"elementMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementMaterialTypesValues = SubElement(elementMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.nElements_owned,)})
if EB:
ebnodeMaterialTypes = SubElement(self.arEBGrid,"Attribute",{"Name":"ebnodeMaterialTypes",
"AttributeType":"Scalar",
"Center":"Node"})
ebnodeMaterialTypesValues = SubElement(ebnodeMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.nNodes_global,)})
elementBoundaryMaterialTypes = SubElement(self.arEBGrid,"Attribute",{"Name":"elementBoundaryMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementBoundaryMaterialTypesValues = SubElement(elementBoundaryMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.nElementBoundaries_global,)})
if ar.hdfFile is not None:
nodeMaterialTypesValues.text = ar.hdfFilename+":/"+"nodeMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount)
ar.create_dataset_async("nodeMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount), data=self.nodeMaterialTypes)
elementMaterialTypesValues.text = ar.hdfFilename+":/"+"elementMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount)
ar.create_dataset_async("elementMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount), data=self.elementMaterialTypes[:self.nElements_owned])
if EB:
ebnodeMaterialTypesValues.text = ar.hdfFilename+":/"+"nodeMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount)
elementBoundaryMaterialTypesValues.text = ar.hdfFilename+":/"+"elementBoundaryMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount)
ar.create_dataset_async("elementBoundaryMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount), data=self.elementBoundaryMaterialTypes)
else:
numpy.savetxt(ar.textDataDir+"/"+"nodeMaterialTypes"+str(tCount)+".txt",self.nodeMaterialTypes)
SubElement(nodeMaterialTypesValues,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/"+"nodeMaterialTypes"+str(tCount)+".txt"})
numpy.savetxt(ar.textDataDir+"/"+"elementMaterialTypes"+str(tCount)+".txt",self.elementMaterialTypes[:self.nElements_owned])
SubElement(elementMaterialTypesValues,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/"+"elementMaterialTypes"+str(tCount)+".txt"})
#done with material types
def buildFromC(self,cmesh):
from . import cmeshTools
#
logEvent(memory("buildFromC","MeshTools"),level=4)
self.cmesh = cmesh
(self.nElements_global,
self.nNodes_global,
self.nNodes_element,
self.nNodes_elementBoundary,
self.nElementBoundaries_element,
self.nElementBoundaries_global,
self.nInteriorElementBoundaries_global,
self.nExteriorElementBoundaries_global,
self.max_nElements_node,
self.nEdges_global,
self.max_nNodeNeighbors_node,
self.elementNodesArray,
self.nodeElementsArray,
self.nodeElementOffsets,
self.elementNeighborsArray,
self.elementBoundariesArray,
self.elementBoundaryNodesArray,
self.elementBoundaryElementsArray,
self.elementBoundaryLocalElementBoundariesArray,
self.interiorElementBoundariesArray,
self.exteriorElementBoundariesArray,
self.edgeNodesArray,
self.nodeStarArray,
self.nodeStarOffsets,
self.elementMaterialTypes,
self.elementBoundaryMaterialTypes,
self.nodeMaterialTypes,
self.nodeArray,
self.nx,self.ny, self.nz, #NURBS
self.px,self.py, self.pz, #NURBS
self.elementIJK, #NURBS
self.weights, #NURBS
self.U_KNOT, #NURBS
self.V_KNOT, #NURBS
self.W_KNOT, #NURBS
self.elementDiametersArray,
self.elementInnerDiametersArray,
self.elementBoundaryDiametersArray,
self.elementBarycentersArray,
self.elementBoundaryBarycentersArray,
self.nodeDiametersArray,
self.nodeSupportArray,
self.h,
self.hMin,
self.sigmaMax,
self.volume) = cmeshTools.buildPythonMeshInterface(self.cmesh)
# print("from C")
# print (self.nElements_global,
# self.nNodes_global,
# self.nNodes_element,
# self.nNodes_elementBoundary,
# self.nElementBoundaries_element,
# self.nElementBoundaries_global,
# self.nInteriorElementBoundaries_global,
# self.nExteriorElementBoundaries_global,
# self.max_nElements_node,
# self.nEdges_global,
# self.max_nNodeNeighbors_node,
# self.elementNodesArray,
# self.nodeElementsArray,
# self.nodeElementOffsets,
# self.elementNeighborsArray,
# self.elementBoundariesArray,
# self.elementBoundaryNodesArray,
# self.elementBoundaryElementsArray,
# self.elementBoundaryLocalElementBoundariesArray,
# self.interiorElementBoundariesArray,
# self.exteriorElementBoundariesArray,
# self.edgeNodesArray,
# self.nodeStarArray,
# self.nodeStarOffsets,
# self.elementMaterialTypes,
# self.elementBoundaryMaterialTypes,
# self.nodeMaterialTypes,
# self.nodeArray,
# self.elementDiametersArray,
# self.elementInnerDiametersArray,
# self.elementBoundaryDiametersArray,
# self.elementBarycentersArray,
# self.elementBoundaryBarycentersArray,
# self.nodeDiametersArray,
# self.nodeSupportArray,
# self.h,
# self.hMin,
# self.volume)
self.hasGeometricInfo = True
#default to single processor
self.nNodes_owned = self.nNodes_global
self.nElements_owned = self.nElements_global
self.nElementBoundaries_owned = self.nElementBoundaries_global
self.nEdges_owned = self.nEdges_global
logEvent(memory("buildFromC","MeshTools"),level=4)
def buildFromCNoArrays(self,cmesh):
from . import cmeshTools
#
logEvent(memory("buildFromC","MeshTools"),level=4)
self.cmesh = cmesh
(self.nElements_global,
self.nNodes_global,
self.nNodes_element,
self.nNodes_elementBoundary,
self.nElementBoundaries_element,
self.nElementBoundaries_global,
self.nInteriorElementBoundaries_global,
self.nExteriorElementBoundaries_global,
self.max_nElements_node,
self.nEdges_global,
self.max_nNodeNeighbors_node,
self.h,
self.hMin,
self.sigmaMax,
self.volume) = cmeshTools.buildPythonMeshInterfaceNoArrays(self.cmesh)
self.hasGeometricInfo = False
logEvent(memory("buildFromCNoArrays","MeshTools"),level=4)
def buildNodeStarArrays(self):
import itertools
if self.nodeStarArray is None:
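            # nodeStarArray/nodeStarOffsets form a CSR-style adjacency list: the node
            # numbers adjacent to node n are
            # nodeStarArray[nodeStarOffsets[n]:nodeStarOffsets[n+1]] (node n itself excluded).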
self.nodeStarList=[]
for n in range(self.nNodes_global):
self.nodeStarList.append(set())
for i_ele in range(self.nElements_global): #: is this OK for parallel mesh?
for n1,n2 in itertools.permutations(self.elementNodesArray[i_ele],2):#: works for combination of triangle and quadrilateral
#: if n1<self.nNodes_global: #: Saving only locally owned node is not enough; should include ghost node
self.nodeStarList[n1].add(n2) #: does not contain itself; use set() instead of list since each pair is visited 1 or 2 times for 2D mesh
self.nodeStarOffsets = np.zeros((self.nNodes_global+1,),'i')
lenNodeStarArray=0
for nN in range(1,self.nNodes_global+1):
self.nodeStarOffsets[nN] = self.nodeStarOffsets[nN-1] + len(self.nodeStarList[nN-1])
self.nodeStarArray =np.fromiter(itertools.chain.from_iterable(self.nodeStarList),'i')
del self.nodeStarList
def buildArraysFromLists(self):
#nodes
self.nNodes_global = len(self.nodeList)
self.nodeArray = np.zeros((self.nNodes_global,3),'d')
nodeElementsList=[]
for nN,n in enumerate(self.nodeList):
self.nodeArray[nN][:] = n.p
nodeElementsList.append([])
#elements
self.nNodes_element = len(self.elementList[0].nodes)
self.nElements_global = len(self.elementList)
self.elementNodesArray = np.zeros((self.nElements_global,
self.nNodes_element),
'i')
for en,e in enumerate(self.elementList):
for nN_element,n in enumerate(e.nodes):
self.elementNodesArray[en,nN_element]=n.N
nodeElementsList[n.N].append(en)
#elements per node
nodeElementsDict={}
for eN in range(self.nElements_global):
for nN_element in range(self.nNodes_element):
nN = self.elementNodesArray[eN,nN_element]
if nN in nodeElementsDict:
nodeElementsDict[nN].append(eN)
else:
nodeElementsDict[nN] = [eN]
self.max_nElements_node = max(len(nodeElementsDict[nN]) for nN in range(self.nNodes_global))
self.nElements_node = np.zeros((self.nNodes_global),'i')
#mwf make a 1d array now
#self.nodeElementsArrayOld = np.zeros((self.nNodes_global,self.max_nElements_node),'i')
self.nodeElementOffsets = np.zeros((self.nNodes_global+1,),'i')
for nN in range(len(nodeElementsDict)):
elementList = nodeElementsDict[nN]
self.nElements_node[nN] = len(elementList)
self.nodeElementOffsets[nN+1] = self.nodeElementOffsets[nN]+self.nElements_node[nN]
#for eN_element,eN in enumerate(elementList):
# self.nodeElementsArrayOld[nN,eN_element]=eN
self.nodeElementsArray = np.zeros((self.nodeElementOffsets[self.nNodes_global],),'i')
for nN,elementList in nodeElementsDict.items():
for eN_element,eN in enumerate(elementList):
self.nodeElementsArray[self.nodeElementOffsets[nN]+eN_element]=eN
#
#
#elementBoundariesArray
self.nElementBoundaries_element = len(
self.elementList[0].elementBoundaries)
self.elementBoundariesArray = np.zeros(
(self.nElements_global,self.nElementBoundaries_element),
'i')
#collect set of element boundaries while we're looping
elementBoundaryNumbers=set()
for eN,e in enumerate(self.elementList):
for ebN_element,eb in enumerate(e.elementBoundaries):
self.elementBoundariesArray[eN,ebN_element]=eb.N
elementBoundaryNumbers.add(eb.N)
self.nElementBoundaries_global=len(elementBoundaryNumbers)
#elementBoundaryElementsArray
self.elementBoundaryElementsArray=np.ones(
(self.nElementBoundaries_global,2),'i')
self.elementBoundaryElementsArray*=-1
self.elementBoundaryLocalElementBoundariesArray=np.zeros(
(self.nElementBoundaries_global,2),'i')
elementBoundaryElementsCardArray =np.zeros(
(self.nElementBoundaries_global),'i')
for eN in range(self.nElements_global):
for ebN_element in range(self.nElementBoundaries_element):
ebN = self.elementBoundariesArray[eN,ebN_element]
elementBoundaryElementsCardArray[ebN]+=1
eN_boundaryElement=elementBoundaryElementsCardArray[ebN]-1
self.elementBoundaryElementsArray[ebN,eN_boundaryElement]=eN
self.elementBoundaryLocalElementBoundariesArray[ebN,eN_boundaryElement]=ebN_element
if elementBoundaryElementsCardArray[ebN] > 2:
logEvent("WARNING, element neighbors of boundary element > 2")
elementBoundaryElementsCardArray[ebN]=2
#interior and exterior
self.nExteriorElementBoundaries_global=2*self.nElementBoundaries_global\
- np.sum(
elementBoundaryElementsCardArray)
self.nInteriorElementBoundaries_global= self.nElementBoundaries_global-\
self.nExteriorElementBoundaries_global
self.exteriorElementBoundariesArray=np.zeros(
(self.nExteriorElementBoundaries_global,),'i')
self.interiorElementBoundariesArray=np.zeros(
(self.nInteriorElementBoundaries_global,),'i')
interior=0
exterior=0
for ebN in range(self.nElementBoundaries_global):
if elementBoundaryElementsCardArray[ebN]==1:
self.exteriorElementBoundariesArray[exterior]=ebN
exterior+=1
else:
self.interiorElementBoundariesArray[interior]=ebN
interior+=1
del elementBoundaryElementsCardArray
self.nNodes_elementBoundary = len(self.elementBoundaryList[0].nodes)
self.elementBoundaryNodesArray = np.zeros((self.nElementBoundaries_global,
self.nNodes_elementBoundary),
'i')
for ebN,eb in enumerate(self.elementBoundaryList):
for nN_element,n in enumerate(eb.nodes):
self.elementBoundaryNodesArray[ebN,nN_element]=n.N
#element neighbors
self.elementNeighborsArray = np.zeros((self.nElements_global,self.nElementBoundaries_element),'i')
for eN in range(self.nElements_global):
for ebN_element in range(self.nElementBoundaries_element):
ebN = self.elementBoundariesArray[eN,ebN_element]
eN_left = self.elementBoundaryElementsArray[ebN,0]
eN_right = self.elementBoundaryElementsArray[ebN,1]
if eN == eN_left:
self.elementNeighborsArray[eN,ebN_element] = eN_right
elif eN == eN_right:
self.elementNeighborsArray[eN,ebN_element] = eN_left
else:
self.elementNeighborsArray[eN,ebN_element] = -1
#edges
self.edgeNodesArray = np.zeros(
(len(self.edgeList),2),'i')
for en,e in enumerate(self.edgeList):
self.edgeNodesArray[en,0]=e.nodes[0].N
self.edgeNodesArray[en,1]=e.nodes[1].N
#geometric info
self.computeGeometricInfo()
self.elementDiametersArray = np.zeros((self.nElements_global,),'d')
self.elementInnerDiametersArray = np.zeros((self.nElements_global,),'d')
for en in range(self.nElements_global):
self.elementDiametersArray[en] = self.elementList[en].diameter
self.elementInnerDiametersArray[en]=self.elementList[en].innerDiameter
self.elementBoundaryDiametersArray = np.zeros((self.nElementBoundaries_global,),'d')
for eN,e in enumerate(self.elementList):
for ebN_element,eb in enumerate(e.elementBoundaries):
self.elementBoundaryDiametersArray[self.elementBoundariesArray[eN,ebN_element]] = eb.diameter
self.elementMaterialTypes = np.zeros((self.nElements_global,),'i')
self.elementBoundaryMaterialTypes = np.zeros((self.nElementBoundaries_global,),'i')
self.nodeMaterialTypes = np.zeros((self.nNodes_global,),'i')
#
self.elementBarycentersArray = np.zeros((self.nElements_global,3),'d')
self.elementBoundaryBarycentersArray = np.zeros((self.nElementBoundaries_global,3),'d')
for eN in range(self.nElements_global):
self.elementBarycentersArray[eN,:] = 0.0
for ebN in range(self.nNodes_element):
self.elementBarycentersArray[eN,:] += self.nodeArray[self.elementNodesArray[eN,ebN],:]
self.elementBarycentersArray[eN,:] /= float(self.nNodes_element)
for ebN in range(self.nElementBoundaries_global):
self.elementBoundaryBarycentersArray[ebN,:] = 0.0
for nN in range(self.nNodes_elementBoundary):
self.elementBoundaryBarycentersArray[ebN,:] += self.nodeArray[self.elementBoundaryNodesArray[ebN,nN],:]
self.elementBoundaryBarycentersArray[ebN,:] /= float(self.nNodes_elementBoundary)
#
#now get rid of lists
del self.nodeList
del self.elementList
del self.elementBoundaryList
del self.edgeList
#self.partitionMesh()
def computeGeometricInfo(self):
self.elementList[0].computeGeometricInfo()
self.h=self.elementList[0].diameter
self.hMin=self.h
for e in self.elementList[1:]:
e.computeGeometricInfo()
self.h = max(self.h,e.diameter)
self.hMin=min(self.hMin,e.diameter)
for eb in e.elementBoundaries:
                eb.computeGeometricInfo()
self.hasGeometricInfo=True
def buildMatlabMeshDataStructures(self,meshFileBase='meshMatlab',writeToFile=True):
"""
build array data structures for matlab finite element mesh
representation and write to a file to view and play with in
        matlab. The current matlab support is mostly for 2d, but this
will return basic arrays for 1d and 3d too
in matlab can then print mesh with
pdemesh(p,e,t)
if one has pdetoolbox
where
p is the vertex or point matrix
e is the edge matrix, and
t is the element matrix
e will be the elementBoundary matrix in 1d and 3d, but perhaps
should remain the edge array?
points matrix is [nd x num vertices]
format :
row 1 = x coord,
row 2 = y coord for nodes in mesh
row 3 = z coord for nodes in mesh ...
edge matrix is [2*nd+3 x num faces]
format:
row 1 = start vertex number
...
row nd = end vertex number
row nd+1 = start value in edge parameterization, should be 0
row nd+2 = next value in edge parameterization, should be 1 or 2
          row 2*nd = end value in edge parameterization, should be 2 or 1
row 2*nd+1 = global face id, base 1
row 2*nd+2 = subdomain on left? always 1 for now
row 2*nd+3 = subdomain on right? always 0 for now
element matrix is [nd+2 x num elements]
row 1 = vertex 1 global number
row 2 = vertex 2 global number
...
          row nd+1 = vertex nd+1 global number
          row nd+2 = element subdomain (material) number
where 1,2,3 is a local counter clockwise numbering of vertices in
triangle
"""
matlabBase = 1
nd = self.nNodes_element-1
p = np.zeros((nd,self.nNodes_global),'d')
e = np.zeros((2*nd+3,self.nElementBoundaries_global),'d')
t = np.zeros((nd+2,self.nElements_global),'d')
#load p,e,t and write file
if writeToFile:
mfile = open(meshFileBase+'.m','w')
else:
mfile = open('/dev/null','w')
#
if writeToFile:
mfile.write('p = [ ... \n')
for nN in range(self.nNodes_global):
for I in range(nd):
p[I,nN]=self.nodeArray[nN,I]
if writeToFile:
mfile.write('%g ' % p[I,nN])
mfile.write('\n')
if writeToFile:
mfile.write(']; \n')
mfile.write("p = p\';\n") #need transpose for matlab
if writeToFile:
mfile.write('e = [ ... \n')
for ebN in range(self.nElementBoundaries_global):
eN_left = self.elementBoundaryElementsArray[ebN,0]
eN_right= self.elementBoundaryElementsArray[ebN,1]#-1 --> exterior
for nN in range(self.nNodes_elementBoundary):
e[nN,ebN]=self.elementBoundaryNodesArray[ebN,nN] + matlabBase #global node number of start node base 1
#assume for now existing parameterization ok
for nN in range(self.nNodes_elementBoundary):
e[self.nNodes_elementBoundary+nN,ebN]=nN #edge param. is 0 to 1
            e[2*self.nNodes_elementBoundary,ebN] = ebN+matlabBase #global face id, base 1
e[2*self.nNodes_elementBoundary+1,ebN] = self.elementMaterialTypes[eN_left] #subdomain to left
if eN_right >= 0:
e[2*self.nNodes_elementBoundary+2,ebN]= self.elementMaterialTypes[eN_right] #subdomain to right
else:
e[2*self.nNodes_elementBoundary+2,ebN]= -1
if writeToFile:
for i in range(e.shape[0]):
mfile.write(' %g ' % e[i,ebN])
mfile.write(' \n ')
if writeToFile:
mfile.write(']; \n')
mfile.write("e = e\';\n") #need transpose for matlab
#write triangles last
if writeToFile:
mfile.write('t = [ ... \n')
for eN in range(self.nElements_global):
for nN in range(self.nNodes_element):
t[nN,eN]=self.elementNodesArray[eN,nN]+matlabBase #global node number for vertex nN
t[self.nNodes_element,eN]=self.elementMaterialTypes[eN] #subdomain id
if writeToFile:
for i in range(t.shape[0]):
mfile.write('%g ' % t[i,eN])
mfile.write('\n')
if writeToFile:
mfile.write(']; \n');
mfile.write("t = t\';\n") #need transpose for matlab
mfile.close()
return p,e,t
def writeEdgesMatlab(self,filename):
"""store coordinates in files formatted for Matlab"""
xfile=filename+'_x.grf'
yfile=filename+'_y.grf'
zfile=filename+'_z.grf'
print('Storing edge information in %s, %s, and %s' % \
(xfile,yfile,zfile))
xOut = open(xfile,'w')
yOut = open(yfile,'w')
zOut = open(zfile,'w')
for edge in self.edgeList:
xOut.write('%14.8e ' % edge.nodes[0].p[X] )
yOut.write('%14.8e ' % edge.nodes[0].p[Y] )
zOut.write('%14.8e ' % edge.nodes[0].p[Z] )
xOut.write('\n')
yOut.write('\n')
zOut.write('\n')
for edge in self.edgeList:
xOut.write('%14.8e ' % edge.nodes[1].p[X])
yOut.write('%14.8e ' % edge.nodes[1].p[Y])
zOut.write('%14.8e ' % edge.nodes[1].p[Z])
xOut.write('\n')
yOut.write('\n')
zOut.write('\n')
xOut.close()
yOut.close()
zOut.close()
def viewTetrahedraMatlab(self,filename):
"""plot the edges"""
cmdfile = filename +'.m'
xfile=filename+'_x.grf'
yfile=filename+'_y.grf'
zfile=filename+'_z.grf'
xedges=filename+'_x'
yedges=filename+'_y'
zedges=filename+'_z'
        #the following is for debugging: plot each tet separately
nT = old_div(len(self.edgeList),6)
plotcommand = "-r \"load " + xfile + \
", load " + yfile + \
", load " + zfile
plots=''
for i in range(nT):
plots = plots + \
", figure(" +str(i+1)+")" \
", axis([0 1 0 1 0 1]), plot3("+xedges+\
"(:,"+str(i)+"*6+1:("+str(i)+"+1)*6),"+yedges+\
"(:,"+str(i)+"*6+1:("+str(i)+"+1)*6),"+zedges+\
"(:,"+str(i)+"*6+1:("+str(i)+"+1)*6),\'b-\') "
plotcommand = plotcommand + plots +'\"'
cmdOut = open(cmdfile,'w')
cmdOut.write(plotcommand)
cmdOut.close()
print('Calling matlab to view mesh')
os.execlp('matlab',
'matlab',
'-nodesktop',
'-nosplash',
'-r',
filename)
def viewMeshMatlab(self,filename):
"""plot the edges"""
cmdfile = filename +'.m'
xfile=filename+'_x.grf'
yfile=filename+'_y.grf'
zfile=filename+'_z.grf'
xedges=filename+'_x'
yedges=filename+'_y'
zedges=filename+'_z'
plotcommand = "load " + xfile + \
", load " + yfile + \
", load " + zfile + \
", figure " + \
", axis([0 1 0 1 0 1]), plot3("+xedges+\
","+yedges+\
","+zedges+\
",\'b-\')"
print(plotcommand)
cmdOut = open(cmdfile,'w')
cmdOut.write(plotcommand)
cmdOut.close()
print('Calling matlab to view mesh')
os.execlp('matlab',
'matlab',
'-nodesktop',
'-nosplash',
'-r',
filename)
# from os import popen
# matlab = popen('matlab','w')
# matlab.write(plotcommand+'\n')
# matlab.flush()
# raw_input('Please press return to continue...\n')
def writeEdgesGnuplot(self,filename):
"""store coordinates in files formatted for Matlab"""
datfile=filename+'.dat'
print('Storing edge information in %s' % datfile)
edgesOut = open(datfile,'w')
for edge in self.edgeList:
dataline = '%14.8e %14.8e %14.8e \n' % \
(edge.nodes[0].p[X],
edge.nodes[0].p[Y],
edge.nodes[0].p[Z])
edgesOut.write(dataline)
dataline = '%14.8e %14.8e %14.8e \n \n \n' % \
(edge.nodes[1].p[X],
edge.nodes[1].p[Y],
edge.nodes[1].p[Z])
edgesOut.write(dataline)
edgesOut.close()
def writeEdgesGnuplot2(self,filename):
"""store coordinates in files formatted for Matlab"""
datfile=filename+'.dat'
print('Storing edge information in %s' % datfile)
edgesOut = open(datfile,'w')
for n0,n1 in self.edgeNodesArray:
dataline = '%14.8e %14.8e %14.8e \n' % \
(self.nodeArray[n0][0],
self.nodeArray[n0][1],
self.nodeArray[n0][2])
edgesOut.write(dataline)
dataline = '%14.8e %14.8e %14.8e \n \n \n' % \
(self.nodeArray[n1][0],
self.nodeArray[n1][1],
self.nodeArray[n1][2])
edgesOut.write(dataline)
edgesOut.close()
def viewMeshGnuplot(self,filename):
cmdfile = filename +'.cmd'
datfile = filename +'.dat'
cmd = "set pointsize 2.5 \n set term x11 \n splot \'"+datfile+"\' with linespoints pointsize 2.5 pt 2\n"+\
"set xlabel \'x\' \n set ylabel \'y\' \n set zlabel \'z\' \n "
cmdOut = open(cmdfile,'w')
cmdOut.write(cmd)
cmdOut.close()
from os import execlp
print('Calling gnuplot to view mesh')
execlp('gnuplot','gnuplot',cmdfile,'-')
def viewMeshGnuplotPipe(self,filename):
cmdfile = filename +'.cmd'
datfile = filename +'.dat'
cmd = "set pointsize 1.5 \n set term x11 \n splot \'"+datfile+"\' with linespoints pointsize 2.5 pt 2 \n"+\
"set xlabel \'x\' \n set ylabel \'y\' \n set zlabel \'z\' \n "
cmdOut = open(cmdfile,'w')
cmdOut.write(cmd)
cmdOut.close()
from os import execlp
print('Calling gnuplot to view mesh')
from os import popen
gnuplot = popen('gnuplot','w')
gnuplot.write(cmd+'\n')
gnuplot.flush()
input('Please press return to continue... \n')
def viewMeshGnuplotPipePar(self,filenames):
from os import popen
gnuplot = popen('gnuplot','w')
for i,filename in enumerate(filenames):
cmdfile = filename +'.cmd'
datfile = filename +'.dat'
cmd = ("set term x11 %i \n splot \'" % (i,))+datfile+"\' with linespoints \n"+\
"set xlabel \'x\' \n set ylabel \'y\' \n set zlabel \'z\'"
cmdOut = open(cmdfile,'w')
cmdOut.write(cmd)
cmdOut.close()
from os import execlp
print('Calling gnuplot to view mesh')
gnuplot.write(cmd+'\n')
gnuplot.flush()
input('Please press return to continue... \n')
def convertFromPUMI(self, domain, MeshAdapt, faceList,regList, parallel=False, dim=3):
from . import cmeshTools
from . import MeshAdaptPUMI
from . import cpartitioning
from . import Comm
comm = Comm.get()
self.cmesh = cmeshTools.CMesh()
if parallel:
self.subdomainMesh=self.__class__()
self.subdomainMesh.globalMesh = self
self.subdomainMesh.cmesh = cmeshTools.CMesh()
MeshAdapt.constructFromParallelPUMIMesh(self.cmesh,
self.subdomainMesh.cmesh)
if(domain.AdaptManager.reconstructedFlag==1):
logEvent("Material arrays updating based on reconstructed model.\n")
MeshAdapt.updateMaterialArrays(self.subdomainMesh.cmesh);
elif(domain.AdaptManager.reconstructedFlag==2):
logEvent("Material arrays updating based on better reconstructed model.\n")
MeshAdapt.updateMaterialArrays2(self.subdomainMesh.cmesh);
else:
logEvent("Material arrays updating based on geometric model.\n")
for i in range(len(faceList)):
for j in range(len(faceList[i])):
#MeshAdapt.updateMaterialArrays(self.subdomainMesh.cmesh,(dim-1), i+1,
# faceList[i][j])
MeshAdapt.updateMaterialArrays(self.subdomainMesh.cmesh,(dim-1), domain.boundaryLabels[i], faceList[i][j])
for i in range(len(regList)):
for j in range(len(regList[i])):
MeshAdapt.updateMaterialArrays(self.subdomainMesh.cmesh,dim, i+1, regList[i][j])
if dim == 3:
cmeshTools.allocateGeometricInfo_tetrahedron(self.subdomainMesh.cmesh)
cmeshTools.computeGeometricInfo_tetrahedron(self.subdomainMesh.cmesh)
if dim == 2:
cmeshTools.allocateGeometricInfo_triangle(self.subdomainMesh.cmesh)
cmeshTools.computeGeometricInfo_triangle(self.subdomainMesh.cmesh)
self.buildFromCNoArrays(self.cmesh)
(self.elementOffsets_subdomain_owned,
self.elementNumbering_subdomain2global,
self.nodeOffsets_subdomain_owned,
self.nodeNumbering_subdomain2global,
self.elementBoundaryOffsets_subdomain_owned,
self.elementBoundaryNumbering_subdomain2global,
self.edgeOffsets_subdomain_owned,
self.edgeNumbering_subdomain2global) = (
cpartitioning.convertPUMIPartitionToPython(comm.comm.tompi4py(),
self.cmesh,
self.subdomainMesh.cmesh))
self.subdomainMesh.buildFromC(self.subdomainMesh.cmesh)
self.subdomainMesh.nElements_owned = (
self.elementOffsets_subdomain_owned[comm.rank()+1] -
self.elementOffsets_subdomain_owned[comm.rank()])
self.subdomainMesh.nNodes_owned = (
self.nodeOffsets_subdomain_owned[comm.rank()+1] -
self.nodeOffsets_subdomain_owned[comm.rank()])
self.subdomainMesh.nElementBoundaries_owned = (
self.elementBoundaryOffsets_subdomain_owned[comm.rank()+1] -
self.elementBoundaryOffsets_subdomain_owned[comm.rank()])
self.subdomainMesh.nEdges_owned = (
self.edgeOffsets_subdomain_owned[comm.rank()+1] -
self.edgeOffsets_subdomain_owned[comm.rank()])
comm.barrier()
par_nodeDiametersArray = (
ParVec_petsc4py(self.subdomainMesh.nodeDiametersArray,
bs=1,
n=self.subdomainMesh.nNodes_owned,
N=self.nNodes_global,
nghosts = self.subdomainMesh.nNodes_global -
self.subdomainMesh.nNodes_owned,
subdomain2global =
self.nodeNumbering_subdomain2global))
par_nodeDiametersArray.scatter_forward_insert()
comm.barrier()
else:
MeshAdapt.constructFromSerialPUMIMesh(self.cmesh)
if(domain.AdaptManager.reconstructedFlag==1):
logEvent("Material arrays updating based on reconstructed model.\n")
MeshAdapt.updateMaterialArrays(self.cmesh);
elif(domain.AdaptManager.reconstructedFlag==2):
logEvent("Material arrays updating based on better reconstructed model.\n")
MeshAdapt.updateMaterialArrays2(self.cmesh);
else:
for i in range(len(faceList)):
for j in range(len(faceList[i])):
#MeshAdapt.updateMaterialArrays(self.cmesh,(dim-1), i+1, faceList[i][j])
MeshAdapt.updateMaterialArrays(self.cmesh,(dim-1), domain.boundaryLabels[i], faceList[i][j])
for i in range(len(regList)):
for j in range(len(regList[i])):
MeshAdapt.updateMaterialArrays(self.cmesh,dim, i+1, regList[i][j])
if dim == 3:
cmeshTools.allocateGeometricInfo_tetrahedron(self.cmesh)
cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
if dim == 2:
cmeshTools.allocateGeometricInfo_triangle(self.cmesh)
cmeshTools.computeGeometricInfo_triangle(self.cmesh)
self.buildFromC(self.cmesh)
logEvent("meshInfo says : \n"+self.meshInfo())
class MultilevelMesh(Mesh):
"""A hierchical multilevel mesh"""
def __init__(self,levels=1):
self.meshList=[]
self.elementParents=None
def buildFromC(self,cmultilevelMesh):
from . import cmeshTools
self.cmultilevelMesh = cmultilevelMesh
(self.nLevels,
self.cmeshList,
self.elementParentsArrayList,
self.elementChildrenArrayList,
self.elementChildrenOffsetsList) = cmeshTools.buildPythonMultilevelMeshInterface(cmultilevelMesh)
def refine(self):
pass
def locallyRefine(self,elementTagArray):
pass
def buildArrayLists(self):
self.nLevels = len(self.meshList)
self.calculateElementParents()
self.elementParentsArrayList=[[]]
self.elementChildrenArrayList=[]
self.elementChildrenOffsetsList=[]
for l in range(1,self.nLevels):
self.elementParentsArrayList.append(self.elementParents[l])
len_children=0
for children in list(self.elementChildren[l-1].values()):
len_children += len(children)
self.elementChildrenArrayList.append(np.zeros((len_children,),'i'))
self.elementChildrenOffsetsList.append(np.zeros((self.meshList[l-1].nElements_global+1,),'i'))
index=0
for eN_p,children in enumerate(self.elementChildren[l-1].values()):
self.elementChildrenOffsetsList[l-1][eN_p] = index
for ec in children:
self.elementChildrenArrayList[l-1][index] = ec.N
index += 1
self.elementChildrenOffsetsList[l-1][-1] = index
def calculateElementParents(self,recalculate=False):
"""
get array elementParents[l,e] = e_c, where element e_c is the parent of element e
elementParents[0,:] = -1
"""
if (self.elementParents is None or recalculate):
self.elementParents = {}
nLevels = len(self.meshList)
for l in range(nLevels):
nE = self.meshList[l].nElements_global
self.elementParents[l] = np.ones((nE,),'i')
self.elementParents[l][:]=-1
for l in range(0,nLevels-1):
nEc = self.meshList[l].nElements_global
for ec in range(nEc):
for ef in self.elementChildren[l][ec]:
#print """l=%s ec= %s ef.N= %s """ % (l,ec,ef.N)
self.elementParents[l+1][ef.N] = ec
#ef
#ec
#l
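# Illustrative sketch: once a MultilevelMesh subclass has populated meshList
# and elementChildren via refinement, calculateElementParents() above lets a
# fine element be traced back to its coarse parent. mlMesh/level/eN are
# placeholder names.
def _sketch_lookup_parent_element(mlMesh, level, eN):
    """Return the parent element number of element eN on the given level
    (-1 on the coarsest level)."""
    mlMesh.calculateElementParents()
    return mlMesh.elementParents[level][eN]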
class PointMesh(Mesh):
#Elements=Nodes
"""
0D mesh
"""
def __init__(self,points):
self.nodeArray=points
self.nNodes_global = points.shape[0]
self.elementNodesArray=np.arange(self.nNodes_global,dtype='i')
self.nElements_global = self.nNodes_global
MX=0
MY=1
MZ=2
I=0
J=1
K=2
class EdgeGrid(Mesh):
"""A 1D regular grid on an interval"""
def __init__(self,nx=2,Lx=1.0):
Mesh.__init__(self)
#dimensions and ranges
self.nx=nx
self.ex=nx-1
self.nRange_x = list(range(nx))
self.eRange_x = list(range(self.ex))
#lengths
self.Lx=Lx
self.dx = old_div(Lx,self.ex)
#node coordinates
self.nodeGridArray = np.zeros((self.nx,3),'d')
for i in self.nRange_x:
self.nodeGridArray[i,MX] = i*self.dx
#edge node numbers
self.edgeNodesArray=np.zeros((self.ex,2),'i')
#try to do this like we'll do 2d and 3d
#edge nodes
en=2
edgeNodeNumbers = np.zeros((en,),'i')
#reference edge
eI=1
refEdge_nodeIndeces = [-eI,eI]
refEdge_NodeDict={}
for rn,rnii in enumerate(refEdge_nodeIndeces):
refEdge_NodeDict[rnii] = rn
for i in self.eRange_x:
#edge number
eN=i
#fine grid index of edge
ii = 2*i + 1
#fine grid index of edge nodes
for rn,rnii in enumerate(refEdge_nodeIndeces):
nii = rnii + ii
edgeNodeNumbers[rn]=old_div(nii,2)
self.edgeNodesArray[eN,:]=edgeNodeNumbers
#Mesh interface
self.nNodes_global=self.nx
self.nEdges_global=self.ex
self.nElements_global=self.ex
self.nElementBoundaries_global=self.nx
self.nodeArray=self.nodeGridArray
self.elementNodesArray=self.edgeNodesArray
self.elementBoundariesArray=self.nodeArray
self.boundaryMesh=PointMesh(np.array([self.nodeArray[0],
self.nodeArray[-1]],dtype='d'))
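# Illustrative sketch of the EdgeGrid constructor above: a 1D grid of 4
# elements on [0, 2]. The values in the comments follow directly from the
# construction and are included for orientation, not as a test.
def _sketch_edge_grid():
    g = EdgeGrid(nx=5, Lx=2.0)
    # g.nodeArray has shape (5, 3) with x = 0.0, 0.5, 1.0, 1.5, 2.0
    # g.elementNodesArray is [[0, 1], [1, 2], [2, 3], [3, 4]]
    return g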
class QuadrilateralGrid(Mesh):
"""A 2D regular grid of quadrilateral cells"""
def __init__(self,nx=2,ny=2,Lx=1.0,Ly=1.0):
Mesh.__init__(self)
#nodes
self.nx=nx
self.ny=ny
self.nxy=nx*ny
#edges
self.eXx=nx-1
self.eXy=ny
self.eXxy=self.eXx*self.eXy
self.eYx=nx
self.eYy=ny-1
self.eYxy = self.eYx*self.eYy
self.eXYx = self.eXx + self.eYx
self.eXYy = self.eXy + self.eYy
self.eXYxy = self.eXxy + self.eYxy
#quads
self.qx = nx-1
self.qy = ny-1
self.qxy = self.qx*self.qy
#ranges
self.nRange_x = list(range(self.nx))
self.nRange_y = list(range(self.ny))
self.qRange_x = list(range(self.qx))
        self.qRange_y = list(range(self.qy))
#lengths
self.Lx=Lx
self.Ly=Ly
self.dx = old_div(Lx,self.eXx)
self.dy = old_div(Ly,self.eYy)
#node coordinates
self.nodeGridArray=np.zeros((nx,ny,3),'d')
for i in self.nRange_x:
for j in self.nRange_y:
self.nodeGridArray[i,j,MX]=i*self.dx
self.nodeGridArray[i,j,MY]=j*self.dy
#edge node numbers
en=2
edgeNodeNumbers = np.zeros((en,),'i')
self.edgeNodesArray=np.zeros((self.eXYxy,en),'i')
#quad node numbers
qn=4
quadNodeNumbers = np.zeros((qn,),'i')
self.quadrilateralNodesArray=np.zeros((self.qxy,qn),'i')
#quad edge numbers
qe=4
quadEdgeNumbers = np.zeros((qe,),'i')
self.quadrilateralEdgesArray=np.zeros((self.qxy,qe),'i')
#reference quad
refQuad_NodeIndeces = [(-1,-1),
(-1, 1),
( 1,-1),
( 1, 1)]
refQuad_NodeIndeces.sort()
#a map between reference node indeces and numbers
refQuad_NodeDict={}
for rn,rniijj in enumerate(refQuad_NodeIndeces):
refQuad_NodeDict[rniijj]=rn
refQuad_EdgeIndeces = [(-1,0),
( 0,-1),
( 0, 1),
( 1, 0)]
refQuad_EdgeIndeces.sort()
refQuad_EdgeNodes=[]
#use the map between indeces and numbers to
#map edge indeces to the edge's node numbers
for reiijj in refQuad_EdgeIndeces:
if reiijj[I] == 0:
refQuad_EdgeNodes.append([
refQuad_NodeDict[(-1,reiijj[J])],
refQuad_NodeDict[( 1,reiijj[J])]])
else:
refQuad_EdgeNodes.append([
refQuad_NodeDict[(reiijj[I],-1)],
refQuad_NodeDict[(reiijj[I], 1)]])
for i in self.qRange_x:
for j in self.qRange_y:
#quad number
qN = i*self.qy + j
#fine grid indeces of quad
ii = 2*i + 1
jj = 2*j + 1
#nodes
for rn,rniijj in enumerate(refQuad_NodeIndeces):
nii = rniijj[I] + ii
njj = rniijj[J] + jj
nN = (old_div(nii,2))*self.ny + old_div(njj,2)
quadNodeNumbers[rn]=nN
self.quadrilateralNodesArray[qN][:]=quadNodeNumbers
#edges
for re,reiijj in enumerate(refQuad_EdgeIndeces):
eii = reiijj[I] + ii
ejj = reiijj[J] + jj
eN = (old_div(eii,2))*self.eXYy + (eii%2)*self.eYy + old_div(ejj,2)
quadEdgeNumbers[re]=eN
#nodes
for n,rn in enumerate(refQuad_EdgeNodes[re]):
self.edgeNodesArray[eN][n] = quadNodeNumbers[rn]
self.quadrilateralEdgesArray[qN][:]=quadEdgeNumbers
#Mesh interface (dimensions)
self.nNodes_global=self.nxy
self.nEdges_global=self.eXYxy
self.nElements_global=self.qxy
self.nElementBoundaries_global=self.eXYxy
self.nodeArray=np.reshape(self.nodeGridArray,(self.nxy,3))
self.elementNodesArray=self.quadrilateralNodesArray
self.elementBoundariesArray=self.edgeNodesArray
#todo extract boundary mesh
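# Illustrative sketch of the QuadrilateralGrid constructor above: a 2x2-cell
# grid on the unit square, giving 9 nodes, 12 edges, and 4 quadrilaterals.
def _sketch_quadrilateral_grid():
    g = QuadrilateralGrid(nx=3, ny=3, Lx=1.0, Ly=1.0)
    # g.nNodes_global == 9, g.nEdges_global == 12, g.nElements_global == 4
    return g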
class RectangularGrid(Mesh):
"""A regular partition into rectangles.
Nodes, edges, and faces can be indexed by (i,j,k) as follows.
The edges and faces are divided according to orientation (i.e. x-edge...).
An (i,j,k) index is associated with the type of edge or face
having node (i,j,k) as the first node in a lexicographically sorted
list of nodes corresponding to the edge or face."""
def __init__(self,nx=1,ny=1,nz=1,Lx=1.0,Ly=1.0,Lz=1.0):
Mesh.__init__(self)
self.Lx = Lx
self.Ly = Ly
self.Lz = Lz
#nodes
self.nx=nx
self.ny=ny
self.nz=nz
        self.nxy = nx*ny
        self.nxyz = nx*ny*nz
#edges
self.nXex=nx-1 #number of x-edges in the x dimension
self.nXey=ny
self.nXez=nz
self.nYex=nx
self.nYey=ny-1
self.nYez=nz
self.nZex=nx
self.nZey=ny
self.nZez=nz-1
#number of edges of all types associated with a row of nodes
self.nXYZex = self.nXex + self.nYex + self.nZex
#number of edges associated with an xy plane of nodes
self.nXexy=self.nXex*self.nXey
self.nYexy=self.nYex*self.nYey
self.nZexy=self.nZex*self.nZey
self.nXYZexy = self.nXexy + self.nYexy + self.nZexy
#number of edges of each type in the grid
self.nXexyz = self.nXexy*self.nXez
self.nYexyz = self.nYexy*self.nYez
self.nZexyz = self.nZexy*self.nZez
#total number of edges
self.nXYZexyz = self.nXexyz + self.nYexyz + self.nZexyz
#quadrilaterals
self.nXYhx=nx-1 #number of XY quadrilaterals in x-dimension
self.nXYhy=ny-1
self.nXYhz=nz
self.nXZhx=nx-1
self.nXZhy=ny
self.nXZhz=nz-1
self.nYZhx=nx
self.nYZhy=ny-1
self.nYZhz=nz-1
#number of quadrilaterals of all types associate with a row of nodes
self.nXY_XZ_YZhx =self.nXYhx + self.nXZhx + self.nYZhx
#number of quadrilaterals associated with an xy plane of nodes
self.nXYhxy=self.nXYhx*self.nXYhy
self.nXZhxy=self.nXZhx*self.nXZhy
self.nYZhxy=self.nYZhx*self.nYZhy
self.nXY_XZ_YZhxy =self.nXYhxy + self.nXZhxy + self.nYZhxy
#number of quadrilaterals of each type in the grid
self.nXYhxyz = self.nXYhxy*self.nXYhz
self.nXZhxyz = self.nXZhxy*self.nXZhz
self.nYZhxyz = self.nYZhxy*self.nYZhz
#total number of quadrilaterals
self.nXY_XZ_YZhxyz =self.nXYhxyz + self.nXZhxyz + self.nYZhxyz
#hexahedra
self.nHx=nx-1
self.nHy=ny-1
self.nHz=nz-1
self.nHxy = self.nHx*self.nHy
self.nHxyz = self.nHxy*self.nHz
#encode red and black
self.black=0
self.red=1
#dimensions of hexahedra
if self.nHx>0:
hx = old_div(float(Lx),(nx-1))
else:
hx = 1.0
if self.nHy>0:
hy = old_div(float(Ly),(ny-1))
else:
hy=1.0
if self.nHz>0:
hz = old_div(float(Lz),(nz-1))
else:
hz=1.0
self.nodeDict={}
self.xedgeDict={}
self.yedgeDict={}
self.zedgeDict={}
self.xedgeList=[]
self.yedgeList=[]
self.zedgeList=[]
self.XYQuadrilateralDict={}
self.XZQuadrilateralDict={}
self.YZQuadrilateralDict={}
self.XYQuadrilateralList=[]
self.XZQuadrilateralList=[]
self.YZQuadrilateralList=[]
self.hexahedronDict={}
self.hexahedronList=[]
self.nodeList=[]
for k in range(self.nz):
for j in range(self.ny):
for i in range(self.nx):
n = self.getNodeNumber(i,j,k)
x = i*hx
y = j*hy
z = k*hz
self.nodeDict[(i,j,k)]=Node(n,x,y,z)
self.nodeList.append(self.nodeDict[(i,j,k)])
for k in range(self.nXez):
for j in range(self.nXey):
for i in range(self.nXex):
en = self.getXEdgeNumber(i,j,k)
self.xedgeDict[(i,j,k)] = Edge(en,
[self.getNode(i,j,k),
self.getNode(i+1,j,k)])
self.xedgeList.append(self.xedgeDict[(i,j,k)])
for k in range(self.nYez):
for j in range(self.nYey):
for i in range(self.nYex):
en = self.getYEdgeNumber(i,j,k)
self.yedgeDict[(i,j,k)] = Edge(en,
[self.getNode(i,j,k),
self.getNode(i,j+1,k)])
self.yedgeList.append(self.yedgeDict[(i,j,k)])
for k in range(self.nZez):
for j in range(self.nZey):
for i in range(self.nZex):
en = self.getZEdgeNumber(i,j,k)
self.zedgeDict[(i,j,k)] = Edge(en,
[self.getNode(i,j,k),
self.getNode(i,j,k+1)])
self.zedgeList.append(self.zedgeDict[(i,j,k)])
for k in range(self.nXYhz):
for j in range(self.nXYhy):
for i in range(self.nXYhx):
qn = self.getXYQuadrilateralNumber(i,j,k)
edges = [self.getXEdge(i,j,k),
self.getXEdge(i,j+1,k),
self.getYEdge(i,j,k),
self.getYEdge(i+1,j,k)]
self.XYQuadrilateralDict[(i,j,k)] = Quadrilateral(qn,edges)
self.XYQuadrilateralList.append(
self.XYQuadrilateralDict[(i,j,k)])
for k in range(self.nXZhz):
for j in range(self.nXZhy):
for i in range(self.nXZhx):
qn = self.getXZQuadrilateralNumber(i,j,k)
edges = [self.getXEdge(i,j,k),
self.getXEdge(i,j,k+1),
self.getZEdge(i,j,k),
self.getZEdge(i+1,j,k)]
self.XZQuadrilateralDict[(i,j,k)] = Quadrilateral(qn,edges)
self.XZQuadrilateralList.append(
self.XZQuadrilateralDict[(i,j,k)])
for k in range(self.nYZhz):
for j in range(self.nYZhy):
for i in range(self.nYZhx):
qn = self.getYZQuadrilateralNumber(i,j,k)
edges = [self.getYEdge(i,j,k),
self.getYEdge(i,j,k+1),
self.getZEdge(i,j,k),
self.getZEdge(i,j+1,k)]
self.YZQuadrilateralDict[(i,j,k)] = Quadrilateral(qn,edges)
self.YZQuadrilateralList.append(
self.YZQuadrilateralDict[(i,j,k)])
for k in range(self.nHz):
for j in range(self.nHy):
for i in range(self.nHx):
Hn = self.getHexahedronNumber(i,j,k)
quadrilaterals = [self.getXYQuadrilateral(i,j,k),
self.getXYQuadrilateral(i,j,k+1),
self.getXZQuadrilateral(i,j,k),
self.getXZQuadrilateral(i,j+1,k),
self.getYZQuadrilateral(i,j,k),
self.getYZQuadrilateral(i+1,j,k)]
self.hexahedronDict[(i,j,k)] = Hexahedron(Hn,
quadrilaterals)
self.hexahedronList.append(self.hexahedronDict[(i,j,k)])
#build lists for mesh base class
self.edgeList = self.xedgeList + \
self.yedgeList + \
self.zedgeList
#figure out if this is a 1D,2D, or 3D grid
if self.nz > 1:
self.elementList = self.hexahedronList
self.elementDict = self.hexahedronDict
elif self.ny > 1:
self.elementList = self.XYQuadrilateralList
self.elementDict = self.XYQuadrilateralDict
else:
self.elementList = self.xedgeList
self.elementDict = self.xedgeDict
#self.buildArraysFromLists()
#todo: extract boundary mesh
def getNodeNumber(self,i,j,k):
return i + j*self.nx + k*self.nxy
def getNode(self,i,j,k):
return self.nodeDict[(i,j,k)]
def getXEdgeNumber(self,ie,je,ke):
return ie + je*self.nXex + ke*self.nXexy
def getYEdgeNumber(self,ie,je,ke):
return ie + je*self.nYex + ke*self.nYexy
def getZEdgeNumber(self,ie,je,ke):
return ie + je*self.nZex + ke*self.nZexy
def getXEdge(self,ie,je,ke):
return self.xedgeDict[(ie,je,ke)]
def getYEdge(self,ie,je,ke):
return self.yedgeDict[(ie,je,ke)]
def getZEdge(self,ie,je,ke):
return self.zedgeDict[(ie,je,ke)]
def getXYQuadrilateralNumber(self,ih,jh,kh):
return ih + jh*self.nXYhx + kh*self.nXYhxy
def getXZQuadrilateralNumber(self,ih,jh,kh):
return ih + jh*self.nXZhx + kh*self.nXZhxy
def getYZQuadrilateralNumber(self,ih,jh,kh):
return ih + jh*self.nYZhx + kh*self.nYZhxy
def getXYQuadrilateral(self,ih,jh,kh):
return self.XYQuadrilateralDict[(ih,jh,kh)]
def getXZQuadrilateral(self,ih,jh,kh):
return self.XZQuadrilateralDict[(ih,jh,kh)]
def getYZQuadrilateral(self,ih,jh,kh):
return self.YZQuadrilateralDict[(ih,jh,kh)]
def getHexahedronNumber(self,iH,jH,kH):
return iH + jH*self.nHx + kH*self.nHxy
def getHexahedron(self,iH,jH,kH):
return self.hexahedronDict[(iH,jH,kH)]
def getColor(self,i,j,k):
return (i%2 + j%2 + k%2)%2
def refine(self,oldMesh,refineFactorX=2,refineFactorY=2,refineFactorZ=2):
NX = oldMesh.nx
NY = oldMesh.ny
NZ = oldMesh.nz
if NX > 1:
NX = (NX-1)*refineFactorX + 1
else:
refineFactorX=1
if NY > 1:
NY = (NY-1)*refineFactorY + 1
else:
refineFactorY=1
if NZ > 1:
NZ = (NZ-1)*refineFactorZ + 1
else:
refineFactorZ=1
RectangularGrid.__init__(self,NX,NY,NZ,
oldMesh.Lx,oldMesh.Ly,oldMesh.Lz)
childrenDict={}
for IJK,e in oldMesh.elementDict.items():
I = IJK[0]
J = IJK[1]
K = IJK[2]
childrenDict[e.N]=[]
for xOffset in range(refineFactorX):
for yOffset in range(refineFactorY):
for zOffset in range(refineFactorZ):
i = I*refineFactorX + xOffset
j = J*refineFactorY + yOffset
k = K*refineFactorZ + zOffset
childrenDict[e.N].append(self.elementDict[(i,j,k)])
return childrenDict
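# Illustrative sketch of RectangularGrid.refine above: refining a 2x2x2-cell
# grid by a factor of 2 in each direction. `fine` starts as a placeholder grid
# that refine() re-initializes; each coarse hexahedron maps to 2*2*2 = 8
# children in the returned dictionary.
def _sketch_refine_rectangular_grid():
    coarse = RectangularGrid(nx=3, ny=3, nz=3, Lx=1.0, Ly=1.0, Lz=1.0)
    fine = RectangularGrid()
    childrenDict = fine.refine(coarse, refineFactorX=2, refineFactorY=2, refineFactorZ=2)
    return fine, childrenDict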
class MultilevelRectangularGrid(MultilevelMesh):
"""A hierarchical multilevel grid"""
def __init__(self,levels,nx,ny=1,nz=1,
Lx=1.0,Ly=1.0,Lz=1.0,
refinementLevels=1):
MultilevelMesh.__init__(self)
self.refineFactorList=[EVec(0,0,0)]
self.meshList.append(RectangularGrid(nx,ny,nz,Lx,Ly,Lz))
self.elementChildren = []
logEvent(self.meshList[0].meshInfo())
for l in range(1,refinementLevels+1):
self.refine()
logEvent(self.meshList[-1].meshInfo())
    def refine(self):
        self.meshList.append(RectangularGrid())
        childrenDict = self.meshList[-1].refine(self.meshList[-2])
        self.elementChildren.append(childrenDict)
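# Illustrative sketch (assumes EVec and logEvent from the surrounding module):
# a 1D multilevel grid with two refinement levels. Each call to refine() above
# appends a finer RectangularGrid and records the parent -> children map in
# elementChildren.
def _sketch_multilevel_rectangular_grid():
    return MultilevelRectangularGrid(levels=2, nx=5, refinementLevels=2)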
class TetrahedralMesh(Mesh):
"""A mesh of tetrahedra.
The nodes, edges, triangles, and tetrahedra are indexed by their
node tuples. The corresponding lists are derived from the dictionaries, and
sorted lexicographically. The global node numbers are redefined to
give a lexicographic ordering.
The mesh can be generated from a rectangular grid and refined using either
4T or Freudenthal-Bey global refinement.
Attributes
----------
elementNodesArray : array_like
A list of lists storing the node values associated with each element
in the triangulation. The first index refers to the element number,
while the second index refers to the global node value.
nodeArray : array_like
        A list of lists storing node coordinates. The first index refers
to the global node number, while the second index refers to the x, y
and z coordinates of the node respectively.
"""
def __init__(self):
Mesh.__init__(self)
self.nodeDict={}
self.edgeDict={}
self.triangleDict={}
self.triangleList=[]
self.tetrahedronDict={}
self.tetrahedronList=[]
self.oldToNewNode=[]
self.boundaryMesh=TriangularMesh()
def meshType(self):
return 'simplex'
def computeGeometricInfo(self):
from . import cmeshTools
cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
def generateTetrahedralMeshFromRectangularGrid(self,nx,ny,nz,Lx,Ly,Lz):
from . import cmeshTools
self.cmesh = cmeshTools.CMesh()
logEvent("Generating grid and mesh")
cmeshTools.generateTetrahedralMeshFromRectangularGrid(nx,ny,nz,Lx,Ly,Lz,self.cmesh)
logEvent("Allocating geometric info")
cmeshTools.allocateGeometricInfo_tetrahedron(self.cmesh)
logEvent("Computing geometric info")
cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
self.buildFromC(self.cmesh)
def rectangularToTetrahedral6T(self,grid):
#copy the nodes from the rectangular mesh
#I want to be able to renumber later without
#changing the grid nodes, so I do deep copies here
self.nodeList = [Node(n.N,n.p[X],n.p[Y],n.p[Z]) for n in grid.nodeList]
self.nodeDict = dict([(n,n) for n in self.nodeList])
for i in range(grid.nHx):
for j in range(grid.nHy):
for k in range(grid.nHz):
#associate the element (i,j,k) with the
#left, front, bottom node
#do a top down numbering to match Ong's dissertation
n1 = self.nodeList[grid.getNodeNumber(i,j,k+1)]
n2 = self.nodeList[grid.getNodeNumber(i,j+1,k+1)]
n3 = self.nodeList[grid.getNodeNumber(i+1,j+1,k+1)]
n4 = self.nodeList[grid.getNodeNumber(i+1,j,k+1)]
n5 = self.nodeList[grid.getNodeNumber(i,j,k)]
n6 = self.nodeList[grid.getNodeNumber(i,j+1,k)]
n7 = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
n8 = self.nodeList[grid.getNodeNumber(i+1,j,k)]
self.newTetrahedron(nodes=[n1,n2,n3,n6])
self.newTetrahedron(nodes=[n1,n3,n5,n6])
self.newTetrahedron(nodes=[n3,n5,n6,n7])
self.newTetrahedron(nodes=[n1,n3,n4,n5])
self.newTetrahedron(nodes=[n3,n4,n5,n7])
self.newTetrahedron(nodes=[n4,n5,n7,n8])
self.finalize()
def rectangularToTetrahedral5T(self,grid):
#copy the nodes from the rectangular mesh
#I want to be able to renumber later without
#changing the grid nodes, so I do deep copies here
self.nodeList = [Node(n.N,n.p[X],n.p[Y],n.p[Z]) for n in grid.nodeList]
self.nodeDict = dict([(n,n) for n in self.nodeList])
for i in range(grid.nHx):
for j in range(grid.nHy):
for k in range(grid.nHz):
#associate the element (i,j,k) with the
#left, front, bottom node
#get the left,front,bottom,node and its color
if (grid.getColor(i,j,k) == grid.black):
b0 = self.nodeList[grid.getNodeNumber(i,j,k)]
rx = self.nodeList[grid.getNodeNumber(i+1,j,k)]
ry = self.nodeList[grid.getNodeNumber(i,j+1,k)]
rz = self.nodeList[grid.getNodeNumber(i,j,k+1)]
r0 = self.nodeList[grid.getNodeNumber(i+1,j+1,k+1)]
bx = self.nodeList[grid.getNodeNumber(i,j+1,k+1)]
by = self.nodeList[grid.getNodeNumber(i+1,j,k+1)]
bz = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
else:
r0 = self.nodeList[grid.getNodeNumber(i,j,k)]
bx = self.nodeList[grid.getNodeNumber(i+1,j,k)]
by = self.nodeList[grid.getNodeNumber(i,j+1,k)]
bz = self.nodeList[grid.getNodeNumber(i,j,k+1)]
b0 = self.nodeList[grid.getNodeNumber(i+1,j+1,k+1)]
rx = self.nodeList[grid.getNodeNumber(i,j+1,k+1)]
ry = self.nodeList[grid.getNodeNumber(i+1,j,k+1)]
rz = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
self.newTetrahedron(nodes=[rx,by,bz,b0])
self.newTetrahedron(nodes=[ry,bz,bx,b0])
self.newTetrahedron(nodes=[rz,b0,bx,by])
self.newTetrahedron(nodes=[r0,bx,by,bz])
self.newTetrahedron(nodes=[b0,bx,by,bz])
self.finalize()
rectangularToTetrahedral = rectangularToTetrahedral6T
def fixLocalNumbering(self):
for TN in range(len(self.tetrahedronList)):
self.tetrahedronList[TN].computeGeometricInfo()
if edet(self.tetrahedronList[TN].linearMap) < 0:
newNodes = list(self.tetrahedronList[TN].nodes)
newNodes[2] = self.tetrahedronList[TN].nodes[1]
newNodes[1] = self.tetrahedronList[TN].nodes[2]
self.tetrahedronList[TN].nodes = newNodes
def finalize(self):
self.buildLists()
#self.fixLocalNumbering()
self.buildBoundaryMaps()
self.buildArraysFromLists()
self.hMax = 0.0
self.hMin = 1.0e16
self.sigmaMax = 0.0
self.totalVolume = 0.0
for T in self.tetrahedronList:
T.computeGeometricInfo()
self.hMax = max(T.diameter,self.hMax)
self.hMin = min(T.diameter,self.hMin)
self.sigmaMax = max(old_div(T.diameter,T.innerDiameter),self.sigmaMax)
self.totalVolume += T.volume
def buildLists(self):
self.buildListsNodes()
self.buildListsEdges()
self.buildListsTriangles()
self.buildListsTetrahedra()
self.elementList = self.tetrahedronList
self.elementBoundaryList = self.triangleList
def buildListsNodes(self):
keyList = list(self.nodeDict.keys())
keyList.sort()
self.nodeList=[]
self.oldToNewNode=list(range(len(self.nodeDict)))
for nN,k in enumerate(keyList):
self.oldToNewNode[self.nodeDict[k].N]=nN
self.nodeDict[k].N = nN
self.nodeList.append(self.nodeDict[k])
def buildListsEdges(self):
keyList = list(self.edgeDict.keys())
keyList.sort()
self.edgeList=[]
for eN,k in enumerate(keyList):
self.edgeDict[k].N = eN
self.edgeList.append(self.edgeDict[k])
def buildListsTriangles(self):
keyList = list(self.triangleDict.keys())
keyList.sort()
self.triangleList=[]
for tN,k in enumerate(keyList):
self.triangleDict[k].N = tN
self.triangleList.append(self.triangleDict[k])
self.polygonList = self.triangleList
def buildListsTetrahedra(self):
keyList = list(self.tetrahedronDict.keys())
keyList.sort()
self.tetrahedronList=[]
for TN,k in enumerate(keyList):
self.tetrahedronDict[k].N = TN
self.tetrahedronList.append(self.tetrahedronDict[k])
self.polyhedronList = self.tetrahedronList
def buildBoundaryMaps(self):
"""
Extract a mapping tn -> list((TN,tnLocal)) that
provides all elements with the boundary face (triangle) tn
and the local triangle number for that triangle.
Likewise build mappings for edges and nodes
        Also extract a list of the triangles with only one associated
element; these are the external boundary triangles. Then extract
the edges and nodes from the boundary triangles.
"""
self.triangleMap=[[] for t in self.triangleList]
self.edgeMap=[[] for e in self.edgeList]
self.nodeMap=[[] for n in self.nodeList]
self.boundaryTriangles=set()
self.interiorTriangles=set()
self.boundaryEdges=set()
self.boundaryNodes=set()
self.interiorEdges=set()
self.interiorNodes=set()
logEvent("Building triangle,edge, and node maps")
for T in self.tetrahedronList:
for localTriangleNumber,t in enumerate(T.triangles):
self.triangleMap[t.N].append((T.N,localTriangleNumber))
for localEdgeNumber,e in enumerate(T.edges):
self.edgeMap[e.N].append((T.N,localEdgeNumber))
for localNodeNumber,n in enumerate(T.nodes):
self.nodeMap[n.N].append((T.N,localNodeNumber))
logEvent("Extracting boundary and interior triangles")
for tN,etList in enumerate(self.triangleMap):
if len(etList) == 1:
self.boundaryTriangles.add(self.triangleList[tN])
else:
self.interiorTriangles.add(self.triangleList[tN])
logEvent("Extracting boundary edges and nodes")
for t in self.boundaryTriangles:
self.boundaryEdges.update(t.edges)
self.boundaryNodes.update(t.nodes)
logEvent("Extracting interior edges and nodes")
for t in self.interiorTriangles:
self.interiorEdges.update(t.edges)
self.interiorNodes.update(t.nodes)
self.boundaryMesh.buildFromSets(self.boundaryTriangles,
self.boundaryEdges,self.boundaryNodes)
def newTetrahedron(self,nodes):
T = Tetrahedron(tetrahedronNumber=len(self.tetrahedronDict),
nodes=nodes)
self.tetrahedronDict[T.nodes] = T
self.registerTriangles(T)
return T
def registerEdges(self,t):
for en,e in enumerate(t.edges):
if e.nodes in self.edgeDict:
t.edges[en]=self.edgeDict[e.nodes]
else:
eN=len(self.edgeDict)
e.N=eN
self.edgeDict[e.nodes]=e
def registerTriangles(self,T):
for tn,t in enumerate(T.triangles):
if t.nodes in self.triangleDict:
T.triangles[tn]=self.triangleDict[t.nodes]
else:
t.N=len(self.triangleDict)
self.triangleDict[t.nodes]=t
self.registerEdges(t)
def registerNode(self,node):
if node in self.nodeDict:
node = self.nodeDict[node]
else:
node.N = len(self.nodeDict)
self.nodeDict[node] = node
return node
def readMeshADH(self,filename,adhBase=1):
meshIn = open(filename+'.3dm','r')
firstLine = meshIn.readline()
firstWords = firstLine.split()
logEvent("Reading object=%s from file=%s" % (firstWords[0],filename))
line = meshIn.readline()
columns = line.split()
tets = []
tetEdges=set()
tetTriangles=set()
logEvent("Reading "+str(filename)+" and building node lists for tetrahedra,triangles, and edges")
        #assume tets are ordered by tet number
while (columns[0] == 'E4T'):
nodeNumbers = [int(c) - adhBase for c in columns[2:6]]
nodeNumbers.sort()
tets.append(array.array('i',nodeNumbers))
tetTriangles.update([(nodeNumbers[1],nodeNumbers[2],nodeNumbers[3]),
(nodeNumbers[0],nodeNumbers[2],nodeNumbers[3]),
(nodeNumbers[0],nodeNumbers[1],nodeNumbers[3]),
(nodeNumbers[0],nodeNumbers[1],nodeNumbers[2])])
tetEdges.update([(nodeNumbers[0],nodeNumbers[1]),
(nodeNumbers[0],nodeNumbers[2]),
(nodeNumbers[0],nodeNumbers[3]),
(nodeNumbers[1],nodeNumbers[2]),
(nodeNumbers[1],nodeNumbers[3]),
(nodeNumbers[2],nodeNumbers[3])])
line = meshIn.readline()
columns = line.split()
print("Building node list and dict")
#assume nodes are ordered by node number
while (len(columns) == 5):
newNode = Node(int(columns[1]) - adhBase,
float(columns[2]),
float(columns[3]),
float(columns[4]))
self.nodeList.append(newNode)
self.nodeDict[newNode]=newNode
line = meshIn.readline()
columns = line.split()
print("Number of tetrahedra:"+str(len(tets)))
print("Number of triangles :"+str(len(tetTriangles)))
print("Number of edges :"+str(len(tetEdges)))
print("Number of nodes :"+str(len(self.nodeList)))
print("Number of objects :"+str(len(tetEdges)+len(tetTriangles)+len(tets)+len(self.nodeList)))
print("Building edge list")
self.edgeList =[Edge(edgeNumber=eN,nodes=[self.nodeList[nN[0]],self.nodeList[nN[1]]]) \
for eN,nN in enumerate(tetEdges)]
print("Building edge dict")
self.edgeDict = dict([(e.nodes,e) for e in self.edgeList])
print("Building triangle list")
self.triangleList =[Triangle(triangleNumber=tN,nodes=[self.nodeList[nN[0]],self.nodeList[nN[1]],self.nodeList[nN[2]]],edgeDict=self.edgeDict) \
for tN,nN in enumerate(tetTriangles)]
print("Building triangle dict")
self.triangleDict = dict([(t.nodes,t) for t in self.triangleList])
print("Building tetredron list")
self.tetrahedronList = [Tetrahedron(tetrahedronNumber=TN,
nodes=[self.nodeList[nN[0]],self.nodeList[nN[1]],self.nodeList[nN[2]],self.nodeList[nN[3]]],
edgeDict=self.edgeDict,
triangleDict=self.triangleDict) \
for TN,nN in enumerate(tets)]
self.elementList = self.tetrahedronList
self.elementBoundaryList = self.triangleList
print("Building tetrahedron dict")
self.tetrahedronDict = dict([(T.nodes,T) for T in self.tetrahedronList])
print("Building boundary maps")
self.buildBoundaryMaps()
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,tCount=0, EB=False):
#print "Warning mwf hack for EB printing for tet writeMeshXdmf for now"
#EB = True
Mesh.writeMeshXdmf(self,ar,name,t,init,meshChanged,"Tetrahedron",tCount,EB=EB)
def writeMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','w')
caseOut.write('FORMAT\n'+'type: ensight gold\n')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Ensight Gold\n')
meshOut.write('Unstructured Tetrahedral Mesh\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
#extents = 'extents\n %12.5E %12.5E\n %12.5E %12.5E\n %12.5E %12.5E\n' % (self.xmin,self.xmax,self.ymin,self.ymax,self.zmin,self.zmax)
#meshOut.write('extents\n'+`self.xmin`+' '+`self.xmax`+'\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('A Mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % self.nNodes_global)
for nN in range(self.nNodes_global):
meshOut.write('%10i\n' % (nN+base))
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,0])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,1])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,2])
meshOut.write('tetra4\n'+'%10i\n' % self.nElements_global)
for eN in range(self.nElements_global):
meshOut.write('%10i\n' % (eN+base))
for eN in range(self.nElements_global):
meshOut.write('%10i%10i%10i%10i\n' % tuple((nN+base) for nN in self.elementNodesArray[eN,:]))
meshOut.close()
def appendMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','a')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Unstructured Tetrahedral Mesh\n\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
#extents = 'extents\n %12.5E %12.5E\n %12.5E %12.5E\n %12.5E %12.5E\n' % (self.xmin,self.xmax,self.ymin,self.ymax,self.zmin,self.zmax)
#meshOut.write('extents\n'+`self.xmin`+' '+`self.xmax`+'\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write("A Mesh")
meshOut.write('coordinates\n'+'%10i\n' % len(self.nodeList))
for n in self.nodeList:
nN = n.N+base
meshOut.write('%10i\n' % nN)
for n in self.nodeList:
meshOut.write('%12.5E\n' % n.p[X])
for n in self.nodeList:
meshOut.write('%12.5E\n' % n.p[Y])
for n in self.nodeList:
meshOut.write('%12.5E\n' % n.p[Z])
meshOut.write('tetra4\n'+'%10i\n' % len(self.elementList))
for e in self.elementList:
eN = e.N + base
meshOut.write('%10i\n' % eN)
for e in self.elementList:
meshOut.write('%10i%10i%10i%10i\n' % tuple(n.N+base for n in e.nodes))
meshOut.close()
def writeMeshADH(self,filename,adhBase=1):
from . import cmeshTools
cmeshTools.write3dmFiles(self.cmesh,filename,adhBase)
def writeBoundaryFacesADH(self,filename,adhBase=1):
boundaryFacesOut=open(filename,'w')
for t in self.boundaryTriangles:
TN = self.triangleMap[t.N][0][0]
T = self.tetrahedronList[TN]
localFaceNumber = self.triangleMap[t.N][0][1]
T.computeGeometricInfo()
DJ = edet(T.linearMap)
if DJ < 0:
#print "Negative determinant ="+`DJ`+" Swapping two nodes"
newNodes = list(T.nodes)
newNodes[3] = T.nodes[2]
newNodes[2] = T.nodes[3]
newBasis = [n - newNodes[0] for n in newNodes[1:]]
newMap = ETen(newBasis[0],newBasis[1],newBasis[2])
#print "New Determinant "+`edet(newMap)`
                #swap local face numbers 2 and 3 to match the node swap above
                if localFaceNumber == 2:
                    localFaceNumber = 3
                elif localFaceNumber == 3:
                    localFaceNumber = 2
line = 'FCS %5i %5i %5i' % \
(T.N + adhBase,
localFaceNumber + adhBase,
1)
#print line
boundaryFacesOut.write(line+'\n')
boundaryFacesOut.close()
def writeBoundaryNodesADH(self,filename,adhBase=1):
boundaryNodesOut=open(filename,'w')
for n in self.boundaryNodes:
line = 'NDS %5i %5i' % \
(n.N + adhBase,
1)
#print line
boundaryNodesOut.write(line+'\n')
boundaryNodesOut.close()
def refine4T(self,oldMesh):
childrenDict={}
for T in oldMesh.tetrahedronList:
#deep copy old nodes because we'll renumber
TNodes = [Node(eN,n.p[X],n.p[Y],n.p[Z]) for eN,n in enumerate(T.nodes)]
for lnN,n in enumerate(TNodes): TNodes[lnN]=self.registerNode(n)
#add new node
T.computeGeometricInfo()
newNode = Node(len(self.nodeDict),
T.barycenter[X],
T.barycenter[Y],
T.barycenter[Z])
newNode = self.registerNode(newNode)
T1=self.newTetrahedron([TNodes[0],TNodes[1],TNodes[2],newNode])
T2=self.newTetrahedron([TNodes[1],TNodes[2],TNodes[3],newNode])
T3=self.newTetrahedron([TNodes[2],TNodes[3],TNodes[0],newNode])
T4=self.newTetrahedron([TNodes[3],TNodes[0],TNodes[1],newNode])
childrenDict[T.N]=[T1,T2,T3,T4]
self.finalize()
return childrenDict
def refineFreudenthalBey(self,oldMesh):
logEvent("Refining the mesh using Freudenthal-Bey refinement")
childrenDict={}
for T in list(oldMesh.tetrahedronDict.values()):
#deep copy old nodes because we'll renumber
TNodes = [Node(nN,n.p[X],n.p[Y],n.p[Z]) for nN,n in \
enumerate(T.nodes)]
for lnN,n in enumerate(TNodes): TNodes[lnN]=self.registerNode(n)
#add new nodes (midpoints of edges)
#use local edge tuples as keys
newNodes={}
for et,en in T.edgeMap.items():
T.edges[en].computeGeometricInfo()
p = T.edges[en].barycenter
newNodes[et] = Node(en,p[X],p[Y],p[Z])
#set the global node numbers
for k,n in newNodes.items(): newNodes[k]=self.registerNode(n)
#add corner tets
T1=self.newTetrahedron([TNodes[0],
newNodes[(0,1)],
newNodes[(0,2)],
newNodes[(0,3)]])
T2=self.newTetrahedron([TNodes[1],
newNodes[(0,1)],
newNodes[(1,2)],
newNodes[(1,3)]])
T3=self.newTetrahedron([TNodes[2],
newNodes[(0,2)],
newNodes[(1,2)],
newNodes[(2,3)]])
T4=self.newTetrahedron([TNodes[3],
newNodes[(0,3)],
newNodes[(1,3)],
newNodes[(2,3)]])
#add center tets
#choose the shortest diagonal of the octahedron
dLengths = [enorm(newNodes[(0,1)].p-newNodes[(2,3)].p),
enorm(newNodes[(0,2)].p-newNodes[(1,3)].p),
enorm(newNodes[(0,3)].p-newNodes[(1,2)].p)]
shortestEdgeLength = min(dLengths)
if shortestEdgeLength == dLengths[0]:
#diagonal (0,1)(2,3)
T5=self.newTetrahedron([newNodes[(0,1)],
newNodes[(2,3)],
newNodes[(0,3)],
newNodes[(1,3)]])
T6=self.newTetrahedron([newNodes[(0,1)],
newNodes[(2,3)],
newNodes[(0,3)],
newNodes[(0,2)]])
T7=self.newTetrahedron([newNodes[(0,1)],
newNodes[(2,3)],
newNodes[(0,2)],
newNodes[(1,2)]])
T8=self.newTetrahedron([newNodes[(0,1)],
newNodes[(2,3)],
newNodes[(1,2)],
newNodes[(1,3)]])
elif shortestEdgeLength == dLengths[1]:
#diagonal (0,2)(1,3)
T5=self.newTetrahedron([newNodes[(0,2)],
newNodes[(1,3)],
newNodes[(0,3)],
newNodes[(2,3)]])
T6=self.newTetrahedron([newNodes[(0,2)],
newNodes[(1,3)],
newNodes[(2,3)],
newNodes[(1,2)]])
T7=self.newTetrahedron([newNodes[(0,2)],
newNodes[(1,3)],
newNodes[(1,2)],
newNodes[(0,1)]])
T8=self.newTetrahedron([newNodes[(0,2)],
newNodes[(1,3)],
newNodes[(0,1)],
newNodes[(0,3)]])
else:
#diagonal (0,3)(1,2)
T5=self.newTetrahedron([newNodes[(0,3)],
newNodes[(1,2)],
newNodes[(0,1)],
newNodes[(1,3)]])
T6=self.newTetrahedron([newNodes[(0,3)],
newNodes[(1,2)],
newNodes[(1,3)],
newNodes[(2,3)]])
T7=self.newTetrahedron([newNodes[(0,3)],
newNodes[(1,2)],
newNodes[(2,3)],
newNodes[(0,2)]])
T8=self.newTetrahedron([newNodes[(0,3)],
newNodes[(1,2)],
newNodes[(0,2)],
newNodes[(0,1)]])
childrenDict[T.N]=[T1,T2,T3,T4,T5,T6,T7,T8]
self.finalize()
return childrenDict
#for debugging: print each tet
#self.edgeList=[]
#Tlist = self.tetrahedronDict.values()
#for T in Tlist:
# self.edgeList = self.edgeList + T.edges
def refine(self,oldMesh):
return self.refineFreudenthalBey(oldMesh)
def generateFromTetgenFiles(self,filebase,base,skipGeometricInit=False,parallel=False):
from . import cmeshTools
logEvent(memory("declaring CMesh"),level=4)
self.cmesh = cmeshTools.CMesh()
logEvent(memory("Initializing CMesh"),level=4)
if parallel:
cmeshTools.generateFromTetgenFilesParallel(self.cmesh,filebase,base)
else:
cmeshTools.generateFromTetgenFiles(self.cmesh,filebase,base)
logEvent(memory("calling cmeshTools.generateFromTetgenFiles","cmeshTools"),level=4)
        if not skipGeometricInit:
cmeshTools.allocateGeometricInfo_tetrahedron(self.cmesh)
cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
self.buildFromC(self.cmesh)
logEvent(memory("calling buildFromC"),level=4)
def generateFrom3DMFile(self,filebase,base=1):
from . import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateFrom3DMFile(self.cmesh,filebase,base)
cmeshTools.allocateGeometricInfo_tetrahedron(self.cmesh)
cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
self.buildFromC(self.cmesh)
def writeTetgenFiles(self,filebase,base):
from . import cmeshTools
cmeshTools.writeTetgenFiles(self.cmesh,filebase,base)
def meshInfo(self):
minfo = """Number of tetrahedra : %d
Number of triangles : %d
Number of edges : %d
Number of nodes : %d
max(sigma_k) : %f
min(h_k) : %f\n""" % (self.nElements_global,
self.nElementBoundaries_global,
self.nEdges_global,
self.nNodes_global,
self.sigmaMax,
self.hMin)
if self.subdomainMesh != self:
sinfo = self.subdomainMesh.meshInfo()
info = "*** Global ***\n" + minfo + "\n*** Local ***\n" + sinfo
return info
return minfo
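# Illustrative sketch of the pure-Python construction path above: tetrahedralize
# a 2x2x2-cell rectangular grid with the 6T subdivision. The 6T rule produces
# 6 tetrahedra per hexahedron, so 8 * 6 = 48 elements are expected; finalize()
# (called inside rectangularToTetrahedral) relies on the Mesh base-class array
# construction and fills hMax, hMin, and sigmaMax.
def _sketch_tetrahedral_mesh_from_grid():
    grid = RectangularGrid(nx=3, ny=3, nz=3, Lx=1.0, Ly=1.0, Lz=1.0)
    mesh = TetrahedralMesh()
    mesh.rectangularToTetrahedral(grid)
    return mesh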
class HexahedralMesh(Mesh):
"""A mesh of hexahedra.
"""
def __init__(self):
Mesh.__init__(self)
self.nodeDict={}
self.edgeDict={}
self.faceDict={}
self.faceList=[]
self.elemDict={}
self.elemList=[]
self.oldToNewNode=[]
self.boundaryMesh=QuadrilateralMesh()
def meshType(self):
return 'cuboid'
def computeGeometricInfo(self):
from . import cmeshTools
print("no info yet for hexahedral mesh")
#cmeshTools.computeGeometricInfo_tetrahedron(self.cmesh)
def generateHexahedralMeshFromRectangularGrid(self,nx,ny,nz,Lx,Ly,Lz):
from . import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateHexahedralMeshFromRectangularGrid(nx,ny,nz,0,0,0,Lx,Ly,Lz,self.cmesh)
cmeshTools.allocateGeometricInfo_hexahedron(self.cmesh)
cmeshTools.computeGeometricInfo_hexahedron(self.cmesh)
self.buildFromC(self.cmesh)
def finalize(self):
self.buildLists()
#self.fixLocalNumbering()
self.buildBoundaryMaps()
self.buildArraysFromLists()
self.hMax = 0.0
self.hMin = 1.0e16
self.sigmaMax = 0.0
self.totalVolume = 0.0
        for T in self.elemList:
T.computeGeometricInfo()
self.hMax = max(T.diameter,self.hMax)
self.hMin = min(T.diameter,self.hMin)
self.sigmaMax = max(old_div(T.diameter,T.innerDiameter),self.sigmaMax)
self.totalVolume += T.volume
def buildLists(self):
self.buildListsNodes()
self.buildListsEdges()
self.buildListsFaces()
self.buildListsElems()
self.elementList = self.elemList
self.elementBoundaryList = self.faceList
def buildListsNodes(self):
keyList = list(self.nodeDict.keys())
keyList.sort()
self.nodeList=[]
self.oldToNewNode=list(range(len(self.nodeDict)))
for nN,k in enumerate(keyList):
self.oldToNewNode[self.nodeDict[k].N]=nN
self.nodeDict[k].N = nN
self.nodeList.append(self.nodeDict[k])
def buildListsEdges(self):
keyList = list(self.edgeDict.keys())
keyList.sort()
self.edgeList=[]
for eN,k in enumerate(keyList):
self.edgeDict[k].N = eN
self.edgeList.append(self.edgeDict[k])
def buildListsFaces(self):
keyList = list(self.faceDict.keys())
keyList.sort()
        self.faceList=[]
for tN,k in enumerate(keyList):
self.faceDict[k].N = tN
self.faceList.append(self.faceDict[k])
self.polygonList = self.faceList
def buildListsElems(self):
keyList = list(self.elemDict.keys())
keyList.sort()
self.elemList=[]
for TN,k in enumerate(keyList):
self.elemDict[k].N = TN
self.elemList.append(self.elemDict[k])
self.polyhedronList = self.elemList
def buildBoundaryMaps(self):
"""
Extract a mapping tn -> list((TN,tnLocal)) that
provides all elements with the boundary face tn
        and the local face number for that face.
        Likewise build mappings for edges and nodes.
        Also extract a list of the faces with only one associated
        element; these are the external boundary faces. Then extract
        the edges and nodes from the boundary faces.
"""
self.faceMap=[[] for t in self.faceList]
self.edgeMap=[[] for e in self.edgeList]
self.nodeMap=[[] for n in self.nodeList]
        self.boundaryFaces=set()
        self.interiorFaces=set()
self.boundaryEdges=set()
self.boundaryNodes=set()
self.interiorEdges=set()
self.interiorNodes=set()
logEvent("Building triangle,edge, and node maps")
for T in self.elemList:
for localFaceNumber,t in enumerate(T.faces):
self.faceMap[t.N].append((T.N,localFaceNumber))
for localEdgeNumber,e in enumerate(T.edges):
self.edgeMap[e.N].append((T.N,localEdgeNumber))
for localNodeNumber,n in enumerate(T.nodes):
self.nodeMap[n.N].append((T.N,localNodeNumber))
logEvent("Extracting boundary and interior triangles")
for tN,etList in enumerate(self.faceMap):
if len(etList) == 1:
self.boundaryFaces.add(self.faceList[tN])
else:
self.interiorFaces.add(self.faceList[tN])
logEvent("Extracting boundary edges and nodes")
        for t in self.boundaryFaces:
            self.boundaryEdges.update(t.edges)
            self.boundaryNodes.update(t.nodes)
        logEvent("Extracting interior edges and nodes")
        for t in self.interiorFaces:
            self.interiorEdges.update(t.edges)
            self.interiorNodes.update(t.nodes)
self.boundaryMesh.buildFromSets(self.boundaryFaces,
self.boundaryEdges,self.boundaryNodes)
def registerEdges(self,t):
for en,e in enumerate(t.edges):
if e.nodes in self.edgeDict:
t.edges[en]=self.edgeDict[e.nodes]
else:
eN=len(self.edgeDict)
e.N=eN
self.edgeDict[e.nodes]=e
def registerFaces(self,T):
for tn,t in enumerate(T.faces):
if t.nodes in self.faceDict:
T.faces[tn]=self.faceDict[t.nodes]
else:
t.N=len(self.faceDict)
self.faceDict[t.nodes]=t
self.registerEdges(t)
def registerNode(self,node):
if node in self.nodeDict:
node = self.nodeDict[node]
else:
node.N = len(self.nodeDict)
self.nodeDict[node] = node
return node
# def refine(self,oldMesh):
# return self.refineFreudenthalBey(oldMesh)
def meshInfo(self):
minfo = """Number of hexahedra : %d
Number of faces : %d
Number of edges : %d
Number of nodes : %d
max(sigma_k) : %f
min(h_k) : %f\n""" % (self.nElements_global,
self.nElementBoundaries_global,
self.nEdges_global,
self.nNodes_global,
self.sigmaMax,
self.hMin)
if self.subdomainMesh != self:
sinfo = self.subdomainMesh.meshInfo()
info = "*** Global ***\n" + minfo + "\n*** Local ***\n" + sinfo
return info
return minfo
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,tCount=0,EB=False):
Mesh.writeMeshXdmf(self,ar,name,t,init,meshChanged,"Hexahedron",tCount,EB=EB)
def generateFromHexFile(self,filebase,base=0):
from . import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateFromHexFile(self.cmesh,filebase,base)
cmeshTools.allocateGeometricInfo_hexahedron(self.cmesh)
cmeshTools.computeGeometricInfo_hexahedron(self.cmesh)
self.buildFromC(self.cmesh)
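# Illustrative sketch: generating a hexahedral mesh through the compiled
# cmeshTools extension, as wrapped by the method above. This only runs where
# the C extension is built; the 3x3x3 node counts and unit box are arbitrary.
def _sketch_hexahedral_mesh_from_grid():
    mesh = HexahedralMesh()
    mesh.generateHexahedralMeshFromRectangularGrid(3, 3, 3, 1.0, 1.0, 1.0)
    return mesh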
class Mesh2DM(Mesh):
"""A triangular mesh based on an ADH 3dm file"""
def __init__(self,filename,adhBase=1):
meshIn = open(filename+'.3dm','r')
firstLine = meshIn.readline()
firstWords = firstLine.split()
logEvent("Reading object=%s from file=%s" % (firstWords[0],filename))
line = meshIn.readline()
columns = line.split()
        #read in the triangles and nodes as memory-efficiently as possible
tn0 = array.array('i')
tn1 = array.array('i')
tn2 = array.array('i')
material = array.array('i')
nx = array.array('d')
ny = array.array('d')
nz = array.array('d')
print("Reading "+str(filename))
        #assume triangles are ordered by element number
while (len(columns) > 0 and (columns[0] == 'E3T' or columns[0] == 'GE3')):
tn0.append(int(columns[2]))
tn1.append(int(columns[3]))
tn2.append(int(columns[4]))
material.append(int(columns[5]))
line = meshIn.readline()
columns = line.split()
#allow for missing lines
while (len(columns) == 0):
line = meshIn.readline()
columns = line.split()
#assume nodes are ordered by node number
while (len(columns) == 5):
nx.append(float(columns[2]))
ny.append(float(columns[3]))
nz.append(float(columns[4]))
line = meshIn.readline()
columns = line.split()
meshIn.close()
print("Allocating node and element arrays")
self.nTriangles_global = len(tn0)
self.triangleArray = np.zeros(
(self.nTriangles_global,3),'i')
tA = self.triangleArray
self.triangleMaterialArray = np.zeros(
(self.nTriangles_global,),'i')
tMA = self.triangleMaterialArray
self.nNodes_global = len(nx)
self.nodeArray = np.zeros((self.nNodes_global,3),'d')
for tN in range(self.nTriangles_global):
tA[tN,0] = tn0[tN] - adhBase
tA[tN,1] = tn1[tN] - adhBase
tA[tN,2] = tn2[tN] - adhBase
tMA[tN] = material[tN] - adhBase
for nN in range(self.nNodes_global):
self.nodeArray[nN,0]= nx[nN]
self.nodeArray[nN,1]= ny[nN]
self.nodeArray[nN,2]= nz[nN]
print("Deleting temporary storage")
del tn0,tn1,tn2,nx,ny,nz
self.nElements_global = self.nTriangles_global
self.elementNodesArray = self.triangleArray
self.elementMaterialTypes = self.triangleMaterialArray
print("Number of triangles:"+str(self.nElements_global))
print("Number of nodes :"+str(self.nNodes_global))
#archive with Xdmf
self.nNodes_element = 3
self.arGridCollection = None
self.arGrid = None; self.arTime = None
def buildEdgeArrays(self):
print("Extracting edges triangles dictionary")
edges_triangles={}
t=self.triangleArray
self.nInteriorEdges_global=0
for N in range(self.nTriangles_global):
#sort node numbers so the nodes can
#uniquely identify the triangles/edges
n = list(t[N,:])
n.sort()
edges = [(n[0],n[1]),
(n[0],n[2]),
(n[1],n[2])]
            for edgeNodes in edges:
                if edgeNodes in edges_triangles:
                    edges_triangles[edgeNodes].append(N)
                    self.nInteriorEdges_global+=1
                else:
                    edges_triangles[edgeNodes]=[N]
print("Building edge and exterior arrays")
self.nEdges_global = len(edges_triangles)
self.edgeArray = np.zeros(
(self.nEdges_global,2),'i')
self.edgeMaterialArray = np.zeros(
(self.nEdges_global,2),'i')
self.interiorEdgeArray = np.zeros(
(self.nInteriorEdges_global,),'i')
self.nExteriorEdges_global = self.nEdges_global - \
self.nInteriorEdges_global
self.exteriorEdgeArray = np.zeros(
(self.nExteriorEdges_global,),'i')
eN=0
ieN=0
eeN=0
exteriorNodes=set()
eA = self.edgeArray
eMA = self.edgeMaterialArray
tMA = self.triangleMaterialArray
for eNodes,tlist in edges_triangles.items():
eA[eN,0]=eNodes[0]
eA[eN,1]=eNodes[1]
if len(tlist)==2:
self.interiorEdgeArray[ieN]=eN
eMA[eN][0]= tMA[tlist[0]]
                eMA[eN][1]= tMA[tlist[1]]
ieN+=1
else:
                exteriorNodes.update(eNodes)
self.exteriorEdgeArray[eeN]=eN
eMA[eN][0]=tMA[tlist[0]]
eeN+=1
eN+=1
self.nExteriorNodes_global = len(exteriorNodes)
self.exteriorNodeArray = np.zeros(
(self.nExteriorNodes_global,),'i')
self.globalToExteriorNodeArray = np.zeros(
(self.nNodes_global,),'i')
for nExtN,nN in enumerate(exteriorNodes):
self.exteriorNodeArray[nExtN]=nN
self.globalToExteriorNodeArray[nN]=nExtN
print("Number of edges :"+str(self.nEdges_global))
print("Number on interior :"+str(self.nInteriorEdges_global))
print("Number on exterior :"+str(self.nExteriorEdges_global))
print("Number of exterior nodes:"+str(self.nExteriorNodes_global))
#at this point we can easily build a boundary mesh by renumbering using
#exteriorNodeArray and exteriorEdgeArray to renumber
#and the info in nodeArray and edgeArray
def writeBoundaryMeshADH(self,filename,adhBase=1):
#I'll print it using node numbers from the 3D mesh
meshOut = open(filename+'Boundary.3dm','w')
meshOut.write('MESH1D\n')
for eeN in range(self.nExteriorEdges_global):
eN = self.exteriorEdgeArray[eeN]
n0 = self.edgeArray[eN][0] + adhBase
n1 = self.edgeArray[eN][1] + adhBase
m = self.edgeMaterialArray[eN][0] + adhBase
line = 'E3T %5i %5i %5i %5i' % \
                (eeN+adhBase,n0,n1,m)
meshOut.write(line+'\n')
meshOut.close()
def writeMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','w')
caseOut.write('FORMAT\n'+'type: ensight gold\n')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Ensight Gold\n')
meshOut.write('Unstructured Triangular Mesh\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
#extents = 'extents\n %12.5E %12.5E\n %12.5E %12.5E\n %12.5E %12.5E\n' % (self.xmin,self.xmax,self.ymin,self.ymax,self.zmin,self.zmax)
#meshOut.write('extents\n'+`self.xmin`+' '+`self.xmax`+'\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('A Mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % self.nNodes_global)
for nN in range(self.nNodes_global):
ensightNodeNumber = (nN+base)
meshOut.write('%10i\n' % ensightNodeNumber)
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,0])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,1])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,2])
meshOut.write('tria3\n'+'%10i\n' % self.nTriangles_global)
for tN in range(self.nTriangles_global):
ensightElementNumber = tN + base
meshOut.write('%10i\n' % ensightElementNumber)
tA = self.triangleArray
for tN in range(self.nTriangles_global):
meshOut.write('%10i%10i%10i\n' % (tA[tN,0]+base,tA[tN,1]+base,tA[tN,2]+base))
meshOut.close()
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,Xdmf_ElementTopology="Triangle",tCount=0):
if self.arGridCollection is not None:
init = False
elif not init:
self.arGridCollection = ar.domain.find("Grid")
if init:
self.arGridCollection = SubElement(ar.domain,"Grid",{"Name":"Mesh "+name,
"GridType":"Collection",
"CollectionType":"Temporal"})
if self.arGrid is None or self.arTime.get('Value') != "{0:e}".format(t):
#
#topology and geometry
#
if ar.global_sync:
self.arGrid = SubElement(self.arGridCollection,"Grid",{"GridType":"Uniform"})
self.arTime = SubElement(self.arGrid,"Time",{"Value":str(t),"Name":str(tCount)})
topology = SubElement(self.arGrid,"Topology",
{"Type":Xdmf_ElementTopology,
"NumberOfElements":"%i" % (self.globalMesh.nElements_global,)})
elements = SubElement(topology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.globalMesh.nElements_global,
self.nNodes_element)})
geometry = SubElement(self.arGrid,"Geometry",{"Type":"XYZ"})
nodes = SubElement(geometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.globalMesh.nNodes_global,3)})
#material types
elementMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"elementMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementMaterialTypesValues = SubElement(elementMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.globalMesh.nElements_global,)})
if ar.hdfFile is not None:
elements.text = ar.hdfFilename+":/elements"+name+str(tCount)
nodes.text = ar.hdfFilename+":/nodes"+name+str(tCount)
elementMaterialTypesValues.text = ar.hdfFilename+":/"+"elementMaterialTypes"+"_t"+str(tCount)
if init or meshChanged:
ar.create_dataset_sync('elements'+name+str(tCount),
offsets = self.globalMesh.elementOffsets_subdomain_owned,
data = self.globalMesh.nodeNumbering_subdomain2global[self.elementNodesArray[:self.nElements_owned]])
ar.create_dataset_sync('nodes'+name+str(tCount),
offsets = self.globalMesh.nodeOffsets_subdomain_owned,
data = self.nodeArray[:self.nNodes_owned])
ar.create_dataset_sync("elementMaterialTypes"+"_t"+str(tCount),
offsets = self.globalMesh.elementOffsets_subdomain_owned,
data = self.elementMaterialTypes[:self.nElements_owned])
else:
assert False, "global_sync with text heavy data not supported"
else:
self.arGrid = SubElement(self.arGridCollection,"Grid",{"GridType":"Uniform"})
self.arTime = SubElement(self.arGrid,"Time",{"Value":str(t),"Name":str(tCount)})
topology = SubElement(self.arGrid,"Topology",
{"Type":Xdmf_ElementTopology,
"NumberOfElements":"%i" % (self.nElements_owned,)})
elements = SubElement(topology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.nElements_owned,self.nNodes_element)})
geometry = SubElement(self.arGrid,"Geometry",{"Type":"XYZ"})
nodes = SubElement(geometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.nNodes_global,3)})
#material types
elementMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"elementMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementMaterialTypesValues = SubElement(elementMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.nElements_owned,)})
if ar.hdfFile is not None:
elements.text = ar.hdfFilename+":/elements"+str(ar.comm.rank())+name+str(tCount)
nodes.text = ar.hdfFilename+":/nodes"+str(ar.comm.rank())+name+str(tCount)
elementMaterialTypesValues.text = ar.hdfFilename+":/"+"elementMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount)
if init or meshChanged:
ar.create_dataset_async('elements'+str(ar.comm.rank())+name+str(tCount), data = self.elementNodesArray[:self.nElements_owned])
ar.create_dataset_async('nodes'+str(ar.comm.rank())+name+str(tCount), data = self.nodeArray)
ar.create_dataset_async("elementMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount), data = self.elementMaterialTypes[:self.nElements_owned])
else:
SubElement(elements,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/elements"+name+".txt"})
SubElement(nodes,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/nodes"+name+".txt"})
SubElement(elementMaterialTypesValues,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/"+"elementMaterialTypes"+str(tCount)+".txt"})
if init or meshChanged:
np.savetxt(ar.textDataDir+"/elements"+name+".txt",self.elementNodesArray[:self.nElements_owned],fmt='%d')
np.savetxt(ar.textDataDir+"/nodes"+name+".txt",self.nodeArray)
np.savetxt(ar.textDataDir+"/"+"elementMaterialTypes"+str(tCount)+".txt",self.elementMaterialTypes[:self.nElements_owned])
class Mesh3DM(Mesh):
"""
A Mesh for reading in tetrahedral meshes in the .3dm format
"""
def __init__(self,filename,adhBase=1):
meshIn = open(filename+'.3dm','r')
firstLine = meshIn.readline()
firstWords = firstLine.split()
print("Reading object=%s from file=%s" % (firstWords[0],filename))
line = meshIn.readline()
columns = line.split()
#read in the tetrahedra and nodes as memory-efficiently as possible
Tn0 = array.array('i')
Tn1 = array.array('i')
Tn2 = array.array('i')
Tn3 = array.array('i')
material = array.array('i')
nx = array.array('d')
ny = array.array('d')
nz = array.array('d')
print("Reading "+str(filename))
#assume tets are ordered by tet number
while (len(columns) > 0 and (columns[0] == 'E4T' or columns[0] == 'GE4')):
Tn0.append(int(columns[2]))
Tn1.append(int(columns[3]))
Tn2.append(int(columns[4]))
Tn3.append(int(columns[5]))
material.append(int(columns[6]))
line = meshIn.readline()
columns = line.split()
#assume nodes are ordered by node number
while (len(columns) == 5):
nx.append(float(columns[2]))
ny.append(float(columns[3]))
nz.append(float(columns[4]))
line = meshIn.readline()
columns = line.split()
meshIn.close()
print("Allocating node and element arrays")
self.nTetrahedra_global = len(Tn0)
self.tetrahedronArray = np.zeros(
(self.nTetrahedra_global,4),'i')
TA = self.tetrahedronArray
self.tetrahedronMaterialArray = np.zeros(
(self.nTetrahedra_global,),'i')
TMA = self.tetrahedronMaterialArray
self.nNodes_global = len(nx)
self.nodeArray = np.zeros((self.nNodes_global,3),'d')
for TN in range(self.nTetrahedra_global):
TA[TN,0] = Tn0[TN] - adhBase
TA[TN,1] = Tn1[TN] - adhBase
TA[TN,2] = Tn2[TN] - adhBase
TA[TN,3] = Tn3[TN] - adhBase
TMA[TN] = material[TN] - adhBase
for nN in range(self.nNodes_global):
self.nodeArray[nN,0]= nx[nN]
self.nodeArray[nN,1]= ny[nN]
self.nodeArray[nN,2]= nz[nN]
print("Deleting temporary storage")
del Tn0,Tn1,Tn2,Tn3,nx,ny,nz
self.nElements_global = self.nTetrahedra_global
self.elementNodesArray = self.tetrahedronArray
self.elementMaterialTypes = self.tetrahedronMaterialArray
self.arGridCollection=None
print("Number of tetrahedra:"+str(self.nElements_global))
print("Number of nodes :"+str(self.nNodes_global))
def buildTriangleArrays(self):
print("Extracting triangles tetrahedra dictionary")
triangles_tetrahedra={}
T=self.tetrahedronArray
self.nInteriorTriangles_global=0
for N in range(self.nTetrahedra_global):
#sort node numbers so the nodes can
#uniquely identify the triangles/edges
n = list(T[N,:])
n.sort()
triangles = [(n[0],n[1],n[2]),
(n[0],n[1],n[3]),
(n[0],n[2],n[3]),
(n[1],n[2],n[3])]
for t in triangles:
if t in triangles_tetrahedra:
triangles_tetrahedra[t].append(N)
self.nInteriorTriangles_global+=1
else:
triangles_tetrahedra[t]=[N]
print("Building triangle and exterior arrays")
self.nTriangles_global = len(triangles_tetrahedra)
self.triangleArray = np.zeros(
(self.nTriangles_global,3),'i')
self.triangleMaterialArray = np.zeros(
(self.nTriangles_global,2),'i')
self.interiorTriangleArray = np.zeros(
(self.nInteriorTriangles_global,),'i')
self.nExteriorTriangles_global = self.nTriangles_global - \
self.nInteriorTriangles_global
self.exteriorTriangleArray = np.zeros(
(self.nExteriorTriangles_global,),'i')
tN=0
itN=0
etN=0
exteriorNodes=set()
tA = self.triangleArray
tMA = self.triangleMaterialArray
TMA = self.tetrahedronMaterialArray
for tNodes,Tlist in triangles_tetrahedra.items():
tA[tN,0]=tNodes[0]
tA[tN,1]=tNodes[1]
tA[tN,2]=tNodes[2]
if len(Tlist)==2:
self.interiorTriangleArray[itN]=tN
tMA[tN][0]= TMA[Tlist[0]]
tMA[tN][1]= TMA[Tlist[1]]
itN+=1
else:
exteriorNodes.update(tNodes)
self.exteriorTriangleArray[etN]=tN
tMA[tN][0]=TMA[Tlist[0]]
etN+=1
tN+=1
self.nExteriorNodes_global = len(exteriorNodes)
self.exteriorNodeArray = np.zeros(
(self.nExteriorNodes_global,),'i')
self.globalToExteriorNodeArray = np.zeros(
(self.nNodes_global,),'i')
for nExtN,nN in enumerate(exteriorNodes):
self.exteriorNodeArray[nExtN]=nN
self.globalToExteriorNodeArray[nN]=nExtN
print("Number of triangles :"+str(self.nTriangles_global))
print("Number on interior :"+str(self.nInteriorTriangles_global))
print("Number on exterior :"+str(self.nExteriorTriangles_global))
print("Number of exterior nodes:"+str(self.nExteriorNodes_global))
#at this point we can easily build a boundary mesh by renumbering using
#exteriorNodeArray and exteriorTriangleArray to renumber
#and the info in nodeArray and triangleArray
def buildEdgeArray(self):
print("Extracting set of edges")
edges = set()
t=self.triangleArray
for N in range(self.nTriangles_global):
#triangle nodes are assumed sorted
edges.update([(t[N,0],t[N,1]),
(t[N,0],t[N,2]),
(t[N,1],t[N,2])])
print("Building edgeArray")
self.nEdges_global = len(edges)
self.edgeArray = np.zeros(
(self.nEdges_global,2),'i')
for eN,e in enumerate(edges):
self.edgeArray[eN][0] = e[0]
self.edgeArray[eN][1] = e[1]
del edges
print("Number of edges :"+str(self.nEdges_global))
def writeBoundaryMeshADH(self,filename,adhBase=1):
#I'll print it using node numbers from the 3D mesh
meshOut = open(filename+'Boundary.3dm','w')
meshOut.write('MESH2D\n')
for tN in self.exteriorTriangleArray:
n0 = self.triangleArray[tN][0] + adhBase
n1 = self.triangleArray[tN][1] + adhBase
n2 = self.triangleArray[tN][2] + adhBase
m = self.triangleMaterialArray[tN][0] + adhBase
line = 'E3T %5i %5i %5i %5i %5i' % \
(tN+adhBase,n0,n1,n2,m)
meshOut.write(line+'\n')
for nN in self.exteriorNodeArray:
n = self.nodeArray[nN]
line = 'ND %5i %14.8e %14.8e %14.8e' % \
(nN + adhBase,n[0],n[1],n[2])
#print line
meshOut.write(line+'\n')
meshOut.close()
def writeMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','w')
caseOut.write('FORMAT\n'+'type: ensight gold\n')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Ensight Gold\n')
meshOut.write('Unstructured Tetrahedral Mesh\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
#extents = 'extents\n %12.5E %12.5E\n %12.5E %12.5E\n %12.5E %12.5E\n' % (self.xmin,self.xmax,self.ymin,self.ymax,self.zmin,self.zmax)
#meshOut.write('extents\n'+`self.xmin`+' '+`self.xmax`+'\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('A Mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % self.nNodes_global)
for nN in range(self.nNodes_global):
ensightNodeNumber = (nN+base)
meshOut.write('%10i\n' % ensightNodeNumber)
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,0])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,1])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,2])
meshOut.write('tetra4\n'+'%10i\n' % self.nTetrahedra_global)
for TN in range(self.nTetrahedra_global):
ensightElementNumber = TN + base
meshOut.write('%10i\n' % ensightElementNumber)
TA = self.tetrahedronArray
for TN in range(self.nTetrahedra_global):
meshOut.write('%10i%10i%10i%10i\n' % (TA[TN,0]+base,
TA[TN,1]+base,
TA[TN,2]+base,
TA[TN,3]+base))
meshOut.close()
def writeBoundaryMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'Boundary.case','w')
caseOut.write('FORMAT\n'+'type: ensight gold\n')
caseOut.write('GEOMETRY\n'+'model: '+filename+'Boundary.geo\n')
caseOut.close()
meshOut=open(filename+'Boundary.geo','w')
meshOut.write('Unstructured Triangular Surface Mesh\n\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('A Mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % self.nExteriorNodes_global)
for nN in range(self.nExteriorNodes_global):
ensightNodeNumber = (nN+base)
meshOut.write('%10i\n' % ensightNodeNumber)
for nN in range(self.nExteriorNodes_global):
meshOut.write('%12.5E\n' %
self.nodeArray[self.exteriorNodeArray[nN],0])
for nN in range(self.nExteriorNodes_global):
meshOut.write('%12.5E\n' %
self.nodeArray[self.exteriorNodeArray[nN],1])
for nN in range(self.nExteriorNodes_global):
meshOut.write('%12.5E\n' %
self.nodeArray[self.exteriorNodeArray[nN],2])
meshOut.write('tria3\n'+'%10i\n' % self.nExteriorTriangles_global)
for tN in range(self.nExteriorTriangles_global):
ensightElementNumber = tN + base
meshOut.write('%10i\n' % ensightElementNumber)
tA = self.triangleArray
for tN in self.exteriorTriangleArray:
meshOut.write('%10i%10i%10i\n' %
(self.globalToExteriorNodeArray[tA[tN,0]]+base,
self.globalToExteriorNodeArray[tA[tN,1]]+base,
self.globalToExteriorNodeArray[tA[tN,2]]+base))
meshOut.close()
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,Xdmf_ElementTopology="Tetrahedron",tCount=0):
if self.arGridCollection is not None:
init = False
elif not init:
self.arGridCollection = ar.domain.find("Grid")
if init:
self.arGridCollection = SubElement(ar.domain,"Grid",{"Name":"Mesh "+name,
"GridType":"Collection",
"CollectionType":"Temporal"})
if self.arGrid is None or self.arTime.get('Value') != "{0:e}".format(t):
if ar.global_sync:
#
#topology and geometry
#
self.arGrid = SubElement(self.arGridCollection,"Grid",{"GridType":"Uniform"})
self.arTime = SubElement(self.arGrid,"Time",{"Value":str(t),"Name":str(tCount)})
topology = SubElement(self.arGrid,"Topology",
{"Type":Xdmf_ElementTopology,
"NumberOfElements":"%i" % (self.globalMesh.nElements_global,)})
elements = SubElement(topology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.globalMesh.nElements_owned,
self.nNodes_element)})
geometry = SubElement(self.arGrid,"Geometry",{"Type":"XYZ"})
nodes = SubElement(geometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.globalMesh.nNodes_global,3)})
#material types
elementMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"elementMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementMaterialTypesValues = SubElement(elementMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.globalMesh.nElements_owned,)})
if ar.hdfFile is not None:
elements.text = ar.hdfFilename+":/elements"+name+str(tCount)
nodes.text = ar.hdfFilename+":/nodes"+name+str(tCount)
elementMaterialTypesValues.text = ar.hdfFilename+":/"+"elementMaterialTypes"+"_t"+str(tCount)
if init or meshChanged:
ar.create_dataset_sync('elements'+name+str(tCount),
offsets = self.globalMesh.elementOffsets_subdomain_owned,
data = self.globalMesh.nodeNumbering_subdomain2global[self.elementNodesArray[:self.nElements_owned]])
ar.create_dataset_sync('nodes'+name+str(tCount),
offsets = self.globalMesh.nodeOffsets_subdomain_owned,
data = self.nodeArray[:self.nNodes_owned])
ar.create_dataset_sync("elementMaterialTypes"+"_t"+str(tCount),
offsets = self.globalMesh.elementOffsets_subdomain_owned,
data = self.elementMaterialTypes[:self.nElements_owned])
else:
assert False, "global_sync not supported with text heavy data"
else:
#
#topology and geometry
#
self.arGrid = SubElement(self.arGridCollection,"Grid",{"GridType":"Uniform"})
self.arTime = SubElement(self.arGrid,"Time",{"Value":str(t),"Name":str(tCount)})
topology = SubElement(self.arGrid,"Topology",
{"Type":Xdmf_ElementTopology,
"NumberOfElements":"%i" % (self.nElements_owned,)})
elements = SubElement(topology,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i %i" % (self.nElements_owned,self.nNodes_element)})
geometry = SubElement(self.arGrid,"Geometry",{"Type":"XYZ"})
nodes = SubElement(geometry,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Float",
"Precision":"8",
"Dimensions":"%i %i" % (self.nNodes_global,3)})
#material types
elementMaterialTypes = SubElement(self.arGrid,"Attribute",{"Name":"elementMaterialTypes",
"AttributeType":"Scalar",
"Center":"Cell"})
elementMaterialTypesValues = SubElement(elementMaterialTypes,"DataItem",
{"Format":ar.dataItemFormat,
"DataType":"Int",
"Dimensions":"%i" % (self.nElements_owned,)})
if ar.hdfFile is not None:
elements.text = ar.hdfFilename+":/elements"+str(ar.comm.rank())+name+str(tCount)
nodes.text = ar.hdfFilename+":/nodes"+str(ar.comm.rank())+name+str(tCount)
elementMaterialTypesValues.text = ar.hdfFilename+":/"+"elementMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount)
if init or meshChanged:
ar.create_dataset_async('elements'+str(ar.comm.rank())+name+str(tCount), data = self.elementNodesArray[:self.nElements_owned])
ar.create_dataset_async('nodes'+str(ar.comm.rank())+name+str(tCount), data = self.nodeArray)
ar.create_dataset_async("elementMaterialTypes"+"_p"+str(ar.comm.rank())+"_t"+str(tCount), data = self.elementMaterialTypes[:self.nElements_owned])
else:
SubElement(elements,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/elements"+name+".txt"})
SubElement(nodes,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/nodes"+name+".txt"})
SubElement(elementMaterialTypesValues,"xi:include",{"parse":"text","href":"./"+ar.textDataDir+"/"+"elementMaterialTypes"+str(tCount)+".txt"})
if init or meshChanged:
np.savetxt(ar.textDataDir+"/elements"+name+".txt",self.elementNodesArray[:self.nElements_owned],fmt='%d')
np.savetxt(ar.textDataDir+"/nodes"+name+".txt",self.nodeArray)
np.savetxt(ar.textDataDir+"/"+"elementMaterialTypes"+str(tCount)+".txt",self.elementMaterialTypes[:self.nElements_owned])
class MultilevelTetrahedralMesh(MultilevelMesh):
"""A hierarchical multilevel mesh with tetrahedral cells"""
def __init__(self,
nx, ny, nz,
x=0.0, y=0.0, z=0.0,
Lx=1.0, Ly=1.0, Lz=1.0,
refinementLevels=1,
skipInit=False,
nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
from . import Comm
MultilevelMesh.__init__(self)
self.useC = True
self.nLayersOfOverlap = nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
logEvent("Generating tetrahedral mesh")
if not skipInit:
if self.useC:
self.meshList.append(TetrahedralMesh())
self.meshList[0].generateTetrahedralMeshFromRectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(TetrahedralMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.cmeshList[l])
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(TetrahedralMesh())
self.meshList[0].rectangularToTetrahedral(grid)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.elementChildren=[]
logEvent(self.meshList[0].meshInfo())
for l in range(1,refinementLevels):
self.refine()
logEvent(self.meshList[-1].meshInfo())
self.buildArrayLists()
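# A minimal construction sketch (parameter values are illustrative only):
#
#     mlMesh = MultilevelTetrahedralMesh(nx=5, ny=5, nz=5,
#                                        Lx=1.0, Ly=1.0, Lz=1.0,
#                                        refinementLevels=2)
#     fineMesh = mlMesh.meshList[-1]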
def generateFromExistingCoarseMesh(self,mesh0,refinementLevels,nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
#blow away or just trust garbage collection
self.nLayersOfOverlap=nLayersOfOverlap;self.parallelPartitioningType=parallelPartitioningType
self.meshList = []
self.elementParents = None
self.cmultilevelMesh = None
if self.useC:
self.meshList.append(mesh0)
logEvent("cmeshTools.CMultilevelMesh")
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
logEvent("buildFromC")
self.buildFromC(self.cmultilevelMesh)
logEvent("partitionMesh")
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(TetrahedralMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.meshList[l].cmesh)
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(TetrahedralMesh())
self.meshList[0].rectangularToTetrahedral(grid)
self.meshList[0].subdomainMesh = self.meshList[0]
self.elementChildren=[]
logEvent(self.meshList[0].meshInfo())
for l in range(1,refinementLevels):
self.refine()
self.meshList[l].subdomainMesh = self.meshList[l]
logEvent(self.meshList[-1].meshInfo())
self.buildArrayLists()
def generatePartitionedMeshFromPUMI(self,mesh0,refinementLevels,nLayersOfOverlap=1):
from . import cmeshTools
self.meshList = []
self.meshList.append(mesh0)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.elementParents = None
self.elementChildren=[]
def generatePartitionedMeshFromTetgenFiles(self,filebase,base,mesh0,refinementLevels,nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
if filebase is None:
filebase="mesh"
assert(refinementLevels==1)
assert(parallelPartitioningType==MeshParallelPartitioningTypes.node)
assert(nLayersOfOverlap<=1)
mesh0.cmesh = cmeshTools.CMesh()
#blow away or just trust garbage collection
self.nLayersOfOverlap=nLayersOfOverlap;self.parallelPartitioningType=parallelPartitioningType
self.meshList = []
self.elementParents = None
self.cmultilevelMesh = None
self.meshList.append(mesh0)
logEvent("cmeshTools.CMultilevelMesh")
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
logEvent("buildFromC")
self.buildFromC(self.cmultilevelMesh)
logEvent("partitionMesh")
self.meshList[0].partitionMeshFromFiles(filebase,base,nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
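# A sketch of the tetgen-file path above (proteus-style usage; names are
# illustrative and 'mymesh' is assumed to be the basename of existing tetgen
# output files handled by the C partitioning routine):
#
#     mlMesh = MultilevelTetrahedralMesh(0, 0, 0, skipInit=True)
#     mlMesh.generatePartitionedMeshFromTetgenFiles(
#         "mymesh", 1, TetrahedralMesh(), refinementLevels=1)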
def refine(self):
self.meshList.append(TetrahedralMesh())
childrenDict = self.meshList[-1].refine(self.meshList[-2])
self.elementChildren.append(childrenDict)
def computeGeometricInfo(self):
for m in self.meshList:
m.computeGeometricInfo()
class MultilevelHexahedralMesh(MultilevelMesh):
"""A hierarchical multilevel mesh with hexahedral cells"""
def __init__(self,
nx, ny, nz,
px=0, py=0, pz=0,
x=0.0, y=0.0, z=0.0,
Lx=1.0, Ly=1.0, Lz=1.0,
refinementLevels=1,
skipInit=False,
nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
from . import Comm
MultilevelMesh.__init__(self)
self.useC = (refinementLevels == 1)
self.nLayersOfOverlap = nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
logEvent("Generating hexahedral mesh")
if not skipInit:
if self.useC:
self.meshList.append(HexahedralMesh())
self.meshList[0].generateHexahedralMeshFromRectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(HexahedralMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.cmeshList[l])
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(HexahedralMesh())
self.elementChildren=[]
self.meshList[0].sigmaMax=0.0
logEvent(self.meshList[0].meshInfo())
for l in range(1,refinementLevels):
self.refine()
self.meshList[-1].sigmaMax=0.0
logEvent(self.meshList[-1].meshInfo())
self.buildArrayLists()
def generateFromExistingCoarseMesh(self,mesh0,refinementLevels,nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
#blow away or just trust garbage collection
self.nLayersOfOverlap=nLayersOfOverlap;self.parallelPartitioningType=parallelPartitioningType
self.meshList = []
self.elementParents = None
self.cmultilevelMesh = None
self.meshList.append(mesh0)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(HexahedralMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.meshList[l].cmesh)
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
def refine(self):
raise NotImplementedError
self.meshList.append(HexahedralMesh())
childrenDict = self.meshList[-1].refine(self.meshList[-2])
self.elementChildren.append(childrenDict)
def computeGeometricInfo(self):
for m in self.meshList:
m.computeGeometricInfo()
def buildReferenceSimplex(nd=2):
"""
Create and return a Proteus mesh object for the reference
element.
Parameters
----------
nd : int
Dimension of reference element
Returns
-------
mesh : :class:`proteus.MeshTools.Mesh`
Simplex mesh for the reference element (triangular for nd=2,
tetrahedral for nd=3)
"""
from proteus import Domain
assert(nd in [1,2,3])
if nd==1:
pass # Not sure what needs to go here; the 1D case is not handled yet.
unit_simplex_domain = Domain.unitSimplex(nd)
polyfile = "reference_element"
unit_simplex_domain.writePoly(polyfile)
if nd==2:
runTriangle(polyfile,
"Yp")
mesh = genMeshWithTriangle(polyfile,
nbase=1)
mesh.partitionMesh()
mesh.globalMesh = mesh
return mesh
if nd==3:
runTetgen(polyfile,
"Yp")
mesh = genMeshWithTetgen(polyfile,
nbase = 1)
return mesh
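# A usage sketch for buildReferenceSimplex, assuming the external triangle
# and tetgen mesh generators are available (they are invoked through
# runTriangle/runTetgen):
#
#     ref_tri = buildReferenceSimplex(nd=2)   # unit triangle mesh
#     ref_tet = buildReferenceSimplex(nd=3)   # unit tetrahedron mesh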
class TriangularMesh(Mesh):
"""A mesh of triangles
The nodes, edges, and triangles are indexed by their
node tuples. The corresponding lists are derived from the dictionaries, and
sorted lexicographically. The global node numbers are redefined to
give a lexicographic ordering.
The mesh can be generated from a rectangular grid and refined using either
3t or Freudenthal-Bey global refinement.
"""
def __init__(self):
Mesh.__init__(self)
self.nodeDict={}
self.edgeDict={}
self.triangleDict={}
self.triangleList=[]
self.oldToNewNode=[]
def meshType(self):
return 'simplex'
def computeGeometricInfo(self):
from . import cmeshTools
cmeshTools.computeGeometricInfo_triangle(self.cmesh)
def generateTriangularMeshFromRectangularGrid(self,nx,ny,Lx,Ly,triangleFlag=1):
from . import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateTriangularMeshFromRectangularGrid(nx,ny,Lx,Ly,self.cmesh,triangleFlag)
cmeshTools.allocateGeometricInfo_triangle(self.cmesh)
cmeshTools.computeGeometricInfo_triangle(self.cmesh)
self.buildFromC(self.cmesh)
def rectangularToTriangularOriented(self,grid):
#copy the nodes from the rectangular mesh
#I want to be able to renumber later without
#changing the grid nodes, so I do deep copies here
self.nodeList = [Node(n.N,n.p[X],n.p[Y],n.p[Z]) for n in grid.nodeList]
self.nodeDict = dict([(n,n) for n in self.nodeList])
for i in range(grid.nHx):
for j in range(grid.nHy):
k=0
n0 = self.nodeList[grid.getNodeNumber(i,j,k)]
n1 = self.nodeList[grid.getNodeNumber(i,j+1,k)]
n2 = self.nodeList[grid.getNodeNumber(i+1,j,k)]
n3 = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
self.newTriangle([n0,n1,n3])
self.newTriangle([n0,n2,n3])
self.finalize()
#self.buildListsEdges()
#self.buildListsTriangles()
def rectangularToTriangularOrientedOtherWay(self,grid):
#copy the nodes from the rectangular mesh
#I want to be able to renumber later without
#changing the grid nodes, so I do deep copies here
self.nodeList = [Node(n.N,n.p[X],n.p[Y],n.p[Z]) for n in grid.nodeList]
self.nodeDict = dict([(n,n) for n in self.nodeList])
for i in range(grid.nHx):
for j in range(grid.nHy):
k=0
n0 = self.nodeList[grid.getNodeNumber(i,j,k)]
n1 = self.nodeList[grid.getNodeNumber(i,j+1,k)]
n2 = self.nodeList[grid.getNodeNumber(i+1,j,k)]
n3 = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
self.newTriangle([n0,n2,n1])
self.newTriangle([n2,n3,n1])
self.finalize()
#self.buildListsEdges()
#self.buildListsTriangles()
def rectangularToTriangularRedBlack(self,grid):
#copy the nodes from the rectangular mesh
#I want to be able to renumber later without
#changing the grid nodes, so I do deep copies here
self.nodeList = [Node(n.N,n.p[X],n.p[Y],n.p[Z]) for n in grid.nodeList]
self.nodeDict = dict([(n,n) for n in self.nodeList])
self.triangleDict={}
for i in range(grid.nHx):
for j in range(grid.nHy):
k=0
#associate the element (i,j,k) with the
#left, front, bottom node
#get the left,front,bottom,node and its color
if (grid.getColor(i,j,k) == grid.black):
b0 = self.nodeList[grid.getNodeNumber(i,j,k)]
r0 = self.nodeList[grid.getNodeNumber(i+1,j,k)]
r1 = self.nodeList[grid.getNodeNumber(i,j+1,k)]
b1 = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
else:
r0 = self.nodeList[grid.getNodeNumber(i,j,k)]
b0 = self.nodeList[grid.getNodeNumber(i+1,j,k)]
b1 = self.nodeList[grid.getNodeNumber(i,j+1,k)]
r1 = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
self.newTriangle([b0,r0,r1])
self.newTriangle([b1,r0,r1])
self.finalize()
#self.buildListsEdges()
#self.buildListsTriangles()
#mwf debug: this alias selects which rectangular-to-triangular splitting is used
#(currently the "other way" orientation; the oriented and red-black variants are above)
rectangularToTriangular = rectangularToTriangularOrientedOtherWay#rectangularToTriangularOriented
def generateFromTriangleMesh(self,ctrirep,base):
from .import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateFromTriangleMesh(self.cmesh,ctrirep,base)
cmeshTools.allocateGeometricInfo_triangle(self.cmesh)
cmeshTools.computeGeometricInfo_triangle(self.cmesh)
self.buildFromC(self.cmesh)
def generateFromTriangleFiles(self,filebase,base):
from .import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateFromTriangleFiles(self.cmesh,filebase,base)
cmeshTools.allocateGeometricInfo_triangle(self.cmesh)
cmeshTools.computeGeometricInfo_triangle(self.cmesh)
self.buildFromC(self.cmesh)
def writeTriangleFiles(self,filebase,base):
from .import cmeshTools
cmeshTools.writeTriangleFiles(self.cmesh,filebase,base)
def generateFrom2DMFile(self,filebase,base=1):
from .import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateFrom2DMFile(self.cmesh,filebase,base)
cmeshTools.allocateGeometricInfo_triangle(self.cmesh)
cmeshTools.computeGeometricInfo_triangle(self.cmesh)
self.buildFromC(self.cmesh)
def constructTriangularMeshOnRectangle(self,Lx,Ly,nx,ny,writeMesh=0,
meshFileBase='mesh2d'):
"""
wrapper function for making a triangular mesh on the rectangle
[0,Lx] x [0,Ly].
"""
nz = 1
Lz = 1.0
grid2d = RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
#grid2d.writeEdgesGnuplot('grid2d')
#grid2d.viewMeshGnuplotPipe('grid2d')
self.rectangularToTriangular(grid2d)
if writeMesh == 1:
#print mesh in gnuplot format
self.writeEdgesGnuplot(meshFileBase)
#can view with
#self.viewMeshGnuplotPipe(meshFileBase)
elif writeMesh == 2:
self.writeEdgesMatlab(meshFileBase)
#view in matlab with meshFileBase.m
#end else
return self
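# A minimal sketch of the wrapper above (illustrative values only):
#
#     mesh = TriangularMesh().constructTriangularMeshOnRectangle(
#         Lx=2.0, Ly=1.0, nx=21, ny=11, writeMesh=1)
#     # writeMesh=1 writes gnuplot edges to 'mesh2d'; writeMesh=2 writes a
#     # matlab script instead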
def buildFromSets(self,triangleSet,edgeSet,nodeSet):
self.nodeList = list(nodeSet)
self.nodeDict = dict([(n,n) for n in self.nodeList])
self.edgeList = list(edgeSet)
self.edgeDict = dict([(e.nodes,e) for e in self.edgeList])
self.triangleList = list(triangleSet)
self.triangleDict = dict([(t.nodes,t) for t in self.triangleList])
self.elementList = self.triangleList
self.elementBoundaryList = self.edgeList
def fixLocalNumbering(self):
for tN in range(len(self.triangleList)):
self.triangleList[tN].computeGeometricInfo()
if edet(self.triangleList[tN].linearMap) < 0:
newNodes = list(self.triangleList[tN].nodes)
newNodes[2] = self.triangleList[tN].nodes[1]
newNodes[1] = self.triangleList[tN].nodes[2]
self.triangleList[tN].nodes = newNodes
def finalize(self):
self.buildLists()
#self.fixLocalNumbering()
self.buildArraysFromLists()
#todo: build boundary mesh
def buildLists(self):
self.buildListsNodes()
self.buildListsEdges()
self.buildListsTriangles()
self.elementList = self.triangleList
self.elementBoundaryList = self.edgeList
def buildListsNodes(self):
keyList = list(self.nodeDict.keys())
keyList.sort()
self.nodeList=[]
self.oldToNewNode=list(range(len(self.nodeDict)))
for nN,k in enumerate(keyList):
self.oldToNewNode[self.nodeDict[k].N]=nN
self.nodeDict[k].N = nN
self.nodeList.append(self.nodeDict[k])
def buildListsEdges(self):
keyList = list(self.edgeDict.keys())
keyList.sort()
self.edgeList=[]
for eN,k in enumerate(keyList):
self.edgeDict[k].N = eN
self.edgeList.append(self.edgeDict[k])
def buildListsTriangles(self):
keyList = list(self.triangleDict.keys())
keyList.sort()
self.triangleList=[]
for tN,k in enumerate(keyList):
self.triangleDict[k].N = tN
self.triangleList.append(self.triangleDict[k])
self.polygonList = self.triangleList
def newTriangle(self,nodes):
t = Triangle(len(self.triangleDict),nodes)
self.triangleDict[t.nodes] = t
self.registerEdges(t)
return t
def registerEdges(self,t):
for en,e in enumerate(t.edges):
if e.nodes in self.edgeDict:
t.edges[en]=self.edgeDict[e.nodes]
else:
eN=len(self.edgeDict)
e.N=eN
self.edgeDict[e.nodes]=e
def registerNode(self,node):
if node in self.nodeDict:
node = self.nodeDict[node]
else:
node.N = len(self.nodeDict)
self.nodeDict[node] = node
return node
def buildLevelSetMesh(self,value,nodalValues):
levelSetMesh = EdgeMesh()
self.levelSetNodeNumbers = set()
for t in self.triangleList:
nodes={}
for e in t.edges:
nl = e.nodes[0]
vl = nodalValues[nl.N]
nr = e.nodes[1]
vr = nodalValues[nr.N]
if ((vl >= value and value >= vr) or
(vl <= value and value <= vr)):
if vl == vr:
newNl = Node(len(levelSetMesh.nodeDict),
nl.p[X],
nl.p[Y],
nl.p[Z])
newNl = levelSetMesh.registerNode(newNl)
newNr = Node(len(levelSetMesh.nodeDict),
nr.p[X],
nr.p[Y],
nr.p[Z])
newNr = levelSetMesh.registerNode(newNr)
levelSetMesh.newEdge([newNl,newNr])
self.levelSetNodeNumbers.add(nl.N)
self.levelSetNodeNumbers.add(nr.N)
elif value == vl:
newNode = Node(len(levelSetMesh.nodeDict),
nl.p[X],
nl.p[Y],
nl.p[Z])
nodes[newNode] = newNode
self.levelSetNodeNumbers.add(nl.N)
elif value == vr and len(nodes) < 2:
newNode = Node(len(levelSetMesh.nodeDict),
nr.p[X],
nr.p[Y],
nr.p[Z])
nodes[newNode] = newNode
self.levelSetNodeNumbers.add(nr.N)
else:
wr = old_div((value - vl), (vr - vl))
wl = old_div((value - vr), (vl - vr))
newPoint = nl.p*wl + nr.p*wr
newNode = Node(len(levelSetMesh.nodeDict),
newPoint[X],
newPoint[Y],
newPoint[Z])
nodes[newNode] = newNode
self.levelSetNodeNumbers.add(nl.N)
self.levelSetNodeNumbers.add(nr.N)
elif vl < value:
self.levelSetNodeNumbers.add(nl.N)
elif vr < value:
self.levelSetNodeNumbers.add(nr.N)
if len(nodes) == 0:
pass
elif len(nodes) == 1:
print("singleton")
elif len(nodes) == 2:
newNodes=[]
for n in list(nodes.values()):
newNodes.append(levelSetMesh.registerNode(n))
levelSetMesh.newEdge(newNodes)
else:
print("unexpected case in buildLevelSetMesh")
print(t.N)
for e in t.edges:
print(e.N)
for n in e.nodes:
print(n.N)
print(n.p)
print("level set triangle")
for n in list(nodes.values()):
print(n.p)
if len(levelSetMesh.edgeDict) == 0:
print("level set does not cross any edges")
return None
else:
levelSetMesh.finalize()
return levelSetMesh
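# A sketch of extracting a level-set contour, assuming 'tri_mesh' is a
# list-based TriangularMesh and 'phi' is a hypothetical array of nodal values
# indexed like tri_mesh.nodeList:
#
#     contour = tri_mesh.buildLevelSetMesh(value=0.0, nodalValues=phi)
#     if contour is not None:                    # None: phi=0 crosses no edge
#         contour.writeEdgesGnuplot('contour')   # assumed available via Mesh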
def refine3t(self,oldMesh):
childrenDict={}
for t in oldMesh.triangleList:
#deep copy old nodes because we'll renumber
tNodes = [Node(eN,n.p[X],n.p[Y],n.p[Z])
for eN,n in enumerate(t.nodes)]
for lnN,n in enumerate(tNodes): tNodes[lnN]=self.registerNode(n)
#add new node
t.computeGeometricInfo()
newNode = Node(len(self.nodeDict),
t.barycenter[X],
t.barycenter[Y],
t.barycenter[Z])
newNode = self.registerNode(newNode)
t1=self.newTriangle([tNodes[0],tNodes[1],newNode])
t2=self.newTriangle([tNodes[1],tNodes[2],newNode])
t3=self.newTriangle([tNodes[2],tNodes[0],newNode])
childrenDict[t.N]=[t1,t2,t3]
self.finalize()
return childrenDict
def refineFreudenthalBey(self,oldMesh):
logEvent("Refining the mesh using Freudenthal-Bey refinement")
childrenDict={}
for t in list(oldMesh.triangleDict.values()):
#deep copy old nodes because we'll renumber
tNodes = [Node(nN,n.p[X],n.p[Y],n.p[Z])
for nN,n in enumerate(t.nodes)]
for lnN,n in enumerate(tNodes): tNodes[lnN]=self.registerNode(n)
#add new nodes (midpoints of edges)
#use local edge tuples as keys
newNodes={}
for et,en in t.edgeMap.items():
t.edges[en].computeGeometricInfo()
p = t.edges[en].barycenter
newNodes[et] = Node(en,p[X],p[Y],p[Z])
#set the global node numbers
for k,n in newNodes.items(): newNodes[k]=self.registerNode(n)
#add corner triangles
t1=self.newTriangle([tNodes[0],
newNodes[(0,1)],
newNodes[(0,2)]])
t2=self.newTriangle([tNodes[1],
newNodes[(0,1)],
newNodes[(1,2)]])
t3=self.newTriangle([tNodes[2],
newNodes[(0,2)],
newNodes[(1,2)]])
#add center triangle
t4=self.newTriangle([newNodes[(0,1)],
newNodes[(1,2)],
newNodes[(0,2)]])
childrenDict[t.N]=[t1,t2,t3,t4]
self.finalize()
return childrenDict
#for debugging: print each tet
#self.edgeList=[]
#Tlist = self.tetrahedronDict.values()
#for T in Tlist:
# self.edgeList = self.edgeList + T.edges
def refine(self,oldMesh):
return self.refineFreudenthalBey(oldMesh)
def meshInfo(self):
minfo = """Number of triangles : %d
Number of edges : %d
Number of nodes : %d\n""" % (self.nElements_global,
self.nElementBoundaries_global,
self.nNodes_global)
if self.subdomainMesh != self:
sinfo = self.subdomainMesh.meshInfo()
info = "*** Global ***\n" + minfo + "\n*** Local ***\n" + sinfo
return info
return minfo
def readMeshADH(self,filename,adhBase=1,suffix='3dm'):
meshIn = open(filename+'.'+suffix,'r')
firstLine = meshIn.readline()
firstWords = firstLine.split()
print("Reading object=%s from file=%s" % (firstWords[0],filename))
line = meshIn.readline()
columns = line.split()
triangles = []
triangleEdges=set()
logEvent("Reading "+str(filename)+ \
" and building node lists for triangles, and edges")
#assume triangles are ordered by triangle number
while (columns[0] == 'E3T'):
nodeNumbers = [int(c) - adhBase for c in columns[2:5]]
nodeNumbers.sort()
triangles.append(array.array('i',nodeNumbers))
triangleEdges.update([(nodeNumbers[0],nodeNumbers[1]),
(nodeNumbers[0],nodeNumbers[2]),
(nodeNumbers[1],nodeNumbers[2])])
line = meshIn.readline()
columns = line.split()
print("Building node list and dict")
#assume nodes are ordered by node number
while (len(columns) == 5):
newNode = Node(int(columns[1]) - adhBase,
float(columns[2]),
float(columns[3]),
float(columns[4]))
self.nodeList.append(newNode)
self.nodeDict[newNode]=newNode
line = meshIn.readline()
columns = line.split()
print("Number of triangles :"+str(len(triangles)))
print("Number of edges :"+str(len(triangleEdges)))
print("Number of nodes :"+str(len(self.nodeList)))
print("Number of objects :"+\
str(len(triangleEdges)+len(triangles)+len(self.nodeList)))
print("Building edge list")
self.edgeList =[Edge(edgeNumber=eN,nodes=[self.nodeList[nN[0]],
self.nodeList[nN[1]]])
for eN,nN in enumerate(triangleEdges)]
print("Building edge dict")
self.edgeDict = dict([(e.nodes,e) for e in self.edgeList])
print("Building triangle list")
self.triangleList =[Triangle(triangleNumber=tN,
nodes=[self.nodeList[nN[0]],
self.nodeList[nN[1]],
self.nodeList[nN[2]]],
edgeDict=self.edgeDict)
for tN,nN in enumerate(triangles)]
print("Building triangle dict")
self.triangleDict = dict([(t.nodes,t) for t in self.triangleList])
self.elementList = self.triangleList
self.elementBoundaryList = self.edgeList
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,tCount=0,EB=False):
Mesh.writeMeshXdmf(self,ar,name,t,init,meshChanged,"Triangle",tCount,EB=EB)
def writeMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','w')
caseOut.write('FORMAT\n'+'type: ensight gold\n')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Ensight Gold\n')
meshOut.write('Unstructured Triangular Mesh\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('A Mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % self.nNodes_global)
for nN in range(self.nNodes_global):
meshOut.write('%10i\n' % (nN+base))
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,0])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,1])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,2])
meshOut.write('tria3\n'+'%10i\n' % self.nElements_global)
for eN in range(self.nElements_global):
meshOut.write('%10i\n' % (eN+base))
for eN in range(self.nElements_global):
meshOut.write('%10i%10i%10i\n' % tuple((nN+base) for nN in self.elementNodesArray[eN,:]))
meshOut.close()
def appendMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','a')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Unstructured Triangular Mesh\n\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('The whole mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % len(self.nodeList))
for n in self.nodeList:
nN = n.N+base
meshOut.write('%10i\n' % nN)
for n in self.nodeList:
meshOut.write('%12.5E\n' % n.p[X])
for n in self.nodeList:
meshOut.write('%12.5E\n' % n.p[Y])
for n in self.nodeList:
meshOut.write('%12.5E\n' % n.p[Z])
meshOut.write('tria3\n'+'%10i\n' % len(self.elementList))
for e in self.elementList:
eN = e.N + base
meshOut.write('%10i\n' % eN)
for e in self.elementList:
meshOut.write('%10i%10i%10i\n' % tuple(n.N+base for n in e.nodes))
meshOut.close()
def writeMeshADH(self,filename,adhBase=1):
from .import cmeshTools
cmeshTools.write2dmFiles(self.cmesh,filename,adhBase)
def writeAsymptote(self,fileprefix,L,x,units="m"):
"""
Write a representation of the triangular mesh in the Asymptote vector graphics language
"""
unitsize=old_div(4.0,L[0])
f = open(fileprefix+".asy",'w')
fileString="""
unitsize(4.0 inches / %(Lx)f);
size(5 inches);
real Lx=%(Lx)f;
real Ly=%(Ly)f;
real offset=0.0125Lx;
real x=%(x)f;
real y=%(y)f;
string strx="$%(Lx)2.2f\mbox{%(units)s}$";
string stry="$%(Ly)2.2f\mbox{%(units)s}$";
draw(strx,(x,y-offset)--(x+Lx,y-offset),S,black,Bars,Arrows,PenMargins);
draw(stry,(x-offset,y)--(x-offset,y+Ly),W,black,Bars,Arrows,PenMargins);
import graph;
import palette;
pen[] regionPens = Rainbow(NColors=%(nRegionFlags)d);
pen[] boundaryPens = Rainbow(NColors=%(nBoundaryFlags)d);
""" % {'Lx':L[0],'Ly':L[1],'x':x[0],'y':x[1],'units':units,
'nRegionFlags':(max(self.elementMaterialTypes) - min(self.elementMaterialTypes)),
'nBoundaryFlags':(max(self.elementBoundaryMaterialTypes)-min(self.elementBoundaryMaterialTypes))}
#now draw triangles
for t,tFlag in zip(self.elementNodesArray,self.elementMaterialTypes):
fileString+="fill((%f,%f)--(%f,%f)--(%f,%f)--cycle,regionPens[%d]);\n" % (self.nodeArray[t[0]][0],self.nodeArray[t[0]][1],
self.nodeArray[t[1]][0],self.nodeArray[t[1]][1],
self.nodeArray[t[2]][0],self.nodeArray[t[2]][1],
tFlag-min(self.elementMaterialTypes))
for eb,ebFlag in zip(self.elementBoundaryNodesArray,self.elementBoundaryMaterialTypes):
if True:#ebFlag > 0:
fileString+="draw((%f,%f)--(%f,%f),boundaryPens[%d]+linewidth(0.01));\n" % (self.nodeArray[eb[0]][0],self.nodeArray[eb[0]][1],
self.nodeArray[eb[1]][0],self.nodeArray[eb[1]][1],
ebFlag-min(self.elementBoundaryMaterialTypes))
f.write(fileString)
f.close()
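# Example of rendering the Asymptote output produced above (assumes the 'asy'
# tool is installed; file names are illustrative):
#
#     mesh.writeAsymptote('mesh2d', L=(1.0, 1.0), x=(0.0, 0.0), units="m")
#     # then, from a shell:  asy -f pdf mesh2d.asy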
# def buildMatlabMeshDataStructures(self,meshFileBase='meshMatlab',writeToFile=True):
# """
# build array data structures for matlab finite element mesh representation
# and write to a file to view and play with in matlatb
# in matlab can then print mesh with
# pdemesh(p,e,t)
# where
# p is the vertex or point matrix
# e is the edge matrix, and
# t is the element matrix
# points matrix is [2 x num vertices]
# format :
# row 1 = x coord,
# row 2 = y coord for nodes in mesh
# edge matrix is [7 x num edges]
# format:
# row 1 = start vertex number
# row 2 = end vertex number
# row 3 = start value in edge parameterization, should be 0
# row 4 = end value in edge parameterization, should be 1
# row 5 = global edge id, base 1
# row 6 = subdomain on left? always 1 for now
# row 7 = subdomain on right? always 0 for now
# element matrix is [4 x num elements]
# row 1 = vertex 1 global number
# row 2 = vertex 2 global number
# row 3 = vertex 3 global number
# row 4 = triangle subdomain number
# where 1,2,3 is a local counter clockwise numbering of vertices in
# triangle
# """
# matlabBase = 1
# p = np.zeros((2,self.nNodes_global),'d')
# e = np.zeros((7,self.nElementBoundaries_global),'d')
# t = np.zeros((4,self.nElements_global),'d')
# #load p,e,t and write file
# if writeToFile:
# mfile = open(meshFileBase+'.m','w')
# else:
# mfile = open('/dev/null','w')
# #
# if writeToFile:
# mfile.write('p = [ ... \n')
# for nN in range(self.nNodes_global):
# p[0,nN]=self.nodeArray[nN,0]
# p[1,nN]=self.nodeArray[nN,1]
# if writeToFile:
# mfile.write('%g %g \n' % tuple(p[:,nN]))
# if writeToFile:
# mfile.write(']; \n')
# mfile.write("p = p\';\n") #need transpose for matlab
# if writeToFile:
# mfile.write('e = [ ... \n')
# for ebN in range(self.nElementBoundaries_global):
# e[0,ebN]=self.elementBoundaryNodesArray[ebN,0] + matlabBase #global node number of start node base 1
# e[1,ebN]=self.elementBoundaryNodesArray[ebN,1] + matlabBase #global node number of end node base 1
# e[2,ebN]=0.0 #edge param. is 0 to 1
# e[3,ebN]=1.0
# e[4,ebN]=ebN + matlabBase #global edge number base 1
# e[5,ebN]=0 #subdomain to left
# e[6,ebN]=1 #subdomain to right
# if writeToFile:
# mfile.write('%g %g %g %g %g %g %g \n' % tuple(e[:,ebN]))
# if writeToFile:
# mfile.write(']; \n')
# mfile.write("e = e\';\n") #need transpose for matlab
# #write triangles last
# if writeToFile:
# mfile.write('t = [ ... \n')
# for eN in range(self.nElements_global):
# t[0,eN]=self.elementNodesArray[eN,0]+matlabBase #global node number for vertex 0
# t[1,eN]=self.elementNodesArray[eN,1]+matlabBase #global node number for vertex 0
# t[2,eN]=self.elementNodesArray[eN,2]+matlabBase #global node number for vertex 0
# t[3,eN]=1 #subdomain id
# if writeToFile:
# mfile.write('%g %g %g %g \n' % tuple(t[:,eN]))
# if writeToFile:
# mfile.write(']; \n');
# mfile.write("t = t\';\n") #need transpose for matlab
class QuadrilateralMesh(Mesh):
"""A mesh of quads
The nodes, edges, and triangles are indexed by their
node tuples. The corresponding lists are derived from the dictionaries, and
sorted lexicographically. The global node numbers are redefined to
give a lexicographic ordering.
The mesh can be generated from a rectangular grid and refined using either
3t or Freudenthal-Bey global refinement.
"""
def __init__(self):
Mesh.__init__(self)
self.nodeDict={}
self.edgeDict={}
self.quadDict={}
self.quadList=[]
self.oldToNewNode=[]
# temporary
self.max_nNodeNeighbors_node = 4
def buildFromSets(self,faceSet,edgeSet,nodeSet):
self.nodeList = list(nodeSet)
self.nodeDict = dict([(n,n) for n in self.nodeList])
self.edgeList = list(edgeSet)
self.edgeDict = dict([(e.nodes,e) for e in self.edgeList])
self.quadList = list(faceSet)
self.quadDict = dict([(q.nodes,q) for q in self.quadList])
self.elementList = self.quadList
self.elementBoundaryList = self.edgeList
def rectangularToQuadrilateral(self,grid,x=0.0,y=0.0,z=0.0):
''' WIP - I think this is the first function that needs to be
written so that MultilevelQuadrilateralMesh can work. This
function does not call C functions.
'''
self.nodeList = [Node(n.N,n.p[X]+x,n.p[Y]+y,n.p[Z]+z) for n in grid.nodeList]
# Is the following line necessary?
self.nodeDict = dict([(n,n) for n in self.nodeList])
for i in range(grid.nHx):
for j in range(grid.nHy):
k=0
n0 = self.nodeList[grid.getNodeNumber(i,j,k)]
n1 = self.nodeList[grid.getNodeNumber(i,j+1,k)]
n2 = self.nodeList[grid.getNodeNumber(i+1,j+1,k)]
n3 = self.nodeList[grid.getNodeNumber(i+1,j,k)]
e0 = Edge(nodes=[n0,n1])
e1 = Edge(nodes=[n1,n2])
e2 = Edge(nodes=[n2,n3])
e3 = Edge(nodes=[n3,n0])
self.newQuadrilateral([e0,e1,e2,e3])
self.finalize()
self.buildNodeDiameterArray()
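# A construction sketch for the pure-python path above (no C calls), using
# illustrative grid dimensions; nz=1 gives a 2D grid:
#
#     grid = RectangularGrid(4, 3, 1, 1.0, 1.0, 1.0)
#     qmesh = QuadrilateralMesh()
#     qmesh.rectangularToQuadrilateral(grid)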
def generateQuadrilateralMeshFromRectangularGrid(self,nx,ny,Lx,Ly):
from .import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateQuadrilateralMeshFromRectangularGrid(nx,ny,0,0,Lx,Ly,self.cmesh)
cmeshTools.allocateGeometricInfo_quadrilateral(self.cmesh)
cmeshTools.computeGeometricInfo_quadrilateral(self.cmesh)
self.buildFromC(self.cmesh)
def generateFromQuadFileIFISS(self,meshfile):
''' WIP - read a matlab.mat file containing IFISS vertices
and elements
'''
import scipy.io
griddata = scipy.io.loadmat(meshfile+'.mat')
self.nodeList = [Node(nN,n[0],n[1],0.0) for nN,n in enumerate(griddata['vertices'])]
# Is the following line necessary?
self.nodeDict = dict([(n,n) for n in self.nodeList])
for q in griddata['quads']:
n0,n3,n2,n1 = q # clockwise ordering needed
e0 = Edge(nodes=[self.nodeList[n0],self.nodeList[n1]])
e1 = Edge(nodes=[self.nodeList[n1],self.nodeList[n2]])
e2 = Edge(nodes=[self.nodeList[n2],self.nodeList[n3]])
e3 = Edge(nodes=[self.nodeList[n3],self.nodeList[n0]])
self.newQuadrilateral([e0,e1,e2,e3])
self.finalize()
for F,nN in griddata['bdyflags']:
self.nodeMaterialTypes[nN] = F
for ebNE in range(self.nExteriorElementBoundaries_global):
ebN = self.exteriorElementBoundariesArray[ebNE]
n0,n1 = self.elementBoundaryNodesArray[ebN]
self.elementBoundaryMaterialTypes[ebN]=max(self.nodeMaterialTypes[n0],
self.nodeMaterialTypes[n1])
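# A sketch of the .mat layout this reader assumes (field names taken from the
# code above; whether a given IFISS export provides exactly these fields
# should be verified):
#
#     griddata['vertices'] : (nNodes, 2) float array of x,y coordinates
#     griddata['quads']    : (nQuads, 4) int array, clockwise node numbers
#     griddata['bdyflags'] : (nFlagged, 2) array of (flag, node number) pairs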
def meshType(self):
return 'cuboid'
def meshInfo(self):
minfo = """Number of quadrilaterals : %d
Number of edges : %d
Number of nodes : %d\n""" % (self.nElements_global,
self.nElementBoundaries_global,
self.nNodes_global)
if self.subdomainMesh != self:
sinfo = self.subdomainMesh.meshInfo()
info = "*** Global ***\n" + minfo + "\n*** Local ***\n" + sinfo
return info
return minfo
def newQuadrilateral(self,edges):
q = Quadrilateral(len(self.quadDict),edges)
self.quadDict[q.nodes] = q
self.registerEdges(q)
return q
def registerEdges(self,q):
'''check if an edge is in the mesh dictionary
if it is, point to existing entry
otherwise, create a new entry
'''
for en,e in enumerate(q.edges):
if e.nodes in self.edgeDict:
q.edges[en]=self.edgeDict[e.nodes]
else:
eN=len(self.edgeDict)
e.N=eN
self.edgeDict[e.nodes]=e
def registerNode(self,node):
''' check if a node is in the mesh dictionary
if it is, point to existing entry
otherwise, create a new entry
'''
if node in self.nodeDict:
node = self.nodeDict[node]
else:
node.N = len(self.nodeDict)
self.nodeDict[node] = node
return node
def refine(self,oldMesh):
logEvent("Refining Using Standard Quadrilateral Refinement")
childrenDict={}
for q in list(oldMesh.quadDict.values()):
qNodes = [Node(nN,n.p[X],n.p[Y],n.p[Z]) for nN,n in enumerate(q.nodes)]
for lnN,n in enumerate(qNodes): qNodes[lnN] = self.registerNode(n)
q.computeGeometricInfo()
newNodeLeft = Node(len(self.nodeDict),q.xmin,q.ymid,q.zmid)
newNodeLeft = self.registerNode(newNodeLeft)
newNodeTop = Node(len(self.nodeDict),q.xmid,q.ymax,q.zmid)
newNodeTop = self.registerNode(newNodeTop)
newNodeRight = Node(len(self.nodeDict),q.xmax,q.ymid,q.zmid)
newNodeRight = self.registerNode(newNodeRight)
newNodeBottom = Node(len(self.nodeDict),q.xmid,q.ymin,q.zmid)
newNodeBottom = self.registerNode(newNodeBottom)
newNodeMid = Node(len(self.nodeDict),q.xmid,q.ymid,q.zmid)
newNodeMid = self.registerNode(newNodeMid)
e1 = Edge(nodes=[qNodes[0],newNodeLeft])
e2 = Edge(nodes=[newNodeLeft,newNodeMid])
e3 = Edge(nodes=[newNodeMid,newNodeBottom])
e4 = Edge(nodes=[newNodeBottom,qNodes[0]])
e5 = Edge(nodes=[newNodeLeft,qNodes[1]])
e6 = Edge(nodes=[qNodes[1],newNodeTop])
e7 = Edge(nodes=[newNodeTop,newNodeMid])
e8 = Edge(nodes=[newNodeTop,qNodes[2]])
e9 = Edge(nodes=[qNodes[2], newNodeRight])
e10 = Edge(nodes=[newNodeRight, newNodeMid])
e11 = Edge(nodes=[qNodes[3],newNodeBottom])
e12 = Edge(nodes=[newNodeRight,qNodes[3]])
q1 = self.newQuadrilateral([e1,e2,e3,e4])
self.registerEdges(q1)
q2 = self.newQuadrilateral([e5,e6,e7,e2])
self.registerEdges(q2)
q3 = self.newQuadrilateral([e3,e10,e12,e11])
self.registerEdges(q3)
q4 = self.newQuadrilateral([e7,e8,e9,e10])
self.registerEdges(q4)
childrenDict[q.N]=[q1,q2,q3,q4]
self.finalize()
return childrenDict
def finalize(self):
''' WIP '''
self.buildLists()
self.buildArraysFromLists()
def buildLists(self):
''' WIP '''
self.buildListsNodes()
self.buildListsEdges()
self.buildListsQuadrilaterals()
self.elementList = self.quadList
self.elementBoundaryList = self.edgeList
def buildListsNodes(self):
keyList = list(self.nodeDict.keys())
keyList.sort()
self.nodeList=[]
self.oldToNewNode=list(range(len(self.nodeDict)))
for nN,k in enumerate(keyList):
self.oldToNewNode[self.nodeDict[k].N]=nN
self.nodeDict[k].N = nN
self.nodeList.append(self.nodeDict[k])
def buildListsEdges(self):
keyList = list(self.edgeDict.keys())
keyList.sort()
self.edgeList=[]
for eN,k in enumerate(keyList):
self.edgeDict[k].N = eN
self.edgeList.append(self.edgeDict[k])
def buildListsQuadrilaterals(self):
keyList = list(self.quadDict.keys())
keyList.sort()
self.quadList = []
for qN,q in enumerate(keyList):
self.quadDict[q].N = qN
self.quadList.append(self.quadDict[q])
self.polygonList = self.quadList
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,tCount=0,EB=False):
Mesh.writeMeshXdmf(self,ar,name,t,init,meshChanged,"Quadrilateral",tCount,EB=EB)
def buildNodeDiameterArray(self):
nNodes = len(self.nodeArray)
self.nodeDiametersArray = np.zeros(nNodes)
self.nodeSupportArray = np.zeros(nNodes)
self.volume = 0.
for eN in range(self.nElements_global):
area = self._calc_quad_area(eN)
self.volume += area
hMax = self.elementDiametersArray[eN]
for nN in range(self.nNodes_element):
nodeDiameter = hMax*area
idx = self.elementNodesArray[eN][nN]
self.nodeDiametersArray[idx]+=nodeDiameter
self.nodeSupportArray[idx]+=area
for nN in range(nNodes):
self.nodeDiametersArray[nN] /= self.nodeSupportArray[nN]
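# Note: in the loop above each node accumulates sum(hMax_e * area_e) over the
# elements e containing it and is then divided by sum(area_e), so
# nodeDiametersArray[n] is the area-weighted average of the diameters of the
# elements around node n, and self.volume is the total mesh area.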
@staticmethod
def _calc_pt_distance(pt1,pt2):
""" Calculate the distance between two points.
Arguments
---------
pt1: lst
Coordinates of the first point
pt2: lst
Coordinates of the second point
Returns
-------
distance : float
"""
d = 0.
for i,j in zip(pt1,pt2):
d += (i-j)**2
return math.sqrt(d)
def _calc_hmax(self,i):
""" Find the largest edge length of an element.
Arguments
---------
i : int
Element number
Returns
-------
hmax : float
The largest edge length of element i
"""
hMax = 0.
element_nodes = self.nodeArray[self.elementNodesArray[i]]
for j, nN_L in enumerate(element_nodes):
for nN_R in element_nodes[j+1:]:
hMax = max(hMax,self._calc_pt_distance(nN_L,nN_R))
return hMax
def _calc_quad_area(self,i):
""" Calculates the area of a quadrilateral.
Arguments
---------
i : int
The quadrilateral whose area is being calculated.
Returns
-------
A : float
The quadrilateral's area
"""
n = [n0,n1,n2,n3] = self.nodeArray[self.elementNodesArray[i]]
d = [self._calc_pt_distance(n0,n[1]),
self._calc_pt_distance(n0,n[-1])]
A = d[0]*d[1]
return A
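# A small numeric check of the helpers above (a sketch, not executed here):
# for a unit-square element with nodes (0,0,0), (1,0,0), (1,1,0), (0,1,0),
#
#     _calc_pt_distance((0.,0.,0.), (1.,1.,0.))   # -> sqrt(2) ~ 1.4142
#     _calc_hmax(eN)                              # -> sqrt(2), the diagonal
#     _calc_quad_area(eN)                         # -> 1.0
#
# where eN is the (hypothetical) index of that element.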
class MultilevelTriangularMesh(MultilevelMesh):
"""A hierarchical multilevel mesh of triangular cells"""
from .import cmeshTools
def __init__(self,
nx, ny, nz,
x=0.0, y=0.0, z=0.0,
Lx=1.0, Ly=1.0, Lz=1.0,
refinementLevels=1,
skipInit=False,
nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node,triangleFlag=0):
from . import cmeshTools
MultilevelMesh.__init__(self)
self.useC = True
self.nLayersOfOverlap=nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
#self.useC = False
if not skipInit:
if self.useC:
self.meshList.append(TriangularMesh())
self.meshList[0].generateTriangularMeshFromRectangularGrid(nx,ny,Lx,Ly,triangleFlag=triangleFlag)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(TriangularMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.meshList[l].cmesh)
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(TriangularMesh())
self.meshList[0].rectangularToTriangular(grid)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.meshList[0].subdomainMesh = self.meshList[0]
self.elementChildren=[]
logEvent(self.meshList[0].meshInfo())
for l in range(1,refinementLevels):
self.refine()
self.meshList[l].subdomainMesh = self.meshList[l]
logEvent(self.meshList[-1].meshInfo())
self.buildArrayLists()
#
#mwf what's the best way to build from an existing mesh
def generateFromExistingCoarseMesh(self,mesh0,refinementLevels,nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from .import cmeshTools
#blow away or just trust garbage collection
self.nLayersOfOverlap = nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
self.meshList = []
self.elementParents = None
self.cmultilevelMesh = None
if self.useC:
self.meshList.append(mesh0)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(TriangularMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.meshList[l].cmesh)
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(TriangularMesh())
self.meshList[0].rectangularToTriangular(grid)
self.meshList[0].subdomainMesh = self.meshList[0]
self.elementChildren=[]
logEvent(self.meshList[0].meshInfo())
for l in range(1,refinementLevels):
self.refine()
self.meshList[l].subdomainMesh = self.meshList[l]
logEvent(self.meshList[-1].meshInfo())
self.buildArrayLists()
def generatePartitionedMeshFromPUMI(self,mesh0,refinementLevels,nLayersOfOverlap=1):
from .import cmeshTools
self.meshList = []
self.meshList.append(mesh0)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.elementParents = None
self.elementChildren=[]
def generatePartitionedMeshFromTriangleFiles(self,filebase,base,mesh0,refinementLevels,nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
if filebase is None:
filebase="mesh"
assert(refinementLevels==1)
assert(parallelPartitioningType==MeshParallelPartitioningTypes.node)
assert(nLayersOfOverlap<=1)
mesh0.cmesh = cmeshTools.CMesh()
#blow away or just trust garbage collection
self.nLayersOfOverlap=nLayersOfOverlap;self.parallelPartitioningType=parallelPartitioningType
self.meshList = []
self.elementParents = None
self.cmultilevelMesh = None
self.meshList.append(mesh0)
logEvent("cmeshTools.CMultilevelMesh")
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
logEvent("buildFromC")
self.buildFromC(self.cmultilevelMesh)
logEvent("partitionMesh")
self.meshList[0].partitionMeshFromFiles(filebase,base,nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
def refine(self):
self.meshList.append(TriangularMesh())
childrenDict = self.meshList[-1].refine(self.meshList[-2])
self.elementChildren.append(childrenDict)
def computeGeometricInfo(self):
for m in self.meshList:
m.computeGeometricInfo()
def locallyRefine(self,elementTagArray,flagForRefineType=0):
"""
simple local refinement assuming elementTagArray[eN]=1 --> bisect
flagForRefineType = 0 -- newest node, 1 -- 4T, 2 -- U4T
"""
logEvent("MultilevelTriangularMesh:locallyRefine")
if flagForRefineType == 0:
logEvent("MultilevelTriangularMesh: calling cmeshTools.setNewestNodeBases")
self.cmeshTools.setNewestNodeBases(2,self.cmultilevelMesh)
if self.useC:
logEvent("MultilevelTriangularMesh: calling locallRefineMultilevelMesh")
self.cmeshTools.locallyRefineMultilevelMesh(2,self.cmultilevelMesh,elementTagArray,flagForRefineType)
logEvent("MultilevelTriangularMesh: calling buildFromC")
self.buildFromC(self.cmultilevelMesh)
self.meshList.append(TriangularMesh())
self.meshList[self.nLevels-1].cmesh = self.cmeshList[self.nLevels-1]
self.meshList[self.nLevels-1].buildFromC(self.meshList[self.nLevels-1].cmesh)
self.meshList[self.nLevels-1].partitionMesh(nLayersOfOverlap=self.nLayersOfOverlap,parallelPartitioningType=self.parallelPartitioningType)
else:
print("""locallyRefine not implemented for self.useC= %s """ % (self.useC))
#
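# Illustrative sketch (not part of the original module): driving one step of local
# refinement on a MultilevelTriangularMesh by tagging elements; assumes the mesh was
# built with the C data structures (useC=True) so the cmeshTools-backed path is used.
def _example_locally_refine_once(mlMesh):
    tags = np.zeros((mlMesh.meshList[-1].nElements_global,), 'i')
    tags[0] = 1  # mark element 0 for bisection
    mlMesh.locallyRefine(tags, flagForRefineType=0)  # 0 -- newest-node bisection
    return mlMesh.meshList[-1]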
class MultilevelQuadrilateralMesh(MultilevelMesh):
""" A heirarchical multilevel mesh of quadrilaterals
WIP """
def __init__(self,
nx,ny,nz,
x=0.0,y=0.0,z=0.0,
Lx=1.0,Ly=1.0,Lz=1.0,
refinementLevels=1,
skipInit=False,
nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node,triangleFlag=0,
useC=True):
        from . import cmeshTools
MultilevelMesh.__init__(self)
        self.useC = useC
        if refinementLevels > 1:
            logEvent("Quad refinement is not supported in C routines, switching off c-mesh")
            self.useC = False  # quad refinement is only available in the pure-Python path
self.nLayersOfOverlap=nLayersOfOverlap ; self.parallelPartitioningType = parallelPartitioningType
if not skipInit:
if self.useC:
self.meshList.append(QuadrilateralMesh())
self.meshList[0].generateQuadrilateralMeshFromRectangularGrid(nx,ny,Lx,Ly)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(QuadrilateralMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.cmeshList[l])
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(QuadrilateralMesh())
self.meshList[0].rectangularToQuadrilateral(grid,x,y,z)
self.meshList[0].subdomainMesh = self.meshList[0]
self.elementChildren=[]
logEvent(self.meshList[0].meshInfo())
self.meshList[0].globalMesh = self.meshList[0]
                # The following bookkeeping should be done elsewhere...most of it is done in
                # the c-function calls that are not implemented yet for 2D quads
self.meshList[0].nElements_owned = self.meshList[0].nElements_global
                self.meshList[0].nodeNumbering_subdomain2global = np.zeros((self.meshList[0].nNodes_global,), 'i')
                self.meshList[0].elementNumbering_subdomain2global = np.zeros((self.meshList[0].nElements_global,), 'i')
self.meshList[0].nodeOffsets_subdomain_owned[-1] = self.meshList[0].nNodes_global
self.meshList[0].nNodes_owned = self.meshList[0].nNodes_global
self.meshList[0].elementOffsets_subdomain_owned[-1] = self.meshList[0].nElements_global
for node in range(self.meshList[0].nNodes_global):
self.meshList[0].nodeNumbering_subdomain2global.itemset(node,node)
for element in range(self.meshList[0].nElements_global):
self.meshList[0].elementNumbering_subdomain2global.itemset(element,element)
self.meshList[0].buildNodeStarArrays()
for l in range(1,refinementLevels):
self.refine()
self.meshList[l].subdomainMesh = self.meshList[l]
logEvent(self.meshList[-1].meshInfo())
self.meshList[l].buildNodeStarArrays()
self.buildArrayLists()
# print("from Python")
# print (self.meshList[0].nElements_global,
# self.meshList[0].nNodes_global,
# self.meshList[0].nNodes_element,
# self.meshList[0].nNodes_elementBoundary,
# self.meshList[0].nElementBoundaries_element,
# self.meshList[0].nElementBoundaries_global,
# self.meshList[0].nInteriorElementBoundaries_global,
# self.meshList[0].nExteriorElementBoundaries_global,
# self.meshList[0].max_nElements_node,
# self.meshList[0].nEdges_global,
# self.meshList[0].max_nNodeNeighbors_node,
# self.meshList[0].elementNodesArray,
# self.meshList[0].nodeElementsArray,
# self.meshList[0].nodeElementOffsets,
# self.meshList[0].elementNeighborsArray,
# self.meshList[0].elementBoundariesArray,
# self.meshList[0].elementBoundaryNodesArray,
# self.meshList[0].elementBoundaryElementsArray,
# self.meshList[0].elementBoundaryLocalElementBoundariesArray,
# self.meshList[0].interiorElementBoundariesArray,
# self.meshList[0].exteriorElementBoundariesArray,
# self.meshList[0].edgeNodesArray,
# self.meshList[0].nodeStarArray,
# self.meshList[0].nodeStarOffsets,
# self.meshList[0].elementMaterialTypes,
# self.meshList[0].elementBoundaryMaterialTypes,
# self.meshList[0].nodeMaterialTypes,
# self.meshList[0].nodeArray,
# self.meshList[0].elementDiametersArray,
# self.meshList[0].elementInnerDiametersArray,
# self.meshList[0].elementBoundaryDiametersArray,
# self.meshList[0].elementBarycentersArray,
# self.meshList[0].elementBoundaryBarycentersArray,
# self.meshList[0].nodeDiametersArray,
# self.meshList[0].nodeSupportArray,
# self.meshList[0].h,
# self.meshList[0].hMin,
# self.meshList[0].volume)
def refine(self):
self.meshList.append(QuadrilateralMesh())
self.meshList[-1].globalMesh = self.meshList[-1]
childrenDict = self.meshList[-1].refine(self.meshList[-2])
        # The following bookkeeping should be done elsewhere...most of it is done in
        # the c-function calls that are not implemented yet for 2D quads
self.meshList[-1].nElements_owned = self.meshList[-1].nElements_global
self.meshList[-1].nodeNumbering_subdomain2global = np.zeros((self.meshList[-1].nNodes_global,), 'i')
self.meshList[-1].elementNumbering_subdomain2global = np.zeros((self.meshList[-1].nElements_global,), 'i')
self.meshList[-1].nodeOffsets_subdomain_owned[-1] = self.meshList[-1].nNodes_global
self.meshList[-1].nNodes_owned = self.meshList[-1].nNodes_global
self.meshList[-1].elementOffsets_subdomain_owned[-1] = self.meshList[-1].nElements_global
for node in range(self.meshList[-1].nNodes_global):
self.meshList[-1].nodeNumbering_subdomain2global.itemset(node,node)
for element in range(self.meshList[-1].nElements_global):
self.meshList[-1].elementNumbering_subdomain2global.itemset(element,element)
self.elementChildren.append(childrenDict)
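# Illustrative sketch (not part of the original module): because quad refinement is
# only implemented in the pure-Python path, a refined quadrilateral hierarchy could
# be requested as
#   qMesh = MultilevelQuadrilateralMesh(5, 5, 1, refinementLevels=2, useC=False)
#   logEvent(qMesh.meshList[-1].meshInfo())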
class InterpolatedBathymetryMesh(MultilevelTriangularMesh):
"""A triangular mesh that interpolates bathymetry from a point cloud"""
def __init__(self,
domain,
triangleOptions,
atol=1.0e-4,
rtol=1.0e-4,
maxElementDiameter=None,
maxLevels=20,
maxNodes=100000,
bathyType="points",#"grid"
bathyAssignmentScheme="interpolation",#"localAveraging","L2-projection","H1-projection"
errorNormType="L2", #L1,Linfty
refineType=0,
):
from scipy import interpolate as scipy_interpolate
if maxElementDiameter:
self.maxElementDiameter = maxElementDiameter
else:
self.maxElementDiameter = np.inf
self.atol = atol
self.rtol = rtol
self.maxLevels=maxLevels
self.maxNodes=maxNodes
self.domain = domain
self.triangleOptions = triangleOptions
self.bathyType=bathyType
self.bathyAssignmentScheme=bathyAssignmentScheme
self.errorNormType = errorNormType
logEvent("InterpolatedBathymetryMesh: Calling Triangle to generate 2D coarse mesh for "+self.domain.name)
runTriangle(domain.polyfile,
self.triangleOptions)
logEvent("InterpolatedBathymetryMesh: Converting to Proteus Mesh")
self.coarseMesh = TriangularMesh()
self.coarseMesh.generateFromTriangleFiles(filebase=domain.polyfile,base=1)
MultilevelTriangularMesh.__init__(self,0,0,0,skipInit=True,nLayersOfOverlap=0,
parallelPartitioningType=MeshParallelPartitioningTypes.node)
self.generateFromExistingCoarseMesh(self.coarseMesh,1,
parallelPartitioningType=MeshParallelPartitioningTypes.node)
self.computeGeometricInfo()
#allocate some arrays based on the bathymetry data
logEvent("InterpolatedBathymetryMesh:Allocating data structures for bathymetry interpolation algorithm")
if bathyType == "points":
self.nPoints_global = self.domain.bathy.shape[0]
self.pointElementsArray_old = -np.ones((self.nPoints_global,),'i')
self.pointElementsArray = -np.ones((self.nPoints_global,),'i')
self.pointNodeWeightsArray = np.zeros((self.nPoints_global,3),'d')
self.bathyInterpolant = scipy_interpolate.LinearNDInterpolator(self.domain.bathy[:,:2],self.domain.bathy[:,2])
self.bathyNearestNeighbor = scipy_interpolate.NearestNDInterpolator(self.domain.bathy[:,:2], self.domain.bathy[:,2])
elif bathyType == "grid":
self.nPoints_global = self.domain.bathy.shape[0]
self.pointElementsArray_old = -np.ones((self.nPoints_global,),'i')
self.pointElementsArray = -np.ones((self.nPoints_global,),'i')
self.pointNodeWeightsArray = np.zeros((self.nPoints_global,3),'d')
x = self.domain.bathy[:self.domain.bathyGridDim[1],0]
y = self.domain.bathy[:self.domain.bathyGridDim[0]*self.domain.bathyGridDim[1]:self.domain.bathyGridDim[1],1]
z = self.domain.bathy[:,2].reshape(self.domain.bathyGridDim).transpose()
self.bathyInterpolant = scipy_interpolate.RectBivariateSpline(x,y,z,kx=1,ky=1)
#self.bathyInterpolant = scipy_interpolate.interp2d(x,y,z)
#
logEvent("InterpolatedBathymetryMesh: Locating points on initial mesh")
self.locatePoints_initial(self.meshList[-1])
logEvent("InterpolatedBathymetryMesh:setting mesh bathymetry from data")
self.setMeshBathymetry(self.meshList[-1])
logEvent("InterpolatedBathymetryMesh: tagging elements for refinement")
self.tagElements(self.meshList[-1])
levels = 0
error = 1.0;
while error >= 1.0 and self.meshList[-1].nNodes_global < self.maxNodes and levels < self.maxLevels:
levels += 1
logEvent("InterpolatedBathymetryMesh: Locally refining, level = %i" % (levels,))
self.locallyRefine(self.meshList[-1].elementTags,flagForRefineType=refineType)
logEvent("InterpolatedBathymetryMesh: interpolating bathymetry from parent mesh to refined mesh")
self.interpolateBathymetry()
logEvent("InterpolatedBathymetryMesh: Locating points on child mesh")
self.locatePoints_refined(self.meshList[-1])
logEvent("InterpolatedBathymetryMesh: setting mesh bathmetry from data")
self.setMeshBathymetry(self.meshList[-1])
logEvent("InterpolatedBathymetryMesh: tagging elements for refinement")
error = self.tagElements(self.meshList[-1])
logEvent("InterpolatedBathymetryMesh: error = %f atol = %f rtol = %f number of elements tagged = %i" % (error,self.atol,self.rtol,self.meshList[-1].elementTags.sum()))
def setMeshBathymetry(self,mesh):
if self.bathyAssignmentScheme == "interpolation":
self.setMeshBathymetry_interpolate(mesh)
elif self.bathyAssignmentScheme == "localAveraging":
self.setMeshBathymetry_localAveraging(mesh)
elif self.bathyAssignmentScheme == "L2-projection":
raise NotImplementedError
elif self.bathyAssignmentScheme == "H1-projection":
raise NotImplementedError
def setMeshBathymetry_interpolate(self,mesh):
if self.bathyType == 'grid':
mesh.nodeArray[:,2] = self.bathyInterpolant.ev(mesh.nodeArray[:,0],mesh.nodeArray[:,1])
else:
mesh.nodeArray[:,2] = self.bathyInterpolant(mesh.nodeArray[:,0],mesh.nodeArray[:,1])
nI = np.isnan(mesh.nodeArray[:,2])
mesh.nodeArray[nI,2] = self.bathyNearestNeighbor(mesh.nodeArray[nI,0],mesh.nodeArray[nI,1])
def setMeshBathymetry_localAveraging(self,mesh):
"""
calculate the arithmetic mean bathymetry of points inside each triangle and then assign the area-weighted average of the element means to each node
"""
from .FemTools import AffineMaps,ReferenceSimplex,LinearOnSimplexWithNodalBasis
interpolationSpace = LinearOnSimplexWithNodalBasis(nd=2)
#maps = AffineMaps(mesh,interpolationSpace.referenceElement,interpolationSpace)
#maps.useC = True
#calculate mean element height for each element
        #uses an arithmetic mean, so it assumes the "patch" associated with each point has the same size (weight)
mesh.elementMeanZ = np.zeros((mesh.nElements_global,),'d')
for pN in range(self.nPoints_global):
eN = self.pointElementsArray[pN]
if eN >= 0:
if mesh.nPoints_element[eN] > 0:
mesh.elementMeanZ[eN] += old_div(self.domain.bathy[pN,2],float(mesh.nPoints_element[eN]))
mesh.nodeArray[mesh.elementNodesArray[eN,0],2] = 0.0
mesh.nodeArray[mesh.elementNodesArray[eN,1],2] = 0.0
mesh.nodeArray[mesh.elementNodesArray[eN,2],2] = 0.0
#now assign the mesh node bathmetry as an area weighted average of the element mean
sumArray = mesh.nodeArray[:,2].copy()
sumArray[:]=0.0
for eN in range(mesh.nElements_global):
if mesh.nPoints_element[eN] > 0:#only calculate a contribution if this element contains a point
#calculate triangle area and assign weighted average of element means to node
xiArray = np.zeros((2,),'d')
#
grad_psi = np.zeros((interpolationSpace.dim,
interpolationSpace.referenceElement.dim),
'd')
dx = np.zeros((interpolationSpace.referenceElement.dim),
'd')
jacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
inverseJacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
for j in interpolationSpace.range_dim:
grad_psi[j,:] = interpolationSpace.basisGradients[j](xiArray)#evaluate at zero because we can (psi is linear)
jacobian.flat[:]=0.0
inverseJacobian.flat[:]=0.0
for j in interpolationSpace.range_dim:
J = mesh.elementNodesArray[eN,j]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
jacobian[m,n] += mesh.nodeArray[J,m]*grad_psi[j,n]
J = mesh.elementNodesArray[eN,0]
inverseJacobian = inv(jacobian)
area = 0.5*det(jacobian)
sumArray[mesh.elementNodesArray[eN,0]] += old_div(area,mesh.nodeSupportArray[mesh.elementNodesArray[eN,0]])
sumArray[mesh.elementNodesArray[eN,1]] += old_div(area,mesh.nodeSupportArray[mesh.elementNodesArray[eN,1]])
sumArray[mesh.elementNodesArray[eN,2]] += old_div(area,mesh.nodeSupportArray[mesh.elementNodesArray[eN,2]])
mesh.nodeArray[mesh.elementNodesArray[eN,0],2] += area*mesh.elementMeanZ[eN]/mesh.nodeSupportArray[mesh.elementNodesArray[eN,0]]
mesh.nodeArray[mesh.elementNodesArray[eN,1],2] += area*mesh.elementMeanZ[eN]/mesh.nodeSupportArray[mesh.elementNodesArray[eN,1]]
mesh.nodeArray[mesh.elementNodesArray[eN,2],2] += area*mesh.elementMeanZ[eN]/mesh.nodeSupportArray[mesh.elementNodesArray[eN,2]]
#cek debug
#print "sum of a nodes element areas divided by node support shoudl be 1 ",sumArray
def locatePoints(self,mesh):
"""
locate the element containing each point
this should only be used on very coarse meshes
"""
from .FemTools import AffineMaps,ReferenceSimplex,LinearOnSimplexWithNodalBasis
interpolationSpace = LinearOnSimplexWithNodalBasis(nd=2)
#maps = AffineMaps(mesh,interpolationSpace.referenceElement,interpolationSpace)
#maps.useC = False
#find the elements that contain bathymetry points and calculate:
        # - for each element, the number of bathymetry points in that element
        # - for each node, the total area of the node's elements that contain bathymetry points
# - the area of each element
# - the total area covered by elements containing bathmetry points
mesh.nPoints_element = np.zeros((mesh.nElements_global,),'i')
mesh.nodeSupportArray = np.zeros((mesh.nNodes_global,),'d')
mesh.area_element = np.zeros((mesh.nElements_global,),'d')
self.pointElementsArray[:] = -1
self.totalArea = 0.0
for eN in range(mesh.nElements_global):
#map points to reference space and test if it lies in the reference triangle
xiArray = np.zeros((2,),'d')
xiArray[:] = 0.0
#
grad_psi = np.zeros((interpolationSpace.dim,
interpolationSpace.referenceElement.dim),
'd')
dx = np.zeros((interpolationSpace.referenceElement.dim),
'd')
jacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
inverseJacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
for j in interpolationSpace.range_dim:
                grad_psi[j,:] = interpolationSpace.basisGradients[j](xiArray[0])#evaluate at zero because we can (psi is linear)
jacobian.flat[:]=0.0
inverseJacobian.flat[:]=0.0
for j in interpolationSpace.range_dim:
J = mesh.elementNodesArray[eN,j]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
jacobian[m,n] += mesh.nodeArray[J,m]*grad_psi[j,n]
J = mesh.elementNodesArray[eN,0]
inverseJacobian = inv(jacobian)
area = 0.5*det(jacobian)
mesh.area_element[eN] = area
self.totalArea += area
for pN in range(self.nPoints_global):#can optimize by skipping previously found points
xiArray[:] = 0.0
dx[:]=self.domain.bathy[pN,:2]
for m in interpolationSpace.referenceElement.range_dim:
dx[m]-=mesh.nodeArray[J,m]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
xiArray[m] += inverseJacobian[m,n]*dx[n]
#barycentric coordinates are non-negative so we're in this element
if xiArray[0] >=0.0 and xiArray[1] >= 0.0 and 1.0 - xiArray[0] - xiArray[1] >= 0.0:
self.pointElementsArray[pN] = eN
self.pointNodeWeightsArray[pN,0] = interpolationSpace.basis[0](xiArray)
self.pointNodeWeightsArray[pN,1] = interpolationSpace.basis[1](xiArray)
self.pointNodeWeightsArray[pN,2] = interpolationSpace.basis[2](xiArray)
#count the number of points inside each element
for pN in range(self.nPoints_global):
if self.pointElementsArray[pN] >= 0:
mesh.nPoints_element[self.pointElementsArray[pN]] += 1
#add up the support area for each node
for eN in range(mesh.nElements_global):
if mesh.nPoints_element[eN] > 0:
mesh.nodeSupportArray[mesh.elementNodesArray[eN,0]] += mesh.area_element[eN]
mesh.nodeSupportArray[mesh.elementNodesArray[eN,1]] += mesh.area_element[eN]
mesh.nodeSupportArray[mesh.elementNodesArray[eN,2]] += mesh.area_element[eN]
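    # Point-in-element test used above (sketch): with x0 the element's first node and
    # J the affine Jacobian assembled from grad_psi, the reference coordinates are
    #   xi = J^{-1} (x - x0)
    # and the point lies inside the triangle iff xi[0] >= 0, xi[1] >= 0 and
    # 1 - xi[0] - xi[1] >= 0 (non-negative barycentric coordinates).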
def locatePoints_refined(self,mesh):
"""
        locate the element containing each point by searching only the children
        of the element that contained the point on the parent (coarser) mesh
"""
from .FemTools import AffineMaps,ReferenceSimplex,LinearOnSimplexWithNodalBasis
interpolationSpace = LinearOnSimplexWithNodalBasis(nd=2)
#maps = AffineMaps(mesh,interpolationSpace.referenceElement,interpolationSpace)
#maps.useC = False
#find the elements that contain bathymetry points and calculate:
        # - for each element, the number of bathymetry points in that element
        # - for each node, the total area of the node's elements that contain bathymetry points
# - the area of each element
# - the total area covered by elements containing bathmetry points
mesh.nPoints_element = np.zeros((mesh.nElements_global,),'i')
mesh.nodeSupportArray = np.zeros((mesh.nNodes_global,),'d')
mesh.area_element = np.zeros((mesh.nElements_global,),'d')
self.totalArea = 0.0
self.pointElementsArray_old[:] = self.pointElementsArray
self.pointElementsArray[:] = -1
for pN in range(self.nPoints_global):
eN_parent = self.pointElementsArray_old[pN]
for eN in self.elementChildrenArrayList[-1][self.elementChildrenOffsetsList[-1][eN_parent]:self.elementChildrenOffsetsList[-1][eN_parent+1]]:
xiArray = np.zeros((2,),'d')
xiArray[:] = 0.0
grad_psi = np.zeros((interpolationSpace.dim,
interpolationSpace.referenceElement.dim),
'd')
dx = np.zeros((interpolationSpace.referenceElement.dim),
'd')
jacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
inverseJacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
for j in interpolationSpace.range_dim:
                    grad_psi[j,:] = interpolationSpace.basisGradients[j](xiArray[0])#evaluate at zero because we can (psi is linear)
jacobian.flat[:]=0.0
inverseJacobian.flat[:]=0.0
for j in interpolationSpace.range_dim:
J = mesh.elementNodesArray[eN,j]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
jacobian[m,n] += mesh.nodeArray[J,m]*grad_psi[j,n]
J = mesh.elementNodesArray[eN,0]
inverseJacobian = inv(jacobian)
area = 0.5*det(jacobian)
mesh.area_element[eN] = area
self.totalArea += area
xiArray[:] = 0.0
dx[:]=self.domain.bathy[pN,:2]
for m in interpolationSpace.referenceElement.range_dim:
dx[m]-=mesh.nodeArray[J,m]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
xiArray[m] += inverseJacobian[m,n]*dx[n]
#barycentric coordinates are non-negative so we're in this element
if xiArray[0] >=0.0 and xiArray[1] >= 0.0 and 1.0 - xiArray[0] - xiArray[1] >= 0.0:
self.pointElementsArray[pN] = eN
self.pointNodeWeightsArray[pN,0] = interpolationSpace.basis[0](xiArray)
self.pointNodeWeightsArray[pN,1] = interpolationSpace.basis[1](xiArray)
self.pointNodeWeightsArray[pN,2] = interpolationSpace.basis[2](xiArray)
#count the number of points inside each element
for pN in range(self.nPoints_global):
if self.pointElementsArray[pN] >= 0:
mesh.nPoints_element[self.pointElementsArray[pN]] += 1
#add up the support area for each node
for eN in range(mesh.nElements_global):
if mesh.nPoints_element[eN] > 0:
mesh.nodeSupportArray[mesh.elementNodesArray[eN,0]] += mesh.area_element[eN]
mesh.nodeSupportArray[mesh.elementNodesArray[eN,1]] += mesh.area_element[eN]
mesh.nodeSupportArray[mesh.elementNodesArray[eN,2]] += mesh.area_element[eN]
def locatePoints_initial(self,mesh):
"""
locate the element containing each point
first find the nearest node, then loop over that node's elements
"""
from scipy.spatial import cKDTree
from .FemTools import AffineMaps,ReferenceSimplex,LinearOnSimplexWithNodalBasis
interpolationSpace = LinearOnSimplexWithNodalBasis(nd=2)
#find the elements that contain bathymetry points and calculate:
        # - for each element, the number of bathymetry points in that element
        # - for each node, the total area of the node's elements that contain bathymetry points
# - the area of each element
# - the total area covered by elements containing bathmetry points
mesh.nPoints_element = np.zeros((mesh.nElements_global,),'i')
mesh.nodeSupportArray = np.zeros((mesh.nNodes_global,),'d')
mesh.area_element = np.zeros((mesh.nElements_global,),'d')
self.totalArea = 0.0
self.pointElementsArray[:] = -1
tree = cKDTree(mesh.nodeArray[:,:2])
xiArray = np.zeros((2,),'d')
grad_psi = np.zeros((interpolationSpace.dim,
interpolationSpace.referenceElement.dim),
'd')
dx = np.zeros((interpolationSpace.referenceElement.dim),
'd')
jacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
inverseJacobian = np.zeros((interpolationSpace.referenceElement.dim,
interpolationSpace.referenceElement.dim),
'd')
for pN in range(self.nPoints_global):
(distance,nN) = tree.query(self.domain.bathy[pN,:2])
for eN in mesh.nodeElementsArray[mesh.nodeElementOffsets[nN]:mesh.nodeElementOffsets[nN+1]]:
xiArray[:] = 0.0
for j in interpolationSpace.range_dim:
                    grad_psi[j,:] = interpolationSpace.basisGradients[j](xiArray[0])#evaluate at zero because we can (psi is linear)
jacobian.flat[:]=0.0
inverseJacobian.flat[:]=0.0
for j in interpolationSpace.range_dim:
J = mesh.elementNodesArray[eN,j]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
jacobian[m,n] += mesh.nodeArray[J,m]*grad_psi[j,n]
J = mesh.elementNodesArray[eN,0]
inverseJacobian = inv(jacobian)
area = 0.5*det(jacobian)
mesh.area_element[eN] = area
xiArray[:] = 0.0
dx[:]=self.domain.bathy[pN,:2]
for m in interpolationSpace.referenceElement.range_dim:
dx[m]-=mesh.nodeArray[J,m]
for m in interpolationSpace.referenceElement.range_dim:
for n in interpolationSpace.referenceElement.range_dim:
xiArray[m] += inverseJacobian[m,n]*dx[n]
#if the barycentric coordinates are non-negative we're in this element
if xiArray[0] >=0.0 and xiArray[1] >= 0.0 and 1.0 - xiArray[0] - xiArray[1] >= 0.0:
self.pointElementsArray[pN] = eN
self.pointNodeWeightsArray[pN,0] = interpolationSpace.basis[0](xiArray)
self.pointNodeWeightsArray[pN,1] = interpolationSpace.basis[1](xiArray)
self.pointNodeWeightsArray[pN,2] = interpolationSpace.basis[2](xiArray)
self.totalArea += mesh.area_element.sum()
#count the number of points inside each element
for pN in range(self.nPoints_global):
if self.pointElementsArray[pN] >= 0:
mesh.nPoints_element[self.pointElementsArray[pN]] += 1
#add up the support area for each node
for eN in range(mesh.nElements_global):
if mesh.nPoints_element[eN] > 0:
mesh.nodeSupportArray[mesh.elementNodesArray[eN,0]] += mesh.area_element[eN]
mesh.nodeSupportArray[mesh.elementNodesArray[eN,1]] += mesh.area_element[eN]
mesh.nodeSupportArray[mesh.elementNodesArray[eN,2]] += mesh.area_element[eN]
def interpolateBathymetry(self):
"""
interpolate bathymetry for the refinement from the parent mesh
"""
from proteus.FemTools import C0_AffineLinearOnSimplexWithNodalBasis,DOFBoundaryConditions,MultilevelProjectionOperators
mlMeshTemp = MultilevelMesh(levels=2)
mlMeshTemp.meshList = self.meshList[-2:]
mlMeshTemp.nLevels=2
mlMeshTemp.cmeshList = self.cmeshList[-2:]
mlMeshTemp.elementParentsArrayList = self.elementParentsArrayList[-2:]
mlMeshTemp.elementChildrenArrayList = self.elementChildrenArrayList[-1:]
mlMeshTemp.elementChildrenOffsetsList = self.elementChildrenOffsetsList[-1:]
nd=2
TrialSpaceTypeDict = {0:C0_AffineLinearOnSimplexWithNodalBasis}
trialSpaceDictParent = dict([ (cj,TrialSpaceType(mlMeshTemp.meshList[0],nd)) for (cj,TrialSpaceType) in TrialSpaceTypeDict.items()])
trialSpaceDictChild = dict([ (cj,TrialSpaceType(mlMeshTemp.meshList[1],nd)) for (cj,TrialSpaceType) in TrialSpaceTypeDict.items()])
trialSpaceDictList = [trialSpaceDictParent,trialSpaceDictChild]
offsetListList=[[0],[0]]
strideListList=[[1],[1]]
def getDBC(x,flag):
return None
bcDictList=[dict([(0,DOFBoundaryConditions(trialSpaceDictParent[0],getPointwiseBoundaryConditions=getDBC,weakDirichletConditions=False))]),
dict([(0,DOFBoundaryConditions(trialSpaceDictChild[0],getPointwiseBoundaryConditions=getDBC,weakDirichletConditions=False))])]
self.meshTransfers = MultilevelProjectionOperators(
mlMeshTemp,
trialSpaceDictList,
offsetListList,
strideListList,
bcDictList)
zParent = self.meshList[-2].nodeArray[:,2].copy()
zChild = self.meshList[-1].nodeArray[:,2].copy()
self.meshTransfers.prolongList[-1].matvec(zParent,zChild)
self.meshList[-1].nodeArray[:,2] = zChild
def tagElements(self,mesh):
"""
loop over points and calculate whether the interpolation error is within the tolerance
this should only be used on very coarse meshes
"""
mesh.elementTags = np.zeros((mesh.nElements_global,),'i')
mesh.errorAverage_element = np.zeros((mesh.nElements_global,),'d')
errorInfty = 0.0
mesh.elementTags[mesh.elementDiametersArray > self.maxElementDiameter ] = 1
for pN in range(self.nPoints_global):
eN = self.pointElementsArray[pN]
if eN >= 0:
zInterp = self.pointNodeWeightsArray[pN,0]*mesh.nodeArray[mesh.elementNodesArray[eN,0],2] + \
self.pointNodeWeightsArray[pN,1]*mesh.nodeArray[mesh.elementNodesArray[eN,1],2] + \
self.pointNodeWeightsArray[pN,2]*mesh.nodeArray[mesh.elementNodesArray[eN,2],2]
errorPointwise = fabs(zInterp - self.domain.bathy[pN,2])/(fabs(self.domain.bathy[pN,2])*self.rtol + self.atol)
errorInfty = max(errorPointwise,errorInfty)
mesh.errorAverage_element[eN] += (errorPointwise/float(mesh.nPoints_element[eN]))
#print "error average",mesh.errorAverage_element[eN]
if errorPointwise >= 1.0:
mesh.elementTags[eN] = 1
if self.errorNormType == "L1":
mesh.elementTags[:] = 0
errorL1 = 0.0
for eN in range(mesh.nElements_global):
errorL1 += mesh.errorAverage_element[eN]*mesh.area_element[eN]
if mesh.errorAverage_element[eN] >= 1.0:
mesh.elementTags[eN] = 1
            errorL1 /= self.totalArea#normalize by the domain area so the error has units of length
return errorL1
if self.errorNormType == "L2":
mesh.elementTags[:] = 0
errorL2 = 0.0
for eN in range(mesh.nElements_global):
errorL2 += (mesh.errorAverage_element[eN])**2 * mesh.area_element[eN]
if mesh.errorAverage_element[eN] >= 1.0:
mesh.elementTags[eN] = 1
            errorL2 = old_div(sqrt(errorL2),self.totalArea)#normalize by the domain area so the error has units of length
return errorL2
else:
print("Interpolation Error, L_infty ",errorInfty)
return errorInfty
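# Illustrative construction (a sketch, not from the original source): assumes a domain
# object exposing .name, .polyfile, and .bathy (an (nPoints, 3) array of x, y, z
# samples), as used by the class above; the triangleOptions string is a hypothetical
# Triangle quality/area option set.
#   bathyMesh = InterpolatedBathymetryMesh(domain,
#                                          triangleOptions="VApq30Dena1.0",
#                                          atol=1.0e-2, rtol=1.0e-2,
#                                          bathyType="points",
#                                          bathyAssignmentScheme="interpolation")
#   finest = bathyMesh.meshList[-1]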
class EdgeMesh(Mesh):
"""A mesh of edges
The nodes, and edges are indexed by their node tuples. The
corresponding lists are derived from the dictionaries, and sorted
lexicographically. The global node numbers are redefined to give a
lexicographic ordering.
"""
def __init__(self):
Mesh.__init__(self)
self.nodeDict={}
self.edgeDict={}
self.oldToNewNode=[]
def computeGeometricInfo(self):
        from . import cmeshTools
cmeshTools.computeGeometricInfo_edge(self.cmesh)
def generateEdgeMeshFromRectangularGrid(self,nx,Lx):
        from . import cmeshTools
self.cmesh = cmeshTools.CMesh()
cmeshTools.generateEdgeMeshFromRectangularGrid(nx,Lx,self.cmesh)
cmeshTools.allocateGeometricInfo_edge(self.cmesh)
cmeshTools.computeGeometricInfo_edge(self.cmesh)
self.buildFromC(self.cmesh)
#mwf debug
#print "EdgeMesh rect->edge after build nodes=%s " % (self.nodeArray)
def rectangularToEdge(self,grid):
#copy the nodes from the rectangular mesh
        #I want to be able to renumber later without
#changing the grid nodes, so I do deep copies here
self.nodeList = [Node(n.N,n.p[X],n.p[Y],n.p[Z]) for n in grid.nodeList]
self.nodeDict = dict([(n,n) for n in self.nodeList])
for e in grid.edgeList:
self.newEdge([self.nodeDict[e.nodes[0]],self.nodeDict[e.nodes[1]]])
self.finalize()
#self.buildListsEdges()
def finalize(self):
self.buildLists()
self.buildArraysFromLists()
#todo: build boundary mesh
def buildLists(self):
self.buildListsNodes()
self.buildListsEdges()
self.elementList = self.edgeList
self.elementBoundaryList = self.nodeList
def buildListsNodes(self):
keyList = list(self.nodeDict.keys())
keyList.sort()
self.nodeList=[]
self.oldToNewNode=list(range(len(self.nodeDict)))
for nN,k in enumerate(keyList):
self.oldToNewNode[self.nodeDict[k].N]=nN
self.nodeDict[k].N = nN
self.nodeList.append(self.nodeDict[k])
def buildListsEdges(self):
keyList = list(self.edgeDict.keys())
keyList.sort()
self.edgeList=[]
for eN,k in enumerate(keyList):
self.edgeDict[k].N = eN
self.edgeList.append(self.edgeDict[k])
def newEdge(self,nodes):
e = Edge(len(self.edgeDict),nodes)
self.edgeDict[e.nodes] = e
return e
def registerNode(self,node):
if node in self.nodeDict:
node = self.nodeDict[node]
else:
node.N = len(self.nodeDict)
self.nodeDict[node] = node
return node
def refine2e(self,oldMesh):
childrenDict={}
for e in oldMesh.edgeList:
#deep copy old nodes because we'll renumber
eNodes = [Node(eN,n.p[X],n.p[Y],n.p[Z])
for eN,n in enumerate(e.nodes)]
for lnN,n in enumerate(eNodes): eNodes[lnN]=self.registerNode(n)
#add new node
e.computeGeometricInfo()
newNode = Node(len(self.nodeDict),
e.barycenter[X],
e.barycenter[Y],
e.barycenter[Z])
newNode = self.registerNode(newNode)
e1=self.newEdge([eNodes[0],newNode])
e2=self.newEdge([newNode,eNodes[1]])
childrenDict[e.N]=[e1,e2]
self.finalize()
return childrenDict
def refine(self,oldMesh):
return self.refine2e(oldMesh)
def meshInfo(self):
minfo = """Number of edges : %d
Number of nodes : %d\n""" % (self.nElements_global,self.nNodes_global)
if self.subdomainMesh != self:
sinfo = self.subdomainMesh.meshInfo()
info = "*** Global ***\n" + minfo + "\n*** Local ***\n" + sinfo
return info
return minfo
def writeMeshADH(self,filename):
pass
def writeMeshXdmf(self,ar,name='',t=0.0,init=False,meshChanged=False,tCount=0):
Mesh.writeMeshXdmf(self,ar,name,t,init,meshChanged,"Polyline",tCount)
def writeMeshEnsight(self,filename,description=None):
base=1
#write the casefile
caseOut=open(filename+'.case','w')
caseOut.write('FORMAT\n'+'type: ensight gold\n')
caseOut.write('GEOMETRY\n'+'model: '+filename+'.geo\n')
caseOut.close()
meshOut=open(filename+'.geo','w')
meshOut.write('Ensight Gold\n')
meshOut.write('Unstructured Edge Mesh\n')
meshOut.write('node id given\n')
meshOut.write('element id given\n')
meshOut.write('part \n'+'%10i\n' % 1)
if description:
meshOut.write(description+'\n')
else:
meshOut.write('A Mesh\n')
meshOut.write('coordinates\n'+'%10i\n' % self.nNodes_global)
for nN in range(self.nNodes_global):
meshOut.write('%10i\n' % (nN+base))
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,0])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,1])
for nN in range(self.nNodes_global):
meshOut.write('%12.5E\n' % self.nodeArray[nN,2])
meshOut.write('bar2\n'+'%10i\n' % self.nElements_global)
for eN in range(self.nElements_global):
meshOut.write('%10i\n' % (eN+base))
for eN in range(self.nElements_global):
meshOut.write('%10i%10i\n' % tuple((nN+base) for nN in self.elementNodesArray[eN,:]))
meshOut.close()
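# Illustrative sketch (not part of the original module): a 1D edge mesh on [0, Lx]
# can be generated directly from a rectangular grid description,
#   em = EdgeMesh()
#   em.generateEdgeMeshFromRectangularGrid(11, 1.0)   # nx, Lx
#   em.writeMeshXdmf(ar)    # ar is assumed to be an XdmfArchive-style object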
class MultilevelEdgeMesh(MultilevelMesh):
"""A hierarchical multilevel mesh of intervals (edges)"""
    from . import cmeshTools
def __init__(self,
nx, ny, nz,
x=0.0, y=0.0, z=0.0,
Lx=1.0, Ly=1.0, Lz=1.0,
refinementLevels=1,
nLayersOfOverlap=1,
parallelPartitioningType=MeshParallelPartitioningTypes.node):
from . import cmeshTools
MultilevelMesh.__init__(self)
self.useC=True
self.nLayersOfOverlap=nLayersOfOverlap; self.parallelPartitioningType = parallelPartitioningType
if self.useC:
self.meshList.append(EdgeMesh())
self.meshList[0].generateEdgeMeshFromRectangularGrid(nx,Lx)
self.cmultilevelMesh = cmeshTools.CMultilevelMesh(self.meshList[0].cmesh,refinementLevels)
self.buildFromC(self.cmultilevelMesh)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.meshList[0].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
for l in range(1,refinementLevels):
self.meshList.append(EdgeMesh())
self.meshList[l].cmesh = self.cmeshList[l]
self.meshList[l].buildFromC(self.meshList[l].cmesh)
self.meshList[l].partitionMesh(nLayersOfOverlap=nLayersOfOverlap,parallelPartitioningType=parallelPartitioningType)
else:
grid=RectangularGrid(nx,ny,nz,Lx,Ly,Lz)
self.meshList.append(EdgeMesh())
self.meshList[0].rectangularToEdge(grid)
self.meshList[0].nodeArray[:,0] += x
self.meshList[0].nodeArray[:,1] += y
self.meshList[0].nodeArray[:,2] += z
self.elementChildren=[]
print(self.meshList[0].meshInfo())
for l in range(1,refinementLevels):
self.refine()
print(self.meshList[-1].meshInfo())
def refine(self):
self.meshList.append(EdgeMesh())
childrenDict = self.meshList[-1].refine(self.meshList[-2])
self.elementChildren.append(childrenDict)
def computeGeometricInfo(self):
for m in self.meshList:
m.computeGeometricInfo()
def locallyRefine(self,elementTagArray):
"""
simple local refinement assuming elementTagArray[eN]=1 --> bisect
"""
if self.useC:
self.cmeshTools.locallyRefineMultilevelMesh(1,self.cmultilevelMesh,elementTagArray)
self.buildFromC(self.cmultilevelMesh)
self.meshList.append(EdgeMesh())
self.meshList[self.nLevels-1].cmesh = self.cmeshList[self.nLevels-1]
self.meshList[self.nLevels-1].buildFromC(self.meshList[self.nLevels-1].cmesh)
self.meshList[self.nLevels-1].partitionMesh(nLayersOfOverlap=self.nLayersOfOverlap,parallelPartitioningType=self.parallelPartitioningType)
else:
print("""locallyRefine not implemented for self.useC= %s """ % (self.useC))
#
#
class MultilevelSimplicialMesh(MultilevelMesh):
    """A wrapper for all the simplicial hierarchical meshes in 1, 2, and 3D"""
    def __init__(self,nd,nx,ny=1,nz=1,Lx=1.0,Ly=1.0,Lz=1.0,refinementLevels=1):
        self.nd = nd
        if nd==1:
            MultilevelEdgeMesh.__init__(self,nx,ny,nz,
                                        Lx=Lx,Ly=Ly,Lz=Lz,
                                        refinementLevels=refinementLevels)
        elif nd==2:
            MultilevelTriangularMesh.__init__(self,nx,ny,nz,
                                              Lx=Lx,Ly=Ly,Lz=Lz,
                                              refinementLevels=refinementLevels)
        elif nd==3:
            MultilevelTetrahedralMesh.__init__(self,nx,ny,nz,
                                               Lx=Lx,Ly=Ly,Lz=Lz,
                                               refinementLevels=refinementLevels)
    def refine(self):
        if self.nd==1:
            MultilevelEdgeMesh.refine(self)
        elif self.nd==2:
            MultilevelTriangularMesh.refine(self)
        elif self.nd==3:
            MultilevelTetrahedralMesh.refine(self)
## @}
###utility functions for reading meshes from Xdmf
def findXMLgridElement(xmf,MeshTag='Spatial_Domain',id_in_collection=-1,verbose=0):
"""Try to find the element of the xml tree xmf that holds a uniform
grid with the name given in MeshTag by searching through Temporal
Grid Collections and Grid Collections.
If MeshTag isn't found, uses the first entry in the Domain
"""
Domain = xmf.getroot()[-1]
GridCollection = None
Grid = None
for collection in Domain:
if 'Name' in collection.attrib and MeshTag in collection.attrib['Name']:
GridCollection = collection
break
if GridCollection is None:
GridCollection = Domain[0]
logEvent("Trying GridCollection.tag= %s" % (GridCollection.tag),4)
if GridCollection.attrib['GridType'] == 'Collection':
Grid = GridCollection[-1]
elif GridCollection.attrib['GridType'] == 'Uniform':
Grid = GridCollection
assert Grid.tag == 'Grid'
assert Grid.attrib['GridType'] == 'Uniform'
return Grid
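# Sketch of the Xdmf layout findXMLgridElement expects (illustrative, not exhaustive):
#   <Xdmf>
#     <Domain>
#       <Grid Name="Spatial_Domain" GridType="Collection" CollectionType="Temporal">
#         <Grid GridType="Uniform">
#           <Topology Type="Triangle">...</Topology>
#           <Geometry>...</Geometry>
#           <Attribute Name="nodeMaterialTypes">...</Attribute>
#           <Attribute Name="elementMaterialTypes">...</Attribute>
#         </Grid>
#       </Grid>
#     </Domain>
#   </Xdmf>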
def extractPropertiesFromXdmfGridNode(Grid):
"""unpack the Topology, Geometry, NodeMaterials, and ElementMaterials
nodes from xdmf node for a uniform grid
"""
#Geometry first
Topology = None; Geometry = None; NodeMaterials= None; ElementMaterials = None
for i,leaf in enumerate(Grid):
logEvent("Grid leaf %d tag= %s " % (i,leaf.tag),4)
if leaf.tag == 'Topology':
Topology = Grid[i]
logEvent("Topology found in leaf %d " % i,4)
elif leaf.tag == 'Geometry':
Geometry = Grid[i]
logEvent("Geometry found in leaf %d " % i,4)
elif leaf.tag == 'Attribute' and leaf.attrib['Name'] == 'nodeMaterialTypes':
NodeMaterials = Grid[i]
logEvent("NodeMaterials found in leaf %d " % i,4)
elif leaf.tag == 'Attribute' and leaf.attrib['Name'] == 'elementMaterialTypes':
ElementMaterials = Grid[i]
logEvent("ElementMaterials found in leaf %d " % i,4)
return Topology,Geometry,NodeMaterials,ElementMaterials
def readUniformElementTopologyFromXdmf(elementTopologyName,Topology,hdf5,topologyid2name,topology2nodes):
"""
    Read Xdmf element topology information when there are uniform elements in the mesh
    Type of element given by elementTopologyName
    Heavy data stored in hdf5
    topologyid2name -- lookup from Xdmf topology id to element type name
    topology2nodes  -- lookup for number of nodes in a given element type
returns
nElements_global -- the number of elements in the mesh
nNodes_element -- number of nodes per element
elementNodesArray -- element --> node connectivity stored as flattened array accessed using elementNodes_offset
elementNodes_offset -- offsets into the elementNodesArray storage for element connectivity,
element eN nodes are in elementNodesArray[elementNodes_offset[eN]:elementNodes_offset[eN+1]]
"""
nNodes_element = topology2nodes[elementTopologyName]
entry = Topology[0].text.split(':')[-1]
logEvent("Reading elementNodesArray from %s " % entry,3)
elementNodesArray = hdf5["/"+entry][:]
assert elementNodesArray.shape[1] == nNodes_element
nElements_global = elementNodesArray.shape[0]
logEvent("nElements_global,nNodes_element= (%d,%d) " % (nElements_global,nNodes_element),3)
elementNodes_offset = np.arange(nElements_global*nNodes_element+1,step=nNodes_element,dtype='i')
return nElements_global, nNodes_element, elementNodesArray, elementNodes_offset
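# For example (sketch): a mesh of 3 triangles gives nNodes_element = 3 and
# elementNodes_offset = [0, 3, 6, 9]; element 1's nodes occupy entries 3:6 of the
# flattened connectivity.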
def readMixedElementTopologyFromXdmf(elementTopologyName,Topology,hdf5,topologyid2name,topology2nodes):
"""
    Read Xdmf element topology information when there are mixed elements in the mesh
    Heavy data stored in hdf5
    topologyid2name -- lookup from Xdmf topology id to element type name
    topology2nodes  -- lookup for number of nodes in a given element type
returns
nElements_global -- the number of elements in the mesh
elementNodesArray -- element --> node connectivity stored as flattened
array accessed using elementNodes_offset
    elementNodes_offset -- offsets into the elementNodesArray storage for element
        connectivity, element eN nodes are in
        elementNodesArray[elementNodes_offset[eN]:elementNodes_offset[eN+1]]
"""
assert elementTopologyName == 'Mixed'
entry = Topology[0].text.split(':')[-1]
logEvent("Reading xdmf_topology from %s " % entry,3)
xdmf_topology = hdf5["/"+entry][:]
#build elementNodesArray and offsets now
nElements_global = 0
i = 0
while i < len(xdmf_topology):
nElements_global += 1
nNodes_local = topology2nodes[topologyid2name[xdmf_topology[i]]]
i += nNodes_local+1
#
logEvent("Mixed topology found %s elements " % nElements_global,3)
elementNodes_offset = np.zeros((nElements_global+1,),'i')
i = 0; eN = 0
while i < len(xdmf_topology):
nNodes_local = topology2nodes[topologyid2name[xdmf_topology[i]]]
elementNodes_offset[eN+1] = elementNodes_offset[eN] + nNodes_local
eN += 1; i += nNodes_local+1
elementNodesArray = np.zeros((elementNodes_offset[nElements_global],),'i')
i = 0; eN = 0
    while i < len(xdmf_topology):
nNodes_local = topology2nodes[topologyid2name[xdmf_topology[i]]]
elementNodesArray[elementNodes_offset[eN]:elementNodes_offset[eN+1]][:] = xdmf_topology[i+1:i+1+nNodes_local][:]
eN += 1; i += nNodes_local+1
return nElements_global, elementNodesArray, elementNodes_offset
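# The mixed topology stream decoded above is laid out as (sketch)
#   [type_id, n0, ..., n_{k-1}, type_id, n0, ...]
# e.g. a triangle followed by a quadrilateral appears as
#   [4, a, b, c, 5, p, q, r, s]
# using the Xdmf type ids from topologyid2name (4 = Triangle, 5 = Quadrilateral).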
def readMeshXdmf(xmf_archive_base,heavy_file_base,MeshTag="Spatial_Domain",hasHDF5=True,verbose=0):
"""Read in a mesh from XDMF, assuming heavy data is in hdf5
:return: a BasicMeshInfo object with the minimal information read
"""
# start trying to read an xdmf archive with name xmf_archive_base.xmf
# assumes heavy_file_base.h5 has heavy data
# root Element is Xdmf
# last child of Xdmf which should be a Domain Element
# find child of Domain that is a Temporal Grid Collection with a name containing MeshTag, if None use first collection
# last child of Temporal Grid Collection should be a Uniform Grid at final time
# Attribute (usually 1) of child is Topology
# set elementTopologyName to Type
# if Type != Mixed
# get text attribute and read this entry from hdf5 file
# set nNodes_element based on Type, nElements_global from leading dimension of elementNodesArray
# create elementNodes_offset from Type and flatten elementNodesArray
# else
    #          get text attribute and read this entry from hdf5 file to place into xdmf_topology
# generate elementNodesArray from xdmf_topology, calculating the number of elements using
# walk through xdmf_topology
# Attribute (usually 2) of child is Geometry --> load data into nodeArray
# set nNodes_global from nodeArray
# If has Attribute nodeMaterials read this from hdf file, else set to default of all zeros
# If has Attribute elementMaterialTypes, read this from hdf file, else set to default of all zeros
assert os.path.isfile(xmf_archive_base+'.xmf')
assert os.path.isfile(heavy_file_base+'.h5')
###information about allowed Xdmf topologies
#Xdmf cell type id to Name
topologyid2name = {2:'Polyline',4:'Triangle',5:'Quadrilateral',6:'Tetrahedron',8:'Wedge',9:'Hexahedron',
112:'Mixed'} #Mixed isn't actually used 0x070
#Topology name to number of local nodes
topology2nodes = {'Polyline':2,'Triangle':3,'Quadrilateral':4,'Tetrahedron':4,'Wedge':6,'Hexahedron':8}
#for output
class BasicMeshInfo(object):
def __init__(self):
self.nNodes_global = None
self.nodeArray = None
self.nodeMaterialTypes = None
self.nNodes_element = None
self.nElements_global = None
self.elementTopologyName = None
self.elementNodesArray = None
self.elementNodes_offset = None
self.elementMaterialTypes = None
self.nNodes_owned = None
self.nElements_owned = None
#
#
MeshInfo = BasicMeshInfo()
xmf = ET.parse(xmf_archive_base+'.xmf')
hdf5= h5py.File(heavy_file_base+'.h5',"r")
assert hasHDF5
Grid = findXMLgridElement(xmf,MeshTag,id_in_collection=-1,verbose=verbose)
Topology,Geometry,NodeMaterials,ElementMaterials = extractPropertiesFromXdmfGridNode(Grid)
assert Geometry is not None
entry = Geometry[0].text.split(':')[-1]
logEvent("Reading nodeArray from %s " % entry,3)
MeshInfo.nodeArray = hdf5["/"+entry][:]
MeshInfo.nNodes_global = MeshInfo.nodeArray.shape[0]
if NodeMaterials is not None:
entry = NodeMaterials[0].text.split(':')[-1]
logEvent("Reading nodeMaterialTypes from %s " % entry,4)
MeshInfo.nodeMaterialTypes = hdf5["/"+entry][:]
else:
MeshInfo.nodeMaterialTypes = np.zeros((MeshInfo.nNodes_global,),'i')
assert Topology is not None
if 'Type' in Topology.attrib:
MeshInfo.elementTopologyName = Topology.attrib['Type']
elif 'TopologyType' in Topology.attrib:
MeshInfo.elementTopologyName = Topology.attrib['TopologyType']
assert MeshInfo.elementTopologyName is not None
logEvent("elementTopologyName= %s " % MeshInfo.elementTopologyName,3)
assert MeshInfo.elementTopologyName in list(topologyid2name.values())
if MeshInfo.elementTopologyName != 'Mixed':
MeshInfo.nElements_global, MeshInfo.nNodes_element, \
MeshInfo.elementNodesArray, MeshInfo.elementNodes_offset = readUniformElementTopologyFromXdmf(MeshInfo.elementTopologyName,Topology,
hdf5,topologyid2name,topology2nodes)
else:
MeshInfo.nElements_global, MeshInfo.elementNodesArray, \
MeshInfo.elementNodes_offset = readMixedElementTopologyFromXdmf(MeshInfo.elementTopologyName,Topology,hdf5,topologyid2name,topology2nodes)
#
if ElementMaterials is not None:
entry = ElementMaterials[0].text.split(':')[-1]
logEvent("Reading elementMaterialTypes from %s " % entry,3)
MeshInfo.elementMaterialTypes = hdf5["/"+entry][:]
else:
        MeshInfo.elementMaterialTypes = np.zeros((MeshInfo.nElements_global,),'i')
    #
    return MeshInfo
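# Illustrative usage (a sketch; assumes mesh.xmf/mesh.h5 were written by an earlier run):
#   info = readMeshXdmf("mesh", "mesh", MeshTag="Spatial_Domain")
#   print(info.nElements_global, info.elementTopologyName)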
from typing import List
import numpy as np
from numpy import sqrt
Gx_0 = np.array([
[0],
])
Gx_1 = np.array([
[0, 0, 0],
[0, 0, -1],
[0, 1, 0],
])
Gx_2 = np.array([
[0, 1, 0, 0, 0],
[-1, 0, 0, 0, 0],
[0, 0, 0, -sqrt(3), 0],
[0, 0, sqrt(3), 0, -1],
[0, 0, 0, 1, 0],
])
Gx_3 = np.array([
[0, sqrt(6)/2, 0, 0, 0, 0, 0],
[-sqrt(6)/2, 0, sqrt(10)/2, 0, 0, 0, 0],
[0, -sqrt(10)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(6), 0, 0],
[0, 0, 0, sqrt(6), 0, -sqrt(10)/2, 0],
[0, 0, 0, 0, sqrt(10)/2, 0, -sqrt(6)/2],
[0, 0, 0, 0, 0, sqrt(6)/2, 0],
])
Gx_4 = np.array([
[0, sqrt(2), 0, 0, 0, 0, 0, 0, 0],
[-sqrt(2), 0, sqrt(14)/2, 0, 0, 0, 0, 0, 0],
[0, -sqrt(14)/2, 0, 3*sqrt(2)/2, 0, 0, 0, 0, 0],
[0, 0, -3*sqrt(2)/2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -sqrt(10), 0, 0, 0],
[0, 0, 0, 0, sqrt(10), 0, -3*sqrt(2)/2, 0, 0],
[0, 0, 0, 0, 0, 3*sqrt(2)/2, 0, -sqrt(14)/2, 0],
[0, 0, 0, 0, 0, 0, sqrt(14)/2, 0, -sqrt(2)],
[0, 0, 0, 0, 0, 0, 0, sqrt(2), 0],
])
Gx_5 = np.array([
[0, sqrt(10)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(10)/2, 0, 3*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -3*sqrt(2)/2, 0, sqrt(6), 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(6), 0, sqrt(7), 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(7), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -sqrt(15), 0, 0, 0, 0],
[0, 0, 0, 0, 0, sqrt(15), 0, -sqrt(7), 0, 0, 0],
[0, 0, 0, 0, 0, 0, sqrt(7), 0, -sqrt(6), 0, 0],
[0, 0, 0, 0, 0, 0, 0, sqrt(6), 0, -3*sqrt(2)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(2)/2, 0, -sqrt(10)/2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(10)/2, 0],
])
Gx_6 = np.array([
[0, sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(3), 0, sqrt(22)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(22)/2, 0, sqrt(30)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(30)/2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -3, 0, sqrt(10), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(10), 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, -sqrt(21), 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, sqrt(21), 0, -sqrt(10), 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, sqrt(10), 0, -3, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3, 0, -sqrt(30)/2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(30)/2, 0, -sqrt(22)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(22)/2, 0, -sqrt(3)],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(3), 0],
])
Gx_7 = np.array([
[0, sqrt(14)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(14)/2, 0, sqrt(26)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(26)/2, 0, 3, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -3, 0, sqrt(11), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(11), 0, 5*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -5*sqrt(2)/2, 0, 3*sqrt(6)/2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -3*sqrt(6)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, -2*sqrt(7), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 2*sqrt(7), 0, -3*sqrt(6)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(6)/2, 0, -5*sqrt(2)/2, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 5*sqrt(2)/2, 0, -sqrt(11), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(11), 0, -3, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3, 0, -sqrt(26)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(26)/2, 0, -sqrt(14)/2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(14)/2, 0],
])
Gx_8 = np.array([
[0, 2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-2, 0, sqrt(30)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(30)/2, 0, sqrt(42)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(42)/2, 0, sqrt(13), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(13), 0, sqrt(15), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(15), 0, sqrt(66)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -sqrt(66)/2, 0, sqrt(70)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -sqrt(70)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, -6, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 6, 0, -sqrt(70)/2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(70)/2, 0, -sqrt(66)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(66)/2, 0, -sqrt(15), 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(15), 0, -sqrt(13), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(13), 0, -sqrt(42)/2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(42)/2, 0, -sqrt(30)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(30)/2, 0, -2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2, 0],
])
Gx_9 = np.array([
[0, 3*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-3*sqrt(2)/2, 0, sqrt(34)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(34)/2, 0, 2*sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -2*sqrt(3), 0, sqrt(15), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(15), 0, sqrt(70)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -sqrt(70)/2, 0, sqrt(78)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -sqrt(78)/2, 0, sqrt(21), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -sqrt(21), 0, sqrt(22), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, -sqrt(22), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -3*sqrt(5), 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(5), 0, -sqrt(22), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(22), 0, -sqrt(21), 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(21), 0, -sqrt(78)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(78)/2, 0, -sqrt(70)/2, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(70)/2, 0, -sqrt(15), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(15), 0, -2*sqrt(3), 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2*sqrt(3), 0, -sqrt(34)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(34)/2, 0, -3*sqrt(2)/2],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(2)/2, 0],
])
Gx_10 = np.array([
[0, sqrt(5), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(5), 0, sqrt(38)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(38)/2, 0, 3*sqrt(6)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -3*sqrt(6)/2, 0, sqrt(17), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(17), 0, 2*sqrt(5), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -2*sqrt(5), 0, 3*sqrt(10)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -3*sqrt(10)/2, 0, 7*sqrt(2)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -7*sqrt(2)/2, 0, sqrt(26), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, -sqrt(26), 0, 3*sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, -3*sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -sqrt(55), 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(55), 0, -3*sqrt(3), 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(3), 0, -sqrt(26), 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(26), 0, -7*sqrt(2)/2, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 7*sqrt(2)/2, 0, -3*sqrt(10)/2, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(10)/2, 0, -2*sqrt(5), 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2*sqrt(5), 0, -sqrt(17), 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(17), 0, -3*sqrt(6)/2, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(6)/2, 0, -sqrt(38)/2, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(38)/2, 0, -sqrt(5)],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(5), 0],
])
Gx_11 = np.array([
[0, sqrt(22)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[-sqrt(22)/2, 0, sqrt(42)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, -sqrt(42)/2, 0, sqrt(15), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, -sqrt(15), 0, sqrt(19), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, -sqrt(19), 0, 3*sqrt(10)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, -3*sqrt(10)/2, 0, sqrt(102)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, -sqrt(102)/2, 0, 2*sqrt(7), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, -2*sqrt(7), 0, sqrt(30), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, -sqrt(30), 0, 3*sqrt(14)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, -3*sqrt(14)/2, 0, sqrt(130)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, -sqrt(130)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, -sqrt(66), 0, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(66), 0, -sqrt(130)/2, 0, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(130)/2, 0, -3*sqrt(14)/2, 0, 0, 0, 0, 0, 0, 0, 0],
[0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(14)/2, 0, -sqrt(30), 0, 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(30), 0, -2*sqrt(7), 0, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 2*sqrt(7), 0, -sqrt(102)/2, 0, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(102)/2, 0, -3*sqrt(10)/2, 0, 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 3*sqrt(10)/2, 0, -sqrt(19), 0, 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(19), 0, -sqrt(15), 0, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(15), 0, -sqrt(42)/2, 0],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(42)/2, 0, -sqrt(22)/2],
    [0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, sqrt(22)/2, 0],
])
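# Structural sanity check (illustrative, not part of the original file): each Gx_l is
# antisymmetric, e.g.
#   assert np.allclose(Gx_3, -Gx_3.T)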
from __future__ import absolute_import
import os
import sys
import numpy as np
import nibabel as nib
from spinalcordtoolbox.utils import __sct_dir__
sys.path.append(os.path.join(__sct_dir__, 'scripts'))
from spinalcordtoolbox.image import Image
from spinalcordtoolbox.deepseg_lesion import core as deepseg_lesion
import sct_utils as sct
def test_model_file_exists():
for model_name in deepseg_lesion.MODEL_LST:
model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_lesion_models', '{}_lesion.h5'.format(model_name))
assert os.path.isfile(model_path)
def test_segment():
contrast_test = 't2'
model_path = os.path.join(sct.__sct_dir__, 'data', 'deepseg_lesion_models', '{}_lesion.h5'.format(contrast_test))
# create fake data
data = np.zeros((48,48,96))
xx, yy = np.mgrid[:48, :48]
circle = (xx - 24) ** 2 + (yy - 24) ** 2
for zz in range(data.shape[2]):
data[:,:,zz] += np.logical_and(circle < 400, circle >= 200) * 2400 # CSF
data[:,:,zz] += (circle < 200) * 500 # SC
data[16:22, 16:22, 64:90] = 1000 # fake lesion
affine = np.eye(4)
nii = nib.nifti1.Nifti1Image(data, affine)
img = Image(data, hdr=nii.header, dim=nii.header.get_data_shape())
seg = deepseg_lesion.segment_3d(model_path, contrast_test, img.copy())
    assert np.any(seg.data[16:22, 16:22, 64:90])
# Copyright 2020 <NAME> (@usimarit)
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import sys
from tensorflow_asr.utils import setup_environment
setup_environment()
import librosa
import numpy as np
import matplotlib.pyplot as plt
from tensorflow_asr.featurizers.speech_featurizers import read_raw_audio, TFSpeechFeaturizer, NumpySpeechFeaturizer
def main(argv):
speech_file = argv[1]
feature_type = argv[2]
speech_conf = {
"sample_rate": 16000,
"frame_ms": 25,
"stride_ms": 10,
"feature_type": feature_type,
"preemphasis": 0.97,
"normalize_signal": True,
"normalize_feature": True,
"normalize_per_feature": False,
"num_feature_bins": 80,
}
signal = read_raw_audio(speech_file, speech_conf["sample_rate"])
nsf = NumpySpeechFeaturizer(speech_conf)
sf = TFSpeechFeaturizer(speech_conf)
ft = nsf.stft(signal)
print(ft.shape, np.mean(ft))
ft = sf.stft(signal).numpy()
print(ft.shape, np.mean(ft))
import numpy as np
class Perceptron:
def __init__(self, w, threshold=0.5):
self.weight = w
self.threshold = threshold
def work(self, x):
sum = np.sum(self.weight * x)
if sum <= self.threshold:
return 0
else: # sum > self.threshold
return 1
AND_weight = np.array([0.5, 0.5])
AND_threshold = 0.7
OR_weight = np.array([0.5, 0.5])
OR_threshold = 0.3
NAND_weight = np.array([-0.5, -0.5])
NAND_threshold = -0.7
AND = Perceptron(AND_weight, AND_threshold)
OR = Perceptron(OR_weight, OR_threshold)
NAND = Perceptron(NAND_weight, NAND_threshold)
for input in [(0, 0), (0, 1), (1, 0), (1, 1)]:
AND_output = AND.work(np.array([input[0], input[1]]))
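# Illustrative extension (a sketch, not part of the original script): a single
# perceptron cannot represent XOR, but the three gates defined above can be
# stacked to build it. The helper below reuses the AND, OR and NAND instances;
# its name and structure are assumptions added for demonstration only.
def XOR(x1, x2):
    s1 = NAND.work(np.array([x1, x2]))
    s2 = OR.work(np.array([x1, x2]))
    return AND.work(np.array([s1, s2]))
# Expected: XOR(0, 0) == 0, XOR(0, 1) == 1, XOR(1, 0) == 1, XOR(1, 1) == 0.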
import numpy
import six
from chainer import cuda
from chainer import function
from chainer.utils import type_check
from chainer.utils import walker_alias
class NegativeSampling(function.Function):
"""Implementation of negative sampling.
In natural language processing, especially language modeling, the vocabulary
is very large, so calculating the gradient of the full embedding matrix is
expensive.
With the negative sampling trick, you instead only need to calculate the
gradient for a few sampled negative examples.
The objective function is below:
.. math::
f(x, p) = \log\sigma(x^\\top w_p) + \\
k E_{i \sim P(i)}[\log\sigma(- x^\\top w_i)],
where :math:`\sigma(\cdot)` is a sigmoid function, :math:`w_i` is the
weight vector for the word :math:`i`, and :math:`p` is a positive example.
It is approximated with :math:`k` examples :math:`N` sampled from the
probability :math:`P(i)`, like this:
.. math::
f(x, p) \\approx \log\sigma(x^\\top w_p) + \\
\sum_{n \in N} \log\sigma(-x^\\top w_n).
Each sample of :math:`N` is drawn from the word distribution :math:`P(w)`.
This is calculated as :math:`P(w) = \\frac{1}{Z} c(w)^\\alpha`, where
:math:`c(w)` is the unigram count of the word :math:`w`, :math:`\\alpha` is
a hyper-parameter, and :math:`Z` is the normalization constant.
Args:
in_size (int): Dimension of input vectors.
counts (int list): Number of occurrences of each identifier.
sample_size (int): Number of negative samples.
power (float): Power factor :math:`\\alpha`.
See: `Distributed Representations of Words and Phrases and their\
Compositionality <http://arxiv.org/abs/1310.4546>`_
"""
parameter_names = ('W',)
gradient_names = ('gW',)
def __init__(self, in_size, counts, sample_size, power=0.75):
self.sample_size = sample_size
p = numpy.array(counts, numpy.float32)
p = numpy.power(p, p.dtype.type(power))
self.sampler = walker_alias.WalkerAlias(p)
vocab_size = len(counts)
self.W = numpy.zeros((vocab_size, in_size)).astype(numpy.float32)
self.gW = numpy.zeros_like(self.W)
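# Descriptive note (added for clarity, not in the original source): with
# power=0.75 and counts [4, 1], the unnormalised sampling weights become
# 4**0.75 ~= 2.83 and 1**0.75 = 1.0, so the frequent word is drawn roughly
# 2.8x as often rather than 4x -- raising counts to a power below 1 flattens
# the unigram distribution before WalkerAlias builds the sampler table.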
def _make_samples(self, t):
if hasattr(self, 'samples'):
return self.samples
size = int(t.shape[0])
# first one is the positive, and others are sampled negatives
samples = self.sampler.sample((size, self.sample_size + 1))
if isinstance(samples, numpy.ndarray):
samples.T[0] = t
else:
cuda.elementwise(
'T t, int32 m', 'raw T s', 's[i * m] = t;',
'negative_sampling_assign'
)(t, self.sample_size + 1, samples)
self.samples = samples
def check_type_forward(self, in_types):
type_check.expect(in_types.size() == 2)
x_type, t_type = in_types
type_check.expect(
x_type.dtype == numpy.float32,
x_type.ndim == 2,
t_type.dtype == numpy.int32,
t_type.ndim == 1,
x_type.shape[0] == t_type.shape[0]
)
def to_gpu(self, device=None):
super(NegativeSampling, self).to_gpu(device)
self.sampler.to_gpu()
def to_cpu(self):
super(NegativeSampling, self).to_cpu()
self.sampler.to_cpu()
def forward_cpu(self, inputs):
x, t = inputs
self._make_samples(t)
loss = numpy.float32(0.0)
for i, (ix, k) in enumerate(six.moves.zip(x, self.samples)):
w = self.W[k]
f = w.dot(ix)
f[0] *= -1 # positive sample
loss += numpy.sum(numpy.logaddexp(f, 0))
return numpy.array(loss, numpy.float32)
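# Standalone sketch (added for illustration; not part of the Chainer source): the
# loss above relies on the identity numpy.logaddexp(f, 0) == log(1 + exp(f))
# == -log(sigmoid(-f)), so negating the positive-sample score before logaddexp
# turns the summed terms into the negative log-likelihood of the objective.
def _softplus_identity_check(f=1.5):
    softplus = numpy.logaddexp(f, 0)
    neg_log_sigmoid = -numpy.log(1.0 / (1.0 + numpy.exp(f)))
    return numpy.allclose(softplus, neg_log_sigmoid)  # expected: True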
# This file is part of the pyMOR project (https://www.pymor.org).
# Copyright pyMOR developers and contributors. All rights reserved.
# License: BSD 2-Clause License (https://opensource.org/licenses/BSD-2-Clause)
import os
import tempfile
import numpy as np
import pytest
import scipy.io as spio
import scipy.sparse as sps
from pymor.models.iosys import LTIModel, SecondOrderModel
def _build_matrices_lti(with_D, with_E):
A = sps.csc_matrix([[1, 2], [3, 4]])
B = np.array([[1], [2]])
C = np.array([[1, 2]])
# Append + memory saver + 1 core
# Memory conservative version
print("Setting up environment...")
from npy_append_array import NpyAppendArray
import numpy as np
import sys
# Read in arguments from command line
parameters = np.genfromtxt(sys.argv[1], delimiter = ',', names = True)
filepath = sys.argv[2]
nchunks = int(sys.argv[3])
# Parse relevant parameters
sims = parameters.shape[0]
indivs = parameters['indvs'].astype('int32')[0]
snps = parameters['snps'].astype('int32')[0]
m = int(sims / nchunks)
np.save('cnn_params.npy', parameters)
del parameters
# Creating chunk generator
print("Creating chunk generator...")
def chunkify(nchunks=nchunks, filepath=filepath):
chunk_size = int((sims / nchunks) * (indivs+8))
chunk_end = 0
chunk_count = -1
while chunk_end < chunk_size * nchunks:
chunk_start = chunk_end
chunk_end = chunk_end + chunk_size
chunk_count += 1
with open(filepath) as f:
chunk = f.readlines()[chunk_start:chunk_end]
yield chunk, chunk_count
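# Usage sketch (an assumption added for clarity, not in the original script): each
# yielded chunk of raw lines is meant to be consumed together with its index by
# data_extractor defined below, e.g.:
#
#     for chunk, chunk_count in chunkify():
#         data_extractor(chunk, chunk_count)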
# Extract data from input file
print("Creating data extractor...")
def data_extractor(chunk, chunk_count):
cc = chunk_count
# Initialize appendable array in first chunk
if cc == 0:
# Find position data
print("Initializing position data...")
tmp_p = np.empty((m, snps))
posits = [z for z in chunk if "pos" in z]
for i in range(len(posits)):
tmp_p[i] = np.fromstring(posits[i][11:], sep=" ")
pos_dat_initialize = tmp_p
np.save('pos_dat.npy', pos_dat_initialize)
global pos_dat_array
pos_dat_array = NpyAppendArray('pos_dat.npy')
del tmp_p
# Find simulation data
print("Initializing simulation data...")
tmp_bd = np.empty((m, indivs, snps))
inds = np.array([i for i, s in enumerate(chunk) if 'pos' in s])
inds = inds + 1
big_dat_inds = np.zeros(shape=0, dtype='int')
for i in range(indivs):
big_dat_inds = np.append(big_dat_inds, inds + i)
big_dat_inds = np.sort(big_dat_inds)
k=0
for i in range(int(m)):
for j in range(indivs):
tmp_bd[i,j] = np.array(list(chunk[big_dat_inds[k]].strip()))
k+=1
big_dat_initialize = tmp_bd
np.save('big_dat.npy', big_dat_initialize)
global big_dat_array
big_dat_array = NpyAppendArray('big_dat.npy')
del tmp_bd
del chunk
else:
# Find position data
print("Extracting position data...")
tmp_p = np.empty((m, snps))
posits = [z for z in chunk if "pos" in z]
for i in range(len(posits)):
tmp_p[i] = np.fromstring(posits[i][11:], sep=" ")
import copy
import numpy as np
from scipy.special import wofz
from scipy.integrate import quad
from typing import List, Tuple
import autoarray as aa
from autogalaxy.profiles.mass_profiles import MassProfile
from autogalaxy.profiles.mass_profiles.mass_profiles import (
MassProfileMGE,
MassProfileCSE,
)
from autogalaxy.profiles.mass_profiles.mass_profiles import psi_from
class StellarProfile:
pass
class EllGaussian(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
sigma: float = 0.01,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Gaussian light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
sigma
The sigma value of the Gaussian.
"""
super(EllGaussian, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.sigma = sigma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
deflections = (
self.mass_to_light_ratio
* self.intensity
* self.sigma
* np.sqrt((2 * np.pi) / (1.0 - self.axis_ratio ** 2.0))
* self.zeta_from(grid=grid)
)
return self.rotate_grid_from_reference_frame(
np.multiply(
1.0, np.vstack((-1.0 * np.imag(deflections), np.real(deflections))).T
)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
Note: sigma is divided by sqrt(q) here.
"""
def calculate_deflection_component(npow, index):
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sigma / np.sqrt(self.axis_ratio),
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(u, y, x, npow, axis_ratio, sigma):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(-0.5 * np.square(np.divide(eta_u, sigma))) / (
(1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""Calculate the intensity of the Gaussian light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
Note: sigma is divided by sqrt(q) here.
"""
return np.multiply(
self.intensity,
np.exp(
-0.5
* np.square(
np.divide(grid_radii, self.sigma / np.sqrt(self.axis_ratio))
)
),
)
@property
def axis_ratio(self):
axis_ratio = super().axis_ratio
return axis_ratio if axis_ratio < 0.9999 else 0.9999
def zeta_from(self, grid: aa.type.Grid2DLike):
q2 = self.axis_ratio ** 2.0
ind_pos_y = grid[:, 0] >= 0
shape_grid = np.shape(grid)
output_grid = np.zeros((shape_grid[0]), dtype=np.complex128)
scale_factor = self.axis_ratio / (self.sigma * np.sqrt(2.0 * (1.0 - q2)))
xs_0 = grid[:, 1][ind_pos_y] * scale_factor
ys_0 = grid[:, 0][ind_pos_y] * scale_factor
xs_1 = grid[:, 1][~ind_pos_y] * scale_factor
ys_1 = -grid[:, 0][~ind_pos_y] * scale_factor
output_grid[ind_pos_y] = -1j * (
wofz(xs_0 + 1j * ys_0)
- np.exp(-(xs_0 ** 2.0) * (1.0 - q2) - ys_0 * ys_0 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_0 + 1j * ys_0 / self.axis_ratio)
)
output_grid[~ind_pos_y] = np.conj(
-1j
* (
wofz(xs_1 + 1j * ys_1)
- np.exp(-(xs_1 ** 2.0) * (1.0 - q2) - ys_1 * ys_1 * (1.0 / q2 - 1.0))
* wofz(self.axis_ratio * xs_1 + 1j * ys_1 / self.axis_ratio)
)
)
return output_grid
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
# noinspection PyAbstractClass
class AbstractEllSersic(MassProfile, MassProfileMGE, MassProfileCSE, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens \
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super(AbstractEllSersic, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfileMGE, self).__init__()
super(MassProfileCSE, self).__init__()
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.effective_radius = effective_radius
self.sersic_index = sersic_index
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the convergence of each individual cse used to decompose the mass profile.
The cored steep elliptical (cse) decomposition of a the elliptical NFW mass
profile (e.g. `decompose_convergence_via_cse`) is using equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_mge_from(
grid=grid, sigmas_factor=np.sqrt(self.axis_ratio)
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D deflection angles from a grid of (y,x) arc second coordinates, by computing and
summing the convergence of each individual cse used to decompose the mass profile.
The cored steep elliptical (cse) decomposition of a the elliptical NFW mass
profile (e.g. `decompose_convergence_via_cse`) is using equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self._deflections_2d_via_cse_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_mge_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
eccentric_radii = self.grid_to_eccentric_radii(grid=grid)
return self._convergence_2d_via_mge_from(grid_radii=eccentric_radii)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_via_cse_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the projected 2D convergence from a grid of (y,x) arc second coordinates, by computing and summing
the convergence of each individual cse used to decompose the mass profile.
The cored steep elliptical (cse) decomposition of a the elliptical NFW mass
profile (e.g. `decompose_convergence_via_cse`) is using equation (12) of
Oguri 2021 (https://arxiv.org/abs/2106.11464).
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
elliptical_radii = self.grid_to_elliptical_radii(grid=grid)
return self._convergence_2d_via_cse_from(grid_radii=elliptical_radii)
def convergence_func(self, grid_radius: float) -> float:
return self.mass_to_light_ratio * self.image_2d_via_radii_from(grid_radius)
@aa.grid_dec.grid_2d_to_structure
def potential_2d_from(self, grid: aa.type.Grid2DLike):
return np.zeros(shape=grid.shape[0])
def image_2d_via_radii_from(self, radius: np.ndarray):
"""
Returns the intensity of the profile at a given radius.
Parameters
----------
radius
The distance from the centre of the profile.
"""
return self.intensity * np.exp(
-self.sersic_constant
* (((radius / self.effective_radius) ** (1.0 / self.sersic_index)) - 1)
)
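# Clarifying note (added, not in the original source): at radius ==
# effective_radius the exponent above is zero, so this method returns exactly
# `intensity`; the sersic_constant defined below is chosen so that this radius
# encloses half of the profile's total integrated light.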
def decompose_convergence_via_mge(self) -> Tuple[List, List]:
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_2d, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self,) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_cses
The number of CSEs used to approximate the input func.
sample_points: int (should be larger than 'total_cses')
The number of data points to fit
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=0.0,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_2d(r):
return (
self.mass_to_light_ratio
* self.intensity
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_2d,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
@property
def sersic_constant(self):
"""A parameter derived from Sersic index which ensures that effective radius contains 50% of the profile's
total integrated light.
"""
return (
(2 * self.sersic_index)
- (1.0 / 3.0)
+ (4.0 / (405.0 * self.sersic_index))
+ (46.0 / (25515.0 * self.sersic_index ** 2))
+ (131.0 / (1148175.0 * self.sersic_index ** 3))
- (2194697.0 / (30690717750.0 * self.sersic_index ** 4))
)
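# Illustrative values (added for reference, not in the original source): this
# series gives b_n ~= 1.678 for an exponential profile (sersic_index=1) and
# b_n ~= 7.669 for a de Vaucouleurs profile (sersic_index=4), consistent with
# the standard approximation b_n ~= 2n - 1/3.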
@property
def ellipticity_rescale(self):
return 1.0 - ((1.0 - self.axis_ratio) / 2.0)
@property
def elliptical_effective_radius(self):
"""
The effective_radius of a Sersic light profile is defined as the circular effective radius. This is the \
radius within which a circular aperture contains half the profiles's total integrated light. For elliptical \
systems, this won't robustly capture the light profile's elliptical shape.
The elliptical effective radius instead describes the major-axis radius of the ellipse containing \
half the light, and may be more appropriate for highly flattened systems like disk galaxies.
"""
return self.effective_radius / np.sqrt(self.axis_ratio)
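# Worked example (added for clarity): for axis_ratio q = 0.5 this gives
# effective_radius / sqrt(0.5) ~= 1.41 * effective_radius, i.e. the major-axis
# radius enclosing half the light is about 41% larger than the circular
# effective radius for such a flattened profile.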
def with_new_normalization(self, normalization):
mass_profile = copy.copy(self)
mass_profile.mass_to_light_ratio = normalization
return mass_profile
class EllSersic(AbstractEllSersic, MassProfileMGE, MassProfileCSE):
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u, y, x, npow, axis_ratio, sersic_index, effective_radius, sersic_constant
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
) / ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
class SphSersic(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Sersic mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre
intensity
Overall flux intensity normalisation in the light profiles (electrons per second)
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllExponential(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllExponential mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=1.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphExponential(EllExponential):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The Exponential mass profile, the mass profiles of the light profiles that are used to fit and subtract the lens
model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllDevVaucouleurs(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The EllDevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and
subtract the lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profile.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=4.0,
mass_to_light_ratio=mass_to_light_ratio,
)
class SphDevVaucouleurs(EllDevVaucouleurs):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
mass_to_light_ratio: float = 1.0,
):
"""
The DevVaucouleurs mass profile, the mass profiles of the light profiles that are used to fit and subtract the
lens model_galaxy's light.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
mass_to_light_ratio
The mass-to-light ratio of the light profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
mass_to_light_ratio=mass_to_light_ratio,
)
class EllSersicRadialGradient(AbstractEllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.mass_to_light_gradient = mass_to_light_gradient
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_integral_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
def calculate_deflection_component(npow, index):
sersic_constant = self.sersic_constant
deflection_grid = self.axis_ratio * grid[:, index]
for i in range(grid.shape[0]):
deflection_grid[i] *= (
self.intensity
* self.mass_to_light_ratio
* quad(
self.deflection_func,
a=0.0,
b=1.0,
args=(
grid[i, 0],
grid[i, 1],
npow,
self.axis_ratio,
self.sersic_index,
self.effective_radius,
self.mass_to_light_gradient,
sersic_constant,
),
)[0]
)
return deflection_grid
deflection_y = calculate_deflection_component(1.0, 0)
deflection_x = calculate_deflection_component(0.0, 1)
return self.rotate_grid_from_reference_frame(
np.multiply(1.0, np.vstack((deflection_y, deflection_x)).T)
)
@staticmethod
def deflection_func(
u,
y,
x,
npow,
axis_ratio,
sersic_index,
effective_radius,
mass_to_light_gradient,
sersic_constant,
):
eta_u = np.sqrt(axis_ratio) * np.sqrt(
(u * ((x ** 2) + (y ** 2 / (1 - (1 - axis_ratio ** 2) * u))))
)
return (
(((axis_ratio * eta_u) / effective_radius) ** -mass_to_light_gradient)
* np.exp(
-sersic_constant
* (((eta_u / effective_radius) ** (1.0 / sersic_index)) - 1)
)
/ ((1 - (1 - axis_ratio ** 2) * u) ** (npow + 0.5))
)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def convergence_2d_from(self, grid: aa.type.Grid2DLike):
"""Calculate the projected convergence at a given set of arc-second gridded coordinates.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the convergence is computed on.
"""
return self.convergence_func(self.grid_to_eccentric_radii(grid))
def convergence_func(self, grid_radius: float) -> float:
return (
self.mass_to_light_ratio
* (
((self.axis_ratio * grid_radius) / self.effective_radius)
** -self.mass_to_light_gradient
)
* self.image_2d_via_radii_from(grid_radius)
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 100.0
radii_max = self.effective_radius * 20.0
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / self.effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (((r / self.effective_radius) ** (1.0 / self.sersic_index)) - 1.0)
)
)
return self._decompose_convergence_via_mge(
func=sersic_radial_gradient_2D, radii_min=radii_min, radii_max=radii_max
)
def decompose_convergence_via_cse(self) -> Tuple[List, List]:
"""
Decompose the convergence of the Sersic profile into cored steep elliptical (cse) profiles.
This decomposition uses the standard 2d profile of a Sersic mass profile.
Parameters
----------
func
The function representing the profile that is decomposed into CSEs.
radii_min:
The minimum radius to fit
radii_max:
The maximum radius to fit
total_cses
The number of CSEs used to approximate the input func.
sample_points: int (should be larger than 'total_cses')
The number of data points to fit
Returns
-------
Tuple[List, List]
A list of amplitudes and core radii of every cored steep elliptical (cse) profile the mass profile is decomposed
into.
"""
upper_dex, lower_dex, total_cses, sample_points = cse_settings_from(
effective_radius=self.effective_radius,
sersic_index=self.sersic_index,
sersic_constant=self.sersic_constant,
mass_to_light_gradient=self.mass_to_light_gradient,
)
scaled_effective_radius = self.effective_radius / np.sqrt(self.axis_ratio)
radii_min = scaled_effective_radius / 10.0 ** lower_dex
radii_max = scaled_effective_radius * 10.0 ** upper_dex
def sersic_radial_gradient_2D(r):
return (
self.mass_to_light_ratio
* self.intensity
* (
((self.axis_ratio * r) / scaled_effective_radius)
** -self.mass_to_light_gradient
)
* np.exp(
-self.sersic_constant
* (
((r / scaled_effective_radius) ** (1.0 / self.sersic_index))
- 1.0
)
)
)
return self._decompose_convergence_via_cse_from(
func=sersic_radial_gradient_2D,
radii_min=radii_min,
radii_max=radii_max,
total_cses=total_cses,
sample_points=sample_points,
)
class SphSersicRadialGradient(EllSersicRadialGradient):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
effective_radius: float = 0.6,
sersic_index: float = 0.6,
mass_to_light_ratio: float = 1.0,
mass_to_light_gradient: float = 0.0,
):
"""
Setup a Sersic mass and light profiles.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall flux intensity normalisation in the light profiles (electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
mass_to_light_ratio
The mass-to-light ratio of the light profile.
mass_to_light_gradient
The mass-to-light radial gradient.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
intensity=intensity,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
mass_to_light_gradient=mass_to_light_gradient,
)
class EllSersicCore(EllSersic):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=elliptical_comps,
intensity=intensity_break,
effective_radius=effective_radius,
sersic_index=sersic_index,
mass_to_light_ratio=mass_to_light_ratio,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_mge_from(grid=grid)
def image_2d_via_radii_from(self, grid_radii: np.ndarray):
"""
Calculate the intensity of the cored-Sersic light profile on a grid of radial coordinates.
Parameters
----------
grid_radii
The radial distance from the centre of the profile. for each coordinate on the grid.
"""
return np.multiply(
np.multiply(
self.intensity_prime,
np.power(
np.add(
1,
np.power(np.divide(self.radius_break, grid_radii), self.alpha),
),
(self.gamma / self.alpha),
),
),
np.exp(
np.multiply(
-self.sersic_constant,
(
np.power(
np.divide(
np.add(
np.power(grid_radii, self.alpha),
(self.radius_break ** self.alpha),
),
(self.effective_radius ** self.alpha),
),
(1.0 / (self.alpha * self.sersic_index)),
)
),
)
),
)
def decompose_convergence_via_mge(self):
radii_min = self.effective_radius / 50.0
radii_max = self.effective_radius * 20.0
def core_sersic_2D(r):
return (
self.mass_to_light_ratio
* self.intensity_prime
* (1.0 + (self.radius_break / r) ** self.alpha)
** (self.gamma / self.alpha)
* np.exp(
-self.sersic_constant
* (
(r ** self.alpha + self.radius_break ** self.alpha)
/ self.effective_radius ** self.alpha
)
** (1.0 / (self.sersic_index * self.alpha))
)
)
return self._decompose_convergence_via_mge(
func=core_sersic_2D, radii_min=radii_min, radii_max=radii_max
)
@property
def intensity_prime(self):
"""Overall intensity normalisation in the rescaled Core-Sersic light profiles (electrons per second)"""
return (
self.intensity_break
* (2.0 ** (-self.gamma / self.alpha))
* np.exp(
self.sersic_constant
* (
((2.0 ** (1.0 / self.alpha)) * self.radius_break)
/ self.effective_radius
)
** (1.0 / self.sersic_index)
)
)
class SphSersicCore(EllSersicCore):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
effective_radius: float = 0.6,
sersic_index: float = 4.0,
radius_break: float = 0.01,
intensity_break: float = 0.05,
gamma: float = 0.25,
alpha: float = 3.0,
):
"""
The spherical cored-Sersic light profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
effective_radius
The circular radius containing half the light of this profile.
sersic_index
Controls the concentration of the profile (lower -> less concentrated, higher -> more concentrated).
radius_break
The break radius separating the inner power-law (with logarithmic slope gamma) and outer Sersic function.
intensity_break
The intensity at the break radius.
gamma
The logarithmic power-law slope of the inner core profiles
alpha :
Controls the sharpness of the transition between the inner core / outer Sersic profiles.
"""
super().__init__(
centre=centre,
elliptical_comps=(0.0, 0.0),
effective_radius=effective_radius,
sersic_index=sersic_index,
radius_break=radius_break,
intensity_break=intensity_break,
gamma=gamma,
alpha=alpha,
)
self.radius_break = radius_break
self.intensity_break = intensity_break
self.alpha = alpha
self.gamma = gamma
class EllChameleon(MassProfile, StellarProfile):
def __init__(
self,
centre: Tuple[float, float] = (0.0, 0.0),
elliptical_comps: Tuple[float, float] = (0.0, 0.0),
intensity: float = 0.1,
core_radius_0: float = 0.01,
core_radius_1: float = 0.02,
mass_to_light_ratio: float = 1.0,
):
"""
The elliptical Chameleon mass profile.
Parameters
----------
centre
The (y,x) arc-second coordinates of the profile centre.
elliptical_comps
The first and second ellipticity components of the elliptical coordinate system, (see the module
`autogalaxy -> convert.py` for the convention).
intensity
Overall intensity normalisation of the light profile (units are dimensionless and derived from the data
the light profile's image is compared too, which is expected to be electrons per second).
core_radius_0 : the core size of the first elliptical cored Isothermal profile.
core_radius_1 : core_radius_0 + core_radius_1 is the core size of the second elliptical cored Isothermal profile.
We use core_radius_1 here to avoid negative values.
Profile form:
mass_to_light_ratio * intensity *\
(1.0 / Sqrt(x^2 + (y/q)^2 + core_radius_0^2) - 1.0 / Sqrt(x^2 + (y/q)^2 + (core_radius_0 + core_radius_1)**2.0))
"""
super(EllChameleon, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
super(MassProfile, self).__init__(
centre=centre, elliptical_comps=elliptical_comps
)
self.mass_to_light_ratio = mass_to_light_ratio
self.intensity = intensity
self.core_radius_0 = core_radius_0
self.core_radius_1 = core_radius_1
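# Descriptive note (added, not in the original source): the profile is the
# difference of two cored isothermal-like terms with core sizes core_radius_0
# and core_radius_0 + core_radius_1, so as core_radius_1 -> 0 the two terms
# coincide and the profile vanishes; core_radius_1 therefore sets the radial
# extent over which the Chameleon profile carries mass.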
def deflections_yx_2d_from(self, grid: aa.type.Grid2DLike):
return self.deflections_2d_via_analytic_from(grid=grid)
@aa.grid_dec.grid_2d_to_structure
@aa.grid_dec.transform
@aa.grid_dec.relocate_to_radial_minimum
def deflections_2d_via_analytic_from(self, grid: aa.type.Grid2DLike):
"""
Calculate the deflection angles at a given set of arc-second gridded coordinates.
Following Eq. (15) and (16), but the parameters are slightly different.
Parameters
----------
grid
The grid of (y,x) arc-second coordinates the deflection angles are computed on.
"""
factor = (
2.0
* self.mass_to_light_ratio
* self.intensity
/ (1 + self.axis_ratio)
* self.axis_ratio
/ np.sqrt(1.0 - self.axis_ratio ** 2.0)
)
core_radius_0 = np.sqrt(
(4.0 * self.core_radius_0 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
core_radius_1 = np.sqrt(
(4.0 * self.core_radius_1 ** 2.0) / (1.0 + self.axis_ratio) ** 2
)
psi0 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_0
)
psi1 = psi_from(
grid=grid, axis_ratio=self.axis_ratio, core_radius=core_radius_1
)
deflection_y0 = np.arctanh(
np.divide(
np.multiply(np.sqrt(1.0 - self.axis_ratio ** 2.0)
from __future__ import division
from __future__ import absolute_import
from builtins import object
from past.utils import old_div
from nose.tools import (assert_equal, assert_not_equal, raises,
assert_almost_equal)
from nose.plugins.skip import SkipTest
from .test_helpers import assert_items_almost_equal, assert_items_equal
import pandas as pd
import numpy as np
import openpathsampling as paths
import logging
logging.getLogger('openpathsampling.initialization').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.ensemble').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.storage').setLevel(logging.CRITICAL)
logging.getLogger('openpathsampling.netcdfplus').setLevel(logging.CRITICAL)
class TestWHAM(object):
def setup(self):
self.exact = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.03125, 0.015625]
self.iface1 = [2.0, 1.0, 0.5, 0.25, 0.125, 0.0625, 0.0]
self.iface2 = [1.0, 1.0, 1.0, 0.5, 0.25, 0.125, 0.0625]
self.iface3 = [3.0, 3.0, 3.0, 3.0, 3.0, 1.5, 0.75]
# self.iface1 = [1.0, 0.5, 0.25, 0.125, 0.0625, 0.0, 0.0]
# self.iface2 = [1.0, 1.0, 1.0, 0.5, 0.25, 0.125, 0.0625]
# self.iface3 = [1.0, 1.0, 1.0, 1.0, 1.0, 0.5, 0.25]
# self.iface1 = [2.0, 0.5, 0.125, 0.0]
# self.iface2 = [1.0, 1.0, 0.25, 0.0625]
# self.iface3 = [3.0, 3.0, 3.0, 0.75]
# self.index = [0.0, 0.2, 0.4, 0.6]
self.columns = ["Interface 1", "Interface 2", "Interface 3"]
self.index = [0.0, 0.1, 0.2, 0.3, 0.4, 0.5, 0.6]
self.input_df = pd.DataFrame(
data=np.array([self.iface1, self.iface2, self.iface3])
from __future__ import print_function, division, absolute_import
import functools
import sys
import warnings
# unittest.TestCase.subTest() was only added in Python 3.4
if sys.version_info[0] < 3 or sys.version_info[1] < 4:
import unittest2 as unittest
else:
import unittest
# unittest.mock is not available in 2.7 (though unittest2 might contain it?)
try:
import unittest.mock as mock
except ImportError:
import mock
import matplotlib
matplotlib.use('Agg') # fix execution of tests involving matplotlib on travis
import numpy as np
import six.moves as sm
import imgaug as ia
from imgaug import augmenters as iaa
from imgaug import parameters as iap
from imgaug import dtypes as iadt
from imgaug.testutils import (array_equal_lists, keypoints_equal, reseed,
runtest_pickleable_uint8_img)
import imgaug.augmenters.arithmetic as arithmetic_lib
import imgaug.augmenters.contrast as contrast_lib
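# Minimal usage sketch (an illustrative assumption, not part of the original test
# suite): iaa.Add shifts every pixel by a fixed or sampled value, and
# per_channel=True samples one offset per channel -- the behaviours the TestAdd
# cases below exercise. This helper is never called by the tests.
def _example_add_usage():
    image = np.full((2, 2, 3), 100, dtype=np.uint8)
    assert np.all(iaa.Add(value=10).augment_image(image) == 110)
    aug_per_channel = iaa.Add(value=(0, 50), per_channel=True)
    return aug_per_channel.augment_image(image)  # each channel gets its own offset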
class TestAdd(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Add(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Add(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Add(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [float, int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.Add(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_add_floats(self):
# specific tests with floats
aug = iaa.Add(value=0.75)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 2
aug = iaa.Add(value=0.45)
img = np.full((1, 1), 1, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
img = np.full((1, 1), 1, dtype=np.uint16)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == 1
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert 100 + 1 <= np.average(observed) <= 100 + 10
aug = iaa.Add(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
aug = iaa.Add(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert 100 - 3 <= np.average(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldnt be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Add(value=1)
aug_det = iaa.Add(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Add(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_per_channel(self):
# test channelwise
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 1 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Add(value=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.Add(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Add(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
aug = iaa.Add(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Add(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.Add(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.Add(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.Add(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.Add((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10)
class TestAddElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.AddElementwise(value="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.AddElementwise(value=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_add_zero(self):
# no add, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_add_one(self):
# add > 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images + 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] + 1]
assert array_equal_lists(observed, expected)
def test_add_minus_one(self):
# add < 0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.AddElementwise(value=-1)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images - 1
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [images_list[0] - 1]
assert array_equal_lists(observed, expected)
def test_uint8_every_possible_value(self):
# uint8, every possible addition for base value 127
for value_type in [int]:
for per_channel in [False, True]:
for value in np.arange(-255, 255+1):
aug = iaa.AddElementwise(value=value_type(value), per_channel=per_channel)
expected = np.clip(127 + value_type(value), 0, 255)
img = np.full((1, 1), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert img_aug.item(0) == expected
img = np.full((1, 1, 3), 127, dtype=np.uint8)
img_aug = aug.augment_image(img)
assert np.all(img_aug == expected)
def test_stochastic_parameters_as_value(self):
# test other parameters
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=iap.DiscreteUniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Uniform(1, 10))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 + 1
assert np.max(observed) <= 100 + 10
aug = iaa.AddElementwise(value=iap.Clip(iap.Normal(1, 1), -3, 3))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
aug = iaa.AddElementwise(value=iap.Discretize(iap.Clip(iap.Normal(1, 1), -3, 3)))
observed = aug.augment_images(images)
assert np.min(observed) >= 100 - 3
assert np.max(observed) <= 100 + 3
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AddElementwise(value=1)
aug_det = iaa.AddElementwise(value=1).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_value(self):
# varying values
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(0, 10))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
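# the per-element values are re-sampled on every call, so consecutive
# augmentations of the 3x3 image should almost never be identical; 70% changed
# is a conservative lower bound, while the deterministic copy must return the
# same result on every call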
assert nb_changed_aug >= int(nb_iterations * 0.7)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.AddElementwise(value=(-50, 50))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
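# offsets are sampled independently per pixel, so only a small fraction of
# adjacent pixels in the flattened image should receive identical values
# (roughly 1 in 101 for integer sampling from [-50, 50]); hence >90% of the
# adjacent pairs must differ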
assert nb_different > 0.9 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.AddElementwise(value=iap.Choice([0, 1]), per_channel=0.5)
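# with per_channel=0.5, roughly half of the 400 trials sample one value per
# channel (so per-pixel sums 0..3 all appear) and half sample a single value
# shared across channels (sums only 0 or 3); 150..250 allows generous
# sampling noise around the expected 200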
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.AddElementwise(1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.AddElementwise(value=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.AddElementwise(value=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.AddElementwise(value=-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
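# additions that would leave the dtype's value range are expected to saturate
# at min_value/max_value instead of wrapping around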
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 20, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 21)
image = np.full((3, 3), max_value - 2, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value - 1)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), max_value - 1, dtype=dtype)
aug = iaa.AddElementwise(2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-9)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value + 1)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
image = np.full((3, 3), min_value + 10, dtype=dtype)
aug = iaa.AddElementwise(-11)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 0, dtype=dtype)
aug = iaa.AddElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-10 - 1e-2 < image_aug, image_aug < 10 + 1e-2))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
def test_pickleable(self):
aug = iaa.AddElementwise((0, 50), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestAdditiveGaussianNoise(unittest.TestCase):
def setUp(self):
reseed()
def test_loc_zero_scale_zero(self):
# no noise, shouldn't change anything
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
def test_loc_zero_scale_nonzero(self):
# zero-centered noise
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_std_dev_of_added_noise_matches_scale(self):
# std correct?
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=0.2 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0])
values = np.array(values)
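# the sample standard deviation (normalized by 255) should land near the
# requested scale of 0.2; the wide 0.1..0.4 bounds absorb sampling noise and
# the slight reduction caused by uint8 clipping at 0 and 255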
assert np.min(values) == 0
assert 0.1 < np.std(values) / 255.0 < 0.4
def test_nonzero_loc(self):
# non-zero loc
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.25 * 255, scale=0.01 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
nb_iterations = 1000
values = []
for i in sm.xrange(nb_iterations):
images_aug = aug.augment_images(images)
values.append(images_aug[0, 0, 0, 0] - 128)
values = np.array(values)
assert 54 < np.average(values) < 74 # loc=0.25 should be around 255*0.25=64 average
def test_tuple_as_loc(self):
# varying locs
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=(0, 0.5 * 255), scale=0.0001 * 255)
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_loc(self):
# varying locs by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=iap.Choice([-20, 20]), scale=0.0001 * 255)
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
seen = [0, 0]
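# loc is drawn from {-20, +20} with equal probability, so each branch should
# be hit about 100 times out of 200; 75..125 leaves room for sampling noise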
for i in sm.xrange(200):
observed = aug.augment_images(images)
mean = np.mean(observed)
diff_m20 = abs(mean - (128-20))
diff_p20 = abs(mean - (128+20))
if diff_m20 <= 1:
seen[0] += 1
elif diff_p20 <= 1:
seen[1] += 1
else:
assert False
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_tuple_as_scale(self):
# varying stds
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=(0.01 * 255, 0.2 * 255))
aug_det = aug.to_deterministic()
images = np.ones((1, 1, 1, 1), dtype=np.uint8) * 128
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_stochastic_parameter_as_scale(self):
# varying stds by stochastic param
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0, scale=iap.Choice([1, 20]))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 128
seen = [0, 0, 0]
for i in sm.xrange(200):
observed = aug.augment_images(images)
std = np.std(observed.astype(np.int32) - 128)
diff_1 = abs(std - 1)
diff_20 = abs(std - 20)
if diff_1 <= 2:
seen[0] += 1
elif diff_20 <= 5:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 5
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(loc="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.AdditiveGaussianNoise(scale="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 128
aug = iaa.AdditiveGaussianNoise(loc=0.5, scale=10)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.AdditiveGaussianNoise(scale=(0.1, 10), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=2)
class TestDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
# no dropout, shouldn't change anything
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Dropout(p=0)
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
# 100% dropout, should drop everything
aug = iaa.Dropout(p=1.0)
observed = aug.augment_images(images)
expected = np.zeros((1, 512, 512, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug.augment_images(images_list)
expected = [np.zeros((512, 512, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_p_is_50_percent(self):
# 50% dropout
base_img = np.ones((512, 512, 1), dtype=np.uint8) * 255
images = np.array([base_img])
images_list = [base_img]
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Dropout(p=0.5)
aug_det = aug.to_deterministic()
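# with p=0.5 roughly half of the 512x512 pixels should be dropped; the
# 0.35..0.65 bounds on the dropped fraction are deliberately loose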
observed = aug.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images)
assert not np.array_equal(observed, images)
percent_nonzero = len(observed.flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug_det.augment_images(images_list)
assert not array_equal_lists(observed, images_list)
percent_nonzero = len(observed[0].flatten().nonzero()[0]) \
/ (base_img.shape[0] * base_img.shape[1] * base_img.shape[2])
assert 0.35 <= (1 - percent_nonzero) <= 0.65
observed = aug.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
observed = aug_det.augment_keypoints(keypoints)
assert keypoints_equal(observed, keypoints)
def test_tuple_as_p(self):
# varying p
aug = iaa.Dropout(p=(0.0, 1.0))
aug_det = aug.to_deterministic()
images = np.ones((1, 8, 8, 1), dtype=np.uint8) * 255
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_list_as_p(self):
aug = iaa.Dropout(p=[0.0, 0.5, 1.0])
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
nb_seen = [0, 0, 0, 0]
nb_iterations = 1000
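# p is picked uniformly from the list [0.0, 0.5, 1.0], so each bucket below
# should be hit about a third of the time (roughly 333 of 1000, atol=75), and
# hardly any observation should fall outside all three buckets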
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
n_dropped = np.sum(observed_aug == 0)
p_observed = n_dropped / observed_aug.size
if 0 <= p_observed <= 0.01:
nb_seen[0] += 1
elif 0.5 - 0.05 <= p_observed <= 0.5 + 0.05:
nb_seen[1] += 1
elif 1.0-0.01 <= p_observed <= 1.0:
nb_seen[2] += 1
else:
nb_seen[3] += 1
assert np.allclose(nb_seen[0:3], nb_iterations*0.33, rtol=0, atol=75)
assert nb_seen[3] < 30
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.Dropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])))
images = np.ones((1, 20, 20, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for wrong parameter datatype
got_exception = False
try:
_aug = iaa.Dropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Dropout(p=1.0)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.Dropout(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseDropout(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_zero(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = base_img
assert np.array_equal(observed, expected)
def test_p_is_one(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=1.0, size_px=4, size_percent=None, per_channel=False, min_size=4)
observed = aug.augment_image(base_img)
expected = np.zeros_like(base_img)
assert np.array_equal(observed, expected)
def test_p_is_50_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=False, min_size=1)
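# with size_px=1 the dropout mask is sampled at 1x1 resolution and upscaled
# to the full image, so every augmented image is either entirely dropped
# (average 0) or entirely kept (average 100)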
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_size_percent(self):
base_img = np.ones((16, 16, 1), dtype=np.uint8) * 100
aug = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=0.001, per_channel=False, min_size=1)
averages = []
for _ in sm.xrange(50):
observed = aug.augment_image(base_img)
averages.append(np.average(observed))
assert all([v in [0, 100] for v in averages])
assert 50 - 20 < np.average(averages) < 50 + 20
def test_per_channel(self):
aug = iaa.CoarseDropout(p=0.5, size_px=1, size_percent=None, per_channel=True, min_size=1)
base_img = np.ones((4, 4, 3), dtype=np.uint8) * 100
found = False
for _ in sm.xrange(100):
observed = aug.augment_image(base_img)
avgs = np.average(observed, axis=(0, 1))
if len(set(avgs)) >= 2:
found = True
break
assert found
def test_stochastic_parameter_as_p(self):
# varying p by stochastic parameter
aug = iaa.CoarseDropout(p=iap.Binomial(1-iap.Choice([0.0, 0.5])), size_px=50)
images = np.ones((1, 100, 100, 1), dtype=np.uint8) * 255
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_images(images)
p = np.mean(observed == 0)
if 0.4 < p < 0.6:
seen[0] += 1
elif p < 0.1:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exception for bad parameters
got_exception = False
try:
_ = iaa.CoarseDropout(p="test")
except Exception:
got_exception = True
assert got_exception
def test___init___size_px_and_size_percent_both_none(self):
got_exception = False
try:
_ = iaa.CoarseDropout(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseDropout(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseDropout(p=0.5, size_px=10, per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=10, shape=(40, 40, 3))
class TestDropout2d(unittest.TestCase):
def setUp(self):
reseed()
def test___init___defaults(self):
aug = iaa.Dropout2d(p=0)
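# p is the drop probability; internally Dropout2d stores the keep
# probability, hence p=0 maps to Binomial(1.0) (and p=0.7 to Binomial(0.3)
# in the test below)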
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 1
def test___init___p_is_float(self):
aug = iaa.Dropout2d(p=0.7)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 0.3)
assert aug.nb_keep_channels == 1
def test___init___nb_keep_channels_is_int(self):
aug = iaa.Dropout2d(p=0, nb_keep_channels=2)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
assert aug.nb_keep_channels == 2
def test_no_images_in_batch(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
heatmaps = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
heatmaps = ia.HeatmapsOnImage(heatmaps, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=heatmaps)
assert np.allclose(heatmaps_aug.arr_0to1, heatmaps.arr_0to1)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_1_heatmaps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_1_segmentation_maps__keep_one_channel(self):
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_1_cbaois__keep_one_channel(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_heatmaps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.Dropout2d(p=0.0, nb_keep_channels=0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075(self):
image = np.full((1, 1, 3000), 255, dtype=np.uint8)
aug = iaa.Dropout2d(p=0.75, nb_keep_channels=0)
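# dropping each of the 3000 channels independently with p=0.75 should zero
# out about 2250 of them; atol=75 is roughly three binomial standard
# deviations (sqrt(3000*0.75*0.25) ~ 24)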
image_aug = aug(image=image)
nb_kept = np.sum(image_aug == 255)
nb_dropped = image.shape[2] - nb_kept
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.isclose(nb_dropped, image.shape[2]*0.75, atol=75)
def test_force_nb_keep_channels(self):
image = np.full((1, 1, 3), 255, dtype=np.uint8)
images = np.array([image] * 1000)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=1)
images_aug = aug(images=images)
ids_kept = [np.nonzero(image[0, 0, :]) for image in images_aug]
ids_kept_uq = np.unique(ids_kept)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
# on average, keep 1 of 3 channels
# due to p=1.0 we expect to get exactly 2/3 dropped
assert np.isclose(nb_dropped,
(len(images)*images.shape[3])*(2/3), atol=1)
# over many images, every channel index is kept at least once, i.e. which channel survives is random
assert sorted(ids_kept_uq.tolist()) == [0, 1, 2]
def test_some_images_below_nb_keep_channels(self):
image_2c = np.full((1, 1, 2), 255, dtype=np.uint8)
image_3c = np.full((1, 1, 3), 255, dtype=np.uint8)
images = [image_2c if i % 2 == 0 else image_3c
for i in sm.xrange(100)]
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=2)
images_aug = aug(images=images)
for i, image_aug in enumerate(images_aug):
assert np.sum(image_aug == 255) == 2
if i % 2 == 0:
assert np.sum(image_aug == 0) == 0
else:
assert np.sum(image_aug == 0) == 1
def test_all_images_below_nb_keep_channels(self):
image = np.full((1, 1, 2), 255, dtype=np.uint8)
images = np.array([image] * 100)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = (len(images) * images.shape[3]) - nb_kept
assert nb_dropped == 0
def test_get_parameters(self):
aug = iaa.Dropout2d(p=0.7, nb_keep_channels=2)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert np.isclose(params[0].p.value, 0.3)
assert params[1] == 2
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.full(shape, 255, dtype=np.uint8)
aug = iaa.Dropout2d(1.0, nb_keep_channels=0)
image_aug = aug(image=image)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if value == 0:
assert np.sum(image_aug == value) == 10
else:
assert np.sum(image_aug == value) == 3
assert np.sum(image_aug == 0) == 7
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
with self.subTest(dtype=dt, value=value):
image = np.full((1, 1, 10), value, dtype=dt)
aug = iaa.Dropout2d(p=1.0, nb_keep_channels=3)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == dt
if _isclose(value, 0.0):
assert np.sum(_isclose(image_aug, value)) == 10
else:
assert (
np.sum(_isclose(image_aug, np.float128(value)))
== 3)
assert np.sum(image_aug == 0) == 7
def test_pickleable(self):
aug = iaa.Dropout2d(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3, shape=(1, 1, 50))
class TestTotalDropout(unittest.TestCase):
def setUp(self):
reseed()
def test___init___p(self):
aug = iaa.TotalDropout(p=0)
assert isinstance(aug.p, iap.Binomial)
assert np.isclose(aug.p.p.value, 1.0)
def test_p_is_1(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.sum(image_aug) == 0
def test_p_is_1_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=1.0)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.sum(images_aug) == 0
def test_p_is_1_heatmaps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, 0.0)
def test_p_is_1_segmentation_maps(self):
aug = iaa.TotalDropout(p=1.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, 0.0)
def test_p_is_1_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=1.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert cbaoi_aug.items == []
def test_p_is_0(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == image.dtype.name
assert np.array_equal(image_aug, image)
def test_p_is_0_multiple_images_list(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = [image, image, image]
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_multiple_images_array(self):
image = np.full((1, 2, 3), 255, dtype=np.uint8)
images = np.array([image, image, image], dtype=np.uint8)
aug = iaa.TotalDropout(p=0.0)
images_aug = aug(images=images)
for image_aug, image_ in zip(images_aug, images):
assert image_aug.shape == image_.shape
assert image_aug.dtype.name == image_.dtype.name
assert np.array_equal(image_aug, image_)
def test_p_is_0_heatmaps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.float32([
[0.0, 1.0],
[0.0, 1.0]
])
hm = ia.HeatmapsOnImage(arr, shape=(2, 2, 3))
heatmaps_aug = aug(heatmaps=hm)
assert np.allclose(heatmaps_aug.arr_0to1, hm.arr_0to1)
def test_p_is_0_segmentation_maps(self):
aug = iaa.TotalDropout(p=0.0)
arr = np.int32([
[0, 1],
[0, 1]
])
segmaps = ia.SegmentationMapsOnImage(arr, shape=(2, 2, 3))
segmaps_aug = aug(segmentation_maps=segmaps)
assert np.allclose(segmaps_aug.arr, segmaps.arr)
def test_p_is_0_cbaois(self):
cbaois = [
ia.KeypointsOnImage([ia.Keypoint(x=0, y=1)], shape=(2, 2, 3)),
ia.BoundingBoxesOnImage([ia.BoundingBox(x1=0, y1=1, x2=2, y2=3)],
shape=(2, 2, 3)),
ia.PolygonsOnImage([ia.Polygon([(0, 0), (1, 0), (1, 1)])],
shape=(2, 2, 3)),
ia.LineStringsOnImage([ia.LineString([(0, 0), (1, 0)])],
shape=(2, 2, 3))
]
cbaoi_names = ["keypoints", "bounding_boxes", "polygons",
"line_strings"]
aug = iaa.TotalDropout(p=0.0)
for name, cbaoi in zip(cbaoi_names, cbaois):
with self.subTest(datatype=name):
cbaoi_aug = aug(**{name: cbaoi})
assert cbaoi_aug.shape == (2, 2, 3)
assert np.allclose(
cbaoi_aug.items[0].coords,
cbaoi.items[0].coords
)
def test_p_is_075_multiple_images_list(self):
images = [np.full((1, 1, 1), 255, dtype=np.uint8)] * 3000
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum([np.sum(image_aug == 255) for image_aug in images_aug])
nb_dropped = len(images) - nb_kept
for image_aug in images_aug:
assert image_aug.shape == images[0].shape
assert image_aug.dtype.name == images[0].dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_p_is_075_multiple_images_array(self):
images = np.full((3000, 1, 1, 1), 255, dtype=np.uint8)
aug = iaa.TotalDropout(p=0.75)
images_aug = aug(images=images)
nb_kept = np.sum(images_aug == 255)
nb_dropped = len(images) - nb_kept
assert images_aug.shape == images.shape
assert images_aug.dtype.name == images.dtype.name
assert np.isclose(nb_dropped, len(images)*0.75, atol=75)
def test_get_parameters(self):
aug = iaa.TotalDropout(p=0.0)
params = aug.get_parameters()
assert params[0] is aug.p
def test_unusual_channel_numbers(self):
shapes = [
(5, 1, 1, 4),
(5, 1, 1, 5),
(5, 1, 1, 512),
(5, 1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.zeros(shape, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert np.all(images_aug == 0)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == shape
def test_zero_sized_axes(self):
shapes = [
(5, 0, 0),
(5, 0, 1),
(5, 1, 0),
(5, 0, 1, 0),
(5, 1, 0, 0),
(5, 0, 1, 1),
(5, 1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
images = np.full(shape, 255, dtype=np.uint8)
aug = iaa.TotalDropout(1.0)
images_aug = aug(images=images)
assert images_aug.dtype.name == "uint8"
assert images_aug.shape == images.shape
def test_other_dtypes_bool(self):
image = np.full((1, 1, 10), 1, dtype=bool)
aug = iaa.TotalDropout(p=1.0)
image_aug = aug(image=image)
assert image_aug.shape == image.shape
assert image_aug.dtype.name == "bool"
assert np.sum(image_aug == 1) == 0
def test_other_dtypes_uint_int(self):
dts = ["uint8", "uint16", "uint32", "uint64",
"int8", "int16", "int32", "int64"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, int(center_value), max_value]
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0) or value == 0:
assert np.sum(images_aug == 0) == 5*3
else:
assert np.sum(images_aug == value) == 5*3
def test_other_dtypes_float(self):
dts = ["float16", "float32", "float64", "float128"]
for dt in dts:
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
values = [min_value, -10.0, center_value, 10.0, max_value]
atol = 1e-3*max_value if dt == "float16" else 1e-9 * max_value
_isclose = functools.partial(np.isclose, atol=atol, rtol=0)
for value in values:
for p in [1.0, 0.0]:
with self.subTest(dtype=dt, value=value, p=p):
images = np.full((5, 1, 1, 3), value, dtype=dt)
aug = iaa.TotalDropout(p=p)
images_aug = aug(images=images)
assert images_aug.shape == images.shape
assert images_aug.dtype.name == dt
if np.isclose(p, 1.0):
assert np.sum(_isclose(images_aug, 0.0)) == 5*3
else:
assert (
np.sum(_isclose(images_aug, np.float128(value)))
== 5*3)
def test_pickleable(self):
aug = iaa.TotalDropout(p=0.5, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=30, shape=(4, 4, 2))
class TestMultiply(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.Multiply(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.Multiply(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.Multiply(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_per_channel(self):
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=True)
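# each of the 100 channels gets an independently drawn factor from {0, 2};
# the probability that only one of the two factors appears at all is
# 2 * 0.5**100, so both 0 and 2 are expected in the output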
observed = aug.augment_image(np.ones((1, 1, 100), dtype=np.uint8))
uq = np.unique(observed)
assert observed.shape == (1, 1, 100)
assert 0 in uq
assert 2 in uq
assert len(uq) == 2
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.Multiply(mul=iap.Choice([0, 2]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((1, 1, 20), dtype=np.uint8))
assert observed.shape == (1, 1, 20)
uq = np.unique(observed)
per_channel = (len(uq) == 2)
if per_channel:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.Multiply(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.Multiply(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.Multiply(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.Multiply(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.Multiply(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.Multiply(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
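# multiplications that would leave the dtype's value range are expected to
# saturate at min_value/max_value rather than overflow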
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.Multiply(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(1.2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(1.2 * int(center_value)))
if np.dtype(dtype).kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.Multiply(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(10)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(-2)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
# non-uint8 currently don't increase the itemsize
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((1, 1, 3), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) == 1
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
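# Note (for clarity): "itemsize increase was deactivated" above means imgaug no
# longer promotes arrays to a wider dtype before multiplying. Overflow/underflow
# saturation (e.g. uint8: 255 * 10 staying at 255, or max_value * -2 clipping to
# min_value) is therefore only asserted for uint8; for the other integer dtypes
# those cases are skipped rather than checked.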
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.Multiply(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.Multiply(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.Multiply(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.Multiply(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((1, 1, 3), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.Multiply(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.Multiply((0.5, 1.5), per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class TestMultiplyElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mul_is_one(self):
# no multiply, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mul_is_above_one(self):
# multiply >1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 120
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 120]
assert array_equal_lists(observed, expected)
def test_mul_is_below_one(self):
# multiply <1.0
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
images_list = [base_img]
aug = iaa.MultiplyElementwise(mul=0.8)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.ones((1, 3, 3, 1), dtype=np.uint8) * 80
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.ones((3, 3, 1), dtype=np.uint8) * 80]
assert array_equal_lists(observed, expected)
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.MultiplyElementwise(mul=1.2)
aug_det = iaa.Multiply(mul=1.2).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_tuple_as_mul(self):
# varying multiply factors
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0, 2.0))
aug_det = aug.to_deterministic()
last_aug = None
last_aug_det = None
nb_changed_aug = 0
nb_changed_aug_det = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_det = aug_det.augment_images(images)
if i == 0:
last_aug = observed_aug
last_aug_det = observed_aug_det
else:
if not np.array_equal(observed_aug, last_aug):
nb_changed_aug += 1
if not np.array_equal(observed_aug_det, last_aug_det):
nb_changed_aug_det += 1
last_aug = observed_aug
last_aug_det = observed_aug_det
assert nb_changed_aug >= int(nb_iterations * 0.95)
assert nb_changed_aug_det == 0
def test_samples_change_by_spatial_location(self):
# values should change between pixels
base_img = np.ones((3, 3, 1), dtype=np.uint8) * 100
images = np.array([base_img])
aug = iaa.MultiplyElementwise(mul=(0.5, 1.5))
nb_same = 0
nb_different = 0
nb_iterations = 1000
for i in sm.xrange(nb_iterations):
observed_aug = aug.augment_images(images)
observed_aug_flat = observed_aug.flatten()
last = None
for j in sm.xrange(observed_aug_flat.size):
if last is not None:
v = observed_aug_flat[j]
if v - 0.0001 <= last <= v + 0.0001:
nb_same += 1
else:
nb_different += 1
last = observed_aug_flat[j]
assert nb_different > 0.95 * (nb_different + nb_same)
def test_per_channel(self):
# test channelwise
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=True)
observed = aug.augment_image(np.ones((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
assert observed.shape == (100, 100, 3)
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.MultiplyElementwise(mul=iap.Choice([0, 1]), per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.ones((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul="test")
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.MultiplyElementwise(mul=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.ones(shape, dtype=np.uint8)
aug = iaa.MultiplyElementwise(2)
image_aug = aug(image=image)
assert np.all(image_aug == 2)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.MultiplyElementwise(mul=1, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert params[0].value == 1
assert params[1].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.MultiplyElementwise(mul=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
image = np.zeros((3, 3), dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(2.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
image = np.full((3, 3), True, dtype=bool)
aug = iaa.MultiplyElementwise(-1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.int8, np.int16]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 10)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == 100)
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 5)
image = np.full((3, 3), 0, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
else:
image = np.full((3, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(-1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == -10)
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == int(center_value))
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), int(center_value), dtype=dtype)
# aug = iaa.MultiplyElementwise(1.2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == int(1.2 * int(center_value)))
# deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
if dtype.kind == "u":
image = np.full((3, 3), int(center_value), dtype=dtype)
aug = iaa.MultiplyElementwise(100)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(1)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == max_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert np.all(image_aug == min_value)
# partially deactivated, because itemsize increase was deactivated
if dtype.name == "uint8":
for _ in sm.xrange(10):
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(0.5, 1.5), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(5 <= image_aug, image_aug <= 15))
assert len(np.unique(image_aug)) > 1
image = np.full((5, 5, 3), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
assert np.all(image_aug[..., 0] == image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(1, 3), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(10 <= image_aug, image_aug <= 30))
assert len(np.unique(image_aug)) > 1
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
if dtype == np.float16:
atol = 1e-3 * max_value
else:
atol = 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
image = np.full((3, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(1.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 10.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), 10.0, dtype=dtype)
# aug = iaa.MultiplyElementwise(2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, 20.0)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), max_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-10)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, min_value)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
image = np.full((3, 3), max_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.5)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.5*max_value)
# deactivated, because itemsize increase was deactivated
# image = np.full((3, 3), min_value, dtype=dtype)
# aug = iaa.MultiplyElementwise(-2.0)
# image_aug = aug.augment_image(image)
# assert image_aug.dtype.type == dtype
# assert _allclose(image_aug, max_value)
image = np.full((3, 3), min_value, dtype=dtype)
aug = iaa.MultiplyElementwise(0.0)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, 0.0)
# using tolerances of -100 - 1e-2 and 100 + 1e-2 is not enough for float16, had to be increased to -/+ 1e-1
# deactivated, because itemsize increase was deactivated
"""
for _ in sm.xrange(10):
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.Uniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
image = np.full((50, 1, 3), 10.0, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10))
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[1:, :, 0], image_aug[:-1, :, 0])
assert np.allclose(image_aug[..., 0], image_aug[..., 1])
image = np.full((1, 1, 100), 10, dtype=dtype)
aug = iaa.MultiplyElementwise(iap.DiscreteUniform(-10, 10), per_channel=True)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(-100 - 1e-1 < image_aug, image_aug < 100 + 1e-1))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1])
"""
def test_pickleable(self):
aug = iaa.MultiplyElementwise((0.5, 1.5), per_channel=True,
random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestReplaceElementwise(unittest.TestCase):
def setUp(self):
reseed()
def test_mask_is_always_zero(self):
# no replace, shouldn't change anything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=0, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = images
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = images_list
assert array_equal_lists(observed, expected)
def test_mask_is_always_one(self):
# replace at 100 percent prob., should change everything
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
images = np.array([base_img])
images_list = [base_img]
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
aug_det = aug.to_deterministic()
observed = aug.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
assert observed.shape == (1, 3, 3, 1)
observed = aug.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
observed = aug_det.augment_images(images)
expected = np.zeros((1, 3, 3, 1), dtype=np.uint8)
assert np.array_equal(observed, expected)
observed = aug_det.augment_images(images_list)
expected = [np.zeros((3, 3, 1), dtype=np.uint8)]
assert array_equal_lists(observed, expected)
def test_mask_is_stochastic_parameter(self):
# replace half
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
img = np.ones((100, 100, 1), dtype=np.uint8)
nb_iterations = 100
nb_diff_all = 0
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
nb_diff = np.sum(img != observed)
nb_diff_all += nb_diff
p = nb_diff_all / (nb_iterations * 100 * 100)
assert 0.45 <= p <= 0.55
def test_mask_is_list(self):
# mask is list
aug = iaa.ReplaceElementwise(mask=[0.2, 0.7], replacement=1)
img = np.zeros((20, 20, 1), dtype=np.uint8)
seen = [0, 0, 0]
for i in sm.xrange(400):
observed = aug.augment_image(img)
p = np.mean(observed)
if 0.1 < p < 0.3:
seen[0] += 1
elif 0.6 < p < 0.8:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] <= 10
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test_keypoints_dont_change(self):
# keypoints shouldn't be changed
base_img = np.ones((3, 3, 1), dtype=np.uint8) + 99
keypoints = [ia.KeypointsOnImage([ia.Keypoint(x=0, y=0), ia.Keypoint(x=1, y=1),
ia.Keypoint(x=2, y=2)], shape=base_img.shape)]
aug = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0)
aug_det = iaa.ReplaceElementwise(mask=iap.Binomial(p=0.5), replacement=0).to_deterministic()
observed = aug.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
observed = aug_det.augment_keypoints(keypoints)
expected = keypoints
assert keypoints_equal(observed, expected)
def test_replacement_is_stochastic_parameter(self):
# different replacements
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Choice([100, 200]))
img = np.zeros((1000, 1000, 1), dtype=np.uint8)
img100 = img + 100
img200 = img + 200
observed = aug.augment_image(img)
nb_diff_100 = np.sum(img100 != observed)
nb_diff_200 = np.sum(img200 != observed)
p100 = nb_diff_100 / (1000 * 1000)
p200 = nb_diff_200 / (1000 * 1000)
assert 0.45 <= p100 <= 0.55
assert 0.45 <= p200 <= 0.55
def test_per_channel(self):
# test channelwise
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=True)
observed = aug.augment_image(np.zeros((100, 100, 3), dtype=np.uint8))
sums = np.sum(observed, axis=2)
values = np.unique(sums)
assert all([(value in values) for value in [0, 1, 2, 3]])
def test_per_channel_with_probability(self):
# test channelwise with probability
aug = iaa.ReplaceElementwise(mask=iap.Choice([0, 1]), replacement=1, per_channel=0.5)
seen = [0, 0]
for _ in sm.xrange(400):
observed = aug.augment_image(np.zeros((20, 20, 3), dtype=np.uint8))
assert observed.shape == (20, 20, 3)
sums = np.sum(observed, axis=2)
values = np.unique(sums)
all_values_found = all([(value in values) for value in [0, 1, 2, 3]])
if all_values_found:
seen[0] += 1
else:
seen[1] += 1
assert 150 < seen[0] < 250
assert 150 < seen[1] < 250
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask="test", replacement=1)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_aug = iaa.ReplaceElementwise(mask=1, replacement=1, per_channel="test")
except Exception:
got_exception = True
assert got_exception
def test_zero_sized_axes(self):
shapes = [
(0, 0),
(0, 1),
(1, 0),
(0, 1, 0),
(1, 0, 0),
(0, 1, 1),
(1, 0, 1)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_unusual_channel_numbers(self):
shapes = [
(1, 1, 4),
(1, 1, 5),
(1, 1, 512),
(1, 1, 513)
]
for shape in shapes:
with self.subTest(shape=shape):
image = np.zeros(shape, dtype=np.uint8)
aug = iaa.ReplaceElementwise(1.0, 1)
image_aug = aug(image=image)
assert np.all(image_aug == 1)
assert image_aug.dtype.name == "uint8"
assert image_aug.shape == image.shape
def test_get_parameters(self):
# test get_parameters()
aug = iaa.ReplaceElementwise(mask=0.5, replacement=2, per_channel=False)
params = aug.get_parameters()
assert isinstance(params[0], iap.Binomial)
assert isinstance(params[0].p, iap.Deterministic)
assert isinstance(params[1], iap.Deterministic)
assert isinstance(params[2], iap.Deterministic)
assert 0.5 - 1e-6 < params[0].p.value < 0.5 + 1e-6
assert params[1].value == 2
assert params[2].value == 0
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.5)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_other_dtypes_bool(self):
# bool
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), True, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.7)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=0.2)
image = np.full((3, 3), False, dtype=bool)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == np.bool_
assert np.all(image_aug == 0)
def test_other_dtypes_uint_int(self):
# uint, int
for dtype in [np.uint8, np.uint16, np.uint32, np.int8, np.int16, np.int32]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
aug = iaa.ReplaceElementwise(mask=1, replacement=1)
image = np.full((3, 3), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 1)
aug = iaa.ReplaceElementwise(mask=1, replacement=2)
image = np.full((3, 3), 1, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == 2)
# deterministic stochastic parameters are by default int32 for
# any integer value and hence cannot cover the full uint32 value
# range
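# (A deterministic int32 parameter can hold at most 2147483647, while uint32
# reaches 4294967295, hence the exclusion below.)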
if dtype.name != "uint32":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(image_aug == min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 1
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert len(np.unique(image_aug)) > 2
def test_other_dtypes_float(self):
# float
for dtype in [np.float16, np.float32, np.float64]:
dtype = np.dtype(dtype)
min_value, center_value, max_value = iadt.get_value_range_of_dtype(dtype)
atol = 1e-3*max_value if dtype == np.float16 else 1e-9 * max_value
_allclose = functools.partial(np.allclose, atol=atol, rtol=0)
aug = iaa.ReplaceElementwise(mask=1, replacement=1.0)
image = np.full((3, 3), 0.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 1.0)
aug = iaa.ReplaceElementwise(mask=1, replacement=2.0)
image = np.full((3, 3), 1.0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.allclose(image_aug, 2.0)
# deterministic stochastic parameters are by default float32 for
# any float value and hence cannot cover the full float64 value
# range
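# (float32 tops out around 3.4e38, far below the float64 maximum of roughly
# 1.8e308, hence the exclusion below.)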
if dtype.name != "float64":
aug = iaa.ReplaceElementwise(mask=1, replacement=max_value)
image = np.full((3, 3), min_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, max_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=min_value)
image = np.full((3, 3), max_value, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert _allclose(image_aug, min_value)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.Uniform(1.0, 10.0))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=1, replacement=iap.DiscreteUniform(1, 10))
image = np.full((100, 1), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(1 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[1:, :], image_aug[:-1, :], atol=0.01)
aug = iaa.ReplaceElementwise(mask=0.5, replacement=iap.DiscreteUniform(1, 10), per_channel=True)
image = np.full((1, 1, 100), 0, dtype=dtype)
image_aug = aug.augment_image(image)
assert image_aug.dtype.type == dtype
assert np.all(np.logical_and(0 <= image_aug, image_aug <= 10))
assert not np.allclose(image_aug[:, :, 1:], image_aug[:, :, :-1], atol=0.01)
def test_pickleable(self):
aug = iaa.ReplaceElementwise(mask=0.5, replacement=(0, 255),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
# no more tests necessary here as SaltAndPepper is just a tiny wrapper around
# ReplaceElementwise
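# (Roughly, SaltAndPepper(p) corresponds to ReplaceElementwise with a Binomial(p)
# mask and a bimodal replacement distribution concentrated near 0 and 255; the
# exact parameterization is an implementation detail of the imgaug version under
# test.)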
class TestSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.SaltAndPepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.SaltAndPepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSaltAndPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSaltAndPepper(p=0.5, size_px=100)
aug2 = iaa.CoarseSaltAndPepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSaltAndPepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSaltAndPepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
# each of the nb_bins bins should hold roughly 1/nb_bins of the samples
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
got_exception = False
try:
_ = iaa.CoarseSaltAndPepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSaltAndPepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSaltAndPepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Salt is just a tiny wrapper around
# ReplaceElementwise
class TestSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
# Salt() occasionally replaces with 127, which probably should be the center-point here anyway
assert np.all(observed >= 127)
def test_p_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Salt(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper == 0
assert nb_salt > 200
def test_pickleable(self):
aug = iaa.Salt(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarseSalt(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarseSalt(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarseSalt(p=0.5, size_px=100)
aug2 = iaa.CoarseSalt(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarseSalt(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarseSalt(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
# each of the nb_bins bins should hold roughly 1/nb_bins of the samples
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarseSalt(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarseSalt(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarseSalt(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarseSalt(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
# no more tests necessary here as Pepper is just a tiny wrapper around
# ReplaceElementwise
class TestPepper(unittest.TestCase):
def setUp(self):
reseed()
def test_probability_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=0.5)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
assert np.all(observed <= 128)
def test_probability_is_one(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.Pepper(p=1.0)
observed = aug.augment_image(base_img)
nb_pepper = np.sum(observed < 40)
nb_salt = np.sum(observed > 255 - 40)
assert nb_pepper > 200
assert nb_salt == 0
def test_pickleable(self):
aug = iaa.Pepper(p=0.5, per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=3)
class TestCoarsePepper(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_fifty_percent(self):
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
aug = iaa.CoarsePepper(p=0.5, size_px=100)
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
assert 0.4 < p < 0.6
def test_size_px(self):
aug1 = iaa.CoarsePepper(p=0.5, size_px=100)
aug2 = iaa.CoarsePepper(p=0.5, size_px=10)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
ps1 = []
ps2 = []
for _ in sm.xrange(100):
observed1 = aug1.augment_image(base_img)
observed2 = aug2.augment_image(base_img)
p1 = np.mean(observed1 != 128)
p2 = np.mean(observed2 != 128)
ps1.append(p1)
ps2.append(p2)
assert 0.4 < np.mean(ps2) < 0.6
assert np.std(ps1)*1.5 < np.std(ps2)
def test_p_is_list(self):
aug = iaa.CoarsePepper(p=[0.2, 0.5], size_px=100)
base_img = np.zeros((100, 100, 1), dtype=np.uint8) + 128
seen = [0, 0, 0]
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
diff_020 = abs(0.2 - p)
diff_050 = abs(0.5 - p)
if diff_020 < 0.025:
seen[0] += 1
elif diff_050 < 0.025:
seen[1] += 1
else:
seen[2] += 1
assert seen[2] < 10
assert 75 < seen[0] < 125
assert 75 < seen[1] < 125
def test_p_is_tuple(self):
aug = iaa.CoarsePepper(p=(0.0, 1.0), size_px=50)
base_img = np.zeros((50, 50, 1), dtype=np.uint8) + 128
ps = []
for _ in sm.xrange(200):
observed = aug.augment_image(base_img)
p = np.mean(observed != 128)
ps.append(p)
nb_bins = 5
hist, _ = np.histogram(ps, bins=nb_bins, range=(0.0, 1.0), density=False)
tolerance = 0.05
for nb_seen in hist:
density = nb_seen / len(ps)
# each of the nb_bins bins should hold roughly 1/nb_bins of the samples
assert 1.0/nb_bins - tolerance < density < 1.0/nb_bins + tolerance
def test___init___bad_datatypes(self):
# test exceptions for wrong parameter types
got_exception = False
try:
_ = iaa.CoarsePepper(p="test", size_px=100)
except Exception:
got_exception = True
assert got_exception
def test_size_px_or_size_percent_not_none(self):
got_exception = False
try:
_ = iaa.CoarsePepper(p=0.5, size_px=None, size_percent=None)
except Exception:
got_exception = True
assert got_exception
def test_heatmaps_dont_change(self):
# test heatmaps (not affected by augmenter)
aug = iaa.CoarsePepper(p=1.0, size_px=2)
hm = ia.quokka_heatmap()
hm_aug = aug.augment_heatmaps([hm])[0]
assert np.allclose(hm.arr_0to1, hm_aug.arr_0to1)
def test_pickleable(self):
aug = iaa.CoarsePepper(p=0.5, size_px=(4, 15),
per_channel=True, random_state=1)
runtest_pickleable_uint8_img(aug, iterations=20)
class Test_invert(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] is None
assert args[1]["max_value"] is None
assert args[1]["threshold"] is None
assert args[1]["invert_above_threshold"] is True
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_invert):
mock_invert.return_value = "foo"
arr = np.zeros((1,), dtype=np.uint8)
observed = iaa.invert(arr, min_value=1, max_value=10, threshold=5,
invert_above_threshold=False)
assert observed == "foo"
args = mock_invert.call_args_list[0]
assert np.array_equal(mock_invert.call_args_list[0][0][0], arr)
assert args[1]["min_value"] == 1
assert args[1]["max_value"] == 10
assert args[1]["threshold"] == 5
assert args[1]["invert_above_threshold"] is False
def test_uint8(self):
values = np.array([0, 20, 45, 60, 128, 255], dtype=np.uint8)
expected = np.array([
255,
255-20,
255-45,
255-60,
255-128,
255-255
], dtype=np.uint8)
observed = iaa.invert(values)
assert np.array_equal(observed, expected)
assert observed is not values
# most parts of this function are tested via Invert
class Test_invert_(unittest.TestCase):
def test_arr_is_noncontiguous_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_flipped = np.fliplr(np.copy(zeros + 255))
observed = iaa.invert_(max_vr_flipped)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_arr_is_view_uint8(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
max_vr_view = np.copy(zeros + 255)[:, :, [0, 2]]
observed = iaa.invert_(max_vr_view)
expected = zeros[:, :, [0, 2]]
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_uint(self):
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values))
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_0_inv_above(self):
threshold = 0
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
max_value - 60,
max_value - center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_255_inv_above(self):
threshold = 255
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint8_with_threshold_256_inv_above(self):
threshold = 256
dtypes = ["uint8"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0,
20,
45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["uint8", "uint16", "uint32", "uint64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
max_value - 0,
max_value - 20,
max_value - 45,
60,
center_value,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_uint_with_threshold_50_inv_above_with_min_max(self):
threshold = 50
# uint64 does not support custom min/max, hence removed it here
dtypes = ["uint8", "uint16", "uint32"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([0, 20, 45, 60, center_value, max_value],
dtype=dt)
expected = np.array([
0, # not clipped to 10 as only >thresh affected
20,
45,
100 - 50,
100 - 90,
100 - 90
], dtype=dt)
observed = iaa.invert_(np.copy(values),
min_value=10,
max_value=100,
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
-45,
-20,
center_value,
20,
45,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.array_equal(observed, expected)
def test_int_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["int8", "int16", "int32", "int64"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = int(center_value)
values = np.array([-45, -20, center_value, 20, 45, max_value],
dtype=dt)
expected = np.array([
(-1) * (-45) - 1,
(-1) * (-20) - 1,
(-1) * center_value - 1,
(-1) * 20 - 1,
(-1) * 45 - 1,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.array_equal(observed, expected)
def test_float_with_threshold_50_inv_above(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
-45.5,
-20.5,
center_value,
20.5,
45.5,
min_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=True)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
def test_float_with_threshold_50_inv_below(self):
threshold = 50
dtypes = ["float16", "float32", "float64", "float128"]
for dt in dtypes:
with self.subTest(dtype=dt):
min_value, center_value, max_value = \
iadt.get_value_range_of_dtype(dt)
center_value = center_value
values = np.array([-45.5, -20.5, center_value, 20.5, 45.5,
max_value],
dtype=dt)
expected = np.array([
(-1) * (-45.5),
(-1) * (-20.5),
(-1) * center_value,
(-1) * 20.5,
(-1) * 45.5,
max_value
], dtype=dt)
observed = iaa.invert_(np.copy(values),
threshold=threshold,
invert_above_threshold=False)
assert np.allclose(observed, expected, rtol=0, atol=1e-4)
class Test_solarize(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.solarize_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is not arr
assert np.array_equal(args[0], arr)
assert kwargs["threshold"] == 5
assert observed == "foo"
def test_uint8(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
observed = iaa.solarize(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_compare_with_pil(self):
import PIL.Image
import PIL.ImageOps
def _solarize_pil(image, threshold):
img = PIL.Image.fromarray(image)
return np.asarray(PIL.ImageOps.solarize(img, threshold))
image = np.mod(np.arange(20*20*3), 255).astype(np.uint8)\
.reshape((20, 20, 3))
for threshold in np.arange(256):
image_pil = _solarize_pil(image, threshold)
image_iaa = iaa.solarize(image, threshold)
assert np.array_equal(image_pil, image_iaa)
class Test_solarize_(unittest.TestCase):
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked_defaults(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 128
assert observed == "foo"
@mock.patch("imgaug.augmenters.arithmetic.invert_")
def test_mocked(self, mock_sol):
arr = np.zeros((1,), dtype=np.uint8)
mock_sol.return_value = "foo"
observed = iaa.solarize_(arr, threshold=5)
args = mock_sol.call_args_list[0][0]
kwargs = mock_sol.call_args_list[0][1]
assert args[0] is arr
assert kwargs["threshold"] == 5
assert observed == "foo"
class TestInvert(unittest.TestCase):
def setUp(self):
reseed()
def test_p_is_one(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0).augment_image(zeros + 255)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_p_is_zero(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=0.0).augment_image(zeros + 255)
expected = zeros + 255
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200).augment_image(zeros + 200)
expected = zeros
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set(self):
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 200)
expected = zeros + 100
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros + 100)
expected = zeros + 200
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
def test_min_value_and_max_value_set_with_float_image(self):
# with min/max and float inputs
zeros = np.zeros((4, 4, 3), dtype=np.uint8)
zeros_f32 = zeros.astype(np.float32)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 200)
expected = zeros_f32 + 100
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
observed = iaa.Invert(p=1.0, max_value=200, min_value=100).augment_image(zeros_f32 + 100)
expected = zeros_f32 + 200
assert observed.dtype.name == "float32"
assert np.array_equal(observed, expected)
def test_p_is_80_percent(self):
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=0.8)
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
nb_iterations = 1000
nb_inverted = 0
aug = iaa.Invert(p=iap.Binomial(0.8))
img = np.zeros((1, 1, 1), dtype=np.uint8) + 255
expected = np.zeros((1, 1, 1), dtype=np.uint8)
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
if np.array_equal(observed, expected):
nb_inverted += 1
pinv = nb_inverted / nb_iterations
assert 0.75 <= pinv <= 0.85
def test_per_channel(self):
aug = iaa.Invert(p=0.5, per_channel=True)
img = np.zeros((1, 1, 100), dtype=np.uint8) + 255
observed = aug.augment_image(img)
assert len(np.unique(observed)) == 2
# TODO split into two tests
def test_p_is_stochastic_parameter_per_channel_is_probability(self):
nb_iterations = 1000
aug = iaa.Invert(p=iap.Binomial(0.8), per_channel=0.7)
img = np.zeros((1, 1, 20), dtype=np.uint8) + 255
seen = [0, 0]
for i in sm.xrange(nb_iterations):
observed = aug.augment_image(img)
uq = np.unique(observed)
if len(uq) == 1:
seen[0] += 1
elif len(uq) == 2:
seen[1] += 1
else:
assert False
assert 300 - 75 < seen[0] < 300 + 75
assert 700 - 75 < seen[1] < 700 + 75
def test_threshold(self):
arr = np.array([0, 10, 50, 150, 200, 255], dtype=np.uint8)
arr = arr.reshape((2, 3, 1))
aug = iaa.Invert(p=1.0, threshold=128, invert_above_threshold=True)
observed = aug.augment_image(arr)
expected = np.array([0, 10, 50, 255-150, 255-200, 255-255],
dtype=np.uint8).reshape((2, 3, 1))
assert observed.dtype.name == "uint8"
assert np.array_equal(observed, expected)
from __future__ import print_function
from orphics import maps,io,cosmology,stats
from pixell import enmap,curvedsky as cs
import numpy as np
import os,sys
from tilec import utils as tutils
import healpy as hp
#Port of healpix module coord_v_convert.f90 to python by JLS
#Feb 28, 2017
from numpy import sin,cos
from numpy import arctan2 as atan2
from numpy import sqrt
froot = "/scratch/r/rbond/msyriac/data/depot/hpymap/"
DTOR = np.pi/180.0
def angdist(v1,v2):
sprod=np.dot(v1,v2)
v3=np.cross(v1,v2)
vprod=sqrt(np.dot(v3,v3))
return atan2(vprod,sprod)
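# Illustrative check (added as an assumption, not part of the original port):
# for orthogonal unit vectors the angular distance is pi/2, e.g.
#   angdist(np.asarray([1.0, 0.0, 0.0]), np.asarray([0.0, 1.0, 0.0]))
# should return approximately np.pi / 2.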
def py_coordsys2euler_zyz(iepoch, oepoch, isys, osys):
#, psi, theta, phi)
v1=np.asarray([1.0, 0.0, 0.0])
v2=np.asarray([0.0, 1.0, 0.0])
v3=np.asarray([0.0, 0.0, 1.0])
v1p=py_xcc_v_convert(v1,iepoch,oepoch,isys,osys)
v2p=py_xcc_v_convert(v2,iepoch,oepoch,isys,osys)
v3p=py_xcc_v_convert(v3,iepoch,oepoch,isys,osys)
v1p=v1p/sqrt(np.dot(v1p,v1p))
v2p=v2p/sqrt(np.dot(v2p,v2p))
v3p=v3p/sqrt(np.dot(v3p,v3p))
theta=angdist(v3,v3p)
psi=atan2(v2p[2],-v1p[2])
phi=atan2(v3p[1],v3p[0])
return psi,theta,phi
def py_xcc_v_convert(ivector,iepoch,oepoch,isys,osys):
isys=isys.lower()
osys=osys.lower()
isys=isys[0]
osys=osys[0]
if (isys=='c'):
isys='q'
if (osys=='c'):
osys='q'
if (isys=='q'):
ivector=py_xcc_dp_q_to_e(ivector,iepoch)
if (isys=='g'):
ivector=py_xcc_dp_g_to_e(ivector,iepoch)
if (iepoch!=oepoch):
ivector=py_xcc_dp_precess(ivector,iepoch,oepoch)
if (osys=='q'):
ivector=py_xcc_dp_e_to_q(ivector,oepoch)
if (osys=='g'):
ivector=py_xcc_dp_e_to_g(ivector,oepoch)
return ivector
def py_xcc_dp_e_to_q(ivector,epoch):
T = (epoch - 1900.e0) / 100.e0
epsilon = 23.452294e0 - 0.0130125e0*T - 1.63889e-6*T**2 + 5.02778e-7*T**3
hvector=np.zeros(ivector.shape)
dc = cos(DTOR * epsilon)
ds = sin(DTOR * epsilon)
hvector[0] = ivector[0]
hvector[1] = dc*ivector[1] - ds*ivector[2]
hvector[2] = dc*ivector[2] + ds*ivector[1]
return hvector
def py_xcc_dp_q_to_e(ivector,epoch):
hvector=np.zeros(ivector.shape)
T = (epoch - 1900.e0) / 100.e0
epsilon = 23.452294e0 - 0.0130125e0*T - 1.63889e-6*T**2 + 5.02778e-7*T**3
dc = cos(DTOR * epsilon)
ds = sin(DTOR * epsilon)
hvector[0] = ivector[0]
hvector[1] = dc*ivector[1] + ds*ivector[2]
hvector[2] = dc*ivector[2] - ds*ivector[1]
return hvector
def py_xcc_dp_e_to_g(ivector,epoch):
T=np.asarray([-0.054882486e0, -0.993821033e0, -0.096476249e0, 0.494116468e0, -0.110993846e0, 0.862281440e0, -0.867661702e0, -0.000346354e0, 0.497154957e0])
T=np.reshape(T,[3,3])
#T=T.transpose()
if (epoch != 2000.0):
ivector=py_xcc_dp_precess(ivector,epoch,2000.0)
return np.dot(T,ivector)
def py_xcc_dp_g_to_e(ivector,epoch):
T=np.asarray([-0.054882486e0, -0.993821033e0, -0.096476249e0, 0.494116468e0, -0.110993846e0, 0.862281440e0, -0.867661702e0, -0.000346354e0, 0.497154957e0])
T=np.reshape(T,[3,3])
T=T.transpose()
hvector=np.dot(T,ivector)
if (epoch != 2000.0):
return py_xcc_dp_precess(hvector,2000.0,epoch)
else:
return hvector
assert(1==0) #never get here
def py_xcc_dp_q_to_e(ivector,epoch):
# Set-up:
T = (epoch - 1900.0) / 100.0
epsilon = 23.452294 - 0.0130125*T - 1.63889e-6*T**2 + 5.02778e-7*T**3
hvector=np.zeros(ivector.shape)
# Conversion
dc = cos(DTOR * epsilon)
import numpy as np
import cv2
import augmentation
from skimage.util import img_as_float
def _compute_scale_and_crop(image_size, crop_size, padding, random_crop):
padded_size = crop_size[0] + padding[0], crop_size[1] + padding[1]
# Compute size ratio from the padded region size to the image_size
scale_y = float(image_size[0]) / float(padded_size[0])
scale_x = float(image_size[1]) / float(padded_size[1])
# Take the minimum as this is the factor by which we must scale to take a `padded_size` sized chunk
scale_factor = min(scale_y, scale_x)
# Compute the size of the region that we must extract from the image
region_height = int(float(crop_size[0]) * scale_factor + 0.5)
region_width = int(float(crop_size[1]) * scale_factor + 0.5)
# Compute the additional space available
if scale_x > scale_y:
# Crop in X
extra_x = image_size[1] - region_width
extra_y = padding[0]
else:
# Crop in Y
extra_y = image_size[0] - region_height
extra_x = padding[1]
# Either choose the centre piece or choose a random piece
if random_crop:
pos_y = np.random.randint(0, extra_y + 1, size=(1,))[0]
pos_x = np.random.randint(0, extra_x + 1, size=(1,))[0]
else:
pos_y = extra_y // 2
pos_x = extra_x // 2
return (pos_y, pos_x), (region_height, region_width)
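# Hypothetical usage sketch (shapes and values are assumptions, for
# illustration only): taking a centred 224x224 crop with 16px padding from a
# 480x640 image:
#   (pos_y, pos_x), (region_h, region_w) = _compute_scale_and_crop(
#       image_size=(480, 640), crop_size=(224, 224), padding=(16, 16),
#       random_crop=False)
# The (region_h, region_w) chunk at (pos_y, pos_x) is then extracted from the
# image and resized down to crop_size.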
def _compute_scales_and_crops(image_sizes, crop_size, padding, random_crop):
padded_size = crop_size[0] + padding[0], crop_size[1] + padding[1]
# Compute size ratio from the padded region size to the image_size
image_sizes = image_sizes.astype(float)
scale_ys = image_sizes[:, 0] / float(padded_size[0])
scale_xs = image_sizes[:, 1] / float(padded_size[1])
# Take the minimum as this is the factor by which we must scale to take a `padded_size` sized chunk
scale_factors = np.minimum(scale_ys, scale_xs)
# Compute the size of the region that we must extract from the image
region_sizes = (np.array(crop_size)[None, :] * scale_factors[:, None] + 0.5).astype(int)
# Compute the additional space available
extra_space = np.repeat(np.array(padding, dtype=int)[None, :], image_sizes.shape[0], axis=0)
# Crop in X
crop_in_x = scale_xs > scale_ys
extra_space[crop_in_x, 1] = image_sizes[crop_in_x, 1] - region_sizes[crop_in_x, 1]
# Crop in Y
crop_in_y = ~crop_in_x
extra_space[crop_in_y, 0] = image_sizes[crop_in_y, 0] - region_sizes[crop_in_y, 0]
# Either choose the centre piece or choose a random piece
if random_crop:
t = np.random.uniform(0.0, 1.0, size=image_sizes.shape)
import numpy as np
from eqdes import fns
from eqdes.extensions.exceptions import DesignError
def assess(fb, storey_forces, mom_ratio=0.6, verbose=0):
"""
Distribute the applied loads to a frame structure
Parameters
----------
fb: FrameBuilding object
mom_ratio: float
ratio of overturning moment that is resisted by column base hinges
verbose:
level of verbosity
Returns
-------
[beam moments, column base moments, seismic axial loads in exterior columns]
"""
if hasattr(fb, 'column_depth') and np.std(fb.column_depth) > 1e-2:
print('Does not work with odd column depths')
print(fb.column_depth)
raise NotImplementedError
mom_running = 0
mom_storey = np.zeros(fb.n_storeys)
v_storey = np.zeros(fb.n_storeys)
for i in range(fb.n_storeys):
if i == 0:
v_storey[-1 - i] = storey_forces[-1 - i]
else:
v_storey[-1 - i] = v_storey[-i] + storey_forces[-1 - i]
mom_storey[-1 - i] = (v_storey[-1 - i] * fb.interstorey_heights[-1 - i] + mom_running)
mom_running = mom_storey[-1 - i]
cumulative_total_shear = sum(v_storey)
base_shear = sum(storey_forces)
# Column_base_moment_total=mom_storey[0]*Base_moment_contribution
column_base_moment_total = base_shear * mom_ratio * fb.interstorey_heights[0]
moment_column_bases = (column_base_moment_total / fb.n_bays * np.ones((fb.n_bays + 1)))
moment_column_bases[0] = moment_column_bases[0] / 2
moment_column_bases[-1] = moment_column_bases[-1] / 2
axial_seismic = (mom_storey[0] - column_base_moment_total) / sum(fb.bay_lengths)
if verbose == 1:
print('Storey shear forces: \n', v_storey)
print('Moments', mom_storey)
print('Total overturning moment: ', mom_storey[0])
print('column_base_moment_total: ', column_base_moment_total)
print('Seismic axial: ', axial_seismic)
beam_shear_force = np.zeros(fb.n_storeys)
for i in range(int(np.ceil(fb.n_storeys / fb.beam_group_size))):
group_shear = np.average(
v_storey[i * fb.beam_group_size:(i + 1) * fb.beam_group_size]) / cumulative_total_shear * axial_seismic
if verbose > 1:
print('group shear: ', group_shear)
for j in range(int(fb.beam_group_size)):
if i * fb.beam_group_size + j == fb.n_storeys:
if verbose:
print('odd number of storeys')
break
beam_shear_force[i * fb.beam_group_size + j] = group_shear
if (sum(beam_shear_force) - axial_seismic) / axial_seismic > 1e-2:
raise DesignError('Beam shear force incorrect!')
moment_beams_cl = np.zeros((fb.n_storeys, fb.n_bays, 2))
import copy
import logging.config
import os
import pickle
# for Logging handling
import sys
import time
import numpy as np
from numpy.linalg import LinAlgError
from scipy.optimize import minimize
import model
logger = logging.getLogger(__name__)
def nonzero_indices(a):
"""Get an index with non-zero element.
Parameters
----------
a : numpy.ndarray
array
Returns
-------
np.nonzero() : numpy.ndarray
Index with non-zero element
"""
return (np.nonzero(a)[0])
def create_directory(dir_name):
"""create directory
Parameters
----------
dir_name : str(file path)
create directory name
Returns
-------
None
"""
if not os.path.isdir(dir_name):
os.makedirs(dir_name)
else:
pass
def calc_diff(C_pre, C_pos, t_pre, t_pos, rss_pre, rss_pos):
"""calculate difference
Parameters
----------
C_pre : numpy.ndarray
initialize control points
C_pos : numpy.ndarray
control points
t_pre : numpy.ndarray
initialize parameters
t_pos : numpy.ndarray
parameters
rss_pre : int
initialize rss
rss_pos : int
rss
Returns
-------
np.abs() : numpy.ndarray
absolute value
"""
if t_pre.shape[1] > t_pos.shape[1]:
t_pos = np.c_[t_pos, 1 - np.sum(t_pos, axis=1)]
else:
t_pre = np.c_[t_pre, 1 - np.sum(t_pre, axis=1)]
t_pos = np.c_[t_pos, 1 - np.sum(t_pos, axis=1)]
ratio_sum = 0
for key in C_pre:
ratio_sum += np.linalg.norm(C_pre[key] - C_pos[key]) / np.linalg.norm(
C_pre[key])
diff = rss_pre - rss_pos
logger.debug("{} {} {}".format(rss_pre, rss_pos, diff))
return (np.abs(diff))
def calc_gd_igd(dd1, dd2):
"""Calculate gd and igd.
Parameters
----------
dd1 : numpy.ndarray
estimated bezier simplex sample
dd2 : numpy.ndarray
validation data
Returns
-------
gd : float
Generational Distance
igd : float
Inverted Generational Distance
"""
gd = 0
igd = 0
for i in range(dd2.shape[0]):
d2 = dd2[i, :]
tmp = dd1 - d2
norm = np.linalg.norm(tmp, 1, axis=1)
v = np.min(norm)
gd += v
for i in range(dd1.shape[0]):
d1 = dd1[i, :]
tmp = dd2 - d1
norm = np.linalg.norm(tmp, 1, axis=1)
v = np.min(norm)
igd += v
return (gd / dd2.shape[0], igd / dd1.shape[0])
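# Minimal sketch of exercising calc_gd_igd (values are illustrative
# assumptions, not taken from any experiment):
#   dd1 = np.array([[0.0, 0.0], [1.0, 1.0]])  # estimated bezier simplex sample
#   dd2 = np.array([[0.0, 0.1], [1.0, 0.9]])  # validation data
#   gd, igd = calc_gd_igd(dd1, dd2)
# gd averages, over the validation points, the L1 distance to the nearest
# estimated point; igd averages the distances in the opposite direction.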
class BorgesPastvaTrainer:
"""Polynomial Regression Trainer.
Attributes
----------
dimSpace : int
dimension of the space the bezier simplex is embedded in
dimSimplex : int
dimension of the bezier simplex
degree : int
degree of the bezier simplex (determines the number of control points)
"""
def __init__(self, dimSpace, degree, dimSimplex):
"""Borges Pastva Trainer initialize.
Parameters
----------
dimSpace : int
dimension of the embedding space
degree : int
degree of the bezier simplex
dimSimplex : int
dimension of the bezier simplex
Returns
----------
None
"""
self.dimSpace = dimSpace # dimension of the embedding space
self.dimSimplex = dimSimplex # dimension of bezier simplex
self.degree = degree # degree of the bezier simplex
self.bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
def initialize_control_point(self, data):
"""Initialize control point.
Parameters
----------
data : list
test data
Returns
----------
C : dict
control point
"""
bezier_simplex = model.BezierSimplex(dimSpace=self.dimSpace,
dimSimplex=self.dimSimplex,
degree=self.degree)
C = bezier_simplex.initialize_control_point(data)
return (C)
def gradient(self, c, t):
"""Calculate gradient.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
g : dict
gradient of the bezier simplex at t, keyed by parameter direction
"""
g = {}
x = {}
for d in range(self.dimSimplex - 1):
x[d] = np.zeros(self.dimSpace)
for d in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_all.keys():
for i in range(self.dimSpace):
x[d][i] += self.bezier_simplex.monomial_diff(
multi_index=key, d0=d, d1=None)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for d in x:
g[(d, )] = x[d]
return (g)
def hessian(self, c, t):
"""Calculate hessian.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
Returns
----------
h : dict
hessian matrix
"""
h = {}
x = {}
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
x[(d1, d2)] = np.zeros(self.dimSpace)
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
for key in self.bezier_simplex.Mf_all.keys():
for i in range(self.dimSpace):
x[(d1, d2)][i] += self.bezier_simplex.monomial_diff(
multi_index=key, d0=d1, d1=d2)(
*t[0:self.dimSimplex - 1]) * c[key][i]
for (d1, d2) in x:
h[(d1, d2)] = x[(d1, d2)]
return (h)
def initialize_parameter(self, c, data):
"""Initialize parameter.
Parameters
----------
c : dict
control point
data : numpy.ndarray
sample points
Returns
----------
tt_ : numpy.ndarray
nearest parameter of each sample points
xx_ : numpy.ndarray
nearest points on the current bezier simplex
"""
tt, xx = self.bezier_simplex.meshgrid(c)
tt_ = np.empty([data.shape[0], self.dimSimplex])
xx_ = np.empty([data.shape[0], self.dimSpace])
for i in range(data.shape[0]):
a = data[i, :]
tmp = xx - a
norm = np.linalg.norm(tmp, axis=1)
amin = np.argmin(norm)
tt_[i, :] = tt[amin, :]
xx_[i, :] = xx[amin, :]
return (tt_, xx_)
def inner_product(self, c, t, x):
"""Inner product.
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : numpy.ndarray
point
Returns
----------
f : numpy.ndarray
inner products of the gradient with the residual (b - x)
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
f = np.array(np.zeros(self.dimSimplex - 1))
for d in range(self.dimSimplex - 1):
f[d] = sum(g[(d, )][i] * (b[i] - x[i])
for i in range(self.dimSpace))
return (f)
def inner_product_jaccobian(self, c, t, x):
"""Inner product(jaccobian).
Parameters
----------
c : dict
control point
t : [t[0], t[1], t[2], t[3]]
parameter
x : numpy.ndarray
point
Returns
----------
j : numpy.ndarray
jaccobian matrix
"""
g = self.gradient(c, t)
b = self.bezier_simplex.sampling(c, t)
h = self.hessian(c, t)
j = np.zeros([self.dimSimplex - 1, self.dimSimplex - 1])
for d1 in range(self.dimSimplex - 1):
for d2 in range(self.dimSimplex - 1):
j[d1, d2] = sum(h[(d1, d2)][i] * (b[i] - x[i]) +
g[(d1, )][i] * g[(d2, )][i]
for i in range(self.dimSpace))
return (j)
def newton_method(self, c, t_init, x, newton_itr=20, tolerance=10**(-5)):
"""Newton method.
Parameters
----------
c : dict
control point
t_init : list
parameter
x : numpy.ndarray
point
newton_itr : int
maximum number of Newton iterations
tolerance : float
convergence tolerance on the norm of the inner product
Returns
----------
t_k : numpy.ndarray
output point
"""
t_k = copy.deepcopy(t_init)
for k in range(newton_itr):
f = self.inner_product(c, t_k, x)
if np.linalg.norm(f) > tolerance:
j = self.inner_product_jaccobian(c, t_k, x)
# for Logging handling
try:
d = np.linalg.solve(j, f)
import math
import numpy as np
import numpy.matlib
import matplotlib.pyplot as plt
class CFARNeuralNet:
def __init__(self,
x_train,
train_labels,
validation,
validation_labels,
activation,
activation_derivative,
hiddens,
learning_rate=0.3,
decay_rate=0.002,
xavier_init=True):
self.x_train = x_train
self.train_labels = train_labels
self.validation = validation
self.validation_labels = validation_labels
self.accuracies = []
self.tested_models = []
self.n_samples, img_size = self.x_train.shape
self.n_labels = np.unique(train_labels).size
self.eta = learning_rate
self.decay = decay_rate
self.hiddens = hiddens
self.activation = activation
self.activation_derivative = activation_derivative
self.errors = np.array
self.y_train = np.zeros((self.train_labels.shape[0], self.n_labels))
for i in range(0, self.train_labels.shape[0]):
self.y_train[i, self.train_labels[i].astype(int)] = 1
n_input_layer = img_size
n_output_layer = self.n_labels
if xavier_init:
self.weight1 = np.random.randn(self.hiddens[0], n_input_layer) * np.sqrt(1 / n_input_layer)
if self.hiddens[1] > 0:
self.weight2 = np.random.randn(self.hiddens[1], self.hiddens[0]) * np.sqrt(1 / self.hiddens[0])
self.weight3 = np.random.randn(n_output_layer, self.hiddens[1]) * np.sqrt(1 / self.hiddens[1])
else:
self.weight2 = np.random.randn(n_output_layer, self.hiddens[0]) * np.sqrt(1 / self.hiddens[0])
else:
self.weight1 = np.random.uniform(0, 1, (self.hiddens[0], n_input_layer))
self.weight2 = np.random.uniform(0, 1, (n_output_layer, self.hiddens[0]))
self.weight1 = np.divide(self.weight1, np.matlib.repmat(np.sum(self.weight1, 1)[:, None], 1, n_input_layer))
self.weight2 = np.divide(self.weight2,
np.matlib.repmat(np.sum(self.weight2, 1)[:, None], 1, self.hiddens[0]))
if self.hiddens[1] > 0:
self.weight3 = np.random.uniform(0, 1, (n_output_layer, self.hiddens[1]))
self.weight3 = np.divide(self.weight3,
np.matlib.repmat(np.sum(self.weight3, 1)[:, None], 1, self.hiddens[1]))
self.weight2 = np.random.uniform(0, 1, (self.hiddens[1], self.hiddens[0]))
self.weight2 = np.divide(self.weight2,
np.matlib.repmat(np.sum(self.weight2, 1)[:, None], 1, self.hiddens[0]))
self.bias_weight1 = np.ones((self.hiddens[0],)) * (-self.x_train.mean())
self.bias_weight2 = np.zeros((n_output_layer,))
if self.hiddens[1] > 0:
self.bias_weight3 = np.ones((n_output_layer,)) * (-0.5)
self.bias_weight2 = np.ones((self.hiddens[1],)) * (-0.5)
def __dropout(self, activation, dropout_prob=0.0001):
if dropout_prob < 0 or dropout_prob > 1:
return activation
activation /= dropout_prob
mult = np.random.rand(*activation.shape) < dropout_prob
activation *= mult
return activation
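# Note on __dropout (added for clarity; behaviour inferred from the code, not
# stated by the original author): dropout_prob effectively acts as a keep
# probability -- activations are rescaled by 1/dropout_prob and then zeroed
# wherever the uniform draw is >= dropout_prob, i.e. inverted-dropout scaling.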
def train(self, n_epochs=100, n_batches=100):
self.errors = np.zeros((n_epochs,))
batch_size = math.ceil(self.n_samples / n_batches)
for i in range(0, n_epochs):
shuffled_idxs = np.random.permutation(self.n_samples)
for j in range(0, n_batches):
delta_weight1 = np.zeros(self.weight1.shape)
delta_weight2 = np.zeros(self.weight2.shape)
delta_bias1 = np.zeros(self.bias_weight1.shape)
delta_bias2 = np.zeros(self.bias_weight2.shape)
delta_weight3 = np.array
delta_bias3 = np.array
if self.hiddens[1] > 0:
delta_weight3 = np.zeros(self.weight3.shape)
delta_bias3 = np.zeros(self.bias_weight3.shape)
for k in range(0, batch_size):
idx = shuffled_idxs[j * batch_size + k]
x = self.x_train[idx]
desired_output = self.y_train[idx]
act1 = np.dot(self.weight1, x) + self.bias_weight1
out1 = self.activation(act1)
act2 = np.dot(self.weight2, out1) + self.bias_weight2
if self.hiddens[1] > 0:
out2 = self.activation(act2)
act3 = np.dot(self.weight3, out2) + self.bias_weight3
out3 = softmax(act3)
e_n = desired_output - out3
out3delta = e_n
delta_weight3 += np.outer(out3delta, out2)
delta_bias3 += out3delta
out2delta = self.activation_derivative(out2) * np.dot(self.weight3.T, out3delta)
else:
out2 = softmax(act2)
e_n = desired_output - out2
out2delta = e_n
delta_weight2 += np.outer(out2delta, out1)
delta_bias2 += out2delta
out1delta = self.activation_derivative(out1) * np.dot(self.weight2.T, out2delta)
delta_weight1 += np.outer(out1delta, x)
r"""Tools for Hankel transformations."""
import numpy as np
from mpmath import fp as mpm
from scipy.special import gamma, j0, j1, jn
from scipy.special import jn_zeros as _jn_zeros
from scipy.special import jv, yv
SRPI2 = np.sqrt(np.pi / 2.0)
def psi(t):
"""Compute the variable transform from Ogata 2005."""
return t * np.tanh(np.pi * np.sinh(t) / 2)
def d_psi(t):
"""Compute the derivative of the variable transform from Ogata 2005."""
t = np.array(t, dtype=float)
a = np.ones_like(t)
mask = t < 6
t = t[mask]
a[mask] = (np.pi * t * np.cosh(t) + np.sinh(np.pi * np.sinh(t))) / (
1.0 + np.cosh(np.pi * np.sinh(t))
)
return a
def weight(nu, zeros):
"""Get weights for the summation in the hankel transformation."""
return yv(nu, np.pi * zeros) / kernel(np.pi * zeros, nu + 1)
def roots(N, nu):
"""Get the first N Roots of the Bessel J(nu) functions divided by pi."""
if np.isclose(nu, np.floor(nu)):
return _jn_zeros(nu, N) / np.pi
if np.isclose(nu, 0.5):
# J(0.5) is just sqrt(2/(x*pi))*sin(x)
return np.arange(1, N + 1)
if np.isclose(nu, -0.5):
# J(-0.5) is just sqrt(2/(x*pi))*cos(x)
return np.arange(1, N + 1) - 0.5
return np.array([mpm.besseljzero(nu, i + 1) for i in range(N)]) / np.pi
def j_lim(nu):
"""
Compute the small-x limit factor of Bessel J: lim_{x->0} J(nu, x) / x**nu = 0.5 ** nu / Gamma(nu + 1).
Parameters
----------
nu : float
Order of the Bessel function.
Returns
-------
float
The factor.
"""
return 0.5 ** nu / gamma(nu + 1)
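# Illustrative sanity values (assumptions, easy to verify by hand):
#   j_lim(0) == 0.5**0 / gamma(1) == 1.0
#   j_lim(1) == 0.5**1 / gamma(2) == 0.5
# matching the small-x limit J(nu, x) ~ (x/2)**nu / Gamma(nu + 1) used below.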
def kernel(x, nu, alt=False):
"""
Compute kernel functions for the hankel transformation.
J(nu, x) or for alt=True: J(nu, x) * sqrt(x).
Parameters
----------
x : array-like
input values.
nu : int or float
order of the bessel function.
alt : bool, optional
Whether the alternative definition of the hankel transform should be
used: J(nu, x)*sqrt(x). The default is False.
Returns
-------
array-like
The needed function for the hankel transformation.
Notes
-----
J(nu, x) is approximately (x/2)^nu / Gamma(nu+1) for small x.
"""
if alt:
if np.isclose(nu, 0):
return j0(x) * np.sqrt(x)
if np.isclose(nu, 1):
return j1(x) * np.sqrt(x)
if np.isclose(nu, 0.5): # J[0.5] = sqrt(2/(x*pi))*sin(x)
return np.sin(x) / SRPI2
if np.isclose(nu, -0.5): # J[-0.5] = sqrt(2/(x*pi))*cos(x)
return np.cos(x) / SRPI2
if np.isclose(nu, np.floor(nu)):
return jn(int(nu), x) * np.sqrt(x)
return jv(nu, x) * np.sqrt(x)
if np.isclose(nu, 0):
return j0(x)
if np.isclose(nu, 1):
return j1(x)
if np.isclose(nu, np.floor(nu)):
return jn(int(nu), x)
return jv(nu, x)
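# Illustrative consistency check (an assumption based on scipy's Bessel
# conventions, not part of the original module):
#   x = np.linspace(0.1, 5.0, 8)
#   np.allclose(kernel(x, 0.5, alt=True), np.sin(x) / SRPI2)  # half-integer shortcut
#   np.allclose(kernel(x, 0.0), j0(x))                        # integer order
# Both should hold because J(0.5, x) = sqrt(2 / (pi * x)) * sin(x).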
def safe_power(x, p):
"""
Safely calculate x**p.
Parameters
----------
x : array-like
value.
p : float
exponent.
Returns
-------
array-like
The result x**p.
"""
return np.ones_like(x) if np.isclose(p, 0) else np.array(x ** p)
import os
import numpy as np
import flopy
import warnings
from io import StringIO
from struct import pack
from tempfile import TemporaryFile
from textwrap import dedent
from flopy.utils.util_array import Util2d, Util3d, Transient2d, Transient3d
from ci_framework import base_test_dir, FlopyTestSetup
base_dir = base_test_dir(__file__, rel_path="temp", verbose=True)
def test_load_txt_free():
a = np.ones((10,), dtype=np.float32) * 250.0
fp = StringIO("10*250.0")
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.arange(10, dtype=np.int32).reshape((2, 5))
fp = StringIO(
dedent(
"""\
0 1,2,3, 4
5 6, 7, 8 9
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.ones((2, 5), dtype=np.float32)
a[1, 0] = 2.2
fp = StringIO(
dedent(
"""\
5*1.0
2.2 2*1.0, +1E-00 1.0
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(FREE)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_txt_fixed():
a = np.arange(10, dtype=np.int32).reshape((2, 5))
fp = StringIO(
dedent(
"""\
01234X
56789
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(5I1)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
fp = StringIO(
dedent(
"""\
0123X
4
5678
9
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(4I1)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.array([[-1, 1, -2, 2, -3], [3, -4, 4, -5, 5]], np.int32)
fp = StringIO(
dedent(
"""\
-1 1-2 2-3
3 -44 -55
"""
)
)
fa = Util2d.load_txt(a.shape, fp, a.dtype, "(5I2)")
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_block():
a = np.ones((2, 5), dtype=np.int32) * 4
fp = StringIO(
dedent(
"""\
1
1 2 1 5 4
"""
)
)
fa = Util2d.load_block(a.shape, fp, a.dtype)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.ones((2, 5), dtype=np.float32) * 4
a[0:2, 1:2] = 9.0
a[0, 2:4] = 6.0
fp = StringIO(
dedent(
"""\
3
1 2 1 5 4.0
1 2 2 2 9.0
1 1 3 4 6.0
"""
)
)
fa = Util2d.load_block(a.shape, fp, a.dtype)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
a = np.zeros((2, 5), dtype=np.int32)
a[0, 2:4] = 8
fp = StringIO(
dedent(
"""\
1
1 1 3 4 8
"""
)
)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fa = Util2d.load_block(a.shape, fp, a.dtype)
assert len(w) == 1
assert "blocks do not cover full array" in str(w[-1].message)
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
def test_load_bin():
model_ws = f"{base_dir}_test_load_bin"
test_setup = FlopyTestSetup(test_dirs=model_ws)
def temp_file(data):
# writable file that is destroyed as soon as it is closed
f = TemporaryFile(dir=model_ws)
f.write(data)
f.seek(0)
return f
# INTEGER
a = np.arange(3 * 4, dtype=np.int32).reshape((3, 4)) - 1
fp = temp_file(a.tobytes())
fh, fa = Util2d.load_bin((3, 4), fp, np.int32)
assert fh is None # no header_dtype
np.testing.assert_equal(fa, a)
assert fa.dtype == a.dtype
# check warning if wrong integer type is used to read 4-byte integers
# e.g. on platforms where int -> int64
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
fp.seek(0)
fh, fa = Util2d.load_bin((3, 4), fp, np.int64)
fp.close()
assert len(w) == 1
assert a.dtype == np.int32
assert fh is None # no header_dtype
np.testing.assert_equal(fa, a)
# REAL
real_header_fmt = "2i2f16s3i"
header_data = (1, 2, 3.5, 4.5, b"Hello", 6, 7, 8)
real_header = pack(real_header_fmt, *header_data)
assert len(real_header) == 44
a = np.arange(10).reshape((2, 5))
fp = temp_file(real_header + pack("10f", *list(range(10))))
fh, fa = Util2d.load_bin((2, 5), fp, np.float32, "Head")
fp.close()
for h1, h2 in zip(fh[0], header_data):
assert h1 == h2
np.testing.assert_equal(a.astype(np.float32), fa)
assert fa.dtype == np.float32
# DOUBLE PRECISION
dbl_header_fmt = "2i2d16s3i"
dbl_header = pack(dbl_header_fmt, *header_data)
assert len(dbl_header) == 52
fp = temp_file(real_header + pack("10d", *list(range(10))))
fh, fa = Util2d.load_bin((2, 5), fp, np.float64, "Head")
fp.close()
for h1, h2 in zip(fh[0], header_data):
assert h1 == h2
np.testing.assert_equal(a.astype(np.float64), fa)
assert fa.dtype == np.float64
def test_transient2d():
ml = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(ml, nlay=10, nrow=10, ncol=10, nper=3)
t2d = Transient2d(ml, (10, 10), np.float32, 10.0, "fake")
a1 = t2d.array
assert a1.shape == (3, 1, 10, 10), a1.shape
t2d.cnstnt = 2.0
assert np.array_equal(t2d.array, np.zeros((3, 1, 10, 10)) + 20.0)
t2d[0] = 1.0
t2d[2] = 999
assert np.array_equal(t2d[0].array, np.ones((ml.nrow, ml.ncol)))
assert np.array_equal(t2d[2].array, np.ones((ml.nrow, ml.ncol)) * 999)
m4d = t2d.array
t2d2 = Transient2d.from_4d(ml, "rch", {"rech": m4d})
m4d2 = t2d2.array
assert np.array_equal(m4d, m4d2)
def test_transient3d():
nlay = 3
nrow = 4
ncol = 5
nper = 5
ml = flopy.modflow.Modflow()
dis = flopy.modflow.ModflowDis(
ml, nlay=nlay, nrow=nrow, ncol=ncol, nper=nper
)
# Make a transient 3d array of a constant value
t3d = Transient3d(ml, (nlay, nrow, ncol), np.float32, 10.0, "fake")
a1 = t3d.array
assert a1.shape == (nper, nlay, nrow, ncol), a1.shape
# Make a transient 3d array with changing entries and then verify that
# they can be reproduced through indexing
a = np.arange((nlay * nrow * ncol), dtype=np.float32).reshape(
(nlay, nrow, ncol)
)
t3d = {0: a, 2: 1025, 3: a, 4: 1000.0}
t3d = Transient3d(ml, (nlay, nrow, ncol), np.float32, t3d, "fake")
assert np.array_equal(t3d[0].array, a)
assert np.array_equal(t3d[1].array, a)
assert np.array_equal(t3d[2].array, np.zeros((nlay, nrow, ncol)) + 1025.0)
assert np.array_equal(t3d[3].array, a)
assert np.array_equal(t3d[4].array, np.zeros((nlay, nrow, ncol)) + 1000.0)
# Test changing a value
t3d[0] = 1.0
assert np.array_equal(t3d[0].array, np.zeros((nlay, nrow, ncol)) + 1.0)
"""
Test Surrogates Overview
========================
"""
# Author: <NAME> <<EMAIL>>
# License: new BSD
from PIL import Image
import numpy as np
import scripts.surrogates_overview as exo
import scripts.image_classifier as imgclf
import sklearn.datasets
import sklearn.linear_model
SAMPLES = 10
BATCH = 50
SAMPLE_IRIS = False
IRIS_SAMPLES = 50000
def test_bilmey_image():
"""Tests surrogate image bLIMEy."""
# Load the image
doggo_img = Image.open('surrogates_overview/img/doggo.jpg')
doggo_array = np.array(doggo_img)
# Load the classifier
clf = imgclf.ImageClassifier()
explain_classes = [('tennis ball', 852),
('golden retriever', 207),
('Labrador retriever', 208)]
# Configure widgets to select occlusion colour, segmentation granularity
# and explained class
colour_selection = {
i: i for i in ['mean', 'black', 'white', 'randomise-patch', 'green']
}
granularity_selection = {'low': 13, 'medium': 30, 'high': 50}
# Generate explanations
blimey_image_collection = {}
for gran_name, gran_number in granularity_selection.items():
blimey_image_collection[gran_name] = {}
for col_name in colour_selection:
blimey_image_collection[gran_name][col_name] = \
exo.build_image_blimey(
doggo_array,
clf.predict_proba,
explain_classes,
explanation_size=5,
segments_number=gran_number,
occlusion_colour=col_name,
samples_number=SAMPLES,
batch_size=BATCH,
random_seed=42)
exp = []
for gran_ in blimey_image_collection:
for col_ in blimey_image_collection[gran_]:
exp.append(blimey_image_collection[gran_][col_]['surrogates'])
assert len(exp) == len(EXP_IMG)
for e, E in zip(exp, EXP_IMG):
assert sorted(list(e.keys())) == sorted(list(E.keys()))
for key in e.keys():
assert e[key]['name'] == E[key]['name']
assert len(e[key]['explanation']) == len(E[key]['explanation'])
for e_, E_ in zip(e[key]['explanation'], E[key]['explanation']):
assert e_[0] == E_[0]
assert np.allclose(e_[1], E_[1], atol=.001, equal_nan=True)
def test_bilmey_tabular():
"""Tests surrogate tabular bLIMEy."""
# Load the iris data set
iris = sklearn.datasets.load_iris()
iris_X = iris.data # [:, :2] # take the first two features only
iris_y = iris.target
iris_labels = iris.target_names
iris_feature_names = iris.feature_names
label2class = {lab: i for i, lab in enumerate(iris_labels)}
# Fit the classifier
logreg = sklearn.linear_model.LogisticRegression(C=1e5)
logreg.fit(iris_X, iris_y)
# explained class
_dtype = iris_X.dtype
explained_instances = {
'setosa': np.array([5, 3.5, 1.5, 0.25]).astype(_dtype),
'versicolor': np.array([5.5, 2.75, 4.5, 1.25]).astype(_dtype),
'virginica': np.array([7, 3, 5.5, 2.25]).astype(_dtype)
}
petal_length_idx = iris_feature_names.index('petal length (cm)')
petal_length_bins = [1, 2, 3, 4, 5, 6, 7]
petal_width_idx = iris_feature_names.index('petal width (cm)')
petal_width_bins = [0, .5, 1, 1.5, 2, 2.5]
discs_ = []
for i, ix in enumerate(petal_length_bins): # X-axis
for iix in petal_length_bins[i + 1:]:
for j, jy in enumerate(petal_width_bins): # Y-axis
for jjy in petal_width_bins[j + 1:]:
discs_.append({
petal_length_idx: [ix, iix],
petal_width_idx: [jy, jjy]
})
for inst_i in explained_instances:
for cls_i in iris_labels:
for disc_i, disc in enumerate(discs_):
inst = explained_instances[inst_i]
cls = label2class[cls_i]
exp = exo.build_tabular_blimey(
inst, cls, iris_X, iris_y, logreg.predict_proba, disc,
IRIS_SAMPLES, SAMPLE_IRIS, 42)
key = '{}&{}&{}'.format(inst_i, cls, disc_i)
exp_ = EXP_TAB[key]
assert exp['explanation'].shape[0] == exp_.shape[0]
assert np.allclose(
exp['explanation'], exp_, atol=.001, equal_nan=True)
EXP_IMG = [
{207: {'explanation': [(13, -0.24406872165780585),
(11, -0.20456180387430317),
(9, -0.1866779131424261),
(4, 0.15001224157793785),
(3, 0.11589480417160983)],
'name': 'golden retriever'},
208: {'explanation': [(13, -0.08395966359346249),
(0, -0.0644986107387837),
(9, 0.05845584633658977),
(1, 0.04369763085720947),
(11, -0.035958188394941866)],
'name': '<NAME>'},
852: {'explanation': [(13, 0.3463529698715463),
(11, 0.2678050131923326),
(4, -0.10639863421417416),
(6, 0.08345792378117327),
(9, 0.07366945242386444)],
'name': '<NAME>'}},
{207: {'explanation': [(13, -0.0624167912596456),
(7, 0.06083359545295548),
(3, 0.0495953943686462),
(11, -0.04819787147412231),
(2, -0.03858823761391199)],
'name': '<NAME>'},
208: {'explanation': [(13, -0.08408428146916162),
(7, 0.07704235920590158),
(3, 0.06646468388122273),
(11, -0.0638326572126609),
(2, -0.052621478002380796)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.35248212611685886),
(13, 0.2516925608037859),
(2, 0.13682853028454384),
(9, 0.12930134856644754),
(6, 0.1257747954095489)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.21351937934930917),
(10, 0.16933456312772083),
(11, -0.13447244552856766),
(8, 0.11058919217055371),
(2, -0.06269239798368743)],
'name': '<NAME>'},
208: {'explanation': [(8, 0.05995551486884414),
(9, -0.05375302972380482),
(11, -0.051997353324246445),
(6, 0.04213181405953071),
(2, -0.039169895361928275)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.31382219776986503),
(11, 0.24126214884275987),
(13, 0.21075924370226598),
(2, 0.11937652039885377),
(8, -0.11911265319329697)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.39254403293049134),
(9, 0.19357165018747347),
(6, 0.16592079671652987),
(0, 0.14042059731407297),
(1, 0.09793027079765507)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.19351859273276703),
(1, -0.15262967987262344),
(3, 0.12205127112235375),
(2, 0.11352141032313934),
(6, -0.11164209893429898)],
'name': '<NAME>'},
852: {'explanation': [(7, 0.17213007100844877),
(0, -0.1583030948868859),
(3, -0.13748574615069775),
(5, 0.13273283867075436),
(11, 0.12309551170070354)],
'name': '<NAME>'}},
{207: {'explanation': [(3, 0.4073533182995105),
(10, 0.20711667988142463),
(8, 0.15360813290032324),
(6, 0.1405424759832785),
(1, 0.1332920685413575)],
'name': '<NAME>'},
208: {'explanation': [(9, -0.14747910525112617),
(1, -0.13977061235228924),
(2, 0.10526833898161611),
(6, -0.10416022118399552),
(3, 0.09555992655161764)],
'name': '<NAME>'},
852: {'explanation': [(11, 0.2232260929107954),
(7, 0.21638443149433054),
(5, 0.21100464215582274),
(13, 0.145614853795006),
(1, -0.11416523431311262)],
'name': '<NAME>'}},
{207: {'explanation': [(1, 0.14700178977744183),
(0, 0.10346667279328238),
(2, 0.10346667279328238),
(7, 0.10346667279328238),
(8, 0.10162900633690726)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.10845134816658476),
(8, -0.1026920429226184),
(6, -0.10238154733842847),
(18, 0.10094164937411244),
(16, 0.08646888450232793)],
'name': '<NAME>'},
852: {'explanation': [(18, -0.20542297091894474),
(13, 0.2012751176130666),
(8, -0.19194747162742365),
(20, 0.14686930696710473),
(15, 0.11796990086271067)],
'name': '<NAME>'}},
{207: {'explanation': [(13, 0.12446259821701779),
(17, 0.11859084421095789),
(15, 0.09690553833007137),
(12, -0.08869743701731962),
(4, 0.08124900427893789)],
'name': '<NAME>'},
208: {'explanation': [(10, -0.09478194981909983),
(20, -0.09173392507039077),
(9, 0.08768898801254493),
(17, -0.07553994244536394),
(4, 0.07422905503397653)],
'name': '<NAME>'},
852: {'explanation': [(21, 0.1327882942965061),
(1, 0.1238236573086363),
(18, -0.10911712271717902),
(19, 0.09707191051320978),
(6, 0.08593672504338913)],
'name': '<NAME>'}},
{207: {'explanation': [(6, 0.14931728779865114),
(14, 0.14092073957103526),
(1, 0.11071480021464616),
(4, 0.10655287976934531),
(8, 0.08705404649152573)],
'name': '<NAME>'},
208: {'explanation': [(8, -0.12242580400886727),
(9, 0.12142729544158742),
(14, -0.1148252787068248),
(16, -0.09562322208795092),
(4, 0.09350160975513132)],
'name': '<NAME>'},
852: {'explanation': [(6, 0.04227675072263027),
(9, -0.03107924340879173),
(14, 0.028007115650713045),
(13, 0.02771190348545554),
(19, 0.02640441416071482)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.14313680656283245),
(18, 0.12866508562342843),
(8, 0.11809779264185447),
(0, 0.11286255403442104),
(2, 0.11286255403442104)],
'name': '<NAME>'},
208: {'explanation': [(9, 0.2397917428082761),
(14, -0.19435572812170654),
(6, -0.1760894833446507),
(18, -0.12243333818399058),
(15, 0.10986343675377105)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.15378038774613365),
(9, -0.14245940635481966),
(6, 0.10213601012183973),
(20, 0.1009180838986786),
(3, 0.09780065767815548)],
'name': '<NAME>'}},
{207: {'explanation': [(15, 0.06525850448807077),
(9, 0.06286791243851698),
(19, 0.055189970374185854),
(8, 0.05499197604401475),
(13, 0.04748220842936177)],
'name': '<NAME>'},
208: {'explanation': [(6, -0.31549091899770765),
(5, 0.1862302670824446),
(8, -0.17381478451341995),
(10, -0.17353516098662508),
(14, -0.13591542421754205)],
'name': '<NAME>'},
852: {'explanation': [(14, 0.2163853942943355),
(6, 0.17565046338282214),
(1, 0.12446193028474549),
(9, -0.11365789839746396),
(10, 0.09239073691962967)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.1141207265647932),
(36, -0.08861425922625768),
(30, 0.07219209872026074),
(9, -0.07150939547859836),
(38, -0.06988288637544438)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.10531073909547647),
(13, 0.08279642208039652),
(34, -0.0817952443980797),
(33, -0.08086848205765082),
(12, 0.08086848205765082)],
'name': '<NAME>'},
852: {'explanation': [(13, -0.1330452414595897),
(4, 0.09942366413042845),
(12, -0.09881995683190645),
(33, 0.09881995683190645),
(19, -0.09596925317560831)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08193926967758253),
(35, 0.06804043021426347),
(15, 0.06396269230810163),
(11, 0.062255657227065296),
(8, 0.05529200233091672)],
'name': '<NAME>'},
208: {'explanation': [(19, 0.05711957286614678),
(27, -0.050230108135410824),
(16, -0.04743034616549999),
(5, -0.046717346734255705),
(9, -0.04419100026638039)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.08390967998497496),
(30, -0.07037680222442452),
(22, 0.07029819368543713),
(8, -0.06861396187180349),
(37, -0.06662511956402824)],
'name': '<NAME>'}},
{207: {'explanation': [(19, 0.048418845359024805),
(9, -0.0423869575883795),
(30, 0.04012650790044438),
(36, -0.03787242980067195),
(10, 0.036557999380695635)],
'name': '<NAME>'},
208: {'explanation': [(10, 0.12120686823129677),
(17, 0.10196564232230493),
(7, 0.09495133975425854),
(25, -0.0759657891182803),
(2, -0.07035244568286837)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.0770578003457272),
(28, 0.0769372258280398),
(6, -0.06044725989272927),
(22, 0.05550155775286349),
(31, -0.05399028046597057)],
'name': '<NAME>'}},
{207: {'explanation': [(14, 0.05371383110181226),
(0, -0.04442539316084218),
(18, 0.042589475382826494),
(19, 0.04227647855354252),
(17, 0.041685661662754295)],
'name': '<NAME>'},
208: {'explanation': [(29, 0.14419601354489464),
(17, 0.11785174500536676),
(36, 0.1000501679652906),
(10, 0.09679790134851017),
(35, 0.08710376081189208)],
'name': '<NAME>'},
852: {'explanation': [(8, -0.02486237985832769),
(3, -0.022559886154747102),
(11, -0.021878686669239856),
(36, 0.021847953817988534),
(19, -0.018317598300716522)],
'name': '<NAME>'}},
{207: {'explanation': [(37, 0.08098729255605368),
(35, 0.06639102704982619),
(15, 0.06033721190370432),
(34, 0.05826267856117829),
(28, 0.05549505160798173)],
'name': '<NAME>'},
208: {'explanation': [(17, 0.13839012042250542),
(10, 0.11312187488346881),
(7, 0.10729071207480922),
(25, -0.09529127965797404),
(11, -0.09279834572979286)],
'name': '<NAME>'},
852: {'explanation': [(3, -0.028385651836694076),
(22, 0.023364702783498722),
(8, -0.023097812578270233),
(30, -0.022931236620034406),
(37, -0.022040170736525342)],
'name': '<NAME>'}}
]
EXP_TAB = {
'setosa&0&0': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&1': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&2': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&3': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&4': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&5': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&6': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&7': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&8': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&9': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&10': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&11': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&12': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&13': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&14': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&15': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&16': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&17': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&18': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&19': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&20': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&21': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&22': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&23': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&24': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&25': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&26': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&27': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&28': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&29': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&30': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&31': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&32': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&33': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&34': np.array([0.7974072911132786, 0.006894018772033576]),
'setosa&0&35': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&36': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&37': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&38': np.array([0.19685199412911678, 0.7845879230594391]),
'setosa&0&39': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&40': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&41': np.array([0.07476043598366156, 0.9062715528547001]),
'setosa&0&42': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&43': np.array([0.7770298852793471, 0.0294434304771479]),
'setosa&0&44': np.array([0.7936433456054741, 0.01258375207649658]),
'setosa&0&45': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&46': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&47': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&48': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&49': np.array([0.4656481363306145, 0.007982539480288167]),
'setosa&0&50': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&51': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&52': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&53': np.array([0.050316962184345455, 0.9292276112117481]),
'setosa&0&54': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&55': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&56': np.array([0.0171486447659196, 0.9632117581295891]),
'setosa&0&57': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&58': np.array([0.06151571389390039, 0.524561199322281]),
'setosa&0&59': np.array([0.4329463382004908, 0.057167210150691136]),
'setosa&0&60': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&61': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&62': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&63': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&64': np.array([0.3094460464703627, 0.11400643817329122]),
'setosa&0&65': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&66': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&67': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&68': np.array([0.029402442458921055, 0.9481684282717416]),
'setosa&0&69': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&70': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&71': np.array([0.00988785935411159, 0.9698143912008228]),
'setosa&0&72': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&73': np.array([0.009595083643662688, 0.5643652067423869]),
'setosa&0&74': np.array([0.13694026920485936, 0.36331091829858003]),
'setosa&0&75': np.array([0.0, 0.95124502153736]),
'setosa&0&76': np.array([0.0, 0.9708703761803881]),
'setosa&0&77': np.array([0.0, 0.5659706098422994]),
'setosa&0&78': np.array([0.0, 0.3962828716108186]),
'setosa&0&79': np.array([0.0, 0.2538069363248767]),
'setosa&0&80': np.array([0.0, 0.95124502153736]),
'setosa&0&81': np.array([0.0, 0.95124502153736]),
'setosa&0&82': np.array([0.0, 0.95124502153736]),
'setosa&0&83': np.array([0.0, 0.95124502153736]),
'setosa&0&84': np.array([0.0, 0.9708703761803881]),
'setosa&0&85': np.array([0.0, 0.9708703761803881]),
'setosa&0&86': np.array([0.0, 0.9708703761803881]),
'setosa&0&87': np.array([0.0, 0.5659706098422994]),
'setosa&0&88': np.array([0.0, 0.5659706098422994]),
'setosa&0&89': np.array([0.0, 0.3962828716108186]),
'setosa&0&90': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&91': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&92': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&93': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&94': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&95': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&96': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&97': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&98': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&99': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&100': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&101': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&102': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&103': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&104': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&105': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&106': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&107': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&108': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&109': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&110': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&111': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&112': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&113': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&114': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&115': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&116': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&117': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&118': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&119': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&120': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&121': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&122': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&123': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&124': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&125': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&126': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&127': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&128': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&129': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&130': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&131': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&132': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&133': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&134': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&135': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&136': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&137': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&138': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&139': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&140': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&141': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&142': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&143': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&144': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&145': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&146': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&147': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&148': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&149': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&150': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&151': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&152': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&153': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&154': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&155': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&156': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&157': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&158': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&159': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&160': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&161': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&162': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&163': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&164': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&165': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&166': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&167': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&168': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&169': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&170': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&171': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&172': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&173': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&174': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&175': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&176': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&177': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&178': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&179': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&180': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&181': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&182': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&183': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&184': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&185': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&186': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&187': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&188': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&189': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&190': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&191': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&192': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&193': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&194': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&195': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&196': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&197': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&198': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&199': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&200': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&201': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&202': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&203': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&204': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&205': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&206': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&207': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&208': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&209': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&210': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&211': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&212': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&213': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&214': np.array([0.9706534384443797, 0.007448195602953232]),
'setosa&0&215': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&216': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&217': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&218': np.array([0.7431524521056113, 0.24432235603856345]),
'setosa&0&219': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&220': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&221': np.array([0.4926091071260067, 0.49260910712601286]),
'setosa&0&222': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&223': np.array([0.9550700362273441, 0.025428672111930138]),
'setosa&0&224': np.array([0.9672121512728677, 0.012993005706020341]),
'setosa&0&225': np.array([0.19685199412911678, 0.7845879230594391]),
import numpy as np
import os
from scipy.spatial import ConvexHull
from sklearn.cluster import KMeans
from sklearn.decomposition import PCA
import matplotlib
matplotlib.use('Agg')
from matplotlib import pyplot as plt
import matplotlib.cm as cm
class Generator(object):
def __init__(
self, num_examples_train, num_examples_test, num_clusters,
dataset_path, batch_size
):
self.num_examples_train = num_examples_train
self.num_examples_test = num_examples_test
self.batch_size = batch_size
self.dataset_path = dataset_path
self.input_size = 2
self.task = 'kmeans'
# clusters_train = [4, 8, 16]
clusters_train = [num_clusters]
clusters_test = [num_clusters]
self.clusters = {'train': clusters_train, 'test': clusters_test}
self.data = {'train': {}, 'test': {}}
def load_dataset(self):
for mode in ['train', 'test']:
for cl in self.clusters[mode]:
path = os.path.join(self.dataset_path, mode + str(cl))
path = path + 'kmeans_gauss.npz'
if os.path.exists(path):
print('Reading {} dataset for {} scales'
.format(mode, cl))
npz = np.load(path)
self.data[mode][cl] = {'x': npz['x'], 'y': npz['y']}
else:
x, y = self.create(clusters=cl, mode=mode)
self.data[mode][cl] = {'x': x, 'y': y}
# save
np.savez(path, x=x, y=y)
print('Created {} dataset for {} scales'
.format(mode, cl))
def get_batch(self, batch=0, clusters=3, mode="train"):
bs = self.batch_size
batch_x = self.data[mode][clusters]['x'][batch * bs: (batch + 1) * bs]
batch_y = self.data[mode][clusters]['y'][batch * bs: (batch + 1) * bs]
return batch_x, batch_y
def compute_length(self, clusters):
length = np.random.randint(10 * clusters, 10 * clusters + 1)
max_length = 10 * clusters
return length, max_length
def kmeans_example(self, length, clusters):
points = np.random.uniform(0, 1, [length, 2])
kmeans = KMeans(n_clusters=clusters).fit(points)
labels = kmeans.labels_.astype(int)
target = np.array(labels)
# target = np.zeros([length])
return points, target
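# Illustrative call (the sizes below are assumptions mirroring
# compute_length, not values used in training):
#   points, target = self.kmeans_example(length=30, clusters=3)
# gives a (30, 2) array of uniform points and a length-30 integer label
# vector from fitting sklearn's KMeans with 3 clusters.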
def pca_example(self, length):
points = np.random.uniform(0, 1, [length, 2])
ind1 = np.where(points[:, 0] < 0.5)[0]
target = np.zeros([length])
target[ind1] = 1
return points, target
def gaussian_example(self, length, clusters):
centers = np.random.uniform(0, 1, [clusters, 2])
per_cl = length // clusters
Pts = []
cov = 0.001 * np.eye(2, 2)
target = np.zeros([length])
for c in range(clusters):
points = np.random.multivariate_normal(centers[c], cov, per_cl)
target[c * per_cl: (c + 1) * per_cl] = c
Pts.append(points)
points = np.reshape(Pts, [-1, 2])
rand_perm = np.random.permutation(length)
points = points[rand_perm]
target = target[rand_perm]
return points, target
def plot_example(self, x, y, clusters, length):
plt.figure(0)
plt.clf()
colors = cm.rainbow(np.linspace(0, 1, clusters))
for c in range(clusters):
ind = np.where(y == c)[0]
plt.scatter(x[ind, 0], x[ind, 1], c=colors[c])
path = '/home/anowak/DynamicProgramming/DP/plots/example.png'
plt.savefig(path)
def create(self, clusters=3, mode='train'):
if mode == 'train':
num_examples = self.num_examples_train
else:
num_examples = self.num_examples_test
_, max_length = self.compute_length(clusters)
x = -1 * np.ones([num_examples, max_length, self.input_size])
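# Hedged usage sketch (added for illustration, not part of the original file).
# The constructor values and dataset path below are assumptions; only the
# method names and signatures come from the Generator class above.
#
# gen = Generator(num_examples_train=1000, num_examples_test=100,
#                 num_clusters=4, dataset_path='/tmp/clustering/', batch_size=32)
# gen.load_dataset()  # creates <mode><clusters>kmeans_gauss.npz files if missing
# batch_x, batch_y = gen.get_batch(batch=0, clusters=4, mode='train')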
# coding: utf-8
# Copyright (c) Max-Planck-Institut für Eisenforschung GmbH - Computational Materials Design (CM) Department
# Distributed under the terms of "New BSD License", see the LICENSE file.
from pyiron_base._tests import PyironTestCase
from pyiron_continuum.mesh import (
RectMesh,
callable_to_array,
takes_scalar_field,
takes_vector_field,
has_default_accuracy
)
import numpy as np
import pyiron_continuum.mesh as mesh_mod
class TestDecorators(PyironTestCase):
@classmethod
def setUpClass(cls):
super().setUpClass()
cls.mesh = RectMesh([1, 2, 3], [30, 20, 10])
@staticmethod
def give_vector(mesh):
return np.ones(mesh.shape)
@staticmethod
def give_scalar(mesh):
return np.ones(mesh.divisions)
def test_callable_to_array(self):
scalar_field = self.give_scalar(self.mesh)
@callable_to_array
def method(mesh, callable_or_array, some_kwarg=1):
return callable_or_array + some_kwarg
self.assertTrue(np.allclose(scalar_field + 1, method(self.mesh, self.give_scalar)), msg="Accept functions")
self.assertTrue(np.allclose(scalar_field + 1, method(self.mesh, scalar_field)), msg="Accept arrays")
self.assertTrue(np.allclose(scalar_field + 2, method(self.mesh, self.give_scalar, some_kwarg=2)),
msg="Pass kwargs")
def test_takes_scalar_field(self):
scalar_field = self.give_scalar(self.mesh)
@takes_scalar_field
def method(mesh, scalar_field, some_kwarg=1):
return some_kwarg
self.assertEqual(1, method(self.mesh, scalar_field), msg="Accept arrays")
self.assertEqual(2, method(self.mesh, scalar_field, some_kwarg=2), msg="Pass kwargs")
self.assertEqual(1, method(self.mesh, scalar_field.tolist()), msg="Should work with listlike stuff too")
self.assertRaises(TypeError, method, self.mesh, np.ones(2)) # Reject the wrong shape
self.assertRaises(TypeError, method, self.mesh, "not even numeric") # Duh
def test_takes_vector_field(self):
vector_field = self.give_vector(self.mesh)
@takes_vector_field
def method(mesh, vector_field, some_kwarg=1):
return some_kwarg
self.assertEqual(1, method(self.mesh, vector_field), msg="Accept arrays")
self.assertEqual(2, method(self.mesh, vector_field, some_kwarg=2), msg="Pass kwargs")
self.assertEqual(1, method(self.mesh, vector_field.tolist()), msg="Should work with listlike stuff too")
self.assertRaises(TypeError, method, self.mesh, np.ones(2)) # Reject the wrong shape
self.assertRaises(TypeError, method, self.mesh, "not even numeric") # Duh
def test_has_default_accuracy(self):
some_field = self.give_vector(self.mesh)
@has_default_accuracy
def method(mesh, field, accuracy=None, some_kwarg=1):
return accuracy + some_kwarg
mesh = RectMesh(1, 1, accuracy=2)
self.assertEqual(3, method(mesh, some_field), 'Use mesh accuracy')
self.assertEqual(0, method(mesh, some_field, accuracy=4, some_kwarg=-4), 'Use passed accuracy')
self.assertRaises(ValueError, method, mesh, some_field, accuracy=1) # Even accuracy only
self.assertRaises(ValueError, method, mesh, some_field, accuracy=0) # Positive accuracy only
@has_default_accuracy
def method(mesh, field, accuracy_not_a_kwarg=42):
return None
self.assertRaises(TypeError, method, mesh, some_field) # Methods need to define accuracy
class TestRectMesh(PyironTestCase):
@staticmethod
def scalar_sines(mesh):
L = mesh.lengths
omega = (2 * np.pi / L).reshape(len(L), *[1] * mesh.dim)
return np.prod(np.sin(omega * mesh.mesh), axis=0)
def vector_sines(self, mesh):
scalar = self.scalar_sines(mesh)
return np.array(mesh.dim * [scalar])
@property
def docstring_module(self):
return mesh_mod
def test_input(self):
L = np.pi
n = 2
mesh = RectMesh(L, n)
self.assertTrue(np.allclose(mesh.bounds, [[0, L]]),
msg='Expected float to be converted to (1,2) array.')
self.assertTrue(np.all(mesh.divisions == [n]),
msg='Expected int to be converted to (1,) array.')
mesh = RectMesh([L, L], n)
self.assertTrue(np.allclose(mesh.bounds, [[0, L], [0, L]]),
msg='Expected 1D bounds to be interpreted as endpoints of 2D bounds.')
self.assertTrue(np.all(mesh.divisions == [n, n]),
msg='Expected divisions to be extended to match bounds.')
mesh = RectMesh([[0, L], [L / 2, L]], [n, 2 * n])
self.assertTrue(np.allclose(mesh.bounds, [[0, L], [L / 2, L]]),
msg='Expected float to be converted to (1,2) array.')
self.assertTrue(np.all(mesh.divisions == [n, 2 * n]),
msg='Expected divisions to be preserved.')
bounds = np.array([1, 2, 3, 4])
self.assertAlmostEqual(
bounds.prod(),
RectMesh(bounds=bounds).volume,
msg="Four dimensions should be ok, and hyper-volume should be a product of side lengths"
)
self.assertRaises(ValueError, RectMesh, [[0, 1, 2]], 1) # Bounds can't exceed shape (n, 2)
self.assertRaises(ValueError, RectMesh, [[1, 1 + 1e-12]]) # Bounds must enclose a space noticeably > 0
self.assertRaises(ValueError, RectMesh, 1, [1, 1]) # Divisions must be a single value or match bounds
self.assertRaises(TypeError, RectMesh, 1, np.pi) # Only int-like divisions
self.assertRaises(TypeError, RectMesh, 1, [[1]]) # Or lists of ints, but nothing else like lists of lists
def test_construction(self):
L = np.pi
n = 2
mesh = RectMesh(L, n)
self.assertTrue(np.allclose(mesh.mesh, [0, L / 2]), msg='1D should get simplified')
self.assertAlmostEqual(mesh.steps, L / 2, msg='1D should get simplified')
mesh.simplify_1d = False
self.assertTrue(np.allclose(mesh.steps, [L / 2]), msg='1D should stay list-like')
mesh = RectMesh([L, 2 * L], n)
self.assertTrue(
np.allclose(
mesh.mesh,
[
[
[0, 0],
[L / 2, L / 2],
],
[
[0, L],
[0, L],
]
]
)
)
self.assertTrue(np.allclose(mesh.steps, [L / 2, L]))
with self.assertRaises(ValueError):
RectMesh([[1, 1+1E-12]]) # Mesh needs finite length
def test_update(self):
L = np.pi
n = 2
mesh = RectMesh([L, L], n)
init_mesh = np.array(mesh.mesh)
mesh.bounds = [2 * L, 2 * L]
self.assertTrue(np.allclose(mesh.mesh, 2 * init_mesh), msg='Should have doubled extent')
mesh.divisions = 2 * n
self.assertEqual(2 * n, len(mesh.mesh[0]), msg='Should have doubled sampling density')
def test_length(self):
mesh = RectMesh([[0, 1], [1, 3]], 2)
self.assertAlmostEqual(1, mesh.lengths[0], msg='Real-space length in x-direction should be 1')
self.assertAlmostEqual(2, mesh.lengths[1], msg='Real-space length in y-direction should be 3-1=2')
def test_derivative(self):
L = np.pi
omega = 2 * np.pi / L
mesh = RectMesh(L, 100)
x = mesh.mesh[0]
def solution(order):
"""derivatives of sin(omega * x)"""
fnc = np.cos(omega * x) if order % 2 == 1 else np.sin(omega * x)
sign = -1 if order % 4 in [2, 3] else 1
return sign * omega**order * fnc
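# The order-th derivative of sin(omega * x) is omega**order times one of
# cos, -sin, -cos, sin (cycling with period 4); the two branches above pick
# the trig function (odd orders -> cos) and the sign (orders 2 and 3 mod 4
# -> negative) accordingly.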
for order in [1, 2, 3, 4]:
errors = [
np.linalg.norm(solution(order) - mesh.derivative(self.scalar_sines, order=order, accuracy=accuracy))
for accuracy in [2, 4, 6]
]
self.assertTrue(np.all(np.diff(errors) < 0), msg="Increasing accuracy should give decreasing error.")
self.assertRaises(ValueError, mesh.derivative, mesh.mesh[0], order=1, accuracy=3) # No odd accuracies
def test_grad(self):
L = np.pi
omega = 2 * np.pi / L
mesh = RectMesh([L, L, L], [100, 200, 300])
x, y, z = mesh.mesh
solution = np.array([
omega * np.cos(x * omega) * np.sin(y * omega) * np.sin(z * omega),
omega * np.sin(x * omega) * np.cos(y * omega) * np.sin(z * omega),
omega * np.sin(x * omega) * np.sin(y * omega) * np.cos(z * omega),
])
# -*- coding: utf-8 -*-
# Author: <NAME>
# License: MIT
import os
import numpy as np
import scipy as sc
from ..tools import femio
from ..basefem import BaseFEM, get_file_path
class Periodic3D(BaseFEM):
"""A class for a finite element model of a 3D bi-periodic
medium using Gmsh_ and GetDP_.
.. _Gmsh:
http://gmsh.info/
.. _GetDP:
http://getdp.info/
"""
def __init__(
self,
analysis="direct",
A=1,
lambda0=1,
theta_deg=0.0,
phi_deg=0,
psi_deg=0,
period_x=1,
period_y=1,
thick_L1=0.1, #: flt: thickness layer 1 (superstrate)
thick_L2=0.1, #: flt: thickness layer 2
thick_L3=0.1, #: flt: thickness layer 3 (interp)
thick_L4=0.1, #: flt: thickness layer 4
thick_L5=0.1, #: flt: thickness layer 5
thick_L6=0.1, #: flt: thickness layer 6 (substrate)
PML_top=1.0, # : flt: thickness pml top
PML_bot=1.0, # : flt: thickness pml bot
a_pml=1, #: flt: PMLs parameter, real part
b_pml=1, #: flt: PMLs parameter, imaginary part
eps_L1=1 - 0 * 1j, #: flt: permittivity layer 1 (superstrate)
eps_L2=1 - 0 * 1j, #: flt: permittivity layer 2
eps_L3=1 - 0 * 1j, #: flt: permittivity layer 3
eps_L4=1 - 0 * 1j, #: flt: permittivity layer 4
eps_L5=1 - 0 * 1j, #: flt: permittivity layer 5
eps_L6=1 - 0 * 1j, #: flt: permittivity layer 6 (substrate)
el_order=1,
):
super().__init__()
self.dir_path = get_file_path(__file__)
self.analysis = analysis
self.A = A
self.lambda0 = lambda0
self.theta_deg = theta_deg
self.phi_deg = phi_deg
self.psi_deg = psi_deg
# opto-geometric parameters -------------------------------------------
#: flt: periods
self.period_x = period_x
self.period_y = period_y
self.thick_L1 = thick_L1 #: flt: thickness layer 1 (superstrate)
self.thick_L2 = thick_L2 #: flt: thickness layer 2
self.thick_L3 = thick_L3 #: flt: thickness layer 3 (interp)
self.thick_L4 = thick_L4 #: flt: thickness layer 4
self.thick_L5 = thick_L5 #: flt: thickness layer 5
self.thick_L6 = thick_L6 #: flt: thickness layer 6 (substrate)
self.PML_top = PML_top #: flt: thickness pml top
self.PML_bot = PML_bot #: flt: thickness pml bot
#: flt: PMLs parameter, real part
self.a_pml = a_pml #: flt: PMLs parameter, real part
self.b_pml = b_pml #: flt: PMLs parameter, imaginary part
self.eps_L1 = eps_L1 #: flt: permittivity layer 1 (superstrate)
self.eps_L2 = eps_L2 #: flt: permittivity layer 2
self.eps_L3 = eps_L3 #: flt: permittivity layer 3
self.eps_L4 = eps_L4 #: flt: permittivity layer 4
self.eps_L5 = eps_L5 #: flt: permittivity layer 5
self.eps_L6 = eps_L6 #: flt: permittivity layer 6 (substrate)
self.el_order = el_order
self.bg_mesh = False
# 2 #: design domain number (check .geo/.pro files)
self.dom_des = 5000
# postprocessing -------------------------------------------------
#: int: number of diffraction orders
#: for postprocessing diffraction efficiencies
self.N_d_order = 0
self.orders = False
self.cplx_effs = False
self.eff_verbose = False
#: int: number of x integration points
#: for postprocessing diffraction efficiencies
self.ninterv_integ = 60
#: int: number of z slices points
#: for postprocessing diffraction efficiencies
self.nb_slice = 3
#: flt: such that `scan_dist = min(h_sup, hsub)/scan_dist_ratio`
self.scan_dist_ratio = 5
self.dim = 3
self.adjoint = False
@property
def celltype(self):
return "tetra"
@property
def zmin_interp(self):
return self.thick_L5 + self.thick_L4
@property
def zmax_interp(self):
return self.zmin_interp + self.thick_L3
@property
def scan_dist(self):
return min(self.thick_L1, self.thick_L6) / self.scan_dist_ratio
@property
def theta_0(self):
return np.pi / 180.0 * (self.theta_deg)
@property
def phi_0(self):
return np.pi / 180.0 * (self.phi_deg)
@property
def psi_0(self):
return np.pi / 180.0 * (self.psi_deg)
@property
def corners_des(self):
return (
-self.period_x / 2,
+self.period_x / 2,
-self.period_y / 2,
+self.period_y / 2,
+self.zmin_interp,
+self.zmax_interp,
)
# @property
# def N_d_order(self):
# N = self.d/self.lambda0 * (np.sqrt([self.eps_L1, self.eps_L6]) - np.sin(self.theta))
# return int(max(N))
def _make_param_dict(self):
param_dict = super()._make_param_dict()
layer_diopter = self.ancillary_problem()
nb_layer = 6
layer = []
for k1 in range(0, nb_layer):
layer.append({})
layer[0]["epsilon"] = self.eps_L1
layer[1]["epsilon"] = self.eps_L2
layer[2]["epsilon"] = self.eps_L3
layer[3]["epsilon"] = self.eps_L4
layer[4]["epsilon"] = self.eps_L5
layer[5]["epsilon"] = self.eps_L6
layer[0]["thickness"] = self.thick_L1
layer[1]["thickness"] = self.thick_L2
layer[2]["thickness"] = self.thick_L3
layer[3]["thickness"] = self.thick_L4
layer[4]["thickness"] = self.thick_L5
layer[5]["thickness"] = self.thick_L6
layer[nb_layer - 2]["hh"] = 0
layer[nb_layer - 1]["hh"] = (
layer[nb_layer - 2]["hh"] - layer[nb_layer - 1]["thickness"]
)
for k in range(nb_layer - 3, -1, -1):
layer[k]["hh"] = layer[k + 1]["hh"] + layer[k + 1]["thickness"]
for i5 in range(0, nb_layer):
param_dict["thick_L" + str(i5 + 1)] = layer[i5]["thickness"]
param_dict["hh_L" + str(i5 + 1)] = layer[i5]["hh"]
param_dict["PML_bot_hh"] = layer[-1]["hh"] - self.PML_bot
param_dict["PML_top_hh"] = layer[0]["hh"] + self.thick_L1
param_dict["Expj_subs_re"] = layer_diopter[1]["Psi"][0].real
param_dict["Exmj_subs_re"] = layer_diopter[1]["Psi"][1].real
param_dict["Eypj_subs_re"] = layer_diopter[1]["Psi"][2].real
param_dict["Eymj_subs_re"] = layer_diopter[1]["Psi"][3].real
param_dict["Ezpj_subs_re"] = layer_diopter[1]["Psi"][4].real
param_dict["Ezmj_subs_re"] = layer_diopter[1]["Psi"][5].real
param_dict["Expj_subs_im"] = layer_diopter[1]["Psi"][0].imag
param_dict["Exmj_subs_im"] = layer_diopter[1]["Psi"][1].imag
param_dict["Eypj_subs_im"] = layer_diopter[1]["Psi"][2].imag
param_dict["Eymj_subs_im"] = layer_diopter[1]["Psi"][3].imag
param_dict["Ezpj_subs_im"] = layer_diopter[1]["Psi"][4].imag
param_dict["Ezmj_subs_im"] = layer_diopter[1]["Psi"][5].imag
param_dict["gamma_subs_re"] = layer_diopter[1]["gamma"].real
param_dict["gamma_subs_im"] = layer_diopter[1]["gamma"].imag
param_dict["Expj_super_re "] = layer_diopter[0]["Psi"][0].real
param_dict["Exmj_super_re "] = layer_diopter[0]["Psi"][1].real
param_dict["Eypj_super_re "] = layer_diopter[0]["Psi"][2].real
param_dict["Eymj_super_re "] = layer_diopter[0]["Psi"][3].real
param_dict["Ezpj_super_re "] = layer_diopter[0]["Psi"][4].real
param_dict["Ezmj_super_re "] = layer_diopter[0]["Psi"][5].real
param_dict["Expj_super_im "] = layer_diopter[0]["Psi"][0].imag
param_dict["Exmj_super_im "] = layer_diopter[0]["Psi"][1].imag
param_dict["Eypj_super_im "] = layer_diopter[0]["Psi"][2].imag
param_dict["Eymj_super_im "] = layer_diopter[0]["Psi"][3].imag
param_dict["Ezpj_super_im "] = layer_diopter[0]["Psi"][4].imag
param_dict["Ezmj_super_im "] = layer_diopter[0]["Psi"][5].imag
param_dict["gamma_super_re "] = layer_diopter[0]["gamma"].real
param_dict["gamma_super_im "] = layer_diopter[0]["gamma"].imag
return param_dict
def compute_solution(self, **kwargs):
res_list = ["helmholtz_vector", "helmholtz_vector_modal"]
return super().compute_solution(res_list=res_list)
def postpro_absorption(self):
self.postprocess("postopQ")
path = self.tmppath("Q.txt")
Q = np.loadtxt(path, skiprows=0, usecols=[1]) + 1j * np.loadtxt(
path, skiprows=0, usecols=[1]
)
return Q.real
def _postpro_fields_cuts(self):
npt_integ = self.ninterv_integ + 1
nb_slice = self.nb_slice
path_t = self.tmppath("Etot_XYcut.out")
path_r = self.tmppath("Edif_XYcut.out")
if os.path.isfile(path_t):
os.remove(path_t)
if os.path.isfile(path_r):
os.remove(path_r)
self.postprocess("Ed" + " -order 2")
Ex_t2, Ey_t2, Ez_t2 = femio.load_table_vect(path_t)
Ex_t2 = Ex_t2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ey_t2 = Ey_t2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ez_t2 = Ez_t2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ex_r2, Ey_r2, Ez_r2 = femio.load_table_vect(path_r)
Ex_r2 = Ex_r2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ey_r2 = Ey_r2.reshape(npt_integ, npt_integ, nb_slice, order="F")
Ez_r2 = Ez_r2.reshape(npt_integ, npt_integ, nb_slice, order="F")
return Ex_r2, Ey_r2, Ez_r2, Ex_t2, Ey_t2, Ez_t2
def postpro_epsilon(self):
self.postprocess("postop_epsilon" + " -order 2")
def diffraction_efficiencies(self):
Ex_r2, Ey_r2, Ez_r2, Ex_t2, Ey_t2, Ez_t2 = self._postpro_fields_cuts()
npt_integ = self.ninterv_integ + 1
# print('gmsh cuts done !')
period_x, period_y = self.period_x, self.period_y
N_d_order = self.N_d_order
lambda0 = self.lambda0
theta_0 = self.theta_0
phi_0 = self.phi_0
nb_slice = self.nb_slice
x_t = np.linspace(-period_x / 2, period_x / 2, npt_integ)
# Copyright 2018-2020 Xanadu Quantum Technologies Inc.
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
# http://www.apache.org/licenses/LICENSE-2.0
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Unit tests for the :func:`pennylane.math.is_independent` function.
"""
import pytest
import numpy as np
from pennylane import numpy as pnp
import pennylane as qml
from pennylane.math import is_independent
from pennylane.math.is_independent import _get_random_args
try:
import jax
have_jax = True
except ImportError:
have_jax = False
try:
import torch
have_torch = True
except ImportError:
have_torch = False
try:
import tensorflow as tf
have_tf = True
except ImportError:
have_tf = False
dependent_lambdas = [
lambda x: x,
lambda x: (x, x),
lambda x: [x] * 10,
lambda x: (2.0 * x, x),
lambda x: 0.0 * x,
lambda x, y: (0.0 * x, 0.0 * y),
lambda x: x if x > 0 else 0.0, # RELU for x>0 is okay numerically
lambda x: x if x > 0 else 0.0, # RELU for x<0 is okay numerically
lambda x: 1.0 if abs(x) < 1e-5 else 0.0, # delta for x=0 is okay numerically
lambda x: x if abs(x) < 1e-5 else 0.0, # x*delta for x=0 is okay
lambda x: 1.0 if x > 0 else 0.0, # Heaviside is okay numerically
lambda x: 1.0 if x > 0 else 0.0, # Heaviside is okay numerically
lambda x: qml.math.log(1 + qml.math.exp(100.0 * x)) / 100.0, # Softplus is okay
lambda x: qml.math.log(1 + qml.math.exp(100.0 * x)) / 100.0, # Softplus is okay
]
args_dependent_lambdas = [
(np.array(1.2),),
(2.19,),
(2.19,),
(1.0,),
(np.ones((2, 3)),),
(np.array([2.0, 5.0]), 1.2),
(1.6,),
(-2.0,),
(0.0,),
(0.0,),
(-2.0,),
(2.0,),
(-0.2,),
(0.9,),
]
lambdas_expect_torch_fail = [
False,
False,
False,
False,
True,
True,
False,
False,
False,
True,
False,
False,
False,
False,
]
overlooked_lambdas = [
lambda x: 1.0 if abs(x) < 1e-5 else 0.0, # delta for x!=0 is not okay
lambda x: 1.0 if abs(x) < 1e-5 else 0.0, # delta for x!=0 is not okay
]
args_overlooked_lambdas = [
(2.0,),
(-2.0,),
]
class TestIsIndependentAutograd:
"""Tests for is_independent, which tests a function to be
independent of its inputs, using Autograd."""
interface = "autograd"
@pytest.mark.parametrize("num", [0, 1, 2])
@pytest.mark.parametrize(
"args",
[
(0.2,),
(1.1, 3.2, 0.2),
(np.array([[0, 9.2], [-1.2, 3.2]]),),
(0.3, [1, 4, 2], np.array([0.3, 9.1])),
],
)
@pytest.mark.parametrize("bounds", [(-1, 1), (0.1, 1.0211)])
def test_get_random_args(self, args, num, bounds):
"""Tests the utility ``_get_random_args`` using a fixed seed."""
seed = 921
rnd_args = _get_random_args(args, self.interface, num, seed, bounds)
assert len(rnd_args) == num
np.random.seed(seed)
for _rnd_args in rnd_args:
expected = tuple(
np.random.random(np.shape(arg)) * (bounds[1] - bounds[0]) + bounds[0]
for arg in args
)
assert all(np.allclose(_exp, _rnd) for _exp, _rnd in zip(expected, _rnd_args))
dev = qml.device("default.qubit", wires=1)
@qml.qnode(dev, interface=interface)
def const_circuit(x, y):
qml.RX(0.1, wires=0)
return qml.expval(qml.PauliZ(0))
constant_functions = [
const_circuit,
lambda x: np.arange(20).reshape((2, 5, 2)),
lambda x: (np.ones(3), -0.1),
qml.jacobian(lambda x, y: 4 * x - 2.1 * y, argnum=[0, 1]),
]
args_constant = [
(0.1, np.array([-2.1, 0.1])),
(1.2,),
(np.ones((2, 3)),),
(np.ones((3, 8)) * 0.1, -0.2 * np.ones((3, 8))),
]
# Copyright © 2019. <NAME>. All rights reserved.
import numpy as np
import pandas as pd
from collections import OrderedDict
import math
import warnings
from sklearn.discriminant_analysis import LinearDiscriminantAnalysis as LDA
from sklearn.neighbors import NearestNeighbors
from sklearn.metrics import silhouette_score
from scipy.spatial.distance import cdist
from scipy.stats import chi2
from scipy.ndimage.filters import gaussian_filter1d
from .utils import Epoch
from .utils import printProgressBar, get_spike_positions
def calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features, pc_feature_ind, params,
duration, channel_locations=None, cluster_ids=None, epochs=None, seed=None, verbose=True):
""" Calculate metrics for all units on one probe
Inputs:
------
spike_times : numpy.ndarray (num_spikes x 0)
Spike times in seconds (same timebase as epochs)
spike_clusters : numpy.ndarray (num_spikes x 0)
Cluster IDs for each spike time
pc_features : numpy.ndarray (num_spikes x num_pcs x num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind : numpy.ndarray (num_units x num_channels)
Channel indices of PCs for each unit
epochs : list of Epoch objects
contains information on Epoch start and stop times
duration : length of recording (seconds)
channel_locations : numpy.ndarray (num_channels x 2)
Channel locations (if None, a linear geometry is assumed)
params : dict of parameters
'isi_threshold' : minimum time for isi violations
'min_isi'
'num_channels_to_compare'
'max_spikes_for_unit'
'max_spikes_for_nn'
'n_neighbors'
'drift_metrics_interval_s'
'drift_metrics_min_spikes_per_interval'
Outputs:
--------
metrics : pandas.DataFrame
one column for each metric
one row per unit per epoch
"""
metrics = pd.DataFrame()
if epochs is None:
epochs = [Epoch('complete_session', 0, np.inf)]
total_units = np.max(spike_clusters) + 1
total_epochs = len(epochs)
for epoch in epochs:
in_epoch = np.logical_and(spike_times >= epoch.start_time, spike_times < epoch.end_time)
spikes_in_epoch = np.sum(in_epoch)
spikes_for_nn = min(spikes_in_epoch, params['max_spikes_for_nn'])
spikes_for_silhouette = min(spikes_in_epoch, params['n_silhouette'])
print("Calculating isi violations")
isi_viol = calculate_isi_violations(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
isi_threshold=params['isi_threshold'],
min_isi=params['min_isi'],
duration=duration,
verbose=verbose)
print("Calculating presence ratio")
presence_ratio = calculate_presence_ratio(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
duration=duration, verbose=verbose)
print("Calculating firing rate")
firing_rate = calculate_firing_rates(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units, duration=duration, verbose=verbose)
print("Calculating amplitude cutoff")
amplitude_cutoff = calculate_amplitude_cutoff(spike_clusters=spike_clusters[in_epoch],
amplitudes=amplitudes[in_epoch],
total_units=total_units,
verbose=verbose)
print("Calculating PC-based metrics")
isolation_distance, l_ratio, d_prime, nn_hit_rate, nn_miss_rate = \
calculate_pc_metrics(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
num_channels_to_compare=params['num_channels_to_compare'],
max_spikes_for_cluster=params['max_spikes_for_unit'],
spikes_for_nn=spikes_for_nn,
n_neighbors=params['n_neighbors'],
channel_locations=
channel_locations,
seed=seed,
verbose=verbose)
print("Calculating silhouette score")
silhouette_score = calculate_silhouette_score(spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
spikes_for_silhouette=spikes_for_silhouette,
seed=seed, verbose=verbose)
print("Calculating drift metrics")
max_drift, cumulative_drift = calculate_drift_metrics(spike_times=spike_times[in_epoch],
spike_clusters=spike_clusters[in_epoch],
total_units=total_units,
pc_features=pc_features[in_epoch, :, :],
pc_feature_ind=pc_feature_ind,
interval_length=params['drift_metrics_interval_s'],
min_spikes_per_interval=
params['drift_metrics_min_spikes_per_interval'],
channel_locations=
channel_locations,
verbose=verbose)
if cluster_ids is None:
cluster_ids_out = np.arange(total_units)
else:
cluster_ids_out = cluster_ids
epoch_name = [epoch.name] * len(cluster_ids_out)
metrics = pd.concat((metrics, pd.DataFrame(data=OrderedDict((('cluster_id', cluster_ids_out),
('firing_rate', firing_rate),
('presence_ratio', presence_ratio),
('isi_violation', isi_viol),
('amplitude_cutoff', amplitude_cutoff),
('isolation_distance', isolation_distance),
('l_ratio', l_ratio),
('d_prime', d_prime),
('nn_hit_rate', nn_hit_rate),
('nn_miss_rate', nn_miss_rate),
('silhouette_score', silhouette_score),
('max_drift', max_drift),
('cumulative_drift', cumulative_drift),
('epoch_name', epoch_name),
)))))
return metrics
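# Hedged usage sketch (added for illustration; not part of the original file).
# File names, the sampling rate, and the params values below are assumptions --
# only the parameter keys and the call signature come from the code above.
#
# spike_times = np.load('spike_times.npy') / 30000.  # samples -> seconds (assumed rate)
# spike_clusters = np.load('spike_clusters.npy')
# amplitudes = np.load('amplitudes.npy')
# pc_features = np.load('pc_features.npy')
# pc_feature_ind = np.load('pc_feature_ind.npy')
# params = {'isi_threshold': 0.0015, 'min_isi': 0.0, 'num_channels_to_compare': 7,
#           'max_spikes_for_unit': 500, 'max_spikes_for_nn': 10000, 'n_neighbors': 4,
#           'n_silhouette': 10000, 'drift_metrics_interval_s': 51,
#           'drift_metrics_min_spikes_per_interval': 10}
# metrics = calculate_metrics(spike_times, spike_clusters, amplitudes, pc_features,
#                             pc_feature_ind, params, duration=spike_times.max())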
# ===============================================================
# HELPER FUNCTIONS TO LOOP THROUGH CLUSTERS:
# ===============================================================
def calculate_isi_violations(spike_times, spike_clusters, total_units, isi_threshold, min_isi, duration,
spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
viol_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
viol_rates[cluster_id], num_violations = isi_violations(spike_times[for_this_cluster],
duration=duration,
isi_threshold=isi_threshold,
min_isi=min_isi)
return viol_rates
def calculate_presence_ratio(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None,
verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
ratios = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
ratios[cluster_id] = presence_ratio(spike_times[for_this_cluster],
duration=duration)
return ratios
def calculate_num_spikes(spike_times, spike_clusters, total_units, spike_cluster_subset=None, verbose=True):
num_spikes = np.zeros((total_units,))
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
num_spikes[cluster_id] = len(spike_times[for_this_cluster])
return num_spikes
def calculate_firing_rates(spike_times, spike_clusters, total_units, duration, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
firing_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
firing_rates[cluster_id] = firing_rate(spike_times[for_this_cluster],
duration=duration)
return firing_rates
def calculate_amplitude_cutoff(spike_clusters, amplitudes, total_units, spike_cluster_subset=None, verbose=True):
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
amplitude_cutoffs = np.zeros((total_units,))
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, total_units)
for_this_cluster = (spike_clusters == cluster_id)
amplitude_cutoffs[cluster_id] = amplitude_cutoff(amplitudes[for_this_cluster])
return amplitude_cutoffs
def calculate_pc_metrics(spike_clusters, total_units, pc_features, pc_feature_ind,
num_channels_to_compare, max_spikes_for_cluster, spikes_for_nn,
n_neighbors, channel_locations, min_num_pcs=10, metric_names=None,
seed=None, spike_cluster_subset=None, verbose=True):
"""
Computes metrics from projection of waveforms to principal components
including: isolation distance, l ratio, d prime, nn hit rate, nn miss rate
Parameters
----------
spike_clusters: numpy.ndarray (num_spikes,)
Unit ID for each spike time
total_units: int
Total number of units
pc_features: numpy.ndarray (num_spikes, num_pcs, num_channels)
Pre-computed PCs for blocks of channels around each spike
pc_feature_ind: numpy.ndarray (num_units, num_channels)
Channel indices of PCs for each unit
num_channels_to_compare: int
Number of channels around the max channel over which to compute the
metrics (e.g. only units from these channels will be considered for the
nearest neighbor metrics)
max_spikes_for_cluster: int
Total number of spikes to use for computing the metrics
spikes_for_nn: int
Number of spikes in a unit to use for computing nearest neighbor metrics
(nn_hit_rate, nn_miss_rate)
n_neighbors: int
Number of nearest neighbor spikes to compare membership
channel_locations: array, (channels, 2)
(x,y) location of channels; used to identify neighboring channels
min_num_pcs: int, default=10
Minimum number of spikes a unit must have to compute these metrics
metric_names: list of str, default=None
List of metrics to compute
seed: int, default=None
Random seed for subsampling spikes from the unit
spike_cluster_subset: numpy.array (units,), default=None
If specified compute metrics for only these units
verbose: bool, default=True
Prints out progress bar if True
Returns (all 1d numpy.arrays)
-------
isolation_distances
l_ratios
d_primes
nn_hit_rates
nn_miss_rates
"""
if metric_names is None:
metric_names = ['isolation_distance', 'l_ratio', 'd_prime', 'nearest_neighbor']
if num_channels_to_compare > channel_locations.shape[0]:
num_channels_to_compare = channel_locations.shape[0]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
peak_channels = np.zeros((total_units,), dtype='uint16')
neighboring_channels = np.zeros((total_units, num_channels_to_compare))
isolation_distances = np.zeros((total_units,))
l_ratios = np.zeros((total_units,))
d_primes = np.zeros((total_units,))
nn_hit_rates = np.zeros((total_units,))
nn_miss_rates = np.zeros((total_units,))
for idx, cluster_id in enumerate(all_cluster_ids):
for_unit = np.squeeze(spike_clusters == cluster_id)
pc_max = np.argmax(np.mean(pc_features[for_unit, 0, :], 0))
peak_channels[idx] = pc_feature_ind[idx, pc_max]
# find neighboring channels
neighboring_channels[idx] = find_neighboring_channels(pc_feature_ind[idx, pc_max],
pc_feature_ind[idx, :],
num_channels_to_compare,
channel_locations)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(idx + 1, total_units)
peak_channel = peak_channels[idx]
# units_for_channel: index (not ID) of units defined at the target unit's peak channel
units_for_channel, channel_index = np.unravel_index(np.where(pc_feature_ind.flatten() == peak_channel)[0],
pc_feature_ind.shape)
# units_in_range: list of bool, True for units whose peak channels are in the neighborhood of target unit
units_in_range = [channel in neighboring_channels[idx] for channel in peak_channels[units_for_channel]]
channels_to_use = neighboring_channels[idx]
# only get index of units who are in the neighborhood of target unit
units_for_channel = units_for_channel[units_in_range]
spike_counts = np.zeros(units_for_channel.shape)
for idx2, cluster_id2 in enumerate(units_for_channel):
spike_counts[idx2] = np.sum(spike_clusters == all_cluster_ids[cluster_id2])
# index of target unit within the subset of units in its neighborhood (including itself)
this_unit_idx = np.where(units_for_channel == idx)[0]
if spike_counts[this_unit_idx] > max_spikes_for_cluster:
relative_counts = spike_counts / spike_counts[this_unit_idx] * max_spikes_for_cluster
else:
relative_counts = spike_counts
all_pcs = np.zeros((0, pc_features.shape[1], channels_to_use.size))
all_labels = np.zeros((0,))
for idx2, cluster_id2 in enumerate(units_for_channel):
try:
channel_mask = make_channel_mask(cluster_id2, pc_feature_ind, channels_to_use)
except IndexError:
# Occurs when pc_feature_ind does not contain all channels of interest
# In that case, we will exclude this unit for the calculation
print('Unit outside the range set by channels_to_use, skipping...')
pass
else:
subsample = int(relative_counts[idx2])
index_mask = make_index_mask(spike_clusters, all_cluster_ids[cluster_id2], min_num=0, max_num=subsample,
seed=seed)
pcs = get_unit_pcs(pc_features, index_mask, channel_mask)
labels = np.ones((pcs.shape[0],)) * all_cluster_ids[cluster_id2]
all_pcs = np.concatenate((all_pcs, pcs), 0)
all_labels = np.concatenate((all_labels, labels), 0)
all_pcs = np.reshape(all_pcs, (all_pcs.shape[0], pc_features.shape[1] * channels_to_use.size))
if all_pcs.shape[0] > min_num_pcs:
if 'isolation_distance' in metric_names or 'l_ratio' in metric_names:
isolation_distances[idx], l_ratios[idx] = mahalanobis_metrics(all_pcs, all_labels,
cluster_id)
else:
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
if 'd_prime' in metric_names:
d_primes[idx] = lda_metrics(all_pcs, all_labels, cluster_id)
else:
d_primes[idx] = np.nan
if 'nearest_neighbor' in metric_names:
nn_hit_rates[idx], nn_miss_rates[idx] = nearest_neighbors_metrics(all_pcs, all_labels,
cluster_id,
spikes_for_nn,
n_neighbors)
else:
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
else:
print(f'Unit {str(cluster_id)} only has ' + str(
all_pcs.shape[0]) + ' spikes, which is not enough to compute metric; assigning nan...')
isolation_distances[idx] = np.nan
l_ratios[idx] = np.nan
d_primes[idx] = np.nan
nn_hit_rates[idx] = np.nan
nn_miss_rates[idx] = np.nan
return isolation_distances, l_ratios, d_primes, nn_hit_rates, nn_miss_rates
def calculate_silhouette_score(spike_clusters,
total_units,
pc_features,
pc_feature_ind,
spikes_for_silhouette,
seed=None,
spike_cluster_subset=None,
verbose=True):
random_spike_inds = np.random.RandomState(seed=seed).permutation(spike_clusters.size)
random_spike_inds = random_spike_inds[:spikes_for_silhouette]
num_pc_features = pc_features.shape[1]
num_channels = np.max(pc_feature_ind) + 1
all_pcs = np.zeros((spikes_for_silhouette, num_channels * num_pc_features))
for idx, i in enumerate(random_spike_inds):
unit_id = spike_clusters[i]
channels = pc_feature_ind[unit_id, :]
for j in range(0, num_pc_features):
all_pcs[idx, channels + num_channels * j] = pc_features[i, j, :]
cluster_labels = spike_clusters[random_spike_inds]
all_cluster_ids = np.unique(spike_clusters)
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = all_cluster_ids
SS = np.empty((total_units, total_units))
SS[:] = np.nan
seen_unit_pairs = set()
for idx1, i in enumerate(cluster_ids):
if verbose:
printProgressBar(idx1 + 1, len(cluster_ids))
for idx2, j in enumerate(all_cluster_ids):
if (i, j) not in seen_unit_pairs and (j, i) not in seen_unit_pairs and i != j:
inds = np.in1d(cluster_labels, np.array([i, j]))
X = all_pcs[inds, :]
labels = cluster_labels[inds]
if len(labels) > 2:
SS[i, j] = silhouette_score(X, labels, random_state=seed)
seen_unit_pairs.add((i, j))
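# SS[i, j] holds the pairwise silhouette score for units i and j (upper triangle
# only); each unit's reported score is the minimum over all pairs involving it,
# taken across both the rows and the columns of SS below.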
with warnings.catch_warnings():
warnings.simplefilter("ignore")
a = np.nanmin(SS, 0)
b = np.nanmin(SS, 1)
return np.array([np.nanmin([a, b]) for a, b in zip(a, b)])
def calculate_drift_metrics(spike_times,
spike_clusters,
total_units,
pc_features,
pc_feature_ind,
interval_length,
min_spikes_per_interval,
vertical_channel_spacing=10,
channel_locations=None,
spike_cluster_subset=None,
verbose=True):
max_drift = np.zeros((total_units,))
cumulative_drift = np.zeros((total_units,))
positions = get_spike_positions(spike_clusters, pc_features, pc_feature_ind, channel_locations,
vertical_channel_spacing)
interval_starts = np.arange(np.min(spike_times), np.max(spike_times), interval_length)
interval_ends = interval_starts + interval_length
if spike_cluster_subset is not None:
cluster_ids = spike_cluster_subset
else:
cluster_ids = np.unique(spike_clusters)
for idx, cluster_id in enumerate(cluster_ids):
if verbose:
printProgressBar(cluster_id + 1, len(cluster_ids))
in_cluster = spike_clusters == cluster_id
times_for_cluster = spike_times[in_cluster]
positions_for_cluster = positions[in_cluster]
median_positions = []
for t1, t2 in zip(interval_starts, interval_ends):
in_range = (times_for_cluster > t1) * (times_for_cluster < t2)
if np.sum(in_range) >= min_spikes_per_interval:
median_positions.append(np.median(positions_for_cluster[in_range], 0))
else:
median_positions.append([np.nan, np.nan])
median_positions = np.array(median_positions)
# Extract semi-matrix of shifts in positions (used to compute max_drift and cumulative_drift)
position_diffs = np.zeros((len(median_positions), len(median_positions)))
for i, pos_i in enumerate(median_positions):
for j, pos_j in enumerate(median_positions):
if j > i:
if not np.isnan(pos_i[0]) and not np.isnan(pos_j[0]):
# -*- coding: utf-8 -*-
"""
Created on Tue Nov 15 12:05:40 2016
@author: sjjoo
"""
#%%
#import sys
import mne
#import imageio
from mne.utils import run_subprocess, logger
import os
from os import path as op
import copy
#import shutil
import numpy as np
from numpy.random import randn
from scipy import stats as stats
#import scipy.io as sio
import time
from functools import partial
from mne.stats import (spatio_temporal_cluster_1samp_test,
summarize_clusters_stc)
from mne import set_config
import matplotlib.pyplot as plt
import matplotlib.font_manager as font_manager
from pandas import DataFrame
from sklearn import linear_model
import statsmodels.api as sm
#import csv
os.chdir(os.path.join("D:\\", "git","BrainTools","projects","NLR_MEG"))
from plotit3 import plotit3
from plotsig3 import plotsig3
from plotit2 import plotit2
from plotsig2 import plotsig2
from plotcorr3 import plotcorr3
set_config('MNE_MEMMAP_MIN_SIZE', '1M')
set_config('MNE_CACHE_DIR', '.tmp')
mne.set_config('MNE_USE_CUDA', 'true')
this_env = copy.copy(os.environ)
fs_dir = 'D://subjects'
this_env['SUBJECTS_DIR'] = fs_dir
raw_dir = os.path.join("D:\\","NLR_MEG")
os.chdir(raw_dir)
import seaborn as sns
sns.set(style="darkgrid")
#%%
subs = ['NLR_102_RS','NLR_103_AC','NLR_105_BB','NLR_110_HH','NLR_127_AM',
'NLR_130_RW','NLR_132_WP','NLR_133_ML','NLR_145_AC','NLR_150_MG',
'NLR_151_RD','NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_163_LF',
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM',
'NLR_180_ZD','NLR_187_NB','NLR_201_GS','NLR_203_AM',
'NLR_204_AM','NLR_205_AC','NLR_206_LM','NLR_207_AH','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_JB423','NLR_GB267','NLR_JB420',
'NLR_HB275','NLR_197_BK','NLR_GB355','NLR_GB387','NLR_HB205',
'NLR_IB217','NLR_IB319','NLR_JB227','NLR_JB486','NLR_KB396',
'NLR_IB357']
session1 = ['102_rs160618','103_ac150609','105_bb150713','110_hh160608','127_am151022',
'130_rw151221','132_wp160919','133_ml151124','145_ac160621','150_mg160606',
'151_rd160620','152_tc160422','160_ek160627','161_ak160627','163_lf160707',
'164_sf160707','170_gm160613','172_th160614','174_hs160620','179_gm160701',
'180_zd160621','187_nb161017','201_gs150818','203_am150831',
'204_am150829','205_ac151123','206_lm151119','207_ah160608','211_lb160617',
'nlr_gb310170614','nlr_kb218170619','nlr_jb423170620','nlr_gb267170620','nlr_jb420170621',
'nlr_hb275170622','197_bk170622','nlr_gb355170606','nlr_gb387170608','nlr_hb205170825',
'nlr_ib217170831','nlr_ib319170825','nlr_jb227170811','nlr_jb486170803','nlr_kb396170808',
'nlr_ib357170912']
subs2 = ['NLR_102_RS','NLR_110_HH','NLR_145_AC','NLR_150_MG',
'NLR_152_TC','NLR_160_EK','NLR_161_AK','NLR_162_EF','NLR_163_LF', # 162, 201 only had the second session
'NLR_164_SF','NLR_170_GM','NLR_172_TH','NLR_174_HS','NLR_179_GM', # 'NLR_170_GM': no EOG channel
'NLR_180_ZD','NLR_201_GS',
'NLR_204_AM','NLR_205_AC','NLR_207_AH','NLR_210_SB','NLR_211_LB',
'NLR_GB310','NLR_KB218','NLR_GB267','NLR_JB420', 'NLR_HB275','NLR_GB355']
session2 = ['102_rs160815','110_hh160809','145_ac160823','150_mg160825',
'152_tc160623','160_ek160915','161_ak160916','162_ef160829','163_lf160920',
'164_sf160920','170_gm160822','172_th160825','174_hs160829','179_gm160913',
'180_zd160826','201_gs150925',
'204_am151120','205_ac160202','207_ah160809','210_sb160822','211_lb160823',
'nlr_gb310170829','nlr_kb218170829','nlr_gb267170911','nlr_jb420170828','nlr_hb275170828','nlr_gb355170907']
subIndex1 = np.nonzero(np.in1d(subs,subs2))[0]
subIndex2 = np.empty([1,len(subIndex1)],dtype=int)[0]
for i in range(0,len(subIndex1)):
subIndex2[i] = np.nonzero(np.in1d(subs2,subs[subIndex1[i]]))[0]
twre_index = [87,93,108,66,116,85,110,71,84,92,87,86,63,81,60,55,71,63,68,67,64,127,79,
73,59,84,79,91,57,67,77,57,80,53,72,58,85,79,116,117,107,78,66,101,67]
twre_index = np.array(twre_index)
brs = [87,102,108,78,122,91,121,77,91,93,93,88,75,90,66,59,81,84,81,72,71,121,
81,75,66,90,93,101,56,78,83,69,88,60,88,73,82,81,115,127,124,88,68,110,96]
brs = np.array(brs)
twre_index1 = twre_index[subIndex1]
twre_index2_all = [90,76,94,115,
85,75,82,64,75,
63,83,77,84,75,
68,79,
62,90,105,75,71,
69,83,76,62,73,94]
twre_index2_all = np.array(twre_index2_all)
twre_index2 = twre_index2_all[subIndex2]
brs1 = brs[subIndex1]
brs2_all = [98,88,102,110,99,91,88,79,105,86,81,88,89,77,83,81,86,98,116,104,86,90,91,97,57,99,102]
brs2_all = np.array(brs2_all)
brs2 = brs2_all[subIndex2]
twre_diff = np.subtract(twre_index2,twre_index1)
brs_diff = np.subtract(brs2,brs1)
swe_raw = [62, 76, 74, 42, 75, 67, 76, 21, 54, 35, 21, 61, 45, 48, 17, 11, 70, 19, 10, 57,
12, 86, 53, 51, 13, 28, 54, 25, 27, 10, 66, 18, 18, 20, 37, 23, 17, 36, 79, 82,
74, 64, 42, 78, 35]
swe_raw = np.array(swe_raw)
lwid = [49,60,60,51,62,54,65,23,44,35,31,52,44,39,27,30,57,33,24,48,19,66,45,
43,22,33,51,36,35,25,55,34,26,26,39,27,24,29,61,71,65,56,36,62,51]
lwid = np.array(lwid)
rf = [88,103,95,67,120,85,108,71,91,87,88,76,76,93,60,40,86,61,66,81,59,130,93,85,49,76,90,96,42,64,74,49,84,56,
76,61,80,89,111,120,132,88,65,102,72]
rf = np.array(rf)
age = [125.6885, 132.9501, 122.0434, 138.4349, 97.6347, 138.1420, 108.2457, 98.0631, 105.8147, 89.9132,
87.6465, 131.8660, 123.7174, 95.959, 112.416, 133.8042, 152.4639, 103.4823, 89.8475, 138.4020,
93.8568, 117.0814, 123.6202, 122.9304, 109.1656, 90.6058,
111.9593,86.0381,147.2063,95.8699,148.0802,122.5896,88.7162,123.0495,110.6645,105.3069,88.9143,95.2879,106.2852,
122.2915,114.4389,136.1496,128.6246,137.9216,122.7528]
age = np.divide(age, 12)
wasi_vocab = [51,62,52,39,80,59,56,np.nan,52,47,64,44,49,48,55,53,44,44,53,45,62,
76,45,55,48,56,41,43,40,52,54,50,62,67,59,48,60,60,62,79,74,44,49,50,60]
wasi_mr = [47,64,44,58,60,51,56,np.nan,56,43,37,37,51,55,36,33,52,48,49,41,51,
56,56,53,42,41,46,51,34,51,50,51,55,53,44,44,47,59,66,74,65,53,54,47,60]
n_subjects = len(subs)
c_table = ( (0.6510, 0.8078, 0.8902), # Blue, Green, Red, Orange, Purple, yellow
(0.1216, 0.4706, 0.7059),
(0.6980, 0.8745, 0.5412),
(0.2000, 0.6275, 0.1725),
(0.9843, 0.6039, 0.6000),
(0.8902, 0.1020, 0.1098),
(0.9922, 0.7490, 0.4353),
(1.0000, 0.4980, 0),
(0.7922, 0.6980, 0.8392),
(0.4157, 0.2392, 0.6039),
(1.0000, 1.0000, 0.6000),
(0.6941, 0.3490, 0.1569))
fname_data = op.join(raw_dir, 'session1_data_loose_depth8_normal.npy')
#%%
"""
Here we load the data for Session 1
"""
t0 = time.time()
os.chdir(raw_dir)
X13 = np.load(fname_data)
orig_times = np.load('session1_times.npy')
tstep = np.load('session1_tstep.npy')
n_epochs = np.load('session1_n_averages.npy')
tmin = -0.1
""" Downsample the data """
ss = 3 # was originally 2
sample = np.arange(0,len(orig_times),ss)
sRate = 600 / ss
times = orig_times[sample]
tstep = ss*tstep
X11 = X13[:,sample,:,:]
del X13
X11 = np.abs(X11)
print("\n\nElasped time: %0.2d mins %0.2d secs\n\n" % (divmod(time.time()-t0, 60)))
#%%
""" Grouping subjects """
reading_thresh = 80
m1 = np.logical_and(np.transpose(twre_index) > reading_thresh, np.transpose(age) <= 13)
m2 = np.logical_and(np.transpose(twre_index) <= reading_thresh, np.transpose(age) <= 13)
#m1 = np.logical_and(np.transpose(brs) >= reading_thresh, np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(brs) < reading_thresh, np.transpose(age) <= 13)
#m1 = np.logical_and(np.transpose(swe_raw) >= np.median(swe_raw), np.transpose(age) <= 13)
#m2 = np.logical_and(np.transpose(swe_raw) < np.median(swe_raw), np.transpose(age) <= 13)
orig_twre = twre_index
orig_age = age
orig_swe = swe_raw
m3 = np.mean(n_epochs,axis=1) < 40
m1[np.where(m3)] = False
m2[np.where(m3)] = False
twre_index = twre_index[np.where(~m3)[0]]
age = age[np.where(~m3)[0]]
#swe_raw = swe_raw[np.where(~m3)[0]]
good_readers = np.where(m1)[0]
poor_readers = np.where(m2)[0]
a1 = np.transpose(age) > np.mean(age)
a2 = np.logical_not(a1)
a1[np.where(m3)] = False
a2[np.where(m3)] = False
old_readers = np.where(a1)[0]
young_readers = np.where(a2)[0]
#wasi_vocab_G = [wasi_vocab[i] for i in good_readers]
#wasi_vocab_P = [wasi_vocab[i] for i in poor_readers]
#wasi_mr_G = [wasi_mr[i] for i in good_readers]
#wasi_mr_P = [wasi_mr[i] for i in poor_readers]
#age_G = [orig_age[i] for i in good_readers]
#age_P = [orig_age[i] for i in poor_readers]
#twre_G = [orig_twre[i] for i in good_readers]
#twre_P = [orig_twre[i] for i in poor_readers]
#
#n,p = stats.ttest_ind(wasi_vocab_G,wasi_vocab_P,nan_policy='omit')
#n,p = stats.ttest_ind(wasi_mr_G,wasi_mr_P,nan_policy='omit')
#n,p = stats.ttest_ind(age_G,age_P,nan_policy='omit')
#n,p = stats.ttest_ind(twre_G,twre_P,nan_policy='omit')
all_subject = []
all_subject.extend(good_readers)
all_subject.extend(poor_readers)
all_subject.sort()
fs_vertices = [np.arange(10242)] * 2
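# Count rejected trials per subject (each condition started with 60 epochs, as
# implied by the subtraction below) and test whether the number of rejections
# differs between reading/age groups or correlates with age and reading skill.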
n_epoch = np.empty((45,4))
n_epoch[:,0] = [np.int(n_epochs[i,0]) for i in range(0,45)]
n_epoch[:,1] = [np.int(n_epochs[i,3]) for i in range(0,45)]
n_epoch[:,2] = [np.int(n_epochs[i,5]) for i in range(0,45)]
n_epoch[:,3] = [np.int(n_epochs[i,8]) for i in range(0,45)]
removal = np.sum(60 - n_epoch, axis = 1)
a = [removal[i] for i in zip(good_readers)]
b = [removal[i] for i in zip(poor_readers)]
c = [removal[i] for i in zip(all_subject)]
d = [removal[i] for i in zip(young_readers)]
e = [removal[i] for i in zip(old_readers)]
stats.ttest_ind(a,b)
stats.ttest_ind(d,e)
stats.pearsonr(c,age)
stats.pearsonr(c,twre_index)
figureDir = '%s/figures' % raw_dir
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(age, c, deg=1)
ax.plot(age, fit[0] * age + fit[1], color=[0,0,0])
ax.plot(age, c, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlabel('Age')
plt.ylabel('# of rejected trials')
os.chdir(figureDir)
# plt.savefig('Corr_reject_age.png',dpi=600,papertype='letter',format='png')
# plt.savefig('Corr_reject_age.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(twre_index, c, deg=1)
ax.plot(twre_index, fit[0] * twre_index + fit[1], color=[0,0,0])
ax.plot(twre_index, c, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlabel('Reading skill')
plt.ylabel('# of rejected trials')
os.chdir(figureDir)
# plt.savefig('Corr_reject_reading.png',dpi=600,papertype='letter',format='png')
# plt.savefig('Corr_reject_reading.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Read HCP labels """
labels = mne.read_labels_from_annot('fsaverage', parc='HCPMMP1', surf_name='white', subjects_dir=fs_dir) #regexp=aparc_label_name
#aparc_label_name = 'PHT_ROI'#'_IP'#'IFSp_ROI'#'STSvp_ROI'#'STSdp_ROI'#'PH_ROI'#'TE2p_ROI' #'SFL_ROI' #'IFSp_ROI' #'TE2p_ROI' #'inferiortemporal' #'pericalcarine'
anat_label = mne.read_labels_from_annot('fsaverage', parc='aparc.a2009s',surf_name='white',
subjects_dir=fs_dir) #, regexp=aparc_label_name)
#%%
#TE2p_mask_lh = mne.Label.get_vertices_used(TE2p_label[0])
#TE2p_mask_rh = mne.Label.get_vertices_used(TE2p_label[1])
PHT_label_lh = [label for label in labels if label.name == 'L_PHT_ROI-lh'][0]
PHT_label_rh = [label for label in labels if label.name == 'R_PHT_ROI-rh'][0]
TE1p_label_lh = [label for label in labels if label.name == 'L_TE1p_ROI-lh'][0]
TE1p_label_rh = [label for label in labels if label.name == 'R_TE1p_ROI-rh'][0]
TE2p_label_lh = [label for label in labels if label.name == 'L_TE2p_ROI-lh'][0]
TE2p_label_rh = [label for label in labels if label.name == 'R_TE2p_ROI-rh'][0]
TE2a_label_lh = [label for label in labels if label.name == 'L_TE2a_ROI-lh'][0]
TE2a_label_rh = [label for label in labels if label.name == 'R_TE2a_ROI-rh'][0]
TF_label_lh = [label for label in labels if label.name == 'L_TF_ROI-lh'][0]
TF_label_rh = [label for label in labels if label.name == 'R_TF_ROI-rh'][0]
PH_label_lh = [label for label in labels if label.name == 'L_PH_ROI-lh'][0]
PH_label_rh = [label for label in labels if label.name == 'R_PH_ROI-rh'][0]
FFC_label_lh = [label for label in labels if label.name == 'L_FFC_ROI-lh'][0]
FFC_label_rh = [label for label in labels if label.name == 'R_FFC_ROI-rh'][0]
a8C_label_lh = [label for label in labels if label.name == 'L_8C_ROI-lh'][0]
a8C_label_rh = [label for label in labels if label.name == 'R_8C_ROI-rh'][0]
p946v_label_lh = [label for label in labels if label.name == 'L_p9-46v_ROI-lh'][0]
p946v_label_rh = [label for label in labels if label.name == 'R_p9-46v_ROI-rh'][0]
IFSp_label_lh = [label for label in labels if label.name == 'L_IFSp_ROI-lh'][0]
IFSp_label_rh = [label for label in labels if label.name == 'R_IFSp_ROI-rh'][0]
IFSa_label_lh = [label for label in labels if label.name == 'L_IFSa_ROI-lh'][0]
IFSa_label_rh = [label for label in labels if label.name == 'R_IFSa_ROI-rh'][0]
IFJp_label_lh = [label for label in labels if label.name == 'L_IFJp_ROI-lh'][0]
IFJp_label_rh = [label for label in labels if label.name == 'R_IFJp_ROI-rh'][0]
IFJa_label_lh = [label for label in labels if label.name == 'L_IFJa_ROI-lh'][0]
IFJa_label_rh = [label for label in labels if label.name == 'R_IFJa_ROI-rh'][0]
a45_label_lh = [label for label in labels if label.name == 'L_45_ROI-lh'][0]
a45_label_rh = [label for label in labels if label.name == 'R_45_ROI-rh'][0]
a44_label_lh = [label for label in labels if label.name == 'L_44_ROI-lh'][0]
a44_label_rh = [label for label in labels if label.name == 'R_44_ROI-rh'][0]
a43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
a43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
a9_46v_lh = [label for label in labels if label.name == 'L_a9-46v_ROI-lh'][0]
a9_46v_rh = [label for label in labels if label.name == 'R_a9-46v_ROI-rh'][0]
PGi_label_lh = [label for label in labels if label.name == 'L_PGi_ROI-lh'][0]
PGi_label_rh = [label for label in labels if label.name == 'R_PGi_ROI-rh'][0]
PGs_label_lh = [label for label in labels if label.name == 'L_PGs_ROI-lh'][0]
PGs_label_rh = [label for label in labels if label.name == 'R_PGs_ROI-rh'][0]
STSvp_label_lh = [label for label in labels if label.name == 'L_STSvp_ROI-lh'][0]
STSvp_label_rh = [label for label in labels if label.name == 'R_STSvp_ROI-rh'][0]
STSdp_label_lh = [label for label in labels if label.name == 'L_STSdp_ROI-lh'][0]
STSdp_label_rh = [label for label in labels if label.name == 'R_STSdp_ROI-rh'][0]
STSva_label_lh = [label for label in labels if label.name == 'L_STSva_ROI-lh'][0]
STSva_label_rh = [label for label in labels if label.name == 'R_STSva_ROI-rh'][0]
STSda_label_lh = [label for label in labels if label.name == 'L_STSda_ROI-lh'][0]
STSda_label_rh = [label for label in labels if label.name == 'R_STSda_ROI-rh'][0]
TPOJ1_label_lh = [label for label in labels if label.name == 'L_TPOJ1_ROI-lh'][0]
TPOJ1_label_rh = [label for label in labels if label.name == 'R_TPOJ1_ROI-rh'][0]
TPOJ2_label_lh = [label for label in labels if label.name == 'L_TPOJ2_ROI-lh'][0]
TPOJ2_label_rh = [label for label in labels if label.name == 'R_TPOJ2_ROI-rh'][0]
V1_label_lh = [label for label in labels if label.name == 'L_V1_ROI-lh'][0]
V1_label_rh = [label for label in labels if label.name == 'R_V1_ROI-rh'][0]
V4_label_lh = [label for label in labels if label.name == 'L_V4_ROI-lh'][0]
V4_label_rh = [label for label in labels if label.name == 'R_V4_ROI-rh'][0]
LIPd_label_lh = [label for label in labels if label.name == 'L_LIPd_ROI-lh'][0]
LIPd_label_rh = [label for label in labels if label.name == 'R_LIPd_ROI-rh'][0]
LIPv_label_lh = [label for label in labels if label.name == 'L_LIPv_ROI-lh'][0]
LIPv_label_rh = [label for label in labels if label.name == 'R_LIPv_ROI-rh'][0]
IPS1_label_lh = [label for label in labels if label.name == 'L_IPS1_ROI-lh'][0]
IPS1_label_rh = [label for label in labels if label.name == 'R_IPS1_ROI-rh'][0]
_7Am_label_lh = [label for label in labels if label.name == 'L_7Am_ROI-lh'][0]
_7Am_label_rh = [label for label in labels if label.name == 'R_7Am_ROI-rh'][0]
VIP_label_lh = [label for label in labels if label.name == 'L_VIP_ROI-lh'][0]
VIP_label_rh = [label for label in labels if label.name == 'R_VIP_ROI-rh'][0]
_7AL_label_lh = [label for label in labels if label.name == 'L_7AL_ROI-lh'][0]
_7AL_label_rh = [label for label in labels if label.name == 'R_7AL_ROI-rh'][0]
PBelt_label_lh = [label for label in labels if label.name == 'L_PBelt_ROI-lh'][0]
PBelt_label_rh = [label for label in labels if label.name == 'R_PBelt_ROI-rh'][0]
PSL_label_lh = [label for label in labels if label.name == 'L_PSL_ROI-lh'][0]
PSL_label_rh = [label for label in labels if label.name == 'R_PSL_ROI-rh'][0]
LBelt_label_lh = [label for label in labels if label.name == 'L_LBelt_ROI-lh'][0]
LBelt_label_rh = [label for label in labels if label.name == 'R_LBelt_ROI-rh'][0]
A1_label_lh = [label for label in labels if label.name == 'L_A1_ROI-lh'][0]
A1_label_rh = [label for label in labels if label.name == 'R_A1_ROI-rh'][0]
MBelt_label_lh = [label for label in labels if label.name == 'L_MBelt_ROI-lh'][0]
MBelt_label_rh = [label for label in labels if label.name == 'R_MBelt_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
A4_label_lh = [label for label in labels if label.name == 'L_A4_ROI-lh'][0]
A4_label_rh = [label for label in labels if label.name == 'R_A4_ROI-rh'][0]
PFcm_label_lh = [label for label in labels if label.name == 'L_PFcm_ROI-lh'][0]
PFcm_label_rh = [label for label in labels if label.name == 'R_PFcm_ROI-rh'][0]
PFm_label_lh = [label for label in labels if label.name == 'L_PFm_ROI-lh'][0]
PFm_label_rh = [label for label in labels if label.name == 'R_PFm_ROI-rh'][0]
_4_label_lh = [label for label in labels if label.name == 'L_4_ROI-lh'][0]
_4_label_rh = [label for label in labels if label.name == 'R_4_ROI-rh'][0]
_1_label_lh = [label for label in labels if label.name == 'L_1_ROI-lh'][0]
_1_label_rh = [label for label in labels if label.name == 'R_1_ROI-rh'][0]
_2_label_lh = [label for label in labels if label.name == 'L_2_ROI-lh'][0]
_2_label_rh = [label for label in labels if label.name == 'R_2_ROI-rh'][0]
_3a_label_lh = [label for label in labels if label.name == 'L_3a_ROI-lh'][0]
_3a_label_rh = [label for label in labels if label.name == 'R_3a_ROI-rh'][0]
_3b_label_lh = [label for label in labels if label.name == 'L_3b_ROI-lh'][0]
_3b_label_rh = [label for label in labels if label.name == 'R_3b_ROI-rh'][0]
_43_label_lh = [label for label in labels if label.name == 'L_43_ROI-lh'][0]
_43_label_rh = [label for label in labels if label.name == 'R_43_ROI-rh'][0]
_6r_label_lh = [label for label in labels if label.name == 'L_6r_ROI-lh'][0]
_6r_label_rh = [label for label in labels if label.name == 'R_6r_ROI-rh'][0]
OP1_label_lh = [label for label in labels if label.name == 'L_OP1_ROI-lh'][0]
OP1_label_rh = [label for label in labels if label.name == 'R_OP1_ROI-rh'][0]
OP23_label_lh = [label for label in labels if label.name == 'L_OP2-3_ROI-lh'][0]
OP23_label_rh = [label for label in labels if label.name == 'R_OP2-3_ROI-rh'][0]
OP4_label_lh = [label for label in labels if label.name == 'L_OP4_ROI-lh'][0]
OP4_label_rh = [label for label in labels if label.name == 'R_OP4_ROI-rh'][0]
PFop_label_lh = [label for label in labels if label.name == 'L_PFop_ROI-lh'][0]
PFop_label_rh = [label for label in labels if label.name == 'R_PFop_ROI-rh'][0]
A5_label_lh = [label for label in labels if label.name == 'L_A5_ROI-lh'][0]
A5_label_rh = [label for label in labels if label.name == 'R_A5_ROI-rh'][0]
STV_label_lh = [label for label in labels if label.name == 'L_STV_ROI-lh'][0]
STV_label_rh = [label for label in labels if label.name == 'R_STV_ROI-rh'][0]
RI_label_lh = [label for label in labels if label.name == 'L_RI_ROI-lh'][0]
RI_label_rh = [label for label in labels if label.name == 'R_RI_ROI-rh'][0]
PF_label_lh = [label for label in labels if label.name == 'L_PF_ROI-lh'][0]
PF_label_rh = [label for label in labels if label.name == 'R_PF_ROI-rh'][0]
PFt_label_lh = [label for label in labels if label.name == 'L_PFt_ROI-lh'][0]
PFt_label_rh = [label for label in labels if label.name == 'R_PFt_ROI-rh'][0]
p47r_label_lh = [label for label in labels if label.name == 'L_p47r_ROI-lh'][0]
p47r_label_rh = [label for label in labels if label.name == 'R_p47r_ROI-rh'][0]
FOP5_label_lh = [label for label in labels if label.name == 'L_FOP5_ROI-lh'][0]
FOP5_label_rh = [label for label in labels if label.name == 'R_FOP5_ROI-rh'][0]
FOP4_label_lh = [label for label in labels if label.name == 'L_FOP4_ROI-lh'][0]
FOP4_label_rh = [label for label in labels if label.name == 'R_FOP4_ROI-rh'][0]
FOP3_label_lh = [label for label in labels if label.name == 'L_FOP3_ROI-lh'][0]
FOP3_label_rh = [label for label in labels if label.name == 'R_FOP3_ROI-rh'][0]
FOP2_label_lh = [label for label in labels if label.name == 'L_FOP2_ROI-lh'][0]
FOP2_label_rh = [label for label in labels if label.name == 'R_FOP2_ROI-rh'][0]
Ig_label_lh = [label for label in labels if label.name == 'L_Ig_ROI-lh'][0]
Ig_label_rh = [label for label in labels if label.name == 'R_Ig_ROI-rh'][0]
AVI_label_lh = [label for label in labels if label.name == 'L_AVI_ROI-lh'][0]
AVI_label_rh = [label for label in labels if label.name == 'R_AVI_ROI-rh'][0]
_47l_label_lh = [label for label in labels if label.name == 'L_47l_ROI-lh'][0]
_47l_label_rh = [label for label in labels if label.name == 'R_47l_ROI-rh'][0]
temp1_label_lh = [label for label in anat_label if label.name == 'Pole_occipital-lh'][0]
#temp1_label_rh = [label for label in anat_label if label.name == 'parsopercularis-rh'][0]
temp2_label_lh = [label for label in anat_label if label.name == 'S_occipital_ant-lh'][0]
#temp2_label_rh = [label for label in anat_label if label.name == 'parsorbitalis-rh'][0]
temp3_label_lh = [label for label in anat_label if label.name == 'G_and_S_occipital_inf-lh'][0]
#temp3_label_rh = [label for label in anat_label if label.name == 'parstriangularis-rh'][0]
temp4_label_lh = [label for label in anat_label if label.name == 'S_calcarine-lh'][0]
#temp4_label_rh = [label for label in anat_label if label.name == 'precentral-rh'][0]
#%%
""" Lexical task: Word - Noise """
data11 = X11[:,:,all_subject,5] - X11[:,:,all_subject,8]
data11 = np.transpose(data11,[2,1,0])
data11_good = X11[:,:,good_readers,5] - X11[:,:,good_readers,8]
data11_good = np.transpose(data11_good,[2,1,0])
data11_poor = X11[:,:,poor_readers,5] - X11[:,:,poor_readers,8]
data11_poor = np.transpose(data11_poor,[2,1,0])
""" Dot task: Word - Noise """
data12 = X11[:,:,all_subject,0] - X11[:,:,all_subject,3]
data12 = np.transpose(data12,[2,1,0])
data12_good = X11[:,:,good_readers,0] - X11[:,:,good_readers,3]
data12_good = np.transpose(data12_good,[2,1,0])
data12_poor = X11[:,:,poor_readers,0] - X11[:,:,poor_readers,3]
data12_poor = np.transpose(data12_poor,[2,1,0])
""" Lexical task: High contrast - Low contrast """
#data12 = X11[:,31:65,all_subject,5] - X11[:,31:65,all_subject,7]
#data12 = np.transpose(data12,[2,1,0])
#data12[:,:,medial_vertices] = 0.
#%%
""" Spatio-temporal clustering: session 1 Lexical task"""
t0 = time.time()
print("\n\n Start time: %s \n\n" % time.ctime())
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., n_subjects - 1)
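# Cluster-forming threshold: critical t for a two-tailed test at p_threshold with n_subjects - 1 df
# (ppf of p/2 is negative, so it is negated to give the positive cutoff).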
s_space = mne.grade_to_tris(5)
# Left hemisphere
s_space_lh = s_space[s_space[:,0] < 10242]
#connectivity = mne.spatial_tris_connectivity(s_space_lh, remap_vertices = True)
connectivity = mne.spatial_tris_connectivity(s_space)
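# Vertex adjacency derived from the fsaverage ico-5 triangulation; clusters may span
# neighbouring vertices and adjacent time points.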
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.spatio_temporal_cluster_1samp_test(data11[:,:,:], n_permutations=1024, connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < p_threshold)[0]
#fsave_vertices = [np.arange(10242), np.array([], int)]
fsave_vertices = [np.arange(10242), np.arange(10242)]
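# fsaverage ico-5 has 10242 vertices per hemisphere; keeping both hemispheres here makes the
# summary STC cover the whole cortex.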
#fsave_vertices = [np.arange(10242), np.array([], int)]
stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
print("\n\n Elasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60)))
#%%
""" Just source estimates """
stat_fun = partial(mne.stats.ttest_1samp_no_p)
p_threshold = 0.05
t_threshold = -stats.distributions.t.ppf(p_threshold / 2., len(good_readers) - 1)
temp3 = mne.SourceEstimate(np.transpose(stat_fun(data12_good[:,:,:])), fs_vertices, tmin, tstep, subject='fsaverage')
brain3_1 = temp3.plot(hemi='both', subjects_dir=fs_dir, views = 'lat', initial_time=0.35, #['lat','ven','med']
clim=dict(kind='value', lims=[1.7, t_threshold, 3.5]))#clim=dict(kind='value', lims=[2, t_threshold, 7]), size=(800,800))
#%%
""" Spatio-temporal clustering: session 1 Dot task"""
dur_thresh = 100
t0 = time.time()
T_obs, clusters, cluster_p_values, H0 = clu = \
mne.stats.spatio_temporal_cluster_1samp_test(data12[:,166:199,:], n_permutations=1024, connectivity=connectivity, n_jobs=12,
threshold=t_threshold)
good_cluster_inds = np.where(cluster_p_values < 0.05)[0]
fsave_vertices = [np.arange(10242), np.arange(10242)]
dot_stc_all_cluster_vis = summarize_clusters_stc(clu, tstep=tstep,
vertices=fsave_vertices,
subject='fsaverage')
print("\n\nElasped time: %0.2d mins %0.2d secs" % (divmod(time.time()-t0, 60)))
brain3 = dot_stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=20, clim=dict(kind='value', lims=[0, 10, 50]),background='white',foreground='black')
#%%
""" ROI definition """
dur_thresh = 100
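# dur_thresh: minimum duration (ms) a vertex must remain significant to be kept when the
# functional ROIs are carved out below.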
"""
plot(self, subject=None, surface='inflated', hemi='lh', colormap='auto',
time_label='auto', smoothing_steps=10, transparent=None, alpha=1.0,
time_viewer=False, subjects_dir=None, figure=None, views='lat',
colorbar=True, clim='auto', cortex='classic', size=800, background='black',
foreground='white', initial_time=None, time_unit='s')
"""
brain1 = stc_all_cluster_vis.plot(
hemi='lh', views='lateral', subjects_dir=fs_dir,
time_label='Duration significant (ms)', size=(800, 800),
smoothing_steps=20, clim=dict(kind='value', lims=[40, dur_thresh, 200]),background='white',foreground='black')
""" Sort out vertices here """
#temp_frontal_label_l = mne.Label(FOP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP4_label_lh.pos, \
# values= FOP4_label_lh.values)
#
#brain1.add_label(temp_frontal_label_l, borders=True, color=c_table[8])
#
#lh_label = stc_all_cluster_vis.in_label(temp_frontal_label_l)
#data = lh_label.data
#lh_label.data[data < dur_thresh] = 0.
#
#temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
# subjects_dir=fs_dir, connected=False)
#temp = stc_all_cluster_vis.in_label(temp_labels)
#frontal_vertices_l = temp.vertices[0]
#
#new_label = mne.Label(frontal_vertices_l, hemi='lh')
#brain1.add_label(new_label, borders=True, color=c_table[8])
""" Done """
os.chdir('figures')
#brain1.save_image('Lexical_LH_STClustering.pdf', antialiased=True)
#brain1.save_image('Lexical_LH_STClustering.png', antialiased=True)
os.chdir('..')
brain1.add_label(A1_label_lh, borders=True, color=[0,0,0]) # Show A1
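# ROI-definition pattern used below for the STG, TPJ, motor, Broca, and Sylvian regions:
# merge the parcellation labels selected above, mask the cluster-duration map to that label,
# zero vertices significant for less than dur_thresh ms, and convert the surviving vertices
# back into an mne.Label drawn on the brain.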
temp_auditory_label_l = mne.Label(A4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A4_label_lh.pos,values= A4_label_lh.values) + \
mne.Label(A5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=A5_label_lh.pos,values= A5_label_lh.values) + \
mne.Label(STSdp_label_lh.vertices, hemi='lh',name=u'sts_l',pos=STSdp_label_lh.pos,values= STSdp_label_lh.values)+ \
mne.Label(TPOJ1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=TPOJ1_label_lh.pos,values= TPOJ1_label_lh.values)+ \
mne.Label(PBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PBelt_label_lh.pos,values= PBelt_label_lh.values)+ \
mne.Label(LBelt_label_lh.vertices, hemi='lh',name=u'sts_l',pos=LBelt_label_lh.pos,values= LBelt_label_lh.values)
#brain1.add_label(temp_auditory_label_l, borders=True, color=c_table[2])
lh_label = stc_all_cluster_vis.in_label(temp_auditory_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
stg_vertices_l = temp.vertices[0]
new_label = mne.Label(stg_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[1])
#brain1.remove_labels()
temp_auditory2_label_l = mne.Label(PFcm_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PFcm_label_lh.pos,values= PFcm_label_lh.values) + \
mne.Label(RI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=RI_label_lh.pos,values= RI_label_lh.values)+ \
mne.Label(PF_label_lh.vertices, hemi='lh',name=u'sts_l',pos=PF_label_lh.pos,values= PF_label_lh.values)
#brain1.add_label(temp_auditory2_label_l, borders=True, color=c_table[0])
lh_label = stc_all_cluster_vis.in_label(temp_auditory2_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
tpj_vertices_l = temp.vertices[0]
tpj_vertices_l = np.sort(np.concatenate((tpj_vertices_l, \
[16, 2051, 2677, 2678, 2679, 5042, 8296, 8297, 8299, 8722, 8723, 9376])))
new_label = mne.Label(tpj_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[0])
#brain1.add_label(_1_label_lh, borders=True, color=c_table[4])
temp_motor_label_l = mne.Label(_3a_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3a_label_lh.pos,values= _3a_label_lh.values) + \
mne.Label(_3b_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_3b_label_lh.pos,values= _3b_label_lh.values) + \
mne.Label(_4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_4_label_lh.pos,values= _4_label_lh.values) + \
mne.Label(_1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_1_label_lh.pos,values= _1_label_lh.values)
#brain1.add_label(temp_motor_label_l, borders=True, color=c_table[4])
lh_label = stc_all_cluster_vis.in_label(temp_motor_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
motor_vertices_l = temp.vertices[0]
new_label = mne.Label(motor_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[4])
temp_broca_label_l = \
mne.Label(a44_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a44_label_lh.pos,values= a44_label_lh.values) + \
mne.Label(a45_label_lh.vertices, hemi='lh',name=u'sts_l',pos=a45_label_lh.pos,values= a45_label_lh.values) + \
mne.Label(AVI_label_lh.vertices, hemi='lh',name=u'sts_l',pos=AVI_label_lh.pos,values= AVI_label_lh.values) + \
mne.Label(FOP5_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP5_label_lh.pos,values= FOP5_label_lh.values) + \
mne.Label(_47l_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_47l_label_lh.pos,values= _47l_label_lh.values)
#brain1.add_label(temp_broca_label_l, borders=True, color=c_table[6])
lh_label = stc_all_cluster_vis.in_label(temp_broca_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
broca_vertices_l = temp.vertices[0]
broca_vertices_l = np.sort(np.concatenate((broca_vertices_l,[1187,3107,3108,3109,6745,7690,7691])))
new_label = mne.Label(broca_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[6])
temp_sylvian_label_l = mne.Label(OP23_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP23_label_lh.pos,values= OP23_label_lh.values) + \
mne.Label(Ig_label_lh.vertices, hemi='lh',name=u'sts_l',pos=Ig_label_lh.pos,values= Ig_label_lh.values) + \
mne.Label(OP4_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP4_label_lh.pos,values= OP4_label_lh.values) + \
mne.Label(OP1_label_lh.vertices, hemi='lh',name=u'sts_l',pos=OP1_label_lh.pos,values= OP1_label_lh.values) + \
mne.Label(FOP2_label_lh.vertices, hemi='lh',name=u'sts_l',pos=FOP2_label_lh.pos,values= FOP2_label_lh.values) + \
mne.Label(_6r_label_lh.vertices, hemi='lh',name=u'sts_l',pos=_6r_label_lh.pos,values= _6r_label_lh.values)
#brain1.add_label(temp_sylvian_label_l, borders=True, color=c_table[8])
lh_label = stc_all_cluster_vis.in_label(temp_sylvian_label_l)
data = lh_label.data
lh_label.data[data < dur_thresh] = 0.
temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
subjects_dir=fs_dir, connected=False)
temp = stc_all_cluster_vis.in_label(temp_labels)
sylvian_vertices_l = temp.vertices[0]
sylvian_vertices_l = np.sort(np.concatenate((sylvian_vertices_l,[905,1892,2825,2526,4157,4158,4159,6239,8290,8293,9194,9203])))
new_label = mne.Label(sylvian_vertices_l, hemi='lh')
brain1.add_label(new_label, borders=True, color=c_table[8])
# right hemisphere
#brain2 = stc_all_cluster_vis.plot(
# hemi='rh', views='lateral', subjects_dir=fs_dir,
# time_label='Duration significant (ms)', size=(800, 800),
# smoothing_steps=20, clim=dict(kind='value', lims=[40, dur_thresh, 200]),background='white',foreground='black')
#
#stg_vertices_r = A5_label_rh.vertices
#stg_vertices_r = np.sort([2001,2002,2419,2420,2421,2418,2754,2417,13075,13076,13077,13078,\
# 13079,13080,13081,12069,12070,12071,12072])
#new_label = mne.Label(stg_vertices_r, hemi='rh')
#brain2.add_label(new_label, borders=True, color=c_table[5])
#
#os.chdir('figures')
#brain2.save_image('RH_STClustering.pdf', antialiased=True)
#brain2.save_image('RH_STClustering.png', antialiased=True)
#os.chdir('..')
# V1
#lh_label = dot_stc_all_cluster_vis.in_label(V1_label_lh)
#data = lh_label.data
#lh_label.data[data < 50] = 0.
#
#temp_labels, _ = mne.stc_to_label(lh_label, src='fsaverage', smooth=False,
# subjects_dir=fs_dir, connected=False)
#temp = dot_stc_all_cluster_vis.in_label(temp_labels)
#tV1_vertices_l = temp.vertices[0]
#new_label = mne.Label(tV1_vertices_l, hemi='lh')
#brain1.add_label(new_label, borders=True, color='r')
#
#M = np.mean(np.mean(tX11[tV1_vertices_l,:,:,:],axis=0),axis=1)
#errM = np.std(np.mean(tX11[tV1_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
#t0 = time.time()
#plotit2(times, M, errM, 5, 0, yMin=0, yMax=2.7, subject = 'all')
#plotsig2(times,nReps,X, 5, 0, all_subject, boot_pVal)
np.save('STG_Vert', stg_vertices_l)
np.save('IFG_Vert', broca_vertices_l)
np.save('TPJ_Vert', tpj_vertices_l)
np.save('Motor_Vert', motor_vertices_l)
np.save('Sylvian_Vert', sylvian_vertices_l)
#np.save('STG_Vert_r', stg_vertices_r)  # stg_vertices_r is only computed in the commented-out right-hemisphere block above
#%%
figureDir = '%s/figures' % raw_dir
nReps = 3000
boot_pVal = 0.05
#%%
""" Left STG: Word vs. Noise """
stg_vertices_l = np.load('STG_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
MM = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
diffScore = np.mean((MM[:,:,5]-MM[:,:,8]), axis = 1)
diffScore2 = np.mean((MM[:,:,0]-MM[:,:,3]), axis = 1)
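# diffScore / diffScore2: word-minus-noise timecourses averaged across subjects for the
# lexical and fixation (dot) tasks, respectively.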
del temp1
plt.figure()
plt.clf()
plt.plot(times, diffScore)
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('STG: Lexical task')
os.chdir(figureDir)
plt.savefig('STG_Word_Scramble_lex.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_Word_Scramble_lex.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
plt.plot(times, diffScore2)
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('STG: Fixation task')
os.chdir(figureDir)
plt.savefig('STG_Word_Scramble_fix.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_Word_Scramble_fix.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[stg_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[stg_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[stg_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
diffM3 = np.mean(np.mean(temp1[stg_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,8],axis=0),axis=1)
diffM4 = np.mean(np.mean(temp1[stg_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[stg_vertices_l,:,:,3],axis=0),axis=1)
del temp1
# For calculating p-values
X = np.mean(X11[stg_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STG_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: STG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('STG_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: STG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: STG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('STG_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Correlation """
temp1 = X11[:,:,all_subject,:]
M = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[stg_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[stg_vertices_l,:,:,:],axis=0)
del temp1, temp2
#%%
""" Plot """
t1 = 350
t_window1 = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
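# Window edges converted from ms post-stimulus to sample indices: add the 100 ms pre-stimulus
# baseline, convert to seconds, and scale by the sampling rate sRate (defined earlier in the script).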
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
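# First-degree polyfit below = least-squares regression line of reading score on the MEG word-minus-noise score.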
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4.5])
os.chdir('figures')
plt.savefig('STG_corr_lexical_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lexical_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
t1 = 300
t_window1_dot = np.multiply(np.divide(np.add([t1,t1+100],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4.5])
os.chdir('figures')
plt.savefig('STG_corr_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_fix = temp_meg
""" Corr: Difference score lexical vs. fixation """
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg_fix, temp_meg_lex, deg=1)
ax.plot(temp_meg_fix, fit[0] * temp_meg_fix + fit[1], color=[0,0,0])
ax.plot(temp_meg_fix, temp_meg_lex, 'o', markerfacecolor=[0.5,0.5,0.5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=c_table[3], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([-1.5, 4.5])
plt.xlim([-1.5, 4.5])
r, p = stats.pearsonr(temp_meg_fix,temp_meg_lex)
print('STG: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('STG_lexical_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_lexical_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
"""Equivalence test"""
import statsmodels
sstep = 10
p = np.empty((int(len(range(0,800,sstep))),1))
lower_p = np.empty((int(len(range(0,800,sstep))),1))
upper_p = np.empty((int(len(range(0,800,sstep))),1))
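# Equivalence (TOST) test in 10 ms steps: for each window, compute the lexical and dot
# word-minus-noise scores, set the equivalence bounds to +/-0.8 SD of their paired difference,
# and run one-sample t-tests against each bound.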
for tt, ttime in zip(range(0, len(range(0,800,sstep))),range(0,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([ttime,ttime+sstep],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
t_window1_dot = np.multiply(np.divide(np.add([ttime,ttime+sstep],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_meg_fix = temp_meg
err = 0.8 * np.std(temp_meg_lex - temp_meg_fix)
# p[tt], a, b = statsmodels.stats.weightstats.ttost_paired(temp_meg_lex, temp_meg_fix, err, -err)
xx, lower_p[tt] = stats.ttest_1samp(temp_meg_lex-temp_meg_fix,-err)
xx, upper_p[tt] = stats.ttest_1samp(temp_meg_lex-temp_meg_fix,err)
p[tt] = max(lower_p[tt], upper_p[tt])*2
plt.figure()
plt.clf()
plt.plot(range(0,800,sstep), p)
plt.plot([0, 800],[0.05,0.05],'--')
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('P-value from the equivalence test')
os.chdir(figureDir)
plt.savefig('STG_equivalence.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_equivalence.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#tempa = np.random.normal(100,5,(1,100))
#tempb = np.random.normal(10,5,(1,100))
#err = 0.8*5
#tempp, fjsdk, fjdskl = statsmodels.stats.weightstats.ttost_paired(tempa[0], tempb[0], err, -err)
#xxx, xxxx = stats.ttest_rel(tempa[0],tempb[0])
#%%
"""Correlation over time"""
sstep = 10
tstart = 0
n_ttest = np.empty((len(range(tstart,800,sstep)),1))
p_ttest = np.empty((len(range(tstart,800,sstep)),1))
r_lex = np.empty((len(range(tstart,800,sstep)),1))
p_lex = np.empty((len(range(tstart,800,sstep)),1))
r_dot = np.empty((len(range(tstart,800,sstep)),1))
p_dot = np.empty((len(range(tstart,800,sstep)),1))
r_bet = np.empty((len(range(tstart,800,sstep)),1))
p_bet = np.empty((len(range(tstart,800,sstep)),1))
temp_meg_lex = np.empty((len(all_subject),len(range(tstart,800,sstep))))
temp_meg_fix = np.empty((len(all_subject),len(range(tstart,800,sstep))))
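# Sliding-window analysis: at each 10 ms step, correlate the word-minus-noise MEG score with
# reading scores (orig_twre) for each task, and correlate the two tasks' MEG scores with each other.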
for ii, t1 in zip(range(0,len(range(tstart,800,sstep))), range(tstart,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([t1,t1+10],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1 = np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_lex[ii], p_lex[ii] = stats.pearsonr(temp_read,temp_meg)
n_ttest[ii], p_ttest[ii] = stats.ttest_1samp(lowNoise1,0)
temp_meg_lex[:,ii] = temp_meg
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_dot[ii], p_dot[ii] = stats.pearsonr(temp_read,temp_meg)
temp_meg_fix[:,ii] = temp_meg
r_bet[ii], p_bet[ii] = stats.pearsonr(temp_meg_fix[:,ii],temp_meg_lex[:,ii])
#%%
"""Correlation over time"""
c = ( (0.6196, 0.0039, 0.2588),
(0.8353, 0.2431, 0.3098),
(0.9569, 0.4275, 0.2627),
(0.9922, 0.6824, 0.3804),
(0.9961, 0.8784, 0.5451),
(1.0000, 1.0000, 0.7490) )
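# c: red-to-yellow palette marking p-value bins (n.s., <.05, <.01, <.005, <.001) along the
# zero line of the correlation plots below.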
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_lex, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('STG: Lexical task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_lex[ttt] >= 0.05:
al = plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_lex[ttt] < 0.05 and p_lex[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_lex[ttt] < 0.01 and p_lex[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_lex[ttt] < 0.005 and p_lex[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_lex_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_lex_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_dot, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('STG: Fixation task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_dot[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_dot[ttt] < 0.05 and p_dot[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_dot[ttt] < 0.01 and p_dot[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_dot[ttt] < 0.005 and p_dot[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_dot_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_dot_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_bet, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation between tasks (r-value)')
plt.title('STG')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_bet[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_bet[ttt] < 0.05 and p_bet[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_bet[ttt] < 0.01 and p_bet[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_bet[ttt] < 0.005 and p_bet[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('STG_corr_bettasks_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('STG_corr_bettasks_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
del M, M1, M2
#%%
""" Broca """
broca_vertices_l = np.load('IFG_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
plt.figure()
plt.clf()
plt.plot(times, M[:,5]-M[:,8])
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('IFG: Lexical task')
os.chdir(figureDir)
plt.savefig('IFG_Word_Scramble_lex.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_Word_Scramble_lex.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
plt.plot(times, M[:,0]-M[:,3])
plt.ylim([-0.4,0.7])
plt.fill_between([0.35,0.55],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.xlabel('Time after stimulus onset (s)')
plt.ylabel('Word - Scramble')
plt.title('IFG: Fixation task')
os.chdir(figureDir)
plt.savefig('IFG_Word_Scramble_fix.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_Word_Scramble_fix.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[broca_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[broca_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: IFG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('IFG_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: IFG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: IFG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: IFG')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('IFG_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: IFG')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: IFG')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
plt.fill_between([0.35,0.55],0.0,2.7,facecolor=[0,0,0],alpha=0.4)
os.chdir(figureDir)
plt.savefig('IFG_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" Correlation: Lexical """
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[broca_vertices_l,:,:,:],axis=0)
del temp1, temp2
#%%
"""Plot"""
t1 = 350
t_window1 = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-2,3])
os.chdir('figures')
plt.savefig('IFG_corr_lexical_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_lexical_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_lex = temp_meg
""" Correlation: Dot task """
#t1 = 400
t_window1_dot = np.multiply(np.divide(np.add([t1,t1+200],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
plt.xlim([-1.5,4])
os.chdir('figures')
plt.savefig('IFG_corr_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
temp_meg_fix = temp_meg
""" Corr: Difference score lexical vs. fixation """
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg_fix, temp_meg_lex, deg=1)
ax.plot(temp_meg_fix, fit[0] * temp_meg_fix + fit[1], color=[0,0,0])
ax.plot(temp_meg_fix, temp_meg_lex, 'o', markerfacecolor=[0.5,0.5,0.5], markeredgecolor=[1,1,1], markersize=10)
#ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=c_table[3], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([-1.5, 4.5])
plt.xlim([-1.5, 4])
r, p = stats.pearsonr(temp_meg_fix,temp_meg_lex)
print('IFG: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('IFG_lexical_dot_350.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_lexical_dot_350.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
"""Correlation over time"""
""" Correlation """
temp1 = X11[:,:,all_subject,:]
M = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[broca_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[broca_vertices_l,:,:,:],axis=0)
del temp1, temp2
sstep = 10
tstart = 200
n_ttest = np.empty((len(range(tstart,800,sstep)),1))
p_ttest = np.empty((len(range(tstart,800,sstep)),1))
r_lex = np.empty((len(range(tstart,800,sstep)),1))
p_lex = np.empty((len(range(tstart,800,sstep)),1))
r_dot = np.empty((len(range(tstart,800,sstep)),1))
p_dot = np.empty((len(range(tstart,800,sstep)),1))
r_bet = np.empty((len(range(tstart,800,sstep)),1))
p_bet = np.empty((len(range(tstart,800,sstep)),1))
temp_meg_lex = np.empty((len(all_subject),len(range(tstart,800,sstep))))
temp_meg_fix = np.empty((len(all_subject),len(range(tstart,800,sstep))))
for ii, t1 in zip(range(0,len(range(tstart,800,sstep))), range(tstart,800,sstep)):
t_window1 = np.multiply(np.divide(np.add([t1,t1+50],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1 = np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_lex[ii], p_lex[ii] = stats.pearsonr(temp_read,temp_meg)
n_ttest[ii], p_ttest[ii] = stats.ttest_1samp(lowNoise1,0)
temp_meg_lex[:,ii] = temp_meg
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
r_dot[ii], p_dot[ii] = stats.pearsonr(temp_read,temp_meg)
temp_meg_fix[:,ii] = temp_meg
r_bet[ii], p_bet[ii] = stats.pearsonr(temp_meg_fix[:,ii],temp_meg_lex[:,ii])
#%%
"""Correlation over time"""
c = ( (0.6196, 0.0039, 0.2588),
(0.8353, 0.2431, 0.3098),
(0.9569, 0.4275, 0.2627),
(0.9922, 0.6824, 0.3804),
(0.9961, 0.8784, 0.5451),
(1.0000, 1.0000, 0.7490) )
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_lex, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('IFG: Lexical task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_lex[ttt] >= 0.05:
al = plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_lex[ttt] < 0.05 and p_lex[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_lex[ttt] < 0.01 and p_lex[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_lex[ttt] < 0.005 and p_lex[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_lex_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_lex_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_dot, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation (r-value)')
plt.title('IFG: Fixation task')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_dot[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_dot[ttt] < 0.05 and p_dot[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_dot[ttt] < 0.01 and p_dot[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_dot[ttt] < 0.005 and p_dot[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_dot_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_dot_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plt.figure()
plt.clf()
x = range(tstart,800,sstep)
plt.plot(x, r_bet, color=[0,0,0])
plt.fill_between([430,530],-0.4,0.7,facecolor=[0,0,0],alpha=0.4)
plt.ylim([-0.4,0.7])
plt.xlabel('Time after stimulus onset (ms)')
plt.ylabel('Correlation between tasks (r-value)')
plt.title('IFG')
for ttt in range(0, len(range(tstart,800,sstep))):
if p_bet[ttt] >= 0.05:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[0], markeredgecolor=c[0], markersize=10, alpha = 0.0)
elif p_bet[ttt] < 0.05 and p_bet[ttt] >= 0.01:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[1], markeredgecolor=c[1], markersize=10)
elif p_bet[ttt] < 0.01 and p_bet[ttt] >= 0.005:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[2], markeredgecolor=c[2], markersize=10)
elif p_bet[ttt] < 0.005 and p_bet[ttt] >= 0.001:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[3], markeredgecolor=c[3], markersize=10)
else:
plt.plot(x[ttt], 0.0, 's', markerfacecolor=c[4], markeredgecolor=c[4], markersize=10)
os.chdir(figureDir)
plt.savefig('IFG_corr_bettasks_overtime.png',dpi=600,papertype='letter',format='png')
plt.savefig('IFG_corr_bettasks_overtime.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#%%
""" TPJ """
tpj_vertices_l = np.load('TPJ_Vert.npy')
temp1 = X11[:,:,all_subject,:]
M = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
del temp1
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
diffM1 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,8],axis=0),axis=1)
diffM2 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,3],axis=0),axis=1)
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[tpj_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
diffM3 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,5],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,8],axis=0),axis=1)
diffM4 = np.mean(np.mean(temp1[tpj_vertices_l,:,:,0],axis=0),axis=1) - np.mean(np.mean(temp1[tpj_vertices_l,:,:,3],axis=0),axis=1)
del temp1
# For calculating p-values
X = np.mean(X11[tpj_vertices_l,:,:,:],axis=0)
###############################################################################
""" Timecourse: Lexical task """
task1 = 5
task2 = 8
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: TPJ')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: TPJ')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: TPJ')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Timecourse: Dot task """
task1 = 0
task2 = 3
plotit2(times, M, errM, task1, task2, yMin=0, yMax=2.3, subject = 'all: TPJ')
plotsig2(times,nReps,X, task1, task2, all_subject, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M1, errM1, task1, task2, yMin=0, yMax=2.7, subject = 'typical: TPJ')
plotsig2(times,nReps,X, task1, task2, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
plotit2(times, M2, errM2, task1, task2, yMin=0, yMax=2.7, subject = 'struggling: TPJ')
plotsig2(times,nReps,X, task1, task2, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('TPJ_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
""" Correlation: Lexical """
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[tpj_vertices_l,:,:,:],axis=0)
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[tpj_vertices_l,:,:,:],axis=0)
del temp1, temp2
t_window1 = np.multiply(np.divide(np.add([400,500],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
task = 5
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
os.chdir('figures')
plt.savefig('TPJ_corr_lexical.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_corr_lexical.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
np.corrcoef(temp_read,temp_meg)
r, p = stats.pearsonr(temp_read,temp_meg)
print('lexical(all): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[good_readers],lowNoise1_good)
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('lexical(good): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[poor_readers],lowNoise1_poor)
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('lexical(poor): correlation = %.4f, p = %.4f' %(r, p))
""" Correlation: Dot task """
t_window1_dot = np.multiply(np.divide(np.add([300,400],[100,100]),1000.), sRate)
t_window1_dot = [np.int(i) for i in t_window1_dot]
task = 0
lowNoise1_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
lowNoise1_poor = np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task], axis = 0) - np.mean(M2[t_window1_dot[0]:t_window1_dot[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
temp_meg = np.concatenate((lowNoise1_good,lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp_meg, temp_read, deg=1)
ax.plot(temp_meg, fit[0] * temp_meg + fit[1], color=[0,0,0])
ax.plot(temp_meg, temp_read, 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(lowNoise1_good, orig_twre[good_readers], 'o', markerfacecolor=[.5, .5, .5], markeredgecolor=[1,1,1], markersize=10)
os.chdir('figures')
plt.savefig('TPJ_corr_dot.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_corr_dot.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
np.corrcoef(temp_read,temp_meg)
r, p = stats.pearsonr(temp_read,temp_meg)
print('Dot(all): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[good_readers],lowNoise1_good)
r, p = stats.pearsonr(orig_twre[good_readers],lowNoise1_good)
print('Dot(good): correlation = %.4f, p = %.4f' %(r, p))
np.corrcoef(orig_twre[poor_readers],lowNoise1_poor)
r, p = stats.pearsonr(orig_twre[poor_readers],lowNoise1_poor)
print('Dot(poor): correlation = %.4f, p = %.4f' %(r, p))
""" Task effects: Word response in lexical vs. dot task """
t0 = time.time()
task1 = 0
task2 = 5
temp2_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task2], axis = 0)
temp2_poor = np.mean(M2[np.int(t_window1[0]):np.int(t_window1[1]),:,task2], axis = 0)
temp3_good = np.mean(M1[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task1], axis = 0)
temp3_poor = np.mean(M2[np.int(t_window1_dot[0]):np.int(t_window1_dot[1]),:,task1], axis = 0)
temp2 = np.concatenate((temp2_good,temp2_poor)) # lexical
temp3 = np.concatenate((temp3_good,temp3_poor)) # dot
plt.figure()
plt.clf()
ax = plt.subplot()
fit = np.polyfit(temp3, temp2, deg=1)
ax.plot(temp3, fit[0] * temp3 + fit[1], color=[0,0,0])
ax.plot(temp3_poor, temp2_poor, 'o', markerfacecolor=[.5,.5,.5], markeredgecolor=[1,1,1], markersize=10)
ax.plot(temp3_good, temp2_good, 'o', markerfacecolor=[.5,.5,.5], markeredgecolor=[1,1,1], markersize=10)
plt.axis('square')
plt.ylim([0, 7])
plt.xlim([0, 7])
r, p = stats.pearsonr(temp3,temp2)
print('TPJ: lexical vs. dot task (all): correlation = %.4f, p = %.7f' %(r, p))
os.chdir(figureDir)
plt.savefig('TPJ_lexical_dot.png',dpi=600,papertype='letter',format='png')
plt.savefig('TPJ_lexical_dot.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
del M, M1, M2
#%%
""" Motor """
M = np.mean(np.mean(tX11[motor_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(tX11[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[motor_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[motor_vertices_l,:,:,:],axis=0)
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 0, 3, yMin=0, yMax=2.3, subject = 'all')
plotsig2(times,nReps,X, 0, 3, all_subject, boot_pVal)
C = np.mean(X11[motor_vertices_l,:,:,0],axis=0) - np.mean(X11[motor_vertices_l,:,:,3],axis=0)
#corr = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.5,np.str(times[np.where(corr == np.max(corr))[0][0]]))
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.4,np.str(np.max(corr)))
os.chdir(figureDir)
plt.savefig('Motor_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 0, 3, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 0, 3, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 0, 3, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 0, 3, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 5, 8, yMin=0, yMax=2.3, subject = 'all')
plotsig2(times,nReps,X, 5, 8, all_subject, boot_pVal)
#C = np.mean(X11[motor_vertices_l,:,:,5],axis=0) - np.mean(X11[motor_vertices_l,:,:,8],axis=0)
#corr2 = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.5,np.str(times[np.where(corr2 == np.max(corr2))[0][0]]))
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.4,np.str(np.max(corr2)))
os.chdir(figureDir)
plt.savefig('Motor_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 5, 8, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 5, 8, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 5, 8, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 5, 8, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Motor_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Motor_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
#%%
""" Sylvian """
M = np.mean(np.mean(tX11[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM = np.std(np.mean(tX11[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(all_subject))
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM1 = np.std(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(good_readers))
del temp1
temp1 = X11[:,:,poor_readers,:]
M2 = np.mean(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1)
errM2 = np.std(np.mean(temp1[sylvian_vertices_l,:,:,:],axis=0),axis=1) / np.sqrt(len(poor_readers))
del temp1
# For calculating p-values
X = np.mean(X11[sylvian_vertices_l,:,:,:],axis=0)
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 0, 3, yMin=0, yMax=2.7, subject = 'all')
plotsig2(times,nReps,X, 0, 3, all_subject, boot_pVal)
#C = np.mean(X11[sylvian_vertices_l,:,:,0],axis=0) - np.mean(X11[sylvian_vertices_l,:,:,3],axis=0)
#corr = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.5,np.str(times[np.where(corr == np.max(corr))[0][0]]))
#plt.text(times[np.where(corr == np.max(corr))[0][0]],0.4,np.str(np.max(corr)))
os.chdir(figureDir)
plt.savefig('Sylvian_dot_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_dot_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 0, 3, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 0, 3, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_dot_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_dot_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 0, 3, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 0, 3, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_dot_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_dot_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
###############################################################################
t0 = time.time()
plotit2(times, M, errM, 5, 8, yMin=0, yMax=2.7, subject = 'all')
plotsig2(times,nReps,X, 5, 8, all_subject, boot_pVal)
#C = np.mean(X11[sylvian_vertices_l,:,:,5],axis=0) - np.mean(X11[sylvian_vertices_l,:,:,8],axis=0)
#corr2 = plotcorr3(times, C[:,all_subject], twre_index)
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.5,np.str(times[np.where(corr2 == np.max(corr2))[0][0]]))
#plt.text(times[np.where(corr2 == np.max(corr2))[0][0]],0.4,np.str(np.max(corr2)))
os.chdir(figureDir)
plt.savefig('Sylvian_lexical_all.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_lexical_all.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M1, errM1, 5, 8, yMin=0, yMax=2.7, subject = 'typical')
plotsig2(times,nReps,X, 5, 8, good_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_lexical_good.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_lexical_good.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
t0 = time.time()
plotit2(times, M2, errM2, 5, 8, yMin=0, yMax=2.7, subject = 'struggling')
plotsig2(times,nReps,X, 5, 8, poor_readers, boot_pVal)
os.chdir(figureDir)
plt.savefig('Sylvian_lexical_poor.png',dpi=600,papertype='letter',format='png')
plt.savefig('Sylvian_lexical_poor.pdf',dpi=600,papertype='letter',format='pdf')
os.chdir('..')
#print("\n\nElapsed time: %0.2d mins %0.2d secs" % divmod(time.time()-t0, 60))
#%%
""" Making bar plots """
t_window1 = np.multiply(np.divide(np.add([300,600],[100,100]),1000.), sRate)
t_window1 = [np.int(i) for i in t_window1]
dot_window1 = np.multiply(np.divide(np.add([300,600],[100,100]),1000.), sRate)
dot_window1 = [np.int(i) for i in dot_window1]
t_window2 = np.multiply(np.divide(np.add([600,700],[100,100]),1000.), sRate)
t_window2 = [np.int(i) for i in t_window2]
dot_early = np.multiply(np.divide(np.add([300,400],[100,100]),1000.), sRate)
dot_early = [np.int(i) for i in dot_early]
dot_late = np.multiply(np.divide(np.add([500,600],[100,100]),1000.), sRate)
dot_late = [np.int(i) for i in dot_late]
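# Note: each window above is assumed to map a [start, end] interval in ms to sample indices
# via samples = (ms + 100) / 1000 * sRate, where the 100 ms offset corresponds to the
# pre-stimulus baseline of the epochs.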
#temp_vertices = broca_vertices_l
temp_vertices = stg_vertices_l
# AUD 1
# Lexical task
task = 5
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[temp_vertices,:,:,:],axis=0)
lowNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
lowNoise1_good_err = np.std(lowNoise1_good) / np.sqrt(len(lowNoise1_good))
medNoise1_good = np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+1], axis = 0) - np.mean(M1[np.int(t_window1[0]):np.int(t_window1[1]),:,task+3], axis = 0)
medNoise1_good_err = np.std(medNoise1_good) / np.sqrt(len(medNoise1_good))
del temp1
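# Note: each contrast above is condition minus control, i.e. task index `task` (low noise)
# or `task+1` (medium noise) minus `task+3`, which is assumed to be the matched noise-only
# control condition, averaged over the analysis window.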
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[temp_vertices,:,:,:],axis=0)
lowNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
lowNoise1_poor_err = np.std(lowNoise1_poor) / np.sqrt(len(lowNoise1_poor))
medNoise1_poor = np.mean(M2[t_window1[0]:t_window1[1],:,task+1], axis = 0) - np.mean(M2[t_window1[0]:t_window1[1],:,task+3], axis = 0)
medNoise1_poor_err = np.std(medNoise1_poor) / np.sqrt(len(medNoise1_poor))
#lowNoise2_poor = np.mean(M2[t_window2[0]:t_window2[1],:,task], axis = 0) - np.mean(M2[t_window2[0]:t_window2[1],:,task+3], axis = 0)
#lowNoise2_poor_err = np.std(lowNoise2_poor) / np.sqrt(len(lowNoise2_poor))
#medNoise2_poor = np.mean(M2[t_window2[0]:t_window2[1],:,task+1], axis = 0) - np.mean(M2[t_window2[0]:t_window2[1],:,task+3], axis = 0)
#medNoise2_poor_err = np.std(medNoise2_poor) / np.sqrt(len(medNoise2_poor))
del temp2
# Dot task
task = 0
temp1 = X11[:,:,good_readers,:]
M1 = np.mean(temp1[temp_vertices,:,:,:],axis=0)
dot_lowNoise1_good = np.mean(M1[dot_window1[0]:dot_window1[1],:,task], axis = 0) - np.mean(M1[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_lowNoise1_good_err = np.std(dot_lowNoise1_good) / np.sqrt(len(dot_lowNoise1_good))
dot_medNoise1_good = np.mean(M1[dot_window1[0]:dot_window1[1],:,task+1], axis = 0) - np.mean(M1[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_medNoise1_good_err = np.std(dot_medNoise1_good) / np.sqrt(len(dot_medNoise1_good))
dot_lowNoise2_early_good = np.mean(M1[dot_early[0]:dot_early[1],:,task], axis = 0) - np.mean(M1[dot_early[0]:dot_early[1],:,task+3], axis = 0)
dot_lowNoise2_early_good_err = np.std(dot_lowNoise2_early_good) / np.sqrt(len(dot_lowNoise2_early_good))
dot_lowNoise2_late_good = np.mean(M1[dot_late[0]:dot_late[1],:,task], axis = 0) - np.mean(M1[dot_late[0]:dot_late[1],:,task+3], axis = 0)
dot_lowNoise2_late_good_err = np.std(dot_lowNoise2_late_good) / np.sqrt(len(dot_lowNoise2_late_good))
del temp1
temp2 = X11[:,:,poor_readers,:]
M2 = np.mean(temp2[temp_vertices,:,:,:],axis=0)
dot_lowNoise1_poor = np.mean(M2[dot_window1[0]:dot_window1[1],:,task], axis = 0) - np.mean(M2[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_lowNoise1_poor_err = np.std(dot_lowNoise1_poor) / np.sqrt(len(dot_lowNoise1_poor))
dot_medNoise1_poor = np.mean(M2[dot_window1[0]:dot_window1[1],:,task+1], axis = 0) - np.mean(M2[dot_window1[0]:dot_window1[1],:,task+3], axis = 0)
dot_medNoise1_poor_err = np.std(dot_medNoise1_poor) / np.sqrt(len(dot_medNoise1_poor))
dot_lowNoise2_early_poor = np.mean(M2[dot_early[0]:dot_early[1],:,task], axis = 0) - np.mean(M2[dot_early[0]:dot_early[1],:,task+3], axis = 0)
dot_lowNoise2_early_poor_err = np.std(dot_lowNoise2_early_poor) / np.sqrt(len(dot_lowNoise2_early_poor))
dot_lowNoise2_late_poor = np.mean(M2[dot_late[0]:dot_late[1],:,task], axis = 0) - np.mean(M2[dot_late[0]:dot_late[1],:,task+3], axis = 0)
dot_lowNoise2_late_poor_err = np.std(dot_lowNoise2_late_poor) / np.sqrt(len(dot_lowNoise2_late_poor))
del temp2
"""
Correlation
"""
aaa = np.array(subs)
temp_meg1 = np.concatenate((dot_lowNoise1_good,dot_lowNoise1_poor))
temp_read = np.concatenate((orig_twre[good_readers],orig_twre[poor_readers]))
temp_brs = np.concatenate((brs[good_readers],brs[poor_readers]))
temp_rf = np.concatenate((rf[good_readers],rf[poor_readers]))
# -*- coding: utf-8 -*-
from __future__ import division, absolute_import, print_function
__all__ = [
'is_matrix', 'is_square', 'is_column', 'is_row', 'is_diag', 'is_symmetirc',
'col', 'row', 'deg2rad', 'rad2deg', 'cart2pol', 'pol2cart', 'cart2sph',
'sph2cart', 'rotate_matrix_rad', 'rotate_matrix_deg', 'ellip_volume',
'ellip_point', 'ellip_uniform', 'cholcov', 'multi_normal', 'disc_random'
]
import numbers
import numpy as np
import scipy.linalg as lg
import scipy.special as sl
from collections.abc import Iterable
def is_matrix(x):
return isinstance(x, np.ndarray) and len(x.shape) == 2
def is_square(x):
return is_matrix(x) and x.shape[0] == x.shape[1]
def is_column(x):
return is_matrix(x) and x.shape[1] == 1
def is_row(x):
return is_matrix(x) and x.shape[0] == 1
def is_diag(x):
return is_matrix(x) and (x == np.diag(x.diagonal())).all()
def is_symmetirc(x):
return not np.any(x - x.T)
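# Minimal usage sketch for the predicates above (illustrative only, not part of the module):
# >>> import numpy as np
# >>> is_diag(np.diag([1.0, 2.0, 3.0]))                      # -> True
# >>> is_symmetirc(np.array([[1.0, 2.0], [2.0, 1.0]]))       # -> True
# >>> is_column(np.zeros((3, 1))), is_row(np.zeros((1, 3)))  # -> (True, True)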
#!/usr/bin/env python
# Copyright 2014-2020 The PySCF Developers. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import numpy as np
from pyscf import lib
from pyscf import ao2mo
from pyscf.cc.rintermediates import _get_vvvv # noqa
from pyscf.cc.ccsd import BLKMIN
# Ref: Gauss and Stanton, J. Chem. Phys. 103, 3561 (1995) Table III
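# The tau intermediates below are the effective doubles amplitudes of Table III,
# tau_ij^ab ~= t2_ij^ab + antisymmetrized t1*r1 products scaled by fac; with r1 = t1,
# fac=1 gives tau and fac=0.5 the "tilde" tau used in the F intermediates.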
def make_tau(t2, t1, r1, fac=1, out=None):
t1a, t1b = t1
r1a, r1b = r1
tau1aa = make_tau_aa(t2[0], t1a, r1a, fac, out)
tau1bb = make_tau_aa(t2[2], t1b, r1b, fac, out)
tau1ab = make_tau_ab(t2[1], t1, r1, fac, out)
return tau1aa, tau1ab, tau1bb
def make_tau_aa(t2aa, t1a, r1a, fac=1, out=None):
tau1aa = np.einsum('ia,jb->ijab', t1a, r1a)
tau1aa-= np.einsum('ia,jb->jiab', t1a, r1a)
tau1aa = tau1aa - tau1aa.transpose(0,1,3,2)
tau1aa *= fac * .5
tau1aa += t2aa
return tau1aa
def make_tau_ab(t2ab, t1, r1, fac=1, out=None):
t1a, t1b = t1
r1a, r1b = r1
tau1ab = np.einsum('ia,jb->ijab', t1a, r1b)
tau1ab+= np.einsum('ia,jb->ijab', r1a, t1b)
tau1ab *= fac * .5
tau1ab += t2ab
return tau1ab
def Foo(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tilaa, tilab, tilbb = make_tau(t2, t1, t1, fac=0.5)
Fooa = lib.einsum('inef,menf->mi', tilaa, eris_ovov)
Fooa += lib.einsum('iNeF,meNF->mi', tilab, eris_ovOV)
Foob = lib.einsum('inef,menf->mi', tilbb, eris_OVOV)
Foob += lib.einsum('nIfE,nfME->MI', tilab, eris_ovOV)
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
Fooa += np.einsum('ne,nemi->mi', t1a, eris_ovoo)
Fooa -= np.einsum('ne,meni->mi', t1a, eris_ovoo)
Fooa += np.einsum('NE,NEmi->mi', t1b, eris_OVoo)
Foob += np.einsum('ne,nemi->mi', t1b, eris_OVOO)
Foob -= np.einsum('ne,meni->mi', t1b, eris_OVOO)
Foob += np.einsum('ne,neMI->MI', t1a, eris_ovOO)
fooa = eris.focka[:nocca,:nocca]
foob = eris.fockb[:noccb,:noccb]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
Fova, Fovb = Fov(t1, t2, eris)
Fooa += fooa + 0.5*lib.einsum('me,ie->mi', Fova+fova, t1a)
Foob += foob + 0.5*lib.einsum('me,ie->mi', Fovb+fovb, t1b)
return Fooa, Foob
def Fvv(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
Fvva = 0
Fvvb = 0
tauaa, tauab, taubb = make_tau(t2, t1, t1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.Fvva = np.einsum('mf,mfae->ae', t1a, ovvv)
mem_now = lib.current_memory()[0]
max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
Fvva += np.einsum('mf,mfae->ae', t1a[p0:p1], ovvv)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.Fvvb = np.einsum('mf,mfae->ae', t1b, OVVV)
#:self.wOVVO = lib.einsum('jf,mebf->mbej', t1b, OVVV)
#:self.wOVOO = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
Fvvb += np.einsum('mf,mfae->ae', t1b[p0:p1], OVVV)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.Fvvb += np.einsum('mf,mfAE->AE', t1a, eris_ovVV)
#:self.woVvO = lib.einsum('JF,meBF->mBeJ', t1b, eris_ovVV)
#:self.woVVo = lib.einsum('jf,mfBE->mBEj',-t1a, eris_ovVV)
#:self.woVoO = 0.5 * lib.einsum('meBF,iJeF->mBiJ', eris_ovVV, tauab)
#:self.woVoO += 0.5 * lib.einsum('mfBE,iJfE->mBiJ', eris_ovVV, tauab)
blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
Fvvb += np.einsum('mf,mfAE->AE', t1a[p0:p1], ovVV)
ovVV = None
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.Fvva += np.einsum('MF,MFae->ae', t1b, eris_OVvv)
#:self.wOvVo = lib.einsum('jf,MEbf->MbEj', t1a, eris_OVvv)
#:self.wOvvO = lib.einsum('JF,MFbe->MbeJ',-t1b, eris_OVvv)
#:self.wOvOo = 0.5 * lib.einsum('MEbf,jIfE->MbIj', eris_OVvv, tauab)
#:self.wOvOo += 0.5 * lib.einsum('MFbe,jIeF->MbIj', eris_OVvv, tauab)
blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
Fvva += np.einsum('MF,MFae->ae', t1b[p0:p1], OVvv)
OVvv = None
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
tilaa, tilab, tilbb = make_tau(t2, t1, t1, fac=0.5)
Fvva -= lib.einsum('mnaf,menf->ae', tilaa, eris_ovov)
Fvva -= lib.einsum('mNaF,meNF->ae', tilab, eris_ovOV)
Fvvb -= lib.einsum('mnaf,menf->ae', tilbb, eris_OVOV)
Fvvb -= lib.einsum('nMfA,nfME->AE', tilab, eris_ovOV)
fvva = eris.focka[nocca:,nocca:]
fvvb = eris.fockb[noccb:,noccb:]
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
Fova, Fovb = Fov(t1, t2, eris)
Fvva += fvva - 0.5*lib.einsum('me,ma->ae', Fova+fova, t1a)
Fvvb += fvvb - 0.5*lib.einsum('me,ma->ae', Fovb+fovb, t1b)
return Fvva, Fvvb
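# Fov builds the occupied-virtual F intermediate of Table III,
# F_me = f_me + sum_nf <mn||ef> t_n^f, assembled separately for the alpha and beta
# spin blocks (antisymmetrized ovov/OVOV integrals plus the mixed ovOV block).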
def Fov(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
fova = eris.focka[:nocca,nocca:]
fovb = eris.fockb[:noccb,noccb:]
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
Fova = np.einsum('nf,menf->me', t1a, ovov)
Fova+= np.einsum('NF,meNF->me', t1b, eris_ovOV)
Fova += fova
Fovb = np.einsum('nf,menf->me', t1b, OVOV)
Fovb+= np.einsum('nf,nfME->ME', t1a, eris_ovOV)
Fovb += fovb
return Fova, Fovb
def Woooo(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
woooo = lib.einsum('je,nemi->minj', t1a, ovoo)
wOOOO = lib.einsum('je,nemi->minj', t1b, OVOO)
wooOO = lib.einsum('JE,NEmi->miNJ', t1b, eris_OVoo)
woOOo = lib.einsum('je,meNI->mINj',-t1a, eris_ovOO)
woooo += np.asarray(eris.oooo)
wOOOO += np.asarray(eris.OOOO)
wooOO += np.asarray(eris.ooOO)
woooo = woooo - woooo.transpose(0,3,2,1)
wOOOO = wOOOO - wOOOO.transpose(0,3,2,1)
wooOO = wooOO - woOOo.transpose(0,3,2,1)
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
woooo += 0.5*lib.einsum('ijef,menf->minj', tauaa, ovov)
wOOOO += 0.5*lib.einsum('ijef,menf->minj', taubb, OVOV)
wooOO += lib.einsum('iJeF,meNF->miNJ', tauab, eris_ovOV)
wOOoo = None
return woooo, wooOO, wOOoo, wOOOO
def Wooov(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
dtype = np.result_type(t1a, t1b, t2aa, t2ab, t2bb)
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
wooov = np.array( ovoo.transpose(2,3,0,1), dtype=dtype)
wOOOV = np.array( OVOO.transpose(2,3,0,1), dtype=dtype)
wooOV = np.array(eris_OVoo.transpose(2,3,0,1), dtype=dtype)
wOOov = np.array(eris_ovOO.transpose(2,3,0,1), dtype=dtype)
eris_ovoo = eris_OVOO = eris_ovOO = eris_OVoo = None
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
wooov += lib.einsum('if,mfne->mine', t1a, ovov)
wOOOV += lib.einsum('if,mfne->mine', t1b, OVOV)
wooOV += lib.einsum('if,mfNE->miNE', t1a, eris_ovOV)
wOOov += lib.einsum('IF,neMF->MIne', t1b, eris_ovOV)
return wooov, wooOV, wOOov, wOOOV
def Woovo(t1, t2, eris):
t1a, t1b = t1
t2aa, t2ab, t2bb = t2
nocca, noccb, nvira, nvirb = t2ab.shape
dtype = np.result_type(t1a, t1b, t2aa, t2ab, t2bb)
wovoo = np.zeros((nocca,nvira,nocca,nocca), dtype=dtype)
wOVOO = np.zeros((noccb,nvirb,noccb,noccb), dtype=dtype)
woVoO = np.zeros((nocca,nvirb,nocca,noccb), dtype=dtype)
wOvOo = np.zeros((noccb,nvira,noccb,nocca), dtype=dtype)
tauaa, tauab, taubb = make_tau(t2, t1, t1)
#:eris_ovvv = lib.unpack_tril(np.asarray(eris.ovvv).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvira,nvira)
#:ovvv = eris_ovvv - eris_ovvv.transpose(0,3,2,1)
#:self.wovoo = 0.5 * lib.einsum('mebf,ijef->mbij', eris_ovvv, tauaa)
#:self.wovoo -= 0.5 * lib.einsum('mfbe,ijef->mbij', eris_ovvv, tauaa)
mem_now = lib.current_memory()[0]
max_memory = max(0, lib.param.MAX_MEMORY - mem_now)
blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira**3*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovvv = eris.get_ovvv(slice(p0,p1)) # ovvv = eris.ovvv[p0:p1]
ovvv = ovvv - ovvv.transpose(0,3,2,1)
wovoo[p0:p1] = 0.5 * lib.einsum('mebf,ijef->mbij', ovvv, tauaa)
ovvv = None
#:eris_OVVV = lib.unpack_tril(np.asarray(eris.OVVV).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvirb,nvirb)
#:OVVV = eris_OVVV - eris_OVVV.transpose(0,3,2,1)
#:self.wOVOO = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb**3*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVVV = eris.get_OVVV(slice(p0,p1)) # OVVV = eris.OVVV[p0:p1]
OVVV = OVVV - OVVV.transpose(0,3,2,1)
wOVOO[p0:p1] = 0.5 * lib.einsum('mebf,ijef->mbij', OVVV, taubb)
OVVV = None
#:eris_ovVV = lib.unpack_tril(np.asarray(eris.ovVV).reshape(nocca*nvira,-1)).reshape(nocca,nvira,nvirb,nvirb)
#:self.Fvvb += np.einsum('mf,mfAE->AE', t1a, eris_ovVV)
#:self.woVvO = lib.einsum('JF,meBF->mBeJ', t1b, eris_ovVV)
#:self.woVVo = lib.einsum('jf,mfBE->mBEj',-t1a, eris_ovVV)
#:self.woVoO = 0.5 * lib.einsum('meBF,iJeF->mBiJ', eris_ovVV, tauab)
#:self.woVoO += 0.5 * lib.einsum('mfBE,iJfE->mBiJ', eris_ovVV, tauab)
blksize = min(nocca, max(BLKMIN, int(max_memory*1e6/8/(nvira*nvirb**2*3))))
for p0,p1 in lib.prange(0, nocca, blksize):
ovVV = eris.get_ovVV(slice(p0,p1)) # ovVV = eris.ovVV[p0:p1]
woVoO[p0:p1] = 0.5 * lib.einsum('meBF,iJeF->mBiJ', ovVV, tauab)
woVoO[p0:p1]+= 0.5 * lib.einsum('mfBE,iJfE->mBiJ', ovVV, tauab)
ovVV = None
#:eris_OVvv = lib.unpack_tril(np.asarray(eris.OVvv).reshape(noccb*nvirb,-1)).reshape(noccb,nvirb,nvira,nvira)
#:self.Fvva += np.einsum('MF,MFae->ae', t1b, eris_OVvv)
#:self.wOvVo = lib.einsum('jf,MEbf->MbEj', t1a, eris_OVvv)
#:self.wOvvO = lib.einsum('JF,MFbe->MbeJ',-t1b, eris_OVvv)
#:self.wOvOo = 0.5 * lib.einsum('MEbf,jIfE->MbIj', eris_OVvv, tauab)
#:self.wOvOo += 0.5 * lib.einsum('MFbe,jIeF->MbIj', eris_OVvv, tauab)
blksize = min(noccb, max(BLKMIN, int(max_memory*1e6/8/(nvirb*nvira**2*3))))
for p0, p1 in lib.prange(0, noccb, blksize):
OVvv = eris.get_OVvv(slice(p0,p1)) # OVvv = eris.OVvv[p0:p1]
wOvOo[p0:p1] = 0.5 * lib.einsum('MEbf,jIfE->MbIj', OVvv, tauab)
wOvOo[p0:p1]+= 0.5 * lib.einsum('MFbe,jIeF->MbIj', OVvv, tauab)
OVvv = None
eris_ovov = np.asarray(eris.ovov)
eris_OVOV = np.asarray(eris.OVOV)
eris_ovOV = np.asarray(eris.ovOV)
ovov = eris_ovov - eris_ovov.transpose(0,3,2,1)
OVOV = eris_OVOV - eris_OVOV.transpose(0,3,2,1)
tilaa, tilab, tilbb = make_tau(t2, t1, t1, fac=0.5)
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_OVoo = np.asarray(eris.OVoo)
eris_ovOO = np.asarray(eris.ovOO)
ovoo = eris_ovoo - eris_ovoo.transpose(2,1,0,3)
OVOO = eris_OVOO - eris_OVOO.transpose(2,1,0,3)
tmpaa = lib.einsum('nemi,jnbe->mbij', ovoo, t2aa)
tmpaa+= lib.einsum('NEmi,jNbE->mbij', eris_OVoo, t2ab)
tmpbb = lib.einsum('nemi,jnbe->mbij', OVOO, t2bb)
tmpbb+= lib.einsum('neMI,nJeB->MBIJ', eris_ovOO, t2ab)
woVoO += lib.einsum('nemi,nJeB->mBiJ', ovoo, t2ab)
woVoO += lib.einsum('NEmi,JNBE->mBiJ', eris_OVoo, t2bb)
woVoO -= lib.einsum('meNI,jNeB->mBjI', eris_ovOO, t2ab)
wOvOo += lib.einsum('NEMI,jNbE->MbIj', OVOO, t2ab)
wOvOo += lib.einsum('neMI,jnbe->MbIj', eris_ovOO, t2aa)
wOvOo -= lib.einsum('MEni,nJbE->MbJi', eris_OVoo, t2ab)
wovoo += tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO += tmpbb - tmpbb.transpose(0,1,3,2)
tmp1aa = lib.einsum('njbf,menf->mbej', t2aa, ovov)
tmp1aa-= lib.einsum('jNbF,meNF->mbej', t2ab, eris_ovOV)
tmp1bb = lib.einsum('njbf,menf->mbej', t2bb, OVOV)
tmp1bb-= lib.einsum('nJfB,nfME->MBEJ', t2ab, eris_ovOV)
tmp1ab = lib.einsum('NJBF,meNF->mBeJ', t2bb, eris_ovOV)
tmp1ab-= lib.einsum('nJfB,menf->mBeJ', t2ab, ovov)
tmp1ba = lib.einsum('njbf,nfME->MbEj', t2aa, eris_ovOV)
tmp1ba-= lib.einsum('jNbF,MENF->MbEj', t2ab, OVOV)
tmp1abba =-lib.einsum('jNfB,mfNE->mBEj', t2ab, eris_ovOV)
tmp1baab =-lib.einsum('nJbF,neMF->MbeJ', t2ab, eris_ovOV)
tmpaa = lib.einsum('ie,mbej->mbij', t1a, tmp1aa)
tmpbb = lib.einsum('ie,mbej->mbij', t1b, tmp1bb)
tmpab = lib.einsum('ie,mBeJ->mBiJ', t1a, tmp1ab)
tmpab-= lib.einsum('IE,mBEj->mBjI', t1b, tmp1abba)
tmpba = lib.einsum('IE,MbEj->MbIj', t1b, tmp1ba)
tmpba-= lib.einsum('ie,MbeJ->MbJi', t1a, tmp1baab)
wovoo -= tmpaa - tmpaa.transpose(0,1,3,2)
wOVOO -= tmpbb - tmpbb.transpose(0,1,3,2)
woVoO -= tmpab
wOvOo -= tmpba
eris_ovov = eris_OVOV = eris_ovOV = None
eris_ovoo = np.asarray(eris.ovoo)
eris_OVOO = np.asarray(eris.OVOO)
eris_ovOO = np.asarray(eris.ovOO)
eris_OVoo = np.asarray(eris.OVoo)
wovoo += eris_ovoo.transpose(3,1,2,0) - eris_ovoo.transpose(2,1,0,3)
wOVOO += eris_OVOO.transpose(3,1,2,0) - eris_OVOO.transpose(2,1,0,3)
woVoO += eris_OVoo.transpose(3,1,2,0)
wOvOo += eris_ovOO.transpose(3,1,2,0)
eris_ovoo = eris_OVOO = eris_ovOO = eris_OVoo = None
eris_ovvo = np.asarray(eris.ovvo)
eris_OVVO = np.asarray(eris.OVVO)
eris_OVvo = np.asarray(eris.OVvo)
eris_ovVO = np.asarray(eris.ovVO)
eris_oovv = np.asarray(eris.oovv)
eris_OOVV = np.asarray(eris.OOVV)
eris_OOvv = np.asarray(eris.OOvv)
eris_ooVV = np.asarray(eris.ooVV)
#TODO: Determine optimal alpha-reweighting for weighted cross entropy and for focal loss
#TODO: Add weighting to multi-class dice
#TODO: Fix sigmoid cross-entropy for categorical outputs
#TODO: Add more custom metrics (HD95, detection rate, etc.)
#TODO: Fix classification loss(es) to work with mirrored strategy (https://www.tensorflow.org/tutorials/distribute/custom_training)
import numpy as np
import tensorflow as tf
from itertools import product
from sorcery import unpack_keys
from model_callbacks import DecayAlphaParameter
#binary cross-entropy for classification using logits
def binary_cross_entropy_classification(y_true, y_pred):
#re-weight cross-entropy loss using desired sample weighting
if y_true.shape[-1] == 1:
y_true_onehot = one_hot_encode(tf.squeeze(y_true, axis=-1), LossParameters.num_classes)
else:
y_true_onehot = one_hot_encode(y_true[...,0], LossParameters.num_classes)
sample_weight = tf.reduce_sum(tf.multiply(y_true_onehot, LossParameters.weights[:,0]), axis=-1)
return tf.losses.BinaryCrossentropy(from_logits=True)(y_true, y_pred, sample_weight=sample_weight)
#binary accuracy metric for classification using logits
def binary_accuracy(y_true, y_pred):
return tf.reduce_mean(tf.metrics.binary_accuracy(y_true, y_pred, threshold=0.0))
#binary accuracy metric for classification using logits (specifically for use as validation monitor since it will average over all patient crops)
def binary_accuracy_batch(y_true, y_pred):
y_true_batch = tf.reduce_mean(y_true, axis=0)
y_pred_batch = tf.reduce_mean(y_pred, axis=0)
return tf.reduce_mean(tf.metrics.binary_accuracy(y_true_batch, y_pred_batch, threshold=0.0))
#multi-class sorensen-dice coefficient metric
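# Soft (differentiable) Dice computed from one-hot ground truth G and predicted
# probabilities P: dice_c = (2 * sum(G_c * P_c) + smooth) / (sum(G_c) + sum(P_c) + smooth)
# per class c; the smooth term guards against empty classes and the class average is returned.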
def dice_coef_metric(y_true, y_pred):
#activate outputs
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
y_true_onehot, y_pred_prob = activate_ouputs(y_true, y_pred)
#can treat batch as a "pseudo-volume", or collect dice metrics on each volume in the batch individually
axes_to_sum = find_axes_to_sum(y_true_onehot)
#calculate dice metric per class
intersection = tf.reduce_sum(tf.multiply(y_true_onehot, y_pred_prob), axis=axes_to_sum)
union = tf.add(tf.reduce_sum(y_true_onehot, axis=axes_to_sum), tf.reduce_sum(y_pred_prob, axis=axes_to_sum))
numerator = tf.add(tf.multiply(intersection, 2.), LossParameters.smooth)
denominator = tf.add(union, LossParameters.smooth)
dice_metric_per_class = tf.divide(numerator, denominator)
#return average dice metric over classes (choosing to use or not use the background class)
return calculate_final_dice_metric(dice_metric_per_class)
#multi-class hard sorensen-dice coefficient metric (only should be used as metric, not as loss)
def hard_dice_coef_metric(y_true, y_pred):
#activate outputs
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
y_true_onehot, y_pred_prob = activate_ouputs(y_true, y_pred)
#can treat batch as a "pseudo-volume", or collect dice metrics on each volume in the batch individually
axes_to_sum = find_axes_to_sum(y_true_onehot)
#argmax to find predicted class and then one-hot the predicted vector
y_pred_onehot = one_hot_encode(tf.math.argmax(y_pred_prob, axis=-1), LossParameters.num_classes)
#calculate dice metric per class
intersection = tf.reduce_sum(tf.multiply(y_true_onehot, y_pred_onehot), axis=axes_to_sum)
union = tf.add(tf.reduce_sum(y_true_onehot, axis=axes_to_sum), tf.reduce_sum(y_pred_onehot, axis=axes_to_sum))
numerator = tf.multiply(intersection, 2.)
denominator = union
dice_metric_per_class = tf.divide(numerator, denominator)
#replace any NaN values with 1.0 (NaN only occurs when both the ground truth and predicted labels are empty, which should give a true dice score of 1.0)
dice_metric_per_class = tf.where(tf.math.is_nan(dice_metric_per_class), tf.ones_like(dice_metric_per_class), dice_metric_per_class)
#return average dice metric over classes (choosing to use or not use the background class)
return calculate_final_dice_metric(dice_metric_per_class)
#multi-class sorensen-dice coefficient loss
def dice_coef_loss(y_true, y_pred):
return tf.subtract(1., dice_coef_metric(y_true, y_pred))
#multi-class jaccard similarity coefficient metric
def jaccard_coef_metric(y_true, y_pred):
#activate outputs
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
y_true_onehot, y_pred_prob = activate_ouputs(y_true, y_pred)
#can treat batch as a "pseudo-volume", or collect dice metrics on each volume in the batch individually
axes_to_sum = find_axes_to_sum(y_true_onehot)
intersection = tf.reduce_sum(tf.multiply(y_true_onehot, y_pred_prob), axis=axes_to_sum)
union = tf.add(tf.reduce_sum(y_true_onehot, axis=axes_to_sum), tf.reduce_sum(y_pred_prob, axis=axes_to_sum))
numerator = tf.add(intersection, LossParameters.smooth)
denominator = tf.add(tf.subtract(union, intersection), LossParameters.smooth)
jaccard_metric_per_class = tf.divide(numerator, denominator)
#return average jaccard metric over classes (choosing to use or not use the background class)
return calculate_final_dice_metric(jaccard_metric_per_class)
#jaccard similarity coefficient loss
def jaccard_coef_loss(y_true, y_pred):
return tf.subtract(1., jaccard_coef_metric(y_true, y_pred))
#weighted categorical cross entropy loss
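# The per-voxel cross entropy below is scaled element-wise by weight_matrix(...), which is
# assumed to apply the alpha/class weights from LossParameters; Keras reduces the returned
# per-voxel matrix to a scalar loss by averaging.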
def weighted_cross_entropy_loss(y_true, y_pred):
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
y_true_onehot, y_pred_prob, cross_entropy_matrix = cross_entropy_loss_matrix(y_true, y_pred)
return tf.multiply(weight_matrix(y_true_onehot, y_pred_prob), cross_entropy_matrix)
#weighted binary cross entropy with boundary loss
def weighted_boundary_loss(y_true, y_pred):
return tf.multiply(y_true[...,1], weighted_cross_entropy_loss(y_true, y_pred))
#categorical focal loss
def focal_loss(y_true, y_pred):
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
y_true_onehot, y_pred_prob, cross_entropy_matrix = cross_entropy_loss_matrix(y_true, y_pred)
alpha_term = weight_matrix(y_true_onehot, y_pred_prob)
gamma_term = focal_weight(y_true_onehot, y_pred_prob)
return tf.multiply(tf.multiply(alpha_term, gamma_term), cross_entropy_matrix)
#combined dice loss and weighted cross entropy loss
def joint_dice_cross_entropy_loss(y_true, y_pred):
loss_contribution1 = tf.multiply(DecayAlphaParameter.alpha1, dice_coef_loss(y_true, y_pred))
loss_contribution2 = tf.multiply(DecayAlphaParameter.alpha2, weighted_cross_entropy_loss(y_true, y_pred))
return tf.add(loss_contribution1, loss_contribution2)
#combined dice loss and binary boundary loss
def joint_dice_boundary_loss(y_true, y_pred):
loss_contribution1 = tf.multiply(DecayAlphaParameter.alpha1, dice_coef_loss(y_true, y_pred))
loss_contribution2 = tf.multiply(DecayAlphaParameter.alpha2, weighted_boundary_loss(y_true, y_pred))
return tf.add(loss_contribution1, loss_contribution2)
#combined dice loss and focal loss
def joint_dice_focal_loss(y_true, y_pred):
loss_contribution1 = tf.multiply(DecayAlphaParameter.alpha1, dice_coef_loss(y_true, y_pred))
loss_contribution2 = tf.multiply(DecayAlphaParameter.alpha2, focal_loss(y_true, y_pred))
return tf.add(loss_contribution1, loss_contribution2)
#combined dice loss and focal loss using adaptive weighting based on current dice score
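# The weighting below is a logistic gate w = 1 / (1 + exp(-50 * (dice_loss - 0.35))):
# while the batch dice loss is high (early in training) w -> 1 and the dice term dominates;
# once the dice loss drops below ~0.35, w -> 0 and the focal term takes over.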
def adaptive_dice_focal_loss(y_true, y_pred):
batch_dice_loss = dice_coef_loss(y_true, y_pred)
batch_focal_loss = focal_loss(y_true, y_pred)
weighting_value = tf.divide(1., tf.add(1., tf.math.exp(tf.multiply(-50., tf.subtract(batch_dice_loss, .35)))))
loss_contribution1 = tf.multiply(weighting_value, batch_dice_loss)
loss_contribution2 = tf.multiply(tf.subtract(1., weighting_value), batch_focal_loss)
return tf.add(loss_contribution1, loss_contribution2)
#dice metric for brats segmentation (converts sparse ground truth into overlapping set of regions to be used with sigmoid)
def brats_dice_coef_metric(y_true, y_pred):
#activate outputs
y_pred_prob = sigmoid_probability(y_pred)
if y_true.shape[-1] == 1:
y_true_onehot_orig = one_hot_encode(tf.squeeze(y_true, axis=-1), 4)
else:
y_true_onehot_orig = one_hot_encode(y_true[...,0], 4)
#convert ground truth to proper region labels
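# BraTS regions are nested: enhancing tumor (label 3) is contained in tumor core (labels 1+3),
# which is contained in whole tumor (labels 1+2+3); each sigmoid output channel predicts one region.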
background = y_true_onehot_orig[...,0]
enhancing_tumor = y_true_onehot_orig[...,3]
tumor_core = tf.add(y_true_onehot_orig[...,1], enhancing_tumor)
whole_tumor = tf.add(y_true_onehot_orig[...,2], tumor_core)
y_true_onehot = tf.stack([background, enhancing_tumor, tumor_core, whole_tumor], axis=-1)
#can treat batch as a "pseudo-volume", or collect dice metrics on each volume in the batch individually
axes_to_sum = find_axes_to_sum(y_true_onehot)
#add "fake" background channel to predicted
y_pred_prob = tf.concat([tf.expand_dims(background, axis=-1), y_pred_prob], axis=-1)
#calculate dice metric per class
intersection = tf.reduce_sum(tf.multiply(y_true_onehot, y_pred_prob), axis=axes_to_sum)
union = tf.add(tf.reduce_sum(y_true_onehot, axis=axes_to_sum), tf.reduce_sum(y_pred_prob, axis=axes_to_sum))
numerator = tf.add(tf.multiply(intersection, 2.), LossParameters.smooth)
denominator = tf.add(union, LossParameters.smooth)
dice_metric_per_class = tf.divide(numerator, denominator)
#return average dice metric over classes (choosing to use or not use the background class)
return calculate_final_dice_metric(dice_metric_per_class)
#dice coefficient loss for use with brats dataset
def brats_dice_coef_loss(y_true, y_pred):
return tf.subtract(1., brats_dice_coef_metric(y_true, y_pred))
#hard dice metric for brats segmentation (converts sparse ground truth into overlapping set of regions to be used with sigmoid)
def hard_brats_dice_coef_metric(y_true, y_pred):
#activate outputs
y_pred_prob = sigmoid_probability(y_pred)
if y_true.shape[-1] == 1:
y_true_onehot_orig = one_hot_encode(tf.squeeze(y_true, axis=-1), 4)
else:
y_true_onehot_orig = one_hot_encode(y_true[...,0], 4)
#convert ground truth to proper region labels
background = y_true_onehot_orig[...,0]
enhancing_tumor = y_true_onehot_orig[...,3]
tumor_core = tf.add(y_true_onehot_orig[...,1], enhancing_tumor)
whole_tumor = tf.add(y_true_onehot_orig[...,2], tumor_core)
y_true_onehot = tf.stack([background, enhancing_tumor, tumor_core, whole_tumor], axis=-1)
#can treat batch as a "pseudo-volume", or collect dice metrics on each volume in the batch individually
axes_to_sum = find_axes_to_sum(y_true_onehot)
#add "fake" background channel to predicted
y_pred_prob = tf.concat([tf.expand_dims(background, axis=-1), y_pred_prob], axis=-1)
#calculate dice metric per class
y_pred_binary = tf.cast(tf.greater_equal(y_pred_prob, 0.5), tf.float32)
intersection = tf.reduce_sum(tf.multiply(y_true_onehot, y_pred_binary), axis=axes_to_sum)
union = tf.add(tf.reduce_sum(y_true_onehot, axis=axes_to_sum), tf.reduce_sum(y_pred_binary, axis=axes_to_sum))
numerator = tf.multiply(intersection, 2.)
denominator = union
dice_metric_per_class = tf.divide(numerator, denominator)
#replace any NaN values with 1.0 (NaN only occurs when both the ground truth and predicted labels are empty, which should give a true dice score of 1.0)
dice_metric_per_class = tf.where(tf.math.is_nan(dice_metric_per_class), tf.ones_like(dice_metric_per_class), dice_metric_per_class)
#return average dice metric over classes (choosing to use or not use the background class)
return calculate_final_dice_metric(dice_metric_per_class)
#dice metric for a single brats region (y_true_region below currently selects the tumor core)
def brats_region_dice_coef_metric(y_true, y_pred):
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
enhancing_tumor = tf.cast(y_true == 3, tf.float32)
tumor_core = tf.add(tf.cast(y_true == 1, tf.float32), enhancing_tumor)
whole_tumor = tf.add(tf.cast(y_true == 2, tf.float32), tumor_core)
y_true_region = tumor_core
return dice_coef_metric(y_true_region, y_pred)
#dice loss for the single brats region selected above
def brats_region_dice_coef_loss(y_true, y_pred):
return tf.subtract(1., brats_region_dice_coef_metric(y_true, y_pred))
#combined single-region brats dice loss and weighted boundary loss
def brats_region_dice_and_boundary_loss(y_true, y_pred):
loss1 = tf.subtract(1., brats_region_dice_coef_metric(y_true, y_pred))
loss2 = weighted_boundary_loss(y_true, y_pred)
return loss1 + loss2
#hard dice metric for a single brats region (currently the tumor core)
def hard_brats_region_dice_coef_metric(y_true, y_pred):
if y_true.shape[-1] != 1:
y_true = tf.expand_dims(y_true[...,0], -1)
enhancing_tumor = tf.cast(y_true == 3, tf.float32)
tumor_core = tf.add(tf.cast(y_true == 1, tf.float32), enhancing_tumor)
whole_tumor = tf.add(tf.cast(y_true == 2, tf.float32), tumor_core)
y_true_region = tumor_core
return hard_dice_coef_metric(y_true_region, y_pred)
#joint dice and focal loss for brats segmentation (messy but works)
def brats_dice_and_focal_loss(y_true, y_pred):
#activate outputs
y_pred_prob = sigmoid_probability(y_pred)
y_true_onehot_orig = one_hot_encode(tf.squeeze(y_true, axis=-1), 4)
#convert ground truth to proper region labels
background = y_true_onehot_orig[...,0]
enhancing_tumor = y_true_onehot_orig[...,3]
tumor_core = tf.add(y_true_onehot_orig[...,1], enhancing_tumor)
whole_tumor = tf.add(y_true_onehot_orig[...,2], tumor_core)
y_true_onehot = tf.stack([background, enhancing_tumor, tumor_core, whole_tumor], axis=-1)
#can treat batch as a "pseudo-volume", or collect dice metrics on each volume in the batch individually
axes_to_sum = find_axes_to_sum(y_true_onehot)
#add "fake" background channel to predicted
y_pred_prob = tf.concat([tf.expand_dims(background, axis=-1), y_pred_prob], axis=-1)
#calculate dice metric per class
intersection = tf.reduce_sum(tf.multiply(y_true_onehot, y_pred_prob), axis=axes_to_sum)
union = tf.add(tf.reduce_sum(y_true_onehot, axis=axes_to_sum), tf.reduce_sum(y_pred_prob, axis=axes_to_sum))
numerator = tf.add(tf.multiply(intersection, 2.), LossParameters.smooth)
denominator = tf.add(union, LossParameters.smooth)
dice_metric_per_class = tf.divide(numerator, denominator)
#return average dice loss over classes (choosing to use or not use the background class)
if LossParameters.dice_over_batch == False:
dice_metric_per_class = tf.reduce_mean(dice_metric_per_class, axis=0)
dice_loss = tf.subtract(1., dice_metric_per_class[1:])
#loop over classes and get binary focal loss of each
for i in range(0,3):
y_true_class = tf.expand_dims(y_true_onehot[...,i+1], axis=-1)
y_pred_class = tf.expand_dims(y_pred[...,i], axis=-1)
y_pred_sigmoid = sigmoid_probability(y_pred_class)
y_pred_class_prob = tf.concat([tf.subtract(1., y_pred_sigmoid), y_pred_sigmoid], axis=-1)
y_true_class_onehot = one_hot_encode(tf.squeeze(y_true_class, axis=-1), 2)
#cross-entropy
cross_entropy_matrix = tf.losses.binary_crossentropy(y_true_class, y_pred_class, from_logits=True)
#alpha term
alpha_term = tf.zeros_like(y_pred_class_prob[...,0])
y_pred_class_onehot = one_hot_encode(tf.math.argmax(y_pred_class_prob, axis=-1), 2)
#enhancing tumor
if i == 0:
temp_weights = np.array([[2., 2.], [1., 1.]], dtype=np.float32)
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
from __future__ import unicode_literals
import numpy as np # type: ignore
import onnx
from ..base import Base
from . import expect
class Clip(Base):
@staticmethod
def export(): # type: () -> None
node = onnx.helper.make_node(
'Clip',
inputs=['x'],
outputs=['y'],
min=-1.0,
max=1.0
)
x = np.array([-2, 0, 2]).astype(np.float32)
y = np.clip(x, -1, 1) # expected output [-1., 0., 1.]
expect(node, inputs=[x], outputs=[y],
name='test_clip_example')
x = np.random.randn(3, 4, 5).astype(np.float32)
y = np.clip(x, -1.0, 1.0)
expect(node, inputs=[x], outputs=[y],
name='test_clip')
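# Note: these cases use the attribute form of Clip; from opset 11 onward 'min'/'max' are
# assumed to be optional inputs rather than attributes and would need a different test setup.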
node = onnx.helper.make_node(
'Clip',
inputs=['x'],
outputs=['y'],
min=-5.0,
max=5.0,
)
x = np.array([-1, 0, 1]).astype(np.float32)
y = np.array([-1, 0, 1]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_clip_inbounds')
x = np.array([-6, 0, 6]).astype(np.float32)
y = np.array([-5, 0, 5]).astype(np.float32)
expect(node, inputs=[x], outputs=[y],
name='test_clip_outbounds')
x = np.array([-1, 0, 6]).astype(np.float32)
y = np.array([-1, 0, 5]).astype(np.float32)
# -*- coding: utf-8 -*-
from itertools import product
import numpy as np
import pytest
from numpy.testing import assert_allclose, assert_equal
from bounded_rand_walkers.cpp import bound_map
from bounded_rand_walkers.utils import (
DelaunayArray,
approx_edges,
cluster_indices,
get_centres,
in_bounds,
label,
match_ref,
normalise,
)
def test_get_centres():
assert_allclose(get_centres([1, 2, 3, 4]), [1.5, 2.5, 3.5])
assert_allclose(get_centres([-1, 1, 3, 4]), [0, 2, 3.5])
@pytest.mark.parametrize(
"x,exp_indices,exp_labelled",
[
(
np.array([1, 1, 0, 0, 1, 0]),
np.array([[0, 2], [4, 5]]),
np.array([1, 1, 0, 0, 2, 0]),
),
(
np.array([0, 1, 0, 0, 1, 1]),
np.array([[1, 2], [4, 6]]),
np.array([0, 1, 0, 0, 2, 2]),
),
(
np.array([0, 1, 0, 0, 1, 0]),
np.array([[1, 2], [4, 5]]),
np.array([0, 1, 0, 0, 2, 0]),
# -*- coding: utf-8 -*-
"""
Created on 4 Jun 2021
@author: Alexandre
"""
import numpy as np
from pyro.dynamic import statespace
###############################################################################
class SingleMass( statespace.StateSpaceSystem ):
"""Single Mass with linear spring and damper
Attributes
----------
"""
############################
def __init__(self, m=1, k=2, b=0):
""" """
# params
self.m = m
self.k = k
self.b = b
self.l1 = 2
self.l2 = 1
# Matrix ABCD
self.compute_ABCD()
# initialize standard params
statespace.StateSpaceSystem.__init__( self, self.A, self.B, self.C, self.D)
# Name and labels
self.name = 'Linear-Spring-Damper'
self.input_label = [ 'Force']
self.input_units = [ '[N]']
self.output_label = ['Position']
self.output_units = ['[m]']
self.state_label = [ 'Position','Velocity']
self.state_units = [ '[m]', '[m/s]']
self.linestyle = '-'
###########################################################################
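# State-space realization of m*x'' + b*x' + k*x = F with state [x, x']:
# A = [[0, 1], [-k/m, -b/m]], B = [[0], [1/m]], C = [1, 0], D = [0] (output y = x).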
def compute_ABCD(self):
"""
"""
self.A = np.array([ [ 0 , 1 ],
[ -self.k/self.m , -self.b/self.m ] ])
self.B = np.array([ [ 0 ],
[ 1 /self.m ]])
self.C = np.array([ [ 1 , 0 ]])
self.D = np.array([ [ 0 ]])
###########################################################################
# Graphical output
###########################################################################
#############################
def xut2q( self, x , u , t ):
""" Compute configuration variables ( q vector ) """
q = np.array([ x[0], u[0] ]) # Hack to illustrate force vector
return q
###########################################################################
def forward_kinematic_domain(self, q ):
"""
"""
l = self.l1 * 2
domain = [ (-l+self.l1,l+self.l1) , (-l,l) , (-l,l) ]#
return domain
###########################################################################
def forward_kinematic_lines(self, q ):
"""
Compute points p = [x;y;z] positions given config q
----------------------------------------------------
- points of interest for plotting
Outputs:
lines_pts = [] : a list of array (n_pts x 3) for each lines
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# ground line
pts = np.zeros(( 2 , 3 ))
pts[0,:] = np.array([-self.l1,-self.l2,0])
pts[1,:] = np.array([-self.l1,+self.l2,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# mass
pts = np.zeros(( 5 , 3 ))
pts[0,:] = np.array([q[0] - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([q[0] + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([q[0] + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([q[0] - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'b')
# spring
pts = np.zeros(( 15 , 3 ))
d = q[0] + self.l1 - self.l2/2
h = self.l2 / 3
pts[0,:] = np.array([d*0.00 - self.l1,0,0])
pts[1,:] = np.array([d*0.20 - self.l1,0,0])
pts[2,:] = np.array([d*0.25 - self.l1,+h,0])
pts[3,:] = np.array([d*0.30 - self.l1,-h,0])
pts[4,:] = np.array([d*0.35 - self.l1,+h,0])
pts[5,:] = np.array([d*0.40 - self.l1,-h,0])
pts[6,:] = np.array([d*0.45 - self.l1,+h,0])
pts[7,:] = np.array([d*0.50 - self.l1,-h,0])
pts[8,:] = np.array([d*0.55 - self.l1,+h,0])
pts[9,:] = np.array([d*0.60 - self.l1,-h,0])
pts[10,:] = np.array([d*0.65 - self.l1,+h,0])
pts[11,:] = np.array([d*0.70 - self.l1,-h,0])
pts[12,:] = np.array([d*0.75 - self.l1,+h,0])
pts[13,:] = np.array([d*0.80 - self.l1,0,0])
pts[14,:] = np.array([d*1.00 - self.l1,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
return lines_pts , lines_style , lines_color
###########################################################################
def forward_kinematic_lines_plus(self, x , u , t ):
"""
plots the force vector
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# force arrow
pts = np.zeros(( 5 , 3 ))
xf = x[0] # base of force x coordinate
f = u[0] # force amplitude
pts[0,:] = np.array([xf + self.l2/2,0,0])
pts[1,:] = np.array([xf + self.l2/2 + f,0,0])
pts[2,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,+self.l2/4*f,0])
pts[3,:] = np.array([xf + self.l2/2 + f,0,0])
pts[4,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,-self.l2/4*f,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'r')
return lines_pts , lines_style , lines_color
###############################################################################
class TwoMass( statespace.StateSpaceSystem ):
"""Two Mass with linear spring and damper
Attributes
----------
"""
############################
def __init__(self, m=1, k=2, b=0.2, output_mass = 2):
""" """
# params
self.m1 = m
self.k1 = k
self.b1 = b
self.m2 = m
self.k2 = k
self.b2 = b
self.l1 = 2
self.l2 = 1
# sensor output
self.output_mass = output_mass
# Matrix ABCD
self.compute_ABCD()
# initialize standard params
statespace.StateSpaceSystem.__init__( self, self.A, self.B, self.C, self.D)
# Name and labels
self.name = 'Two mass with linear spring-dampers'
self.input_label = ['Force']
self.input_units = ['[N]']
self.output_label = ['x2']
self.output_units = ['[m]']
self.state_label = [ 'x1','x2', 'dx1', 'dx2']
self.state_units = [ '[m]', '[m]', '[m/s]', '[m/s]']
self.linestyle = '-'
###########################################################################
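# Equations of motion behind the matrices below (mass 1 attached to the wall through k1/b1,
# masses coupled through k2, force applied to mass 2), with state [x1, x2, dx1, dx2]:
# m1*x1'' = -k1*x1 - k2*(x1 - x2) - b1*x1'
# m2*x2'' = -k2*(x2 - x1) - b2*x2' + F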
def compute_ABCD(self):
"""
"""
self.A = np.array([ [ 0, 0, 1, 0 ],
[ 0, 0, 0, 1 ],
[ -(self.k1+self.k2)/self.m1, +self.k2/self.m1, -self.b1/self.m1, 0],
[ +self.k2/self.m2, -self.k2/self.m2, 0, -self.b2/self.m2]])
self.B = np.array([ [ 0 ],
[ 0 ],
[ 0 ],
[ 1/self.m2 ]])
if self.output_mass == 2:
self.C = np.array([ [ 0 , 1 , 0 , 0 ]])
self.output_label = ['x2']
elif self.output_mass ==1:
self.C = np.array([ [ 1 , 0 , 0 , 0 ]])
self.output_label = ['x1']
else:
self.C = np.array([ [ 0 , 1 , 0 , 0 ]])
self.output_label = ['x2']
self.D = np.array([ [ 0 ]])
###########################################################################
# Graphical output
###########################################################################
#############################
def xut2q( self, x , u , t ):
""" Compute configuration variables ( q vector ) """
q = np.array([ x[0], x[1], u[0] ])
return q
###########################################################################
def forward_kinematic_domain(self, q ):
"""
"""
l = self.l1 * 3
domain = [ (-l+self.l1,l+self.l1) , (-l,l) , (-l,l) ]#
return domain
###########################################################################
def forward_kinematic_lines(self, q ):
"""
Compute points p = [x;y;z] positions given config q
----------------------------------------------------
- points of interest for ploting
Outpus:
lines_pts = [] : a list of array (n_pts x 3) for each lines
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# ground line
pts = np.zeros(( 2 , 3 ))
pts[0,:] = np.array([-self.l1*2,-self.l2,0])
pts[1,:] = np.array([-self.l1*2,+self.l2,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# mass 1
pts = np.zeros(( 5 , 3 ))
x1 = q[0] - self.l1
pts[0,:] = np.array([ x1 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([ x1 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([ x1 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([ x1 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'g')
# mass 2
pts = np.zeros(( 5 , 3 ))
x2 = q[1]
pts[0,:] = np.array([x2 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([x2 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([x2 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([x2 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'b')
# spring 1
pts = np.zeros(( 15 , 3 ))
d = q[0] + self.l1 - self.l2/2
h = self.l2 / 3
pts[0,:] = np.array([d*0.00 - self.l1*2,0,0])
pts[1,:] = np.array([d*0.20 - self.l1*2,0,0])
pts[2,:] = np.array([d*0.25 - self.l1*2,+h,0])
pts[3,:] = np.array([d*0.30 - self.l1*2,-h,0])
pts[4,:] = np.array([d*0.35 - self.l1*2,+h,0])
pts[5,:] = np.array([d*0.40 - self.l1*2,-h,0])
pts[6,:] = np.array([d*0.45 - self.l1*2,+h,0])
pts[7,:] = np.array([d*0.50 - self.l1*2,-h,0])
pts[8,:] = np.array([d*0.55 - self.l1*2,+h,0])
pts[9,:] = np.array([d*0.60 - self.l1*2,-h,0])
pts[10,:] = np.array([d*0.65 - self.l1*2,+h,0])
pts[11,:] = np.array([d*0.70 - self.l1*2,-h,0])
pts[12,:] = np.array([d*0.75 - self.l1*2,+h,0])
pts[13,:] = np.array([d*0.80 - self.l1*2,0,0])
pts[14,:] = np.array([d*1.00 - self.l1*2,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# spring 2
pts = np.zeros(( 15 , 3 ))
d = q[1] - q[0] + self.l1 - self.l2
h = self.l2 / 3
pts[0,:] = np.array([d*0.00 + x1 + self.l2/2,0,0])
pts[1,:] = np.array([d*0.20 + x1+self.l2/2,0,0])
pts[2,:] = np.array([d*0.25 + x1+self.l2/2,+h,0])
pts[3,:] = np.array([d*0.30 + x1+self.l2/2,-h,0])
pts[4,:] = np.array([d*0.35 + x1+self.l2/2,+h,0])
pts[5,:] = np.array([d*0.40 + x1+self.l2/2,-h,0])
pts[6,:] = np.array([d*0.45 + x1+self.l2/2,+h,0])
pts[7,:] = np.array([d*0.50 + x1+self.l2/2,-h,0])
pts[8,:] = np.array([d*0.55 + x1+self.l2/2,+h,0])
pts[9,:] = np.array([d*0.60 + x1+self.l2/2,-h,0])
pts[10,:] = np.array([d*0.65 + x1+self.l2/2,+h,0])
pts[11,:] = np.array([d*0.70 + x1+self.l2/2,-h,0])
pts[12,:] = np.array([d*0.75 + x1+self.l2/2,+h,0])
pts[13,:] = np.array([d*0.80 + x1+self.l2/2,0,0])
pts[14,:] = np.array([d*1.00 + x1+self.l2/2,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
return lines_pts , lines_style , lines_color
###########################################################################
def forward_kinematic_lines_plus(self, x , u , t ):
"""
plots the force vector
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# force arrow
pts = np.zeros(( 5 , 3 ))
xf = x[1] # base of force x coordinate
f = u[0] # force amplitude
pts[0,:] = np.array([xf + self.l2/2,0,0])
pts[1,:] = np.array([xf + self.l2/2 + f,0,0])
pts[2,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,+self.l2/4*f,0])
pts[3,:] = np.array([xf + self.l2/2 + f,0,0])
pts[4,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,-self.l2/4*f,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'r')
return lines_pts , lines_style , lines_color
###############################################################################
class ThreeMass( statespace.StateSpaceSystem ):
"""Three Mass with linear spring and damper
Attributes
----------
"""
############################
def __init__(self, m=1, k=2, b=0.2, output_mass = 2):
""" """
# params
self.m1 = m
self.k1 = k
self.b1 = b
self.m2 = m
self.k2 = k
self.b2 = b
self.m3 = m
self.k3 = k
self.b3 = b
self.l1 = 2
self.l2 = 1
# sensor output
self.output_mass = output_mass
# Matrix ABCD
self.compute_ABCD()
# initialize standard params
statespace.StateSpaceSystem.__init__( self, self.A, self.B, self.C, self.D)
# Name and labels
self.name = 'Three mass with linear spring-dampers'
self.input_label = ['Force']
self.input_units = ['[N]']
self.output_label = ['x3']
self.output_units = ['[m]']
self.state_label = [ 'x1','x2', 'x3', 'dx1', 'dx2', 'dx3']
self.state_units = [ '[m]', '[m]', '[m]', '[m/s]', '[m/s]','[m/s]']
self.linestyle = '-'
###########################################################################
def compute_ABCD(self):
"""
"""
k1 = self.k1
k2 = self.k2
k3 = self.k3
m1 = self.m1
m2 = self.m2
m3 = self.m3
b1 = self.b1
b2 = self.b2
b3 = self.b3
self.A = np.array([ [ 0, 0, 0, 1, 0, 0 ],
[ 0, 0, 0, 0, 1, 0 ],
[ 0, 0, 0, 0, 0, 1 ],
[ -(k1+k2)/m1, +k2/m1, 0, -b1/m1, 0, 0 ],
[ +k2/m2, -(k2+k3)/m2, +k3/m2, 0, -b2/m2, 0 ],
[ 0, +k3/m3, -k3/m3, 0, 0, -b3/m3 ]])
self.B = np.array([ [ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 0 ],
[ 1/m2 ]])
if self.output_mass == 3:
self.C = np.array([ [ 0 , 0 , 1 , 0 , 0 , 0 ]])
self.output_label = ['x3']
elif self.output_mass == 2:
self.C = np.array([ [ 0 , 1 , 0 , 0 , 0 , 0 ]])
self.output_label = ['x2']
elif self.output_mass == 1:
self.C = np.array([ [ 1 , 0 , 0 , 0 , 0 , 0 ]])
self.output_label = ['x1']
else:
self.C = np.array([ [ 0 , 0 , 1 , 0 , 0 , 0 ]])
self.output_label = ['x3']
self.D = np.array([ [ 0 ]])
###########################################################################
# Graphical output
###########################################################################
#############################
def xut2q( self, x , u , t ):
""" Compute configuration variables ( q vector ) """
q = np.array([ x[0], x[1], x[2], u[0] ])
return q
###########################################################################
def forward_kinematic_domain(self, q ):
"""
"""
l = self.l1 * 3
domain = [ (-l+self.l1,l+self.l1) , (-l,l) , (-l,l) ]#
return domain
###########################################################################
def forward_kinematic_lines(self, q ):
"""
Compute points p = [x;y;z] positions given config q
----------------------------------------------------
- points of interest for plotting
Outputs:
lines_pts = [] : a list of array (n_pts x 3) for each lines
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# ground line
pts = np.zeros(( 2 , 3 ))
pts[0,:] = np.array([-self.l1*2,-self.l2,0])
pts[1,:] = np.array([-self.l1*2,+self.l2,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# mass 1
pts = np.zeros(( 5 , 3 ))
x1 = q[0] - self.l1
pts[0,:] = np.array([ x1 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([ x1 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([ x1 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([ x1 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'm')
# mass 2
pts = np.zeros(( 5 , 3 ))
x2 = q[1]
pts[0,:] = np.array([x2 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([x2 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([x2 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([x2 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'g')
#mass 3
pts = np.zeros(( 5 , 3 ))
x3 = q[2] + self.l1
pts[0,:] = np.array([x3 - self.l2/2,+self.l2/2,0])
pts[1,:] = np.array([x3 + self.l2/2,+self.l2/2,0])
pts[2,:] = np.array([x3 + self.l2/2,-self.l2/2,0])
pts[3,:] = np.array([x3 - self.l2/2,-self.l2/2,0])
pts[4,:] = pts[0,:]
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'b')
# spring 1
pts = np.zeros(( 15 , 3 ))
d = q[0] + self.l1 - self.l2/2
h = self.l2 / 3
pts[0,:] = np.array([d*0.00 - self.l1*2,0,0])
pts[1,:] = np.array([d*0.20 - self.l1*2,0,0])
pts[2,:] = np.array([d*0.25 - self.l1*2,+h,0])
pts[3,:] = np.array([d*0.30 - self.l1*2,-h,0])
pts[4,:] = np.array([d*0.35 - self.l1*2,+h,0])
pts[5,:] = np.array([d*0.40 - self.l1*2,-h,0])
pts[6,:] = np.array([d*0.45 - self.l1*2,+h,0])
pts[7,:] = np.array([d*0.50 - self.l1*2,-h,0])
pts[8,:] = np.array([d*0.55 - self.l1*2,+h,0])
pts[9,:] = np.array([d*0.60 - self.l1*2,-h,0])
pts[10,:] = np.array([d*0.65 - self.l1*2,+h,0])
pts[11,:] = np.array([d*0.70 - self.l1*2,-h,0])
pts[12,:] = np.array([d*0.75 - self.l1*2,+h,0])
pts[13,:] = np.array([d*0.80 - self.l1*2,0,0])
pts[14,:] = np.array([d*1.00 - self.l1*2,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# spring 2
pts = np.zeros(( 15 , 3 ))
d = q[1] - q[0] + self.l1 - self.l2
pts[0,:] = np.array([d*0.00 + x1 + self.l2/2,0,0])
pts[1,:] = np.array([d*0.20 + x1+self.l2/2,0,0])
pts[2,:] = np.array([d*0.25 + x1+self.l2/2,+h,0])
pts[3,:] = np.array([d*0.30 + x1+self.l2/2,-h,0])
pts[4,:] = np.array([d*0.35 + x1+self.l2/2,+h,0])
pts[5,:] = np.array([d*0.40 + x1+self.l2/2,-h,0])
pts[6,:] = np.array([d*0.45 + x1+self.l2/2,+h,0])
pts[7,:] = np.array([d*0.50 + x1+self.l2/2,-h,0])
pts[8,:] = np.array([d*0.55 + x1+self.l2/2,+h,0])
pts[9,:] = np.array([d*0.60 + x1+self.l2/2,-h,0])
pts[10,:] = np.array([d*0.65 + x1+self.l2/2,+h,0])
pts[11,:] = np.array([d*0.70 + x1+self.l2/2,-h,0])
pts[12,:] = np.array([d*0.75 + x1+self.l2/2,+h,0])
pts[13,:] = np.array([d*0.80 + x1+self.l2/2,0,0])
pts[14,:] = np.array([d*1.00 + x1+self.l2/2,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
# spring 3
pts = np.zeros(( 15 , 3 ))
d = q[2] - q[1] + self.l1 - self.l2
pts[0,:] = np.array([d*0.00 + x2 + self.l2/2,0,0])
pts[1,:] = np.array([d*0.20 + x2+self.l2/2,0,0])
pts[2,:] = np.array([d*0.25 + x2+self.l2/2,+h,0])
pts[3,:] = np.array([d*0.30 + x2+self.l2/2,-h,0])
pts[4,:] = np.array([d*0.35 + x2+self.l2/2,+h,0])
pts[5,:] = np.array([d*0.40 + x2+self.l2/2,-h,0])
pts[6,:] = np.array([d*0.45 + x2+self.l2/2,+h,0])
pts[7,:] = np.array([d*0.50 + x2+self.l2/2,-h,0])
pts[8,:] = np.array([d*0.55 + x2+self.l2/2,+h,0])
pts[9,:] = np.array([d*0.60 + x2+self.l2/2,-h,0])
pts[10,:] = np.array([d*0.65 + x2+self.l2/2,+h,0])
pts[11,:] = np.array([d*0.70 + x2+self.l2/2,-h,0])
pts[12,:] = np.array([d*0.75 + x2+self.l2/2,+h,0])
pts[13,:] = np.array([d*0.80 + x2+self.l2/2,0,0])
pts[14,:] = np.array([d*1.00 + x2+self.l2/2,0,0])
lines_pts.append( pts )
lines_style.append( '-')
lines_color.append( 'k')
return lines_pts , lines_style , lines_color
###########################################################################
def forward_kinematic_lines_plus(self, x , u , t ):
"""
plots the force vector
"""
lines_pts = [] # list of array (n_pts x 3) for each lines
lines_style = []
lines_color = []
# force arrow
pts = np.zeros(( 5 , 3 ))
xf = x[2] + self.l1
f = u[0]
pts[0,:] = np.array([xf + self.l2/2,0,0])
pts[1,:] = np.array([xf + self.l2/2 + f,0,0])
pts[2,:] = np.array([xf + self.l2/2 + f - self.l2/4*f,+self.l2/4*f,0])
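# Minimal usage sketch (assumes the pyro-style `statespace` module providing
# StateSpaceSystem is importable, as in the class definition above):
#   sys = ThreeMass(m=1.0, k=2.0, b=0.2, output_mass=3)
#   print(sys.A.shape, sys.B.shape, sys.C.shape, sys.D.shape)  # (6, 6) (6, 1) (1, 6) (1, 1)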
import cv2
import matplotlib
import matplotlib.pyplot as plt
import numpy as np
import torch
from src.utils.custom_config import custom_parser_config
from seg_trainer import SegmentationModule
matplotlib.use('TkAgg', force=True)
if __name__ == "__main__":
input_img = cv2.imread(
"/home/anirudh/NJ/Interview/Pheno-Inspect/git_proj/coding_task_make-a-model/dataset/sugarbeet_weed_dataset/items/"
"68653b6d-f406-442d-833e-31ffb43cf578/map/tileLayers/rgb/tiles/0-0-1.png")
input_img = input_img[np.newaxis, ...]
input_img = np.swapaxes(input_img, 1, 3)
input_img = torch.from_numpy(input_img).float()
MODEL_CHKP_PATH = "/home/anirudh/NJ/Interview/Pheno-Inspect/git_proj/coding_task_make-a-model/src/" \
"lightning_logs/version_198/checkpoints/epoch=91-step=183.ckpt"
seg_inference = SegmentationModule(config_data=custom_parser_config,
batch_size=5,
epochs=150,
gpu=1,
train_mode=False)
seg_inference.model.eval()
seg_inference.load_state_dict(torch.load(MODEL_CHKP_PATH), strict=False)
with torch.no_grad():
print(input_img.shape)
output_seg = seg_inference(input_img)
output_seg = torch.argmax(output_seg, 1)
print(output_seg.shape)
print(np.unique(output_seg))
output_seg = np.swapaxes(output_seg, 0, 2)
print(np.unique(output_seg))
result = np.squeeze(output_seg)
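# A possible next step (sketch): visualise the predicted class map using the
# matplotlib import above; `result` is assumed to be a 2-D array of class ids.
# plt.imshow(result)
# plt.show()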
"""Unittests for the map module."""
import unittest
import numpy as np
import pandas as pd
import pandas.testing as pdt
import pygeos
import pyproj
import geopandas as gpd
import shapely.wkt
import numpy.testing as npt
import gnssmapper.common as cm
import gnssmapper.geo as geo
class TestObservationMethods(unittest.TestCase):
def setUp(self):
self.rays = gpd.GeoSeries([shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]]),
shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])],
crs="epsg:27700")
def test_rays(self) -> None:
r = [[0, 0, 0], [1, 1, 1]]
s = [[10000, 0, 0],[10001, 1, 1]]
expected = [pygeos.Geometry("LineString (0 0 0,1000 0 0)"), pygeos.Geometry("LineString (1 1 1,1001 1 1)")]
out=geo.rays(r,s)
self.assertTrue(np.all(pygeos.predicates.equals(out,expected)))
def test_to_crs(self) -> None:
target = pyproj.crs.CRS(cm.constants.epsg_wgs84)
transformed= geo.to_crs(self.rays,target)
self.assertTrue(all(s.has_z for s in transformed))
self.assertEqual(target,transformed.crs)
df = gpd.GeoDataFrame(geometry = self.rays,crs=self.rays.crs)
transformed_df = geo.to_crs(df,target)
self.assertTrue(all(s.has_z for s in transformed_df.geometry))
self.assertEqual(target,transformed_df.crs)
class TestShapelyMethods(unittest.TestCase):
def setUp(self):
self.building = shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")
def test_intersection(self):
five = shapely.geometry.LineString([[527990,183005,0],[528020,183005,15]])
point = geo.intersection([five],[self.building],[10])
self.assertAlmostEqual(np.array(point[0])[2],5)
def test_intersection_projected(self):
fifteen = shapely.geometry.LineString([[527990,183005,10],[528020,183005,25]])
point = geo.intersection_projected([fifteen], [self.building])
npt.assert_array_almost_equal(np.array(list(point)[0].coords).flatten(), [528000, 183005, 15])
inside = shapely.geometry.LineString([[528005,183005,10],[528020,183005,25]])
inside_point = geo.intersection_projected([inside], [self.building])
npt.assert_array_almost_equal(np.array(list(inside_point)[0].coords).flatten(), [528010, 183005, 15])
outside = shapely.geometry.LineString([[527990,183015,10],[528020,183015,25]])
outside_point = geo.intersection_projected([outside], [self.building])
self.assertTrue(list(outside_point)[0].is_empty)
empty = shapely.geometry.LineString()
empty_point = geo.intersection_projected([empty], [self.building])
self.assertTrue(list(empty_point)[0].is_empty)
def test_intersection_projected_height(self):
fifteen = shapely.geometry.LineString([[527990,183005,10],[528020,183005,25]])
point = geo.intersection_projected_height([fifteen],[self.building])
self.assertAlmostEqual(point[0],15)
def test_intersects(self):
five = shapely.geometry.LineString([[527990, 183005, 0], [528020, 183005, 15]])
fifteen = shapely.geometry.LineString([[527990, 183005, 10], [528020, 183005, 25]])
rays = [five, fifteen]
buildings = [self.building, self.building]
heights=[10,10]
npt.assert_array_almost_equal(geo.intersects(rays,buildings,heights),[True,False])
class TestFresnel(unittest.TestCase):
def setUp(self):
self.buildings = [shapely.wkt.loads("POLYGON((528010 183010, 528010 183000,528000 183000, 528000 183010,528010 183010))")]
def test_fresnel_integral(self):
v=np.array([-1,0,1,2.4])
o=np.array([-20*np.log(1.12)
import importlib
from hydroDL import kPath, utils
from hydroDL.app import waterQuality as wq
from hydroDL.master import basins
from hydroDL.data import usgs, gageII, gridMET, ntn
from hydroDL.master import slurm
from hydroDL.post import axplot, figplot
import numpy as np
import matplotlib.pyplot as plt
import os
import pandas as pd
import json
import scipy
from astropy.timeseries import LombScargle
import matplotlib.gridspec as gridspec
dirSel = os.path.join(kPath.dirData, 'USGS', 'inventory', 'siteSel')
with open(os.path.join(dirSel, 'dictRB_Y30N5.json')) as f:
dictSite = json.load(f)
codeLst = sorted(usgs.newC)
ep = 500
reTest = False
siteNoLst = dictSite['comb']
nSite = len(siteNoLst)
# load all sequence
outNameLSTM = '{}-{}-{}-{}'.format('rbWN5', 'comb', 'QTFP_C', 'comb-B10')
dictLSTM, dictWRTDS, dictObs = wq.loadModel(
siteNoLst, outNameLSTM, codeLst)
corrMat, rmseMat = wq.dictErr(dictLSTM, dictWRTDS, dictObs, codeLst)
# load basin attributes
dfG = gageII.readData(siteNoLst=siteNoLst)
dfG = gageII.updateRegion(dfG)
dfG = gageII.updateCode(dfG)
# significance test
dfS = pd.DataFrame(index=codeLst, columns=['rmse', 'corr'])
for k, code in enumerate(codeLst):
a = corrMat[:, k, 1]
b = corrMat[:, k, 2]
aa, bb = utils.rmNan([a, b], returnInd=False)
s, p = scipy.stats.ttest_ind(aa, bb)
# s, p = scipy.stats.wilcoxon(aa, bb)
dfS.at[code, 'corr'] = p
a = rmseMat[:, k, 1]
b = rmseMat[:, k, 2]
aa, bb = utils.rmNan([a, b], returnInd=False)
s, p = scipy.stats.ttest_ind(aa, bb)
# s, p = scipy.stats.wilcoxon(aa, bb)
dfS.at[code, 'rmse'] = p
# a cdf for rsq of seasonality and linearity
codeLst2 = ['00915', '00925', '00930', '00935', '00940', '00945',
'00955', '70303', '80154']
[nfy, nfx] = [4, 2]
fig, axes = plt.subplots(4, 2)
for k, code in enumerate(codeLst2):
j, i = utils.index2d(k, 4, 2)
indS = [siteNoLst.index(siteNo) for siteNo in dictSite[code]]
ic = codeLst.index(code)
axplot.plotCDF(axes[j, i], [corrMat[indS, ic, 1]**2, corrMat[indS, ic, 2]**2],
legLst=['LSTM', 'WRTDS'])
axes[j, i].set_title(code)
fig.show()
code = '00405'
indS = [siteNoLst.index(siteNo) for siteNo in dictSite[code]]
ic = codeLst.index(code)
fig, ax = plt.subplots(1, 1)
ax.plot(corrMat[indS, ic, 1]**2, corrMat[indS, ic, 2]**2, '*')
fig.show()
np.sum(corrMat[indS, ic, 1]**2 > corrMat[indS, ic, 2]**2)
import pandas
import pickle
import numpy as np
import matplotlib.pyplot as plt
import sklearn
from sklearn import linear_model
EXPERIMENT_DATA = pickle.load(open('EXPERIMENT_SET_pandas.pkl','rb'))
EVALUATION_SET = pickle.load(open('EVALUATION_SET_pandas.pkl','rb'))
EXPERIMENT_DATA = EXPERIMENT_DATA[EXPERIMENT_DATA["GRAD"] == "YES"]
EXPERIMENT_DATA = EXPERIMENT_DATA[["RM","YIELD","BAGSOLD"]]
def obtainw(l, X, Y):
dataSize = X.shape[1]
I_D = np.identity(dataSize)
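# Sketch of the ridge-regression closed form this helper appears to build
# toward, with l as the regularisation weight (assumption; the remainder of
# the function is not shown here):
#   w = (X^T X + l * I_D)^(-1) X^T Y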
# classes to hold the physical components of the system
# e.g. the stellar light, dark matter, black hole, globular clusters
import numpy as np
import logging
from dynamite import mges as mge
class System(object):
"""The physical system being modelled
e.g. system is a galaxy. A system is composed of ``Components`` e.g. the
galaxy is composed of stars, black hole, dark matter halo. This object is
automatically created when the configuration file is read.
"""
def __init__(self, *args):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
self.n_cmp = 0
self.cmp_list = []
self.n_pot = 0
self.n_kin = 0
self.n_pop = 0
self.parameters = None
self.distMPc = None
self.name = None
self.position_angle = None
for component in args:
self.add_component(component)
def add_component(self, cmp):
"""add a component to the system
Parameters
----------
cmp : a ``dyn.physical_system.Component`` object
Returns
-------
None
updated the system componenent attributes
"""
self.cmp_list += [cmp]
self.n_cmp += 1
self.n_pot += cmp.contributes_to_potential
self.n_kin += len(cmp.kinematic_data)
self.n_pop += len(cmp.population_data)
def validate(self):
"""
Validate the system
Ensures the System has the required attributes: at least one component,
no duplicate component names, and the ml parameter, and that the
sformat string for the ml parameter is set.
Raises
------
ValueError : if required attributes or components are missing, or if
there is no ml parameter
Returns
-------
None.
"""
if len(self.cmp_list) != len(set(self.cmp_list)):
raise ValueError('No duplicate component names allowed')
if (self.distMPc is None) or (self.name is None) \
or (self.position_angle is None):
text = 'System needs distMPc, name, and position_angle attributes'
self.logger.error(text)
raise ValueError(text)
if not self.cmp_list:
text = 'System has no components'
self.logger.error(text)
raise ValueError(text)
if len(self.parameters) != 1 or self.parameters[0].name != 'ml':
text = 'System needs ml as its sole parameter'
self.logger.error(text)
raise ValueError(text)
self.parameters[0].update(sformat = '01.2f') # sformat of ml parameter
def validate_parset(self, par):
"""
Validates the system's parameter values
Kept separate from the validate method to facilitate easy calling from
the ``ParameterGenerator`` class. Returns `True` if all parameters are
non-negative, except for logarithmic parameters which are not checked.
Parameters
----------
par : dict
{ "p":val, ... } where "p" are the system's parameters and
val are their respective raw values
Returns
-------
isvalid : bool
True if the parameter set is valid, False otherwise
"""
p_raw_values = [par[p.name]
for p in self.parameters if not p.logarithmic]
isvalid = np.all(np.sign(p_raw_values) >= 0)
if not isvalid:
self.logger.debug(f'Invalid system parameters {par}: at least '
'one negative non-log parameter.')
return bool(isvalid)
def __repr__(self):
return f'{self.__class__.__name__} with {self.__dict__}'
def get_component_from_name(self, cmp_name):
"""get_component_from_name
Parameters
----------
cmp_name : string
component name (as specified in the congi file)
Returns
-------
a ``dyn.physical_system.Component`` object
"""
cmp_list_list = np.array([cmp0.name for cmp0 in self.cmp_list])
idx = np.where(cmp_list_list == cmp_name)
self.logger.debug(f'Checking for 1 and only 1 component {cmp_name}...')
error_msg = f"There should be 1 and only 1 component named {cmp_name}"
assert len(idx[0]) == 1, error_msg
self.logger.debug('...check ok.')
component = self.cmp_list[idx[0][0]]
return component
def get_component_from_class(self, cmp_class):
"""get_component_from_class
Parameters
----------
cmp_class : string
name of the component type/class
Raises
-------
ValueError : if there are more than one component of the same class.
# TODO: remove this limit, e.g. if we had two MGE-based components
one for stars, one for gas
Returns
-------
a ``dyn.physical_system.Component`` object
"""
self.logger.debug('Checking for 1 and only 1 component of class '
f'{cmp_class}...')
components = filter(lambda c: isinstance(c,cmp_class), self.cmp_list)
component = next(components, False)
if component is False or next(components, False) is not False:
error_msg = 'Actually... there should be 1 and only 1 ' \
f'component of class {cmp_class}'
self.logger.error(error_msg)
raise ValueError(error_msg)
self.logger.debug('...check ok.')
return component
def get_all_dark_components(self):
"""Get all components which are Dark
Returns
-------
list
a list of Component objects, keeping only the dark components
"""
dark_cmp = [c for c in self.cmp_list if isinstance(c, DarkComponent)]
return dark_cmp
def get_all_dark_non_plummer_components(self):
"""Get all Dark components which are not plummer
Useful in legacy orbit libraries for finding the dark halo component.
For legacy models, the black hole is always a plummer, so any Dark but
non plummer components must represent the dark halo.
Returns
-------
list
a list of Component objects, keeping only the dark components
"""
dark_cmp = self.get_all_dark_components()
dark_non_plum_cmp = [c for c in dark_cmp if not isinstance(c, Plummer)]
return dark_non_plum_cmp
def get_all_kinematic_data(self):
"""get_all_kinematic_data
Loop over all components, extract their kinemtics into a list.
Returns
-------
list
all_kinematics in a list
"""
all_kinematics = []
for component in self.cmp_list:
all_kinematics += component.kinematic_data
return all_kinematics
class Component(object):
"""A component of the physical system
e.g. the stellar component, black hole, or dark halo of a galaxy
Parameters
----------
name : string
a short but descriptive name of the component
visible : Bool
whether this is visible <--> whether it has an associated MGE
contributes_to_potential : Bool
whether this contributes_to_potential **not currently used**
symmetry : string
one of 'spherical', 'axisymm', or 'triax' **not currently used**
kinematic_data : list
a list of ``dyn.kinemtics.Kinematic`` data for this component
parameters : list
a list of ``dyn.parameter_space.Parameter`` objects for this component
population_data : list
a list of ``dyn.populations.Population`` data for this component **not
currently used**
"""
def __init__(self,
name = None,
visible=None,
contributes_to_potential=None,
symmetry=None,
kinematic_data=[],
population_data=[],
parameters=[]):
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
if name is None:
self.name = self.__class__.__name__
else:
self.name = name
self.visible = visible
self.contributes_to_potential = contributes_to_potential
self.symmetry = symmetry
self.kinematic_data = kinematic_data
self.population_data = population_data
self.parameters = parameters
def validate(self, par=None):
"""
Validate the component
Ensure it has the required attributes and parameters.
Additionally, the sformat strings for the parameters are set.
Parameters
----------
par : a list with parameter names. Mandatory.
Raises
------
ValueError : if a required attribute is missing or the required
parameters do not exist
Returns
-------
None.
"""
errstr = f'Component {self.__class__.__name__} needs attribute '
if self.visible is None:
text = errstr + 'visible'
self.logger.error(text)
raise ValueError(text)
if self.contributes_to_potential is None:
text = errstr + 'contributes_to_potential'
self.logger.error(text)
raise ValueError(text)
if not self.parameters:
text = errstr + 'parameters'
self.logger.error(text)
raise ValueError(text)
pars = [self.get_parname(p.name) for p in self.parameters]
if set(pars) != set(par):
text = f'{self.__class__.__name__} needs parameters ' + \
f'{par}, not {pars}.'
self.logger.error(text)
raise ValueError(text)
def validate_parset(self, par):
"""
Validates the component's parameter values.
Kept separate from the
validate method to facilitate easy calling from the parameter
generator class. This is a `placeholder` method which returns
`True` if all parameters are non-negative, except for logarithmic
parameters which are not checked. Specific validation
should be implemented for each component subclass.
Parameters
----------
par : dict
{ "p":val, ... } where "p" are the component's parameters and
val are their respective raw values
Returns
-------
isvalid : bool
True if the parameter set is valid, False otherwise
"""
p_raw_values = [par[self.get_parname(p.name)]
for p in self.parameters if not p.logarithmic]
isvalid = np.all(np.sign(p_raw_values) >= 0)
if not isvalid:
self.logger.debug(f'Invalid parset {par}: at least one negative '
'non-log parameter.')
return isvalid
def get_parname(self, par):
"""
Strip the component name suffix from the parameter name.
Parameters
----------
par : str
The full parameter name "parameter-component".
Returns
-------
pure_parname : str
The parameter name without the component name suffix.
"""
try:
pure_parname = par[:par.rindex(f'-{self.name}')]
except:
self.logger.error(f'Component name {self.name} not found in '
f'parameter string {par}')
raise
return pure_parname
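# Example (sketch): for a component named 'stars', get_parname('q-stars')
# returns 'q'; a parameter string without the '-stars' suffix raises an error.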
def __repr__(self):
return (f'\n{self.__class__.__name__}({self.__dict__}\n)')
class VisibleComponent(Component):
"""Any visible component of the sytem, with an MGE
Parameters
----------
mge_pot : a ``dyn.mges.MGE`` object
describing the (projected) surface-mass density
mge_lum : a ``dyn.mges.MGE`` object
describing the (projected) surface-luminosity density
"""
def __init__(self,
mge_pot=None,
mge_lum=None,
**kwds):
# visible components have MGE surface density
self.mge_pot = mge_pot
self.mge_lum = mge_lum
super().__init__(visible=True, **kwds)
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
def validate(self, **kwds):
super().validate(**kwds)
if not (isinstance(self.mge_pot, mge.MGE) and \
isinstance(self.mge_lum, mge.MGE)):
text = f'{self.__class__.__name__}.mge_pot and ' \
f'{self.__class__.__name__}.mge_lum ' \
'must be mges.MGE objects'
self.logger.error(text)
raise ValueError(text)
if len(self.mge_pot.data) != len(self.mge_lum.data):
text = f'{self.__class__.__name__}.mge_pot and ' \
f'{self.__class__.__name__}.mge_lum ' \
'must be of equal length'
self.logger.error(text)
raise ValueError(text)
class AxisymmetricVisibleComponent(VisibleComponent):
def __init__(self, **kwds):
super().__init__(symmetry='axisymm', **kwds)
def validate(self):
par = ['par1', 'par2']
super().validate(par=par)
class TriaxialVisibleComponent(VisibleComponent):
"""Triaxial component with a MGE projected density
Has parameters (p,q,u) = (b/a, c/a, sigma_obs/sigma_intrinsic) used for
deprojecting the MGE. A given (p,q,u) correspond to a fixed set of
`viewing angles` for the triaxial ellipsoid.
"""
def __init__(self, **kwds):
super().__init__(symmetry='triax', **kwds)
self.logger = logging.getLogger(f'{__name__}.{__class__.__name__}')
self.qobs = np.nan
def validate(self):
"""
Validate the TriaxialVisibleComponent
In addition to validating parameter names and setting their sformat
strings, also set self.qobs (minimal flattening from mge data)
Returns
-------
None.
"""
par = ['q', 'p', 'u']
super().validate(par=par)
self.qobs = np.amin(self.mge_pot.data['q'])
if np.isnan(self.qobs):
raise ValueError(f'{self.__class__.__name__}.qobs is np.nan')
def validate_parset(self, par):
"""
Validate the p, q, u parset
Validates the triaxial component's p, q, u parameter set. Requires
self.qobs to be set. A parameter set is valid if the resulting
(theta, psi, phi) are not np.nan.
Parameters
----------
par : dict
{ "p":val, ... } where "p" are the component's parameters and
val are their respective values
Returns
-------
bool
True if the parameter set is valid, False otherwise
"""
tpp = self.triax_pqu2tpp(par['p'], par['q'], par['u'])
return bool(not np.any(np.isnan(tpp)))
'''
A simple rejection sampling script for custom statistical distributions
Author: <NAME> - <EMAIL>
Usage:
$ python rejection.py -h for help and basic instruction
'''
from __future__ import print_function, division
print(__doc__)
try:
import numpy as np
except ImportError:
print("Numpy not installed, try - pip install numpy")
import sys
sys.exit()
#
# CUSTOM PDF CLASS ##########################################################
#
class custom_df:
'''
Custom_df class defines the custom distribution function from
the user.
__init__ : Define the piecewise components of the distribution
along with any other parameters you need.
df : Distribution function - accepts an n-length vector only.
sample : Sample the distribution function - accepts a single
float only.
'''
def __init__(self):
self.lgn = lambda m, a, mc, s: a * np.exp(-(np.log10(m) - \
np.log10(mc))**2.0 / (2*s**2.0))
self.pwr = lambda m, a, s: a*m**(-s)
self.p = [0.093, 0.2, 0.55, 0.0415, 1.35]
def df(self,x):
assert((isinstance(x, float) != True)), \
"Distribution not found - passed values are a float - use .sample"
#
# Define your custom distribution here that accepts a vector.
#
idx = np.min(np.where(x >= 1.0)[0])
n = np.empty(len(x))
n[:idx] = self.lgn(x[:idx], self.p[0], self.p[1], self.p[2])
n[idx:] = self.pwr(x[idx:], self.p[3], self.p[4])
return n
def sample(self,x):
assert(isinstance(x, float)), \
"Sample not taken - passed values are a tuple - use .df"
#
# Define how to sample the distribution given a single value
#
if x > 1.0:
dn = self.pwr(x, self.p[3], self.p[4])
else:
dn = self.lgn(x, self.p[0], self.p[1], self.p[2])
return dn
#
# END CUSTOM PDF CLASS ######################################################
#
#
#
# MAIN PROGRAM
#
import argparse
INPUT = argparse.ArgumentParser(description='rejection.py user parameters')
#
# USER PARAMETER INPUT
# REQUIRED PARAMETERS:
# a = lower limit of distribution
# b = upper limit of distribution
#
INPUT.add_argument('a', metavar='a', type=float,
nargs=1, help='lower limit of distribution')
INPUT.add_argument('b', metavar='b', type=float,
nargs=1, help='upper limit of distribution')
#
# USER PARAMETER INPUT
# OPTIONAL PARAMETERS:
# plot = True or False
# logspace = True or False
# output = True or False
# verbose = True or False
# exploratory = True or False
#
INPUT.add_argument('-np', '--no-plot', dest='noplot', action='store_false',
help='Do not produce plot')
INPUT.add_argument('-lg', '--logspace', dest='logspace', action='store_true',
help='Independent axis in evenly sampled in logspace')
INPUT.add_argument('-o', '--output', dest='output', action='store_true',
help='Write .csv output file')
INPUT.add_argument('-v', '--verbose', dest='verbose', action='store_true',
help='Tells you more information during run')
INPUT.add_argument('-ex', '--exploratory', dest='explore', action='store_true',
help='No sampling - plots distribution for exploration')
OPTS = INPUT.parse_args()
#
# USER PARAMETER CHECK AND READOUT
# This section will fail if:
# Parameters 'a' is not less than 'b'
#
assert OPTS.a[0] < OPTS.b[0], "INPUT ERROR - CHECK DISTRIBUTION RANGE"
if OPTS.verbose:
print("\nPYTHON SCRIPT: rejection")
print()
print("INPUT DISTRIBUTION LIMIT a) :", OPTS.a[0])
print("INPUT DISTRIBUTION LIMIT b) :", OPTS.b[0])
print("LOGSPACE :", OPTS.logspace)
print("EXPLORATORY :", OPTS.explore)
#
# Main rejection sampling loop
#
#
#
# Checks if user specifies logspace at execution
#
if OPTS.logspace:
X = np.logspace(OPTS.a[0], OPTS.b[0], 1000)
""" Interact with the FRITZ ZTF-II marshal """
import os
import warnings
import pandas
import json
import requests
import numpy as np
from astropy import time
from astropy.io import fits
from .io import LOCALSOURCE, _load_id_
FRITZSOURCE = os.path.join(LOCALSOURCE,"fritz")
if not os.path.isdir(FRITZSOURCE):
os.mkdir(FRITZSOURCE)
FID_TO_NAME = {1:"ztfg", 2:"ztfr", 3:"ztfi"}
ZTFCOLOR = {"ztfr":"tab:red", "ztfg":"tab:green", "ztfi":"tab:orange"}
_BASE_FRITZ_URL = "https://fritz.science/"
FRITZSOURCE = os.path.join(LOCALSOURCE,"fritz")
####################
# #
# GENERIC TOOLS #
# #
####################
# ---------- #
# Downloads #
# ---------- #
def api(method, endpoint, data=None, load=True, token=None, **kwargs):
""" """
if token is None:
token = _load_id_('fritz')
headers = {'Authorization': f"token {token}"}
response = requests.request(method, endpoint, json=data, headers=headers, **kwargs)
if not load:
return response
try:
downloaded = json.loads(response.content)
except:
warnings.warn("cannot load the response.content")
downloaded = None
if downloaded["status"] not in ["success"]:
raise IOError(f"downloading status of '{method} {endpoint}' is not success: {downloaded['status']}")
return downloaded["data"]
def bulk_download(fobject, names, nprocess=4, show_progress=True,
asdict=False, force_dl=False, client=None, store=True):
""" Multiprocessed download of Fritz{fobject}.
This makes use of the Fritz{fobject}.from_name() classmethods
Parameters
----------
fobject: [string]
What you want to download.
- "lightcurve" (or "photometry"), "spectra" (or "spectrum"), "alerts", or "source"
names: [list of string]
list of names for which you want to download data.
nprocess: [int] -optional-
list of parallel download processes.
force_dl: [bool] -optional-
Should this redownload existing data ?
store: [bool] -optional-
Should the downloaded data be stored ?
asdict: [bool] -optional-
Should this return a dictionary or a list
- asdict=True: {name: fritz{fobject}}
- asdict=False: [fritz{fobject}]
Returns
-------
Dictionary {name: fritz{fobject}}
"""
KNOW_OBJECT = ["lightcurve","photometry", "spectra", "spectrum", "alerts","source"]
if fobject not in KNOW_OBJECT:
raise ValueError(f"Unknown fritz object {fobject}")
if fobject == "spectrum":
fobject = "spectra"
if fobject == "photometry":
fobject = "lightcurve"
if client is not None:
from dask import delayed
dl_func = eval(f"_single_download_{fobject}_")
d_download = [delayed(dl_func)([name, force_dl, store]) for name in names]
return client.compute(d_download)
from .utils.tools import is_running_from_notebook
import multiprocessing
nnames = len(names)
#
# - Progress bar or not
if show_progress:
from astropy.utils.console import ProgressBar
bar = ProgressBar( nnames, ipython_widget=is_running_from_notebook())
else:
bar = None
#
# - Input
objects = {}
force_dl = [force_dl]*nnames
store = [store]*nnames
#
# - Multiprocessing
with multiprocessing.Pool(nprocess) as p:
# Da Loop
for j, flc in enumerate( p.imap(eval(f"_single_download_{fobject}_"), zip(names, force_dl, store) ) ):
if bar is not None:
bar.update(j)
objects[names[j]] = flc
if bar is not None:
bar.update(nnames)
return objects if asdict else list(objects.values())
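# Usage sketch (target names are placeholders): download several lightcurves
# in parallel and keep them keyed by name.
#   lcs = bulk_download("lightcurve", ["ZTF21aaaaaaa", "ZTF21bbbbbbb"], nprocess=4, asdict=True)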
def _single_download_lightcurve_(args):
""" """
name, force_dl, store = args
return FritzPhotometry.from_name(name, force_dl=force_dl, store=store)
def _single_download_spectra_(args):
""" """
name, force_dl, store = args
return FritzSpectrum.from_name(name, force_dl=force_dl, store=store)
def _single_download_alerts_(args):
""" """
name, force_dl, store = args
return FritzAlerts.from_name(name, force_dl=force_dl, store=store)
def _single_download_source_(args):
""" """
name, force_dl, store = args
return FritzSource.from_name(name, force_dl=force_dl, store=store)
# =============== #
# #
# LightCurve #
# #
# =============== #
def download_lightcurve(name, get_object=False,
token=None, clean_groupcolumn=True,
format=None, magsys=None, store=False,
verbose=False,
**kwargs):
"""
Parameters
----------
format: [string] -optional-
= skyportal api option =
flux or mag (None means default)
magsys: [string] -optional-
= skyportal api option =
ab or vega (None means default)
**kwargs are ignored (here for backward compatibilities)
"""
#
# - start: addon
addon = []
if format is not None:
addon.append(f"format={format}")
if magsys is not None:
addon.append(f"magsys={magsys}")
addon = "" if len(addon)==0 else "?"+"&".join(addon)
# - end: addon
#
q_url = _BASE_FRITZ_URL+f'api/sources/{name}/photometry{addon}'
if verbose:
print(f"queried URL: {q_url}")
lcdata = api('get', q_url, load=True, token=token)
lcdata = pandas.DataFrame(lcdata)
if clean_groupcolumn:
lcdata["groups"] = [[i_["id"] for i_ in lcdata["groups"].iloc[i]]
for i in range(len(lcdata))]
# - output
if not store and not get_object:
return lcdata
flcdata = FritzPhotometry( lcdata )
if store:
flcdata.store()
return flcdata if get_object else lcdata
# =============== #
# #
# Spectra #
# #
# =============== #
def download_spectra(name, get_object=False, token=None, store=False, verbose=False):
""" """
q_url = _BASE_FRITZ_URL+f'api/sources/{name}/spectra'
if verbose:
print(f"queried URL: {q_url}")
list_of_dict = api('get', q_url, load=True, token=token)
#
# - Any problem ?
if list_of_dict is None or len(list_of_dict)==0:
warnings.warn(f"no spectra downloaded. {q_url} download is empty")
return None
spectra = list_of_dict["spectra"]
if spectra is None or len(spectra)==0:
warnings.warn(f"no spectra downloaded. {q_url} download is empty")
return None
# - No ? Good
#
if not store and not get_object:
return spectra
if spectra is None or len(spectra)==0:
return None
if len(spectra)==1:
fspectra = FritzSpectrum(spectra[0])
if store:
fspectra.store()
else:
fspectra = [FritzSpectrum(spec_) for spec_ in spectra]
if store:
[fspec_.store() for fspec_ in fspectra]
return fspectra if get_object else spectra
# =============== #
# #
# Alerts #
# #
# =============== #
def download_alerts(name, candid=None, allfields=None,
get_object=False, token=None, store=False, verbose=False):
"""
looking for api/alerts/{name}{addon}
Parameters
----------
candid: [int/str]
alert candid like: 1081317100915015025
"""
#
# - start: addon
addon = []
if candid is not None:
addon.append(f"candid={candid}")
if allfields is not None:
addon.append(f"includeAllFields={allfields}")
addon = "" if len(addon)==0 else "?"+"&".join(addon)
# - end: addon
#
q_url = _BASE_FRITZ_URL+f'api/alerts/{name}{addon}'
if verbose:
print(f"queried URL: {q_url}")
alerts = api('get',q_url, load=True, token=token)
# - output
if not store and not get_object:
return alerts
falerts = FritzAlerts.from_alerts(alerts)
if store:
falerts.store()
return falerts if get_object else alerts
# =============== #
# #
# Source #
# #
# =============== #
def download_source(name, get_object=False, token=None, store=False, verbose=False):
""" """
addon=''
q_url = _BASE_FRITZ_URL+f'api/sources/{name}{addon}'
if verbose:
print(f"queried URL: {q_url}")
source = api('get', q_url, load=True, token=token)
if not store and not get_object:
return source
fsource = FritzSource(source)
if store:
fsource.store()
return fsource if get_object else source
# =============== #
# --------------- #
# - Sample - #
# --------------- #
# =============== #
def download_sample( groupid, get_object=False,
savesummary=False,
savedafter=None, savedbefore=None,
name=None,
includephotometry=None,
includerequested=None,
addon=None, token=None,
store=False, verbose=False):
"""
includephotometry: [bool] -optional-
Includes the photometric table inside sources["photometry"]
"""
#
# - start: addon
if addon is None:
addon = []
elif type(addon) is str:
addon = [addon]
if savesummary:
addon.append(f"saveSummary=true")
if store:
warnings.warn("store option not available if savesummary=True.")
store=False
if groupid is not None and groupid not in ["*", "all"]:
addon.append(f"group_ids={groupid}")
if savedafter is not None:
addon.append(f"savedAfter={time.Time(savedafter).isot}")
if savedbefore is not None:
addon.append(f"savedBefore={time.Time(savedbefore).isot}")
if name is not None:
addon.append(f"sourceID={name}")
if includephotometry is not None:
addon.append(f"includePhotometry={includephotometry}")
if includerequested is not None:
addon.append(f"includeRequested={includerequested}")
addon = "" if len(addon)==0 else "?"+"&".join(addon)
# - end: addon
#
q_url = _BASE_FRITZ_URL+f"api/sources{addon}"
if verbose:
print(f"queried URL: {q_url}")
sources = api('get', q_url, load=True, token=token)
if not store and not get_object:
return sources
sample = FritzSample(sources, groupid)
if store:
sample.store()
return sample if get_object else sources
#
# Group
#
def download_groups(get_object=False, token=None, store=True, verbose=False):
""" """
q_url = _BASE_FRITZ_URL+f'api/groups'
if verbose:
print(f"queried URL: {q_url}")
groups = api('get',q_url, load=True, token=token)
if not store and not get_object:
return groups
fgroups = FritzGroups(groups)
if store:
fgroups.store()
return fgroups if get_object else groups
# -------------- #
# Data I/O #
# -------------- #
#
# Spectra
#
def parse_spectrum_filename(filename):
""" """
directory = os.path.dirname(filename)
basename = os.path.basename(filename).split(".")[0]
extension = filename.split(".")[-1]
if not basename.startswith("fritz"):
raise ValueError("Cannot parse the given name. Not a fritz_bla file.")
_, instspectrum, name, *orig = basename.split("_")
originalname = "_".join(orig)
return {"instrument":instspectrum.replace("spectrum",""),
"name":name,
"original_file_filename":originalname,
"extension":extension,
"directory":directory}
####################
# #
# Classes #
# #
####################
# -------------- #
# Photometry/ #
# LightCurve #
# -------------- #
class FritzPhotometry( object ):
""" """
def __init__(self, dataframe=None):
""" """
if dataframe is not None:
self.set_data(dataframe)
@classmethod
def from_fritz(cls, name):
""" """
print("FritzPhotometry.from_fritz(name) is DEPRECATED, use FritzPhotometry.from_name(name)")
return cls.from_name(name)
@classmethod
def from_name(cls, name, force_dl=False, store=False, **kwargs):
""" """
if not force_dl:
filename = cls._build_filename_(name, **kwargs)
if os.path.isfile(filename):
extension = filename.split(".")[-1]
return getattr(cls,f"read_{extension}")(filename)
return cls( download_lightcurve(name, get_object=False, store=store) )
# ============= #
# Method #
# ============= #
# --------- #
# I/O #
# --------- #
def store(self, fileout=None, dirout="default", extension="csv", **kwargs):
""" calls the self.to_{extension} with the default naming convention. """
# can differ to extension if fileout given
if fileout is None:
fileout = self._build_filename_(self.name, dirout=dirout, extension=extension)
if extension in ["csv","json","parquet",]:
return getattr(self,f"to_{extension}")(fileout, **kwargs)
if extension in ["hdf","hd5","hdf5","h5"]:
return self.to_hdf(fileout, **kwargs)
raise ValueError(f"only 'csv','json', 'hdf5' extension implemented ; {extension} given")
# - read file
@classmethod
def read_parquet(cls, filename, **kwargs):
""" """
return cls(pandas.read_parquet(filename, **kwargs))
@classmethod
def read_csv(cls, filename, **kwargs):
""" """
return cls(pandas.read_csv(filename, **kwargs))
@classmethod
def read_hdf(cls, filename, key="data",**kwargs):
""" """
return cls(pandas.read_hdf(filename, key=key, **kwargs))
@classmethod
def read_json(cls, filename, **kwargs):
""" """
return cls(pandas.read_json(filename, **kwargs))
# - to file
def to_parquet(self, fileout, **kwargs):
""" export the data as parquet using pandas.to_parquet """
self.data.to_parquet(fileout, **{**{"index":False},**kwargs})
def to_csv(self, fileout, **kwargs):
""" export the data as csv using pandas.to_csv """
self.data.to_csv(fileout, **{**{"index":False},**kwargs})
def to_hdf(self, fileout, **kwargs):
""" export the data as csv using pandas.to_hdf """
self.data.to_hdf(fileout, key="data", **{**{"index":False},**kwargs})
def to_json(self, fileout, **kwargs):
""" export the data as csv using pandas.to_json. """
self.data.to_json(fileout, **{**{"index":False},**kwargs})
@staticmethod
def _build_filename_(name, dirout=None, extension="csv"):
""" """
if dirout is None or dirout == "default":
dirout = os.path.join(FRITZSOURCE,"lightcurve")
if not os.path.isdir(dirout):
os.makedirs(dirout, exist_ok=True)
return os.path.join(dirout,f"fritz_lightcurve_{name}.{extension}")
# --------- #
# SETTER #
# --------- #
def set_data(self, dataframe, reshape=True):
""" """
self._data = dataframe
# --------- #
# GETTER #
# --------- #
def get_keys(self, keys, full=False, perband=False, groupby=None, usestat=None, index=None, **kwargs):
"""
Parameters
----------
full: [bool] -optional-
Returns the full data[["ra","dec"]]
= If True, the rest is ignored =
// if full=False
perband: [bool] -optional-
Returns the `usestat` coordinate grouped per band
groupby: [string/None] -optional-
Returns the `usestat` coordinate grouped per given key.
usestat: [string] -optional-
How should be alert coordinates be combined.
any pandas statistics (mean, median, min, max etc.)
Returns
-------
"""
data_ = self.get_data(**kwargs).loc[index] if index is not None else self.get_data(**kwargs)
if full:
return data_[keys]
if perband:
if groupby is None:
groupby = "filter"
else:
groupby = np.atleast_1d(groupby).tolist()+["filter"]
# = Grouped
if groupby is not None:
grouped = data_.groupby(groupby)[keys]
if usestat is None:
return grouped
return getattr(grouped, usestat)()
# = not grouped
if usestat is None:
return data_[keys]
return getattr(data_[keys],usestat)()
def get_coordinates(self, full=False, method="median", detected=True, perband=False, groupby=None, **kwargs):
""" get the coordinates of the alerts
Parameters
----------
full: [bool] -optional-
do you want all the Ra, DEC of the alerts (detected)
method: [numpy's method] -optional-
how should the ra, dec be combined (nanmean, nanmedian etc)
= ignored if full=True =
detected: [bool] -optional-
only consider the detected alerts entries (ra,dec are NaN if not)
Returns
-------
DataFrame
"""
return self.get_keys(keys=["ra","dec"], detected=detected, usestat=method, full=full, perband=perband, groupby=groupby, **kwargs)
def get_data(self, detected=None, filters="*", time_range=None, query=None):
""" get a filtered version of the data.
Example:
--------
self.get_data(filters="ztfg", detected=True, time_range=["2020-10-16",None])
Parameters
----------
detected: [bool or None] -optional-
Do you want:
- True: the detected entries only
- False: the upper limits only
- None: both (e.g. no filtering)
filters: [string (list_of) or None] -optional-
Which filters you want.
- None or '*'/'all': no filtering
- str: this filter only (e.g. 'ztfg')
- list of str: any of these filters (e.g., ['ztfg','ztfr']
time_range: [2d-array or None] -optional-
start and stop time range, None means no limit.
query: [str or list] -optional-
any other query you want to add.
Queries are ' and '.join(query) at the end.
Returns
-------
dataframe
"""
if query is None:
query = []
else:
query = list(np.atleast_1d(query))
# - Detected Filtering
if detected is not None:
if not detected:
query.append("mag == 'NaN'")
else:
query.append("mag != 'NaN'")
# - Filters Filtering
if filters is not None and filters not in ["*","all","any"]:
filters = list(np.atleast_1d(filters))
query.append("filter == @filters")
# - Time Filtering
if time_range is not None:
tstart, tend = time_range
if tstart is None and tend is None:
pass
else:
tindex = pandas.DatetimeIndex(time.Time(self.data["mjd"], format="mjd").datetime)
if tstart is None:
query.append(f"@tindex<'{tend}'")
elif tend is None:
query.append(f"'{tstart}'<@tindex")
else:
query.append(f"'{tstart}'<@tindex<'{tend}'")
# - Returns
if len(query)==0:
return self.data
return self.data.query(" and ".join(query))
def get_filters(self):
""" list of filter in the data """
return np.unique(self.data["filter"]).astype(str)
def show(self, ax=None, savefile=None, filtering={}):
""" """
import matplotlib.pyplot as mpl
from matplotlib import dates as mdates
if ax is None:
fig = mpl.figure(figsize=[5,3])
ax = fig.add_axes([0.15,0.15,0.75,0.75])
else:
fig = ax.figure
base_prop = dict(ls="None", mec="0.9", mew=0.5, ecolor="0.7",marker="o", ms=7)
base_up = dict(ls="None", label="_no_legend_")
if filtering is None:
data = self.data.copy()
else:
data = self.get_data(**filtering)
# - Detected
for filter_ in np.unique(data["filter"]):
if filter_ not in ZTFCOLOR:
warnings.warn(f"Unknown instrument: {filter_} | magnitude not shown")
continue
datadet_ = data.query("filter == @filter_ and mag != 'NaN'")
ax.errorbar(time.Time(datadet_["mjd"], format="mjd").datetime,
datadet_["mag"], yerr= datadet_["magerr"],
label=filter_, color=ZTFCOLOR[filter_], **base_prop)
ax.invert_yaxis()
for filter_ in np.unique(data["filter"]):
if filter_ not in ZTFCOLOR:
continue
# Upper limits
datadet_ = data.query("filter == @filter_ and mag == 'NaN'")
ax.errorbar(time.Time(datadet_["mjd"], format="mjd").datetime,
datadet_["limiting_mag"], yerr= 0.1, lolims=True, alpha=0.3,
**{**base_up,**{"color":ZTFCOLOR[filter_]}})
locator = mdates.AutoDateLocator()
formatter = mdates.ConciseDateFormatter(locator)
ax.xaxis.set_major_locator(locator)
ax.xaxis.set_major_formatter(formatter)
ax.set_ylabel("mag")
if savefile is not None:
fig.savefig(savefile)
return fig
# ============= #
# Properties #
# ============= #
@property
def data(self):
""" """
return self._data
@property
def name(self):
""" short cut of self.obj_id"""
return self.obj_id
@property
def obj_id(self):
""" """
obj_id = np.unique(self.data["obj_id"])
if len(obj_id)==1:
return obj_id[0]
if len(obj_id)>1:
warnings.warn(f"several obj_id {obj_id}")
return obj_id
if len(obj_id)==0:
warnings.warn(f"no obj_id")
return None
# ----------- #
# #
# SOURCES #
# #
# ----------- #
class FritzSource( object ):
def __init__(self, fritzdict=None):
""" """
if fritzdict is not None:
self.set_fritzdict(fritzdict)
@classmethod
def from_name(cls, name, force_dl=False, store=False, **kwargs):
""" """
if not force_dl:
filename = cls._build_filename_(name, **kwargs)
if os.path.isfile(filename):
extension = filename.split(".")[-1]
return getattr(cls,f"read_{extension}")(filename)
return cls( download_source(name, get_object=False, store=store, **kwargs) )
# I/O
def store(self, fileout=None, dirout=None, extension="json", **kwargs):
""" calls the self.to_{extension} with default naming convention. """
# can differ to extension if fileout given
if fileout is None:
fileout = self._build_filename_(self.name, dirout=dirout, extension=extension)
if extension == "json":
return getattr(self,f"to_{extension}")(fileout, **kwargs)
raise ValueError(f"only 'json' extension implemented ; {extension} given")
@staticmethod
def _build_filename_(name, dirout=None, extension="json"):
""" """
if dirout is None or dirout == "default":
dirout = os.path.join(FRITZSOURCE,"source")
if not os.path.isdir(dirout):
os.makedirs(dirout, exist_ok=True)
return os.path.join(dirout,f"fritz_source_{name}.{extension}")
# - to file
def to_json(self, filename):
""" """
import json
with open(filename,'w') as fileout_:
json.dump(self.fritzdict, fileout_)
# - read file
@classmethod
def read_json(cls, filename):
""" """
this = cls()
with open(filename, 'r') as filename_:
this.set_fritzdict(json.load(filename_))
this.set_filename(filename)
return this
# ============ #
# Method #
# ============ #
# ------- #
# SETTER #
# ------- #
def set_fritzdict(self, fritzdict):
""" """
self._fritzdict = fritzdict
def set_filename(self, filename):
""" """
self._filename = filename
# ------- #
# GETTER #
# ------- #
def get_coordinates(self, as_skycoords=False):
""" """
if as_skycoords:
from astropy import coordinates, units
return coordinates.SkyCoord(self.ra, self.dec, unit=units.deg)
return self.ra, self.dec
def get_classification(self, full=True, squeeze=True):
""" """
fullclassi = self.fritzdict["classifications"]
if full:
return fullclassi
classi = [c_.get("classification",None) for c_ in fullclassi]
if squeeze:
classi = np.unique(classi)
if len(classi)==1:
return classi[0]
return classi
def get_redshift(self, full=True):
""" """
if not full:
return self.fritzdict.get("redshift",None)
return {k:self.fritzdict[k] for k in ["redshift","redshift_history"]}
def get_annotation(self, squeeze=True):
""" """
annot = self.fritzdict["annotations"]
if len(annot)==1 and squeeze:
return annot[0]
return annot
def get_time(self, which=["created_at","last_detected_at"], squeeze=True,
format=None, asarray=False):
"""
Parameters
----------
which: [str or list of] -optional-
Which time key you want (could be a combination)
- created_at
- last_detected_at
squeeze: [bool] -optional-
get the value directly if only one 'which'
format: [str or None] -optional-
The format of the output:
- None or 'str': as given by Fritz
- 'time': as astropy.time.Time
- "jd", "mjd", "datetime", etc.: any astropy.time.Time attribute
asarray: [bool] -optional-
Shall this return a dictionary or an array
= ignored if squeeze and which is a single key =
Returns
-------
value, dict or array (see squeeze and asarray)
"""
which = np.atleast_1d(which)
#
# = start: Formating
if format is None or format in ["str","default","string"]:
times = {k:self.fritzdict[k] for k in which}
else:
if format in ["time","Time","astropy","astropy.time", "astropy.Time"]:
times = {k:time.Time(self.fritzdict[k]) for k in which}
else:
times = {k:getattr(time.Time(self.fritzdict[k].split(".")[0]),format) for k in which}
# = end: Formating
#
if len(which)==1 and squeeze:
return times[which[0]]
if asarray:
return np.asarray([times[w] for w in which])
return times
def get_metaquery(self, priorcreation=100, postlast=100, size=0.01, add_query=None):
""" get entry for ZTFQuery.query.load_metaquery(**this_output) """
jdmin, jdmax = self.get_time(which=["created_at", "last_detected_at"],
format="jd", asarray=True)+ [-priorcreation, postlast]
return dict( radec=[self.ra,self.dec], size=size,
sql_query=f"obsjd BETWEEN {jdmin} and {jdmax}")
# ------- #
# PLOTTER #
# ------- #
def view_on_fritz(self):
""" opens your browser at the corresponding fritz source page"""
if self.name is None:
raise AttributeError("self.name is not set. Cannot launch target Marshal page.")
import webbrowser
return webbrowser.open(_BASE_FRITZ_URL+f'source/{self.name}', new=2)
# ============ #
# Properties #
# ============ #
@property
def fritzdict(self):
""" dictionary given by fritz for the spectrum """
return self._fritzdict
def has_fritzdict(self):
""" Test if fritzdict has been set. True means yes."""
return hasattr(self, "_fritzdict") and self._fritzdict is not None
@property
def name(self):
""" short cut to self.id"""
return self.id
@property
def id(self):
""" """
return self.fritzdict["id"]
@property
def ra(self):
""" Target Right Ascention """
return self.fritzdict["ra"]
@property
def dec(self):
""" Target Declination """
return self.fritzdict["dec"]
@property
def redshift(self):
""" Target Redshift,
see also self.get_redshift() """
return self.get_redshift(full=False)
@property
def classification(self):
""" Target Classification,
see also self.get_classification() """
return self.get_classification(full=False)
@property
def fritzkey(self):
""" Fritz Internal Key """
return self.fritzdict["internal_key"]
# -------------- #
# #
# Spectro #
# #
# -------------- #
def parse_ascii(datastring, sep=None, hkey="#", hsep=": ", isvariance=None):
""" """
header_key = [l for l in datastring if l.startswith("#")]
if len(header_key)>0:
header = pandas.DataFrame([l.replace(hkey,"").split(hsep)
for l in header_key if len(l.replace(hkey,"").split(hsep))==2],
columns=["key","value"])#.set_index("key")["value"]
header["key"] = header["key"].str.strip()
header = header.set_index("key")#["value"]
else:
header = None
lbda, flux, *error = np.asarray([l.split(sep) for l in datastring
if not l.startswith(hkey) and len(l)>2],
dtype="float").T
if len(error) == 0:
error = None
elif len(error) == 1:
error = error[0]
else:
warnings.warn("Cannot parse the last columns (lbda, flux, several_columns) ; ignored.")
error = None
if error is not None:
if isvariance is None:
isvariance = np.all(np.abs(flux/error)>1e3)
if isvariance:
error = np.sqrt(error)
if error is not None:
data = pandas.DataFrame(np.asarray([lbda, flux, error]).T,
columns=["lbda", "flux", "error"])
else:
data = pandas.DataFrame(np.asarray([lbda, flux]).T,
columns=["lbda", "flux"])
return data, header
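# parse_ascii expects text of the form sketched below: '#'-prefixed 'KEY: value'
# header lines followed by whitespace-separated columns of lbda, flux and,
# optionally, error (or variance, auto-detected from the flux/error ratio):
#   # EXPTIME: 1200
#   # OBSDATE: 2020-01-01
#   4000.0 1.23e-16 2.1e-34
#   4002.0 1.25e-16 2.0e-34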
class FritzSpectrum( object ):
""" """
_IMPLEMENTED_ORIGINALFORMAT = ["sedm"]
def __init__(self, fritzdict=None, **kwargs):
""" """
if fritzdict is not None:
self.set_fritzdict(fritzdict, **kwargs)
# --------- #
# From #
# --------- #
@classmethod
def from_fritz(cls, name, entry=None, spectra_ok=True):
""" """
print("FritzSpectrum.from_fritz(name) is DEPRECATED, use FritzSpectrum.from_name(name)")
return cls.from_name(name, entry=entry, spectra_ok=spectra_ok)
@classmethod
def from_name(cls, name, warn=True, force_dl=False, store=False, **kwargs):
""" """
if not force_dl:
from glob import glob
local_spectra = glob(cls._build_filename_(name, "*", "*", **kwargs))
if len(local_spectra)>0:
spectra = []
for filename in local_spectra:
extension = filename.split(".")[-1]
spectra.append(getattr(cls,f"read_{extension}")(filename))
if len(spectra)==1:
return spectra[0]
if warn:
warnings.warn(f"{name} has several spectra, list of FritzSpectrum returned")
return spectra
# No Local spectra or force download.
spectra = download_spectra(name, get_object=False, store=store)
if spectra is None or len(spectra) == 0:
if warn:
warnings.warn(f"No spectra downloaded for {name}")
return None
if len(spectra) == 1:
return cls(spectra[0])
if warn:
warnings.warn(f"{name} has several spectra, list of FritzSpectrum returned")
return [cls(spec_) for spec_ in spectra]
def store(self, fileout=None, dirout=None, extension="ascii", **kwargs):
""" calls the self.to_{extension} with default naming convention. """
# can differ to extension if fileout given
if fileout is None:
fileout = self._build_filename_(self.name, self.instrument, self.filekey,
dirout=dirout, extension=extension)
if extension in ["txt", "dat","data", "ascii"]:
extension = "ascii"
if extension in ["fits", "json", "ascii", "txt"]:
out = getattr(self,f"to_{extension}")(fileout, **kwargs)
self.set_filename(fileout)
return out
raise ValueError(f"only 'fits','json', 'txt'/'dat'/'ascii' extension implemented ; {extension} given")
# - to file
def to_fits(self, fileout, overwrite=True):
""" Store the data in fits format """
from astropy.io.fits import HDUList, Header
from astropy.io.fits import PrimaryHDU, ImageHDU
fitsheader = Header()
if self.header is not None:
for k,v in self.header.infer_objects().iterrows():
fitsheader.set(k, v.value)
hdul = []
# -- Data saving
hdul.append( PrimaryHDU(self.flux, fitsheader) )
if self.has_error():
hdul.append( ImageHDU(self.error, name='ERROR') )
if not self._is_lbdastep_constant_():
hdul.append( ImageHDU(self.lbda, name='LBDA') )
hdulist = HDUList(hdul)
hdulist.writeto(fileout, overwrite=overwrite)
def to_hdf(self, filename, datakey="data", headerkey="header", **kwargs):
""" Store the data to hdf5 format """
self.data.to_hdf(filename, key=datakey)
pandas.DataFrame(self.header).to_hdf(filename, key=headerkey)
def to_ascii(self, fileout):
""" Store the data in text format """
fileout_ = open(fileout, "w")
for k,v in self.header.to_dict().items():
fileout_.write("# %s: %s\n"%(k.upper(),v))
if self.has_error():
for l_,f_,v_ in zip(self.lbda, self.flux, self.error):
fileout_.write(f"{l_:.1f} {f_:.3e} {v_:.3e}\n")
else:
for l_,f_ in zip(self.lbda, self.flux):
fileout_.write(f"{l_:.1f} {f_:.3e}\n")
fileout_.close()
def to_json(self, fileout):
""" Store the data in json format """
import json
with open(fileout,'w') as fileout_:
json.dump(self.fritzdict, fileout_)
# - read file
@classmethod
def read_fits(cls, filename, dataext=0, headerext=0,
errortable="ERROR", lbdatable="LBDA"):
""" load and build the object given the fits file """
fits_ = fits.open(filename)
# Flux
flux = fits_[dataext].data
# Header
header = dict(fits_[headerext].header)
colnames = [f_.name.lower() for f_ in fits_]
# Error (if any)
if errortable.lower() in colnames:
error = fits_[colnames.index(errortable.lower())].data
else:
error = None
# Wavelength
if lbdatable.lower() in colnames:
lbda = fits_[colnames.index(lbdatable.lower())].data
else:
lbda = cls._header_to_lbda_(header)
this = cls()
this.setup(lbda, flux, header, error=error)
# useful information to store
fritzdict = cls._filename_to_fritzdict_(filename)
this.set_fritzdict(fritzdict, load_spectrum=False)
this.set_filename(filename)
return this
@classmethod
def read_hdf(cls, filename, datakey="data", headerkey="header", **kwargs):
""" load and build the object given the hdf5 file """
data = pandas.read_hdf(filename, key=datakey)
header = pandas.read_hdf(filename, key=headerkey)
fritzdict = cls._filename_to_fritzdict_(filename)
this = cls()
this.set_data(data)
this.set_header(header)
this.set_fritzdict(fritzdict, load_spectrum=False)
this.set_filename(filename)
return this
@classmethod
def read_ascii(cls, filename, **kwargs):
""" load and build the object given the text file """
data, header = parse_ascii(open(filename).read().splitlines(), **kwargs)
fritzdict = cls._filename_to_fritzdict_(filename)
this = cls()
this.set_data(data)
this.set_header(header)
this.set_fritzdict(fritzdict, load_spectrum=False)
this.set_filename(filename)
return this
@classmethod
def read_json(cls, filename):
""" load and build the object given the json file """
this = cls()
with open(filename, 'r') as filename_:
this.set_fritzdict(json.load(filename_))
this.set_filename(filename)
return this
@staticmethod
def _build_filename_(name, instrument, key="", dirout=None, extension="ascii"):
""" """
if dirout is None or dirout == "default":
dirout = os.path.join(FRITZSOURCE,"spectra",name)
if not os.path.isdir(dirout):
os.makedirs(dirout, exist_ok=True)
return os.path.join(dirout,f"fritz_spectrum_{instrument.lower()}_{key}_{name.lower()}.{extension}")
@staticmethod
def _filename_to_fritzdict_(filename, warn=False):
""" """
try:
dictfile = parse_spectrum_filename(filename)
except:
if warn:
warnings.warn("Cannot parse the input name, so information (instrument, obj_id) might be missing")
dictfile = None
if dictfile is not None:
fritzdict = {"instrument_name":dictfile["instrument"],
"obj_id":dictfile["name"],
"original_file_string":None,
"original_file_filename":dictfile["original_file_filename"]
}
else:
fritzdict = {}
return fritzdict
# ============= #
# Method #
# ============= #
# --------- #
# LOADER #
# --------- #
def load_spectrum(self, from_original_file=None, **kwargs):
""" """
if from_original_file is None:
from_original_file = self.instrument in self._IMPLEMENTED_ORIGINALFORMAT
if from_original_file:
if not self.instrument in self._IMPLEMENTED_ORIGINALFORMAT:
warnings.warn(f"No original format file implemented for {self.instrument}. Back to fritzformat")
from_original_file=False
if not from_original_file:
self._loadspec_fritzformat_(**kwargs)
else:
self._loadspec_fileformat_(**kwargs)
def _loadspec_fritzformat_(self, ignore_warnings=True):
""" """
lbda = np.asarray(self.fritzdict["wavelengths"], dtype="float")
flux = np.asarray(self.fritzdict["fluxes"], dtype="float")
error = self.fritzdict.get("errors", None)
try:
header = {k: v for k, v in dict(self.fritzdict["altdata"]).items() if len(k) > 0} if self.fritzdict.get("altdata") is not None else None
except:
warnings.warn("Cannot convert the fritz' altdata into a header. header set to None")
header = None
self.setup(lbda, flux, header, error=error)
def _loadspec_fileformat_(self):
""" """
if self.instrument == "sedm":
data, header = parse_ascii( self.fritzdict["original_file_string"].splitlines() )
else:
raise NotImplementedError(f"only sedm fileformat implemented {self.instrument} given. Contact Mickael if you need that.")
self.set_data(data)
self.set_header(header)
def _lbda_to_header_(self, header=None):
""" """
if not self._is_lbdastep_constant_():
raise ValueError("step is not regular, cannot convert lbda to header keys")
if header is None:
header = self.header
if type(header)==dict:
header["CDELT"] = self._lbdastep[0]
header["CRVAL"] = self.lbda[0]
header["NAXIS"] = len(self.lbda)
else:
header.loc["CDELT"] = self._lbdastep[0]
header.loc["CRVAL"] = self.lbda[0]
header.loc["NAXIS"] = len(self.lbda)
return header
@classmethod
def _header_to_lbda_(cls, header):
""" """
# Both format exist
if "CDELT1" in header:
step = header.get("CDELT1")
start = header.get("CRVAL1")
size = header.get("NAXIS1")
else:
step = header.get("CDELT")
start = header.get("CRVAL")
size = header.get("NAXIS")
return np.arange(size)*step + start
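# Worked example for the header convention handled above (values are made up):
# a header with CDELT=2.0, CRVAL=3700.0 and NAXIS=5 yields
# np.arange(5) * 2.0 + 3700.0 -> array([3700., 3702., 3704., 3706., 3708.])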
# --------- #
# SETTER #
# --------- #
def setup(self, lbda, flux, header, error=None):
""" Build the spectrum given the input
this calls self.set_data() and self.set_header()
"""
if error is None:
data = pandas.DataFrame(np.asarray([lbda, flux], dtype="float").T, columns=["lbda", "flux"])
__copyright__ = "Copyright (c) 2020 Jina AI Limited. All rights reserved."
__license__ = "Apache-2.0"
import numpy as np
def reduce_mean(data: 'np.ndarray', mask_2d: 'np.ndarray') -> 'np.ndarray':
emb_dim = data.shape[2]
mask = np.tile(mask_2d, (emb_dim, 1, 1))
mask = np.rollaxis(mask, 0, 3)
output = mask * data
return np.sum(output, axis=1) / np.sum(mask, axis=1)
def reduce_max(data: 'np.ndarray', mask_2d: 'np.ndarray') -> 'np.ndarray':
emb_dim = data.shape[2]
mask = np.tile(mask_2d, (emb_dim, 1, 1))
mask = np.rollaxis(mask, 0, 3)
output = mask * data
neg_mask = (mask_2d - 1) * 1e10
neg_mask = np.tile(neg_mask, (emb_dim, 1, 1))
neg_mask = np.rollaxis(neg_mask, 0, 3)
output += neg_mask  # padded positions get a large negative value so they never win the max
return np.max(output, axis=1)
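# Illustrative usage sketch for the two pooling helpers above (commented out; shapes and
# the mask layout are made-up assumptions for demonstration):
# data = np.random.rand(2, 3, 4)                    # 2 sequences, 3 tokens, embedding dim 4
# mask_2d = np.array([[1., 1., 1.], [1., 1., 0.]])  # last token of the 2nd sequence is padding
# mean_pooled = reduce_mean(data, mask_2d)          # shape (2, 4); padded token excluded
# max_pooled = reduce_max(data, mask_2d)            # shape (2, 4); padded token suppressed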
#!/usr/bin/env python2.7
# Author : <NAME> (<EMAIL>)
#
# Description : This code reads the KSTAR MIR data via the iKSTAR server
#
# Acknowledgement : Dr. Y. Nam and Prof. G.S. Yun
#
import numpy as np
import h5py
MNUM = 10000000 # total number of samples in an ECEI channel
VN = 16 # number of vertical arrays
class KstarMir(object):
def __init__(self, shot, clist):
self.shot = shot
if 12272 < shot and shot < 14942:
self.data_path = '/eceidata/exp_2015/'
elif 14941 < shot and shot < 17356:
self.data_path = '/eceidata2/exp_2016/'
elif 17963 < shot and shot < 19392:
self.data_path = '/eceidata2/exp_2017/'
elif 19391 < shot and shot < 21779:
self.data_path = '/eceidata2/exp_2018/'
elif 21778 < shot and shot < 24100:
self.data_path = '/eceidata2/exp_2019/'
elif 24100 < shot:
self.data_path = '/eceidata2/exp_2020/'
self.clist = expand_clist(clist)
# file name
self.fname = "{:s}{:06d}/MIR.{:06d}.h5".format(self.data_path, shot, shot)
# get attributes
with h5py.File(self.fname, 'r') as f:
# get attributes
dset = f['MIR']
self.tt = dset.attrs['TriggerTime'] # in [s]
self.toff = self.tt[0]+0.001
self.fs = dset.attrs['SampleRate'][0]*1000.0 # in [Hz] same sampling rate
self.bt = dset.attrs['TFcurrent']*0.0995556 # [kA] -> [T]
# self.mfl = dset.attrs['MFL'] # can't find attribute
# self.mirh = dset.attrs['MIRH']
# self.mirf = dset.attrs['MIRF']
# self.lo = dset.attrs['MLo']
# self.rf1 = dset.attrs['MRF1']
# self.rf2 = dset.attrs['MRF2']
# self.rf3 = dset.attrs['MRF3']
# self.rf4 = dset.attrs['MRF4']
print('MIR file = {}'.format(self.fname))
# get channel position
self.channel_position()
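# Illustrative usage sketch (commented out; the shot number is hypothetical and the data
# files are only reachable through the iKSTAR paths hard-coded above):
# mir = KstarMir(22568, ['MIR_0101', 'MIR_0102'])
# mir.get_data(trange=[2.0, 2.1], norm=1)  # normalize each channel by its trange standard deviation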
def get_data(self, trange, norm=0, atrange=[1.0, 1.01], res=0, verbose=1):
self.trange = trange
# norm = 0 : no normalization
# norm = 1 : normalization by trange average
# norm = 2 : normalization by atrange average
# res = 0 : no resampling
if norm == 0:
if verbose == 1: print('Data is not normalized MIR')
elif norm == 1:
if verbose == 1: print('Data is normalized by trange std MIR')
elif norm == 2:
if verbose == 1: print('Data is normalized by atrange std MIR')
# get time base
time, idx1, idx2 = self.time_base(trange)
if norm == 2:
atime, aidx1, aidx2 = self.time_base(atrange)
# get data
with h5py.File(self.fname, 'r') as f:
# time series length
tnum = idx2 - idx1
# number of channels
cnum = len(self.clist)
data = np.zeros((cnum, tnum))
for i in range(0, cnum):
vn = int(self.clist[i][4:6])
fn = int(self.clist[i][6:8])
inode = 'MD{:02d}{:02d}'.format(1 + (fn-1)*2,vn)
qnode = 'MD{:02d}{:02d}'.format(fn*2,vn)
inode = "/MIR/" + inode + "/Voltage"
qnode = "/MIR/" + qnode + "/Voltage"
iv = f[inode][idx1:idx2]/10000.0
qv = f[qnode][idx1:idx2]/10000.0
# remove offset
iv = iv - np.mean(iv)
qv = qv - np.mean(qv)
if norm == 1:
iv = iv/np.std(iv)
qv = qv/np.std(qv)
elif norm == 2:
iav = f[inode][aidx1:aidx2]/10000.0
qav = f[qnode][aidx1:aidx2]/10000.0
iv = iv/np.std(iav)
qv = qv/np.std(qav)
#!/usr/bin/env python
# Part of the psychopy_ext library
# Copyright 2010-2015 <NAME>
# The program is distributed under the terms of the GNU General Public License,
# either version 3 of the License, or (at your option) any later version.
"""
A library of simple models of vision
Simple usage::
import glob
from psychopy_ext import models
ims = glob.glob('Example_set/*.jpg') # get all jpg images
hmax = models.HMAX()
# if you want to see how similar your images are to each other
hmax.compare(ims)
# or to simply get the output and use it further
out = hmax.run(ims)
"""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
# from __future__ import unicode_literals
import sys, os, glob, itertools, warnings, inspect, argparse, imp
import tempfile, shutil
import pickle
from collections import OrderedDict
import numpy as np
import scipy.ndimage
import pandas
import seaborn as sns
import matlab_wrapper
import sklearn.manifold
import sklearn.preprocessing, sklearn.metrics, sklearn.cluster
import skimage.feature, skimage.data
from psychopy_ext import stats, plot, report, utils
try:
imp.find_module('caffe')
HAS_CAFFE = True
except:
try:
os.environ['CAFFE']
# put Python bindings in the path
sys.path.insert(0, os.path.join(os.environ['CAFFE'], 'python'))
HAS_CAFFE = True
except:
HAS_CAFFE = False
if HAS_CAFFE:
# Suppress GLOG output for python bindings
GLOG_minloglevel = os.environ.pop('GLOG_minloglevel', None)
os.environ['GLOG_minloglevel'] = '5'
import caffe
from caffe.proto import caffe_pb2
from google.protobuf import text_format
HAS_CAFFE = True
# Turn GLOG output back on for subprocess calls
if GLOG_minloglevel is None:
del os.environ['GLOG_minloglevel']
else:
os.environ['GLOG_minloglevel'] = GLOG_minloglevel
class Model(object):
def __init__(self, model, labels=None, verbose=True, *args, **kwargs):
self.name = ALIASES[model]
self.nice_name = NICE_NAMES[model]
self.safename = self.name
self.labels = labels
self.args = args
self.kwargs = kwargs
self.verbose = verbose
def download_model(self, path=None):
"""Downloads and extracts a model
:Kwargs:
path (str, default: '')
Where model should be extracted
"""
self._setup()
if self.model.model_url is None:
print('Model {} is already available'.format(self.nice_name))
elif self.model.model_url == 'manual':
print('WARNING: Unfortunately, you need to download {} manually. '
'Follow the instructions in the documentation.'.format(self.nice_name))
else:
print('Downloading and extracting {}...'.format(self.nice_name))
if path is None:
path = os.getcwd()
text = raw_input('Where do you want the model to be extracted? '
'(default: {})\n'.format(path))
if text != '': path = text
outpath, _ = utils.extract_archive(self.model.model_url,
folder_name=self.safename, path=path)
if self.name == 'phog':
with open(os.path.join(outpath, 'anna_phog.m')) as f:
text = f.read()
with open(os.path.join(outpath, 'anna_phog.m'), 'wb') as f:
s = 'dlmwrite(s,p);'
f.write(text.replace(s, '% ' + s, 1))
print('Model {} is available here: {}'.format(self.nice_name, outpath))
print('If you want to use this model, either give this path when '
'calling the model or add it to your path '
'using {} as the environment variable.'.format(self.safename.upper()))
def _setup(self):
if not hasattr(self, 'model'):
if self.name in CAFFE_MODELS:
self.model = CAFFE_MODELS[self.name](model=self.name, *self.args, **self.kwargs)
else:
self.model = KNOWN_MODELS[self.name](*self.args, **self.kwargs)
self.model.labels = self.labels
self.isflat = self.model.isflat
self.model.verbose = self.verbose
def run(self, *args, **kwargs):
self._setup()
return self.model.run(*args, **kwargs)
def train(self, *args, **kwargs):
self._setup()
return self.model.train(*args, **kwargs)
def test(self, *args, **kwargs):
self._setup()
return self.model.test(*args, **kwargs)
def predict(self, *args, **kwargs):
self._setup()
return self.model.predict(*args, **kwargs)
def gen_report(self, *args, **kwargs):
self._setup()
return self.model.gen_report(*args, **kwargs)
class _Model(object):
def __init__(self, labels=None):
self.name = 'Model'
self.safename = 'model'
self.isflat = False
self.labels = labels
self.model_url = None
def gen_report(self, test_ims, train_ims=None, html=None):
print('input images:', test_ims)
print('processing:', end=' ')
if html is None:
html = report.Report(path=reppath)
html.open()
close_html = True
else:
close_html = False
resps = self.run(test_ims=test_ims, train_ims=train_ims)
html.writeh('Dissimilarity', h=1)
dis = dissimilarity(resps)
plot_data(dis, kind='dis')
html.writeimg('dis', caption='Dissimilarity across stimuli'
'(blue: similar, red: dissimilar)')
html.writeh('MDS', h=1)
mds_res = mds(dis)
plot_data(mds_res, kind='mds', icons=test_ims)
html.writeimg('mds', caption='Multidimensional scaling')
if self.labels is not None:
html.writeh('Linear separability', h=1)
lin = linear_clf(dis, self.labels)
plot_data(lin, kind='linear_clf', chance=1./len(np.unique(self.labels)))
html.writeimg('lin', caption='Linear separability')
if close_html:
html.close()
def run(self, test_ims, train_ims=None, layers='output', return_dict=True):
"""
This is the main function to run the model.
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images
:Kwargs:
- train_ims (str, list, tuple, np.ndarray)
Training images
- layers ('all'; 'output', 'top', None; str, int;
list of str or int; default: None)
Which layers to record and return. 'output', 'top' and None
return the output layer.
- return_dict (bool, default: True`)
Whether a dictionary should be returned. If False, only the last
layer is returned as an np.ndarray.
"""
if train_ims is not None:
self.train(train_ims)
output = self.test(test_ims, layers=layers, return_dict=return_dict)
return output
def train(self, train_ims):
"""
A placeholder for a function for training a model.
If the model is not trainable, then it will default to this function
here that does nothing.
"""
self.train_ims = im2iter(train_ims)
def test(self, test_ims, layers='output', return_dict=True):
"""
A placeholder for a function for testing a model.
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images
:Kwargs:
- layers ('all'; 'output', 'top', None; str, int;
list of str or int; default: 'output')
Which layers to record and return. 'output', 'top' and None
return the output layer.
- return_dict (bool, default: True`)
Whether a dictionary should be returned. If False, only the last
layer is returned as an np.ndarray.
"""
self.layers = layers
# self.test_ims = im2iter(test_ims)
def predict(self, ims, topn=5):
"""
A placeholder for a function for predicting a label.
"""
pass
def _setup_layers(self, layers, model_keys):
if self.safename in CAFFE_MODELS:
filt_layers = self._filter_layers()
else:
filt_layers = model_keys
if layers in [None, 'top', 'output']:
self.layers = [filt_layers[-1]]
elif layers == 'all':
self.layers = filt_layers
elif isinstance(layers, (str, unicode)):
self.layers = [layers]
elif isinstance(layers, int):
self.layers = [filt_layers[layers]]
elif isinstance(layers, (list, tuple, np.ndarray)):
if isinstance(layers[0], int):
self.layers = [filt_layers[layer] for layer in layers]
elif isinstance(layers[0], (str, unicode)):
self.layers = layers
else:
raise ValueError('Layers can only be: None, "all", int or str, '
'list of int or str, got', layers)
else:
raise ValueError('Layers can only be: None, "all", int or str, '
'list of int or str, got', layers)
def _fmt_output(self, output, layers, return_dict=True):
self._setup_layers(layers, output.keys())
outputs = [output[layer] for layer in self.layers]
if not return_dict:
output = output[self.layers[-1]]
return output
def _im2iter(self, ims):
"""
Converts input into in iterable.
This is used to take arbitrary input value for images and convert them to
an iterable. If a string is passed, a list is returned with a single string
in it. If a list or an array of anything is passed, nothing is done.
Otherwise, if the input object does not have `len`, an Exception is thrown.
"""
if isinstance(ims, (str, unicode)):
out = [ims]
else:
try:
len(ims)
except:
raise ValueError('input image data type not recognized')
else:
try:
ndim = ims.ndim
except:
out = ims
else:
if ndim == 1: out = ims.tolist()
elif self.isflat:
if ndim == 2: out = [ims]
elif ndim == 3: out = ims
else:
raise ValueError('images must be 2D or 3D, got %d '
'dimensions instead' % ndim)
else:
if ndim == 3: out = [ims]
elif ndim == 4: out = ims
else:
raise ValueError('images must be 3D or 4D, got %d '
'dimensions instead' % ndim)
return out
def load_image(self, *args, **kwargs):
return utils.load_image(*args, **kwargs)
def dissimilarity(self, resps, kind='mean_euclidean', **kwargs):
return dissimilarity(resps, kind=kind, **kwargs)
def mds(self, dis, ims=None, ax=None, seed=None, kind='metric'):
return mds(dis, ims=ims, ax=ax, seed=seed, kind=kind)
def cluster(self, *args, **kwargs):
return cluster(*args, **kwargs)
def linear_clf(self, resps, y, clf=None):
return linear_clf(resps, y, clf=clf)
def plot_data(data, kind=None, **kwargs):
if kind in ['dis', 'dissimilarity']:
if isinstance(data, dict): data = data.values()[0]
g = sns.heatmap(data, **kwargs)
elif kind == 'mds':
g = plot.mdsplot(data, **kwargs)
elif kind in ['clust', 'cluster']:
g = sns.factorplot('layer', 'dissimilarity', data=data, kind='point')
elif kind in ['lin', 'linear_clf']:
g = sns.factorplot('layer', 'accuracy', data=data, kind='point')
if 'chance' in kwargs:
g.ax.axhline(kwargs['chance'], ls='--', c='.2')
else:
try:
sns.factorplot(x='layers', y=data.columns[-1], data=data)
except:
raise ValueError('Plot kind "{}" not recognized.'.format(kind))
return g
def dissimilarity(resps, kind='mean_euclidean', **kwargs):
"""
Computes dissimilarity between all rows in a matrix.
:Args:
resps (numpy.array)
A NxM array of model responses. Each row contains an
output vector of length M from a model, and distances
are computed between each pair of rows.
:Kwargs:
- kind (str or callable, default: 'mean_euclidean')
Distance metric. Accepts string values or callables recognized
by :func:`~sklearn.metrics.pairwise.pairwise_distances`, and
also 'mean_euclidean' that normalizes
Euclidean distance by the number of features (that is,
divided by M), as used, e.g., by Grill-Spector et al.
(1999), Op de Beeck et al. (2001), Panis et al. (2011).
.. note:: Up to version 0.6, 'mean_euclidean' was called
'euclidean', and 'cosine' was called 'gaborjet'. Also note
that 'correlation' used to be called 'corr' and is now
returning dissimilarities in the range [0,2] per
scikit-learn convention.
- \*\*kwargs
Keyword arguments for
:func:`~sklearn.metric.pairwise.pairwise_distances`
:Returns:
A square NxN matrix, typically symmetric unless otherwise
defined by the metric, and with NaN's in the diagonal.
"""
if kind == 'mean_euclidean':
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric='euclidean', **kwargs) / np.sqrt(x.shape[1])
else:
dis_func = lambda x: sklearn.metrics.pairwise.pairwise_distances(x, metric=kind, **kwargs)
if isinstance(resps, (dict, OrderedDict)):
dis = OrderedDict()
for layer, resp in resps.items():
dis[layer] = dis_func(resp)
diag = np.diag_indices(dis[layer].shape[0])
dis[layer][diag] = np.nan
else:
dis = dis_func(resps)
dis[np.diag_indices(dis.shape[0])] = np.nan
return dis
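# Illustrative usage sketch (commented out; the response matrix is random):
# resps = np.random.rand(10, 256)                    # 10 stimuli, 256 model features each
# dis = dissimilarity(resps, kind='mean_euclidean')
# dis.shape                                          # (10, 10), symmetric, NaN on the diagonal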
def mds(dis, ims=None, kind='metric', seed=None):
"""
Multidimensional scaling
:Args:
dis
Dissimilarity matrix
:Kwargs:
- ims
Image paths
- seed
A seed if you need to reproduce MDS results
- kind ({'classical', 'metric'}, default: 'metric')
'Classical' is based on MATLAB's cmdscale, 'metric' uses
:func:`~sklearn.manifold.MDS`.
"""
df = []
if ims is None:
if isinstance(dis, dict):
ims = map(str, range(len(dis.values()[0])))
else:
ims = map(str, range(len(dis)))
for layer_name, this_dis in dis.items():
if kind == 'classical':
vals = stats.classical_mds(this_dis)
else:
mds_model = sklearn.manifold.MDS(n_components=2,
dissimilarity='precomputed', random_state=seed)
this_dis[np.isnan(this_dis)] = 0
vals = mds_model.fit_transform(this_dis)
for im, (x,y) in zip(ims, vals):
imname = os.path.splitext(os.path.basename(im))[0]
df.append([layer_name, imname, x, y])
df = pandas.DataFrame(df, columns=['layer', 'im', 'x', 'y'])
# df = stats.factorize(df)
# if self.layers != 'all':
# if not isinstance(self.layers, (tuple, list)):
# self.layers = [self.layers]
# df = df[df.layer.isin(self.layers)]
# plot.mdsplot(df, ax=ax, icons=icons, zoom=zoom)
return df
def cluster(resps, labels, metric=None, clust=None,
bootstrap=True, stratified=False, niter=1000, ci=95, *func_args, **func_kwargs):
if metric is None:
metric = sklearn.metrics.adjusted_rand_score
struct = labels if stratified else None
n_clust = len(np.unique(labels))
if clust is None:
clust = sklearn.cluster.AgglomerativeClustering(n_clusters=n_clust, linkage='ward')
df = []
def mt(data, labels):
labels_pred = clust.fit_predict(data)
qual = metric(labels, labels_pred)
return qual
print('clustering...', end=' ')
for layer, data in resps.items():
labels_pred = clust.fit_predict(data)
qualo = metric(labels, labels_pred)
if bootstrap:
pct = stats.bootstrap_resample(data1=data, data2=labels,
niter=niter, func=mt, struct=struct, ci=None,
*func_args, **func_kwargs)
for i, p in enumerate(pct):
df.append([layer, qualo, i, p])
else:
pct = [np.nan, np.nan]
df.append([layer, qualo, 0, np.nan])
df = pandas.DataFrame(df, columns=['layer', 'iter', 'bootstrap',
'dissimilarity'])
# df = stats.factorize(df)
return df
def linear_clf(resps, y, clf=None):
if clf is None: clf = sklearn.svm.LinearSVC
df = []
n_folds = len(y) // len(np.unique(y))
for layer, resp in resps.items():
# normalize to 0 mean and variance 1 for each feature (column-wise)
resp = sklearn.preprocessing.StandardScaler().fit_transform(resp)
cv = sklearn.cross_validation.StratifiedKFold(y,
n_folds=n_folds, shuffle=True)
# from scikit-learn docs:
# need not match cross_val_scores precisely!!!
preds = sklearn.cross_validation.cross_val_predict(clf(),
resp, y, cv=cv)
for yi, pred in zip(y, preds):
df.append([layer, yi, pred, yi==pred])
df = pandas.DataFrame(df, columns=['layer', 'actual', 'predicted', 'accuracy'])
# df = stats.factorize(df)
return df
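# Illustrative usage sketch (commented out; responses and labels are made up, and
# sklearn.svm / sklearn.cross_validation must be importable for the default classifier):
# resps = OrderedDict([('output', np.random.rand(20, 50))])  # 20 stimuli, 50 features
# y = np.repeat([0, 1], 10)                                   # two classes, 10 exemplars each
# acc_df = linear_clf(resps, y)                               # long-format table of per-stimulus predictions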
class Pixelwise(_Model):
def __init__(self):
"""
Pixelwise model
The most simple model of them all. Uses pixel values only.
"""
super(Pixelwise, self).__init__()
self.name = 'Pixelwise'
self.safename = 'px'
def test(self, test_ims, layers='output', return_dict=False):
self.layers = [self.safename]
ims = self._im2iter(test_ims)
resps = np.vstack([self.load_image(im).ravel() for im in ims])
resps = self._fmt_output(OrderedDict([(self.safename, resps)]), layers,
return_dict=return_dict)
return resps
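# Illustrative usage sketch (commented out; image paths are hypothetical):
# px = Pixelwise()
# resps = px.run(['stim1.jpg', 'stim2.jpg'], return_dict=False)  # each row = raveled pixel values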
class Retinex(_Model):
def __init__(self):
"""
Retinex algorithm
Based on A. Torralba's implementation presented at PAVIS 2014.
.. warning:: Experimental
"""
super(Retinex, self).__init__()
self.name = 'Retinex'
self.safename = 'retinex'
def gen(self, im, thres=20./256, plot=True, save=False):
im = self.load_image(im)
# 2D derivative
der = np.array([[0, 0, 0], [-1, 1, 0], [0, 0, 0]])
im_paint = np.zeros(im.shape)
im_illum = np.zeros(im.shape)
for chno in range(3):
ch = im[:,:,chno]
outv = scipy.ndimage.convolve(ch, der)
outh = scipy.ndimage.convolve(ch, der.T)
out = np.dstack([outv, outh])
# threshold
paint = np.copy(out)
paint[np.abs(paint) < thres] = 0
illum = np.copy(out)
illum[np.abs(illum) >= thres] = 0
# plt.imshow(paint[:,:,0]); plt.show()
# plt.imshow(paint[:,:,1]); plt.show()
# plt.imshow(illum[:,:,0]); plt.show()
# plt.imshow(illum[:,:,1]); plt.show()
# Pseudo-inverse (using the trick from Weiss, ICCV 2001; equations 5-7)
im_paint[:,:,chno] = self._deconvolve(paint, der)
im_illum[:,:,chno] = self._deconvolve(illum, der)
im_paint = (im_paint - np.min(im_paint)) / (np.max(im_paint) - np.min(im_paint))
im_illum = (im_illum - np.min(im_illum)) / (np.max(im_illum) - np.min(im_illum))
# paintm = scipy.misc.imread('paint2.jpg')
# illumm = scipy.misc.imread('illum2.jpg')
# print np.sum((im_paint-paintm)**2)
# print np.sum((im_illum-illumm)**2)
if plot:
sns.plt.subplot(131)
sns.plt.imshow(im)
sns.plt.subplot(132)
sns.plt.imshow(im_paint)
sns.plt.subplot(133)
sns.plt.imshow(im_illum)
sns.plt.show()
if save:
name, ext = imname.splitext()
scipy.misc.imsave('%s_paint.%s' %(name, ext), im_paint)
scipy.misc.imsave('%s_illum.%s' %(name, ext), im_illum)
def _deconvolve(self, out, der):
# der = np.dstack([der, der.T])
d = []
gi = []
for i, deri in enumerate([der, der.T]):
d.append(scipy.ndimage.convolve(out[...,i], np.flipud(np.fliplr(deri))))
gi.append(scipy.ndimage.convolve(deri, np.flipud(np.fliplr(deri)), mode='constant'))
d = np.sum(d, axis=0)
gi = np.sum(gi, axis=0)
gi = np.pad(gi, (der.shape[0]/2, der.shape[1]/2), mode='constant')
gi = scipy.ndimage.convolve(gi, np.array([[1,0,0], [0,0,0], [0,0,0]]))
mxsize = np.max(out.shape[:2])
g = np.fft.fft2(gi, s=(mxsize*2, mxsize*2))
g[g==0] = 1
h = 1/g
h[g==0] = 0
tr = h * np.fft.fft2(d, s=(mxsize*2,mxsize*2))
ii = np.fft.fftshift(np.real(np.fft.ifft2(tr)))
n = (gi.shape[0] - 5) / 2
im = ii[mxsize - n : mxsize + out.shape[0] - n,
mxsize - n : mxsize + out.shape[1] - n]
return im
class Zoccolan(_Model):
"""
Based on 10.1073/pnas.0811583106
.. warning:: Not implemented fully
"""
def __init__(self):
super(Zoccolan, self).__init__()
self.name = 'Zoccolan'
self.safename = 'zoccolan'
# receptive field sizes in degrees
#self.rfs = np.array([.6,.8,1.])
#self.rfs = np.array([.2,.35,.5])
self.rfs = [10, 20, 30] # deg visual angle
self.oris = np.linspace(0, np.pi, 12)
self.phases = [0, np.pi]
self.sfs = range(1, 11) # cycles per RF size
self.winsize = [5, 5] # size of each patch on the grid
# window size will be fixed in pixels and we'll adjust degrees accordingly
# self.win_size_px = 300
def get_gabors(self, rf):
lams = float(rf[0])/self.sfs # lambda = 1./sf #1./np.array([.1,.25,.4])
sigma = rf[0]/2./np.pi
# rf = [100,100]
gabors = np.zeros((len(self.oris), len(self.phases), len(lams), rf[0], rf[1]))
i = np.arange(-rf[0]/2+1,rf[0]/2+1)
#print i
j = np.arange(-rf[1]/2+1,rf[1]/2+1)
ii,jj = np.meshgrid(i,j)
for o, theta in enumerate(self.oris):
x = ii*np.cos(theta) + jj*np.sin(theta)
y = -ii*np.sin(theta) + jj*np.cos(theta)
for p, phase in enumerate(self.phases):
for s, lam in enumerate(lams):
fxx = np.cos(2*np.pi*x/lam + phase) * np.exp(-(x**2+y**2)/(2*sigma**2))
fxx -= np.mean(fxx)
fxx /= np.linalg.norm(fxx)
#if p==0:
#plt.subplot(len(oris),len(lams),count+1)
#plt.imshow(fxx,cmap=mpl.cm.gray,interpolation='bicubic')
#count+=1
gabors[o,p,s,:,:] = fxx
plt.show()
return gabors
def run(self, ims):
ims = self.input2array(ims)
output = [self.test(im) for im in ims]
def test(self, im):
field = im.shape
num_tiles = (15,15)#[field[0]/10.,field[0]/10.]
size = (field[0]/num_tiles[0], field[0]/num_tiles[0])
V1 = []#np.zeros( gabors.shape + num_tiles )
# tiled_im = im.reshape((num_tiles[0],size[0],num_tiles[1],size[1]))
# tiled_im = np.rollaxis(tiled_im, 1, start=3)
# flat_im = im.reshape((num_tiles[0],num_tiles[1],-1))
for r, rf in enumerate(self.rfs):
def apply_filter(window, this_filter):
this_resp = np.dot(this_filter,window)/np.linalg.norm(this_filter)
# import pdb; pdb.set_trace()
return np.max((0,this_resp)) # returns at least zero
def filter_bank(this_filter,rf):
#print 'done0'
resp = scipy.ndimage.filters.generic_filter(
im, apply_filter, size=rf,mode='nearest',
extra_arguments = (this_filter,))
# import pdb; pdb.set_trace()
#print 'done1'
ii,jj = np.meshgrid(np.arange(0,field[0],size[0]),
np.arange(0,field[1],size[1]) )
selresp = resp[jj,ii]
# maxresp = scipy.ndimage.filters.maximum_filter(
# resp,
# size = size,
# mode = 'nearest'
# )
return np.ravel(selresp)
gabors = self.get_gabors(rf)
#import pdb; pdb.set_trace()
gabors = gabors.reshape(gabors.shape[:3]+(-1,))
# gabors_norms = np.apply_along_axis(np.linalg.norm, -1, gabors)
# import pdb; pdb.set_trace()
# V1.append( np.apply_along_axis(filter_bank, -1, gabors,rf) )
V1resp = np.zeros(gabors.shape[:-1]+num_tiles)
# import pdb; pdb.set_trace()
for i,wi in enumerate(np.arange(0,field[0]-rf[0],size[0])):
for j,wj in enumerate(np.arange(0,field[1]-rf[1],size[1])):
window = im[wi:wi+rf[0],wj:wj+rf[1]]
resp = np.inner(gabors,np.ravel(window))
resp[resp<0] = 0
V1resp[:,:,:,i,j] = resp #/gabors_norms
# print 'done'
V1.append(V1resp)
return [V1]
class GaborJet(_Model):
def __init__(self, nscales=5, noris=8, imsize=256, grid_size=0):
"""
Python implementation of the Gabor-Jet model from Biederman lab.
A given image is transformed with a
Gabor wavelet and certain values on a grid are chosen for the output.
Further details are in `Xu et al., 2009
<http://dx.doi.org/10.1016/j.visres.2009.08.021>`_.
Original implementation copyright 2004 `<NAME>
<http://geon.usc.edu/GWTgrid_simple.m>`_.
:Kwargs:
- nscales (int, default: 5)
Spatial frequency scales
- noris (int, default: 8)
Orientation spacing; angle = np.pi/noris
- imsize ({128, 256}, default: 256)
The image can only be 128x128 px or 256x256 px size.
If the image has a different size, it will be rescaled
**without** maintaining the original aspect ratio.
- grid_size (int, default: 0)
How many positions within an image to take:
- 0: grid of 10x10
- 1: grid of 12x12
- else: grid of imsize x imsize
"""
super(GaborJet, self).__init__()
self.name = 'GaborJet'
self.safename = 'gaborjet'
self.isflat = True
self.nscales = nscales
self.noris = noris
self.imsize = imsize
# generate the grid
if grid_size == 0:
s = imsize/128.
rangeXY = np.arange(20*s, 110*s+1, 10*s) - 1 # 10x10
elif grid_size == 1:
s = imsize/128.
rangeXY = np.arange(10*s, 120*s+1, 10*s) - 1 # 12x12
else:
rangeXY = np.arange(imsize) # 128x128 or 256x256
self.rangeXY = rangeXY.astype(int)
[xx,yy] = np.meshgrid(rangeXY,rangeXY)
self.grid = xx + 1j*yy
self.grid = self.grid.T.ravel() # transpose just to match MatLab's grid(:) behavior
self.grid_pos = np.hstack([self.grid.imag, self.grid.real]).T
def test(self,
test_ims,
cell_type='complex',
sigma=2*np.pi,
layers='magnitudes',
return_dict=False
):
"""
Apply GaborJet to given images.
:Args:
test_ims: str or list of str
Image(s) to process with the model.
:Kwargs:
- cell_type (str, default: 'complex')
Choose between 'complex'(40 output values) and 'simple' (80
values)
- sigma (float, default: 2*np.pi)
Control the size of gaussian envelope
- layers ({'all', 'phases', 'magnitudes'}, default: 'magnitudes')
Not truly layers, but two output possibilities: either Fourier
magnitudes or phases.
- return_dict (bool, default: True)
Whether only magnitude should be returned. If True, then also
phase and grid positions are returned in a dict.
:Returns:
Magnitude and, depending on 'return_dict', phase.
"""
mags = []
phases = []
imlist = self._im2iter(test_ims)
for imno, im in enumerate(imlist):
sys.stdout.write("\rRunning %s... %d%%" % (self.name,
100*imno/len(imlist)))
sys.stdout.flush()
im = self.load_image(im, resize=(self.imsize, self.imsize), flatten=True)
mag, phase = self._test(im, cell_type=cell_type, sigma=sigma)
mags.append(mag.ravel())
phases.append(phase.ravel())
sys.stdout.write("\rRunning %s... done\n" % self.name)
output = OrderedDict([('phases', np.array(phases)),
('magnitudes', np.array(mags))])
output = self._fmt_output(output, layers, return_dict=return_dict)
return output
def _test(self, im, cell_type='complex', sigma=2*np.pi):
# FFT of the image
im_freq = np.fft.fft2(im)
# setup the paramers
kx_factor = 2 * np.pi / self.imsize
ky_factor = 2 * np.pi / self.imsize
# setup space coordinates
xy = np.arange(-self.imsize/2, self.imsize/2).astype(float)
[tx,ty] = np.meshgrid(xy, xy)
tx *= kx_factor
ty *= -ky_factor
# initiallize useful variables
nvars = self.nscales * self.noris
if cell_type == 'complex':
mag = np.zeros((len(self.grid), nvars))
phase = np.zeros((len(self.grid), nvars))
else:
mag = np.zeros((len(self.grid), 2*nvars))
phase = np.zeros((len(self.grid), nvars))
for scale in range(self.nscales):
k0 = np.pi/2 * (1/np.sqrt(2))**scale
for ori in range(self.noris):
ka = np.pi * ori / self.noris
k0x = k0 * np.cos(ka)
k0y = k0 * np.sin(ka)
# generate a kernel specified scale and orientation, which has DC on the center
# this is a FFT of a Morlet wavelet (http://en.wikipedia.org/wiki/Morlet_wavelet)
freq_kernel = 2*np.pi * (
np.exp( -(sigma/k0)**2/2 * ((k0x-tx)**2 + (k0y-ty)**2) ) -\
np.exp( -(sigma/k0)**2/2 * (k0**2+tx**2+ty**2) )
)
# use fftshift to change DC to the corners
freq_kernel = np.fft.fftshift(freq_kernel)
# convolve the image with a kernel of the specified scale and orientation
conv = im_freq*freq_kernel
# calculate magnitude and phase
iconv = np.fft.ifft2(conv)
# import ipdb; ipdb.set_trace()
#eps = np.finfo(float).eps**(3./4)
#real = np.real(iTmpFilterImage)
#real[real<eps] = 0
#imag = np.imag(iTmpFilterImage)
#imag[imag<eps] = 0
#iTmpFilterImage = real + 1j*imag
ph = np.angle(iconv)
ph = ph[self.rangeXY,:][:,self.rangeXY] + np.pi
ind = scale*self.noris+ori
phase[:,ind] = ph.ravel()
if cell_type == 'complex':
mg = np.abs(iconv)
# get magnitude and phase at specific positions
mg = mg[self.rangeXY,:][:,self.rangeXY]
mag[:,ind] = mg.ravel()
else:
mg_real = np.real(iconv)
mg_imag = np.imag(iconv)
# get magnitude and phase at specific positions
mg_real = mg_real[self.rangeXY,:][:,self.rangeXY]
mg_imag = mg_imag[self.rangeXY,:][:,self.rangeXY]
mag[:,ind] = mg_real.ravel()
mag[:,nvars+ind] = mg_imag.ravel()
# use magnitude for dissimilarity measures
return mag, phase
def dissimilarity(self, kind='cosine', *args, **kwargs):
"""
Default dissimilarity for :class:`GaborJet` is `cosine`.
"""
return super(GaborJet, self).dissimilarity(kind=kind, *args, **kwargs)
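# Illustrative usage sketch (commented out; image paths are hypothetical):
# gj = GaborJet(nscales=5, noris=8, imsize=256)
# out = gj.run(['stim1.jpg', 'stim2.jpg'])  # dict holding the 'magnitudes' layer (40 values per grid point)
# dis = gj.dissimilarity(out)               # defaults to the 'cosine' metric defined above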
class HMAX99(_Model):
"""
HMAX for Python
Based on the original HMAX (`Riesenhuber & Poggio, 1999
<http://dx.doi.org/10.1038/14819>`_)
Code rewritten using a Pure MATLAB implementation by <NAME> at the
MIT Center for Biological and Computational Learning. Most of the
structure, variable names and some of the comments come from this
implementation. More comments have been added and code was optimized as
much as possible while trying to maintain its structure close to the
original. View-tuned units have been added by <NAME>ck.
The output was tested against the Pure MatLab output which can be tested
against the Standard C/MATLAB code featured at `Riesenhuber's lab
<http://riesenhuberlab.neuro.georgetown.edu/hmax/index.html#code>`_.
.. note:: This implementation is not the most current HMAX
implementation that doesn't rely on hardcoding features anymore
(e.g., Serre et al., 2007). Use :class:`HMAX_HMIN` or :class:`HMAX_PNAS` to access MATLAB
interface to a more current version of HMAX.
.. note:: Images are resized to 256 x 256 as required by the original
implementation
Original VTU implementation copyright 2007 <NAME>
Original MatLab implementation copyright 2004 <NAME>
Since the original code did not specify a license type, I assume GNU GPL v3
since it is used in `Jim Mutch's latest implementation of HMAX
<http://cbcl.mit.edu/jmutch/cns/>`_
:Kwargs:
- matlab (boolean, default: False)
If *True*, Gaussian filters will be implemented using the
original models implementation which mimicks MatLab's behavior.
Otherwise, a more efficient numerical method is used.
- filter_type ({'gaussian', 'gabor'}, default: 'gaussian')
Type of V1 filter. We default to gaussian as it was used originally
in HMAX'99. However, many people prefer using Gabor filters as
they presumably model V1 better.
"""
def __init__(self, matlab=False, filter_type='gaussian'):
super(HMAX99, self).__init__()
self.name = "HMAX'99"
self.safename = 'hmax99'
self.isflat = True
self.n_ori = 4 # number of orientations
# S1 filter sizes for scale band 1, 2, 3, and 4
self.filter_sizes_all = [[7, 9], [11, 13, 15], [17, 19, 21],
[23, 25, 27, 29]]
# specify (per scale band) how many S1 units will be used to pool over
self.C1_pooling_all = [4, 6, 9, 12]
self.S2_config = [2,2] # how many C1 outputs to put into one "window" in S2 in each direction
if filter_type == 'gaussian': # "typically" used
if matlab: # exact replica of the MatLab implementation
self.filts = self.get_gaussians_matlab(self.filter_sizes_all,
self.n_ori)
else: # a faster and more elegant implementation
self.filts = self.get_gaussians(self.filter_sizes_all,
self.n_ori)
self.mask_name = 'square'
elif filter_type == 'gabor':
self.filts = self.get_gabors(self.filter_sizes_all, self.n_ori)
self.mask_name = 'circle'
else:
raise ValueError("filter type not recognized")
self.istrained = False # initially VTUs are not set up
def train(self, train_ims):
"""
Train the model
That is, supply view-tuned units (VTUs) with C2 responses to
'prototype' images, to which these VTUs will be maximally tuned.
:Args:
train_ims (str, list, tuple, np.ndarray)
Training images
"""
try:
self.tuning = pickle.load(open(train_ims,'rb'))
print('done')
except:
self.tuning = self.test(train_ims, op='training', layers='C2',
return_dict=False)
self.istrained = True
def test(self, test_ims, op='testing', layers='output', return_dict=True):
"""
Test the model on the given image
:Args:
test_ims (str, list, tuple, np.ndarray)
Test images.
"""
ims = self._im2iter(test_ims)
# Get number of filter sizes
out = OrderedDict()
size_S1 = sum([len(fs) for fs in self.filter_sizes_all])
S1 = np.zeros((256, 256, size_S1, self.n_ori))
out['C1'] = np.zeros((len(ims), 256, 256, self.n_ori,
len(self.filter_sizes_all)))
# S2 has an irregular shape which depends on the spatial frequency band
S2 = []
C2_tmp = np.zeros(((self.S2_config[0]*self.S2_config[1])**self.n_ori,
len(self.filter_sizes_all)))
out['C2'] = np.zeros((len(ims), C2_tmp.shape[0]))
for imno, im in enumerate(ims):
# im *= 255
sys.stdout.write("\rRunning HMAX'99... %s: %d%%" %(op, 100*imno/len(ims)))
sys.stdout.flush()
im = self.load_image(im, flatten=True, resize=(256,256))
# Go through each scale band
S1_idx = 0
for which_band in range(len(self.filter_sizes_all)):
# calculate S1 responses
S1_tmp = self.get_S1(im, which_band)
num_filter = len(self.filter_sizes_all[which_band])
# store S1 responses for each scale band
S1[..., S1_idx:S1_idx + num_filter, :] = S1_tmp
S1_idx += num_filter
# calculate other layers
C1_tmp = self.get_C1(S1_tmp, which_band)
out['C1'][imno, ..., which_band] = C1_tmp
S2_tmp = self.get_S2(C1_tmp, which_band)
S2.append(S2_tmp)
C2_tmp[:, which_band] = self.get_C2(S2_tmp, which_band)
out['C2'][imno] = np.max(C2_tmp, -1) # max over all scale bands
# calculate VTU if trained
if self.istrained:
out['VTU'] = self.get_VTU(out['C2'])
sys.stdout.write("\rRunning HMAX'99... %s: done\n" %op)
output = self._fmt_output(out, layers, return_dict=return_dict)
return output
def get_gaussians(
self,
filter_sizes_all,
n_ori = 4,
sigDivisor = 4.
):
"""
Generates 2D difference of Gaussians (DoG) filters.
This function is a faster, more accurate and more elegant version of
the original gaussian_filters_matlab but will not produce identical
filters as the original (but very close). For practical purposes, this
one is preferred. In case you want to mimic the identical behavior of
the original HMAX, use gaussian_filters_matlab.
:Args:
filter_sizes_all (list of depth 2)
A nested list (grouped by filter bands) of integer filter sizes
:Kwargs:
- n_ori (int, default: 4)
A number of filter orientations. Orientations are spaced by np.pi/n_ori.
- sigDivisor (float, default: 4.)
A parameter to adjust DoG filter frequency.
:Returns:
A nested list of filters of all orientations
"""
gaussians = []
# loop over filter bands
for fNo, filter_sizes in enumerate(filter_sizes_all):
gaussians.append([])
# loop over filter sizes within a filter band
for filter_size in filter_sizes:
fxx = np.zeros((filter_size,filter_size,n_ori))
sigmaq = (filter_size/sigDivisor)**2
i = np.arange(-filter_size/2+1, filter_size/2+1)
import numpy as np
import ibllib.dsp as dsp
from ibllib.dsp import smooth
from ibllib.dsp.utils import parabolic_max
from brainbox.processing import bincount2D
def estimate_drift(spike_times, spike_amps, spike_depths, display=False):
"""
Electrode drift for spike sorted data.
:param spike_times:
:param spike_amps:
:param spike_depths:
:param display:
:return: drift (ntimes vector) in input units (usually um)
:return: ts (ntimes vector) time scale in seconds
"""
# binning parameters
DT_SECS = 1 # output sampling rate of the depth estimation (seconds)
DEPTH_BIN_UM = 2 # binning parameter for depth
AMP_BIN_LOG10 = [1.25, 3.25] # binning parameter for amplitudes (log10 in uV)
N_AMP = 1 # number of amplitude bins
NXCORR = 50 # positive and negative lag in depth samples to look for depth
NT_SMOOTH = 9 # length of the Gaussian smoothing window in samples (DT_SECS rate)
# experimental: try the amp with a log scale
nd = int(np.ceil(np.nanmax(spike_depths) / DEPTH_BIN_UM))
tmin, tmax = (np.min(spike_times), np.max(spike_times))
nt = int((np.ceil(tmax) - np.floor(tmin)) / DT_SECS)
# 3d histogram of spikes along amplitude, depths and time
atd_hist = np.zeros((N_AMP, nt, nd), dtype=np.single)
abins = (np.log10(spike_amps * 1e6) - AMP_BIN_LOG10[0]) / np.diff(AMP_BIN_LOG10) * N_AMP
abins = np.minimum(np.maximum(0, np.floor(abins)), N_AMP - 1)
for i, abin in enumerate(np.unique(abins)):
import torch
import numpy as np
import pytest
from bgflow.nn.flow.crd_transform.ic_helper import (
det2x2,
det3x3,
dist_deriv,
angle_deriv,
torsion_deriv,
ic2xy0_deriv,
ic2xyz_deriv,
init_xyz2ics,
init_ics2xyz,
)
from bgflow.nn.flow.crd_transform.ic import (
GlobalInternalCoordinateTransformation,
RelativeInternalCoordinateTransformation,
MixedCoordinateTransformation,
decompose_z_matrix,
)
# TODO Floating point precision is brittle!
# Revision should include numerically more robust
# implementations - especially for angular values.
TOLERANCES = {
torch.device("cuda:0"): {torch.float32: (1e-2, 1e-3), torch.float64: (1e-6, 1e-6)},
torch.device("cpu"): {torch.float32: (1e-4, 1e-4), torch.float64: (1e-7, 1e-7)}
}
N_REPETITIONS = 50
@pytest.fixture()
def alanine_ics():
"""Examplary z-matrix, fixed atoms, and positions for capped alanine."""
rigid_block = np.array([6, 8, 9, 10, 14])
relative_z_matrix = np.array(
[
[0, 1, 4, 6],
[1, 4, 6, 8],
[2, 1, 4, 0],
[3, 1, 4, 0],
[4, 6, 8, 14],
[5, 4, 6, 8],
[7, 6, 8, 4],
[11, 10, 8, 6],
[12, 10, 8, 11],
[13, 10, 8, 11],
[15, 14, 8, 16],
[16, 14, 8, 6],
[17, 16, 14, 15],
[18, 16, 14, 8],
[19, 18, 16, 14],
[20, 18, 16, 19],
[21, 18, 16, 19],
]
)
global_z_matrix = np.array(
[
[0, -1, -1, -1],
[1, 0, -1, -1],
[2, 1, 0, -1],
[3, 1, 0, 2],
[4, 1, 0, 2],
[5, 4, 1, 0],
[6, 4, 1, 5],
[7, 6, 4, 1],
[8, 6, 4, 7],
[9, 8, 6, 4],
[10, 8, 6, 9],
[14, 8, 6, 9],
[11, 10, 8, 6],
[12, 10, 8, 11],
[13, 10, 8, 11],
[15, 14, 8, 6],
[16, 14, 8, 15],
[17, 16, 14, 15],
[18, 16, 17, 14],
[19, 18, 16, 14],
[20, 18, 19, 16],
[21, 18, 20, 16],
]
)
xyz = np.array(
[
[1.375, 1.25, 1.573],
[1.312, 1.255, 1.662],
[1.327, 1.306, 1.493],
[1.377, 1.143, 1.549],
[1.511, 1.31, 1.618],
[1.606, 1.236, 1.63],
[1.523, 1.441, 1.633],
[1.445, 1.5, 1.607],
[1.645, 1.515, 1.667],
[1.703, 1.459, 1.74],
[1.73, 1.53, 1.54],
[1.792, 1.619, 1.554],
[1.78, 1.439, 1.508],
[1.663, 1.555, 1.457],
[1.618, 1.646, 1.734],
[1.509, 1.703, 1.709],
[1.715, 1.705, 1.809],
[1.798, 1.653, 1.831],
[1.703, 1.847, 1.852],
[1.801, 1.871, 1.892],
[1.674, 1.911, 1.768],
[1.631, 1.858, 1.933],
]
)
return relative_z_matrix, global_z_matrix, rigid_block, xyz.reshape(1, -1)
def rad2deg(x):
return x * 180.0 / np.pi
def deg2rad(x):
return x * np.pi / 180.0
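# Quick sanity check for the two helpers above (illustrative only, not part of the test suite):
# assert np.isclose(rad2deg(np.pi / 2), 90.0)
# assert np.isclose(deg2rad(180.0), np.pi)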
# def test_outer(device, dtype, atol=1e-6, rtol=1e-5):
# for _ in range(N_REPETITIONS):
# x, y = torch.Tensor(2, 5, 7, 3).to(device, dtype).normal_()
# A = outer(x, y).view(-1)
# B = []
# for i in range(5):
# for j in range(7):
# for k in range(3):
# for l in range(3):
# B.append(x[i, j, k] * y[i, j, l])
# B = torch.Tensor(B).to(device, dtype)
# assert torch.allclose(A, B, atol=atol, rtol=rtol)
def test_det2x2(device, dtype, atol=1e-6, rtol=1e-5):
for _ in range(N_REPETITIONS):
x = torch.Tensor(7, 5, 3, 2, 2).to(device, dtype).normal_()
assert torch.allclose(det2x2(x), x.det(), atol=atol, rtol=rtol)
def test_det3x3(device, dtype, atol=5e-6, rtol=5e-5):
for _ in range(N_REPETITIONS):
x = torch.Tensor(7, 5, 3, 3, 3).to(device, dtype).normal_()
if not torch.allclose(det3x3(x), x.det(), atol=atol, rtol=rtol):
print(det3x3(x) - x.det())
assert torch.allclose(det3x3(x), x.det(), atol=atol, rtol=rtol)
def test_dist_deriv(device, dtype, atol=1e-6, rtol=1e-5):
x1 = torch.Tensor([0, 0, 0]).to(device, dtype)
x2 = torch.Tensor([1, 1, 0]).to(device, dtype)
d, J = dist_deriv(x1, x2)
sqrt2 = torch.Tensor([2]).to(device, dtype).sqrt()
assert torch.allclose(d, sqrt2, atol=atol, rtol=rtol)
assert torch.allclose(J, -x2 / sqrt2)
def test_angle_deriv(device, dtype):
atol = 1e-2 if dtype is torch.float32 and device is torch.device("cuda:0") else 1e-4
rtol = 1e-3 if dtype is torch.float32 else 1e-5
atol, rtol = TOLERANCES[device][dtype]
np.random.seed(123122)
# check 45 deg angle
x1 = torch.Tensor([0, 1, 0]).to(device, dtype)
x2 = torch.Tensor([0, 0, 0]).to(device, dtype)
x3 = torch.Tensor([1, 1, 0]).to(device, dtype)
a, J = angle_deriv(x1, x2, x3)
assert torch.allclose(a, torch.tensor(deg2rad(45.0), device=device, dtype=dtype))
assert torch.allclose(J, torch.Tensor([-1, 0, 0]).to(device, dtype), atol=atol)
# check random angle
for i in range(N_REPETITIONS):
# random reference angle
# TODO: more stable angle derivatives
a_ref = np.random.uniform(
1e-2, np.pi - 1e-2
) # prevent angles with numerical issues
x1 = (
torch.Tensor([np.cos(a_ref), np.sin(a_ref), 0])
.to(device, dtype)
.requires_grad_(True)
)
# construct system in standard basis
x2 = torch.Tensor([0, 0, 0]).to(device, dtype)
x3 = torch.Tensor([1, 0, 0]).to(device, dtype)
# apply random rotation to system
R = torch.tensor(
np.linalg.qr(np.random.uniform(size=(3, 3)))[0], dtype=dtype, device=device
)
x1, x2, x3 = (x @ R for x in (x1, x2, x3))
a, J = angle_deriv(x1, x2, x3)
# compute Jacobian with autograd
J_ref = torch.autograd.grad(a.sum(), x1)[0]
assert torch.allclose(
a, torch.tensor(a_ref, dtype=dtype, device=device), atol=atol, rtol=rtol
)
assert torch.allclose(J, J_ref, atol=atol, rtol=rtol)
def test_torsion_deriv(device, dtype):
atol, rtol = TOLERANCES[device][dtype]
np.random.seed(202422)
for i in range(N_REPETITIONS):
# random reference angle
a_ref = np.random.uniform(0, np.pi)
import numpy as np
import datajoint as dj
from PIL import ImageColor
from collections import Counter
import matplotlib as mpl
import matplotlib.pyplot as plt
import seaborn as sns
import itertools
import pandas as pd
from pipeline import experiment, ephys, psth, lab, histology, ccf, psth_foraging
from pipeline.plot.util import (_plot_with_sem, _extract_one_stim_dur,
_plot_stacked_psth_diff, _plot_avg_psth, _jointplot_w_hue)
from pipeline.plot import unit_psth
from pipeline.util import (_get_units_hemisphere, _get_trial_event_times,
_get_stim_onset_time, _get_clustering_method)
from . import PhotostimError
_plt_xmin = -3
_plt_xmax = 2
def plot_clustering_quality(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
amp, snr, spk_rate, isi_violation = (ephys.Unit * ephys.UnitStat * ephys.ProbeInsertion.InsertionLocation
& probe_insertion & {'clustering_method': clustering_method}).fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'isi_violation')
metrics = {'amp': amp,
'snr': snr,
'isi': np.array(isi_violation) * 100, # to percentage
'rate': np.array(spk_rate)}
label_mapper = {'amp': 'Amplitude',
'snr': 'Signal to noise ratio (SNR)',
'isi': 'ISI violation (%)',
'rate': 'Firing rate (spike/s)'}
fig = None
if axs is None:
fig, axs = plt.subplots(2, 3, figsize = (12, 8))
fig.subplots_adjust(wspace=0.4)
assert axs.size == 6
for (m1, m2), ax in zip(itertools.combinations(list(metrics.keys()), 2), axs.flatten()):
ax.plot(metrics[m1], metrics[m2], '.k')
ax.set_xlabel(label_mapper[m1])
ax.set_ylabel(label_mapper[m2])
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
return fig
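# Illustrative usage sketch (commented out; the restriction key is hypothetical):
# key = {'subject_id': 432572, 'session': 1, 'insertion_number': 1}
# fig = plot_clustering_quality(ephys.ProbeInsertion & key, clustering_method='kilosort2')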
def plot_unit_characteristic(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
if clustering_method in ('kilosort2'):
q_unit = (ephys.Unit * ephys.ProbeInsertion.InsertionLocation.proj('depth') * ephys.UnitStat
* lab.ElectrodeConfig.Electrode.proj() * lab.ProbeType.Electrode.proj('x_coord', 'y_coord')
& probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"').proj(
..., x='x_coord', y='y_coord')
else:
q_unit = (ephys.Unit * ephys.ProbeInsertion.InsertionLocation.proj('depth') * ephys.UnitStat
& probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"').proj(
..., x='unit_posx', y='unit_posy')
amp, snr, spk_rate, x, y, insertion_depth = q_unit.fetch(
'unit_amp', 'unit_snr', 'avg_firing_rate', 'x', 'y', 'depth')
metrics = pd.DataFrame(list(zip(*(amp/amp.max(), snr/snr.max(), spk_rate/spk_rate.max(),
x, insertion_depth.astype(float) + y))))
metrics.columns = ['amp', 'snr', 'rate', 'x', 'y']
# --- prepare for plotting
shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shank_count='count(distinct shank)').fetch1('shank_count')
m_scale = get_m_scale(shank_count)
ymin = metrics.y.min() - 100
ymax = metrics.y.max() + 200
xmax = 1.3 * metrics.x.max()
xmin = -1/6*xmax
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
# --- plot
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.amp*m_scale, ax=axs[0], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.snr*m_scale, ax=axs[1], **cosmetic)
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.rate*m_scale, ax=axs[2], **cosmetic)
# manually draw the legend
lg_ypos = ymax
data = pd.DataFrame({'x': [0.1*xmax, 0.4*xmax, 0.75*xmax], 'y': [lg_ypos, lg_ypos, lg_ypos],
'size_ratio': np.array([0.2, 0.5, 0.8])})
for ax, ax_maxval in zip(axs.flatten(), (amp.max(), snr.max(), spk_rate.max())):
sns.scatterplot(data=data, x='x', y='y', s=data.size_ratio*m_scale, ax=ax, **dict(cosmetic, facecolor='k'))
for _, r in data.iterrows():
ax.text(r['x']-4, r['y']+70, (r['size_ratio']*ax_maxval).astype(int))
# cosmetic
for title, ax in zip(('Amplitude', 'SNR', 'Firing rate'), axs.flatten()):
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(title)
ax.set_xlim((xmin, xmax))
ax.plot([0.5*xmin, xmax], [lg_ypos-80, lg_ypos-80], '-k')
ax.set_ylim((ymin, ymax + 150))
return fig
def plot_unit_selectivity(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
if clustering_method in ('kilosort2'):
q_unit = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* lab.ElectrodeConfig.Electrode.proj() * lab.ProbeType.Electrode.proj('x_coord', 'y_coord')
* experiment.Period & probe_insertion & {'clustering_method': clustering_method}
& 'period_selectivity != "non-selective"').proj(..., x='unit_posx', y='unit_posy').proj(
..., x='x_coord', y='y_coord')
else:
q_unit = (psth.PeriodSelectivity * ephys.Unit * ephys.ProbeInsertion.InsertionLocation
* experiment.Period & probe_insertion & {'clustering_method': clustering_method}
& 'period_selectivity != "non-selective"').proj(..., x='unit_posx', y='unit_posy')
attr_names = ['unit', 'period', 'period_selectivity', 'contra_firing_rate',
'ipsi_firing_rate', 'x', 'y', 'depth']
selective_units = q_unit.fetch(*attr_names)
selective_units = pd.DataFrame(selective_units).T
selective_units.columns = attr_names
selective_units.period_selectivity.astype('category')
# --- account for insertion depth (manipulator depth)
selective_units.y = selective_units.depth.values.astype(float) + selective_units.y
# --- get ipsi vs. contra firing rate difference
f_rate_diff = np.abs(selective_units.ipsi_firing_rate - selective_units.contra_firing_rate)
selective_units['f_rate_diff'] = f_rate_diff / f_rate_diff.max()
# --- prepare for plotting
shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shank_count='count(distinct shank)').fetch1('shank_count')
m_scale = get_m_scale(shank_count)
cosmetic = {'legend': None,
'linewidth': 0.0001}
ymin = selective_units.y.min() - 100
ymax = selective_units.y.max() + 100
xmax = 1.3 * selective_units.x.max()
xmin = -1/6*xmax
# a bit of hack to get the 'open circle'
pts = np.linspace(0, np.pi * 2, 24)
circ = np.c_[np.sin(pts) / 2, -np.cos(pts) / 2]
vert = np.r_[circ, circ[::-1] * .7]
open_circle = mpl.path.Path(vert)
# --- plot
fig = None
if axs is None:
fig, axs = plt.subplots(1, 3, figsize=(10, 8))
fig.subplots_adjust(wspace=0.6)
assert axs.size == 3
for (title, df), ax in zip(((p, selective_units[selective_units.period == p])
for p in ('sample', 'delay', 'response')), axs):
sns.scatterplot(data=df, x='x', y='y',
s=df.f_rate_diff.values.astype(float)*m_scale,
hue='period_selectivity', marker=open_circle,
palette={'contra-selective': 'b', 'ipsi-selective': 'r'},
ax=ax, **cosmetic)
contra_p = (df.period_selectivity == 'contra-selective').sum() / len(df) * 100
# cosmetic
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.set_title(f'{title}\n% contra: {contra_p:.2f}\n% ipsi: {100-contra_p:.2f}')
ax.set_xlim((xmin, xmax))
ax.set_xlabel('x')
ax.set_ylabel('y')
ax.set_ylim((ymin, ymax))
return fig
def plot_unit_bilateral_photostim_effect(probe_insertion, clustering_method=None, axs=None):
probe_insertion = probe_insertion.proj()
if not (psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim') & probe_insertion):
raise PhotostimError('No Bilateral ALM Photo-stimulation present')
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
dv_loc = (ephys.ProbeInsertion.InsertionLocation & probe_insertion).fetch1('depth')
no_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_nostim'}).fetch1('KEY')
bi_stim_cond = (psth.TrialCondition
& {'trial_condition_name':
'all_noearlylick_both_alm_stim'}).fetch1('KEY')
units = ephys.Unit & probe_insertion & {'clustering_method': clustering_method} & 'unit_quality != "all"'
metrics = pd.DataFrame(columns=['unit', 'x', 'y', 'frate_change'])
# get photostim onset and duration
stim_durs = np.unique((experiment.Photostim & experiment.PhotostimEvent
* psth.TrialCondition().get_trials('all_noearlylick_both_alm_stim')
& probe_insertion).fetch('duration'))
stim_dur = _extract_one_stim_dur(stim_durs)
stim_time = _get_stim_onset_time(units, 'all_noearlylick_both_alm_stim')
# XXX: could be done with 1x fetch+join
for u_idx, unit in enumerate(units.fetch('KEY', order_by='unit')):
if clustering_method in ('kilosort2'):
x, y = (ephys.Unit * lab.ElectrodeConfig.Electrode.proj()
* lab.ProbeType.Electrode.proj('x_coord', 'y_coord') & unit).fetch1('x_coord', 'y_coord')
else:
x, y = (ephys.Unit & unit).fetch1('unit_posx', 'unit_posy')
# obtain unit psth per trial, for all nostim and bistim trials
nostim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(no_stim_cond['trial_condition_name'])
bistim_trials = ephys.Unit.TrialSpikes & unit & psth.TrialCondition.get_trials(bi_stim_cond['trial_condition_name'])
nostim_psths, nostim_edge = psth.compute_unit_psth(unit, nostim_trials.fetch('KEY'), per_trial=True)
bistim_psths, bistim_edge = psth.compute_unit_psth(unit, bistim_trials.fetch('KEY'), per_trial=True)
# compute the firing rate change between bilateral-stim and no-stim trials within the stimulation time window
ctrl_frate = np.array([nostim_psth[np.logical_and(nostim_edge >= stim_time,
nostim_edge <= stim_time + stim_dur)].mean()
for nostim_psth in nostim_psths])
stim_frate = np.array([bistim_psth[np.logical_and(bistim_edge >= stim_time,
bistim_edge <= stim_time + stim_dur)].mean()
for bistim_psth in bistim_psths])
frate_change = (stim_frate.mean() - ctrl_frate.mean()) / ctrl_frate.mean()
frate_change = abs(frate_change) if frate_change < 0 else 0.0001
metrics.loc[u_idx] = (int(unit['unit']), x, float(dv_loc) + y, frate_change)
metrics.frate_change = metrics.frate_change / metrics.frate_change.max()
# --- prepare for plotting
shank_count = (ephys.ProbeInsertion & probe_insertion).aggr(lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode,
shank_count='count(distinct shank)').fetch1('shank_count')
m_scale = get_m_scale(shank_count)
fig = None
if axs is None:
fig, axs = plt.subplots(1, 1, figsize=(4, 8))
xmax = 1.3 * metrics.x.max()
xmin = -1/6*xmax
cosmetic = {'legend': None,
'linewidth': 1.75,
'alpha': 0.9,
'facecolor': 'none', 'edgecolor': 'k'}
sns.scatterplot(data=metrics, x='x', y='y', s=metrics.frate_change*m_scale,
ax=axs, **cosmetic)
axs.spines['right'].set_visible(False)
axs.spines['top'].set_visible(False)
axs.set_title('% change')
axs.set_xlim((xmin, xmax))
return fig
def plot_pseudocoronal_slice(probe_insertion, shank_no=1):
# ---- Electrode sites ----
annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode
* ephys.ProbeInsertion
* histology.ElectrodeCCFPosition.ElectrodePosition
& probe_insertion & {'shank': shank_no})
electrode_coords = np.array(list(zip(*annotated_electrodes.fetch(
'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y')))) # (AP, DV, ML)
probe_track_coords = np.array(list(zip(*(histology.LabeledProbeTrack.Point
& probe_insertion & {'shank': shank_no}).fetch(
'ccf_z', 'ccf_y', 'ccf_x', order_by='ccf_y'))))
voxel_res = ccf.CCFLabel.CCF_R3_20UM_RESOLUTION
lr_max, dv_max, _ = ccf.get_ccf_xyz_max()
pseudocoronal_points, shank_ccfs = histology.retrieve_pseudocoronal_slice(probe_insertion, shank_no)
dv_pts, lr_pts, ap_pts, color_codes = pseudocoronal_points.T
dv_pts = dv_pts.astype(int)
lr_pts = lr_pts.astype(int)
color_codes = color_codes.astype(str)
# ---- paint annotation color code ----
coronal_slice = np.full((dv_max + 1, lr_max + 1, 3), np.nan)
for color in set(color_codes):
matched_ind = np.where(color_codes == color)[0]
dv_ind = dv_pts[matched_ind] # rows
lr_ind = lr_pts[matched_ind] # cols
try:
c_rgb = ImageColor.getcolor("#" + color, "RGB")
except ValueError as e:
print(str(e))
continue
coronal_slice[dv_ind, lr_ind, :] = np.full((len(matched_ind), 3), c_rgb)
# ---- paint the interpolated track of this probe/shank in gray ----
in_probe_range = np.logical_and(shank_ccfs[:, 1] >= probe_track_coords[:, 1].min(),
shank_ccfs[:, 1] <= probe_track_coords[:, 1].max())
in_electrode_range = np.logical_and(shank_ccfs[:, 1] >= electrode_coords[:, 1].min(),
shank_ccfs[:, 1] <= electrode_coords[:, 1].max())
tracks_coords = shank_ccfs[np.logical_and(in_probe_range, ~in_electrode_range), :]
coronal_slice[tracks_coords[:, 1], tracks_coords[:, 0], :] = np.full(
(tracks_coords.shape[0], 3), ImageColor.getcolor("#FFFFFF", "RGB"))
# ---- paint electrode sites on this probe/shank in black ----
coronal_slice[electrode_coords[:, 1], electrode_coords[:, 2], :] = np.full(
(electrode_coords.shape[0], 3), ImageColor.getcolor("#080808", "RGB"))
# ---- downsample the 2D slice to the voxel resolution ----
coronal_slice = coronal_slice[::voxel_res, ::voxel_res, :]
# paint outside region white
nan_r, nan_c = np.where(np.nansum(coronal_slice, axis=2) == 0)
coronal_slice[nan_r, nan_c, :] = np.full((len(nan_r), 3), ImageColor.getcolor("#FFFFFF", "RGB"))
# ---- plot ----
fig, ax = plt.subplots(1, 1)
ax.imshow(coronal_slice.astype(np.uint8), extent=[0, lr_max, dv_max, 0])
ax.invert_xaxis()
ax.set_xticks([])
ax.set_yticks([])
ax.spines['right'].set_visible(False)
ax.spines['top'].set_visible(False)
ax.spines['bottom'].set_visible(False)
ax.spines['left'].set_visible(False)
return fig
def plot_driftmap(probe_insertion, clustering_method=None, shank_no=1):
probe_insertion = probe_insertion.proj()
assert histology.InterpolatedShankTrack & probe_insertion
if clustering_method is None:
try:
clustering_method = _get_clustering_method(probe_insertion)
except ValueError as e:
raise ValueError(str(e) + '\nPlease specify one with the kwarg "clustering_method"')
units = (ephys.Unit * lab.ElectrodeConfig.Electrode
& probe_insertion & {'clustering_method': clustering_method}
& 'unit_quality != "all"')
units = (units.proj('spike_times', 'spike_depths', 'unit_posy')
* ephys.ProbeInsertion.proj()
* lab.ProbeType.Electrode.proj('shank') & {'shank': shank_no})
# ---- ccf region ----
annotated_electrodes = (lab.ElectrodeConfig.Electrode * lab.ProbeType.Electrode
* ephys.ProbeInsertion
* histology.ElectrodeCCFPosition.ElectrodePosition
* ccf.CCFAnnotation * ccf.CCFBrainRegion.proj(..., annotation='region_name')
& probe_insertion & {'shank': shank_no})
pos_y, ccf_y, color_code = annotated_electrodes.fetch(
'y_coord', 'ccf_y', 'color_code', order_by='y_coord DESC')
# CCF position of most ventral recording site
last_electrode_site = np.array((histology.InterpolatedShankTrack.DeepestElectrodePoint
& probe_insertion & {'shank': shank_no}).fetch1(
'ccf_x', 'ccf_y', 'ccf_z'))
# CCF position of the brain surface where this shank crosses
brain_surface_site = np.array((histology.InterpolatedShankTrack.BrainSurfacePoint
& probe_insertion & {'shank': shank_no}).fetch1(
'ccf_x', 'ccf_y', 'ccf_z'))
# CCF position of most ventral recording site, with respect to the brain surface
y_ref = -np.linalg.norm(last_electrode_site - brain_surface_site)
import numpy as np
from scipy.signal import correlate2d
rho = np.random.random((4,4)) + 1j * np.random.random((4,4))
res0 = correlate2d(rho, rho, 'full', 'fill', 0) # unwrapped
res1 = np.fft.fftshift(np.fft.ifftn(np.abs(np.fft.fftn(rho, s=2*np.array(rho.shape)-1))**2))
np.allclose(res0, np.flip(res1, axis=(0,1)).conj())
res0 = correlate2d(rho, rho, 'same', 'wrap') # wrapped
res1 = np.fft.fftshift(np.fft.ifftn(np.abs(np.fft.fftn(rho))**2))
np.allclose(res0, np.flip(res1, axis=(0,1)).conj())
# real values
rho = np.random.random((4,4))
res0 = correlate2d(rho, rho, 'full', 'fill', 0) # unwrapped
res1 = np.fft.fftshift(np.fft.irfftn(np.abs(np.fft.rfftn(rho, s=2*np.array(rho.shape)-1))**2, s=2*np.array(rho.shape)-1))
np.allclose(res0, np.flip(res1, axis=(0,1)))
import os
# suppress tensorflow logging other than errors
os.environ['TF_CPP_MIN_LOG_LEVEL'] = '3'
import numpy as np
import tensorflow as tf
from keras import backend as K
from keras.datasets import mnist
from keras.models import Sequential, load_model
from keras.layers import Dense, Dropout, Activation, Flatten
from keras.layers import Convolution2D, MaxPooling2D
from keras.utils import np_utils
import matplotlib
matplotlib.use('Agg')
import matplotlib.pyplot as plt
import matplotlib.gridspec as gridspec
from attacks.fgsm import fgsm
def random_orthogonal(i):
"""Return a random vector orthogonal to i."""
v = np.random.random(i.shape)
i /= np.linalg.norm(i)
a = np.dot(v, i) / np.dot(i, i)
j = v - a*i
b = np.linalg.norm(j)
j /= b
return j, (a, i)
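# Quick sanity check of random_orthogonal (illustrative only, names are arbitrary): the returned
# vector should be orthogonal to the normalised input, which is modified in place and returned.
# >>> i_vec = np.random.random(10)
# >>> j_vec, (a_coef, i_unit) = random_orthogonal(i_vec)
# >>> np.isclose(np.dot(j_vec, i_unit), 0.0)   # True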
img_rows = 28
img_cols = 28
img_chan = 1
nb_classes = 10
input_shape=(img_rows, img_cols, img_chan)
print('\nLoading mnist')
(X_train, y_train), (X_test, y_test) = mnist.load_data()
X_train = X_train.astype('float32') / 255.
X_test = X_test.astype('float32') / 255.
X_train = X_train.reshape(-1, img_rows, img_cols, img_chan)
X_test = X_test.reshape(-1, img_rows, img_cols, img_chan)
print('\nX_train shape:', X_train.shape)
print('y_train shape:', y_train.shape)
# one hot encoding
y_train = np_utils.to_categorical(y_train, nb_classes)
y_test = np_utils.to_categorical(y_test, nb_classes)
sess = tf.InteractiveSession()
K.set_session(sess)
if False:
print('\nLoading model0')
model0 = load_model('model/figure_2_model0.h5')
else:
print('\nBuilding model0')
model0 = Sequential([
Convolution2D(32, 3, 3, input_shape=input_shape),
Activation('relu'),
Convolution2D(32, 3, 3),
Activation('relu'),
MaxPooling2D(pool_size=(2, 2)),
# Dropout(0.25),
Flatten(),
Dense(128),
Activation('relu'),
# Dropout(0.5),
Dense(10),
Activation('softmax')])
model0.compile(optimizer='adam', loss='categorical_crossentropy',
metrics=['accuracy'])
print('\nTraining model0')
model0.fit(X_train, y_train, nb_epoch=10)
print('\nSaving model0')
os.makedirs('model', exist_ok=True)
model0.save('model/figure_2_model0.h5')
x = tf.placeholder(tf.float32, (None, img_rows, img_cols, img_chan))
y = tf.placeholder(tf.int32, (None, ))
x_adv = fgsm(model0, x, eps=0.25, nb_epoch=1)
print('\nTesting against clean data')
score = model0.evaluate(X_test, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))
if False:
print('\nLoading adversarial datasets')
X_adv = np.load('data/figure_2.npy')
else:
print('\nGenerating adversarial')
batch_size = 64
X_adv = np.empty(X_test.shape)
nb_sample = X_test.shape[0]
nb_batch = int(np.ceil(nb_sample/batch_size))
for batch in range(nb_batch):
print('batch {0}/{1}'.format(batch+1, nb_batch), end='\r')
start = batch * batch_size
end = min(nb_sample, start+batch_size)
tmp = sess.run(x_adv, feed_dict={x: X_test[start:end],
K.learning_phase(): 0})
X_adv[start:end] = tmp
print('\nSaving adversarials')
os.makedirs('data', exist_ok=True)
np.save('data/figure_2.npy', X_adv)
print('\nTesting against adversarial data')
score = model0.evaluate(X_adv, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))
if False:
print('\nLoading model1')
model1 = load_model('model/figure_2_model1.h5')
else:
print('\nBuilding model1')
model1 = Sequential([
Convolution2D(32, 3, 3, input_shape=input_shape),
Activation('relu'),
Convolution2D(32, 3, 3),
Activation('relu'),
MaxPooling2D(pool_size=(2, 2)),
Dropout(0.25),
Flatten(),
Dense(128),
Activation('relu'),
Dropout(0.5),
Dense(10),
Activation('softmax')])
model1.compile(loss='categorical_crossentropy',
optimizer='adam', metrics=['accuracy'])
x_adv_tmp = fgsm(model1, x, eps=0.3, nb_epoch=1)
print('\nDummy testing')
model1.evaluate(X_test[:10], y_test[:10], verbose=0)
print('\nPreparing training/validation dataset')
validation_split = 0.1
N = int(X_train.shape[0]*validation_split)
X_tmp_train, X_tmp_val = X_train[:-N], X_train[-N:]
y_tmp_train, y_tmp_val = y_train[:-N], y_train[-N:]
print('\nTraining model1')
nb_epoch = 10
batch_size = 64
nb_sample = X_tmp_train.shape[0]
nb_batch = int(np.ceil(nb_sample/batch_size))
for epoch in range(nb_epoch):
print('Epoch {0}/{1}'.format(epoch+1, nb_epoch))
for batch in range(nb_batch):
print(' batch {0}/{1} '.format(batch+1, nb_batch),
end='\r', flush=True)
start = batch * batch_size
end = min(nb_sample, start+batch_size)
X_tmp_adv = sess.run(x_adv_tmp, feed_dict={
x: X_tmp_train[start:end], K.learning_phase(): 0})
y_tmp_adv = y_tmp_train[start:end]
X_batch = np.vstack((X_tmp_train[start:end], X_tmp_adv))
y_batch = np.vstack((y_tmp_train[start:end], y_tmp_adv))
score = model1.train_on_batch(X_batch, y_batch)
score = model1.evaluate(X_tmp_val, y_tmp_val)
print(' loss: {0:.4f} acc: {1:.4f}'
.format(score[0], score[1]))
print('\nSaving model1')
os.makedirs('model', exist_ok=True)
model1.save('model/figure_2_model1.h5')
print('\nTesting against adversarial')
score = model1.evaluate(X_adv, y_test)
print('\nloss: {0:.4f} acc: {1:.4f}'.format(score[0], score[1]))
print('\nPreparing predictions')
y0_0 = model0.predict(X_test)
y0_1 = model0.predict(X_adv)
y1_0 = model1.predict(X_test)
y1_1 = model1.predict(X_adv)
z_test = np.argmax(y_test, axis=1)
z0_0 = np.argmax(y0_0, axis=1)
z0_1 = np.argmax(y0_1, axis=1)
"""Spatial statistical tools to estimate uncertainties related to DEMs"""
from __future__ import annotations
import math as m
import multiprocessing as mp
import os
import warnings
from functools import partial
from typing import Callable, Union, Iterable, Optional, Sequence, Any
import itertools
import matplotlib
import matplotlib.pyplot as plt
import matplotlib.colors as colors
from numba import njit
import numpy as np
import pandas as pd
from scipy import integrate
from scipy.optimize import curve_fit
from skimage.draw import disk
from scipy.interpolate import RegularGridInterpolator, LinearNDInterpolator, griddata
from scipy.stats import binned_statistic, binned_statistic_2d, binned_statistic_dd
from geoutils.spatial_tools import subsample_raster, get_array_and_mask
from geoutils.georaster import RasterType, Raster
with warnings.catch_warnings():
warnings.filterwarnings("ignore", category=DeprecationWarning)
import skgstat as skg
from skgstat import models
def nmad(data: np.ndarray, nfact: float = 1.4826) -> float:
"""
Calculate the normalized median absolute deviation (NMAD) of an array.
Default scaling factor is 1.4826 to scale the median absolute deviation (MAD) to the dispersion of a normal
distribution (see https://en.wikipedia.org/wiki/Median_absolute_deviation#Relation_to_standard_deviation, and
e.g. http://dx.doi.org/10.1016/j.isprsjprs.2009.02.003)
:param data: input data
:param nfact: normalization factor for the data
:returns nmad: (normalized) median absolute deviation of data.
"""
if isinstance(data, np.ma.masked_array):
data_arr = get_array_and_mask(data, check_shape=False)[0]
else:
data_arr = np.asarray(data)
return nfact * np.nanmedian(np.abs(data_arr - np.nanmedian(data_arr)))
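# Illustrative check (approximate values, not part of the API): for normally distributed data
# the NMAD converges towards the standard deviation.
# >>> rng = np.random.default_rng(42)
# >>> x = rng.normal(0, 2, 100000)
# >>> nmad(x)   # ~2.0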
def interp_nd_binning(df: pd.DataFrame, list_var_names: Union[str,list[str]], statistic : Union[str, Callable[[np.ndarray],float]] = nmad,
min_count: Optional[int] = 100) -> Callable[[tuple[np.ndarray, ...]], np.ndarray]:
"""
Estimate an interpolant function for an N-dimensional binning. Preferably based on the output of nd_binning.
For more details on the input dataframe, and associated list of variable name and statistic, see nd_binning.
If the variable pd.DataSeries corresponds to an interval (as the output of nd_binning), uses the middle of the interval.
Otherwise, uses the variable as such.
Workflow of the function:
Fills the no-data present on the regular N-D binning grid with nearest neighbour from scipy.griddata, then provides an
interpolant function that linearly interpolates/extrapolates using scipy.RegularGridInterpolator.
:param df: dataframe with statistic of binned values according to explanatory variables (preferably output of nd_binning)
:param list_var_names: explanatory variable data series to select from the dataframe (containing interval or float dtype)
:param statistic: statistic to interpolate, stored as a data series in the dataframe
:param min_count: minimum number of samples to be used as a valid statistic (replaced by nodata)
:return: N-dimensional interpolant function
:examples
# Using a dataframe created from scratch
>>> df = pd.DataFrame({"var1": [1, 2, 3, 1, 2, 3, 1, 2, 3], "var2": [1, 1, 1, 2, 2, 2, 3, 3, 3], "statistic": [1, 2, 3, 4, 5, 6, 7, 8, 9]})
# In 2 dimensions, the statistic array looks like this
# array([
# [1, 2, 3],
# [4, 5, 6],
# [7, 8, 9]
# ])
>>> fun = interp_nd_binning(df, list_var_names=["var1", "var2"], statistic="statistic", min_count=None)
# Right on point.
>>> fun((2, 2))
array(5.)
# Interpolated linearly inside the 2D frame.
>>> fun((1.5, 1.5))
array(3.)
# Extrapolated linearly outside the 2D frame.
>>> fun((-1, 1))
array(-1.)
"""
# if list of variable input is simply a string
if isinstance(list_var_names,str):
list_var_names = [list_var_names]
# check that the dataframe contains what we need
for var in list_var_names:
if var not in df.columns:
raise ValueError('Variable "'+var+'" does not exist in the provided dataframe.')
statistic_name = statistic if isinstance(statistic,str) else statistic.__name__
if statistic_name not in df.columns:
raise ValueError('Statistic "' + statistic_name + '" does not exist in the provided dataframe.')
if min_count is not None and 'count' not in df.columns:
raise ValueError('Statistic "count" is not in the provided dataframe, necessary to use the min_count argument.')
if df.empty:
raise ValueError('Dataframe is empty.')
df_sub = df.copy()
# if the dataframe is an output of nd_binning, keep only the dimension of interest
if 'nd' in df_sub.columns:
df_sub = df_sub[df_sub.nd == len(list_var_names)]
# compute the middle values instead of bin interval if the variable is a pandas interval type
for var in list_var_names:
check_any_interval = [isinstance(x, pd.Interval) for x in df_sub[var].values]
if any(check_any_interval):
df_sub[var] = pd.IntervalIndex(df_sub[var]).mid.values
# otherwise, leave as is
# check that explanatory variables have valid binning values which coincide along the dataframe
df_sub = df_sub[np.logical_and.reduce([np.isfinite(df_sub[var].values) for var in list_var_names])]
if df_sub.empty:
raise ValueError('Dataframe does not contain a nd binning with the variables corresponding to the list of variables.')
# check that the statistic data series contain valid data
if all(~np.isfinite(df_sub[statistic_name].values)):
raise ValueError('Dataframe does not contain any valid statistic values.')
# remove statistic values calculated with a sample count under the minimum count
if min_count is not None:
df_sub.loc[df_sub['count'] < min_count,statistic_name] = np.nan
values = df_sub[statistic_name].values
ind_valid = np.isfinite(values)
# re-check that the statistic data series contain valid data after filtering with min_count
if all(~ind_valid):
raise ValueError("Dataframe does not contain any valid statistic values after filtering with min_count = "+str(min_count)+".")
# get a list of middle values for the binning coordinates, to define a nd grid
list_bmid = []
shape = []
for var in list_var_names:
bmid = sorted(np.unique(df_sub[var][ind_valid]))
list_bmid.append(bmid)
shape.append(len(bmid))
# griddata first to perform nearest interpolation with NaNs (irregular grid)
# valid values
values = values[ind_valid]
# coordinates of valid values
points_valid = tuple([df_sub[var].values[ind_valid] for var in list_var_names])
# grid coordinates
bmid_grid = np.meshgrid(*list_bmid, indexing='ij')
points_grid = tuple([bmid_grid[i].flatten() for i in range(len(list_var_names))])
# fill grid no data with nearest neighbour
values_grid = griddata(points_valid, values, points_grid, method='nearest')
values_grid = values_grid.reshape(shape)
# RegularGridInterpolator to perform linear interpolation/extrapolation on the grid
# (will extrapolate only outside of boundaries not filled with the nearest of griddata as fill_value = None)
interp_fun = RegularGridInterpolator(tuple(list_bmid), values_grid, method='linear', bounds_error=False, fill_value=None)
return interp_fun
def nd_binning(values: np.ndarray, list_var: Iterable[np.ndarray], list_var_names: Iterable[str], list_var_bins: Optional[Union[int,Iterable[Iterable]]] = None,
statistics: Iterable[Union[str, Callable, None]] = ['count', np.nanmedian ,nmad], list_ranges : Optional[Iterable[Sequence]] = None) \
-> pd.DataFrame:
"""
N-dimensional binning of values according to one or several explanatory variables.
Values input is a (N,) array and variable input is a list of flattened arrays of similar dimensions (N,).
For more details on the format of input variables, see documentation of scipy.stats.binned_statistic_dd.
:param values: values array (N,)
:param list_var: list (L) of explanatory variables array (N,)
:param list_var_names: list (L) of names of the explanatory variables
:param list_var_bins: count, or list (L) of counts or custom bin edges for the explanatory variables; defaults to 10 bins
:param statistics: list (X) of statistics to be computed; defaults to count, median and nmad
:param list_ranges: list (L) of minimum and maximum ranges to bin the explanatory variables; defaults to min/max of the data
:return:
"""
# we separate 1d, 2d and nd binning, because propagating statistics between different dimensional binning is not always feasible
# using scipy because it allows for several dimensional binning, while it's not straightforward in pandas
if list_var_bins is None:
list_var_bins = (10,) * len(list_var_names)
elif isinstance(list_var_bins,int):
list_var_bins = (list_var_bins,) * len(list_var_names)
# flatten the arrays if this has not been done by the user
values = values.ravel()
list_var = [var.ravel() for var in list_var]
# remove no data values
valid_data = np.logical_and.reduce([np.isfinite(values)]+[np.isfinite(var) for var in list_var])
values = values[valid_data]
list_var = [var[valid_data] for var in list_var]
statistics_name = [f if isinstance(f,str) else f.__name__ for f in statistics]
# get binned statistics in 1d: a simple loop is sufficient
list_df_1d = []
for i, var in enumerate(list_var):
df_stats_1d = pd.DataFrame()
# get statistics
for j, statistic in enumerate(statistics):
stats_binned_1d, bedges_1d = binned_statistic(var,values,statistic=statistic,bins=list_var_bins[i],range=list_ranges)[:2]
# save in a dataframe
df_stats_1d[statistics_name[j]] = stats_binned_1d
# we need to get the middle of the bins from the edges, to get the same dimension length
df_stats_1d[list_var_names[i]] = pd.IntervalIndex.from_breaks(bedges_1d,closed='left')
# report number of dimensions used
df_stats_1d['nd'] = 1
list_df_1d.append(df_stats_1d)
# get binned statistics in 2d: all possible 2d combinations
list_df_2d = []
if len(list_var)>1:
combs = list(itertools.combinations(list_var_names, 2))
for i, comb in enumerate(combs):
var1_name, var2_name = comb
# corresponding variables indexes
i1, i2 = list_var_names.index(var1_name), list_var_names.index(var2_name)
df_stats_2d = pd.DataFrame()
for j, statistic in enumerate(statistics):
stats_binned_2d, bedges_var1, bedges_var2 = binned_statistic_2d(list_var[i1],list_var[i2],values,statistic=statistic
,bins=[list_var_bins[i1],list_var_bins[i2]]
,range=list_ranges)[:3]
# get statistics
df_stats_2d[statistics_name[j]] = stats_binned_2d.flatten()
# derive interval indexes and convert bins into 2d indexes
ii1 = pd.IntervalIndex.from_breaks(bedges_var1,closed='left')
ii2 = pd.IntervalIndex.from_breaks(bedges_var2,closed='left')
df_stats_2d[var1_name] = [i1 for i1 in ii1 for i2 in ii2]
df_stats_2d[var2_name] = [i2 for i1 in ii1 for i2 in ii2]
# report number of dimensions used
df_stats_2d['nd'] = 2
list_df_2d.append(df_stats_2d)
# get binned statistics in nd, without redoing the same stats
df_stats_nd = pd.DataFrame()
if len(list_var)>2:
for j, statistic in enumerate(statistics):
stats_binned_2d, list_bedges = binned_statistic_dd(list_var,values,statistic=statistic,bins=list_var_bins,range=list_ranges)[0:2]
df_stats_nd[statistics_name[j]] = stats_binned_2d.flatten()
list_ii = []
# loop through the bin edges and create IntervalIndexes from them (to get both edges of each bin)
for bedges in list_bedges:
list_ii.append(pd.IntervalIndex.from_breaks(bedges,closed='left'))
# create nd indexes in nd-array and flatten for each variable
iind = np.meshgrid(*list_ii)
for i, var_name in enumerate(list_var_names):
df_stats_nd[var_name] = iind[i].flatten()
# report number of dimensions used
df_stats_nd['nd'] = len(list_var_names)
# concatenate everything
list_all_dfs = list_df_1d + list_df_2d + [df_stats_nd]
df_concat = pd.concat(list_all_dfs)
# commenting for now: pd.MultiIndex can be hard to use
# df_concat = df_concat.set_index(list_var_names)
return df_concat
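# Minimal usage sketch with hypothetical inputs: bin elevation differences `dh` by terrain arrays
# `slope` and `curvature` of the same shape, keeping the default statistics (count, median, NMAD).
# >>> df = nd_binning(values=dh, list_var=[slope, curvature], list_var_names=['slope', 'curvature'])
# >>> df[df['nd'] == 1]   # 1D binnings by slope and by curvature only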
def create_circular_mask(shape: Union[int, Sequence[int]], center: Optional[list[float]] = None,
radius: Optional[float] = None) -> np.ndarray:
"""
Create circular mask on a raster, defaults to the center of the array and it's half width
:param shape: shape of array
:param center: center
:param radius: radius
:return:
"""
w, h = shape
if center is None: # use the middle of the image
center = (int(w / 2), int(h / 2))
if radius is None: # use the smallest distance between the center and image walls
radius = min(center[0], center[1], w - center[0], h - center[1])
# skimage disk is not inclusive (correspond to distance_from_center < radius and not <= radius)
mask = np.zeros(shape, dtype=bool)
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in true_divide")
rr, cc = disk(center=center,radius=radius,shape=shape)
mask[rr, cc] = True
# manual solution
# Y, X = np.ogrid[:h, :w]
# dist_from_center = np.sqrt((X - center[0]) ** 2 + (Y - center[1]) ** 2)
# mask = dist_from_center < radius
return mask
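# Illustrative example: a boolean disk mask centred in a 10 x 10 array with the default radius
# (half the smallest distance between the centre and the array edges).
# >>> mask = create_circular_mask((10, 10))
# >>> mask.shape, mask[5, 5]   # ((10, 10), True)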
def create_ring_mask(shape: Union[int, Sequence[int]], center: Optional[list[float]] = None, in_radius: float = 0.,
out_radius: Optional[float] = None) -> np.ndarray:
"""
Create ring mask on a raster, defaults to the center of the array and a circle mask of half width of the array
:param shape: shape of array
:param center: center
:param in_radius: inside radius
:param out_radius: outside radius
:return:
"""
w, h = shape
if out_radius is None:
center = (int(w / 2), int(h / 2))
out_radius = min(center[0], center[1], w - center[0], h - center[1])
with warnings.catch_warnings():
warnings.filterwarnings("ignore", "invalid value encountered in true_divide")
mask_inside = create_circular_mask((w,h),center=center,radius=in_radius)
mask_outside = create_circular_mask((w,h),center=center,radius=out_radius)
mask_ring = np.logical_and(~mask_inside,mask_outside)
return mask_ring
def _subsample_wrapper(values: np.ndarray, coords: np.ndarray, shape: tuple[int,int] = None, subsample: int = 10000,
subsample_method: str = 'pdist_ring', inside_radius = None, outside_radius = None,
random_state: None | np.random.RandomState | np.random.Generator | int = None) -> tuple[np.ndarray, np.ndarray]:
"""
(Not used by default)
Wrapper for subsampling pdist methods
"""
nx, ny = shape
# Define state for random subsampling (to fix results during testing)
if random_state is None:
rnd = np.random.default_rng()
elif isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
# Subsample spatially for disk/ring methods
if subsample_method in ['pdist_disk', 'pdist_ring']:
# Select random center coordinates
center_x = rnd.choice(nx, 1)[0]
center_y = rnd.choice(ny, 1)[0]
if subsample_method == 'pdist_ring':
subindex = create_ring_mask((nx, ny), center=[center_x, center_y], in_radius=inside_radius,
out_radius=outside_radius)
else:
subindex = create_circular_mask((nx, ny), center=[center_x, center_y], radius=inside_radius)
index = subindex.flatten()
values_sp = values[index]
coords_sp = coords[index, :]
else:
values_sp = values
coords_sp = coords
index = subsample_raster(values_sp, subsample=subsample, return_indices=True, random_state=rnd)
values_sub = values_sp[index[0]]
coords_sub = coords_sp[index[0], :]
return values_sub, coords_sub
def _aggregate_pdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, subsample: int, shape: tuple,
subsample_method: str, gsd: float,
pdist_multi_ranges: Optional[list[float]] = None, **kwargs) -> pd.DataFrame:
"""
(Not used by default)
Aggregating subfunction of sample_empirical_variogram for pdist methods.
The pairwise differences are calculated within each subsample.
"""
# If no multi_ranges are provided, define a logical default behaviour with the pixel size and grid size
if subsample_method in ['pdist_disk', 'pdist_ring']:
if pdist_multi_ranges is None:
# Define list of ranges as exponent 2 of the resolution until the maximum range
pdist_multi_ranges = []
# We start at 10 times the ground sampling distance
new_range = gsd * 10
while new_range < kwargs.get('maxlag') / 2:
pdist_multi_ranges.append(new_range)
new_range *= 2
pdist_multi_ranges.append(kwargs.get('maxlag'))
# Define subsampling parameters
list_inside_radius, list_outside_radius = ([] for i in range(2))
binned_ranges = [0] + pdist_multi_ranges
for i in range(len(binned_ranges) - 1):
# Radiuses need to be passed as pixel sizes, dividing by ground sampling distance
outside_radius = binned_ranges[i + 1]/gsd
if subsample_method == 'pdist_ring':
inside_radius = binned_ranges[i]/gsd
else:
inside_radius = None
list_outside_radius.append(outside_radius)
list_inside_radius.append(inside_radius)
else:
# For random point selection, no need for multi-range parameters
pdist_multi_ranges = [kwargs.get('maxlag')]
list_outside_radius = [None]
list_inside_radius = [None]
# Estimate variogram with specific subsampling at multiple ranges
list_df_range = []
for j in range(len(pdist_multi_ranges)):
values_sub, coords_sub = _subsample_wrapper(values, coords, shape = shape, subsample = subsample,
subsample_method = subsample_method,
inside_radius = list_inside_radius[j],
outside_radius = list_outside_radius[j],
random_state= kwargs.get('random_state'))
if len(values_sub) == 0:
continue
df_range = _get_pdist_empirical_variogram(values=values_sub, coords=coords_sub, **kwargs)
# Aggregate runs
list_df_range.append(df_range)
df = pd.concat(list_df_range)
return df
def _get_pdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, **kwargs) -> pd.DataFrame:
"""
Get empirical variogram from skgstat.Variogram object calculating pairwise distances within the sample
:param values: values
:param coords: coordinates
:return: empirical variogram (variance, lags, counts)
"""
# Remove random_state keyword argument that is not used
kwargs.pop('random_state')
# Get arguments of Variogram class init function
vgm_args = skg.Variogram.__init__.__code__.co_varnames[:skg.Variogram.__init__.__code__.co_argcount]
# Check no other argument is left to be passed
remaining_kwargs = kwargs.copy()
for arg in vgm_args:
remaining_kwargs.pop(arg, None)
if len(remaining_kwargs) != 0:
warnings.warn('Keyword arguments: '+','.join(list(remaining_kwargs.keys()))+ ' were not used.')
# Filter corresponding arguments before passing
filtered_kwargs = {k:kwargs[k] for k in vgm_args if k in kwargs}
# Derive variogram with default MetricSpace (equivalent to scipy.pdist)
V = skg.Variogram(coordinates=coords, values=values, normalize=False, fit_method=None, **filtered_kwargs)
# Get bins, empirical variogram values, and bin count
bins, exp = V.get_empirical()
count = V.bin_count
# Write to dataframe
df = pd.DataFrame()
df = df.assign(exp=exp, bins=bins, count=count)
return df
def _get_cdist_empirical_variogram(values: np.ndarray, coords: np.ndarray, subsample_method: str,
**kwargs) -> pd.DataFrame:
"""
Get empirical variogram from skgstat.Variogram object calculating pairwise distances between two sample collections
of a MetricSpace (see scikit-gstat documentation for more details)
:param values: values
:param coords: coordinates
:return: empirical variogram (variance, lags, counts)
"""
# Rename the "subsample" argument into "samples", which is used by skgstat Metric subclasses
kwargs['samples'] = kwargs.pop('subsample')
# Rename the "random_state" argument into "rnd", also used by skgstat Metric subclasses
kwargs['rnd'] = kwargs.pop('random_state')
# Define MetricSpace function to be used, fetch possible keywords arguments
if subsample_method == 'cdist_point':
# List keyword arguments of the Probabilistic class init function
ms_args = skg.ProbabalisticMetricSpace.__init__.__code__.co_varnames[:skg.ProbabalisticMetricSpace.__init__.__code__.co_argcount]
ms = skg.ProbabalisticMetricSpace
else:
# List keyword arguments of the RasterEquidistant class init function
ms_args = skg.RasterEquidistantMetricSpace.__init__.__code__.co_varnames[:skg.RasterEquidistantMetricSpace.__init__.__code__.co_argcount]
ms = skg.RasterEquidistantMetricSpace
# Get arguments of Variogram class init function
vgm_args = skg.Variogram.__init__.__code__.co_varnames[:skg.Variogram.__init__.__code__.co_argcount]
# Check no other argument is left to be passed, accounting for MetricSpace arguments
remaining_kwargs = kwargs.copy()
for arg in vgm_args + ms_args:
remaining_kwargs.pop(arg, None)
if len(remaining_kwargs) != 0:
warnings.warn('Keyword arguments: ' + ', '.join(list(remaining_kwargs.keys())) + ' were not used.')
# Filter corresponding arguments before passing to MetricSpace function
filtered_ms_kwargs = {k: kwargs[k] for k in ms_args if k in kwargs}
M = ms(coords=coords, **filtered_ms_kwargs)
# Filter corresponding arguments before passing to Variogram function
filtered_var_kwargs = {k: kwargs[k] for k in vgm_args if k in kwargs}
V = skg.Variogram(M, values=values, normalize=False, fit_method=None, **filtered_var_kwargs)
# Get bins, empirical variogram values, and bin count
bins, exp = V.get_empirical()
count = V.bin_count
# Write to dataframe
df = pd.DataFrame()
df = df.assign(exp=exp, bins=bins, count=count)
return df
def _wrapper_get_empirical_variogram(argdict: dict) -> pd.DataFrame:
"""
Multiprocessing wrapper for get_pdist_empirical_variogram and get_cdist_empirical variogram
:param argdict: Keyword argument to pass to get_pdist/cdist_empirical_variogram
:return: empirical variogram (variance, lags, counts)
"""
if argdict['verbose']:
print('Working on run '+str(argdict['i']) + ' out of '+str(argdict['imax']))
argdict.pop('i')
argdict.pop('imax')
if argdict['subsample_method'] in ['cdist_equidistant', 'cdist_point']:
# Simple wrapper for the skgstat Variogram function for cdist methods
get_variogram = _get_cdist_empirical_variogram
else:
# Aggregating several skgstat Variogram after iterative subsampling of specific points in the Raster
get_variogram = _aggregate_pdist_empirical_variogram
return get_variogram(**argdict)
def sample_empirical_variogram(values: Union[np.ndarray, RasterType], gsd: float = None, coords: np.ndarray = None,
subsample: int = 10000, subsample_method: str = 'cdist_equidistant',
n_variograms: int = 1, n_jobs: int = 1, verbose=False,
random_state: None | np.random.RandomState | np.random.Generator | int = None,
**kwargs) -> pd.DataFrame:
"""
Sample empirical variograms with binning adaptable to multiple ranges and spatial subsampling adapted for raster data.
By default, subsampling is based on RasterEquidistantMetricSpace implemented in scikit-gstat. This method samples more
effectively large grid data by isolating pairs of spatially equidistant ensembles for distributed pairwise comparison.
In practice, two subsamples are drawn for pairwise comparison: one from a disk of certain radius within the grid, and
another one from rings of larger radii that increase steadily between the pixel size and the extent of the raster.
Those disk and rings are sampled several times across the grid using random centers.
If values are provided as a Raster subclass, nothing else is required.
If values are provided as a 2D array (M,N), a ground sampling distance is sufficient to derive the pairwise distances.
If values are provided as a 1D array (N), an array of coordinates (N,2) or (2,N) is expected. If the coordinates
do not correspond to all points of the grid, a ground sampling distance is needed to correctly get the grid size.
Spatial subsampling method argument subsample_method can be one of "cdist_equidistant", "cdist_point", "pdist_point",
"pdist_disk" and "pdist_ring".
The cdist methods use MetricSpace classes of scikit-gstat and do pairwise comparison of two ensembles as in
scipy.spatial.cdist.
The pdist methods use methods to subsample the Raster points directly and do pairwise comparison within a single
ensemble as in scipy.spatial.pdist.
For the cdist methods, the variogram is estimated in a single run from the MetricSpace.
For the pdist methods, an iterative process is required: a list of ranges subsampled independently is used.
Variograms are derived independently for several runs and ranges using each pairwise sample, and later aggregated.
If the subsampling method selected is "random_point", the multi-range argument is ignored as range has no effect on
this subsampling method.
For pdist methods, keyword arguments are passed to skgstat.Variogram.
For cdist methods, keyword arguments are passed to both skgstat.Variogram and skgstat.MetricSpace.
:param values: values
:param gsd: ground sampling distance
:param coords: coordinates
:param subsample: number of samples to randomly draw from the values
:param subsample_method: spatial subsampling method
:param n_variograms: number of independent empirical variogram estimations
:param n_jobs: number of processing cores
:param verbose: print statements during processing
:param random_state: random state or seed number to use for calculations (to fix random sampling during testing)
:return: empirical variogram (variance, lags, counts)
"""
# First, check all that the values provided are OK
if isinstance(values, Raster):
gsd = values.res[0]
values, mask = get_array_and_mask(values.data)
elif isinstance(values, (np.ndarray, np.ma.masked_array)):
values, mask = get_array_and_mask(values)
else:
raise TypeError('Values must be of type np.ndarray, np.ma.masked_array or Raster subclass.')
values = values.squeeze()
# Then, check if the logic between values, coords and gsd is respected
if (gsd is not None or subsample_method in ['cdist_equidistant', 'pdist_disk','pdist_ring']) and values.ndim == 1:
raise TypeError('Values array must be 2D when using any of the "cdist_equidistant", "pdist_disk" and '
'"pdist_ring" methods, or providing a ground sampling distance instead of coordinates.')
elif coords is not None and values.ndim != 1:
raise TypeError('Values array must be 1D when providing coordinates.')
elif coords is not None and (coords.shape[0] != 2 and coords.shape[1] != 2):
raise TypeError('The coordinates array must have one dimension with length equal to 2')
# Check the subsample method provided exists, otherwise list options
if subsample_method not in ['cdist_equidistant','cdist_point','pdist_point','pdist_disk','pdist_ring']:
raise TypeError('The subsampling method must be one of "cdist_equidistant, "cdist_point", "pdist_point", '
'"pdist_disk" or "pdist_ring".')
# Check that, for several runs, the binning function is an Iterable, otherwise skgstat might provide variogram
# values over slightly different binnings due to randomly changing subsample maximum lags
if n_variograms > 1 and 'bin_func' in kwargs.keys() and not isinstance(kwargs.get('bin_func'), Iterable):
warnings.warn('Using a named binning function of scikit-gstat might provide different binnings for each '
'independent run. To remediate that issue, pass bin_func as an Iterable of right bin edges, '
'(or use default bin_func).')
# Defaulting to coordinates if those are provided
if coords is not None:
nx = None
ny = None
# Making the shape of coordinates consistent if they are transposed
if coords.shape[0] == 2 and coords.shape[1] != 2:
coords = np.transpose(coords)
# If no coordinates provided, we use the shape of the array and the provided ground sampling distance to derive
# relative coordinates (starting at zero)
else:
nx, ny = np.shape(values)
x, y = np.meshgrid(np.arange(0, values.shape[0] * gsd, gsd), np.arange(0, values.shape[1] * gsd, gsd))
coords = np.dstack((x.flatten(), y.flatten())).squeeze()
values = values.flatten()
# Get the ground sampling distance from the coordinates before keeping only valid data, if it was not provided
if gsd is None:
gsd = np.mean([coords[0, 0] - coords[0, 1], coords[0, 0] - coords[1, 0]])
# Get extent
extent = (np.min(coords[:, 0]), np.max(coords[:, 0]), np.min(coords[:, 1]), np.max(coords[:, 1]))
# Get the maximum lag from the coordinates before keeping only valid data, if it was not provided
if 'maxlag' not in kwargs.keys():
# We define maximum lag as the maximum distance between coordinates (needed to provide custom bins, otherwise
# skgstat rewrites the maxlag with the subsample of coordinates provided)
maxlag = np.sqrt((np.max(coords[:, 0]) - np.min(coords[:, 0]))**2
+ (np.max(coords[:, 1]) - np.min(coords[:, 1]))**2)
kwargs.update({'maxlag': maxlag})
# Keep only valid data for cdist methods, remove later for pdist methods
if 'cdist' in subsample_method:
ind_valid = np.isfinite(values)
values = values[ind_valid]
coords = coords[ind_valid, :]
if 'bin_func' not in kwargs.keys():
# If no bin_func is provided, we provide an Iterable to provide a custom binning function to skgstat,
# because otherwise bins might be inconsistent across runs
bin_func = []
right_bin_edge = np.sqrt(2) * gsd
while right_bin_edge < kwargs.get('maxlag'):
bin_func.append(right_bin_edge)
# We use the default exponential increasing factor of RasterEquidistantMetricSpace, adapted for grids
right_bin_edge *= np.sqrt(2)
bin_func.append(kwargs.get('maxlag'))
kwargs.update({'bin_func': bin_func})
# Prepare necessary arguments to pass to variogram subfunctions
args = {'values': values, 'coords': coords, 'subsample_method': subsample_method, 'subsample': subsample,
'verbose': verbose}
if subsample_method in ['cdist_equidistant','pdist_ring','pdist_disk', 'pdist_point']:
# The shape is needed for those three methods
args.update({'shape': (nx, ny)})
if subsample_method == 'cdist_equidistant':
# The coordinate extent is needed for this method
args.update({'extent':extent})
else:
args.update({'gsd': gsd})
# If a random_state is passed, each run needs to be passed an independent child random state, otherwise they will
# provide exactly the same sampling and results
if random_state is not None:
# Define the random state if only a seed is provided
if isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
# Create a list of child random states
if n_variograms == 1:
# No issue if there is only one variogram run
list_random_state = [rnd]
else:
# Otherwise, pass a list of seeds
list_random_state = list(rnd.choice(n_variograms, n_variograms, replace=False))
else:
list_random_state = [None for i in range(n_variograms)]
# Derive the variogram
# Differentiate between 1 core and several cores for multiple runs
# All variogram runs have random sampling inherent to their subfunctions, so we provide the same input arguments
if n_jobs == 1:
if verbose:
print('Using 1 core...')
list_df_run = []
for i in range(n_variograms):
argdict = {'i': i, 'imax': n_variograms, 'random_state': list_random_state[i], **args, **kwargs}
df_run = _wrapper_get_empirical_variogram(argdict=argdict)
list_df_run.append(df_run)
else:
if verbose:
print('Using ' + str(n_jobs) + ' cores...')
pool = mp.Pool(n_jobs, maxtasksperchild=1)
argdict = [{'i': i, 'imax': n_variograms, 'random_state': list_random_state[i], **args, **kwargs} for i in range(n_variograms)]
list_df_run = pool.map(_wrapper_get_empirical_variogram, argdict, chunksize=1)
pool.close()
pool.join()
# Aggregate multiple ranges subsampling
df = pd.concat(list_df_run)
# For a single run, no multi-run sigma estimated
if n_variograms == 1:
df['err_exp'] = np.nan
# For several runs, group results, use mean as empirical variogram, estimate sigma, and sum the counts
else:
df_grouped = df.groupby('bins', dropna=False)
df_mean = df_grouped[['exp']].mean()
df_std = df_grouped[['exp']].std()
df_count = df_grouped[['count']].sum()
df_mean['bins'] = df_mean.index.values
df_mean['err_exp'] = df_std['exp']
df_mean['count'] = df_count['count']
df = df_mean
return df
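# Minimal usage sketch with hypothetical inputs: `dh` is a 2D array of elevation differences with a
# 30 m ground sampling distance; all other keyword arguments keep their defaults.
# >>> df_vgm = sample_empirical_variogram(values=dh, gsd=30., subsample=1000, n_variograms=5, random_state=42)
# >>> df_vgm[['bins', 'exp', 'err_exp', 'count']].head()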
def fit_sum_model_variogram(list_model: list[str], empirical_variogram: pd.DataFrame,
bounds: list[tuple[float, float]] = None,
p0: list[float] = None) -> tuple[Callable, list[float]]:
"""
Fit a multi-range variogram model to an empirical variogram, weighted least-squares based on sampling errors
:param list_model: list of K variogram models to sum for the fit: from short-range to long-ranges
:param empirical_variogram: empirical variogram
:param bounds: bounds of ranges and sills for each model (shape K x 4 = K x range lower, range upper, sill lower, sill upper)
:param p0: initial guess of ranges and sills each model (shape K x 2 = K x range first guess, sill first guess)
:return: modelled variogram function, coefficients
"""
# TODO: expand to other models than spherical, exponential, gaussian (more than 2 arguments)
# Define a sum of variogram function
def vgm_sum(h, *args):
fn = 0
i = 0
for model in list_model:
fn += skg.models.spherical(h, args[i], args[i+1])
# fn += vgm(h, model=model,crange=args[i],psill=args[i+1])
i += 2
return fn
# First, filter outliers
empirical_variogram = empirical_variogram[np.isfinite(empirical_variogram.exp.values)]
# Use shape of empirical variogram to assess rough boundaries/first estimates
n_average = np.ceil(len(empirical_variogram.exp.values) / 10)
exp_movaverage = np.convolve(empirical_variogram.exp.values, np.ones(int(n_average)) / n_average, mode='valid')
grad = np.gradient(exp_movaverage, 2)
# Maximum variance of the process
max_var = np.max(exp_movaverage)
# Simplify things for scipy: let's provide boundaries and first guesses
if bounds is None:
bounds = []
for i in range(len(list_model)):
# Use largest boundaries possible for our problem
psill_bound = [0, max_var]
range_bound = [0, empirical_variogram.bins.values[-1]]
# Add bounds and guesses with same order as function arguments
bounds.append(range_bound)
bounds.append(psill_bound)
if p0 is None:
p0 = []
for i in range(len(list_model)):
# Use psill evenly distributed
psill_p0 = ((i+1)/len(list_model))*max_var
# Use corresponding ranges
# !! This fails when no empirical value crosses this (too wide binning/nugget)
# ind = np.array(np.abs(exp_movaverage-psill_p0)).argmin()
# range_p0 = empirical_variogram.bins.values[ind]
range_p0 = ((i+1)/len(list_model)) * empirical_variogram.bins.values[-1]
p0.append(range_p0)
p0.append(psill_p0)
bounds = np.transpose(np.array(bounds))
# If the error provided is all NaNs (single variogram run), or all zeros (two variogram runs), run without weights
if np.all(np.isnan(empirical_variogram.err_exp.values)) or np.all(empirical_variogram.err_exp.values == 0):
cof, cov = curve_fit(vgm_sum, empirical_variogram.bins.values, empirical_variogram.exp.values, method='trf',
p0=p0, bounds=bounds)
# Otherwise, use a weighted fit
else:
# We need to filter for possible no data in the error
valid = np.isfinite(empirical_variogram.err_exp.values)
cof, cov = curve_fit(vgm_sum, empirical_variogram.bins.values[valid], empirical_variogram.exp.values[valid],
method='trf', p0=p0, bounds=bounds, sigma=empirical_variogram.err_exp.values[valid])
# Provide the output function (couldn't find a way to pass this through functool.partial as arguments are unordered)
def vgm_sum_fit(h):
fn = 0
i = 0
for model in list_model:
fn += skg.models.spherical(h, cof[i], cof[i+1])
i += 2
return fn
return vgm_sum_fit, cof
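# Minimal usage sketch, continuing from a dataframe `df_vgm` returned by sample_empirical_variogram:
# fit the sum of a short-range and a long-range spherical model, then evaluate the fitted function.
# >>> vgm_fun, coefs = fit_sum_model_variogram(['Sph', 'Sph'], empirical_variogram=df_vgm)
# >>> vgm_fun(100.)   # modelled variance at a 100 m lag
# >>> coefs           # [range1, psill1, range2, psill2]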
def exact_neff_sphsum_circular(area: float, crange1: float, psill1: float, crange2: float, psill2: float) -> float:
"""
Number of effective samples derived from exact integration of sum of 2 spherical variogram models over a circular area.
The number of effective samples serves to convert between standard deviation/partial sills and standard error
over the area.
If SE is the standard error, SD the standard deviation and N_eff the number of effective samples, we have:
SE = SD / sqrt(N_eff) => N_eff = SD^2 / SE^2 => N_eff = (PS1 + PS2)/SE^2 where PS1 and PS2 are the partial sills
estimated from the variogram models, and SE is estimated by integrating the variogram models with parameters PS1/PS2
and R1/R2 where R1/R2 are the correlation ranges.
Source: Rolstad et al. (2009), appendix: http://dx.doi.org/10.3189/002214309789470950
:param area: circular area
:param crange1: range of short-range variogram model
:param psill1: partial sill of short-range variogram model
:param crange2: range of long-range variogram model
:param psill2: partial sill of long-range variogram model
:return: number of effective samples
"""
# short range variogram
c1 = psill1 # partial sill
a1 = crange1 # short correlation range
# long range variogram
c1_2 = psill2
a1_2 = crange2 # long correlation range
h_equiv = np.sqrt(area / np.pi)
# hypothesis of a circular shape to integrate variogram model
if h_equiv > a1_2:
std_err = np.sqrt(c1 * a1 ** 2 / (5 * h_equiv ** 2) + c1_2 * a1_2 ** 2 / (5 * h_equiv ** 2))
elif (h_equiv < a1_2) and (h_equiv > a1):
std_err = np.sqrt(c1 * a1 ** 2 / (5 * h_equiv ** 2) + c1_2 * (1-h_equiv / a1_2+1 / 5 * (h_equiv / a1_2) ** 3))
else:
std_err = np.sqrt(c1 * (1-h_equiv / a1+1 / 5 * (h_equiv / a1) ** 3) +
c1_2 * (1-h_equiv / a1_2+1 / 5 * (h_equiv / a1_2) ** 3))
return (psill1 + psill2)/std_err**2
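# Illustrative conversion with hypothetical values: standard error of the mean over a 1 km2 circular
# area for a double-range spherical variogram, using SE = SD / sqrt(N_eff) with SD^2 = psill1 + psill2.
# >>> n_eff = exact_neff_sphsum_circular(area=1e6, crange1=100., psill1=0.8, crange2=2000., psill2=0.2)
# >>> se = np.sqrt(0.8 + 0.2) / np.sqrt(n_eff)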
def neff_circ(area: float, list_vgm: list[tuple[float, str, float]]) -> float:
"""
Number of effective samples derived from numerical integration for any sum of variogram models a circular area
(generalization of Rolstad et al. (2009): http://dx.doi.org/10.3189/002214309789470950)
The number of effective samples N_eff serves to convert between standard deviation/partial sills and standard error
over the area: SE = SD / sqrt(N_eff) if SE is the standard error, SD the standard deviation.
:param area: area
:param list_vgm: variogram functions to sum (range, model name, partial sill)
:returns: number of effective samples
"""
psill_tot = 0
for vario in list_vgm:
psill_tot += vario[2]
def hcov_sum(h):
fn = 0
for vario in list_vgm:
crange, model, psill = vario
fn += h*(cov(h, crange, model=model, psill=psill))
return fn
h_equiv = np.sqrt(area / np.pi)
full_int = integrate_fun(hcov_sum, 0, h_equiv)
std_err = np.sqrt(2*np.pi*full_int / area)
return psill_tot/std_err**2
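# Illustrative use with hypothetical values: same conversion as above, but through numerical
# integration, which accepts any sum of the variogram models implemented in vgm().
# >>> n_eff = neff_circ(area=1e6, list_vgm=[(100., 'Sph', 0.8), (2000., 'Sph', 0.2)])
# >>> se = np.sqrt(0.8 + 0.2) / np.sqrt(n_eff)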
def neff_rect(area: float, width: float, crange1: float, psill1: float, model1: str = 'Sph', crange2: float = None,
psill2: float = None, model2: str = None) -> float:
"""
Number of effective samples derived from numerical integration for a sum of 2 variogram functions over a rectangular area
:param area: area
:param width: width of rectangular area
:param crange1: correlation range of first variogram
:param psill1: partial sill of first variogram
:param model1: model of first variogram
:param crange2: correlation range of second variogram
:param psill2: partial sill of second variogram
:param model2: model of second variogram
:returns: number of effective samples
"""
def hcov_sum(h, crange1=crange1, psill1=psill1, model1=model1, crange2=crange2, psill2=psill2, model2=model2):
if crange2 is None or psill2 is None or model2 is None:
return h*(cov(h, crange1, model=model1, psill=psill1))
else:
return h*(cov(h, crange1, model=model1, psill=psill1)+cov(h, crange2, model=model2, psill=psill2))
width = min(width, area/width)
full_int = integrate_fun(hcov_sum, 0, width/2)
bin_int = np.linspace(width/2, area/width, 100)
for i in range(len(bin_int)-1):
low = bin_int[i]
upp = bin_int[i+1]
mid = bin_int[i] + (bin_int[i+1] - bin_int[i])/2
piec_int = integrate_fun(hcov_sum, low, upp)
full_int += piec_int * 2/np.pi*np.arctan(width/(2*mid))
std_err = np.sqrt(2*np.pi*full_int / area)
if crange2 is None or psill2 is None or model2 is None:
return psill1 / std_err ** 2
else:
return (psill1 + psill2) / std_err ** 2
def integrate_fun(fun: Callable, low_b: float, upp_b: float) -> float:
"""
Numerically integrate function between upper and lower bounds
:param fun: function
:param low_b: lower bound
:param upp_b: upper bound
:return: integral
"""
return integrate.quad(fun, low_b, upp_b)[0]
def cov(h: float, crange: float, model: str = 'Sph', psill: float = 1., kappa: float = 1/2, nugget: float = 0) -> Callable:
"""
Covariance function based on variogram function (COV = STD - VGM)
:param h: spatial lag
:param crange: correlation range
:param model: model
:param psill: partial sill
:param kappa: smoothing parameter for Exp Class
:param nugget: nugget
:returns: covariance function
"""
return (nugget + psill) - vgm(h, crange, model=model, psill=psill, kappa=kappa)
def vgm(h: float, crange: float, model: str = 'Sph', psill: float = 1., kappa: float = 1/2, nugget: float = 0):
"""
Compute variogram model function (Spherical, Exponential, Gaussian or Exponential Class)
:param h: spatial lag
:param crange: correlation range
:param model: model
:param psill: partial sill
:param kappa: smoothing parameter for Exp Class
:param nugget: nugget
:returns: variogram function
"""
c0 = nugget # nugget
c1 = psill # partial sill
a1 = crange # correlation range
s = kappa # smoothness parameter for the stable exponential (Exc) model
if model == 'Sph': # spherical model
if h < a1:
vgm = c0 + c1 * (3 / 2 * h / a1-1 / 2 * (h / a1) ** 3)
else:
vgm = c0 + c1
elif model == 'Exp': # exponential model
vgm = c0 + c1 * (1-np.exp(-h / a1))
elif model == 'Gau': # gaussian model
vgm = c0 + c1 * (1-np.exp(- (h / a1) ** 2))
elif model == 'Exc': # stable exponential model
vgm = c0 + c1 * (1-np.exp(-(h / a1)**s))
return vgm
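# Illustrative values for the spherical model: the variogram is zero at zero lag (no nugget here)
# and reaches the partial sill at and beyond the correlation range.
# >>> vgm(0., crange=100., model='Sph', psill=2.)     # 0.0
# >>> vgm(100., crange=100., model='Sph', psill=2.)   # 2.0
# >>> vgm(250., crange=100., model='Sph', psill=2.)   # 2.0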
def std_err_finite(std: float, neff_tot: float, neff: float) -> float:
"""
Standard error of subsample of a finite ensemble
:param std: standard deviation
:param neff_tot: maximum number of effective samples
:param neff: number of effective samples
:return: standard error
"""
return std * np.sqrt(1 / neff_tot * (neff_tot - neff) / neff_tot)
def std_err(std: float, neff: float) -> float:
"""
Standard error
:param std: standard deviation
:param neff: number of effective samples
:return: standard error
"""
return std * np.sqrt(1 / neff)
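# Illustrative value: with a standard deviation of 2 and 100 effective samples, SE = 2 / sqrt(100) = 0.2.
# >>> std_err(std=2., neff=100)   # 0.2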
def distance_latlon(tup1: tuple, tup2: tuple, earth_rad: float = 6373000) -> float:
"""
Distance between two lat/lon coordinates projected on a spheroid
ref: https://stackoverflow.com/questions/19412462/getting-distance-between-two-points-based-on-latitude-longitude
:param tup1: lon/lat coordinates of first point
:param tup2: lon/lat coordinates of second point
:param earth_rad: radius of the earth in meters
:return: distance
"""
lat1 = m.radians(abs(tup1[1]))
lon1 = m.radians(abs(tup1[0]))
lat2 = m.radians(abs(tup2[1]))
lon2 = m.radians(abs(tup2[0]))
dlon = lon2 - lon1
dlat = lat2 - lat1
a = m.sin(dlat / 2)**2 + m.cos(lat1) * m.cos(lat2) * m.sin(dlon / 2)**2
c = 2 * m.atan2(m.sqrt(a), m.sqrt(1 - a))
distance = earth_rad * c
return distance
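# Illustrative value: one degree of longitude at the equator spans roughly 111 km on this spheroid.
# Coordinates are passed as (lon, lat) tuples.
# >>> distance_latlon((0., 0.), (1., 0.))   # ~111.2 km, returned in metres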
def kernel_sph(xi: float, x0: float, a1: float) -> float:
# TODO: homogenize kernel/variogram use
"""
Spherical kernel
:param xi: position of first point
:param x0: position of second point
:param a1: range of kernel
:return: covariance between the two points
"""
if np.abs(xi - x0) > a1:
return 0
else:
return 1 - 3 / 2 * np.abs(xi-x0) / a1 + 1 / 2 * (np.abs(xi-x0) / a1) ** 3
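# Illustrative values: the spherical kernel equals 1 at zero lag and drops to 0 at (and beyond) its range.
# >>> kernel_sph(0., 0., a1=100.)     # 1.0
# >>> kernel_sph(0., 100., a1=100.)   # 0.0
# >>> kernel_sph(0., 150., a1=100.)   # 0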
def part_covar_sum(argsin: tuple) -> float:
"""
Multiprocessing wrapper for covariance summing
:param argsin: Tupled argument for covariance calculation
:return: covariance sum
"""
list_tuple_errs, corr_ranges, list_area_tot, list_lat, list_lon, i_range = argsin
n = len(list_tuple_errs)
part_var_err = 0
for i in i_range:
for j in range(n):
d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))
for k in range(len(corr_ranges)):
part_var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \
list_area_tot[i] * list_area_tot[j]
return part_var_err
def double_sum_covar(list_tuple_errs: list[float], corr_ranges: list[float], list_area_tot: list[float],
list_lat: list[float], list_lon: list[float], nproc: int = 1) -> float:
"""
Double sum of covariances for propagating multi-range correlated errors between disconnected spatial ensembles
:param list_tuple_errs: list of tuples of correlated errors by range, by ensemble
:param corr_ranges: list of correlation ranges
:param list_area_tot: list of areas of ensembles
:param list_lat: list of center latitude of ensembles
:param list_lon: list of center longitude of ensembles
:param nproc: number of cores to use for multiprocessing
:returns: sum of covariances
"""
n = len(list_tuple_errs)
if nproc == 1:
print('Deriving double covariance sum with 1 core...')
var_err = 0
for i in range(n):
for j in range(n):
d = distance_latlon((list_lon[i], list_lat[i]), (list_lon[j], list_lat[j]))
for k in range(len(corr_ranges)):
var_err += kernel_sph(0, d, corr_ranges[k]) * list_tuple_errs[i][k] * list_tuple_errs[j][k] * \
list_area_tot[i] * list_area_tot[j]
else:
print('Deriving double covariance sum with '+str(nproc)+' cores...')
pack_size = int(np.ceil(n/nproc))
argsin = [(list_tuple_errs, corr_ranges, list_area_tot, list_lon, list_lat, np.arange(
i, min(i+pack_size, n))) for k, i in enumerate(np.arange(0, n, pack_size))]
pool = mp.Pool(nproc, maxtasksperchild=1)
outputs = pool.map(part_covar_sum, argsin, chunksize=1)
pool.close()
pool.join()
var_err = np.sum(np.array(outputs))
area_tot = 0
for j in range(len(list_area_tot)):
area_tot += list_area_tot[j]
var_err /= np.nansum(area_tot) ** 2
return np.sqrt(var_err)
def patches_method(values: np.ndarray, gsd: float, area: float, mask: Optional[np.ndarray] = None,
perc_min_valid: float = 80., statistics: Iterable[Union[str, Callable, None]] = ['count', np.nanmedian ,nmad],
patch_shape: str = 'circular', n_patches: int = 1000, verbose: bool = False,
random_state: None | int | np.random.RandomState | np.random.Generator = None) -> pd.DataFrame:
"""
Patches method for empirical estimation of the standard error over an integration area
:param values: values
:param gsd: ground sampling distance
:param mask: mask of sampled terrain
:param area: size of integration area
:param perc_min_valid: minimum valid area in the patch
:param statistics: list of statistics to compute in the patch
:param patch_shape: shape of patch ['circular' or 'rectangular']
:param n_patches: maximum number of patches to sample
:param verbose: print statement to console
:param random_state: random state or seed number to use for calculations (to fix random sampling during testing)
:return: tile, mean, median, std and count of each patch
"""
# Define state for random subsampling (to fix results during testing)
if random_state is None:
rnd = np.random.default_rng()
elif isinstance(random_state, (np.random.RandomState, np.random.Generator)):
rnd = random_state
else:
rnd = np.random.RandomState(np.random.MT19937(np.random.SeedSequence(random_state)))
statistics_name = [f if isinstance(f,str) else f.__name__ for f in statistics]
values, mask_values = get_array_and_mask(values)
values = values.squeeze()
# Use all grid if no mask is provided
if mask is None:
mask = np.ones(np.shape(values),dtype=bool)
# First, remove non sampled area (but we need to keep the 2D shape of raster for patch sampling)
valid_mask = np.logical_and(~mask_values, mask)
values[~valid_mask] = np.nan
# Divide the raster into quadrants where we can sample
nx, ny = np.shape(values)
valid_count = len(values[~np.isnan(values)])
count = nx * ny
if verbose:
print('Number of valid pixels: ' + str(count))
nb_cadrant = int(np.floor(np.sqrt((count * gsd ** 2) / area) + 1))
# For rectangular quadrants
nx_sub = int(np.floor((nx - 1) / nb_cadrant))
ny_sub = int(np.floor((ny - 1) / nb_cadrant))
# For circular patches
rad = np.sqrt(area/np.pi) / gsd
# Create list of all possible cadrants
list_cadrant = [[i, j] for i in range(nb_cadrant) for j in range(nb_cadrant)]
u = 0
# Keep sampling while there are cadrants left and we are below the maximum number of patches to sample
remaining_nsamp = n_patches
list_df = []
while len(list_cadrant) > 0 and u < n_patches:
# Draw random indices from the list of cadrants; sample more than enough at once to avoid
# repeatedly drawing and re-filtering the list
list_idx_cadrant = rnd.choice(len(list_cadrant), size=min(len(list_cadrant), 10*remaining_nsamp))
for idx_cadrant in list_idx_cadrant:
if verbose:
print('Working on a new cadrant')
# Select center coordinates
i = list_cadrant[idx_cadrant][0]
j = list_cadrant[idx_cadrant][1]
if patch_shape == 'rectangular':
patch = values[nx_sub * i:nx_sub * (i + 1), ny_sub * j:ny_sub * (j + 1)].flatten()
elif patch_shape == 'circular':
center_x = np.floor(nx_sub*(i+1/2))
center_y = np.floor(ny_sub*(j+1/2))
mask = create_circular_mask((nx, ny), center=[center_x, center_y], radius=rad)
patch = values[mask]
else:
raise ValueError('Patch method must be rectangular or circular.')
nb_pixel_total = len(patch)
nb_pixel_valid = len(patch[np.isfinite(patch)])
if nb_pixel_valid >= np.ceil(perc_min_valid / 100. * nb_pixel_total):
u=u+1
if u > n_patches:
break
if verbose:
print('Found valid cadrant ' + str(u) + ' (maximum: ' + str(n_patches) + ')')
df = pd.DataFrame()
df = df.assign(tile=[str(i) + '_' + str(j)])
for j, statistic in enumerate(statistics):
if isinstance(statistic, str):
if statistic == 'count':
df[statistic] = [nb_pixel_valid]
else:
raise ValueError('Only the string "count" is supported for named statistics.')
else:
df[statistics_name[j]] = [statistic(patch)]
list_df.append(df)
# Get remaining samples to draw
remaining_nsamp = n_patches - u
# Remove cadrants already sampled from list
list_cadrant = [c for j, c in enumerate(list_cadrant) if j not in list_idx_cadrant]
if len(list_df)>0:
df_all = pd.concat(list_df)
else:
warnings.warn('No valid patch found covering this area: returning dataframe containing only nans' )
df_all = pd.DataFrame()
for j, statistic in enumerate(statistics):
df_all[statistics_name[j]] = [np.nan]
return df_all
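# Hedged usage sketch (synthetic inputs and hypothetical parameter values; not part of the
# original module): sample circular 1 km^2 patches on a random 500 x 500 grid with a 10 m
# ground sampling distance and a fixed random state for reproducibility.
def _example_patches_method():
    rng = np.random.default_rng(42)
    dh = rng.normal(0, 1, size=(500, 500))
    # The returned dataframe has one row per accepted patch, with a 'tile' identifier and
    # one column per requested statistic (here the defaults: 'count', 'nanmedian', 'nmad').
    return patches_method(dh, gsd=10., area=1e6, n_patches=50, random_state=42)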
def plot_vgm(df: pd.DataFrame, list_fit_fun: Optional[list[Callable[[float],float]]] = None,
list_fit_fun_label: Optional[list[str]] = None, ax: matplotlib.axes.Axes | None = None,
xscale='linear', xscale_range_split: Optional[list] = None,
xlabel = None, ylabel = None, xlim = None, ylim = None):
"""
Plot empirical variogram, and optionally also plot one or several model fits.
Input dataframe is expected to be the output of xdem.spatialstats.sample_empirical_variogram.
Input function model is expected to be the output of xdem.spatialstats.fit_sum_model_variogram.
:param df: dataframe of empirical variogram
:param list_fit_fun: list of model function fits
:param list_fit_fun_label: list of model function fits labels
:param ax: plotting ax to use, creates a new one by default
:param xscale: scale of x axis
:param xscale_range_split: list of ranges at which to split the figure
:param xlabel: label of x axis
:param ylabel: label of y axis
:param xlim: limits of x axis
:param ylim: limits of y axis
:return:
"""
# Create axes if they are not passed
if ax is None:
fig = plt.figure()
elif isinstance(ax, matplotlib.axes.Axes):
ax = ax
fig = ax.figure
else:
raise ValueError("ax must be a matplotlib.axes.Axes instance or None")
if ylabel is None:
ylabel = r'Variance [$\mu$ $\pm \sigma$]'
if xlabel is None:
xlabel = 'Spatial lag (m)'
init_gridsize = [10, 10]
# Create parameters to split x axis into different linear scales
# If there is no split, get parameters for a single subplot
if xscale_range_split is None:
nb_subpanels=1
if xscale == 'log':
xmin = [np.min(df.bins)/2]
else:
xmin = [0]
xmax = [np.max(df.bins)]
xgridmin = [0]
xgridmax = [init_gridsize[0]]
gridsize = init_gridsize
# Otherwise, derive a list for each subplot
else:
# Add initial zero if not in input
if xscale_range_split[0] != 0:
if xscale == 'log':
first_xmin = np.min(df.bins)/2
else:
first_xmin = 0
xscale_range_split = [first_xmin] + xscale_range_split
# Add maximum distance if not in input
if xscale_range_split[-1] != np.max(df.bins):
xscale_range_split.append(np.max(df.bins))
# Scale grid size by the number of subpanels
nb_subpanels = len(xscale_range_split)-1
gridsize = init_gridsize.copy()
gridsize[0] *= nb_subpanels
# Create list of parameters to pass to ax/grid objects of subpanels
xmin, xmax, xgridmin, xgridmax = ([] for i in range(4))
for i in range(nb_subpanels):
xmin.append(xscale_range_split[i])
xmax.append(xscale_range_split[i+1])
xgridmin.append(init_gridsize[0]*i)
xgridmax.append(init_gridsize[0]*(i+1))
# Need a grid plot to show the sample count and the statistic
grid = plt.GridSpec(gridsize[1], gridsize[0], wspace=0.5, hspace=0.5)
# Loop over each subpanel
for k in range(nb_subpanels):
# First, an axis to plot the sample histogram
ax0 = fig.add_subplot(grid[:3, xgridmin[k]:xgridmax[k]])
ax0.set_xscale(xscale)
ax0.set_xticks([])
# Plot the histogram manually with fill_between
interval_var = [0] + list(df.bins)
for i in range(len(df)):
count = df['count'].values[i]
ax0.fill_between([interval_var[i], interval_var[i+1]], [0] * 2, [count] * 2,
facecolor=plt.cm.Greys(0.75), alpha=1,
edgecolor='white', linewidth=0.5)
if k == 0:
ax0.set_ylabel('Sample count')
# Scientific format to avoid undesired additional space on the label side
ax0.ticklabel_format(axis='y', style='sci', scilimits=(0, 0))
else:
ax0.set_yticks([])
# Ignore warnings for log scales
ax0.set_xlim((xmin[k], xmax[k]))
# Now, plot the statistic of the data
ax = fig.add_subplot(grid[3:, xgridmin[k]:xgridmax[k]])
# Get the bins center
bins_center = np.subtract(df.bins, np.diff([0] + df.bins.tolist()) / 2)
# If all the estimated errors are all NaN (single run), simply plot the empirical variogram
if np.all(np.isnan(df.err_exp)):
ax.scatter(bins_center, df.exp, label='Empirical variogram', color='blue', marker='x')
# Otherwise, plot the error estimates through multiple runs
else:
ax.errorbar(bins_center, df.exp, yerr=df.err_exp, label='Empirical variogram (1-sigma s.d)', fmt='x')
# If a list of functions is passed, plot the modelled variograms
if list_fit_fun is not None:
for i, fit_fun in enumerate(list_fit_fun):
x = np.linspace(xmin[k], xmax[k], 1000)
y = fit_fun(x)
if list_fit_fun_label is not None:
ax.plot(x, y, linestyle='dashed', label=list_fit_fun_label[i], zorder=30)
else:
ax.plot(x, y, linestyle='dashed', color='black', zorder=30)
if list_fit_fun_label is None:
ax.plot([],[],linestyle='dashed',color='black',label='Model fit')
ax.set_xscale(xscale)
if nb_subpanels>1 and k == (nb_subpanels-1):
ax.xaxis.set_ticks(np.linspace(xmin[k], xmax[k], 3))
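# Hedged usage note (assumed workflow, not from the original module): plot_vgm expects the
# dataframe returned by xdem.spatialstats.sample_empirical_variogram, with columns 'bins',
# 'exp', 'err_exp' and 'count', e.g. plot_vgm(df_vgm, xscale='log'); model fits produced by
# fit_sum_model_variogram can be passed through list_fit_fun.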
from abc import ABC, abstractmethod
from functools import cached_property
from typing import Tuple, Optional, List, Callable
import ConfigSpace as CS
import numpy as np
import ConfigSpace.hyperparameters as CSH
from matplotlib import pyplot as plt
from pyPDP.algorithms import Algorithm
from pyPDP.algorithms.ice import ICE, ICECurve
from pyPDP.blackbox_functions import BlackboxFunction
from pyPDP.surrogate_models import SurrogateModel
from pyPDP.utils.typing import SelectedHyperparameterType
from pyPDP.utils.utils import unscale_float, calculate_log_delta, ConfigSpaceHolder, get_hyperparameters
from scipy.stats import norm
Sample = Tuple[np.ndarray, np.ndarray] # configurations, variances
class Region(ConfigSpaceHolder):
def __init__(
self,
x_points: np.ndarray,
y_points: np.ndarray,
y_variances: np.ndarray,
config_space: CS.ConfigurationSpace,
selected_hyperparameter: SelectedHyperparameterType
):
"""
:param x_points: Shape: (num_points_in_region, num_grid_points, num_features)
:param y_points: Shape: (num_points_in_region, num_grid_points)
:param y_variances: Shape: (num_points_in_region, num_grid_points)
"""
super().__init__(config_space)
self.x_points = x_points
self.y_points = y_points
self.y_variances = y_variances
if isinstance(selected_hyperparameter, CSH.Hyperparameter):
selected_hyperparameter = [selected_hyperparameter]
self.selected_hyperparameter = tuple(selected_hyperparameter)
assert len(self.x_points) == len(self.y_points) == len(self.y_variances)
assert self.x_points.shape[1] == self.y_points.shape[1] == self.y_variances.shape[1]
def __len__(self):
return len(self.x_points)
@cached_property
def mean_confidence(self) -> float:
return np.mean(np.sqrt(self.y_variances)).item()
@cached_property
def loss(self) -> float:
# l2 loss calculation according to paper
mean_variances = np.mean(self.y_variances, axis=0)
#Importing Libraries
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
#Importing Dataset
train = pd.read_csv('train.csv')
train.info()
null = train.isnull().sum()
# Dropping the New_Price column because it has the most NaN values, and Unnamed: 0 because it duplicates the index
train = train.drop(['Unnamed: 0', 'New_Price'], axis=1)
# Dropping NaN values
train = train.dropna()
train = train.reset_index(drop=True)
null = train.isnull().sum()
train['Location'].value_counts()
train['Fuel_Type'].value_counts()
train['Transmission'].value_counts()
train['Owner_Type'].value_counts()
# Lets split some columns to make a new feature
train_df = train.copy()
name = train_df['Name'].str.split(" ", n =2, expand = True)
train_df['Company'] = name[0]
train_df['Model'] = name[1]
train_df['Mileage'] = train_df['Mileage'].str.split(" ", n=1, expand = True).get(0)
train_df['Engine'] = train_df['Engine'].str.split(" ", n=1, expand = True).get(0)
train_df['Power'] = train_df['Power'].str.split(" ", n=1, expand = True).get(0)
train_df = train_df.drop(['Name'], axis = 1)
train_df['Mileage'] = train_df['Mileage'].astype(float)
train_df['Engine'] = train_df['Engine'].astype(int)
train_df.replace("null", np.nan, inplace = True)
train_df = train_df.dropna()
train_df = train_df.reset_index(drop=True)
train_df['Power'] = train_df['Power'].astype(float)
train_df['Company'].value_counts()
train_df['Company'] = train_df['Company'].replace('ISUZU', 'Isuzu')
# Handling rare categorical features
cat_features = [feature for feature in train_df.columns if train_df[feature].dtype == 'O']
for feature in cat_features:
temp = train_df.groupby(feature)['Price'].count()/len(train_df)
temp_df = temp[temp>0.01].index
train_df[feature] = np.where(train_df[feature].isin(temp_df), train_df[feature], 'Rare')
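# Categories covering less than 1% of the rows (temp > 0.01 above) are collapsed into a
# single 'Rare' level so that one-hot encoding does not create near-empty columns.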
train_df['Company'].value_counts()
train_df.info()
train_df['Seats'] = train_df['Seats'].astype(int)
#E.D.A.
features = [feature for feature in train_df.columns]
import seaborn as sns
plt.scatter(x = "Year", y = "Price" , data = train_df )
sns.boxplot(x = 'Company' , y= 'Price', data= train_df)
for feature in features:
df =train.copy()
df[feature].hist(bins=20)
plt.xlabel(feature)
plt.ylabel('Count')
plt.title(feature)
plt.show()
sns.distplot(train_df['Price'])
# Price does not appear to depend much on location, so we can drop this column
train_df = train_df.drop(['Location'], axis = 1)
#Encoding Categorical data
columns = ['Fuel_Type','Transmission','Owner_Type','Company','Model']
def categorical_ohe(multicolumns):
    df = train_df.copy()
    i = 0
    for col in multicolumns:
        print(col)
        d1 = pd.get_dummies(train_df[col])
        # Note: the original categorical columns are dropped later, when 'data' is built below.
        if i == 0:
            df = d1.copy()
        else:
            df = pd.concat([df, d1], axis=1)
        i = i + 1
    df = pd.concat([df, train_df], axis=1)
    return df
final_df = categorical_ohe(columns)
final_df = final_df.loc[:,~final_df.columns.duplicated()]
import datetime
now = datetime.datetime.now()
final_df['Year'] = final_df['Year'].apply(lambda x : now.year - x)
corr = final_df.corr()
# From the correlation matrix we can see that Kilometers_Driven and Seats have little impact on price, so we can drop them
data = final_df.drop(columns, axis=1)  # Dropping the original categorical columns
x = data.to_csv('data.csv', index = False)
X= data.drop(['Price','Rare','Kilometers_Driven','Seats'], axis = 1)
y = data['Price']
#Splitting Dataset into training and test data
from sklearn.model_selection import train_test_split
X_train, X_test, y_train, y_test = train_test_split(X, y, test_size= 0.2, random_state = 0)
#Feature Scaling
from sklearn.preprocessing import StandardScaler
scaler = StandardScaler()
scaler.fit(X)
X_train = scaler.transform(X_train)
X_test = scaler.transform(X_test)
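# Note: the scaler is fit on the full X before the split; fitting on X_train only would
# avoid leaking test-set statistics into the scaling.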
#Fitting Model
from sklearn.ensemble import RandomForestRegressor
rf = RandomForestRegressor(n_estimators = 100, random_state= 0)
rf.fit(X_train,y_train)
y_pred = rf.predict(X_test)
from sklearn.tree import DecisionTreeRegressor
dt = DecisionTreeRegressor(random_state = 0)
dt.fit(X_train, y_train)
dt_pred = dt.predict(X_test)
from sklearn.svm import SVR
svr = SVR(kernel = 'rbf')
svr.fit(X_train, y_train)
svr_pred = svr.predict(X_test)
from xgboost import XGBRegressor
xgb = XGBRegressor()
xgb.fit(X_train,y_train)
y_pred1 = xgb.predict(X_test)
from sklearn.metrics import r2_score
r2_score(y_test, svr_pred)
r2_score(y_test, y_pred)
r2_score(y_test, dt_pred)
r2_score(y_test, y_pred1)
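# Illustrative side-by-side comparison of the four models (uses only objects defined above).
for name, pred in [('SVR', svr_pred), ('RandomForest', y_pred), ('DecisionTree', dt_pred), ('XGBoost', y_pred1)]:
    print(name, r2_score(y_test, pred))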
def predict_price(Company,Model, Fuel_Type, Transmission,
Owner_Type, Year, Mileage, Engine,
Power):
com_index = np.where(X.columns==Company)[0][0]
model_index = np.where(X.columns==Model)[0][0]
fuel_index = np.where(X.columns==Fuel_Type)[0][0]
trans_index = np.where(X.columns==Transmission)[0][0]
owner_index = np.where(X.columns==Owner_Type)[0][0]
import numpy as np
import Weighted_SVM as WS
from cvxopt import matrix, solvers
import cvxopt
import scipy.sparse as sparse
def scipy_sparse_to_spmatrix(A):
coo = A.tocoo()
SP = cvxopt.spmatrix(coo.data.tolist(), coo.row.tolist(), coo.col.tolist(), size=A.shape)
return(SP)
def Uni_arr(X):
# returns the distinct rows of X, in order of first appearance
R,C = X.shape
Set = [X[0].tolist()]
for i in np.arange(1,R):
temp = sum([X[i].tolist()==j for j in Set])
if temp == 0:
Set.append(X[i].tolist())
return(Set)
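# Example: Uni_arr keeps the distinct rows of a 2-D array in order of first appearance,
# e.g. Uni_arr(np.array([[1, 0], [1, 0], [0, 1]])) -> [[1, 0], [0, 1]].
# np.unique(X, axis=0) returns the same rows (sorted) and is a vectorised alternative.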
class algor(object):
def __init__(self,Data,w2v):
# Data[0]: number of observations * word frequency
# Data[1]: covariates
# Data[2]: label
self.X = Data[0]
self.factor = Data[1]
self.FacNum = np.shape(np.mat(self.factor))[1]
self.Y = Data[2]
self.w2v = w2v.T # self.w2v is dimension of word embedding * number of words
self.wordNum = np.shape(self.w2v)[1] # The number of sentiment words used
self.LoBeta = np.shape(self.w2v)[0] # The length of Beta
self.ite = 0.0001
self.K = int(np.max(self.Y)) # 0,1,...,K
self.BETA = np.zeros(self.LoBeta)
self.W = [] # the set for W
self.BETA_0 = [] # the set for various beta_0
self.IndexBeta_0 = []
self.XforInput = []
self.RH = [] # used in step 1: B(t_ij) w_{x_{ij}}
self.LH = [] # used in step 2: beta * D
self.Vec_S = [] # The contant vector in step 3
self.AindexBeta0 = np.unique(self.IndexBeta_0)
self.IndexFY = []
self.Dict_embed = {}
self.Dict_beta0 = {}
self.DB = np.matmul(self.w2v,self.X.T.toarray())
self.SLL = []
self.Err = []
self.Err_1 = []
self.Beta_set = [self.BETA]
def Initialization(self):
self.YforInput = []
self.BETA = np.random.normal(0,0.1,self.LoBeta)
Cov_set = Uni_arr(self.factor)
for i in Cov_set:
Temp = np.ones(self.wordNum)
BTemp = 0-np.sort(np.random.uniform(-1, 1, int(self.K)))
self.Dict_embed.update({str(i):Temp})
self.Dict_beta0.update({str(i):BTemp})
for i in range(len(self.Y)):
for j in range(int(self.K)):
self.YforInput.append(2 * ((self.Y[i]-j)>0)-1)
self.a = np.zeros(len(self.YforInput))
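# Note: YforInput encodes the ordinal label Y as K binary labels, one per threshold j:
# +1 if Y > j and -1 otherwise, so each rating threshold becomes its own weighted-SVM
# sub-problem sharing the common beta.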
def Build_BW(self):
# calculating B * W of beta * D (B * W)
# D * B
#DB = np.matmul(self.w2v,self.X.T.toarray())
temp = []
EmbedTemp = []
for i in range(len(self.Y)):
Embed_Temp = self.Dict_embed.get(str(self.factor[i].tolist()))
EmbedTemp.append(Embed_Temp.tolist())
EmbedTemp = np.array(EmbedTemp)
BW = np.multiply(self.X.toarray(),EmbedTemp)
X_out = np.matmul(BW,self.w2v.T)
return(X_out)
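# Build_BW returns an (n_samples, LoBeta) array: each word-count row of X is scaled
# element-wise by its group's word weights W and then projected onto the embedding space
# via w2v.T, i.e. row_i = (x_i * w_group(i)) @ w2v.T.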
def Build_BWB(self):
BWP = np.matmul(self.BETA,self.w2v)
Out = self.X.toarray() * BWP
return(Out)
def Loss(self,Lam_1,Lam_2):
NB = np.linalg.norm(self.BETA)**2 * Lam_1 *0.5
NW = sum([np.linalg.norm(i)**2 for i in list(self.Dict_embed.values())]) * Lam_2*0.5
N_size = np.shape(self.X)[0]
Part = np.matmul(self.BETA, self.w2v)
Result = []
L = 0.
for i in range(N_size):
# Key the lookups the same way the dictionaries were built in Initialization
# (str of the covariate row converted to a list); otherwise .get() returns None.
Beta_0 = self.Dict_beta0.get(str(self.factor[i].tolist()))
W = self.Dict_embed.get(str(self.factor[i].tolist()))
Part_2 = np.multiply(self.X[i].toarray()[0], W)
Y_F = np.dot(Part, Part_2) + Beta_0
Y_B = 2*(self.Y[i] - np.array([i for i in range(int(self.K))])>0)-1
Re = 1 - Y_F * Y_B
Re1 = sum([np.max([0,i]) for i in Re])
L += Re1
return(L+NB+NW)
def Predict(self,X,cov):
N_size = np.shape(X)[0]
Part = np.matmul(self.BETA,self.w2v)
Result = []
for i in range(N_size):
Beta_0 = self.Dict_beta0.get(str(cov[i]))
W = self.Dict_embed.get(str(cov[i]))
Part_2 = np.multiply(X[i].toarray()[0],W)
Y_F = np.sign(np.dot(Part,Part_2)+Beta_0)
Result.append(sum(Y_F==1))
return(np.array(Result))
def Upd_Beta(self,Lam_1):
sample_weight = []
Data_input = []
TempMat = self.Build_BW()
for i in range(len(self.Y)):
B0temp = self.Dict_beta0.get(str(self.factor[i].tolist()))
for j in range(int(self.K)):
temp = 1 - self.YforInput[i * int(self.K) + j] * B0temp[j]
sample_weight.append(temp)
temp_Data_input = (TempMat[i] / temp).tolist()
Data_input.append(temp_Data_input)
sample_weight = np.array(sample_weight)
import numpy as np
import copy
import scipy
from scipy.stats import norm
from scipy import io,signal
import matplotlib.cm as cm
import matplotlib.pyplot as plt
from weighted_median import *
def check_str_bool(s):
return s in ['True' ,'true', '1', 't', 'y','YES' ,'Yes','yes', 'yeah','Yeah', 'yup', 'certainly', 'uh-huh']
class vec_properties:
def __init__(self,source,ws,time_unit,time_scale_to_seconds,length_unit,length_scale_to_meter):
self.source = source
self.ws = ws
self.time_unit = time_unit
self.time_scale_to_seconds = time_scale_to_seconds
self.length_unit = length_unit
self.length_scale_to_meter = length_scale_to_meter
self.velocity_units = length_unit+'/'+time_unit
def show(self):
print(
'source: ',self.source,'\n',
'window size: ',self.ws,'\n',
'dt: ',self.time_scale_to_seconds,'\n',
'pixel to meter: ',self.length_scale_to_meter,'\n',
'velocity units: ',self.velocity_units,'\n')
class field_properties:
def __init__(self,frame,time,images_path,source,time_unit,time_scale_to_seconds,length_unit,length_scale_to_meter):
self.frame = frame
self.time = time
self.images_path = images_path
self.source = source
self.history = ''
self.time_unit = time_unit
self.time_scale_to_seconds = time_scale_to_seconds
self.length_unit = length_unit
self.length_scale_to_meter = length_scale_to_meter
self.velocity_units = length_unit+'/'+time_unit
def show(self):
print(
'frame: ',self.frame,'\n',
'absolute time: ',self.time,'\n',
'images_path: ',self.images_path,'\n',
'source: ',self.source,'\n',
'dt: ',self.time_scale_to_seconds,'\n',
'pixel to meter: ',self.length_scale_to_meter,'\n',
'length units: ',self.length_scale_to_meter,'\n',
'velocity units: ',self.velocity_units)
class run_properties:
pass
class vector:
def __init__(self,X,Y,U,V,S2N,properties):
self.X = X
self.Y = Y
self.U = U
self.V = V
self.S2N = S2N
self.properties = properties
def convert_units(self,output_length_unit,output_time_unit):
LS = {'mm':0.001, 'cm':0.01, 'm':1.0,'meter':1.0,'meters':1.0, 'km':1000.}
TS = {'ms':0.001, 's':1.0,'second':1.0,'seconds':1.0, 'min':60.,'mins':60.,'h':3600.,'hour':3600.,'hours':3600.}
LS[self.properties.length_unit]=float(self.properties.length_scale_to_meter)
TS[self.properties.time_unit]=float(self.properties.time_scale_to_seconds)
self.X = self.X*(LS[self.properties.length_unit]/LS[output_length_unit])
self.Y = self.Y*(LS[self.properties.length_unit]/LS[output_length_unit])
self.U = self.U*(LS[self.properties.length_unit]/LS[output_length_unit])*(TS[output_time_unit]/TS[self.properties.time_unit])
self.V = self.V*(LS[self.properties.length_unit]/LS[output_length_unit])*(TS[output_time_unit]/TS[self.properties.time_unit])
self.properties.length_unit = output_length_unit
self.properties.length_scale_to_meter = LS[output_length_unit]
self.properties.time_unit = output_time_unit
self.properties.time_scale_to_seconds = TS[output_time_unit]
self.properties.velocity_units = output_length_unit+'/'+output_time_unit
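# Hedged usage sketch (hypothetical calibration values; not part of the original module):
# convert a single vector measured in pixels per frame (1 pixel = 0.0001 m, dt = 0.002 s)
# into metres per second.
def _example_convert_units():
    props = vec_properties('openpiv', 32, 'frame', 0.002, 'pixel', 0.0001)
    vec = vector(120.0, 80.0, 3.2, -1.1, 5.0, props)
    vec.convert_units('m', 's')
    return vec.U, vec.V, vec.properties.velocity_units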
class field:
def __init__(self,field_properties):
self.data = {}
self.filtered = {}
self.properties = field_properties
def __add__(self,other):
check_list = []
check_list.append(self.properties.length_unit == other.properties.length_unit)
check_list.append(self.properties.length_scale_to_meter == other.properties.length_scale_to_meter)
check_list.append(self.properties.time_unit == other.properties.time_unit)
check_list.append(self.properties.time_scale_to_seconds == other.properties.time_scale_to_seconds)
check_list.append(self.properties.velocity_units == other.properties.velocity_units)
if all(check_list):
sum_properties = self.properties
sum_properties.source = 'Sum'
sum_properties.frame = self.properties.frame + ' & ' + other.properties.frame
sum_properties.time = self.properties.time + ' & ' + other.properties.time
sum_properties.images_path = self.properties.images_path + ' & ' + other.properties.images_path
sum_field = field(sum_properties)
for xy in list(self.data.keys()):
sum_field.add_vec(self.data[xy])
for xy in list(other.data.keys()):
sum_field.add_vec(other.data[xy])
return sum_field
else:
print( 'Field properties do not match')
def add_vec(self, vector):
self.data[vector.X,vector.Y] = vector
def check_if_grid_point_exists(self,x,y):
xy = list(self.data.keys())
return (x,y) in xy
def move_to_filtered(self,vector):
self.filtered[vector.X,vector.Y] = copy.deepcopy(vector)
vector.U = np.nan
vector.V = np.nan
vector.properties.source = 'filtered'
def transfer(self,other):
for xy in list(other.data.keys()):
self.add_vec(other.data[xy])
def convert_field_units(self,output_length_unit,output_time_unit):
XY = list(self.data.keys())
if self.properties.length_unit == None or self.properties.length_unit == '':
self.properties.length_unit = str(input('field length units'))
if self.properties.length_scale_to_meter== None or self.properties.length_scale_to_meter == '':
self.length_scale_to_meter = str(input('field length units to meters'))
if self.properties.time_unit == None or self.properties.time_unit == '':
self.properties.time_unit = str(input('field time units'))
if self.properties.time_scale_to_seconds== None or self.properties.time_scale_to_seconds == '':
self.properties.time_scale_to_seconds = str(input('field time units to seconds'))
for xy in XY:
self.data[xy].properties.length_unit = self.properties.length_unit
self.data[xy].properties.length_scale_to_meter = self.properties.length_scale_to_meter
self.data[xy].properties.time_unit = self.properties.time_unit
self.data[xy].properties.time_scale_to_seconds = self.properties.time_scale_to_seconds
self.data[xy].convert_units(output_length_unit,output_time_unit)
self.add_vec(self.data[xy])
self.remove_vec(xy[0],xy[1])
XY0 = list(self.data.keys())[0]
self.properties.length_unit = self.data[XY0].properties.length_unit
self.properties.length_scale_to_meter = self.data[XY0].properties.length_scale_to_meter
self.properties.time_unit = self.data[XY0].properties.time_unit
self.properties.time_scale_to_seconds = self.data[XY0].properties.time_scale_to_seconds
self.properties.velocity_units = self.data[XY0].properties.velocity_units
def remove_vec(self,X,Y,vector=None):
if vector is not None:
del self.data[vector.X,vector.Y]
else:
del self.data[X,Y]
def return_vel(self,x,y):
u = self.data[x,y].U
v = self.data[x,y].V
return u,v
def return_n_closest_neighbors(self,x,y,n=4):
X,Y = self.return_grid()
dist = np.sqrt((X-x)**2+(Y-y)**2)
n_closest_neighbors = [ [(X[ind],Y[ind]),dist[ind]] for ind in dist.argsort()[:n]]
return n_closest_neighbors
def return_closest_neighbors_radius(self,x,y,radius):
X,Y = self.return_grid()
dist = np.sqrt((X-x)**2+(Y-y)**2)
indecies = np.where(dist<radius)
closest_neighbors = [[(X[indecies[0][i]],Y[indecies[0][i]]),dist[indecies[0][i]]] for i in range(len(indecies[0]))]
return closest_neighbors
def return_grid(self):
XY = list(self.data.keys())
X,Y = zip(*XY)
X = np.array(X)
Y = np.array(Y)
return X,Y
def return_all_velocities(self):
XY = list(self.data.keys())
U = np.array([self.data[xy[0],xy[1]].U for xy in XY])
V = np.array([self.data[xy[0],xy[1]].V for xy in XY])
return U,V
def sub_average(self):
XY = list(self.data.keys())
umean,ustd,vmean,vstd = self.mean_velocity()
for i in range(len(XY)):
self.data[XY[i]].U = self.data[XY[i]].U - umean
self.data[XY[i]].V = self.data[XY[i]].V - vmean
def create_mesh_grid(self):
X,Y = self.return_grid()
U,V = self.return_all_velocities()
X_mesh_grid = sorted(list(set(X)))
Y_mesh_grid = sorted(list(set(Y)))
X_mesh_grid,Y_mesh_grid = np.meshgrid(X_mesh_grid,Y_mesh_grid)
U_mesh_grid = np.empty(X_mesh_grid.shape)
U_mesh_grid.fill(np.nan)
V_mesh_grid = np.empty(X_mesh_grid.shape)
V_mesh_grid.fill(np.nan)
for vec_ind in range(len(X)):
x = X[vec_ind]
y = Y[vec_ind]
col = np.array(np.where(X_mesh_grid[0,:]==x))[0,0]
row = np.array(np.where(Y_mesh_grid[:,0]==y))[0,0]
U_mesh_grid[row,col] = U[vec_ind]
V_mesh_grid[row,col] = V[vec_ind]
return X_mesh_grid,Y_mesh_grid[::-1],U_mesh_grid[::-1],V_mesh_grid[::-1]
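# Note: the returned Y, U and V grids are flipped vertically, so row 0 corresponds to the
# largest Y value (image-style orientation).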
def s2n_filter(self,threshold):
XY = list(self.data.keys())
for xy in XY:
if self.data[xy].S2N < threshold:
self.move_to_filtered(self.data[xy])
def hist_filter(self,percentage):
def TrueXor(*args):
return sum(args) == 1
hist_u,hist_v,hist2d = self.velocity_histogram()
#strech boundry edges
hist_u[1][0] = hist_u[1][0]-1
hist_u[1][-1] = hist_u[1][-1]+1
hist_v[1][0] = hist_v[1][0]-1
hist_v[1][-1] = hist_v[1][-1]+1
hist2d[1][0] = hist2d[1][0]-1
hist2d[1][-1] = hist2d[1][-1]+1
hist2d[2][0] = hist2d[2][0]-1
hist2d[2][-1] = hist2d[2][-1]+1
XY = list(self.data.keys())
number_of_vectors = len(XY)
for xy in XY:
u = self.data[xy].U
v = self.data[xy].V
if np.isfinite(u) and not np.isfinite(v):
if hist_u[0][np.digitize(u,hist_u[1])-1] / number_of_vectors > percentage/100:
u_iter,v_iter = self.inverse_distance_interpolation(xy[0],xy[1])
if np.isfinite(v_iter):
self.data[xy].V = v_iter
v = v_iter
else:
self.move_to_filtered(self.data[xy])
if np.isfinite(v) and not np.isfinite(u):
if hist_v[0][np.digitize(v,hist_v[1])-1] / number_of_vectors > percentage/100:
u_iter,v_iter = self.inverse_distance_interpolation(xy[0],xy[1])
if np.isfinite(u_iter):
self.data[xy].U = u_iter
u = u_iter
else:
self.move_to_filtered(self.data[xy])
if np.isfinite(v) and np.isfinite(u):
U_histpos = np.digitize(u,hist2d[1])-1
V_histpos = np.digitize(v,hist2d[2])-1
if hist2d[0][U_histpos,V_histpos] / number_of_vectors < percentage/100:
self.move_to_filtered(self.data[xy])
def Z_filter(self,threshold,neighbors=4,power=1):
XY = list(self.data.keys())
for xy in XY:
u = self.data[xy].U
v = self.data[xy].V
closest_neighbors = self.return_n_closest_neighbors(self.data[xy].X,self.data[xy].Y,neighbors+1)[1:]
neighbor_pos , dis = zip(*closest_neighbors)
weights = [(1/d)**power for d in dis]
U,V = zip(*[self.return_vel(pos[0],pos[1]) for pos in neighbor_pos])
median_U = weighted_median(U,weights)
median_V = weighted_median(V,weights)
median_absolute_deviation_U = weighted_median([np.abs(u_neighbor - median_U) for u_neighbor in U],weights)
median_absolute_deviation_V = weighted_median([np.abs(v_neighbor - median_V) for v_neighbor in V],weights)
if 0.6745*(u - median_U) / max(median_absolute_deviation_U,0.01) > threshold:
self.move_to_filtered(self.data[xy])
continue
if 0.6745*(v - median_V) / max(median_absolute_deviation_V,0.01) > threshold:
self.move_to_filtered(self.data[xy])
continue
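# Note: 0.6745 is the consistency constant that scales a median absolute deviation to be
# comparable with a standard deviation under normality; max(..., 0.01) guards against
# division by zero when all neighbours agree.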
def max_arg_filter(self,U_bound,V_bound):
XY = list(self.data.keys())
for xy in XY:
U_check = True
V_check = True
if self.data[xy].U > U_bound[1] or self.data[xy].U < U_bound[0]:
U_check=False
if self.data[xy].V > V_bound[1] or self.data[xy].V < V_bound[0]:
V_check=False
if U_check and not V_check:
u_itr,v_itr = self.inverse_distance_interpolation(xy[0],xy[1])
self.data[xy].V = v_itr
elif V_check and not U_check:
u_itr,v_itr = self.inverse_distance_interpolation(xy[0],xy[1])
self.data[xy].U = u_itr
elif not V_check and not U_check:
self.move_to_filtered(self.data[xy])
def mean_velocity(self):
U,V = self.return_all_velocities()
return np.nanmean(U),np.nanstd(U),np.nanmean(V),np.nanstd(V)
def velocity_histogram(self,bins=10):
def remove_nans(u,v):
u = list(u)
v = list(v)
nan_index=[]
for i in range(len(u)):
if not np.isfinite(u[i]) or not np.isfinite(v[i]):
nan_index.append(i)
for index in sorted(nan_index, reverse=True):
del u[index]
del v[index]
return np.array(u),np.array(v)
U,V = self.return_all_velocities()
hist_U = np.histogram(U[np.isfinite(U)],bins)
hist_V = np.histogram(V[np.isfinite(V)],bins)
U,V = remove_nans(U,V)
hist2d = np.histogram2d(U, V, bins)
return hist_U,hist_V,hist2d
def extract_area(self,x_boundry,y_boundry):
area = field(self.properties)
X,Y = self.return_grid()
for i in range(len(X)):
if x_boundry[0]<=X[i]<=x_boundry[1] and y_boundry[0]<=Y[i]<=y_boundry[1]:
area.add_vec(self.data[X[i],Y[i]])
return area
def vel_gradients(self):
X,Y,U,V = self.create_mesh_grid()
Udx,Udy = np.gradient(U)
Vdx,Vdy = np.gradient(V)
return X,Y,Udx,Udy,Vdx,Vdy
def vel_differntial(self):
def least_square_diff(field,grid,axis=0):
if axis==0:
shape = field.shape
dif = np.zeros(shape)
for row in range(shape[0]):
for col in range(2,shape[1]-2):
rs = 2*field[row,col+2]+field[row,col+1]
ls = -field[row,col-1]-2*field[row,col-2]
dis = 10*(grid[row,col+1]-grid[row,col])
dif[row,col] = (rs+ls)/dis
#dif[row,col] = (2*field[row,col+2]+field[row,col+1]-field[row,col-1]-2*field[row,col-2])/10*(grid[row,col+1]-grid[row,col])
return dif
elif axis==1:
shape = field.shape
dif = np.zeros(shape)
for row in range(2,shape[0]-2):
for col in range(shape[1]):
us = 2*field[row-2,col]+field[row-1,col]
ds = -field[row+1,col]-2*field[row+2,col]
dis = 10*(grid[row-1,col]-grid[row,col])
dif[row,col] = (us+ds)/dis
#dif[row,col] = (2*field[row-2,col]+field[row-1,col]-field[row+1,col]-2*field[row+2,col])/10*(grid[row-1,col]-grid[row,col])
return dif
X,Y,U,V = self.create_mesh_grid()
dU_x = least_square_diff(U,X)
dU_y = least_square_diff(U,Y,axis=1)
dV_x = least_square_diff(V,X)
dV_y = least_square_diff(V,Y,axis=1)
return dU_x,dU_y,dV_x,dV_y
def profile(self,axis='y'):
X,Y,U,V = self.create_mesh_grid()
if axis=='y' or axis=='Y':
U_profile = np.nanmean(U,axis=1)[::-1]
V_profile = np.nanmean(V,axis=1)[::-1]
Y_profile = Y[:,0]
return U_profile,V_profile,Y_profile
else:
U_profile = np.nanmean(U,axis=0)[::-1]
V_profile = np.nanmean(V,axis=0)[::-1]
X_profile = X[0,:]
return U_profile,V_profile,X_profile
def vorticity_field(self):
dU_x,dU_y,dV_x,dV_y = self.vel_differntial()
vort = dV_x-dU_y
return vort[2:-2,2:-2]
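# Note: vorticity is computed as dV/dx - dU/dy; the outer two rows and columns are trimmed
# because the five-point least-squares stencil in vel_differntial leaves them unfilled.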
def inverse_distance_interpolation(self,x,y,number_of_neighbors=5,radius=None,inverse_power=2):
def weigted_velocity(neighbors_vels,weights):
weight_sum=0
weigted_vels=[]
for i in range(len(neighbors_vels)):
if not np.isnan(neighbors_vels[i]):
weight_sum += weights[i]
weigted_vels.append(weights[i]*neighbors_vels[i])
return np.nansum(weigted_vels)/weight_sum
if self.check_if_grid_point_exists(x,y):
if radius is not None:
indecies,distances = zip(*self.return_closest_neighbors_radius(x,y,radius))
else:
indecies,distances = zip(*self.return_n_closest_neighbors(x,y,n=number_of_neighbors+1))
weights = list(np.array(distances[1:])**-float(inverse_power))
neighbors_vel = [self.return_vel(ind[0],ind[1]) for ind in indecies[1:]]
u_vels,v_vels = zip(*neighbors_vel)
inter_u = weigted_velocity(u_vels,weights)
inter_v = weigted_velocity(v_vels,weights)
return inter_u,inter_v
else:
if radius is not None:
indecies,distances = zip(*self.return_closest_neighbors_radius(x,y,radius))
else:
indecies,distances = zip(*self.return_n_closest_neighbors(x,y,n=number_of_neighbors))
weights = list(np.array(distances)**-float(inverse_power))
neighbors_vel = [self.return_vel(ind[0],ind[1]) for ind in indecies]
u_vels,v_vels = zip(*neighbors_vel)
inter_u = weigted_velocity(u_vels,weights)
inter_v = weigted_velocity(v_vels,weights)
return inter_u,inter_v
def interpf(self):
X,Y = self.return_grid()
for ind in range(X.shape[0]):
pos = (X[ind],Y[ind])
u_cur,v_cur = self.return_vel(pos[0],pos[1])
if np.isnan(u_cur) and np.isnan(v_cur):