| prompt (string, 19–879k chars) | completion (string, 3–53.8k chars) | api (string, 8–59 chars) |
|---|---|---|
# -*- coding: utf-8 -*-
"""
Created on Mon Apr 13 13:23:05 2020
@author: kvstr
"""
import numpy as np
import scipy.sparse as sparse
from scipy.sparse import linalg
from scipy.linalg import solve_banded
from scipy.interpolate import griddata
import time
from numba import njit
from numba import prange
import matplotlib.pyplot as plt
import vtk
from vtk.util.numpy_support import vtk_to_numpy
from skimage.measure import block_reduce
# %% Continuity
# @njit(parallel=True)
def Continuity(u, v, x, y):
"""
Calculation of the continuity error in a 2D flow field
Parameters
----------
u: MxN Array
u-velocity matrix
v: MxN Array
v-velocity matrix
x: Nx1 vector
x-coordinates of points
y: Mx1 vector
y-coordinates of points
Returns
-------
error : NxM array
Continuity error on each grid point
"""
if not u.shape == v.shape:
print('Fields have different sizes')
return None
else:
error = abs(np.divide((u[:-1, 1:] - u[:-1, :-1]),\
np.gradient(x, axis=1)[:-1, :-1])\
+np.divide(v[1:, :-1] - v[:-1, :-1],\
np.gradient(y, axis=0)[:-1, :-1]))
error = np.pad(error, ((0, 1),), constant_values=0)
return error
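# --- Illustrative usage sketch (added for clarification; not part of the original source). ---
# Continuity expects 2-D coordinate arrays because of the np.gradient(..., axis=...) calls
# above, so the example builds them with np.meshgrid. A uniform flow has zero divergence,
# so the returned error should be zero everywhere. The helper name is ours.
def _continuity_example():
    xv = np.linspace(0.0, 1.0, 6)
    yv = np.linspace(0.0, 1.0, 5)
    X, Y = np.meshgrid(xv, yv)        # coordinate arrays of shape (5, 6)
    U = np.ones_like(X)               # constant u-velocity
    V = np.zeros_like(X)              # zero v-velocity
    return Continuity(U, V, X, Y)     # expect an array of zeros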
# %% Momentum
# @njit # (parallel=True)
def Momentum(vort, u, v, dx, dy):
"""
Calculation of the momentum error in a 2D flow field
Parameters
----------
vort : NxM array
Vorticity value on each grid point.
u : NxM array
u-velocity value on each grid point.
v : NxM array
v-velocity value on each grid point.
dx : Mx1 Array
x-distance cell centre-face.
dy : Nx1
y-distance cell centre-face.
Returns
-------
error: NxM array
Momentum error on each grid point.
"""
nu = 1.05e-6
if not (np.shape(vort) == np.shape(u) and np.shape(vort) == np.shape(v)):
print('Momentum: Shape mismatch')
return None
else:
# Vorticity Gradient x
vortx = np.zeros_like(vort)
vortxx = np.zeros_like(vortx)
vortx[:, -1] = np.divide(vort[:, -1]-vort[:, -2], dx[-1]+dx[-2])
vortx[:, 0] = np.divide(vort[:, 1]-vort[:, 0], dx[1]+dx[0])
for i in range(1, vort.shape[1]-1):
            # parentheses around (2*dx[i]) so the face difference is divided by the cell width
            vortx[:, i] = (np.divide(vort[:, i+1]*dx[i] - vort[:, i]*dx[i+1],
                                     dx[i]+dx[i+1])
                           - np.divide(vort[:, i]*dx[i-1] - vort[:, i-1]*dx[i],
                                       dx[i]+dx[i-1])) / (2*dx[i])
vortxx[:, -1] = np.divide(vortx[:, -1]-vortx[:, -2], dx[-1]+dx[-2])
vortxx[:, 0] = np.divide(vortx[:, 1]-vortx[:, 0], dx[0]+dx[1])
for i in range(1, vortx.shape[1]-1):
            vortxx[:, i] = (np.divide(vortx[:, i+1]*dx[i] - vortx[:, i]*dx[i+1],
                                      dx[i]+dx[i+1])
                            - np.divide(vortx[:, i]*dx[i-1] - vortx[:, i-1]*dx[i],
                                        dx[i]+dx[i-1])) / (2*dx[i])
# Vorticity Gradient y
vorty = np.zeros_like(vort)
vortyy = np.zeros_like(vortx)
vorty[-1, :] = np.divide(vort[-1, :]-vort[-2, :], dy[-1]+dy[-2])
vorty[0, :] = np.divide(vort[1, :]-vort[0, :], dy[0]+dy[1])
for i in range(1, vort.shape[0]-1):
            vorty[i, :] = (np.divide(vort[i+1, :]*dy[i] - vort[i, :]*dy[i+1],
                                     dy[i]+dy[i+1])
                           - np.divide(vort[i, :]*dy[i-1] - vort[i-1, :]*dy[i],
                                       dy[i]+dy[i-1])) / (2*dy[i])
vortyy[-1, :] = np.divide(vorty[-1, :]-vorty[-2, :], dy[-1]+dy[-2])
vortyy[0, :] = np.divide(vorty[1, :]-vorty[0, :], dy[0]+dy[1])
for i in range(1, vorty.shape[0]-1):
            vortyy[i, :] = (np.divide(vorty[i+1, :]*dy[i] - vorty[i, :]*dy[i+1],
                                      dy[i]+dy[i+1])
                            - np.divide(vorty[i, :]*dy[i-1] - vorty[i-1, :]*dy[i],
                                        dy[i]+dy[i-1])) / (2*dy[i])
t1 = np.multiply(u, vortx)
t2 = np.multiply(v, vorty)
t3 = nu * (vortxx+vortyy)
error = abs(np.subtract(t1+t2, t3))
return error
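# --- Illustrative usage sketch (added for clarification; not part of the original source). ---
# With a zero vorticity field every derivative term vanishes, so Momentum should return
# an array of zeros; dx and dy are the half cell sizes described in the docstring.
# The helper name is ours.
def _momentum_example():
    vort = np.zeros((4, 5))
    u = np.ones_like(vort)
    v = np.zeros_like(vort)
    dx = np.full(5, 0.5)              # one entry per column of vort
    dy = np.full(4, 0.5)              # one entry per row of vort
    return Momentum(vort, u, v, dx, dy)   # expect all zeros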
# %% CellSizes
def CellSizes(x, y):
"""
Calculates the distance from cell centre to cell face in either direction
Parameters
----------
x : Mx1 Array
x-Coordinates of cell centers.
y : Nx1 Array
y-Coordinates of cell centers.
Returns
-------
dx : Mx1 Array
x-distance cell centre-face.
dy : Nx1
y-distance cell centre-face.
"""
    # Calculating cell sizes in x-direction
first = np.where(np.gradient(x) == 1)[0][0]
last = np.where(np.gradient(x) == 1)[0][-1]
dx = np.ones_like(x)*.5
for i in np.linspace(first-1, 0, first, dtype=int):
dx[i] = x[i+1] - x[i] - dx[i+1]
for i in range(last, x.shape[0]):
dx[i] = x[i] - x[i-1] - dx[i-1]
# Calculating cell sizes in y-direction
first = np.where(np.gradient(y) == 1)[0][0]
last = np.where(np.gradient(y) == 1)[0][-1]
dy = np.ones_like(y)*.5
for i in np.linspace(first-1, 0, first, dtype=int):
dy[i] = y[i+1] - y[i] - dy[i+1]
for i in range(last, y.shape[0]):
dy[i] = y[i] - y[i-1] -dy[i-1]
return dx, dy
# %% Vorticity
def Vorticity(u, v, dx, dy):
"""
Calculates the Vorticity from velocity Components and Cell sizes
Parameters
----------
u : NxM Array
u-velocity at each grid point.
v : NxM Array
v-velocity at each grid point.
dx : Mx1 Array
Half cell sizes in x-direction.
dy : Nx1 Array
Half cell sizes in y-direction.
Returns
-------
vort : NxM Array
Vorticity at each grid point.
"""
# Gradient v-velocity
dvdx = np.zeros_like(v)
dvdx[:, 0] = np.divide(v[:, 1] - v[:, 0], dx[0]+dx[1])
dvdx[:, -1] = | np.divide(v[:, -1]-v[:, -2], dx[-1]+dx[-2]) | numpy.divide |
#!/usr/bin/env python3
"""Example 6.2, page 125"""
import copy
import multiprocessing as mp
import numpy as np
import matplotlib.pyplot as plt
# Create graph: vertices are states, edges are actions (transitions)
STATE_ACTIONS = {'left': ('left', 'left'),
'a': ('left', 'b'),
'b': ('a', 'c'),
'c': ('b', 'd'),
'd': ('c', 'e'),
'e': ('d', 'right'),
'right': ('right', 'right')}
# List of states
STATES = list(STATE_ACTIONS.keys())
TERMINALS = 'left', 'right'
# Transition probabilities
PROBABILITIES = np.full((len(STATES), 2), [0.5, 0.5])
# State values (probability to reach 'Right' state)
INIT_VALUES = np.full(len(STATES), 0.5)
np.put(INIT_VALUES, [0, -1], 0)
TRUE_VALUES = np.arange(1, 6) / 6
# Reward for each action
REWARDS = np.zeros((len(STATES), 2), dtype=int)
REWARDS[5, 1] = 1
class RandomWalk:
"""Represents Markov reward process defined by arbitrary graph"""
def __init__(self, graph, values, probabilities, rewards, terminals):
"""Map states to numebers"""
state_names = list(graph.keys())
state_to_index = dict([(state, idx) for idx, state in enumerate(state_names)])
# left, a, b, c, d, e, right -> 0, 1, 2, 3, 4, 5, 6
self.states = [state_to_index[state] for state in state_names]
self.terminals = [state_to_index[state] for state in state_names if state in terminals]
# (left, b), ... -> [0, 2], ...
self.actions = list()
for actions in graph.values():
action_idxs = [state_to_index[state] for state in actions]
self.actions.append(action_idxs)
self.values = copy.copy(values)
self.probabilities = probabilities
self.rewards = rewards
def get_true_values(self):
true_values = copy.copy(INIT_VALUES)
        updated_values = np.copy(true_values)
        updated_values[1:-1] += 1.  # offset so the convergence loop is entered on the first pass
while sum(abs(true_values - updated_values)) > 1e-5:
for state in self.states[1: -1]:
true_values[state] = updated_values[state]
next_values = np.array([updated_values[self.actions[state][0]], updated_values[self.actions[state][1]]])
updated_values[state] = sum(self.probabilities[state] * (next_values + self.rewards[state]))
return true_values
def step(self, state):
"""Single step of the Markov reward process"""
# Choose next state index
next_state_idxs = range(len(self.actions[state]))
next_state_idx = np.random.choice(next_state_idxs, p=self.probabilities[state])
# Get next state and reward
next_state = self.actions[state][next_state_idx]
reward = self.rewards[state][next_state_idx]
return next_state, reward
def generate_episode(self, state=3):
"""Generates sequences of states and rewards, default starting state is C.
Returns pairs (S_0, R_1), (S_1, R_2), ... . Terminal state is omitted"""
state_sequence = list()
reward_sequence = list()
while state not in self.terminals:
state_sequence.append(state)
state, reward = self.step(state)
reward_sequence.append(reward)
return state_sequence, reward_sequence
def mc_episode_estimate(self, state=3, alpha=0.1):
"""Estimate single episode" with Monte-Carlo method"""
state_sequence, reward_sequence = self.generate_episode(state)
return_sequence = np.cumsum(reward_sequence[::-1])[::-1]
for state, _return in zip(state_sequence, return_sequence):
self.values[state] += alpha * (_return - self.values[state])
return self.values
def td_episode_estimate(self, state=3, alpha=0.1):
"""Estimate single episode" with temporal-difference method"""
while state not in self.terminals:
next_state, reward = self.step(state)
self.values[state] += alpha * (reward + self.values[next_state] - self.values[state])
state = next_state
return self.values
@staticmethod
def mc_batch_episode_increment(state_seq, reward_seq, values, value_increments):
return_sequence = | np.cumsum(reward_seq[::-1]) | numpy.cumsum |
# --------------
# Importing header files
import numpy as np
import warnings
warnings.filterwarnings('ignore')
#New record
new_record=[[50, 9, 4, 1, 0, 0, 40, 0]]
new_records = np.array(new_record)
#Reading file
data = np.genfromtxt(path, delimiter=",", skip_header=1)
#Code starts here
census = np.concatenate((data, new_records))
age = census[:,0]
max_age = np.max(age)
min_age = np.min(age)
age_mean = round(np.mean(age),2)
age_std = round(np.std(age),2)
print(max_age)
print(min_age)
print(age_mean)
print(age_std)
race0,race1,race2,race3,race4 = [], [], [], [], []
for i in census:
if(i[2] == 0):
race0.append(i)
elif(i[2] == 1):
race1.append(i)
elif(i[2] == 2):
race2.append(i)
elif(i[2] == 3):
race3.append(i)
elif(i[2] == 4):
race4.append(i)
race_0 = np.array(race0)
race_1 = np.array(race1)
race_2 = | np.array(race2) | numpy.array |
'''
Testing of the zoo
'''
import pytest
import numpy as np
np.random.seed(100)
from freelunch.zoo import animal, particle, krill
from freelunch.util import BadObjectiveFunctionScores, InvalidSolutionUpdate
animals = [particle, krill]
def test_animal():
location_1 = np.array([1,1,1])
fitness_1 = 2
location_2 = np.array([0,0,0])
fitness_2 = 0
location_3 = np.array([2,2,2])
fitness_3 = 10
friend = animal(dna=location_1, fitness=fitness_1)
assert(np.all(friend.dna == location_1))
assert(friend.fitness == 2)
assert(np.all(friend.best_pos == location_1))
assert(friend.best == 2)
friend.move(location_2, fitness_2)
assert(np.all(friend.dna == location_2))
assert(friend.fitness == 0)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
friend.move(location_3, fitness_3)
assert(np.all(friend.dna == location_3))
assert(friend.fitness == 10)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
with pytest.raises(ValueError):
friend.move(location_3, np.inf)
with pytest.raises(ValueError):
friend.move(location_3, np.nan)
with pytest.raises(ValueError):
friend.move(location_3, [])
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.inf,1,1]), 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.nan,1,1]), 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([1+2j,1,1]), 1)
friend = animal(dna=location_1, fitness=fitness_1)
friend2 = animal(dna=location_2, fitness=fitness_2)
assert(friend2 < friend)
assert(friend > friend2)
friend2._fitness = None # Or will throw error
assert(friend < friend2)
assert(not (friend2 < friend))
assert(friend2 > friend)
assert(not (friend > friend2))
friend._fitness = None # Or will throw error
with pytest.raises(BadObjectiveFunctionScores):
friend < friend2
with pytest.raises(BadObjectiveFunctionScores):
friend > friend2
@pytest.mark.parametrize('creature', animals)
def test_particle(creature):
location_1 = np.array([1,1,1])
vel = np.random.randn(1,3)
fitness_1 = 2
location_2 = np.array([0,0,0])
fitness_2 = 0
location_3 = np.array([2,2,2])
fitness_3 = 10
friend = creature(pos=location_1, vel=vel, fitness=fitness_1)
assert(np.all(friend.dna == location_1))
assert(friend.fitness == 2)
assert(np.all(friend.best_pos == location_1))
assert(friend.best == 2)
friend.move(location_2, vel, fitness_2)
assert(np.all(friend.dna == location_2))
assert(friend.fitness == 0)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
friend.move(location_3, vel, fitness_3)
assert(np.all(friend.dna == location_3))
assert(friend.fitness == 10)
assert(np.all(friend.best_pos == location_2))
assert(friend.best == 0)
with pytest.raises(ValueError):
friend.move(location_3, vel, np.inf)
with pytest.raises(ValueError):
friend.move(location_3, vel, np.nan)
with pytest.raises(ValueError):
friend.move(location_3,vel, [])
with pytest.raises(InvalidSolutionUpdate):
friend.move(np.array([np.inf,1,1]), vel, 1)
with pytest.raises(InvalidSolutionUpdate):
friend.move( | np.array([np.nan,1,1]) | numpy.array |
#-------------------------------------------
#
# FILENAME: IFI_compare_RadIA_PIREP.py
#
# CREATED: 12.15.2021 - dserke
#
# PURPOSE: 1) ingest matched RadIA and PIREPs csv file, 2) manipulate and plot the data
#
#-------------------------------------------
#-------------------------------------------
# IMPORT LIBRARIES
#-------------------------------------------
import pandas as pd
import geopandas as gpd
import numpy as np
from numpy import *
import csv
import wradlib as wrl
import matplotlib as mpl
from matplotlib import pyplot as plt
from matplotlib.colors import ListedColormap
from mpl_toolkits.axes_grid1 import make_axes_locatable
import warnings
warnings.filterwarnings('ignore')
#-------------------------------------------
# DEFINE INPUT PATHS
#-------------------------------------------
# ... define raduis (r) of earth in km
r_km = 6378.1
ft_TO_m = 0.3048
nbins = 1832.0
range_res_m = 250.0
bw_deg = 1.0 # half power beam width (deg)
vol_deg = [0.5, 1.5, 2.5, 3.5, 4.5]
lat_KBBX = 39.4969580
lon_KBBX = -121.6316557
alt_KBBX_m = 221.0 * ft_TO_m
sitecoords = (lon_KBBX, lat_KBBX, alt_KBBX_m)
# ... define base path dir
base_path_dir = '/d1/serke/projects/'
# ... paths to Rv3 INTs and PIRP csv data files
Rv3PIR_dir = base_path_dir+'RADIA_FAA/data/RadIAv3PIREPs/'
# ... names of Rv3 INTs and PIRP csv data files
# ... NOTE: currently, these files just represent ICICLE F17
Rv3PIR_FZDZ_name = 'exfout_MrmsPostProcessor_fzdz_interest.csv'
Rv3PIR_SSLW_name = 'exfout_MrmsPostProcessor_slw_interest.csv'
Rv3PIR_PIRP_name = 'exmatch_MrmsPostProcessor.csv'
# ... path to NEXRAD site location csv
nexrad_sites_dir = base_path_dir+'case_studies/SNOWIE_2017/data/RadIA_data/nexrad_site_data/'
nexrad_sites_name = 'nexrad_site_whdr.csv'
#-------------------------------------------
# LOAD INPUT DATASETS
#-------------------------------------------
# ... radar data into radar object
Rv3PIR_FZDZ = pd.read_csv(Rv3PIR_dir+Rv3PIR_FZDZ_name, header=0, index_col=0)
Rv3PIR_SSLW = pd.read_csv(Rv3PIR_dir+Rv3PIR_SSLW_name, header=0, index_col=0)
Rv3PIR_PIRP = pd.read_csv(Rv3PIR_dir+Rv3PIR_PIRP_name, header=0, index_col=0)
# ... radar site location dataset
nexrad_sites = pd.read_csv(nexrad_sites_dir+nexrad_sites_name, header=0, index_col=1)
# ... low res countries dataset
countries = gpd.read_file(gpd.datasets.get_path("naturalearth_lowres"))
#-------------------------------------------
# MANIPULATE INPUT DATA
#-------------------------------------------
# Data from full month Feb2019 ICICLE have a few missing RadIA matchups (rows)
# ... find missing integers in RadIA FZDZ/SSLW lists
def find_missing(input):
return [x for x in range(input[0], input[-1]+1)
if x not in input]
missing_SSLW_inds = find_missing(Rv3PIR_SSLW.index)
missing_FZDZ_inds = find_missing(Rv3PIR_FZDZ.index)
# ... exclude the inds missing from FZDZ/SSLW dfs from the PIRP df
Rv3PIR_PIRP.drop(Rv3PIR_PIRP.index[[missing_SSLW_inds]], inplace=True)
# ... exclude ind 0 from the PIRP df
#Rv3PIR_PIRP.drop(Rv3PIR_PIRP.index[[0]], inplace=True)
Rv3PIR_FZDZ.index = Rv3PIR_FZDZ.index-1
Rv3PIR_SSLW.index = Rv3PIR_SSLW.index-1
# ... define function for distance between two lat/lon points
def haversine_distance(lat1, lon1, lat2, lon2):
phi1 = np.radians(lat1)
phi2 = np.radians(lat2)
delta_phi = np.radians(lat2 - lat1)
delta_lambda = np.radians(lon2 - lon1)
a = np.sin(delta_phi / 2)**2 + np.cos(phi1) * np.cos(phi2) * np.sin(delta_lambda / 2)**2
res = r_km * (2 * np.arctan2(np.sqrt(a), np.sqrt(1 - a)))
return np.round(res, 2)
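# --- Illustrative check (added for clarification; not part of the original script). ---
# One degree of longitude at ~39.5 deg latitude spans roughly 86 km, which gives a quick
# sanity check on the great-circle formula above. The helper name is ours.
def _haversine_sanity_check():
    return haversine_distance(39.5, -121.6, 39.5, -120.6)   # ~85.9 km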
# ... calculate distance between Rv3PIR_PIRP lon/lat and nexrad_sites LAT_DEG/LON_DEG
nexrad_distfromPIRPmin_km = []
for index_PIRP, row_PIRP in enumerate(range(Rv3PIR_PIRP.shape[0])):
dist_from_nexrads_km = []
for index, row in enumerate(range(nexrad_sites.shape[0])):
dist_from_nexrads_km.append(haversine_distance(Rv3PIR_PIRP[' lat'][index_PIRP], Rv3PIR_PIRP[' lon'][index_PIRP], nexrad_sites[' LAT_DEG'][index], nexrad_sites[' LON_DEG'][index]))
#print(index, dist_from_nexrads_km[index])
# ... add DistFromPIRP to sites df
nexrad_sites['DistFromPIRP'] = dist_from_nexrads_km
# ... find min dist of PIRP from all sites and save to list
nexrad_distfromPIRPmin_km.append(nexrad_sites['DistFromPIRP'].min())
# ... concat closest nexrad site dist to PIRP to Rv3PIR_PIRP df
Rv3PIR_PIRP['Distfromnexrad_min_km'] = nexrad_distfromPIRPmin_km
# ... concatenate Rv3 algo INT outputs and PIRP input pandas dfs into one df
Rv3PIR_ALL = pd.concat([Rv3PIR_FZDZ, Rv3PIR_SSLW, Rv3PIR_PIRP], axis=1)
#Rv3PIR_MAXint = Rv3PIR_ALL[[' fzdz_interestmax', ' slw_interestmax']]
# ... create new Rv3/PIRP pandas df containing only Rv3 INT=NaN values
Rv3PIR_RNAN = Rv3PIR_ALL.loc[ (Rv3PIR_ALL[' fzdz_interestmax'].astype(np.float).isna()) & (Rv3PIR_ALL[' slw_interestmax'].astype(np.float).isna()) ]
# ... create new Rv3/PIRP pandas df containing only (Rv3 INT=NaN & PIRP sev > 0) values
Rv3PIR_RNAN_Sg0 = Rv3PIR_ALL.loc[ (Rv3PIR_ALL[' fzdz_interestmax'].astype(np.float).isna()) & (Rv3PIR_ALL[' slw_interestmax'].astype(np.float).isna()) & (Rv3PIR_ALL[' iint1'] > 0) ]
# ... create new Rv3/PIRP pandas df containing only (Rv3 INT=NaN & PIRP sev > 0) values
Rv3PIR_RVAL_Sg0 = Rv3PIR_ALL.loc[ ~(Rv3PIR_ALL[' fzdz_interestmax'].astype(np.float).isna()) & ~(Rv3PIR_ALL[' slw_interestmax'].astype(np.float).isna()) & (Rv3PIR_ALL[' iint1'] > 0) ]
# ... indexing/filtering of dataframe values
PIRP_tot_num = np.array(Rv3PIR_ALL[' iint1'])[np.array(Rv3PIR_ALL[' iint1'])].shape[0]
PIRP_pos_num = np.array(Rv3PIR_ALL[' iint1'])[np.array(Rv3PIR_ALL[' iint1']) > 0.0].shape[0]
PIRP_neg_num = np.array(Rv3PIR_ALL[' iint1'])[np.array(Rv3PIR_ALL[' iint1']) < 0.0].shape[0]
SSLW_pos_num = (np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' slw_interestmax']).astype(np.float) >= 0.5]).shape[0]
SSLW_neg_num = (np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' slw_interestmax']).astype(np.float) < 0.5]).shape[0]
SSLW_neg = (np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' slw_interestmax']).astype(np.float) < 0.5])
FZDZ = (np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float) >= 0.0])
FZDZ_pos_num = (np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float) >= 0.5]).shape[0]
FZDZ_neg_num = (np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float) < 0.5]).shape[0]
FZDZ_neg = [np.array(Rv3PIR_ALL[' iint1'])[(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float) < 0.5]]
Rv3_pos_ind = [(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float) >= 0.5] or [(Rv3PIR_ALL[' slw_interestmax']).astype(np.float) >= 0.5]
Rv3_neg_ind = [(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float) < 0.5] and [(Rv3PIR_ALL[' slw_interestmax']).astype(np.float) < 0.5]
Rv3_pos_num = sum(sum(Rv3_pos_ind))
Rv3_neg_num = sum(sum(Rv3_neg_ind))
ranges = np.arange(nbins) * range_res_m
#-------------------------------------------
# PLOTS
#-------------------------------------------
#def mscatter(x,y,ax=None, m=None, **kw):
# import matplotlib.markers as mmarkers
# if not ax: ax=plt.gca()
# sc = ax.scatter(x,y,**kw)
# if (m is not None) and (len(m)==len(x)):
# paths = []
# for marker in m:
# if isinstance(marker, mmarkers.MarkerStyle):
# marker_obj = marker
# else:
# marker_obj = mmarkers.MarkerStyle(marker)
# path = marker_obj.get_path().transformed(
# marker_obj.get_transform())
# paths.append(path)
# sc.set_paths(paths)
# return sc
cMap = 'viridis'
# SCATTER PLOTS
# ... for R-v3 FZDZ/SSLW ints with PIREP sev color-coded points
fig, ax = plt.subplots(figsize = (15, 15))
m = ['*','o','o','o','o','o','o','o','o','o']
#scatter = mscatter(np.array(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float), np.array(Rv3PIR_ALL[' slw_interestmax']).astype(np.float), c=np.array(Rv3PIR_ALL[' iint1']).astype(np.float), s=75, m=m, ax=ax)
#plt.show()
ax.scatter(np.array(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float), np.array(Rv3PIR_ALL[' slw_interestmax']).astype(np.float), c=np.array(Rv3PIR_ALL[' iint1']).astype(np.float), cmap=cMap, vmin=-1, vmax=8, s=75, marker='o')
ax.grid(color='grey', linestyle='--', linewidth=1)
ax.set_title('RadIA-v3 INT(MAX) near PIREPs for ICICLE F17', fontsize=20)
ax.text(0.02, 0.61, 'N(P-TOT) = '+str(PIRP_tot_num), c='black', fontsize = 20)
ax.text(0.02, 0.56, 'N(P-POS) = '+str(PIRP_pos_num), c='green', fontsize = 20)
ax.text(0.07, 0.51, 'N(R-G50) = '+str(sum(sum(Rv3_pos_ind))), c='green', fontsize = 20)
ax.text(0.02, 0.46, 'N(P-NEG) = '+str(PIRP_neg_num), c='red', fontsize = 20)
ax.text(0.07, 0.41, 'N(R-L50) = '+str(sum(sum(Rv3_neg_ind))), c='red', fontsize = 20)
ax.set_xlabel('FZDZ INT', fontsize = 16)
ax.set_ylabel('SSLW INT', fontsize = 16)
ax.plot([0.0, 0.5], [0.5, 0.5], 'r--', label='test')
ax.plot([0.5, 0.5], [0.5, 0.0], 'r--', label='test')
plt.xlim(0.0, 1.02)
plt.ylim(0.0, 1.02)
plt.show()
# ... for R-v3 SSLW ints vs PIREP sev
m, b = np.polyfit(np.array(Rv3PIR_RVAL_Sg0[' slw_interestmax']).astype(np.float), np.array(Rv3PIR_RVAL_Sg0[' iint1']).astype(np.float), 1)
fig, ax = plt.subplots(figsize = (15, 15))
ax.scatter(np.array(Rv3PIR_RVAL_Sg0[' slw_interestmax']).astype(np.float), np.array(Rv3PIR_RVAL_Sg0[' iint1']).astype(np.float), c='grey', cmap=cMap, vmin=-1, vmax=8, s=75, marker='o')
ax.plot(np.array(Rv3PIR_RVAL_Sg0[' slw_interestmax']).astype(np.float), m*np.array(Rv3PIR_RVAL_Sg0[' slw_interestmax']).astype(np.float) + b)
ax.grid(color='grey', linestyle='--', linewidth=1)
plt.show()
# ... for R-v3 FZDZ ints vs PIREP sev
m, b = np.polyfit(np.array(Rv3PIR_RVAL_Sg0[' fzdz_interestmax']).astype(np.float), np.array(Rv3PIR_RVAL_Sg0[' iint1']).astype(np.float), 1)
fig, ax = plt.subplots(figsize = (15, 15))
ax.scatter(np.array(Rv3PIR_RVAL_Sg0[' fzdz_interestmax']).astype(np.float), np.array(Rv3PIR_RVAL_Sg0[' iint1']).astype(np.float), c='grey', cmap=cMap, vmin=-1, vmax=8, s=75, marker='o')
ax.plot(np.array(Rv3PIR_RVAL_Sg0[' fzdz_interestmax']).astype(np.float), m*np.array(Rv3PIR_RVAL_Sg0[' fzdz_interestmax']).astype(np.float) + b)
ax.grid(color='grey', linestyle='--', linewidth=1)
plt.show()
## SCATTER PLOTS
## ...for R-v3 FZDZ ints versus PIREP reporting height with PIREP sev color-coded points
#fig, ax = plt.subplots(figsize = (15, 15))
#ax.scatter(np.array(Rv3PIR_ALL[' fzdz_interestmax']).astype(np.float), Rv3PIR_ALL[' flvl'], c=np.array(Rv3PIR_ALL[' iint1']).astype(np.float), cmap=cMap, vmin=-1, vmax=8, s=75, marker='o')
#ax.set_xlabel('FZDZ INT', fontsize = 16)
#ax.set_ylabel('F-lvl [kft]', fontsize = 16)
#plt.grid(b=True, alpha=0.5)
#plt.show()
# 3 PLOTS: RANGE/ALT FOR 1) CLEAR AIR VCP VOLUME, 2) PIREP SEV WHEN Rv3=NaN, 3) PIREP SEV WHEN Rv3=VAL
wrl.vis.plot_scan_strategy(ranges, vol_deg, sitecoords, beamwidth=bw_deg, vert_res=1000., maxalt=15000., range_res=5000., maxrange=200000., units='km', cmap='viridis')
fig, ax = plt.subplots(figsize = (16, 8))
plt.scatter( | np.array(Rv3PIR_RNAN_Sg0['Distfromnexrad_min_km']) | numpy.array |
import matplotlib.pyplot as plt
import numpy as np
import scipy.optimize
def plotdata(f1,f2,line=False,killme=False):
x1,y1,x2,y2=[],[],[],[]
for i in range(m):
if y[i]:x1.append(f1[i]),y1.append(f2[i])
else:x2.append(f1[i]),y2.append(f2[i])
plt.plot(x1, y1, 'rx')
plt.plot(x2, y2, 'bo')
    plt.ylabel('exam 1')
    plt.xlabel('exam 2')
plt.xticks(np.arange(min(f1), max(f1)+1, 50))
plt.yticks(np.arange(min(f2), max(f2)+1, 50))
plt.legend(['ex1','ex2'])
if line:
l1 = np.array([min(f1),max(f1)])
l2=(-1./theta[2])*(theta[1]*l1 + theta[0])
plt.plot(l1, l2, '-g')
    if killme:  # plot the non-linear decision boundary
        u = np.linspace(-1, 1.5, 50)
        v = np.linspace(-1, 1.5, 50)
        z = np.zeros((len(u), len(v)))
        for i in range(len(u)):
            for j in range(len(v)):
                z[i, j] = mapFeature(u[i], v[j]).dot(theta)
        z = z.transpose()
        plt.contour(u, v, z, levels=[0], linewidths=2)
plt.show()
def sigmoid(z):
return 1/(1+ | np.exp(-z) | numpy.exp |
import pandas as pd
import numpy as np
import os
# Generate the risk distribution parameters from the risk_distribution.py script
from risk_distribution import *
# Import parameters from parameters.py script
from parameters import *
# Set path for saving dataframes
base_path = '...'
sims = 10000
# Functions to return probabilistic variables in suitable format
def gamma(alpha, beta):
alpha = np.array([alpha] * sims)
beta = np.array([beta] * sims)
samples = np.random.gamma(alpha, beta)
return samples
def gamma_specified(min, multiplier, alpha, beta):
min = np.array([min] * sims).T
alpha = np.array([alpha] * sims)
beta = | np.array([beta] * sims) | numpy.array |
"""
mle.py
This code is part of Optimization of Hardware Parameters on a Real-Time Sound
Localization System paper
It contains some support functions for pso.py
Authors:
<NAME>
<NAME>
<NAME>
<NAME>
"""
import numpy as np
from itertools import combinations as comb
propSpeed = 340.29 #propagation speed of signal
def dist (Pi, Pj) :
"""
Calculate the distance of the arrays Pi and Pj
Pi, Pj : 1D or 2D ndarrays , each line is a coordinate vector
return 2D ndarray with the distance/distances between each line of Pi
and Pj
Broadcasting allowed
"""
i = 1 if (Pi.ndim == 2 or Pj.ndim == 2) else None
diff = Pi - Pj
square = diff**2
dist = np.sqrt(np.sum(square, axis=i, keepdims=True))
dist = dist.reshape((dist.shape[0], 1)) if (i == None) else dist
return dist
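# --- Illustrative usage sketch (added for clarification; not part of the original file). ---
# dist broadcasts over rows of coordinate arrays, so a single 3-4-5 triangle makes an
# easy check. The helper name is ours.
def _dist_example():
    Pi = np.array([[0.0, 0.0, 0.0]])
    Pj = np.array([[3.0, 4.0, 0.0]])
    return dist(Pi, Pj)               # expect [[5.0]]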
def arrayMatrix(array):
""" Generates matrix that is used in MLE calculus for some array
array: row 2D ndarray with the coordinates of all sensors
[xi yi zi] is the coordinates of i-th sensor, i in [1..M]
1th sensor is the reference
array = [[x1 ... xM]
[y1 ... yM]
[z1 ... zM]].T
return matrix M
M = -pseudoInverse([[(xM-x1) ... (xM-x1)]
[(yM-y1) ... (yM-y1)]
[(zM-z1) ... (yM-y1)]].T)
"""
M = array[1:] - array[0]
M = -np.linalg.pinv(M)
return M
def mleMatrices (tdoa, array, M):
""" Generates matrices for MLE (maximum likelihood estimation) calculation
tdoa : column 2D ndarray with the TDOA from some reference sensor
array : row 2D ndarray with the coordinates of all sensors
[xi yi zi] is the coordinates of i-th sensor, i in [1..M]
1th sensor is the reference
array = [[x1 ... xM]
[y1 ... yM]
[z1 ... zM]].T
M : M matrix related with array variable (calculated with
arrayMatrix(array))
return matrices M1, M2 that are used in the MLE calculation
Xs = M1 * D1 + M2
Xs => [xs ys zs].T estimate source coordinates column vector
D1 => estimated distance between the source and the reference sensor
"""
A = -tdoa * propSpeed
Baux1 = A**2
Baux2 = -array[1:]**2 + array[0]**2
Baux2 = np.sum(Baux2, axis=1, keepdims=True)
B = 0.5 * (Baux1 + Baux2)
M1 = | np.dot(M, A) | numpy.dot |
# -*- coding: utf-8 -*-
"""
...
"""
import LibraryTT.txt2array as conversion
import numpy as np
from numpy import sqrt
import pandas as pd
import matplotlib.pyplot as plt
import random
import math
from mpl_toolkits.mplot3d import Axes3D
# import open3d as o3d
# %matplotlib inline
D = conversion.txt2array()
DD = | np.copy(D) | numpy.copy |
import numpy as np
def permutate(data,labels):
# permutate the data
indices = np.random.permutation(data.index)
data = data.reindex(indices)
labels = labels.reindex(indices)
return (data,labels)
def split_test_train(data,labels,percent_train):
splitpoint = int(data.index.size * percent_train)
trainData = data[0:splitpoint]
testData = data[splitpoint + 1:]
trainLabels = labels[0:splitpoint]
testLabels = labels[splitpoint + 1:]
return (trainData,trainLabels,testData,testLabels)
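# --- Illustrative usage sketch (added for clarification; not part of the original module). ---
# Both helpers above assume pandas objects (they rely on .index and .reindex), so this
# example builds a tiny DataFrame/Series pair before shuffling and splitting. The helper
# name is ours.
def _split_example():
    import pandas as pd
    data = pd.DataFrame({'x': range(10)})
    labels = pd.Series(range(10))
    data, labels = permutate(data, labels)
    return split_test_train(data, labels, percent_train=0.8)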
def labelMatrixToArray(labelMatrix, threshold):
labels = []
exclude = []
for row in labelMatrix.index:
r = labelMatrix.loc[row,:]
lblInfo = r[r > threshold]
lbl = 0
# TODO: for training, it would be better
# to remove the ones where 0 is more than 50 and label is less than 15
if lblInfo.size > 0:
lbl = lblInfo.index[0]
labels.append(lbl)
else:
exclude.append(row)
return (labels, exclude)
def normalizeZeroClass(labels, data):
counts = labels.groupby(0).size()
max = counts[1:].max()
zeroIndex = labels[labels[0] == 0.0].index
selectedIndex = np.random.choice(zeroIndex, size=max, replace=False)
removalIndex = zeroIndex.drop(selectedIndex)
labelDF = labels.drop(removalIndex)
trainData = data.drop(removalIndex)
return (labelDF, trainData, removalIndex)
def normalizeZeroClassArray(labels_arr, data):
lbls = np.array(labels_arr)
zeroIndex = np.where(lbls == 0.0)[0]#lbls[lbls == 0.0].index
equalizer = zeroIndex.size-(len(labels_arr)-zeroIndex.size)
removalIndex = | np.random.choice(zeroIndex, size=equalizer, replace=False) | numpy.random.choice |
#!/usr/bin/env python3
import numpy as np
import nabla
from nabla import grad, Dual, minimise
def close(x, y, eps=1e-12):
return abs(x-y)<eps
def dualclose(x, y, eps=1e-12):
isclose = close(x.real, y.real, eps)
for i in range(x.nvars):
isclose = isclose and close(x.dual[i], y.dual[i], eps)
return isclose
def test_dual():
x = Dual(2,3)
y = Dual(4,5)
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==8
z = x - y
assert z.real==-2 and z.dual[0]==-2
z = x * y
assert z.real==8 and z.dual[0]==22
z = x / y
assert z.real==0.5 and z.dual[0]==(3*4 - 2*5)/4**2
x = Dual(2,3)
y = 4
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==3
z = x - y
assert z.real==-2 and z.dual[0]==3
z = x * y
assert z.real==8 and z.dual[0]==12
z = x / y
assert z.real==0.5 and z.dual[0]==(3*4 - 2*0)/4**2
x = 2
y = Dual(4,5)
assert x<y and y>x and x<=y and y>=x
z = x + y
assert z.real==6 and z.dual[0]==5
z = x - y
assert z.real==-2 and z.dual[0]==-5
z = x * y
assert z.real==8 and z.dual[0]==10
z = x / y
assert z.real==0.5 and z.dual[0]==(0*4 - 2*5)/4**2
sqrty = np.sqrt(y)
ytohalf = y ** 0.5
assert close(sqrty.real, ytohalf.real) and close(sqrty.dual[0], ytohalf.dual[0])
z = 2**y
zalt = Dual(2)**y
assert close(z.real, zalt.real) and close(z.dual[0], zalt.dual[0])
x = Dual(2,3)
y = Dual(4,5)
w = x*y
z = nabla.dot([x], [y])
assert dualclose(z, w)
z = nabla.dot( | np.array([x]) | numpy.array |
from time import time
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
from sklearn.cluster import KMeans
from sklearn.neighbors import NearestNeighbors
from vae import VAE
def plot_label_clusters(encoder, decoder, data, labels):
# display a 2D plot of the digit classes in the latent space
z_mean, _, _ = encoder.predict(data)
plt.figure(figsize=(12, 10))
plt.scatter(z_mean[:, 0], z_mean[:, 1], c=labels)
plt.colorbar()
plt.xlabel("z[0]")
plt.ylabel("z[1]")
plt.show()
def main(training: bool = False):
(x_train, y_train), (x_test, y_test) = tf.keras.datasets.mnist.load_data()
mnist_digits = np.concatenate([x_train, x_test], axis=0)
labels = np.concatenate([y_train, y_test], axis=0)
mnist_digits = np.expand_dims(mnist_digits, -1).astype("float32") / 255
if training:
vae = VAE(data_shape=(28,28,1), latent_dim=2, epochs=20, batch_size=128, optimizer=tf.keras.optimizers.Adam)
vae.train_vae(mnist_digits, save_model=True)
else:
vae = VAE(data_shape=(28,28,1), latent_dim=2)
vae.full_model = tf.keras.models.load_model('vae')
vae.encoder = tf.keras.models.load_model('vae_encoder')
vae.decoder = tf.keras.models.load_model('vae_decoder')
plot_label_clusters(vae.encoder, vae.decoder, mnist_digits, labels)
_,_,Latent = vae.encoder.predict(mnist_digits)
# Clusters = KMeans(n_clusters=10, random_state=42)
# X_ClusterLabels = Clusters.fit_predict(Latent)
neigh = NearestNeighbors(n_neighbors=5)
neigh.fit(Latent)
test = x_test[0]
plt.imshow(test)
plt.show()
start = time()
test = | np.expand_dims(test, 0) | numpy.expand_dims |
"""
Supporting routines for coordinate conversions as well as vector operations and
transformations used in Space Science.
"""
import scipy
import scipy.integrate
import numpy as np
import datetime
import pysat
# import reference IGRF fortran code within the package
from OMMBV import igrf as igrf
import OMMBV.fortran_coords
# parameters used to define Earth ellipsoid
# WGS84 parameters below
earth_a = 6378.1370
earth_b = 6356.75231424518
# standard geocentric Earth radius
# average radius of Earth
earth_geo_radius = 6371.
def geocentric_to_ecef(latitude, longitude, altitude):
"""Convert geocentric coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geocentric latitude (degrees)
longitude : float or array_like
Geocentric longitude (degrees)
altitude : float or array_like
Height (km) above presumed spherical Earth with radius 6371 km.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
"""
r = earth_geo_radius + altitude
x = r*np.cos(np.deg2rad(latitude))*np.cos(np.deg2rad(longitude))
y = r*np.cos(np.deg2rad(latitude))*np.sin(np.deg2rad(longitude))
z = r*np.sin(np.deg2rad(latitude))
return x, y, z
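# Illustrative check (added for clarification; not part of the original module): a point on
# the equator at zero longitude and zero altitude should sit on the ECEF x-axis at the mean
# Earth radius used above (6371 km). The helper name is ours.
def _geocentric_example():
    return geocentric_to_ecef(0., 0., 0.)   # expect (~6371, 0, 0) km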
def ecef_to_geocentric(x, y, z, ref_height=None):
"""Convert ECEF into geocentric coordinates
Parameters
----------
x : float or array_like
ECEF-X in km
y : float or array_like
ECEF-Y in km
z : float or array_like
ECEF-Z in km
ref_height : float or array_like
Reference radius used for calculating height.
Defaults to average radius of 6371 km
Returns
-------
latitude, longitude, altitude
numpy arrays of locations in degrees, degrees, and km
"""
if ref_height is None:
ref_height = earth_geo_radius
r = np.sqrt(x**2 + y**2 + z**2)
colatitude = np.rad2deg(np.arccos(z/r))
longitude = np.rad2deg(np.arctan2(y, x))
latitude = 90. - colatitude
return latitude, longitude, r - ref_height
def geodetic_to_ecef(latitude, longitude, altitude):
"""Convert WGS84 geodetic coordinates into ECEF
Parameters
----------
latitude : float or array_like
Geodetic latitude (degrees)
longitude : float or array_like
Geodetic longitude (degrees)
altitude : float or array_like
Geodetic Height (km) above WGS84 reference ellipsoid.
Returns
-------
x, y, z
numpy arrays of x, y, z locations in km
"""
ellip = np.sqrt(1. - earth_b**2/earth_a**2)
r_n = earth_a/np.sqrt(1. - ellip**2*np.sin(np.deg2rad(latitude))**2)
# colatitude = 90. - latitude
x = (r_n + altitude)*np.cos(np.deg2rad(latitude))*np.cos(np.deg2rad(longitude))
y = (r_n + altitude)*np.cos(np.deg2rad(latitude))*np.sin(np.deg2rad(longitude))
z = (r_n*(1. - ellip**2) + altitude)*np.sin(np.deg2rad(latitude))
return x, y, z
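# Illustrative check (added for clarification; not part of the original module): on the
# WGS84 ellipsoid the same equatorial point lies at the semi-major axis, so x should equal
# earth_a (6378.137 km). The helper name is ours.
def _geodetic_example():
    return geodetic_to_ecef(0., 0., 0.)      # expect (~6378.137, 0, 0) km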
try:
ecef_to_geodetic = OMMBV.fortran_coords.ecef_to_geodetic
except AttributeError:
print('Unable to use Fortran version of ecef_to_geodetic. Please check installation.')
def python_ecef_to_geodetic(x, y, z, method=None):
"""Convert ECEF into Geodetic WGS84 coordinates
Parameters
----------
x : float or array_like
ECEF-X in km
y : float or array_like
ECEF-Y in km
z : float or array_like
ECEF-Z in km
    method : 'iterative' or 'closed' ('closed' is default)
String selects method of conversion. Closed for mathematical
solution (http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1)
or iterative (http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf).
Returns
-------
latitude, longitude, altitude
numpy arrays of locations in degrees, degrees, and km
"""
# quick notes on ECEF to Geodetic transformations
# http://danceswithcode.net/engineeringnotes/geodetic_to_ecef/geodetic_to_ecef.html
method = method or 'closed'
# ellipticity of Earth
ellip = np.sqrt(1. - earth_b**2/earth_a**2)
# first eccentricity squared
e2 = ellip**2 # 6.6943799901377997E-3
longitude = np.arctan2(y, x)
# cylindrical radius
p = np.sqrt(x**2 + y**2)
# closed form solution
# a source, http://www.epsg.org/Portals/0/373-07-2.pdf , page 96 section 2.2.1
if method == 'closed':
e_prime = np.sqrt((earth_a**2 - earth_b**2)/earth_b**2)
theta = np.arctan2(z*earth_a, p*earth_b)
latitude = np.arctan2(z + e_prime**2*earth_b*np.sin(theta)**3, p - e2*earth_a*np.cos(theta)**3)
r_n = earth_a/np.sqrt(1. - e2*np.sin(latitude)**2)
h = p/np.cos(latitude) - r_n
# another possibility
# http://ir.lib.ncku.edu.tw/bitstream/987654321/39750/1/3011200501001.pdf
## iterative method
# http://www.oc.nps.edu/oc2902w/coord/coordcvt.pdf
if method == 'iterative':
latitude = np.arctan2(p, z)
r_n = earth_a/np.sqrt(1. - e2*np.sin(latitude)**2)
for i in np.arange(6):
# print latitude
r_n = earth_a/np.sqrt(1. - e2*np.sin(latitude)**2)
h = p/np.cos(latitude) - r_n
latitude = np.arctan(z/p/(1. - e2*(r_n/(r_n + h))))
# print h
# final ellipsoidal height update
h = p/np.cos(latitude) - r_n
return np.rad2deg(latitude), np.rad2deg(longitude), h
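# Illustrative round-trip check (added for clarification; not part of the original module):
# converting geodetic -> ECEF -> geodetic with the closed-form method should recover the
# starting coordinates to high accuracy. The helper name and sample point are ours.
def _geodetic_round_trip(lat=45., lon=-120., alt=400.):
    x, y, z = geodetic_to_ecef(lat, lon, alt)
    return python_ecef_to_geodetic(x, y, z, method='closed')   # ~ (45, -120, 400)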
def enu_to_ecef_vector(east, north, up, glat, glong):
"""Converts vector from East, North, Up components to ECEF
Position of vector in geospace may be specified in either
geocentric or geodetic coordinates, with corresponding expression
of the vector using radial or ellipsoidal unit vectors.
Parameters
----------
east : float or array-like
Eastward component of vector
north : float or array-like
Northward component of vector
up : float or array-like
Upward component of vector
latitude : float or array_like
Geodetic or geocentric latitude (degrees)
longitude : float or array_like
Geodetic or geocentric longitude (degrees)
Returns
-------
x, y, z
Vector components along ECEF x, y, and z directions
"""
# convert lat and lon in degrees to radians
rlat = np.radians(glat)
rlon = np.radians(glong)
x = -east*np.sin(rlon) - north*np.cos(rlon)*np.sin(rlat) + up*np.cos(rlon)*np.cos(rlat)
y = east*np.cos(rlon) - north*np.sin(rlon)* | np.sin(rlat) | numpy.sin |
from __future__ import division, print_function, absolute_import
import numpy as np
from copy import deepcopy
from ipsolver._constraints import (NonlinearConstraint,
LinearConstraint,
BoxConstraint)
from ipsolver._canonical_constraint import (_parse_constraint,
to_canonical,
empty_canonical_constraint)
from numpy.testing import (TestCase, assert_array_almost_equal,
assert_array_equal, assert_array_less,
assert_raises, assert_equal, assert_,
run_module_suite, assert_allclose, assert_warns,
dec)
class TestParseConstraint(TestCase):
def test_equality_constraint(self):
kind = ("equals", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [0, 1, 2])
assert_array_equal(val_eq, [10, 20, 30])
assert_array_equal(ineq, [])
assert_array_equal(val_ineq, [])
assert_array_equal(sign, [])
def test_greater_constraint(self):
kind = ("greater", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30])
assert_array_equal(sign, [-1, -1, -1])
kind = ("greater", [10, np.inf, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 2])
assert_array_equal(val_ineq, [10, 30])
assert_array_equal(sign, [-1, -1])
def test_less_constraint(self):
kind = ("less", [10, 20, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30])
assert_array_equal(sign, [1, 1, 1])
kind = ("less", [10, np.inf, 30])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 2])
assert_array_equal(val_ineq, [10, 30])
assert_array_equal(sign, [1, 1])
def test_interval_constraint(self):
kind = ("interval", [10, 20, 30], [50, 60, 70])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [])
assert_array_equal(val_eq, [])
assert_array_equal(ineq, [0, 1, 2, 0, 1, 2])
assert_array_equal(val_ineq, [10, 20, 30, 50, 60, 70])
assert_array_equal(sign, [-1, -1, -1, 1, 1, 1])
kind = ("interval", [10, 20, 30], [50, 20, 70])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [0, 2, 0, 2])
assert_array_equal(val_ineq, [10, 30, 50, 70])
assert_array_equal(sign, [-1, -1, 1, 1])
kind = ("interval", [10, 20, 30], [50, 20, np.inf])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [0, 2, 0])
assert_array_equal(val_ineq, [10, 30, 50])
assert_array_equal(sign, [-1, -1, 1])
kind = ("interval", [-np.inf, 20, 30], [50, 20, np.inf])
eq, ineq, val_eq, val_ineq, sign, fun_len = _parse_constraint(kind)
assert_array_equal(eq, [1])
assert_array_equal(val_eq, [20])
assert_array_equal(ineq, [2, 0])
assert_array_equal(val_ineq, [30, 50])
assert_array_equal(sign, [-1, 1])
class TestToCanonical(TestCase):
def test_empty_constraint(self):
x = [1, 2, 3]
canonical = empty_canonical_constraint(x, 3)
assert_array_equal(canonical.n_eq, 0)
assert_array_equal(canonical.n_ineq, 0)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_ineq, [])
assert_array_equal(c_eq, [])
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_ineq, np.empty((0, 3)))
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(canonical.hess, None)
assert_array_equal(canonical.enforce_feasibility, [])
def test_box_to_canonical_conversion(self):
box = BoxConstraint(("interval", [10, 20, 30], [50, np.inf, 70]),
[False, False, False])
x = [1, 2, 3]
x = box.evaluate_and_initialize(x)
canonical = to_canonical(box)
assert_array_equal(canonical.n_eq, 0)
assert_array_equal(canonical.n_ineq, 5)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_ineq, [10-1,
20-2,
30-3,
1-50,
3-70])
assert_array_equal(c_eq, [])
assert_array_equal(c_ineq, canonical.c_ineq0)
assert_array_equal(c_eq, canonical.c_eq0)
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_ineq.toarray(), [[-1, 0, 0],
[0, -1, 0],
[0, 0, -1],
[1, 0, 0],
[0, 0, 1]])
assert_array_equal(J_eq, np.empty((0, 3)))
assert_array_equal(J_ineq.toarray(), canonical.J_ineq0.toarray())
assert_array_equal(J_eq.toarray(), canonical.J_eq0.toarray())
assert_array_equal(canonical.hess, None)
assert_array_equal(canonical.enforce_feasibility,
[False, False, False, False, False])
def test_linear_to_canonical_conversion(self):
A = np.array([[1, 2, 3, 4], [5, 0, 0, 6], [7, 0, 8, 0]])
linear = LinearConstraint(A, ("interval",
[10, 20, 30],
[10, np.inf, 70]),
[False, False, False])
x = [1, 2, 3, 4]
x = linear.evaluate_and_initialize(x)
canonical = to_canonical(linear)
assert_array_equal(canonical.n_eq, 1)
assert_array_equal(canonical.n_ineq, 3)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_eq, [1+4+9+16-10])
assert_array_equal(c_ineq, [20-5*1-6*4,
30-7*1-8*3,
7*1+8*3-70])
assert_array_equal(c_ineq, canonical.c_ineq0)
assert_array_equal(c_eq, canonical.c_eq0)
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_eq, [[1, 2, 3, 4]])
assert_array_equal(J_ineq, [[-5, 0, 0, -6],
[-7, 0, -8, 0],
[7, 0, 8, 0]])
assert_array_equal(J_ineq, canonical.J_ineq0)
assert_array_equal(J_eq, canonical.J_eq0)
assert_array_equal(canonical.hess, None)
assert_array_equal(canonical.enforce_feasibility,
[False, False, False])
def test_nonlinear_to_canonical_conversion(self):
f1 = 10
g1 = np.array([1, 2, 3, 4])
H1 = np.eye(4)
f2 = 1
g2 = np.array([1, 1, 1, 1])
H2 = np.zeros((4, 4))
f3 = 12
g3 = np.array([1, 0, 0, 1])
H3 = np.diag([1, 2, 3, 4])
def fun(x):
return np.array([f1 + g1.dot(x) + 1/2*H1.dot(x).dot(x),
f2 + g2.dot(x) + 1/2*H2.dot(x).dot(x),
f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x)])
def jac(x):
return np.vstack([g1 + H1.dot(x),
g2 + H2.dot(x),
g3 + H3.dot(x)])
def hess(x, v):
return v[0]*H1 + v[1]*H2 + v[2]*H3
nonlinear = NonlinearConstraint(fun,
("interval",
[10, 20, 30],
[10, np.inf, 70]),
jac, hess,
False)
x = [1, 2, 3, 4]
x = nonlinear.evaluate_and_initialize(x)
canonical = to_canonical(nonlinear)
assert_array_equal(canonical.n_eq, 1)
assert_array_equal(canonical.n_ineq, 3)
c_ineq, c_eq = canonical.constr(x)
assert_array_equal(c_ineq,
[20-(f2 + g2.dot(x) + 1/2*H2.dot(x).dot(x)),
30-(f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x)),
f3 + g3.dot(x) + 1/2*H3.dot(x).dot(x) - 70])
assert_array_equal(c_eq,
[f1 + g1.dot(x) + 1/2*H1.dot(x).dot(x) - 10])
assert_array_equal(c_ineq, canonical.c_ineq0)
assert_array_equal(c_eq, canonical.c_eq0)
J_ineq, J_eq = canonical.jac(x)
assert_array_equal(J_eq, np.atleast_2d(g1 + H1.dot(x)))
assert_array_equal(J_ineq, np.vstack([-(g2 + H2.dot(x)),
-(g3 + H3.dot(x)),
g3 + H3.dot(x)]))
v_eq = np.array([10])
v_ineq = | np.array([5, 6, 3]) | numpy.array |
import onnx
import os
import tensorrt as trt
import example
import pycuda.driver as cuda
import pycuda.autoinit
import common
import time
import numpy as np
TRT_LOGGER = trt.Logger(trt.Logger.INFO)
#G_LOGGER = trt4.infer.ConsoleLogger(trt4.infer.LogSeverity.WARNING)
def get_engine(onnx_file_path, engine_file_path, plugin_factory):
"""Attempts to load a serialized engine if available, otherwise builds a new TensorRT engine and saves it."""
if os.path.exists(engine_file_path):
# If a serialized engine exists, use it instead of building an engine.
print("Reading engine from file {}".format(engine_file_path))
with open(engine_file_path, "rb") as f, trt.Runtime(TRT_LOGGER) as runtime:
#with open(engine_file_path, "rb") as f, trt4.infer.create_infer_runtime(G_LOGGER) as runtime:
#return runtime.deserialize_cuda_engine(f.read(), plugin_factory)
print(plugin_factory)
#return runtime.deserialize_cuda_engine(f.read(), plugin_factory)
return runtime.deserialize_cuda_engine(f.read(), plugin_factory)
#plugin_factory = trt.OnnxPluginFactory(TRT_LOGGER)
plugin_factory= example.Create(TRT_LOGGER)
onnx_file = './bisenet.onnx'
#engine_file = './bisenet.trt'
engine_file = './bisenet.trt'
# Output shapes expected by the post-processor
output_shapes = [(1, 19, 96, 192)]
# Do inference with TensorRT
trt_outputs = []
def print_statics(arr):
mean = np.mean(arr)
max = np.max(arr)
min = | np.min(arr) | numpy.min |
import torch.utils.data as data
import sys
sys.path.append('/home/benkesheng/BMI_DETECT/')
from sklearn.metrics import mean_absolute_error
from sklearn.svm import SVR
from Detected import Image_Processor
import torch
import torch.nn as nn
import torch.utils.data as data
from torchvision import models, transforms
import numpy as np
import os
import pandas as pd
import cv2
import re
import csv
from PIL import Image
from Data import Img_info
import random
def setup_seed(seed):
torch.manual_seed(seed)
torch.cuda.manual_seed(seed)
np.random.seed(seed)
random.seed(seed)
torch.backends.cudnn.deterministic = True
setup_seed(20)
END_EPOCH = 0
mask_model = "/home/benkesheng/BMI_DETECT/pose2seg_release.pkl"
keypoints_model = "COCO-Keypoints/keypoint_rcnn_R_101_FPN_3x.yaml"
# P = Image_Processor(mask_model,keypoints_model)
IMG_MEAN = [0.485, 0.456, 0.406]
IMG_STD = [0.229, 0.224, 0.225]
DEVICE = torch.device("cuda:3")
IMG_SIZE = 224
BATCH_SIZE = 64
def _get_image_size(img):
if transforms.functional._is_pil_image(img):
return img.size
elif isinstance(img, torch.Tensor) and img.dim() > 2:
return img.shape[-2:][::-1]
else:
raise TypeError("Unexpected type {}".format(type(img)))
class Resize(transforms.Resize):
def __call__(self, img):
h, w = _get_image_size(img)
scale = max(w, h) / float(self.size)
new_w, new_h = int(w / scale), int(h / scale)
return transforms.functional.resize(img, (new_w, new_h), self.interpolation)
class Dataset(data.Dataset):
def __init__(self, file, transfrom):
self.Pic_Names = os.listdir(file)
self.file = file
self.transfrom = transfrom
def __len__(self):
return len(self.Pic_Names)
def __getitem__(self, idx):
img_name = self.Pic_Names[idx]
Pic = Image.open(os.path.join(self.file, self.Pic_Names[idx]))
Pic = self.transfrom(Pic)
try:
ret = re.match(r"\d+?_([FMfm])_(\d+?)_(\d+?)_(\d+).+", img_name)
BMI = (int(ret.group(4)) / 100000) / (int(ret.group(3)) / 100000) ** 2
Pic_name = os.path.join(self.file, self.Pic_Names[idx])
return (Pic, Pic_name), BMI
except:
return (Pic, ''), 10000
transform = transforms.Compose([
Resize(IMG_SIZE),
transforms.Pad(IMG_SIZE),
transforms.CenterCrop(IMG_SIZE),
transforms.ToTensor(),
transforms.Normalize(IMG_MEAN, IMG_STD)
])
dataset = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_train', transform)
# val_dataset = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_val', transform)
test_dataset = Dataset('/home/benkesheng/BMI_DETECT/datasets/Image_test', transform)
train_size = int(0.8 * len(dataset))
val_size = len(dataset) - train_size
train_dataset, val_dataset = torch.utils.data.random_split(dataset, [train_size, val_size])
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=BATCH_SIZE, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=1, shuffle=True)
test_loader = torch.utils.data.DataLoader(test_dataset, batch_size=1, shuffle=True)
# Vgg16
# Pred_Net = torchvision.models.vgg16(pretrained=True)
# for param in Pred_Net.parameters():
# param.requires_grad = True
#
# Pred_Net.classifier = nn.Sequential(
# nn.Linear(25088, 1024),
# nn.ReLU(True),
# nn.Linear(1024, 512),
# nn.ReLU(True),
# nn.Linear(512, 256),
# nn.ReLU(True),
# nn.Linear(256, 20),
# nn.ReLU(True),
# nn.Linear(20, 1)
# )
# Resnet101
Pred_Net = models.resnet101(pretrained=True)
Pred_Net.fc = nn.Linear(Pred_Net.fc.in_features, 1)  # single-output BMI regression head
print(Pred_Net)
for param in Pred_Net.parameters():
param.requires_grad = True
# Pred_Net.fc = nn.Sequential(
# nn.Linear(2048, 1024),
# nn.ReLU(True),
# nn.Linear(1024, 512),
# nn.ReLU(True),
# nn.Linear(512, 256),
# nn.ReLU(True),
# nn.Linear(256, 20),
# nn.ReLU(True),
# nn.Linear(20, 1)
# )
Pred_Net = Pred_Net.to(DEVICE)
criterion = nn.MSELoss()
optimizer = torch.optim.Adam([
{'params': Pred_Net.parameters()}
], lr=0.0001)
def train(model, device, train_loader, epoch):
model.train()
runing_loss = 0.0
for idx, ((x, n), y) in enumerate(train_loader, 0):
x, y = x.to(device), y.to(device)
optimizer.zero_grad()
y_pred = model(x)
# print(y_pred.shape)
y = torch.unsqueeze(y, 1)
loss = criterion(y_pred.double(), y.double())
loss.backward()
optimizer.step()
runing_loss += loss.item()
print('loss:', loss.item())
print('Train Epoch:{}\t RealLoss:{:.6f}'.format(epoch, runing_loss / len(train_loader)))
def mean_absolute_percentage_error(y_true, y_pred):
y_true, y_pred = np.array(y_true), | np.array(y_pred) | numpy.array |
"""Generate a single discrete time SIR model.
"""
from . import data_model
import numpy as np
from scipy import stats
import xarray as xr
# Generate Betas
# Beta, or the growth rate of the infection, depends on the covariates.
# Here we implement three different functional forms for the dependency.
SPLIT_TIME = 100
def generate_betas_from_single_random_covariate(num_locations):
"""Beta depend on a single covariate that is randomly generated.
Args:
num_locations: an int representing the number of locations to simulate
Returns:
beta: an xr.DataArray consisting of the growth rate
for each epidemic
v: an xr.DataArray consisting of the randomly generated covariate for each
location
alpha: an xr.DataArray consisting of the weights for each covariate
"""
v = xr.DataArray(
np.random.uniform(0.0, 1.0, (num_locations, 1)),
dims=['location', 'static_covariate'])
alpha = xr.DataArray(np.ones(1), dims=['static_covariate'])
beta = 0.4 * np.exp(alpha @ v)
return beta, v, alpha
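# Illustrative usage sketch (added for clarification; not part of the original module): the
# returned DataArrays carry the dimensions described in the docstring, and beta stays in
# [0.4, 0.4*e] because the covariate is uniform on [0, 1]. The helper name is ours.
def _beta_example(num_locations=5):
    beta, v, alpha = generate_betas_from_single_random_covariate(num_locations)
    return beta.dims, v.dims, alpha.dims  # ('location',), ('location', 'static_covariate'), ('static_covariate',)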
def generate_betas_effect_mod(num_locations):
"""Betas depend on 2 discrete, randomly generated effects.
Args:
num_locations: an int representing the number of locations to simulate
Returns:
beta: an xr.DataArray consisting of the growth rate
for each epidemic
v: an xr.DataArray consisting of the randomly generated covariate for each
location
alpha: an xr.DataArray consisting of the weights for each covariate
"""
v = xr.DataArray(np.random.binomial(1, 0.5, size=(num_locations, 2)),
dims={'location': num_locations, 'static_covariate': 2})
hd = v.values[:, 0]
ws = v.values[:, 1]
beta_np = np.exp(np.log(1.5) + np.log(2.0) * (hd == 1) * (ws == 0))
beta = xr.DataArray(beta_np, dims={'location': num_locations})
return beta, v, xr.DataArray(np.array([1, 1]), dims={'static_covariate': 2})
def generate_betas_many_cov2(num_locations, num_pred=1, num_not_pred=2):
"""Betas depend on real valued vector of covariates.
Args:
num_locations: an int representing the number of locations to simulate.
num_pred: an int representing the number of covariates that affect beta.
num_not_pred: an int representing the number of covariates that do not
affect beta.
Returns:
beta: an xr.DataArray consisting of the growth rate
for each epidemic
v: an xr.DataArray consisting of the randomly generated covariate for each
location
alpha: an xr.DataArray consisting of the weights for each covariate
"""
# generate random covariates
# sample from range -1, 1 uniformly
v = xr.DataArray(np.random.uniform(
low=-1.0, high=1.0, size=(num_locations, num_pred + num_not_pred)),
dims={'location': num_locations,
'static_covariate': num_pred+num_not_pred})
# construct weights for each covariate
alpha_1 = np.ones(num_pred)
alpha_0 = np.zeros(num_not_pred)
alpha = xr.DataArray(np.concatenate((alpha_1, alpha_0), axis=0),
dims={'static_covariate': num_pred+num_not_pred})
# this has a different functional form than we've seen before
beta_np = 1 + np.exp(np.matmul(alpha.values, v.values.T))
beta = xr.DataArray(beta_np, dims={'location': num_locations})
return beta, v, alpha
def gen_dynamic_beta_random_time(num_locations, num_time_steps):
"""Betas change at a random time between 1 and num_time_steps-1.
Args:
num_locations: an int representing the number of locations to simulate
num_time_steps: an int representing the number of time steps to simulate
Returns:
beta: an xr.DataArray consisting of the growth rate
for each epidemic with dimensions (location, time)
v: an xr.DataArray consisting of the randomly generated covariate for each
location with dimensions (location, time, 1)
alpha: an xr.DataArray consisting of the weights for each covariate with
dimension 1.
"""
time = np.random.randint(1, num_time_steps-1, num_locations)
cov = np.zeros((num_locations, num_time_steps, 1))
for i in range(num_locations):
cov[i][time[i]:] = 1
v = xr.DataArray(cov, dims=['location', 'time', 'dynamic_covariate'])
alpha = np.random.uniform(-1., 0.)*xr.DataArray( | np.ones(1) | numpy.ones |
import numpy as np
import os
import re
import requests
import sys
import time
from netCDF4 import Dataset
import pandas as pd
from bs4 import BeautifulSoup
from tqdm import tqdm
# setup constants used to access the data from the different M2M interfaces
BASE_URL = 'https://ooinet.oceanobservatories.org/api/m2m/' # base M2M URL
SENSOR_URL = '12576/sensor/inv/' # Sensor Information
# setup access credentials
AUTH = ['OOIAPI-853A3LA6QI3L62', '<KEY>']
def M2M_Call(uframe_dataset_name, start_date, end_date):
options = '?beginDT=' + start_date + '&endDT=' + end_date + '&format=application/netcdf'
r = requests.get(BASE_URL + SENSOR_URL + uframe_dataset_name + options, auth=(AUTH[0], AUTH[1]))
if r.status_code == requests.codes.ok:
data = r.json()
else:
return None
# wait until the request is completed
print('Waiting for OOINet to process and prepare data request, this may take up to 20 minutes')
url = [url for url in data['allURLs'] if re.match(r'.*async_results.*', url)][0]
check_complete = url + '/status.txt'
with tqdm(total=400, desc='Waiting') as bar:
        for i in range(400):
            r = requests.get(check_complete)
            bar.update(1)
            elapsed = (i * 3) / 60
            if r.status_code == requests.codes.ok:
                bar.n = 400
                bar.last_print_n = 400
                bar.refresh()
                print('\nrequest completed in %f minutes.' % elapsed)
                break
            else:
                time.sleep(3)
return data
def M2M_Files(data, tag=''):
"""
Use a regex tag combined with the results of the M2M data request to collect the data from the THREDDS catalog.
Collected data is gathered into an xarray dataset for further processing.
:param data: JSON object returned from M2M data request with details on where the data is to be found for download
:param tag: regex tag to use in discriminating the data files, so we only collect the correct ones
:return: the collected data as an xarray dataset
"""
# Create a list of the files from the request above using a simple regex as a tag to discriminate the files
url = [url for url in data['allURLs'] if re.match(r'.*thredds.*', url)][0]
files = list_files(url, tag)
return files
def list_files(url, tag=''):
"""
Function to create a list of the NetCDF data files in the THREDDS catalog created by a request to the M2M system.
:param url: URL to user's THREDDS catalog specific to a data request
:param tag: regex pattern used to distinguish files of interest
:return: list of files in the catalog with the URL path set relative to the catalog
"""
page = requests.get(url).text
soup = BeautifulSoup(page, 'html.parser')
pattern = re.compile(tag)
return [node.get('href') for node in soup.find_all('a', text=pattern)]
def M2M_Data(nclist, variables):
    """Open each NetCDF file over OPeNDAP and append its values to the matching var objects."""
    thredds = 'https://opendap.oceanobservatories.org/thredds/dodsC/ooi/'
    # nclist is going to contain more than one url eventually
    for jj in range(len(nclist)):
        url = nclist[jj]
        url = url[25:]  # strip the catalog prefix so the remainder can be appended to the DAP base URL
        dap_url = thredds + url + '#fillmismatch'
        openFile = Dataset(dap_url, 'r')
        for ii in range(len(variables)):
            dum = openFile.variables[variables[ii].name]
            variables[ii].data = np.append(variables[ii].data, dum[:].data)
    # convert the time record (seconds since 1900-01-01) from seconds to days, then to datetimes
    tmp = variables[0].data/60/60/24
    time_converted = pd.to_datetime(tmp, unit='D', origin=pd.Timestamp('1900-01-01'))
    return variables, time_converted
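# Illustrative end-to-end sketch of how the helpers above chain together.
# Assumptions: AUTH holds valid credentials, the date strings are hypothetical
# placeholders, and M2M_URLs (defined below) returns the uframe dataset name plus
# the populated var_list for the requested platform/instrument -- its return
# statement is not shown in this excerpt, so that signature is an assumption.
def _example_download_metbk():
    uframe_dataset_name, var_list = M2M_URLs('CE02SHSM', 'BUOY', 'METBK1', 'Telemetered')
    data = M2M_Call(uframe_dataset_name, '2019-01-01T00:00:00.000Z', '2019-02-01T00:00:00.000Z')
    if data is None:
        return None
    nc_files = M2M_Files(data, tag=r'.*METBK.*\.nc$')
    variables, time_converted = M2M_Data(nc_files, var_list)
    return variables, time_converted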
class var(object):
def __init__(self):
"""A Class that generically holds data with a variable name
and the units as attributes"""
self.name = ''
self.data = np.array([])
self.units = ''
def __repr__(self):
return_str = "name: " + self.name + '\n'
return_str += "units: " + self.units + '\n'
return_str += "data: size: " + str(self.data.shape)
return return_str
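# Minimal illustration of the var container: fields are assigned directly and the
# repr summarizes the name, units, and current size of the data array.
def _example_var():
    t = var()
    t.name = 'time'
    t.units = 'seconds since 1900-01-01'
    t.data = np.array([0.0, 3600.0])
    print(t)  # prints the name and units lines plus "data: size: (2,)"
    return t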
class structtype(object):
def __init__(self):
""" A class that imitates a Matlab structure type
"""
self._data = []
def __getitem__(self, index):
"""implement index behavior in the struct"""
if index == len(self._data):
self._data.append(var())
return self._data[index]
def __len__(self):
return len(self._data)
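# Minimal illustration of structtype's Matlab-like growth: indexing one position past
# the current end appends a fresh var(), so entries can be filled in sequentially,
# exactly as M2M_URLs does below.
def _example_structtype():
    vl = structtype()
    vl[0].name = 'time'
    vl[0].data = np.array([])
    vl[0].units = 'seconds since 1900-01-01'
    vl[1].name = 'sea_surface_temperature'
    vl[1].units = 'degC'
    return vl  # len(vl) is now 2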
def M2M_URLs(platform_name,node,instrument_class,method):
var_list = structtype()
#MOPAK
if platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/telemetered/mopak_o_dcl_accel'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/telemetered/metbk_a_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '#'
var_list[9].units = 'W/m'
var_list[10].units = 'W/m'
var_list[11].units = 'mm'
var_list[12].units = 'W/m'
var_list[13].units = 'W/m'
var_list[14].units = 'W/m'
var_list[15].units = 'W/m'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/telemetered/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
#FDCHP
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/telemetered/fdchp_a_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/telemetered/dosta_abcdjm_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/telemetered/dosta_abcdjm_ctdbp_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/telemetered/dofst_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
#ADCP
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/telemetered/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#ZPLSC
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/telemetered/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_host/zplsc_c_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#WAVSS
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/telemetered/wavss_a_dcl_statistics'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/telemetered/velpt_ab_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/telemetered/pco2w_abc_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN
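# Seawater pH sensor: reports thermistor temperature and pH on NSIF and MFN nodes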
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/telemetered/phsen_abcdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR
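# Downwelling spectral irradiance sensor on the NSIF (uW cm-2 nm-1 per wavelength channel)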
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/telemetered/spkir_abj_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF
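# Seafloor pressure sensor on the MFN: absolute pressure (dbar) and seawater temperature (degC)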
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/telemetered/presf_abc_dcl_tide_measurement'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP
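# Pumped CTD: temperature, practical salinity, density, pressure and conductivity on BUOY, NSIF and MFN nodes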
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/telemetered/ctdbp_cdef_dcl_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D
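# Single-point turbulent velocity meter on the MFN: east/north/up velocities plus seawater pressure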
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/telemetered/vel3d_cd_dcl_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#VEL3DK
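# Profiler-mounted VEL3D (K series): velocities, heading/pitch/roll and CTD pressure on the CE09OSPM wire-following profiler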
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/telemetered/vel3d_k_wfp_stc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
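#CTDPF
# Wire-following profiler CTD: temperature, salinity, density, pressure and conductivity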
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/telemetered/ctdpf_ckl_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#PCO2A
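# Air-sea pCO2 sensor on the surface buoy: seawater and atmospheric pCO2 plus the derived CO2 flux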
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/telemetered/pco2a_a_dcl_instrument_water'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#PARAD
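# Photosynthetically available radiation (PAR) sensor on the wire-following profiler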
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/telemetered/parad_k__stc_imodem_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
#OPTAA
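# Optical absorption/attenuation meter; only the time coordinate is requested for these streams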
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/telemetered/optaa_dj_dcl_instrument'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
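# Nitrate sensor (SUNA): raw and salinity-corrected nitrate concentrations in umol/L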
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'Telemetered':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/telemetered/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
## Recovered-host (recovered_host) data streams
#MOPAK
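# Buoy 3-axis motion package; only the time coordinate is requested for these streams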
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSPM' and node == 'BUOY' and instrument_class == 'MOPAK' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSPM/SBS01/01-MOPAK0000/recovered_host/mopak_o_dcl_accel_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#METBK
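# Bulk meteorology package on the surface buoy: SST, conductivity, winds, radiation, heat fluxes, humidity and precipitation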
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'METBK1' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/06-METBKA000/recovered_host/metbk_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sea_surface_temperature'
var_list[2].name = 'sea_surface_conductivity'
var_list[3].name = 'met_salsurf'
var_list[4].name = 'met_windavg_mag_corr_east'
var_list[5].name = 'met_windavg_mag_corr_north'
var_list[6].name = 'barometric_pressure'
var_list[7].name = 'air_temperature'
var_list[8].name = 'relative_humidity'
var_list[9].name = 'longwave_irradiance'
var_list[10].name = 'shortwave_irradiance'
var_list[11].name = 'precipitation'
var_list[12].name = 'met_heatflx_minute'
var_list[13].name = 'met_latnflx_minute'
var_list[14].name = 'met_netlirr_minute'
var_list[15].name = 'met_sensflx_minute'
var_list[16].name = 'eastward_velocity'
var_list[17].name = 'northward_velocity'
var_list[18].name = 'met_spechum'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[15].data = np.array([])
var_list[16].data = np.array([])
var_list[17].data = np.array([])
var_list[18].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'S/m'
var_list[3].units = 'unitless'
var_list[4].units = 'm/s'
var_list[5].units = 'm/s'
var_list[6].units = 'mbar'
var_list[7].units = 'degC'
var_list[8].units = '%'
var_list[9].units = 'W/m2'
var_list[10].units = 'W/m2'
var_list[11].units = 'mm'
var_list[12].units = 'W/m2'
var_list[13].units = 'W/m2'
var_list[14].units = 'W/m2'
var_list[15].units = 'W/m2'
var_list[16].units = 'm/s'
var_list[17].units = 'm/s'
var_list[18].units = 'g/kg'
#FLORT
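# Three-channel fluorometer: chlorophyll-a, CDOM and optical backscatter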
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'FLORT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/02-FLORTD000/recovered_host/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
#FDCHP
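# Direct covariance flux package on the CE02SHSM buoy; only the time coordinate is requested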
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_host/fdchp_a_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#DOSTA
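# Dissolved oxygen optode: oxygen concentrations and optode temperature on NSIF and MFN nodes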
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/04-DOSTAD000/recovered_host/dosta_abcdjm_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'optode_temperature'
var_list[4].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
var_list[4].units = 'umol/L'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_host/dosta_abcdjm_ctdbp_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'dosta_ln_optode_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
#ADCP - Acoustic Doppler Current Profiler (earth-coordinate velocity profiles)
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_host/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
#WAVSS - Surface Wave Spectra (bulk wave statistics)
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/05-WAVSSA000/recovered_host/wavss_a_dcl_statistics_recovered'
var_list[0].name = 'time'
var_list[1].name = 'number_zero_crossings'
var_list[2].name = 'average_wave_height'
var_list[3].name = 'mean_spectral_period'
var_list[4].name = 'max_wave_height'
var_list[5].name = 'significant_wave_height'
var_list[6].name = 'significant_period'
var_list[7].name = 'wave_height_10'
var_list[8].name = 'wave_period_10'
var_list[9].name = 'mean_wave_period'
var_list[10].name = 'peak_wave_period'
var_list[11].name = 'wave_period_tp5'
var_list[12].name = 'wave_height_hmo'
var_list[13].name = 'mean_direction'
var_list[14].name = 'mean_spread'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[9].data = np.array([])
var_list[10].data = np.array([])
var_list[11].data = np.array([])
var_list[12].data = np.array([])
var_list[13].data = np.array([])
var_list[14].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'counts'
var_list[2].units = 'm'
var_list[3].units = 'sec'
var_list[4].units = 'm'
var_list[5].units = 'm'
var_list[6].units = 'sec'
var_list[7].units = 'm'
var_list[8].units = 'sec'
var_list[9].units = 'sec'
var_list[10].units = 'sec'
var_list[11].units = 'sec'
var_list[12].units = 'm'
var_list[13].units = 'degrees'
var_list[14].units = 'degrees'
#VELPT - Single Point Velocity Meter
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
#uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_instrument_recovered'
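        # note: for this mooring the velpt_ab_instrument_recovered stream is requested
        # rather than the velpt_ab_dcl stream commented out above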
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_host/velpt_ab_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
#PCO2W - Partial pressure of CO2 in seawater
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_host/pco2w_abc_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
#PHSEN - Seawater pH
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_host/phsen_abcdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
#SPKIR - Downwelling Spectral Irradiance
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'SPKIR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/08-SPKIRB000/recovered_host/spkir_abj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
#PRESF - Seafloor Pressure (tide measurements)
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_host/presf_abc_dcl_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'abs_seafloor_pressure'
var_list[2].name = 'seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
#CTDBP - Pumped CTD (temperature, salinity, density, pressure, conductivity)
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_host/ctdbp_cdef_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
#VEL3D - 3-D Single Point Turbulent Velocity Meter
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_host/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
#PCO2A
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'PCO2A' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/SBD12/04-PCO2AA000/recovered_host/pco2a_a_dcl_instrument_water_recovered'
var_list[0].name = 'time'
var_list[1].name = 'partial_pressure_co2_ssw'
var_list[2].name = 'partial_pressure_co2_atm'
var_list[3].name = 'pco2_co2flux'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uatm'
var_list[2].units = 'uatm'
var_list[3].units = 'mol m-2 s-1'
#OPTAA
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID27/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/MFD37/01-OPTAAD000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'OPTAA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/MFD37/01-OPTAAC000/recovered_host/optaa_dj_dcl_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#NUTNR
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredHost':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_host/suna_dcl_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
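#CTD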
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID27/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-CTDBPC000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'CTD' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-CTDBPE000/recovered_inst/ctdbp_cdef_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdbp_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_seawater_pressure'
var_list[5].name = 'ctdbp_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
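#CTD (profiler)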
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/03-CTDPFK000/recovered_wfp/ctdpf_ckl_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'ctdpf_ckl_seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdpf_ckl_seawater_pressure'
var_list[5].name = 'ctdpf_ckl_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
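#ADCP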
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/01-ADCPTA000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/01-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/04-ADCPTC000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ADCP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/04-ADCPSJ000/recovered_inst/adcp_velocity_earth'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
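#ZPLSC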
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'ZPLSC' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/07-ZPLSCC000/recovered_inst/zplsc_echogram_data'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
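#VELPT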
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'BUOY' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/SBD11/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'VELPT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/04-VELPTA000/recovered_inst/velpt_ab_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'eastward_velocity'
var_list[2].name = 'northward_velocity'
var_list[3].name = 'upward_velocity'
var_list[4].name = 'heading_decidegree'
var_list[5].name = 'roll_decidegree'
var_list[6].name = 'pitch_decidegree'
var_list[7].name = 'temperature_centidegree'
var_list[8].name = 'pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'deci-degrees'
var_list[6].units = 'deci-degrees'
var_list[7].units = '0.01degC'
var_list[8].units = '0.001dbar'
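#VEL3D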
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'VEL3D' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/01-VEL3DK000/recovered_wfp/vel3d_k_wfp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_k_eastward_velocity'
var_list[2].name = 'vel3d_k_northward_velocity'
var_list[3].name = 'vel3d_k_upward_velocity'
var_list[4].name = 'vel3d_k_heading'
var_list[5].name = 'vel3d_k_pitch'
var_list[6].name = 'vel3d_k_roll'
var_list[7].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'ddegrees'
var_list[5].units = 'ddegrees'
var_list[6].units = 'ddegrees'
var_list[7].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'VEL3D' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/01-VEL3DD000/recovered_inst/vel3d_cd_dcl_velocity_data_recovered'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
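#PRESF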
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/02-PRESFA000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/02-PRESFB000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PRESF' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/02-PRESFC000/recovered_inst/presf_abc_tide_measurement_recovered'
var_list[0].name = 'time'
var_list[1].name = 'presf_tide_pressure'
var_list[2].name = 'presf_tide_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
var_list[2].units = 'degC'
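#PHSEN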
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PHSEN' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/06-PHSEND000/recovered_inst/phsen_abcdef_instrument'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'phsen_abcdef_ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
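#PCO2W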
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'PCO2W' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD35/05-PCO2WB000/recovered_inst/pco2w_abc_instrument'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/05-PARADK000/recovered_wfp/parad_k__stc_imodem_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_k_par'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
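#Nitrate (NUTNR) recovered instrument data below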
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE04OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE07SHSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
elif platform_name == 'CE09OSSM' and node == 'NSIF' and instrument_class == 'NUTNR' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/RID26/07-NUTNRB000/recovered_inst/suna_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'nitrate_concentration'
var_list[2].name = 'salinity_corrected_nitrate'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
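#Direct covariance flux (FDCHP) recovered data below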
elif platform_name == 'CE02SHSM' and node == 'BUOY' and instrument_class == 'FDCHP' and method == 'RecoveredInst':
uframe_dataset_name = 'CE02SHSM/SBD12/08-FDCHPA000/recovered_inst/fdchp_a_instrument_recovered'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
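#Fluorometer (FLORT) data below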
elif platform_name == 'CE01ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE06ISSM' and node == 'BUOY' and instrument_class == 'FLORT' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/SBD17/06-FLORTD000/recovered_inst/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/04-FLORTK000/recovered_wfp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
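#Dissolved oxygen (DOSTA) data below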
elif platform_name == 'CE09OSPM' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredWFP':
uframe_dataset_name = 'CE09OSPM/WFP01/02-DOFSTK000/recovered_wfp/dofst_k_wfp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dofst_k_oxygen_l2'
var_list[2].name = 'dofst_k_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'Hz'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'NSIF' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/RID16/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE07SHSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE07SHSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
elif platform_name == 'CE09OSSM' and node == 'MFN' and instrument_class == 'DOSTA' and method == 'RecoveredInst':
uframe_dataset_name = 'CE09OSSM/MFD37/03-DOSTAD000/recovered_inst/dosta_abcdjm_ctdbp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[3].name = 'ctdbp_seawater_temperature'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'degC'
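#Wave statistics (WAVSS_Stats) served from the MFN ADCPT log9 recovered streams below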
elif platform_name == 'CE01ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE01ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
elif platform_name == 'CE06ISSM' and node == 'MFN' and instrument_class == 'WAVSS_Stats' and method == 'RecoveredInst':
uframe_dataset_name = 'CE06ISSM/MFD35/04-ADCPTM000/recovered_inst/adcpt_m_instrument_log9_recovered'
var_list[0].name = 'time'
var_list[1].name = 'significant_wave_height'
var_list[2].name = 'peak_wave_period'
var_list[3].name = 'peak_wave_direction'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'seconds'
var_list[3].units = 'degrees'
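#Cabled benthic experiment package (BEP) streamed data below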
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'CTD' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_temperature'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'density'
var_list[4].name = 'ctdbp_no_seawater_pressure'
var_list[5].name = 'ctdbp_no_seawater_conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/06-CTDBPN106/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'DOSTA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/06-CTDBPO108/streamed/ctdbp_no_sample'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'ctd_tc_oxygen'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/10-PHSEND103/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PHSEN' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/10-PHSEND107/streamed/phsen_data_record'
var_list[0].name = 'time'
var_list[1].name = 'phsen_thermistor_temperature'
var_list[2].name = 'ph_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/09-PCO2WB103/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'PCO2W' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/09-PCO2WB104/streamed/pco2w_b_sami_data_record'
var_list[0].name = 'time'
var_list[1].name = 'pco2w_thermistor_temperature'
var_list[2].name = 'pco2_seawater'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'uatm'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/05-ADCPTB104/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'ADCP' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/05-ADCPSI103/streamed/adcp_velocity_beam'
var_list[0].name = 'time'
var_list[1].name = 'bin_depths'
var_list[2].name = 'heading'
var_list[3].name = 'pitch'
var_list[4].name = 'roll'
var_list[5].name = 'eastward_seawater_velocity'
var_list[6].name = 'northward_seawater_velocity'
var_list[7].name = 'upward_seawater_velocity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'meters'
var_list[2].units = 'deci-degrees'
var_list[3].units = 'deci-degrees'
var_list[4].units = 'deci-degrees'
var_list[5].units = 'm/s'
var_list[6].units = 'm/s'
var_list[7].units = 'm/s'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/07-VEL3DC108/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'VEL3D' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/07-VEL3DC107/streamed/vel3d_cd_velocity_data'
var_list[0].name = 'time'
var_list[1].name = 'vel3d_c_eastward_turbulent_velocity'
var_list[2].name = 'vel3d_c_northward_turbulent_velocity'
var_list[3].name = 'vel3d_c_upward_turbulent_velocity'
var_list[4].name = 'seawater_pressure_mbar'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = '0.001dbar'
elif platform_name == 'CE02SHBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE02SHBP/LJ01D/08-OPTAAD106/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
elif platform_name == 'CE04OSBP' and node == 'BEP' and instrument_class == 'OPTAA' and method == 'Streamed':
uframe_dataset_name = 'CE04OSBP/LJ01C/08-OPTAAC104/streamed/optaa_sample'
var_list[0].name = 'time'
var_list[0].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
#Coastal Surface-Piercing Profiler (CSPP) data below
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/telemetered/flort_dj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/08-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/telemetered/dosta_abcdjm_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/02-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/telemetered/ctdpf_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/09-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/telemetered/parad_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/10-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/06-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/telemetered/spkir_abj_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/07-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'Telemetered':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/telemetered/velpt_j_cspp_instrument'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/05-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE01ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE01ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE06ISSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE06ISSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
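#CE02SHSP and CE07SHSP surface-piercing profiler data below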
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'FLORT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/07-FLORTJ000/recovered_cspp/flort_sample'
var_list[0].name = 'time'
var_list[1].name = 'seawater_scattering_coefficient'
var_list[2].name = 'fluorometric_chlorophyll_a'
var_list[3].name = 'fluorometric_cdom'
var_list[4].name = 'total_volume_scattering_coefficient'
var_list[5].name = 'optical_backscatter'
var_list[6].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm-1'
var_list[2].units = 'ug/L'
var_list[3].units = 'ppb'
var_list[4].units = 'm-1 sr-1'
var_list[5].units = 'm-1'
var_list[6].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'DOSTA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/01-DOSTAJ000/recovered_cspp/dosta_abcdjm_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'dissolved_oxygen'
var_list[2].name = 'estimated_oxygen_concentration'
var_list[3].name = 'dosta_abcdjm_cspp_tc_oxygen'
var_list[4].name = 'optode_temperature'
var_list[5].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/kg'
var_list[2].units = 'umol/L'
var_list[3].units = 'umol/L'
var_list[4].units = 'degC'
var_list[5].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'CTD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/08-CTDPFJ000/recovered_cspp/ctdpf_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'temperature'
var_list[2].name = 'salinity'
var_list[3].name = 'density'
var_list[4].name = 'pressure'
var_list[5].name = 'conductivity'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'PARAD' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/09-PARADJ000/recovered_cspp/parad_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'parad_j_par_counts_output'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol photons m-2 s-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'NUTNR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/05-NUTNRJ000/recovered_cspp/nutnr_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'salinity_corrected_nitrate'
var_list[2].name = 'nitrate_concentration'
var_list[3].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/L'
var_list[3].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'SPKIR' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/06-SPKIRJ000/recovered_cspp/spkir_abj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'spkir_abj_cspp_downwelling_vector'
var_list[2].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'uW cm-2 nm-1'
var_list[2].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'VELPT' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/02-VELPTJ000/recovered_cspp/velpt_j_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'velpt_j_eastward_velocity'
var_list[2].name = 'velpt_j_northward_velocity'
var_list[3].name = 'velpt_j_upward_velocity'
var_list[4].name = 'heading'
var_list[5].name = 'roll'
var_list[6].name = 'pitch'
var_list[7].name = 'temperature'
var_list[8].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[8].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'm/s'
var_list[2].units = 'm/s'
var_list[3].units = 'm/s'
var_list[4].units = 'degrees'
var_list[5].units = 'degrees'
var_list[6].units = 'degrees'
var_list[7].units = 'degC'
var_list[8].units = 'dbar'
elif platform_name == 'CE02SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE02SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
elif platform_name == 'CE07SHSP' and node == 'PROFILER' and instrument_class == 'OPTAA' and method == 'RecoveredCSPP':
uframe_dataset_name = 'CE07SHSP/SP001/04-OPTAAJ000/recovered_cspp/optaa_dj_cspp_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'int_ctd_pressure'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'dbar'
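#Glider (CE05MOAS) data below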
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/telemetered/ctdgv_m_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'CTD' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL247/05-CTDGVM000/recovered_host/ctdgv_m_glider_instrument_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_water_temp'
var_list[2].name = 'practical_salinity'
var_list[3].name = 'sci_seawater_density'
var_list[4].name = 'sci_water_pressure_dbar'
var_list[5].name = 'sci_water_cond'
var_list[6].name = 'lat'
var_list[7].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[6].data = np.array([])
var_list[7].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'degC'
var_list[2].units = 'unitless'
var_list[3].units = 'kg/m3'
var_list[4].units = 'dbar'
var_list[5].units = 'S/m'
var_list[6].units = 'degree_north'
var_list[7].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL386' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL386/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL384' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL384/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL383' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL383/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL382' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL382/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL381' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL381/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL327' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL327/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL326' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL326/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL320' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL320/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL319' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL319/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL312' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL312/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL311' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'RecoveredHost':
uframe_dataset_name = 'CE05MOAS/GL311/04-DOSTAM000/recovered_host/dosta_abcdjm_glider_recovered'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
var_list[1].data = np.array([])
var_list[2].data = np.array([])
var_list[3].data = np.array([])
var_list[4].data = np.array([])
var_list[5].data = np.array([])
var_list[0].units = 'seconds since 1900-01-01'
var_list[1].units = 'umol/L'
var_list[2].units = 'umol/kg'
var_list[3].units = 'dbar'
var_list[4].units = 'degree_north'
var_list[5].units = 'degree_east'
elif platform_name == 'CEGL247' and node == 'GLIDER' and instrument_class == 'DOSTA' and method == 'Telemetered':
uframe_dataset_name = 'CE05MOAS/GL247/04-DOSTAM000/telemetered/dosta_abcdjm_glider_instrument'
var_list[0].name = 'time'
var_list[1].name = 'sci_oxy4_oxygen'
var_list[2].name = 'sci_abs_oxygen'
var_list[3].name = 'int_ctd_pressure'
var_list[4].name = 'lat'
var_list[5].name = 'lon'
var_list[0].data = np.array([])
    var_list[1].data = np.array([])
"""
Code modified from modAL project: https://github.com/modAL-python/modAL
Uncertainty measures and uncertainty-based sampling strategies for active learning models.
"""
from typing import Tuple, Union, Callable, List
import numpy as np
from scipy.stats import entropy
import tensorflow as tf
from tensorflow.keras import Model
from sklearn.metrics.pairwise import euclidean_distances
import lightgbm as lgb
from sklearn.model_selection import train_test_split
from model import create_dnn, create_dnn2
def shuffled_argmax(values: np.ndarray, n_instances: int = 1) -> np.ndarray:
"""
Shuffles the values and sorts them afterwards. This can be used to break
the tie when the highest utility score is not unique. The shuffle randomizes
order, which is preserved by the mergesort algorithm.
Args:
values: Contains the values to be selected from.
n_instances: Specifies how many indices to return.
Returns:
The indices of the n_instances largest values.
"""
assert n_instances <= values.shape[0], 'n_instances must be less or equal than the size of utility'
# shuffling indices and corresponding values
shuffled_idx = np.random.permutation(len(values))
shuffled_values = values[shuffled_idx]
# getting the n_instances best instance
# since mergesort is used, the shuffled order is preserved
sorted_query_idx = np.argsort(shuffled_values, kind='mergesort')[len(shuffled_values)-n_instances:]
# inverting the shuffle
query_idx = shuffled_idx[sorted_query_idx]
return query_idx
def multi_argmax(values: np.ndarray, n_instances: int = 1) -> np.ndarray:
"""
Selects the indices of the n_instances highest values.
Args:
values: Contains the values to be selected from.
n_instances: Specifies how many indices to return.
Returns:
The indices of the n_instances largest values.
"""
assert n_instances <= values.shape[0], 'n_instances must be less or equal than the size of utility'
max_idx = np.argpartition(-values, n_instances-1, axis=0)[:n_instances]
return max_idx
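# Illustrative example (utility values invented for demonstration): multi_argmax
# returns the indices of the n_instances largest utilities, while shuffled_argmax
# additionally randomises which of several tied maxima are picked.
def _demo_top_k_selection():
    utilities = np.array([0.1, 0.9, 0.9, 0.3, 0.9])
    print(multi_argmax(utilities, n_instances=2))     # two indices from the tied maxima {1, 2, 4}
    print(shuffled_argmax(utilities, n_instances=2))  # same candidate set, random tie-break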
def classifier_entropy(classifier: Model, X: np.ndarray, y: np.ndarray, binary_labels:bool = True, dual=False) -> np.ndarray:
"""
Entropy of predictions of the for the provided samples.
Args:
classifier: The classifier for which the prediction entropy is to be measured.
X: The samples for which the prediction entropy is to be measured.
Returns:
Entropy of the class probabilities.
"""
if dual:
classwise_uncertainty = classifier(X,y).reshape(-1, 1)
        classwise_uncertainty = np.hstack((1 - classwise_uncertainty, classwise_uncertainty))  # treat the single score as P(class 1) and form a two-class distribution
import os
import sys
import numpy as np
import pandas as pd
import time
import scipy.sparse
import scipy.sparse.linalg
from scipy import stats
from scipy.optimize import minimize
np.set_printoptions(threshold=sys.maxsize)
# Add lib to the python path.
from genTestDat import genTestData2D, prodMats2D
from est2d import *
from est3d import *
from npMatrix2d import *
from npMatrix3d import *
# ==================================================================================
#
# The below code runs multiple simulations in serial. It takes the following inputs:
#
# ----------------------------------------------------------------------------------
#
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
#            - Design 2: nlevels=[50,25], nraneffs=[3,2]
#            - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - nsim: Number of simulations (default=1000)
# - mode: String indicating whether to run parameter estimation simulations (mode=
# 'param') or T statistic simulations (mode='Tstat').
# - REML: Boolean indicating whether to use ML or ReML estimation.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def sim2D(desInd, OutDir, nsim=1000, mode='param', REML=False):
# Loop through and run simulations
for simInd in range(1,nsim+1):
runSim(simInd, desInd, OutDir, mode, REML)
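# Example (illustrative arguments): run 10 parameter-estimation simulations for
# Design 1, writing all output under './sim_results':
#
#     sim2D(desInd=1, OutDir='./sim_results', nsim=10, mode='param', REML=False)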
# ==================================================================================
#
# The below simulates random test data and runs all methods described in the LMM
# paper on the simulated data. It requires the following inputs:
#
# ----------------------------------------------------------------------------------
#
# - SimInd: An index to represent the simulation. All output for this simulation will
# be saved in files with the index specified by this argument. The
# simulation with index 1 will also perform any necessary additional setup
# and should therefore be run before any others.
# - desInd: Integer value between 1 and 3 representing which design to run. The
# designs are as follows:
# - Design 1: nlevels=[50], nraneffs=[2]
#            - Design 2: nlevels=[50,25], nraneffs=[3,2]
#            - Design 3: nlevels=[100,30,10], nraneffs=[4,3,2]
# - OutDir: The output directory.
# - mode: String indicating whether to run parameter estimation simulations (mode=
# 'param') or T statistic simulations (mode='Tstat').
# - REML: Boolean indicating whether to use ML or ReML estimation.
#
# ----------------------------------------------------------------------------------
#
# Author: <NAME> (06/04/2020)
#
# ==================================================================================
def runSim(simInd, desInd, OutDir, mode='param', REML=False):
# Make sure simInd is an int
simInd = int(simInd)
#===============================================================================
# Setup
#===============================================================================
# Decide whether we wish to run T statistics/degrees of freedom estimation
if mode=='param':
runDF = False
else:
runDF = True
# Different designs
if desInd==1:
nlevels = np.array([50])
nraneffs = np.array([2])
if desInd==2:
nlevels = np.array([50,25])
nraneffs = np.array([3,2])
if desInd==3:
nlevels = np.array([100,30,10])
nraneffs = np.array([4,3,2])
# Number of observations
n = 1000
# If we are doing a degrees of freedom simulation, create the factor vectors, X and Z if
# this is the first run. These will then be used across all following simulations. If we
# are doing a simulation to look at parameter estimation, we recreate the design on every
# run as our focus is to stress test the performance of the algorithms, rather than compare
# performance of one specific model in particular.
if simInd == 1 or not runDF:
# Delete any factor vectors from a previous batch of simulations.
if runDF:
for i in range(len(nlevels)):
if os.path.isfile(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv')):
os.remove(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'))
fvs = None
X = None
Z = None
# Otherwise read the factor vectors, X and Z in from file.
else:
# Initialize empty factor vectors dict
fvs = dict()
# Loop through factors and save factor vectors
for i in range(len(nlevels)):
fvs[i] = pd.io.parsers.read_csv(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'), header=None).values
X = pd.io.parsers.read_csv(os.path.join(OutDir, 'X_' + str(desInd) + '.csv'), header=None).values
Z = pd.io.parsers.read_csv(os.path.join(OutDir, 'Z_' + str(desInd) + '.csv'), header=None).values
# Generate test data
Y,X,Z,nlevels,nraneffs,beta,sigma2,b,D, fvs = genTestData2D(n=n, p=5, nlevels=nlevels, nraneffs=nraneffs, save=True, simInd=simInd, desInd=desInd, OutDir=OutDir, factorVectors=fvs, X=X, Z=Z)
# Save the new factor vectors if this is the first run.
if simInd == 1 and runDF:
# Loop through the factors saving them
for i in range(len(nlevels)):
pd.DataFrame(fvs[i]).to_csv(os.path.join(OutDir, 'fv_' + str(desInd) + '_' + str(i) + '.csv'), index=False, header=None)
pd.DataFrame(X).to_csv(os.path.join(OutDir, 'X_' + str(desInd) + '.csv'), index=False, header=None)
pd.DataFrame(Z).to_csv(os.path.join(OutDir, 'Z_' + str(desInd) + '.csv'), index=False, header=None)
# Work out number of observations, parameters, random effects, etc
n = X.shape[0]
p = X.shape[1]
q = np.sum(nraneffs*nlevels)
qu = np.sum(nraneffs*(nraneffs+1)//2)
r = nlevels.shape[0]
# Tolerance
tol = 1e-6
# Work out factor indices.
facInds = np.cumsum(nraneffs*nlevels)
facInds = np.insert(facInds,0,0)
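    # e.g. for Design 2 (nlevels=[50,25], nraneffs=[3,2]) this gives
    # facInds = [0, 150, 200]; the random-effect columns of Z and D for
    # factor k start at column facInds[k].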
# Convert D to dict
Ddict=dict()
for k in np.arange(len(nlevels)):
Ddict[k] = D[facInds[k]:(facInds[k]+nraneffs[k]),facInds[k]:(facInds[k]+nraneffs[k])]
# Get the product matrices
XtX, XtY, XtZ, YtX, YtY, YtZ, ZtX, ZtY, ZtZ = prodMats2D(Y,Z,X)
# -----------------------------------------------------------------------------
# Create empty data frame for results:
# -----------------------------------------------------------------------------
# Row indices
indexVec = np.array(['Time', 'nit', 'llh'])
for i in np.arange(p):
indexVec = np.append(indexVec, 'beta'+str(i+1))
# Sigma2
indexVec = np.append(indexVec, 'sigma2')
# Dk
for k in np.arange(r):
for j in np.arange(nraneffs[k]*(nraneffs[k]+1)//2):
indexVec = np.append(indexVec, 'D'+str(k+1)+','+str(j+1))
# Sigma2*Dk
    for k in np.arange(r):
        for j in np.arange(nraneffs[k]*(nraneffs[k]+1)//2):
            indexVec = np.append(indexVec, 'sigma2*D'+str(k+1)+','+str(j+1))
import numpy as np
import matplotlib
import matplotlib.pyplot as plt
from matplotlib.ticker import AutoMinorLocator, MultipleLocator, MaxNLocator
from matplotlib.path import Path
from matplotlib.patches import PathPatch
from matplotlib.colors import BoundaryNorm
import matplotlib.image as mpimg
import imageio
from analysis.mesure import *
from util.operations import field_operate as fo
from engines.phi.field._grid import CenteredGrid, StaggeredGrid
import pdb
import sys
def plot_field(field,
plot_type=['surface'],
options=[],
Lx=None, Ly=None, dx=None, dy=None,
lx='x', ly='y', lbar='field', ltitle='Plot Field',
save=False, filename='./field.png',
fig=None, ax=None):
'''Main function to plot vectorial and scalar fields (including masking fields).
USAGE:
-plots=[ [ 'plot_type', [ ['plot options' , xx], ['plot options' , xx] ]],
[ 'plot_type', [ ['plot options' , xx], ['plot options' , xx] ]]
]
plot_type: contourn, mask, surface, streamlines
general_plot_options:
-edges -> [ [edge_hl_x, edge_hl_y], [edge_hr_x, edge_hr_y], [edge_vb_x, edge_vb_y], [edge_vt_x, edge_vt_y] ]
-square -> [x1,x2,y1,y2]
-circle -> [x,y,D]
-aux_contourn -> True/False
-limits -> [min, max]
-vector_axis -> 0/1
-indeces -> True/False
-grid -> True/False
-zoom_position -> [x1,x2,y1,y2]
-full_zoom -> True/False
'''
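    # Illustrative call (argument values are assumptions, not taken from any run):
    #
    #     plot_field(w, plot_type=['surface'],
    #                Lx=1.0, Ly=0.5, dx=0.01, dy=0.01,
    #                lx='x [m]', ly='y [m]', lbar='vorticity', ltitle='Vorticity field',
    #                save=True, filename='./vorticity.png')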
#1.1.Staggered grid == Cells walls
    cx = np.arange(0, Lx + dx/2, dx)  # x-coordinates of the cell walls (0 .. Lx)
import os
import re
import numpy as np
from sklearn import linear_model
from scipy import sparse
import collections
import codecs
import random
class HMM(object):
"""
HMM Model
"""
def __init__(self, dic, decode_type):
"""
Initialize the model.
"""
self.num_words = len(dic['word_to_id'])
self.num_tags = len(dic['tag_to_id'])
self.initial_prob = np.ones([self.num_tags])
self.transition_prob = np.ones([self.num_tags, self.num_tags])
self.emission_prob = np.ones([self.num_tags, self.num_words])
self.decode_type = decode_type
self.q = 0
# This is dummy code to create uniform probability distributions. Feel free to remove it.
self.initial_prob /= np.sum(self.initial_prob)
for i,p in enumerate(self.transition_prob):
p /= np.sum(p)
for i,p in enumerate(self.emission_prob):
p /= np.sum(p)
return
def train(self, corpus):
"""
TODO: Train a bigram HMM model using MLE estimates.
Update self.initial_prob, self.transition_prob and self.emission_prob appropriately.
corpus is a list of dictionaries of the form:
{'str_words': str_words, ### List of string words
'words': words, ### List of word IDs
'tags': tags} ### List of tag IDs
All three lists above have length equal to the sentence length for each instance.
"""
# BEGIN CODE
transition_counts = np.zeros([self.num_tags, self.num_tags]) #initialize matrix for matrix counts
emission_counts = np.zeros([self.num_tags, self.num_words]) #initialize matrix for emission counts
initial_counts = np.zeros([self.num_tags])
for sentence in corpus:
sentence_tags = sentence["tags"]
sentence_words = sentence["words"]
idx = 0
# Loop to count emission and transition
for t_tags, t_words in zip(sentence_tags, sentence_words):
emission_counts[t_tags][t_words] +=1 #add emission counts
if idx == 0:
initial_counts[t_tags] += 1
if idx > 0:
transition_counts[sentence_tags[idx - 1]][t_tags] += 1
idx +=1
self.initial_prob = (1/np.sum(initial_counts)) * initial_counts
emission_sum = np.sum(emission_counts, axis=1)
transition_sum = np.sum(transition_counts, axis=1)
for i in range(self.num_tags):
self.emission_prob[i] = (emission_counts[i]) / (emission_sum[i])
self.transition_prob[i] = (transition_counts[i]) / (transition_sum[i])
# END CODE
return
def greedy_decode(self, sentence):
"""
Decode a single sentence in Greedy fashion
Return a list of tags.
"""
tags = []
init_scores = [self.initial_prob[t] * self.emission_prob[t][sentence[0]] for t in range(self.num_tags)]
tags.append(np.argmax(init_scores))
for w in sentence[1:]:
scores = [self.transition_prob[tags[-1]][t] * self.emission_prob[t][w] for t in range(self.num_tags)]
tags.append(np.argmax(scores))
assert len(tags) == len(sentence)
return tags
def viterbi_decode(self, sentence):
"""
TODO: Decode a single sentence using the Viterbi algorithm.
Return a list of tags.
"""
tags = []
# BEGIN CODE
#Initial scores
init_scores = [self.initial_prob[t] * self.emission_prob[t][sentence[0]] for t in range(self.num_tags)]
#Initialize array to compute viterbi
len_sent = len(sentence)
viterb_arr = np.zeros([self.num_tags,len_sent])
back_tag_arr = np.zeros([self.num_tags,(len_sent-1)])
viterb_max_list = np.zeros(len(sentence))
for idx, w in enumerate(sentence):
# Initial probabilities
if idx == 0:
for t in range(self.num_tags):
                    viterb_arr[t][idx] = init_scores[t]  # init_scores already includes the emission term for sentence[0]
else:
for t in range(self.num_tags):
possible_list = []
for p in range(self.num_tags):
possible_list.append(viterb_arr[p][idx-1] * self.transition_prob[p][t] * self.emission_prob[t][w])
viterb_arr[t][idx] = np.max(possible_list) #Get the maximum value
back_tag_arr[t][idx-1] = np.argmax(possible_list)
tags_revr = []
for idx in list(range(len_sent))[::-1]:
if idx == (len_sent - 1):
max_b = np.argmax(viterb_arr[:,idx])
tags_revr.append(max_b)
else:
max_b = back_tag_arr[int(max_b)][idx]
tags_revr.append(max_b)
tags = tags_revr[::-1]
# END CODE
assert len(tags) == len(sentence)
return tags
def tag(self, sentence):
"""
Tag a sentence using a trained HMM.
"""
if self.decode_type == 'viterbi':
return self.viterbi_decode(sentence)
else:
return self.greedy_decode(sentence)
#Note: make_windowed_data below hard-codes a context window of 2 (i.e. a 5-word window)
class FFN(object):
"""
Window-based feed forward neural network classifier
"""
def __init__(self, dic, embedding, hidden_size=15, window=2):
"""
Initialize the model.
"""
self.num_words = len(dic['word_to_id'])
self.num_tags = len(dic['tag_to_id'])
self.dic=dic
self.window = window
self.hidden_size = hidden_size
self.learning_rate = 0.15
self.eps = 0.0006
# This contains a dictionary of word embeddings {str_word -> embedding}
self.embedding=embedding
self.embedding_size = list(self.embedding.values())[0].shape[1]
# TODO: make sure to initialize these appropriately.
        #INITIALIZED BELOW with variance-scaled Gaussian weights (std = sqrt(2/(fan_in + fan_out))).
np.random.seed(117)
self.w=np.random.randn(((2*self.window)+1) * self.embedding_size, self.hidden_size) * np.sqrt(2/( (((2*self.window)+1) * self.embedding_size) + self.hidden_size)) # weights for hidden layer
self.b1=np.random.rand(self.hidden_size) # bias for hidden layer
self.u = np.random.randn(self.hidden_size, 5) * np.sqrt(2/( self.hidden_size + 5)) # weights for output layer
self.b2 = np.random.rand(5) # bias for output layer
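        # Shapes: w is ((2*window+1)*embedding_size, hidden_size), u is (hidden_size, 5);
        # the hard-coded 5 is assumed to equal self.num_tags for the tag set used here.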
return
def make_windowed_data(self, sentence, tags):
"""
TODO: Convert a single sentence and corresponding tags into a batch of inputs and outputs to the FFN
"""
input_vector=np.zeros([len(sentence), (2*self.window+1) * self.embedding_size])
output_vector=np.zeros([len(sentence), self.num_tags])
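        # With the default window=2, each row of input_vector is the concatenation of
        # 5 word embeddings, e.g. 5*50 = 250 values for 50-dimensional embeddings
        # (the embedding size is illustrative); output_vector holds one-hot tag rows.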
#BEGIN CODE
len_sent = len(sentence)
for idx, (w, t) in enumerate(zip(sentence, tags)):
output_vector[idx][t] = 1
#INPUT VECTOR
            #Gather the embeddings of the 5 window positions idx-2 .. idx+2 (zero-padding at sentence edges and for OOV words)
for i in [1,2,3,4,5]:
k = i - 3
if (0 <= idx + k < len_sent):
key_string = (str(sentence[idx + k])).lower()
if key_string in self.embedding:
input_vector[idx][(i-1) * self.embedding_size: (i) * self.embedding_size] = self.embedding[key_string]
else:
input_vector[idx][(i-1) * self.embedding_size: (i) * self.embedding_size] = np.zeros(self.embedding_size)
else:
input_vector[idx][(i-1) * self.embedding_size: (i) * self.embedding_size] = np.zeros(self.embedding_size)
#OUTPUT vector
#END CODE
return input_vector,output_vector
def train(self, corpus):
"""
TODO: Train the FFN with stochastic gradient descent.
For each sentence in the corpus, convert it to a batch of inputs, compute the log loss and apply stochastic gradient descent on the parameters.
"""
# Useful functions
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_derivative(x):
            return sigmoid(x) * (1 - sigmoid(x))
def softmax(A):
expA = np.exp(A)
return expA / expA.sum(axis=0, keepdims=True)
def stablesoftmax(A):
"""Compute the softmax of vector x in a numerically stable way."""
shiftA = A - np.max(A)
exps = np.exp(shiftA)
return exps / np.sum(exps)
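        # Subtracting the max makes the largest exponent zero, preventing overflow in
        # np.exp while leaving the softmax output unchanged.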
eps = self.eps
# BEGIN CODE
step_size = self.learning_rate
#1. TODO: Initialize any useful variables here.
i =0
converge_count = 0
# FOR EACH EPOCH:
while i < 35 :
#FOR EACH sentence in CORPUS:
if converge_count < 2:
random.shuffle(corpus) #Randomize ordered
#if i % 1 ==0:
#print("ROUND",i)
i +=1
for k, sentence in enumerate(corpus):
str_words = sentence["str_words"]
sent_tags = sentence["tags"]
#2. TODO: Make windowed batch data
in_out_obj = self.make_windowed_data(str_words, sent_tags)
input_vector = in_out_obj[0]
output_vector = in_out_obj[1]
#2A create gradients for the entire sentences
grad_b2_sum = 0
grad_b1_sum = 0
grad_w_sum = 0
grad_u_sum = 0
len_in = len(input_vector)
#3. TODO: Do a forward pass through the network.
# loop in the sentence itself
for in_vec, out_vec in zip(input_vector, output_vector):
sig_input = np.matmul(in_vec, self.w) + self.b1
#print(sig_input.shape)
h_t = sigmoid(sig_input)
#print(h_t.shape)
y_hat = softmax(np.matmul(h_t, self.u) + self.b2)
#rint(y_hat.shape)
#4. TODO: Do a backward pass through the network to compute required gradients.
dJ_dB = y_hat - out_vec
dJ_dh = np.matmul(dJ_dB, np.transpose(self.u))
dJ_dA = np.multiply(sigmoid_derivative(sig_input), dJ_dh)
grad_b1_sum += dJ_dA
grad_b2_sum += dJ_dB
grad_w_sum += np.outer(np.transpose(in_vec), dJ_dA) ##OBtain outer product
grad_u_sum += np.outer(np.transpose(h_t), (dJ_dB)) #outer product of 2 vectors
#if np.isnan(np.linalg.norm(grad_u_sum)) == False:
# print("ROUND")
# print(y_hat)
# print(out_vec)
# print(np.linalg.norm(grad_u_sum))
# print(np.linalg.norm(grad_b1_sum))
# print(np.linalg.norm(grad_b2_sum))
#5. TODO: Update the weights (self.w, self.b1, self.u, self.b2)
self.b2 = self.b2 - (step_size * (grad_b2_sum/ len_in))
self.w = self.w - (step_size * (grad_w_sum/len_in))
self.b1 = self.b1 - (step_size * (grad_b1_sum/len_in))
self.u = self.u - (step_size * (grad_u_sum/len_in))
if np.all(np.absolute(grad_u_sum/len_in) < eps) and np.all(np.absolute(grad_b2_sum/len_in) < eps) and np.all(np.absolute(grad_w_sum/len_in) < eps) and np.all(np.absolute(grad_b1_sum/len_in) < eps):
self.b2 = self.b2 + (step_size * (grad_b2_sum/ len_in))
self.w = self.w + (step_size * (grad_w_sum/len_in))
self.b1 = self.b1 + (step_size * (grad_b1_sum/len_in))
self.u = self.u + (step_size * (grad_u_sum/len_in))
#print("CONVERGED!")
converge_count +=1
eps = eps * 0.9
if converge_count > 1:
break
else:
continue
break
else:
continue
break
# END CODE
return
def tag(self, sentence):
"""
TODO: Tag a sentence using a trained FFN model.
Since this model is not sequential (why?), you do not need to do greedy or viterbi decoding.
"""
tags = []
# Helper functions.
def sigmoid(x):
return 1/(1+np.exp(-x))
def sigmoid_derivative(x):
return sigmoid(x) * (1 - sigmoid(x))
def softmax(A):
expA = np.exp(A)
return expA / expA.sum(axis=0, keepdims=True)
from contextlib import contextmanager
from copy import deepcopy
from functools import partial
import sys
import warnings
import numpy as np
from numpy.testing import assert_equal
import pytest
from numpy.testing import assert_allclose
from expyfun import ExperimentController, visual, _experiment_controller
from expyfun._experiment_controller import _get_dev_db
from expyfun._utils import (_TempDir, fake_button_press, _check_skip_backend,
fake_mouse_click, requires_opengl21,
_wait_secs as wait_secs, known_config_types,
_new_pyglet)
from expyfun._sound_controllers._sound_controller import _SOUND_CARD_KEYS
from expyfun.stimuli import get_tdt_rates
std_args = ['test'] # experiment name
std_kwargs = dict(output_dir=None, full_screen=False, window_size=(8, 8),
participant='foo', session='01', stim_db=0.0, noise_db=0.0,
verbose=True, version='dev')
def dummy_print(string):
"""Print."""
print(string)
@pytest.mark.parametrize('ws', [(2, 1), (1, 1)])
def test_unit_conversions(hide_window, ws):
"""Test unit conversions."""
kwargs = deepcopy(std_kwargs)
kwargs['stim_fs'] = 44100
kwargs['window_size'] = ws
with ExperimentController(*std_args, **kwargs) as ec:
verts = np.random.rand(2, 4)
for to in ['norm', 'pix', 'deg', 'cm']:
for fro in ['norm', 'pix', 'deg', 'cm']:
v2 = ec._convert_units(verts, fro, to)
v2 = ec._convert_units(v2, to, fro)
assert_allclose(verts, v2)
# test that degrees yield equiv. pixels in both directions
verts = np.ones((2, 1))
v0 = ec._convert_units(verts, 'deg', 'pix')
verts = np.zeros((2, 1))
v1 = ec._convert_units(verts, 'deg', 'pix')
v2 = v0 - v1 # must check deviation from zero position
assert_allclose(v2[0], v2[1])
pytest.raises(ValueError, ec._convert_units, verts, 'deg', 'nothing')
pytest.raises(RuntimeError, ec._convert_units, verts[0], 'deg', 'pix')
def test_validate_audio(hide_window):
"""Test that validate_audio can pass through samples."""
with ExperimentController(*std_args, suppress_resamp=True,
**std_kwargs) as ec:
ec.set_stim_db(_get_dev_db(ec.audio_type) - 40) # 0.01 RMS
assert ec._stim_scaler == 1.
for shape in ((1000,), (1, 1000), (2, 1000)):
samples_in = np.zeros(shape)
samples_out = ec._validate_audio(samples_in)
assert samples_out.shape == (1000, 2)
assert samples_out.dtype == np.float32
assert samples_out is not samples_in
for order in 'CF':
samples_in = np.zeros((2, 1000), dtype=np.float32, order=order)
samples_out = ec._validate_audio(samples_in)
assert samples_out.shape == samples_in.shape[::-1]
assert samples_out.dtype == np.float32
# ensure that we have not made a copy, just a view
assert samples_out.base is samples_in
def test_data_line(hide_window):
"""Test writing of data lines."""
entries = [['foo'],
['bar', 'bar\tbar'],
['bar2', r'bar\tbar'],
['fb', None, -0.5]]
# this is what should be written to the file for each one
goal_vals = ['None', 'bar\\tbar', 'bar\\\\tbar', 'None']
assert_equal(len(entries), len(goal_vals))
temp_dir = _TempDir()
with std_kwargs_changed(output_dir=temp_dir):
with ExperimentController(*std_args, stim_fs=44100,
**std_kwargs) as ec:
for ent in entries:
ec.write_data_line(*ent)
fname = ec._data_file.name
with open(fname) as fid:
lines = fid.readlines()
# check the header
assert_equal(len(lines), len(entries) + 4) # header, colnames, flip, stop
assert_equal(lines[0][0], '#') # first line is a comment
for x in ['timestamp', 'event', 'value']: # second line is col header
assert (x in lines[1])
assert ('flip' in lines[2]) # ec.__init__ ends with a flip
assert ('stop' in lines[-1]) # last line is stop (from __exit__)
outs = lines[1].strip().split('\t')
assert (all(l1 == l2 for l1, l2 in zip(outs, ['timestamp',
'event', 'value'])))
# check the entries
ts = []
for line, ent, gv in zip(lines[3:], entries, goal_vals):
outs = line.strip().split('\t')
assert_equal(len(outs), 3)
# check timestamping
if len(ent) == 3 and ent[2] is not None:
assert_equal(outs[0], str(ent[2]))
else:
ts.append(float(outs[0]))
# check events
assert_equal(outs[1], ent[0])
#!/usr/bin/env python3
import sys, getopt, math, os, time, traceback
import numpy as np
import mdtraj as mdt
import parmed as pmd
################################# Arguments ###################################
# Default parameters
traj_file = ''
psf_file = ''
native_AA_pdb = ''
num_samples = 5
n_boot = int(1e4)
fasta_seq = ''
disordered_pdb = ''
# This is the trimer interface region that should be removed from the propensity estimation
offset_sel = ''
agg_prop_file = 'agg_pro.dat'
deg_sel = ':ILE,VAL,LEU,PHE,CYS,MET,ALA,GLY,TRP'
usage = 'Usage: python %s -f <control file>' % sys.argv[0]  # minimal usage message (assumed; the original string is not shown)
# read control file
ctrlfile = ''
if len(sys.argv) == 1:
print(usage)
sys.exit()
try:
opts, args = getopt.getopt(sys.argv[1:],"hf:", ["ctrlfile="])
except getopt.GetoptError:
print(usage)
sys.exit()
for opt, arg in opts:
if opt == '-h':
print(usage)
sys.exit()
elif opt in ("-f", "--ctrlfile"):
ctrlfile = arg
if not os.path.exists(ctrlfile):
print('Error: cannot find control file ' + ctrlfile + '.')
sys.exit()
file_object = open(ctrlfile,'r')
try:
for line in file_object:
line = line.strip()
if not line:
# This is a blank line
continue
if line.startswith('#'):
# This is a comment line
continue
if line.startswith('traj_file'):
words = line.split('=')
traj_file = words[1].strip()
continue
if line.startswith('psf_file'):
words = line.split('=')
psf_file = words[1].strip()
continue
if line.startswith('native_AA_pdb'):
words = line.split('=')
native_AA_pdb = words[1].strip()
continue
if line.startswith('num_samples'):
words = line.split('=')
num_samples = int(words[1].strip())
continue
if line.startswith('n_boot'):
words = line.split('=')
n_boot = int(words[1].strip())
continue
if line.startswith('fasta_seq'):
words = line.split('=')
fasta_seq = words[1].strip()
continue
if line.startswith('disordered_pdb'):
words = line.split('=')
disordered_pdb = words[1].strip()
continue
if line.startswith('offset_sel'):
words = line.split('=')
offset_sel = words[1].strip()
continue
if line.startswith('agg_prop_file'):
words = line.split('=')
agg_prop_file = words[1].strip()
continue
if line.startswith('deg_sel'):
words = line.split('=')
deg_sel = words[1].strip()
continue
finally:
file_object.close()
################################# Functions ###################################
def bootstrap(boot_fun, data, n_time):
idx_list = np.arange(len(data))
if len(data.shape) == 1:
boot_stat = np.zeros(n_time)
elif len(data.shape) == 2:
boot_stat = np.zeros((n_time, data.shape[1]))
else:
print('bootstrap: Can only handle 1 or 2 dimensional data')
sys.exit()
for i in range(n_time):
sample_idx_list = np.random.choice(idx_list, len(idx_list))
if len(data.shape) == 1:
new_data = data[sample_idx_list]
boot_stat[i] = boot_fun(new_data)
elif len(data.shape) == 2:
new_data = data[sample_idx_list, :]
for j in range(data.shape[1]):
boot_stat[i,j] = boot_fun(new_data[:,j])
return boot_stat
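# Example (sketch, names assumed): a 95% bootstrap confidence interval for the mean
#   data = np.random.rand(100)
#   boot_means = bootstrap(np.mean, data, 1000)
#   lo, hi = np.percentile(boot_means, [2.5, 97.5])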
################################## MAIN #######################################
struct = pmd.load_file(psf_file)
ext_struct = pmd.load_file(disordered_pdb)
for idx, res in enumerate(ext_struct.residues):
struct.residues[idx].name = res.name
# Estimate Aggregation site
agg_sel = ':'
f = open(agg_prop_file)  # use the configurable aggregation-propensity file instead of a hard-coded name
lines = f.readlines()
f.close()
for li, line in enumerate(lines):
if line.startswith('HITS'):
break
for line in lines[li+1:]:
line = line.strip()
if line.startswith('>--->'):
agg_sel += line.split(':')[1]
break
agg_sel += offset_sel
print('Agg_sel: %s'%agg_sel)
agg_idx = list(pmd.amber.AmberMask(struct, agg_sel).Selected())
# Estimate Degradation site
deg_sel += offset_sel
print('Deg_sel: %s'%deg_sel)
deg_idx = list(pmd.amber.AmberMask(struct, deg_sel).Selected())
# Estimate Chaperone binding site
cb_sel = ':'
lines = os.popen('ChaperISM.py '+fasta_seq+' -qt').readlines()
for li, line in enumerate(lines):
if line.startswith(' POSITION '):
break
cb_site_list = []
for line in lines[li+2:len(lines)-1]:
words = line.strip().split()
if words[-1] == '*':
#cb_site_list += list(np.arange(int(words[0])+1, int(words[0])+8))
cb_site_list.append(int(words[0])+4)
cb_site_list = np.unique(cb_site_list)
cb_site_list = [str(c) for c in cb_site_list]
cb_sel += ','.join(cb_site_list)
cb_sel += offset_sel
print('CB_sel: %s'%cb_sel)
cb_idx = list(pmd.amber.AmberMask(struct, cb_sel).Selected())
# Calculate SASA
traj = mdt.load(traj_file, top=psf_file)
SASA_list = []
for frame_id in range(traj.n_frames):
#for frame_id in range(5):
SASA_0 = []
state_id = int(frame_id / num_samples)
rep_id = frame_id - state_id * num_samples
os.system('mkdir tmp')
os.system('cp '+native_AA_pdb+' tmp/')
native_AA_name = native_AA_pdb.strip().split('/')[-1]
os.chdir('tmp')
traj[frame_id].save('tmp.pdb', force_overwrite=True)
screen = os.popen('backmap.py -i '+native_AA_name+' -c tmp.pdb 2>&1').readlines()
if not os.path.exists('tmp_rebuilt.pdb'):
SASA_0 = [np.nan, np.nan, np.nan]
for s in screen:
print(s)
print('Failed to backmap')
else:
struct = mdt.load('tmp_rebuilt.pdb')
sasa = mdt.shrake_rupley(struct, mode='residue')[0]
agg_sasa = np.sum(sasa[agg_idx])
deg_sasa = np.sum(sasa[deg_idx])
cb_sasa = np.sum(sasa[cb_idx])
SASA_0 = [agg_sasa, deg_sasa, cb_sasa]
SASA_list.append(SASA_0)
os.chdir('../')
os.system('rm -rf tmp/')
print('Frame %d done'%frame_id)
struct = mdt.load(disordered_pdb)
sasa = mdt.shrake_rupley(struct, mode='residue')[0]
agg_sasa = np.sum(sasa[agg_idx])
deg_sasa = np.sum(sasa[deg_idx])
cb_sasa = np.sum(sasa[cb_idx])
SASA_list = [[agg_sasa, deg_sasa, cb_sasa]] + SASA_list
SASA_list = np.array(SASA_list)
n_states = int((len(SASA_list)-1) / num_samples)
SASA_avg = np.zeros((n_states+1, SASA_list.shape[1]))
SASA_std = np.zeros((n_states+1, SASA_list.shape[1]))
SASA_avg[0,:] = SASA_list[0,:]
SASA_std[0,:] = np.zeros(SASA_list.shape[1])
for i in range(n_states):
SASA_avg[i+1,:] = np.mean(SASA_list[i*num_samples+1:(i+1)*num_samples+1,:], axis=0)
SASA_std[i+1,:] = np.std(SASA_list[i*num_samples+1:(i+1)*num_samples+1,:], axis=0)
# Copyright (c) 2020 PaddlePaddle Authors. All Rights Reserve.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
import random
import numpy as np
import math
from PIL import Image
from ..registry import PIPELINES
from collections.abc import Sequence
@PIPELINES.register()
class Scale(object):
"""
Scale images.
Args:
short_size(float | int): Short size of an image will be scaled to the short_size.
"""
def __init__(self, short_size):
self.short_size = short_size
def __call__(self, results):
"""
Performs resize operations.
Args:
imgs (Sequence[PIL.Image]): List where each item is a PIL.Image.
For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]
return:
resized_imgs: List where each item is a PIL.Image after scaling.
"""
imgs = results['imgs']
resized_imgs = []
for i in range(len(imgs)):
img = imgs[i]
w, h = img.size
if (w <= h and w == self.short_size) or (h <= w
and h == self.short_size):
resized_imgs.append(img)
continue
if w < h:
ow = self.short_size
oh = int(self.short_size * 4.0 / 3.0)
resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))
else:
oh = self.short_size
ow = int(self.short_size * 4.0 / 3.0)
resized_imgs.append(img.resize((ow, oh), Image.BILINEAR))
results['imgs'] = resized_imgs
return results
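# Example (sketch, file names assumed): rescale a list of frames so that the shorter side
# equals short_size (the longer side is fixed to 4/3 of it by this implementation)
#   scale_op = Scale(short_size=256)
#   results = scale_op({'imgs': [Image.open('frame_00.jpg'), Image.open('frame_01.jpg')]})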
@PIPELINES.register()
class RandomCrop(object):
"""
Random crop images.
Args:
target_size(int): Random crop a square with the target_size from an image.
"""
def __init__(self, target_size):
self.target_size = target_size
def __call__(self, results):
"""
Performs random crop operations.
Args:
imgs: List where each item is a PIL.Image.
For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]
return:
crop_imgs: List where each item is a PIL.Image after random crop.
"""
imgs = results['imgs']
w, h = imgs[0].size
th, tw = self.target_size, self.target_size
assert (w >= self.target_size) and (h >= self.target_size), \
"image width({}) and height({}) should be larger than crop size".format(
w, h, self.target_size)
crop_images = []
x1 = random.randint(0, w - tw)
y1 = random.randint(0, h - th)
for img in imgs:
if w == tw and h == th:
crop_images.append(img)
else:
crop_images.append(img.crop((x1, y1, x1 + tw, y1 + th)))
results['imgs'] = crop_images
return results
@PIPELINES.register()
class CenterCrop(object):
"""
Center crop images.
Args:
target_size(int): Center crop a square with the target_size from an image.
"""
def __init__(self, target_size):
self.target_size = target_size
def __call__(self, results):
"""
Performs Center crop operations.
Args:
imgs: List where each item is a PIL.Image.
For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]
return:
ccrop_imgs: List where each item is a PIL.Image after Center crop.
"""
imgs = results['imgs']
ccrop_imgs = []
for img in imgs:
w, h = img.size
th, tw = self.target_size, self.target_size
assert (w >= self.target_size) and (h >= self.target_size), \
"image width({}) and height({}) should be larger than crop size".format(
w, h, self.target_size)
x1 = int(round((w - tw) / 2.))
y1 = int(round((h - th) / 2.))
ccrop_imgs.append(img.crop((x1, y1, x1 + tw, y1 + th)))
results['imgs'] = ccrop_imgs
return results
@PIPELINES.register()
class MultiScaleCrop(object):
def __init__(
self,
target_size, #NOTE: named target size now, but still pass short size in it!
scales=None,
max_distort=1,
fix_crop=True,
more_fix_crop=True):
self.target_size = target_size
self.scales = scales if scales else [1, .875, .75, .66]
self.max_distort = max_distort
self.fix_crop = fix_crop
self.more_fix_crop = more_fix_crop
def __call__(self, results):
"""
Performs MultiScaleCrop operations.
Args:
imgs: List where each item is a PIL.Image.
return:
results: dict with the multi-scale-cropped and resized images under 'imgs'.
"""
imgs = results['imgs']
input_size = [self.target_size, self.target_size]
im_size = imgs[0].size
# get random crop offset
def _sample_crop_size(im_size):
image_w, image_h = im_size[0], im_size[1]
base_size = min(image_w, image_h)
crop_sizes = [int(base_size * x) for x in self.scales]
crop_h = [
input_size[1] if abs(x - input_size[1]) < 3 else x
for x in crop_sizes
]
crop_w = [
input_size[0] if abs(x - input_size[0]) < 3 else x
for x in crop_sizes
]
pairs = []
for i, h in enumerate(crop_h):
for j, w in enumerate(crop_w):
if abs(i - j) <= self.max_distort:
pairs.append((w, h))
crop_pair = random.choice(pairs)
if not self.fix_crop:
w_offset = random.randint(0, image_w - crop_pair[0])
h_offset = random.randint(0, image_h - crop_pair[1])
else:
w_step = (image_w - crop_pair[0]) / 4
h_step = (image_h - crop_pair[1]) / 4
ret = list()
ret.append((0, 0)) # upper left
if w_step != 0:
ret.append((4 * w_step, 0)) # upper right
if h_step != 0:
ret.append((0, 4 * h_step)) # lower left
if h_step != 0 and w_step != 0:
ret.append((4 * w_step, 4 * h_step)) # lower right
if h_step != 0 or w_step != 0:
ret.append((2 * w_step, 2 * h_step)) # center
if self.more_fix_crop:
ret.append((0, 2 * h_step)) # center left
ret.append((4 * w_step, 2 * h_step)) # center right
ret.append((2 * w_step, 4 * h_step)) # lower center
ret.append((2 * w_step, 0 * h_step)) # upper center
ret.append((1 * w_step, 1 * h_step)) # upper left quarter
ret.append((3 * w_step, 1 * h_step)) # upper right quarter
ret.append((1 * w_step, 3 * h_step)) # lower left quarter
ret.append((3 * w_step, 3 * h_step)) # lower right quarter
w_offset, h_offset = random.choice(ret)
return crop_pair[0], crop_pair[1], w_offset, h_offset
crop_w, crop_h, offset_w, offset_h = _sample_crop_size(im_size)
crop_img_group = [
img.crop((offset_w, offset_h, offset_w + crop_w, offset_h + crop_h))
for img in imgs
]
ret_img_group = [
img.resize((input_size[0], input_size[1]), Image.BILINEAR)
for img in crop_img_group
]
results['imgs'] = ret_img_group
return results
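# Example (sketch): with target_size=224, scales=[1, .875, .75, .66] and a 340x256 frame,
# base_size=256 gives candidate crop sides {256, 224, 192, 168}; any (w, h) pair whose
# scale indices differ by at most max_distort=1 may be sampled, the crop is taken at one of
# the fixed grid offsets (fix_crop=True), and the result is resized back to 224x224.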
@PIPELINES.register()
class RandomFlip(object):
"""
Random Flip images.
Args:
p(float): Random flip images with the probability p.
"""
def __init__(self, p=0.5):
self.p = p
def __call__(self, results):
"""
Performs random flip operations.
Args:
imgs: List where each item is a PIL.Image.
For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]
return:
flip_imgs: List where each item is a PIL.Image after random flip.
"""
imgs = results['imgs']
v = random.random()
if v < self.p:
results['imgs'] = [
img.transpose(Image.FLIP_LEFT_RIGHT) for img in imgs
]
else:
results['imgs'] = imgs
return results
@PIPELINES.register()
class Image2Array(object):
"""
transfer PIL.Image to Numpy array and transpose dimensions from 'dhwc' to 'dchw'.
Args:
transpose: whether to transpose or not, default False. True for tsn.
"""
def __init__(self, transpose=True):
self.transpose = transpose
def __call__(self, results):
"""
Performs Image to NumpyArray operations.
Args:
imgs: List where each item is a PIL.Image.
For example, [PIL.Image0, PIL.Image1, PIL.Image2, ...]
return:
np_imgs: Numpy array.
"""
imgs = results['imgs']
np_imgs = np.stack(imgs).astype('float32')
if self.transpose:
np_imgs = np_imgs.transpose((0, 3, 1, 2)) # 'dhwc' -> 'dchw' as described in the docstring
results['imgs'] = np_imgs
return results
from models.dss_layer import DSSConv2d, DSSInput, DSSInvolution, DSSAlign
import torch.nn as nn
import numpy as np
import torch
import random
########################################################################
# Base Network
# ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
class DSSNet(nn.Module):
def __init__(self, settings=[]):
super(DSSNet, self).__init__()
self.settings = settings
self.selected_out =[]
self.fhooks = []
self.split_half = True
self.layer_wise_DSS = False
self.selected_idx = 0
self.epoch_step = 0
self.is_aligned = False
# hooks
def search_DSS_cand(self):
DSS_cand = []
for m in self.modules():
if isinstance(m, DSSConv2d) or isinstance(m, DSSAlign):
DSS_cand.append(m)
self.DSS_cand = DSS_cand
print(DSS_cand)
return DSS_cand
def search_th_cand(self):
th_cand = []
for m in self.modules():
if isinstance(m, DSSConv2d) or isinstance(m, DSSAlign) or isinstance(m, DSSInput):
th_cand.append(m)
self.th_cand = th_cand
return th_cand
def reset_hook(self):
for fhook in self.fhooks:
fhook.remove()
self.fhooks.clear()
self.selected_out.clear()
def register_rand_hook(self):
for fhook in self.fhooks:
fhook.remove()
self.fhooks.clear()
self.selected_out.clear()
self.selected_idx = random.choices(range(len(self.DSS_cand)), weights=self.n_connections)[0]
# self.selected_idx = np.random.randint(low=0, high=len(self.DSS_cand), size=1)[0]
# print('register_rand_hook %d %d '%(self.selected_idx, len(self.fhooks)))
self.fhooks.append(self.DSS_cand[self.selected_idx].register_forward_hook(self.forward_hook(self.selected_idx)))
# In Sigma-Delta DSS loss is computed layer-wises
# Note: this module is compatible only for fully DSSConv2d network with DSSInput
def mask_scale_grad(self):
if self.layer_wise_DSS:
for idx, m in enumerate(self.th_cand):
if (idx==self.selected_idx):
m.th.requires_grad = True
else:
m.th.requires_grad = False
def reset_mask(self):
for m in self.modules():
if isinstance(m, DSSConv2d):
m.reset_mask()
def pre_process(self):
self.clip_scale()
# self.change_quantizer()
self.mask_scale_grad()
if self.epoch_step<self.settings.n_warpup_epochs:
self.reset_mask()
def register_all_hook(self):
for fhook in self.fhooks:
fhook.remove()
self.fhooks.clear()
self.selected_out.clear()
# print('register_all_hook %d '%(len(self.fhooks)))
for idx, module in enumerate(self.DSS_cand):
self.fhooks.append(module.register_forward_hook(self.forward_hook(idx)))
def forward_hook(self, selected_idx):
def hook(module, input, output):
# print('forward_hook idx:%d, numel %d'%(selected_idx, output.numel()))
dss_stats = module.get_exp(input)
self.selected_out.append([selected_idx, dss_stats])
return hook
def reset_mp(self):
for module in self.modules():
module.mp = []
module.mp_lft = []
def set_thr(self, scale_val, weight=[]):
self.scale = scale_val
if len(weight)==0 and isinstance(scale_val, list)==False:
for module in self.modules():
module.th.data = torch.tensor(scale_val)
# print(module.th.data>0)
module.th.requires_grad = (module.th.data>0).item()
elif len(scale_val)>1:
for idx, module in enumerate(self.modules()):
module.th.data = torch.tensor(scale_val[idx])
module.th.requires_grad = (module.th.data > 0).item()
else:
for module, weight_ in zip(self.modules(), weight):
module.th.data = scale_val*weight_
module.th.requires_grad = True
def set_training(self, training):
for module in self.modules():
if isinstance(module, DSSConv2d) or isinstance(module, DSSInput):
module.training = training
def set_train_mode(self, flags):
def _set_requires_grad(module, atr_name, value):
if hasattr(module, atr_name) and getattr(module, atr_name)!=None:
getattr(module, atr_name).requires_grad = value
for module in self.modules():
_set_requires_grad(module, 'th', flags[0])
_set_requires_grad(module, 'm', flags[1])
_set_requires_grad(module, 'b', flags[2])
_set_requires_grad(module, 'w', flags[3])
def clip_scale(self):
for module in self.modules():
if isinstance(module, DSSConv2d) or isinstance(module, DSSInput):
quantizer = self.settings.quantizer
if quantizer in ['MG_divmul_lin', 'floor', 'ULSQ_muldiv_lin']:
module.th.data.clamp_(1.0/(self.settings.max_scale-self.settings.min_scale), None)
elif quantizer in ['MG_muldiv_lin','ULSQ_divmul_lin', 'floor_inv', 'nan']:
module.th.data.clamp_(None, self.settings.max_scale-self.settings.min_scale)
elif quantizer in ['MG_divmul_log', 'floor_log','ULSQ_divmul_log']:
module.th.data.clamp_(0, None)
elif quantizer in ['MG_muldiv_log','ULSQ_muldiv_log', 'floor_inv_log']:
module.th.data.clamp_(None, 0)
else:
print('Not implemented')
def get_scale(self):
scale = []
for module in self.modules():
if isinstance(module, DSSConv2d) or isinstance(module, DSSInput):
scale.append(module.get_scale().mean().data.item())
return np.array(scale)
# Copyright 2019 The TensorFlow Hub Authors. All Rights Reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# ==============================================================================
"""Tests for module search utility."""
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from sklearn.neighbors import KNeighborsClassifier
from sklearn import metrics
import tensorflow.compat.v2 as tf
from tensorflow_hub.tools.module_search import utils
class TestUtils(tf.test.TestCase):
train_samples = 450
test_samples = 50
dim = 10
classes = 7
random_seed = 127
def test_compute_distance_matrix(self):
np.random.seed(seed=self.random_seed)
x_train = np.random.rand(self.train_samples, self.dim)
x_test = np.random.rand(self.test_samples, self.dim)
d = utils.compute_distance_matrix(x_train, x_test)
self.assertEqual(d.shape, (self.test_samples, self.train_samples))
for i in range(self.test_samples):
for j in range(self.train_samples):
d_ij = np.linalg.norm(x_train[j, :] - x_test[i, :])**2
self.assertAlmostEqual(d_ij, d[i, j], places=5)
def test_compute_distance_matrix_loo(self):
np.random.seed(seed=self.random_seed)
x_train = np.random.rand(self.train_samples, self.dim)
d = utils.compute_distance_matrix_loo(x_train)
self.assertEqual(d.shape, (self.train_samples, self.train_samples))
for i in range(self.train_samples):
for j in range(self.train_samples):
if i == j:
self.assertEqual(float("inf"), d[i, j])
else:
d_ij = np.linalg.norm(x_train[j, :] - x_train[i, :])**2
self.assertAlmostEqual(d_ij, d[i, j], places=5)
def knn_errorrate(self, k):
x_train = np.random.rand(self.train_samples, self.dim)
x_test = np.random.rand(self.test_samples, self.dim)
d = utils.compute_distance_matrix(x_train, x_test)
y_test = np.random.randint(self.classes, size=self.test_samples)
y_train = np.random.randint(self.classes, size=self.train_samples)
err = utils.knn_errorrate(d, y_train, y_test, k=k)
knn = KNeighborsClassifier(n_neighbors=k)
knn.fit(x_train, y_train)
y_pred = knn.predict(x_test)
acc = metrics.accuracy_score(y_test, y_pred)
self.assertAlmostEqual(1.0 - err, acc, places=5)
def test_knn_errorrate(self):
np.random.seed(seed=self.random_seed)
ks = [1, 3, 5]
for idx, val in enumerate(ks):
with self.subTest(i=idx):
self.knn_errorrate(val)
def knn_errorrate_loo(self, k):
x_train = np.random.rand(self.train_samples, self.dim)
import pandas as pd
#drop unknow artist
import matplotlib as mpl
import matplotlib.pyplot as plt
log_dir ='logs/'
mpl.rcParams['figure.figsize'] = (22, 20)
dataset=pd.read_csv('/content/MultitaskPainting100k_Dataset_groundtruth/groundtruth_multiloss_train_header.csv')
# indexName=pf[pf['artist']=='Unknown photographer'].index
# pf.drop(indexName,inplace=True)
# grouped = pf.groupby(['artist']).size().reset_index(name='counts')
# p=grouped.sort_values('counts', ascending=False).head(50)
# top50=p['artist'].tolist()
# dataset=pd.DataFrame()
# for name,group in pf.groupby(['artist']):
# if name in top50:
# dataset=pd.concat([dataset,group],axis=0)
# dataset=dataset.reset_index()
import numpy as np
from collections import Counter
from sklearn.model_selection import StratifiedShuffleSplit
def generate_classdict(label):
counter = Counter(label)
class_num=len(counter)
class_list=list(counter.keys()) #?
class_dict={}
class_weight={}
total = len(label)
count=0
for name,num in counter.items():
class_dict[name]=count
class_weight[count]=(total/(num*class_num))
count+=1
return class_num,class_list,class_dict,class_weight
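# Example (sketch): for labels ['a', 'a', 'a', 'b'] this returns class_num=2,
# class_list=['a', 'b'], class_dict={'a': 0, 'b': 1} and balanced weights
# class_weight={0: 4/(3*2)=0.67, 1: 4/(1*2)=2.0}, i.e. total / (count * class_num).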
X=np.array(dataset['filename'].tolist())
y=np.array(dataset['style'].tolist())
Style_class_num,Style_class_list,Style_class_dict,Style_class_weight=generate_classdict(y)
y=np.array(dataset['genre'].tolist())
Objtype_class_num,Objtype_class_list,Objtype_class_dict,Objtype_class_weight=generate_classdict(y)
# y=np.array(dataset['Creation Date'].tolist())
# CreationDate_class_num,CreationDate_class_list,CreationDate_class_dict,CreationDate_class_weight=generate_classdict(y)
y=np.array(dataset['artist'].tolist())
Artist_class_num,Artist_class_list,Artist_class_dict,Artist_class_weight=generate_classdict(y)
sss = StratifiedShuffleSplit(n_splits=1, test_size=0.2, random_state=0)
print(sss.get_n_splits(X, y))
train_frame=pd.DataFrame()
test_frame=pd.DataFrame()
for train_index, test_index in sss.split(X, y):
train_frame=dataset.loc[train_index]
test_frame=dataset.loc[test_index]
from tensorflow.keras.preprocessing.image import load_img, img_to_array
from tensorflow.keras.utils import to_categorical
import tensorflow as tf
import numpy as np
path='/content/images/'
train_input_shape = (224,224)
batch_size=64
imgs_size=(64,224,224,3)
Artist_size=(batch_size,Artist_class_num)
Style_size=(batch_size,Style_class_num)
Objtype_size=(batch_size,Objtype_class_num)
# CreationDate_size=(batch_size,CreationDate_class_num)
def multi_task_Gen():
iter=train_frame.iterrows()
x_array=np.zeros(imgs_size)
y1_array=[]
y2_array=[]
y3_array=[]
y4_array=[]
count=0
while True:
if count>=batch_size:
x_array=np.asarray(x_array)
y1_array=np.asarray(y1_array)
y2_array=np.asarray(y2_array)
y3_array=np.asarray(y3_array)
# y4_array=np.asarray(y4_array)
# print(x_array.shape)
# print(y1_array.shape)
# print(y2_array.shape)
# print(y3_array.shape)
# print(np.array([y1_array,y2_array,y3_array]).shape)
yield x_array,{'Artist_output':y1_array,'Style_output':y2_array,'Objtype_output':y3_array}#,'CreationDate_output':y4_array
count=0
x_array=np.zeros(imgs_size)
y1_array=[]
y2_array=[]
y3_array=[]
# y4_array=[]
dataframe = next(iter)
# print()
#print(to_categorical(class_dict[dataframe[1]['Artist']],num_classes=n_class))
x_array[count]=(img_to_array(load_img(path+dataframe[1]['filename'],target_size=train_input_shape))*1./255)
#print(count)
y1_array.append(to_categorical(Artist_class_dict[dataframe[1]['artist']],num_classes=Artist_class_num))
y2_array.append(to_categorical(Style_class_dict[dataframe[1]['style']],num_classes=Style_class_num))
y3_array.append(to_categorical(Objtype_class_dict[dataframe[1]['genre']],num_classes=Objtype_class_num))
# y4_array.append(to_categorical(CreationDate_class_dict[dataframe[1]['Creation Date']],num_classes=CreationDate_class_num))
#print(dataframe[1]['Style'],'//',dataframe[1]['Object Type'])
count+=1
def multi_task_Gen_valid():
iter=test_frame.iterrows()
x_array = np.zeros(imgs_size)
import os
import numpy as np
import tensorflow as tf
################################# detection ####################################
def zero_padding(inputs, pad_1, pad_2):
pad_mat = np.array([[0, 0], [pad_1, pad_2], [pad_1, pad_2], [0, 0]])
return tf.pad(tensor=inputs, paddings=pad_mat)
def conv_bn(inputs, oc, ks, st, scope, training, rate=1):
with tf.compat.v1.variable_scope(scope):
if st == 1:
layer = tf.compat.v1.layers.conv2d(
inputs, oc, ks, strides=st, padding='SAME', use_bias=False,
dilation_rate=rate,
kernel_regularizer=tf.keras.regularizers.l2(0.5 * (1.0)),
kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
)
else:
pad_total = ks - 1
pad_1 = pad_total // 2
pad_2 = pad_total - pad_1
padded_inputs = zero_padding(inputs, pad_1, pad_2)
layer = tf.compat.v1.layers.conv2d(
padded_inputs, oc, ks, strides=st, padding='VALID', use_bias=False,
dilation_rate=rate,
kernel_regularizer=tf.keras.regularizers.l2(0.5 * (1.0)),
kernel_initializer=tf.compat.v1.keras.initializers.VarianceScaling(scale=1.0, mode="fan_avg", distribution="uniform")
)
layer = tf.compat.v1.layers.batch_normalization(layer, training=training)
return layer
def conv_bn_relu(inputs, oc, ks, st, scope, training, rate=1):
layer = conv_bn(inputs, oc, ks, st, scope, training, rate=rate)
layer = tf.nn.relu(layer)
return layer
def bottleneck(inputs, oc, st, scope, training, rate=1):
with tf.compat.v1.variable_scope(scope):
ic = inputs.get_shape().as_list()[-1]
if ic == oc:
if st == 1:
shortcut = inputs
else:
shortcut = \
tf.nn.max_pool2d(inputs, [1, st, st, 1], [1, st, st, 1], 'SAME')
else:
shortcut = conv_bn(inputs, oc, 1, st, 'shortcut', training)
residual = conv_bn_relu(inputs, oc//4, 1, 1, 'conv1', training)
residual = conv_bn_relu(residual, oc//4, 3, st, 'conv2', training, rate)
residual = conv_bn(residual, oc, 1, 1, 'conv3', training)
output = tf.nn.relu(shortcut + residual)
return output
def resnet50(inputs, scope, training):
with tf.compat.v1.variable_scope(scope):
layer = conv_bn_relu(inputs, 64, 7, 2, 'conv1', training)
with tf.compat.v1.variable_scope('block1'):
for unit in range(2):
layer = bottleneck(layer, 256, 1, 'unit%d' % (unit+1), training)
layer = bottleneck(layer, 256, 2, 'unit3', training)
with tf.compat.v1.variable_scope('block2'):
for unit in range(4):
layer = bottleneck(layer, 512, 1, 'unit%d' % (unit+1), training, 2)
with tf.compat.v1.variable_scope('block3'):
for unit in range(6):
layer = bottleneck(layer, 1024, 1, 'unit%d' % (unit+1), training, 4)
layer = conv_bn_relu(layer, 256, 3, 1, 'squeeze', training)
return layer
def net_2d(features, training, scope, n_out):
with tf.compat.v1.variable_scope(scope):
layer = conv_bn_relu(features, 256, 3, 1, 'project', training)
with tf.compat.v1.variable_scope('prediction'):
hmap = tf.compat.v1.layers.conv2d(
layer, n_out, 1, strides=1, padding='SAME',
activation=tf.nn.sigmoid,
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.01)
)
return hmap
def net_3d(features, training, scope, n_out, need_norm):
with tf.compat.v1.variable_scope(scope):
layer = conv_bn_relu(features, 256, 3, 1, 'project', training)
with tf.compat.v1.variable_scope('prediction'):
dmap_raw = tf.compat.v1.layers.conv2d(
layer, n_out * 3, 1, strides=1, padding='SAME',
activation=None,
kernel_initializer=tf.compat.v1.initializers.truncated_normal(stddev=0.01)
)
if need_norm:
dmap_norm = tf.norm(tensor=dmap_raw, axis=-1, keepdims=True)
dmap = dmap_raw / tf.maximum(dmap_norm, 1e-6)
else:
dmap = dmap_raw
h, w = features.get_shape().as_list()[1:3]
dmap = tf.reshape(dmap, [-1, h, w, n_out, 3])
if need_norm:
return dmap, dmap_norm
return dmap
def tf_hmap_to_uv(hmap):
hmap_flat = tf.reshape(hmap, (tf.shape(input=hmap)[0], -1, tf.shape(input=hmap)[3]))
argmax = tf.argmax(input=hmap_flat, axis=1, output_type=tf.int32)
argmax_x = argmax // tf.shape(input=hmap)[2]
argmax_y = argmax % tf.shape(input=hmap)[2]
uv = tf.stack((argmax_x, argmax_y), axis=1)
uv = tf.transpose(a=uv, perm=[0, 2, 1])
return uv
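# Shape sketch: for a heatmap tensor of shape [N, H, W, K], tf_hmap_to_uv returns an int32
# tensor of shape [N, K, 2] holding the (row, col) location of each channel's peak.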
def get_pose_tile(N):
pos_tile = tf.tile(
tf.constant(
np.expand_dims(
np.stack(
[
np.tile(np.linspace(-1, 1, 32),
"""Tests for :mod:`numpy.core.fromnumeric`."""
import numpy as np
A = np.array(True, ndmin=2, dtype=bool)
B = np.array(1.0, ndmin=2, dtype=np.float32)
A.setflags(write=False)
B.setflags(write=False)
a = np.bool_(True)
b = np.float32(1.0)
c = 1.0
np.take(a, 0)
np.take(b, 0)
np.take(c, 0)
np.take(A, 0)
np.take(B, 0)
np.take(A, [0])
np.take(B, [0])
np.reshape(a, 1)
np.reshape(b, 1)
import datetime
import mlir
import itertools
import pytest
import numpy as np
from mlir_graphblas import MlirJitEngine
from mlir_graphblas.engine import parse_mlir_functions
from mlir_graphblas.sparse_utils import MLIRSparseTensor
from mlir_graphblas.mlir_builder import MLIRFunctionBuilder
from mlir_graphblas.types import AliasMap, SparseEncodingType
from mlir_graphblas.functions import ConvertLayout
from mlir_graphblas.algorithms import (
triangle_count_combined,
dense_neural_network_combined,
)
from .jit_engine_test_utils import sparsify_array, GRAPHBLAS_PASSES
from typing import List, Callable
# TODO a lot of these tests take sums or reductions over an scf.for loop by storing into a memref
# It's better practice to use as demonstrated
# at https://mlir.llvm.org/docs/Dialects/SCFDialect/#scffor-mlirscfforop
@pytest.fixture(scope="module")
def engine():
jit_engine = MlirJitEngine()
jit_engine.add(
"""
#trait_densify_csr = {
indexing_maps = [
affine_map<(i,j) -> (i,j)>,
affine_map<(i,j) -> (i,j)>
],
iterator_types = ["parallel", "parallel"]
}
#CSR64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (i,j)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
func @csr_densify5x5(%argA: tensor<5x5xf64, #CSR64>) -> tensor<5x5xf64> {
%output_storage = constant dense<0.0> : tensor<5x5xf64>
%0 = linalg.generic #trait_densify_csr
ins(%argA: tensor<5x5xf64, #CSR64>)
outs(%output_storage: tensor<5x5xf64>) {
^bb(%A: f64, %x: f64):
linalg.yield %A : f64
} -> tensor<5x5xf64>
return %0 : tensor<5x5xf64>
}
func @csr_densify8x8(%argA: tensor<8x8xf64, #CSR64>) -> tensor<8x8xf64> {
%output_storage = constant dense<0.0> : tensor<8x8xf64>
%0 = linalg.generic #trait_densify_csr
ins(%argA: tensor<8x8xf64, #CSR64>)
outs(%output_storage: tensor<8x8xf64>) {
^bb(%A: f64, %x: f64):
linalg.yield %A : f64
} -> tensor<8x8xf64>
return %0 : tensor<8x8xf64>
}
#trait_densify_csc = {
indexing_maps = [
affine_map<(i,j) -> (j,i)>,
affine_map<(i,j) -> (i,j)>
],
iterator_types = ["parallel", "parallel"]
}
#CSC64 = #sparse_tensor.encoding<{
dimLevelType = [ "dense", "compressed" ],
dimOrdering = affine_map<(i,j) -> (j,i)>,
pointerBitWidth = 64,
indexBitWidth = 64
}>
func @csc_densify8x8(%argA: tensor<8x8xf64, #CSC64>) -> tensor<8x8xf64> {
%output_storage = constant dense<0.0> : tensor<8x8xf64>
%0 = linalg.generic #trait_densify_csc
ins(%argA: tensor<8x8xf64, #CSC64>)
outs(%output_storage: tensor<8x8xf64>) {
^bb(%A: f64, %x: f64):
linalg.yield %A : f64
} -> tensor<8x8xf64>
return %0 : tensor<8x8xf64>
}
""",
GRAPHBLAS_PASSES,
)
return jit_engine
@pytest.fixture(scope="module")
def aliases() -> AliasMap:
csr64 = SparseEncodingType(["dense", "compressed"], [0, 1], 64, 64)
csc64 = SparseEncodingType(["dense", "compressed"], [1, 0], 64, 64)
aliases = AliasMap()
aliases["CSR64"] = csr64
aliases["CSC64"] = csc64
return aliases
def test_ir_builder_convert_layout_wrapper(engine: MlirJitEngine, aliases: AliasMap):
# Build Function
convert_layout_function = ConvertLayout()
ir_builder = MLIRFunctionBuilder(
"convert_layout_wrapper",
input_types=["tensor<?x?xf64, #CSR64>"],
return_types=("tensor<?x?xf64, #CSC64>",),
aliases=aliases,
)
(input_var,) = ir_builder.inputs
convert_layout_result = ir_builder.call(convert_layout_function, input_var)
ir_builder.return_vars(convert_layout_result)
assert ir_builder.get_mlir()
# Test Compiled Function
convert_layout_wrapper_callable = ir_builder.compile(
engine=engine, passes=GRAPHBLAS_PASSES
)
indices = np.array(
[
[1, 2],
[4, 3],
],
dtype=np.uint64,
)
values = np.array([1.2, 4.3], dtype=np.float64)
sizes = np.array([8, 8], dtype=np.uint64)
sparsity = np.array([False, True], dtype=np.bool8)
input_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
dense_input_tensor = np.zeros([8, 8], dtype=np.float64)
dense_input_tensor[1, 2] = 1.2
dense_input_tensor[4, 3] = 4.3
assert np.isclose(dense_input_tensor, engine.csr_densify8x8(input_tensor)).all()
output_tensor = convert_layout_wrapper_callable(input_tensor)
assert np.isclose(dense_input_tensor, engine.csc_densify8x8(output_tensor)).all()
def test_ir_builder_triple_convert_layout(engine: MlirJitEngine, aliases: AliasMap):
# Build Function
ir_builder = MLIRFunctionBuilder(
"triple_convert_layout",
input_types=["tensor<?x?xf64, #CSR64>"],
return_types=["tensor<?x?xf64, #CSC64>"],
aliases=aliases,
)
(input_var,) = ir_builder.inputs
# Use different instances of Tranpose to ideally get exactly one convert_layout helper in the final MLIR text
inter1 = ir_builder.call(ConvertLayout("csc"), input_var)
inter2 = ir_builder.call(ConvertLayout("csr"), inter1)
return_var = ir_builder.call(ConvertLayout("csc"), inter2)
ir_builder.return_vars(return_var)
mlir_text = ir_builder.get_mlir_module()
ast = parse_mlir_functions(mlir_text, engine._cli)
# verify there are exactly two functions
functions = [
node
for node in engine._walk_module(ast)
if isinstance(node, mlir.astnodes.Function)
]
triple_convert_func = functions.pop(-1)
assert triple_convert_func.visibility == "public"
convert_layout_funcs = [
func for func in functions if func.name.value.startswith("convert_layout_to_cs")
]
assert len(convert_layout_funcs) == 2
# Test Compiled Function
triple_convert_layout_callable = ir_builder.compile(
engine=engine, passes=GRAPHBLAS_PASSES
)
indices = np.array(
[
[1, 2],
[4, 3],
],
dtype=np.uint64,
)
values = np.array([1.2, 4.3], dtype=np.float64)
sizes = np.array([8, 8], dtype=np.uint64)
sparsity = np.array([False, True], dtype=np.bool8)
input_tensor = MLIRSparseTensor(indices, values, sizes, sparsity)
dense_input_tensor = np.zeros([8, 8], dtype=np.float64)
#!/usr/bin/env python
# coding: utf8
#
# Copyright (c) 2021 Centre National d'Etudes Spatiales (CNES).
#
# This file is part of demcompare
# (see https://github.com/CNES/demcompare).
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
#
"""
This module contains functions associated to raster images.
"""
# Standard imports
import copy
import logging
import os
from typing import List, Tuple, Union
# Third party imports
import numpy as np
import pyproj
import rasterio
import rasterio.crs
import rasterio.mask
import rasterio.warp
import rasterio.windows
import xarray as xr
from astropy import units as u
from rasterio import Affine
from rasterio.warp import Resampling, reproject
from scipy import interpolate
from scipy.ndimage import filters
def read_image(path: str, band: int = 1) -> np.ndarray:
"""
Read image as array
:param path: path
:param band: numero of band to extract
:return: band array
"""
img_ds = rasterio.open(path)
data = img_ds.read(band)
return data
def pix_to_coord(
transform_array: Union[List, np.ndarray],
row: Union[int, np.ndarray],
col: Union[List, np.ndarray],
) -> Union[Tuple[np.ndarray, np.ndarray], Tuple[float, float]]:
"""
Transform pixels to coordinates
:param transform_array: Transform
:param row: row
:param col: column
:return: x,y
"""
transform = Affine.from_gdal(
transform_array[0],
transform_array[1],
transform_array[2],
transform_array[3],
transform_array[4],
transform_array[5],
)
x, y = rasterio.transform.xy(transform, row, col, offset="center")
if not isinstance(x, int):
x = np.array(x)
y = np.array(y)
return x, y
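# Example (sketch): with a GDAL-style transform (x origin 500000, 10 m pixels, y origin
# 4500000, -10 m line spacing), the centre of pixel (row=0, col=0) is
#   x, y = pix_to_coord([500000.0, 10.0, 0.0, 4500000.0, 0.0, -10.0], 0, 0)
#   # -> x == 500005.0, y == 4499995.0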
def reproject_dataset(
dataset: xr.Dataset, from_dataset: xr.Dataset, interp: str = "bilinear"
) -> xr.Dataset:
"""
Reproject dataset, and return the corresponding xarray.DataSet
:param dataset: Dataset to reproject
:param from_dataset: Dataset to get projection from
:param interp: interpolation method
:return: reprojected dataset
"""
# Copy dataset
reprojected_dataset = copy.copy(from_dataset)
interpolation_method = Resampling.bilinear
if interp == "bilinear":
interpolation_method = Resampling.bilinear
elif interp == "nearest":
interpolation_method = Resampling.nearest
else:
logging.warning(
"Interpolation method not available, use default 'bilinear'"
)
src_transform = Affine.from_gdal(
dataset["trans"].data[0],
dataset["trans"].data[1],
dataset["trans"].data[2],
dataset["trans"].data[3],
dataset["trans"].data[4],
dataset["trans"].data[5],
)
dst_transform = Affine.from_gdal(
from_dataset["trans"].data[0],
from_dataset["trans"].data[1],
from_dataset["trans"].data[2],
from_dataset["trans"].data[3],
from_dataset["trans"].data[4],
from_dataset["trans"].data[5],
)
source_array = dataset["im"].data
dest_array = np.zeros_like(from_dataset["im"].data)
dest_array[:, :] = -9999
src_crs = rasterio.crs.CRS.from_dict(dataset.attrs["georef"])
dst_crs = rasterio.crs.CRS.from_dict(from_dataset.attrs["georef"])
# reproject
reproject(
source=source_array,
destination=dest_array,
src_transform=src_transform,
src_crs=src_crs,
dst_transform=dst_transform,
dst_crs=dst_crs,
resampling=interpolation_method,
src_nodata=dataset.attrs["no_data"],
dst_nodata=-9999,
)
# change output dataset
dest_array[dest_array == -9999] = np.nan
reprojected_dataset["im"].data = dest_array
reprojected_dataset.attrs["no_data"] = dataset.attrs["no_data"]
return reprojected_dataset
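# Example (sketch): resample `dem` onto the grid and projection of `ref`
#   dem_on_ref_grid = reproject_dataset(dem, ref, interp="nearest")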
def read_img(
img: str,
no_data: float = None,
ref: str = "WGS84",
geoid_path: Union[str, None] = None,
zunit: str = "m",
load_data: bool = False,
) -> xr.Dataset:
"""
Read image and transform and return the corresponding xarray.DataSet
:param img: Path to the image
:param no_data: no_data value in the image
:param ref: WGS84 or geoid
:param geoid_path: optional, path to local geoid
:param zunit: unit
:param load_data: load as dem
:return: dataset containing the variables :
- im : 2D (row, col) xarray.DataArray float32
- trans 1D xarray.DataArray float32
"""
img_ds = rasterio.open(img)
data = img_ds.read(1)
transform = img_ds.transform
dataset = create_dataset(
data,
transform,
img,
no_data=no_data,
ref=ref,
geoid_path=geoid_path,
zunit=zunit,
load_data=load_data,
)
return dataset
def create_dataset(
data: np.ndarray,
transform: np.ndarray,
img: str,
no_data: float = None,
ref: str = "WGS84",
geoid_path: Union[str, None] = None,
zunit: str = "m",
load_data: bool = False,
) -> xr.Dataset:
"""
Create dataset from array and transform,
and return the corresponding xarray.DataSet
:param data: image data
:param transform: image data
:param img: image path
:param no_data: no_data value in the image
:param ref: WGS84 or geoid
:param geoid_path: optional path to local geoid, default is EGM96
:param zunit: unit
:param load_data: load as dem
:return: xarray.DataSet containing the variables :
- im : 2D (row, col) xarray.DataArray float32
"""
img_ds = rasterio.open(img)
georef = img_ds.crs
# Manage nodata
if no_data is None:
meta_nodata = img_ds.nodatavals[0]
if meta_nodata is not None:
no_data = meta_nodata
else:
no_data = -9999
if len(data.shape) == 3:
# to dim 2: find and drop the singleton dimension, if any
singleton_dims = np.where(np.array(data.shape) == 1)[0]
dim_single = singleton_dims[0] if singleton_dims.size else -1
if dim_single == 0:
data = data[0, :, :]
if dim_single == 2:
data = data[:, :, 0]
data = data.astype(np.float32)
data[data == no_data] = np.nan
# convert to meter
data = ((data * u.Unit(zunit)).to(u.meter)).value
new_zunit = u.meter
dataset = xr.Dataset(
{"im": (["row", "col"], data.astype(np.float32))},
coords={
"row": np.arange(data.shape[0]),
"col": np.arange(data.shape[1]),
},
)
transform = np.array(transform.to_gdal())
# Add transform
trans_len = np.arange(0, len(transform))
dataset.coords["trans_len"] = trans_len
dataset["trans"] = xr.DataArray(data=transform, dims=["trans_len"])
# get plani unit
if georef.is_geographic:
plani_unit = u.deg
else:
plani_unit = u.m
# Add image conf to the image dataset
# Add resolution, and units
dataset.attrs = {
"no_data": no_data,
"input_img": img,
"georef": georef,
"xres": transform[1],
"yres": transform[5],
"plani_unit": plani_unit,
"zunit": new_zunit,
}
if load_data is not False:
if ref == "geoid":
# transform to ellipsoid
geoid_offset = get_geoid_offset(dataset, geoid_path)
dataset["im"].data += geoid_offset
return dataset
def read_img_from_array(
img_array: np.ndarray,
from_dataset: xr.Dataset = None,
no_data: float = None,
) -> xr.Dataset:
"""
Read image, and return the corresponding xarray.DataSet.
If from_dataset is None defaults attributes are set.
:param img_array: array
:param no_data: no_data value in the image
:param from_dataset: dataset to copy
:return: xarray.DataSet containing the variables :
- im : 2D (row, col) xarray.DataArray float32
"""
data = np.copy(img_array)
# Manage nodata
if no_data is None:
no_data = -9999
data[data == no_data] = np.nan
if from_dataset is None:
dataset = xr.Dataset(
{"im": (["row", "col"], data.astype(np.float32))},
coords={
"row": np.arange(data.shape[0]),
"col": np.arange(data.shape[1]),
},
)
# add random resolution
dataset.attrs["xres"] = 1
dataset.attrs["yres"] = 1
# add nodata
dataset.attrs["no_data"] = no_data
else:
dataset = copy.deepcopy(from_dataset)
dataset["im"] = None
dataset.coords["row"] = np.arange(data.shape[0])
dataset.coords["col"] = np.arange(data.shape[1])
dataset["im"] = xr.DataArray(
data=data.astype(np.float32), dims=["row", "col"]
)
return dataset
def load_dems(
ref_path: str,
dem_path: str,
ref_nodata: float = None,
dem_nodata: float = None,
ref_georef: str = "WGS84",
dem_georef: str = "WGS84",
ref_geoid_path: Union[str, None] = None,
dem_geoid_path: Union[str, None] = None,
ref_zunit: str = "m",
dem_zunit: str = "m",
load_data: Union[bool, dict, Tuple] = True,
) -> Tuple[xr.Dataset, xr.Dataset]:
"""
Loads both DEMs
:param ref_path: path to ref dem
:param dem_path:path to sec dem
:param ref_nodata: ref no data value
(None by default and if set inside metadata)
:param dem_nodata: dem no data value
(None by default and if set inside metadata)
:param ref_georef: ref georef (either WGS84 -default- or geoid)
:param dem_georef: dem georef (either WGS84 -default- or geoid)
:param ref_geoid_path: optional path to local geoid, default is EGM96
:param dem_geoid_path: optional path to local geoid, default is EGM96
:param ref_zunit: ref z unit
:param dem_zunit: dem z unit
:param load_data: True if dem are to be fully loaded,
other options are False or a dict roi
:return: ref and dem datasets
"""
# Get roi of dem
src_dem = rasterio.open(dem_path)
dem_crs = src_dem.crs
dem_trans = src_dem.transform
bounds_dem = src_dem.bounds
if load_data is not True:
# Use ROI
if isinstance(load_data, (tuple, list)):
bounds_dem = load_data
elif isinstance(load_data, dict):
if (
"left" in load_data
and "bottom" in load_data
and "right" in load_data
and "top" in load_data
):
# coordinates
bounds_dem = (
load_data["left"],
load_data["bottom"],
load_data["right"],
load_data["top"],
)
elif (
"x" in load_data
and "y" in load_data
and "w" in load_data
and "h" in load_data
):
# coordinates
window_dem = rasterio.windows.Window(
load_data["x"],
load_data["y"],
load_data["w"],
load_data["h"],
)
bounds_dem = rasterio.windows.bounds(window_dem, dem_trans)
print(bounds_dem)
else:
print("Not he right conventions for ROI")
# Get roi of ref
src_ref = rasterio.open(ref_path)
ref_crs = src_ref.crs
bounds_ref = src_ref.bounds
transformed_ref_bounds = rasterio.warp.transform_bounds(
ref_crs,
dem_crs,
bounds_ref[0],
bounds_ref[1],
bounds_ref[2],
bounds_ref[3],
)
# intersect roi
if rasterio.coords.disjoint_bounds(bounds_dem, transformed_ref_bounds):
raise NameError("ERROR: ROIs do not intersect")
intersection_roi = (
max(bounds_dem[0], transformed_ref_bounds[0]),
max(bounds_dem[1], transformed_ref_bounds[1]),
min(bounds_dem[2], transformed_ref_bounds[2]),
min(bounds_dem[3], transformed_ref_bounds[3]),
)
# get crop
polygon_roi = bounding_box_to_polygon(
intersection_roi[0],
intersection_roi[1],
intersection_roi[2],
intersection_roi[3],
)
geom_like_polygon = {"type": "Polygon", "coordinates": [polygon_roi]}
# crop dem
new_cropped_dem, new_cropped_dem_transform = rasterio.mask.mask(
src_dem, [geom_like_polygon], all_touched=True, crop=True
)
# create datasets
dem = create_dataset(
new_cropped_dem,
new_cropped_dem_transform,
dem_path,
no_data=dem_nodata,
ref=dem_georef,
geoid_path=dem_geoid_path,
zunit=dem_zunit,
load_data=load_data,
)
# full_ref represent a dataset with the full image
full_ref = create_dataset(
src_ref.read(1),
src_ref.transform,
ref_path,
no_data=ref_nodata,
ref=ref_georef,
geoid_path=ref_geoid_path,
zunit=ref_zunit,
load_data=load_data,
)
# reproject, crop, resample
ref = reproject_dataset(full_ref, dem, interp="bilinear")
# update dataset input_img with ref old value
ref.attrs["input_img"] = full_ref.attrs["input_img"]
return ref, dem
def bounding_box_to_polygon(
left: float, bottom: float, right: float, top: float
) -> List[List[float]]:
"""
Transform bounding box to polygon
:param left: left bound
:param bottom: bottom bound
:param right: right bound
:param top: top bound
:return: polygon
"""
polygon = [
[left, bottom],
[right, bottom],
[right, top],
[left, top],
[left, bottom],
]
return polygon
def translate(
dataset: xr.Dataset, x_offset: float, y_offset: float
) -> xr.Dataset:
"""
Modify transform from dataset
:param dataset:
:param x_offset: x offset
:param y_offset: y offset
:return translated dataset
"""
dataset_translated = copy.copy(dataset)
x_off, y_off = pix_to_coord(dataset["trans"].data, y_offset, x_offset)
dataset_translated["trans"].data[0] = x_off
dataset_translated["trans"].data[3] = y_off
return dataset_translated
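# Example (sketch): shift a dataset's georef origin by +2 pixels in x and -1 pixel in y
#   shifted = translate(dataset, x_offset=2, y_offset=-1)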
def translate_to_coregistered_geometry(
dem1: xr.Dataset,
dem2: xr.Dataset,
dx: int,
dy: int,
interpolator: str = "bilinear",
) -> Tuple[xr.Dataset, xr.Dataset]:
"""
Translate both DSMs to their coregistered geometry.
Note that :
a) The dem2 georef is assumed to be the reference
b) The dem2 shall be the one resampled as it supposedly is the cleaner one.
Hence, dem1 is only cropped, dem2 is the only one that might be resampled.
However, as dem2 is the ref, dem1 georef is translated to dem2 georef.
:param dem1: dataset, master dem
:param dem2: dataset, slave dem
:param dx: f, dx value in pixels
:param dy: f, dy value in pixels
:param interpolator: gdal interpolator
:return: coregistered DEM as datasets
"""
#
# Translate the georef of dem1 based on dx and dy values
# -> this makes dem1 coregistered on dem2
#
# note the -0.5 since the (0,0) pixel coord is pixel centered
dem1 = translate(dem1, dx - 0.5, dy - 0.5)
#
# Intersect and reproject both dsms.
# -> intersect them to the biggest common grid
# now that they have been shifted
# -> dem1 is then cropped with intersect so that it lies within intersect
# but is not resampled in the process
# -> reproject dem2 to dem1 grid,
# the intersection grid sampled on dem1 grid
#
transform_dem1 = Affine.from_gdal(
dem1["trans"].data[0],
dem1["trans"].data[1],
dem1["trans"].data[2],
dem1["trans"].data[3],
dem1["trans"].data[4],
dem1["trans"].data[5],
)
bounds_dem1 = rasterio.transform.array_bounds(
dem1["im"].data.shape[1], dem1["im"].data.shape[0], transform_dem1
)
transform_dem2 = Affine.from_gdal(
dem2["trans"].data[0],
dem2["trans"].data[1],
dem2["trans"].data[2],
dem2["trans"].data[3],
dem2["trans"].data[4],
dem2["trans"].data[5],
)
bounds_dem2 = rasterio.transform.array_bounds(
dem2["im"].data.shape[1], dem2["im"].data.shape[0], transform_dem2
)
intersection_roi = (
max(bounds_dem1[0], bounds_dem2[0]),
max(bounds_dem1[1], bounds_dem2[1]),
min(bounds_dem1[2], bounds_dem2[2]),
min(bounds_dem1[3], bounds_dem2[3]),
)
# get crop
polygon_roi = bounding_box_to_polygon(
intersection_roi[0],
intersection_roi[1],
intersection_roi[2],
intersection_roi[3],
)
geom_like_polygon = {"type": "Polygon", "coordinates": [polygon_roi]}
# crop dem
srs_dem1 = rasterio.open(
" ",
mode="w+",
driver="GTiff",
width=dem1["im"].data.shape[1],
height=dem1["im"].data.shape[0],
count=1,
dtype=dem1["im"].data.dtype,
crs=dem1.attrs["georef"],
transform=transform_dem1,
)
srs_dem1.write(dem1["im"].data, 1)
new_cropped_dem1, new_cropped_dem1_transform = rasterio.mask.mask(
srs_dem1, [geom_like_polygon], all_touched=True, crop=True
)
# create datasets
reproj_dem1 = copy.copy(dem1)
reproj_dem1["trans"].data = np.array(new_cropped_dem1_transform.to_gdal())
reproj_dem1 = read_img_from_array(
new_cropped_dem1[0, :, :],
from_dataset=reproj_dem1,
no_data=dem1.attrs["no_data"],
)
# reproject, crop, resample
reproj_dem2 = reproject_dataset(dem2, reproj_dem1, interp=interpolator)
return reproj_dem1, reproj_dem2
def save_tif(
dataset: xr.Dataset, filename: str, new_array=None, no_data: float = -32768
) -> xr.Dataset:
"""
Write a Dataset in a tiff file.
If new_array is set, new_array is used as data.
:param dataset: dataset
:param filename: output filename
:param new_array: new array to write
:param no_data: value of nodata to use
:return: dataset
"""
# update from dataset
previous_profile = {}
previous_profile["crs"] = rasterio.crs.CRS.from_dict(
dataset.attrs["georef"]
)
previous_profile["transform"] = Affine.from_gdal(
dataset["trans"].data[0],
dataset["trans"].data[1],
dataset["trans"].data[2],
dataset["trans"].data[3],
dataset["trans"].data[4],
dataset["trans"].data[5],
)
data = dataset["im"].data
if new_array is not None:
data = new_array
if len(dataset["im"].shape) == 2:
row, col = data.shape
with rasterio.open(
filename,
mode="w+",
driver="GTiff",
width=col,
height=row,
count=1,
dtype=data.dtype,
crs=previous_profile["crs"],
transform=previous_profile["transform"],
) as source_ds:
source_ds.nodata = no_data
source_ds.write(data, 1)
else:
row, col, depth = data.shape
with rasterio.open(
filename,
mode="w+",
driver="GTiff",
width=col,
height=row,
count=depth,
dtype=data.dtype,
crs=previous_profile["crs"],
transform=previous_profile["transform"],
) as source_ds:
for dsp in range(1, depth + 1):
source_ds.write(data[:, :, dsp - 1], dsp)
new_dataset = copy.deepcopy(dataset)
# update dataset input_img with new filename
new_dataset.attrs["input_img"] = filename
return new_dataset
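# Illustrative usage sketch (assumption: not part of the original module).
# The dataset argument is any xr.Dataset following the "im"/"trans"/"georef"
# convention used throughout this file; the output path is hypothetical.
def _example_save_tif(dataset: xr.Dataset) -> xr.Dataset:
    import tempfile
    out_path = os.path.join(tempfile.gettempdir(), "dem_example.tif")
    return save_tif(dataset, out_path, no_data=-32768)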
def get_slope(dataset: xr.Dataset, degree: bool = False) -> np.ndarray:
"""
Compute slope from dataset
Slope is presented here :
http://pro.arcgis.com/ \
fr/pro-app/tool-reference/spatial-analyst/how-aspect-works.htm
:param dataset: dataset
:param degree: True if is in degree
:return: slope
"""
def get_orthodromic_distance(
lon1: float, lat1: float, lon2: float, lat2: float
):
"""
Get Orthodromic distance from two (lat,lon) coordinates
:param lon1:
:param lat1:
:param lon2:
:param lat2:
:return: orthodromic distance
"""
# WGS-84 equatorial radius in metres
radius_equator = 6378137.0
return radius_equator * np.arccos(
np.cos(lat1 * np.pi / 180)
* np.cos(lat2 * np.pi / 180)
* np.cos((lon2 - lon1) * np.pi / 180)
+ np.sin(lat1 * np.pi / 180) * np.sin(lat2 * np.pi / 180)
)
crs = rasterio.crs.CRS.from_dict(dataset.attrs["georef"])
if not crs.is_projected:
# Our dem is not projected, we can't simply use the pixel resolution
# -> we need to compute resolution between each point
ny, nx = dataset["im"].data.shape
xp = np.arange(nx)
yp = np.arange(ny)
xp, yp = np.meshgrid(xp, yp)
lon, lat = pix_to_coord(dataset["trans"].data, yp, xp)
lonr = np.roll(lon, 1, 1)
latl = np.roll(lat, 1, 0)
distx = get_orthodromic_distance(lon, lat, lonr, lat)
disty = get_orthodromic_distance(lon, lat, lon, latl)
# deal with singularities at edges
distx[:, 0] = distx[:, 1]
disty[0] = disty[1]
else:
distx = np.abs(dataset.attrs["xres"])
disty = np.abs(dataset.attrs["yres"])
conv_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
conv_y = conv_x.transpose()
# Now we do the convolutions :
gx = filters.convolve(dataset["im"].data, conv_x, mode="reflect")
gy = filters.convolve(dataset["im"].data, conv_y, mode="reflect")
# And eventually we do compute tan(slope) and aspect
tan_slope = np.sqrt((gx / distx) ** 2 + (gy / disty) ** 2) / 8
slope = np.arctan(tan_slope)
# Just simple unit change as required
if degree is False:
slope *= 100
else:
slope = (slope * 180) / np.pi
return slope
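# Illustrative check (assumption: the helper below is not part of the original
# module). On a synthetic tilted plane z = 2*x with unit pixel spacing, the
# Sobel-style kernels used in get_slope should recover tan(slope) = 2 away
# from the borders.
def _example_slope_on_tilted_plane() -> np.ndarray:
    plane = np.tile(2.0 * np.arange(10.0), (10, 1))
    conv_x = np.array([[-1, 0, 1], [-2, 0, 2], [-1, 0, 1]])
    conv_y = conv_x.transpose()
    gx = filters.convolve(plane, conv_x, mode="reflect")
    gy = filters.convolve(plane, conv_y, mode="reflect")
    tan_slope = np.sqrt(gx ** 2 + gy ** 2) / 8  # distx = disty = 1 here
    assert np.allclose(tan_slope[1:-1, 1:-1], 2.0)
    return np.arctan(tan_slope)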
def interpolate_geoid(
geoid_filename: str, coords: np.ndarray, interpol_method: str = "linear"
) -> np.ndarray:
"""
Bilinear interpolation of geoid
:param geoid_filename : coord geoid_filename
:param coords : coords matrix 2xN [lon,lat]
:param interpol_method : interpolation type
:return interpolated position : [lon,lat,estimate geoid] (3D np.array)
"""
dataset = rasterio.open(geoid_filename)
transform = dataset.transform
step_x = transform[0]
# ascending
step_y = -transform[4]
# 0 or 0.5
# bottom-left corner
[ori_x, ori_y] = transform * (
0.5,
dataset.height - 0.5,
) # positions at the pixel centre
last_x = ori_x + step_x * dataset.width
last_y = ori_y + step_y * dataset.height
# transform dep to positions
geoid_values = dataset.read(1)[::-1, :].transpose()
x = np.arange(ori_x, last_x, step_x)
# lat must be in ascending order,
y = np.arange(ori_y, last_y, step_y)
geoid_grid_coordinates = (x, y)
interp_geoid = interpolate.interpn(
geoid_grid_coordinates,
geoid_values,
coords,
method=interpol_method,
bounds_error=False,
fill_value=None,
)
return interp_geoid
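# Illustrative sketch (assumption: the helper below is not part of the original
# module). It exercises the same interpolate.interpn call pattern as
# interpolate_geoid, but on a tiny synthetic grid instead of a geoid file.
def _example_interpn_on_synthetic_grid() -> np.ndarray:
    x = np.arange(0.0, 4.0)                      # "lon" axis
    y = np.arange(0.0, 3.0)                      # "lat" axis
    values = x[:, None] + 10.0 * y[None, :]      # shape (len(x), len(y))
    coords = np.array([[1.5, 0.5], [2.0, 1.0]])  # two [lon, lat] query points
    out = interpolate.interpn((x, y), values, coords, method="linear")
    assert np.allclose(out, [6.5, 12.0])         # exact for a bilinear surface
    return out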
def get_geoid_offset(
dataset: xr.Dataset, geoid_path: Union[str, None]
) -> np.ndarray:
"""
Get offset from geoid to ellipsoid
:param dataset : dataset
:param geoid_path : optional absolute geoid path; if None, egm96 is used
:return offset as array
"""
# If no geoid path has been given, use the default geoid egm96
# installed in setup.py
if geoid_path is None:
# this returns the fully resolved path to the python installed module
module_path = os.path.dirname(__file__)
# Geoid relative Path as installed in setup.py
geoid_path = "geoid/egm96_15.gtx"
# Create full geoid path
geoid_path = os.path.join(module_path, geoid_path)
ny, nx = dataset["im"].data.shape
xp = np.arange(nx)
yp = np.arange(ny)
# xp in [-180, 180], yp in [-90, 90]
xp[xp > 180] = xp[xp > 180] - 360
xp[xp < -180] = xp[xp < -180] + 360
yp[yp > 90] = yp[yp > 90] - 180
yp[yp < -90] = yp[yp < -90] + 180
xp, yp = np.meshgrid(xp, yp)
lon, lat = pix_to_coord(dataset["trans"].data, yp, xp)
src_crs = rasterio.crs.CRS.from_dict(dataset.attrs["georef"])
if src_crs.is_projected:
# convert to global coordinates
proj = pyproj.Proj(src_crs)
lon, lat = proj(lon, lat, inverse=True)
# transform to list (2xN)
lon_1d = np.reshape(
lon, (dataset["im"].data.shape[0] * dataset["im"].data.shape[1])
)
lat_1d = np.reshape(
lat, (dataset["im"].data.shape[0] * dataset["im"].data.shape[1])
)
coords = np.zeros((lon_1d.size, 2))
import sys
import warnings
import itertools
import platform
import pytest
from decimal import Decimal
import numpy as np
from numpy.core import umath
from numpy.random import rand, randint, randn
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_raises_regex,
assert_array_equal, assert_almost_equal, assert_array_almost_equal,
assert_warns, HAS_REFCOUNT
)
class TestResize(object):
def test_copies(self):
A = np.array([[1, 2], [3, 4]])
Ar1 = np.array([[1, 2, 3, 4], [1, 2, 3, 4]])
assert_equal(np.resize(A, (2, 4)), Ar1)
Ar2 = np.array([[1, 2], [3, 4], [1, 2], [3, 4]])
assert_equal(np.resize(A, (4, 2)), Ar2)
Ar3 = np.array([[1, 2, 3], [4, 1, 2], [3, 4, 1], [2, 3, 4]])
assert_equal(np.resize(A, (4, 3)), Ar3)
def test_zeroresize(self):
A = np.array([[1, 2], [3, 4]])
Ar = np.resize(A, (0,))
assert_array_equal(Ar, np.array([]))
assert_equal(A.dtype, Ar.dtype)
Ar = np.resize(A, (0, 2))
assert_equal(Ar.shape, (0, 2))
Ar = np.resize(A, (2, 0))
assert_equal(Ar.shape, (2, 0))
def test_reshape_from_zero(self):
# See also gh-6740
A = np.zeros(0, dtype=[('a', np.float32)])
Ar = np.resize(A, (2, 1))
assert_array_equal(Ar, np.zeros((2, 1), Ar.dtype))
assert_equal(A.dtype, Ar.dtype)
class TestNonarrayArgs(object):
# check that non-array arguments to functions wrap them in arrays
def test_choose(self):
choices = [[0, 1, 2],
[3, 4, 5],
[5, 6, 7]]
tgt = [5, 1, 5]
a = [2, 0, 1]
out = np.choose(a, choices)
assert_equal(out, tgt)
def test_clip(self):
arr = [-1, 5, 2, 3, 10, -4, -9]
out = np.clip(arr, 2, 7)
tgt = [2, 5, 2, 3, 7, 2, 2]
assert_equal(out, tgt)
def test_compress(self):
arr = [[0, 1, 2, 3, 4],
[5, 6, 7, 8, 9]]
tgt = [[5, 6, 7, 8, 9]]
out = np.compress([0, 1], arr, axis=0)
assert_equal(out, tgt)
def test_count_nonzero(self):
arr = [[0, 1, 7, 0, 0],
[3, 0, 0, 2, 19]]
tgt = np.array([2, 3])
out = np.count_nonzero(arr, axis=1)
assert_equal(out, tgt)
def test_cumproduct(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.all(np.cumproduct(A) == np.array([1, 2, 6, 24, 120, 720])))
def test_diagonal(self):
a = [[0, 1, 2, 3],
[4, 5, 6, 7],
[8, 9, 10, 11]]
out = np.diagonal(a)
tgt = [0, 5, 10]
assert_equal(out, tgt)
def test_mean(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.mean(A) == 3.5)
assert_(np.all(np.mean(A, 0) == np.array([2.5, 3.5, 4.5])))
assert_(np.all(np.mean(A, 1) == np.array([2., 5.])))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.mean([])))
assert_(w[0].category is RuntimeWarning)
def test_ptp(self):
a = [3, 4, 5, 10, -3, -5, 6.0]
assert_equal(np.ptp(a, axis=0), 15.0)
def test_prod(self):
arr = [[1, 2, 3, 4],
[5, 6, 7, 9],
[10, 3, 4, 5]]
tgt = [24, 1890, 600]
assert_equal(np.prod(arr, axis=-1), tgt)
def test_ravel(self):
a = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [1, 2, 3, 4, 5, 6, 7, 8, 9, 10, 11, 12]
assert_equal(np.ravel(a), tgt)
def test_repeat(self):
a = [1, 2, 3]
tgt = [1, 1, 2, 2, 3, 3]
out = np.repeat(a, 2)
assert_equal(out, tgt)
def test_reshape(self):
arr = [[1, 2, 3], [4, 5, 6], [7, 8, 9], [10, 11, 12]]
tgt = [[1, 2, 3, 4, 5, 6], [7, 8, 9, 10, 11, 12]]
assert_equal(np.reshape(arr, (2, 6)), tgt)
def test_round(self):
arr = [1.56, 72.54, 6.35, 3.25]
tgt = [1.6, 72.5, 6.4, 3.2]
assert_equal(np.around(arr, decimals=1), tgt)
def test_searchsorted(self):
arr = [-8, -5, -1, 3, 6, 10]
out = np.searchsorted(arr, 0)
assert_equal(out, 3)
def test_size(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_(np.size(A) == 6)
assert_(np.size(A, 0) == 2)
assert_(np.size(A, 1) == 3)
def test_squeeze(self):
A = [[[1, 1, 1], [2, 2, 2], [3, 3, 3]]]
assert_equal(np.squeeze(A).shape, (3, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1))).shape, (3,))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=0).shape, (3, 1))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=-1).shape, (1, 3))
assert_equal(np.squeeze(np.zeros((1, 3, 1)), axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))]).shape, (3,))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=0).shape, (3, 1))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=2).shape, (1, 3))
assert_equal(np.squeeze([np.zeros((3, 1))], axis=-1).shape, (1, 3))
def test_std(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.std(A), 1.707825127659933)
assert_almost_equal(np.std(A, 0), np.array([1.5, 1.5, 1.5]))
assert_almost_equal(np.std(A, 1), np.array([0.81649658, 0.81649658]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.std([])))
assert_(w[0].category is RuntimeWarning)
def test_swapaxes(self):
tgt = [[[0, 4], [2, 6]], [[1, 5], [3, 7]]]
a = [[[0, 1], [2, 3]], [[4, 5], [6, 7]]]
out = np.swapaxes(a, 0, 2)
assert_equal(out, tgt)
def test_sum(self):
m = [[1, 2, 3],
[4, 5, 6],
[7, 8, 9]]
tgt = [[6], [15], [24]]
out = np.sum(m, axis=1, keepdims=True)
assert_equal(tgt, out)
def test_take(self):
tgt = [2, 3, 5]
indices = [1, 2, 4]
a = [1, 2, 3, 4, 5]
out = np.take(a, indices)
assert_equal(out, tgt)
def test_trace(self):
c = [[1, 2], [3, 4], [5, 6]]
assert_equal(np.trace(c), 5)
def test_transpose(self):
arr = [[1, 2], [3, 4], [5, 6]]
tgt = [[1, 3, 5], [2, 4, 6]]
assert_equal(np.transpose(arr, (1, 0)), tgt)
def test_var(self):
A = [[1, 2, 3], [4, 5, 6]]
assert_almost_equal(np.var(A), 2.9166666666666665)
assert_almost_equal(np.var(A, 0), np.array([2.25, 2.25, 2.25]))
assert_almost_equal(np.var(A, 1), np.array([0.66666667, 0.66666667]))
with warnings.catch_warnings(record=True) as w:
warnings.filterwarnings('always', '', RuntimeWarning)
assert_(np.isnan(np.var([])))
assert_(w[0].category is RuntimeWarning)
B = np.array([None, 0])
B[0] = 1j
assert_almost_equal(np.var(B), 0.25)
class TestIsscalar(object):
def test_isscalar(self):
assert_(np.isscalar(3.1))
assert_(np.isscalar(np.int16(12345)))
assert_(np.isscalar(False))
assert_(np.isscalar('numpy'))
assert_(not np.isscalar([3.1]))
assert_(not np.isscalar(None))
# PEP 3141
from fractions import Fraction
assert_(np.isscalar(Fraction(5, 17)))
from numbers import Number
assert_(np.isscalar(Number()))
class TestBoolScalar(object):
def test_logical(self):
f = np.False_
t = np.True_
s = "xyz"
assert_((t and s) is s)
assert_((f and s) is f)
def test_bitwise_or(self):
f = np.False_
t = np.True_
assert_((t | t) is t)
assert_((f | t) is t)
assert_((t | f) is t)
assert_((f | f) is f)
def test_bitwise_and(self):
f = np.False_
t = np.True_
assert_((t & t) is t)
assert_((f & t) is f)
assert_((t & f) is f)
assert_((f & f) is f)
def test_bitwise_xor(self):
f = np.False_
t = np.True_
assert_((t ^ t) is f)
assert_((f ^ t) is t)
assert_((t ^ f) is t)
assert_((f ^ f) is f)
class TestBoolArray(object):
def setup(self):
# offset for simd tests
self.t = np.array([True] * 41, dtype=bool)[1::]
self.f = np.array([False] * 41, dtype=bool)[1::]
self.o = np.array([False] * 42, dtype=bool)[2::]
self.nm = self.f.copy()
self.im = self.t.copy()
self.nm[3] = True
self.nm[-2] = True
self.im[3] = False
self.im[-2] = False
def test_all_any(self):
assert_(self.t.all())
assert_(self.t.any())
assert_(not self.f.all())
assert_(not self.f.any())
assert_(self.nm.any())
assert_(self.im.any())
assert_(not self.nm.all())
assert_(not self.im.all())
# check bad element in all positions
for i in range(256 - 7):
d = np.array([False] * 256, dtype=bool)[7::]
d[i] = True
assert_(np.any(d))
e = np.array([True] * 256, dtype=bool)[7::]
e[i] = False
assert_(not np.all(e))
assert_array_equal(e, ~d)
# big array test for blocked libc loops
for i in list(range(9, 6000, 507)) + [7764, 90021, -10]:
d = np.array([False] * 100043, dtype=bool)
d[i] = True
assert_(np.any(d), msg="%r" % i)
e = np.array([True] * 100043, dtype=bool)
e[i] = False
assert_(not np.all(e), msg="%r" % i)
def test_logical_not_abs(self):
assert_array_equal(~self.t, self.f)
assert_array_equal(np.abs(~self.t), self.f)
assert_array_equal(np.abs(~self.f), self.t)
assert_array_equal(np.abs(self.f), self.f)
assert_array_equal(~np.abs(self.f), self.t)
assert_array_equal(~np.abs(self.t), self.f)
assert_array_equal(np.abs(~self.nm), self.im)
np.logical_not(self.t, out=self.o)
assert_array_equal(self.o, self.f)
np.abs(self.t, out=self.o)
assert_array_equal(self.o, self.t)
def test_logical_and_or_xor(self):
assert_array_equal(self.t | self.t, self.t)
assert_array_equal(self.f | self.f, self.f)
assert_array_equal(self.t | self.f, self.t)
assert_array_equal(self.f | self.t, self.t)
np.logical_or(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t & self.t, self.t)
assert_array_equal(self.f & self.f, self.f)
assert_array_equal(self.t & self.f, self.f)
assert_array_equal(self.f & self.t, self.f)
np.logical_and(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.t)
assert_array_equal(self.t ^ self.t, self.f)
assert_array_equal(self.f ^ self.f, self.f)
assert_array_equal(self.t ^ self.f, self.t)
assert_array_equal(self.f ^ self.t, self.t)
np.logical_xor(self.t, self.t, out=self.o)
assert_array_equal(self.o, self.f)
assert_array_equal(self.nm & self.t, self.nm)
assert_array_equal(self.im & self.f, False)
assert_array_equal(self.nm & True, self.nm)
assert_array_equal(self.im & False, self.f)
assert_array_equal(self.nm | self.t, self.t)
assert_array_equal(self.im | self.f, self.im)
assert_array_equal(self.nm | True, self.t)
assert_array_equal(self.im | False, self.im)
assert_array_equal(self.nm ^ self.t, self.im)
assert_array_equal(self.im ^ self.f, self.im)
assert_array_equal(self.nm ^ True, self.im)
assert_array_equal(self.im ^ False, self.im)
class TestBoolCmp(object):
def setup(self):
self.f = np.ones(256, dtype=np.float32)
self.ef = np.ones(self.f.size, dtype=bool)
self.d = np.ones(128, dtype=np.float64)
self.ed = np.ones(self.d.size, dtype=bool)
# generate values for all permutation of 256bit simd vectors
s = 0
for i in range(32):
self.f[s:s+8] = [i & 2**x for x in range(8)]
self.ef[s:s+8] = [(i & 2**x) != 0 for x in range(8)]
s += 8
s = 0
for i in range(16):
self.d[s:s+4] = [i & 2**x for x in range(4)]
self.ed[s:s+4] = [(i & 2**x) != 0 for x in range(4)]
s += 4
self.nf = self.f.copy()
self.nd = self.d.copy()
self.nf[self.ef] = np.nan
self.nd[self.ed] = np.nan
self.inff = self.f.copy()
self.infd = self.d.copy()
self.inff[::3][self.ef[::3]] = np.inf
self.infd[::3][self.ed[::3]] = np.inf
self.inff[1::3][self.ef[1::3]] = -np.inf
self.infd[1::3][self.ed[1::3]] = -np.inf
self.inff[2::3][self.ef[2::3]] = np.nan
self.infd[2::3][self.ed[2::3]] = np.nan
self.efnonan = self.ef.copy()
self.efnonan[2::3] = False
self.ednonan = self.ed.copy()
self.ednonan[2::3] = False
self.signf = self.f.copy()
self.signd = self.d.copy()
self.signf[self.ef] *= -1.
self.signd[self.ed] *= -1.
self.signf[1::6][self.ef[1::6]] = -np.inf
self.signd[1::6][self.ed[1::6]] = -np.inf
self.signf[3::6][self.ef[3::6]] = -np.nan
self.signd[3::6][self.ed[3::6]] = -np.nan
self.signf[4::6][self.ef[4::6]] = -0.
self.signd[4::6][self.ed[4::6]] = -0.
def test_float(self):
# offset for alignment test
for i in range(4):
assert_array_equal(self.f[i:] > 0, self.ef[i:])
assert_array_equal(self.f[i:] - 1 >= 0, self.ef[i:])
assert_array_equal(self.f[i:] == 0, ~self.ef[i:])
assert_array_equal(-self.f[i:] < 0, self.ef[i:])
assert_array_equal(-self.f[i:] + 1 <= 0, self.ef[i:])
r = self.f[i:] != 0
assert_array_equal(r, self.ef[i:])
r2 = self.f[i:] != np.zeros_like(self.f[i:])
r3 = 0 != self.f[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nf[i:]), self.ef[i:])
assert_array_equal(np.isfinite(self.nf[i:]), ~self.ef[i:])
assert_array_equal(np.isfinite(self.inff[i:]), ~self.ef[i:])
assert_array_equal(np.isinf(self.inff[i:]), self.efnonan[i:])
assert_array_equal(np.signbit(self.signf[i:]), self.ef[i:])
def test_double(self):
# offset for alignment test
for i in range(2):
assert_array_equal(self.d[i:] > 0, self.ed[i:])
assert_array_equal(self.d[i:] - 1 >= 0, self.ed[i:])
assert_array_equal(self.d[i:] == 0, ~self.ed[i:])
assert_array_equal(-self.d[i:] < 0, self.ed[i:])
assert_array_equal(-self.d[i:] + 1 <= 0, self.ed[i:])
r = self.d[i:] != 0
assert_array_equal(r, self.ed[i:])
r2 = self.d[i:] != np.zeros_like(self.d[i:])
r3 = 0 != self.d[i:]
assert_array_equal(r, r2)
assert_array_equal(r, r3)
# check bool == 0x1
assert_array_equal(r.view(np.int8), r.astype(np.int8))
assert_array_equal(r2.view(np.int8), r2.astype(np.int8))
assert_array_equal(r3.view(np.int8), r3.astype(np.int8))
# isnan on amd64 takes the same code path
assert_array_equal(np.isnan(self.nd[i:]), self.ed[i:])
assert_array_equal(np.isfinite(self.nd[i:]), ~self.ed[i:])
assert_array_equal(np.isfinite(self.infd[i:]), ~self.ed[i:])
assert_array_equal(np.isinf(self.infd[i:]), self.ednonan[i:])
assert_array_equal(np.signbit(self.signd[i:]), self.ed[i:])
class TestSeterr(object):
def test_default(self):
err = np.geterr()
assert_equal(err,
dict(divide='warn',
invalid='warn',
over='warn',
under='ignore')
)
def test_set(self):
with np.errstate():
err = np.seterr()
old = np.seterr(divide='print')
assert_(err == old)
new = np.seterr()
assert_(new['divide'] == 'print')
np.seterr(over='raise')
assert_(np.geterr()['over'] == 'raise')
assert_(new['divide'] == 'print')
np.seterr(**old)
assert_(np.geterr() == old)
@pytest.mark.skipif(platform.machine() == "armv5tel", reason="See gh-413.")
def test_divide_err(self):
with np.errstate(divide='raise'):
with assert_raises(FloatingPointError):
np.array([1.]) / np.array([0.])
np.seterr(divide='ignore')
np.array([1.]) / np.array([0.])
def test_errobj(self):
olderrobj = np.geterrobj()
self.called = 0
try:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(divide='warn'):
np.seterrobj([20000, 1, None])
np.array([1.]) / np.array([0.])
assert_equal(len(w), 1)
def log_err(*args):
self.called += 1
extobj_err = args
assert_(len(extobj_err) == 2)
assert_("divide" in extobj_err[0])
with np.errstate(divide='ignore'):
np.seterrobj([20000, 3, log_err])
np.array([1.]) / np.array([0.])
assert_equal(self.called, 1)
np.seterrobj(olderrobj)
with np.errstate(divide='ignore'):
np.divide(1., 0., extobj=[20000, 3, log_err])
assert_equal(self.called, 2)
finally:
np.seterrobj(olderrobj)
del self.called
def test_errobj_noerrmask(self):
# errmask = 0 has a special code path for the default
olderrobj = np.geterrobj()
try:
# set errobj to something non default
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT,
umath.ERR_DEFAULT + 1, None])
# call a ufunc
np.isnan(np.array([6]))
# same with the default, lots of times to get rid of possible
# pre-existing stack in the code
for i in range(10000):
np.seterrobj([umath.UFUNC_BUFSIZE_DEFAULT, umath.ERR_DEFAULT,
None])
np.isnan(np.array([6]))
finally:
np.seterrobj(olderrobj)
class TestFloatExceptions(object):
def assert_raises_fpe(self, fpeerr, flop, x, y):
ftype = type(x)
try:
flop(x, y)
assert_(False,
"Type %s did not raise fpe error '%s'." % (ftype, fpeerr))
except FloatingPointError as exc:
assert_(str(exc).find(fpeerr) >= 0,
"Type %s raised wrong fpe error '%s'." % (ftype, exc))
def assert_op_raises_fpe(self, fpeerr, flop, sc1, sc2):
# Check that fpe exception is raised.
#
# Given a floating operation `flop` and two scalar values, check that
# the operation raises the floating point exception specified by
# `fpeerr`. Tests all variants with 0-d array scalars as well.
self.assert_raises_fpe(fpeerr, flop, sc1, sc2)
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2)
self.assert_raises_fpe(fpeerr, flop, sc1, sc2[()])
self.assert_raises_fpe(fpeerr, flop, sc1[()], sc2[()])
def test_floating_exceptions(self):
# Test basic arithmetic function errors
with np.errstate(all='raise'):
# Test for all real and complex float types
for typecode in np.typecodes['AllFloat']:
ftype = np.obj2sctype(typecode)
if np.dtype(ftype).kind == 'f':
# Get some extreme values for the type
fi = np.finfo(ftype)
ft_tiny = fi.tiny
ft_max = fi.max
ft_eps = fi.eps
underflow = 'underflow'
divbyzero = 'divide by zero'
else:
# 'c', complex, corresponding real dtype
rtype = type(ftype(0).real)
fi = np.finfo(rtype)
ft_tiny = ftype(fi.tiny)
ft_max = ftype(fi.max)
ft_eps = ftype(fi.eps)
# The complex types raise different exceptions
underflow = ''
divbyzero = ''
overflow = 'overflow'
invalid = 'invalid'
self.assert_raises_fpe(underflow,
lambda a, b: a/b, ft_tiny, ft_max)
self.assert_raises_fpe(underflow,
lambda a, b: a*b, ft_tiny, ft_tiny)
self.assert_raises_fpe(overflow,
lambda a, b: a*b, ft_max, ftype(2))
self.assert_raises_fpe(overflow,
lambda a, b: a/b, ft_max, ftype(0.5))
self.assert_raises_fpe(overflow,
lambda a, b: a+b, ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
lambda a, b: a-b, -ft_max, ft_max*ft_eps)
self.assert_raises_fpe(overflow,
np.power, ftype(2), ftype(2**fi.nexp))
self.assert_raises_fpe(divbyzero,
lambda a, b: a/b, ftype(1), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a/b, ftype(0), ftype(0))
self.assert_raises_fpe(invalid,
lambda a, b: a-b, ftype(np.inf), ftype(np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a+b, ftype(np.inf), ftype(-np.inf))
self.assert_raises_fpe(invalid,
lambda a, b: a*b, ftype(0), ftype(np.inf))
def test_warnings(self):
# test warning code path
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter("always")
with np.errstate(all="warn"):
np.divide(1, 0.)
assert_equal(len(w), 1)
assert_("divide by zero" in str(w[0].message))
np.array(1e300) * np.array(1e300)
assert_equal(len(w), 2)
assert_("overflow" in str(w[-1].message))
np.array(np.inf) - np.array(np.inf)
assert_equal(len(w), 3)
assert_("invalid value" in str(w[-1].message))
np.array(1e-300) * np.array(1e-300)
assert_equal(len(w), 4)
assert_("underflow" in str(w[-1].message))
class TestTypes(object):
def check_promotion_cases(self, promote_func):
# tests that the scalars get coerced correctly.
b = np.bool_(0)
i8, i16, i32, i64 = np.int8(0), np.int16(0), np.int32(0), np.int64(0)
u8, u16, u32, u64 = np.uint8(0), np.uint16(0), np.uint32(0), np.uint64(0)
f32, f64, fld = np.float32(0), np.float64(0), np.longdouble(0)
c64, c128, cld = np.complex64(0), np.complex128(0), np.clongdouble(0)
# coercion within the same kind
assert_equal(promote_func(i8, i16), np.dtype(np.int16))
assert_equal(promote_func(i32, i8), np.dtype(np.int32))
assert_equal(promote_func(i16, i64), np.dtype(np.int64))
assert_equal(promote_func(u8, u32), np.dtype(np.uint32))
assert_equal(promote_func(f32, f64), np.dtype(np.float64))
assert_equal(promote_func(fld, f32), np.dtype(np.longdouble))
assert_equal(promote_func(f64, fld), np.dtype(np.longdouble))
assert_equal(promote_func(c128, c64), np.dtype(np.complex128))
assert_equal(promote_func(cld, c128), np.dtype(np.clongdouble))
assert_equal(promote_func(c64, fld), np.dtype(np.clongdouble))
# coercion between kinds
assert_equal(promote_func(b, i32), np.dtype(np.int32))
assert_equal(promote_func(b, u8), np.dtype(np.uint8))
assert_equal(promote_func(i8, u8), np.dtype(np.int16))
assert_equal(promote_func(u8, i32), np.dtype(np.int32))
assert_equal(promote_func(i64, u32), np.dtype(np.int64))
assert_equal(promote_func(u64, i32), np.dtype(np.float64))
assert_equal(promote_func(i32, f32), np.dtype(np.float64))
assert_equal(promote_func(i64, f32), np.dtype(np.float64))
assert_equal(promote_func(f32, i16), np.dtype(np.float32))
assert_equal(promote_func(f32, u32), np.dtype(np.float64))
assert_equal(promote_func(f32, c64), np.dtype(np.complex64))
assert_equal(promote_func(c128, f32), np.dtype(np.complex128))
assert_equal(promote_func(cld, f64), np.dtype(np.clongdouble))
# coercion between scalars and 1-D arrays
assert_equal(promote_func(np.array([b]), i8), np.dtype(np.int8))
assert_equal(promote_func(np.array([b]), u8), np.dtype(np.uint8))
assert_equal(promote_func(np.array([b]), i32), np.dtype(np.int32))
assert_equal(promote_func(np.array([b]), u32), np.dtype(np.uint32))
assert_equal(promote_func(np.array([i8]), i64), np.dtype(np.int8))
assert_equal(promote_func(u64, np.array([i32])), np.dtype(np.int32))
assert_equal(promote_func(i64, np.array([u32])), np.dtype(np.uint32))
assert_equal(promote_func(np.int32(-1), np.array([u64])),
np.dtype(np.float64))
assert_equal(promote_func(f64, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(fld, np.array([f32])), np.dtype(np.float32))
assert_equal(promote_func(np.array([f64]), fld), np.dtype(np.float64))
assert_equal(promote_func(fld, np.array([c64])),
np.dtype(np.complex64))
assert_equal(promote_func(c64, np.array([f64])),
np.dtype(np.complex128))
assert_equal(promote_func(np.complex64(3j), np.array([f64])),
np.dtype(np.complex128))
# coercion between scalars and 1-D arrays, where
# the scalar has greater kind than the array
assert_equal(promote_func(np.array([b]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([b]), i64), np.dtype(np.int64))
assert_equal(promote_func(np.array([b]), u64), np.dtype(np.uint64))
assert_equal(promote_func(np.array([i8]), f64), np.dtype(np.float64))
assert_equal(promote_func(np.array([u16]), f64), np.dtype(np.float64))
# uint and int are treated as the same "kind" for
# the purposes of array-scalar promotion.
assert_equal(promote_func(np.array([u16]), i32), np.dtype(np.uint16))
# float and complex are treated as the same "kind" for
# the purposes of array-scalar promotion, so that you can do
# (0j + float32array) to get a complex64 array instead of
# a complex128 array.
assert_equal(promote_func(np.array([f32]), c128),
np.dtype(np.complex64))
def test_coercion(self):
def res_type(a, b):
return np.add(a, b).dtype
self.check_promotion_cases(res_type)
# Use-case: float/complex scalar * bool/int8 array
# shouldn't narrow the float/complex type
for a in [np.array([True, False]), np.array([-3, 12], dtype=np.int8)]:
b = 1.234 * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.longdouble(1.234) * a
assert_equal(b.dtype, np.dtype(np.longdouble),
"array type %s" % a.dtype)
b = np.float64(1.234) * a
assert_equal(b.dtype, np.dtype('f8'), "array type %s" % a.dtype)
b = np.float32(1.234) * a
assert_equal(b.dtype, np.dtype('f4'), "array type %s" % a.dtype)
b = np.float16(1.234) * a
assert_equal(b.dtype, np.dtype('f2'), "array type %s" % a.dtype)
b = 1.234j * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.clongdouble(1.234j) * a
assert_equal(b.dtype, np.dtype(np.clongdouble),
"array type %s" % a.dtype)
b = np.complex128(1.234j) * a
assert_equal(b.dtype, np.dtype('c16'), "array type %s" % a.dtype)
b = np.complex64(1.234j) * a
assert_equal(b.dtype, np.dtype('c8'), "array type %s" % a.dtype)
# The following use-case is problematic, and to resolve its
# tricky side-effects requires more changes.
#
# Use-case: (1-t)*a, where 't' is a boolean array and 'a' is
# a float32, shouldn't promote to float64
#
# a = np.array([1.0, 1.5], dtype=np.float32)
# t = np.array([True, False])
# b = t*a
# assert_equal(b, [1.0, 0.0])
# assert_equal(b.dtype, np.dtype('f4'))
# b = (1-t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
#
# Probably ~t (bitwise negation) is more proper to use here,
# but this is arguably less intuitive to understand at a glance, and
# would fail if 't' is actually an integer array instead of boolean:
#
# b = (~t)*a
# assert_equal(b, [0.0, 1.5])
# assert_equal(b.dtype, np.dtype('f4'))
def test_result_type(self):
self.check_promotion_cases(np.result_type)
assert_(np.result_type(None) == np.dtype(None))
def test_promote_types_endian(self):
# promote_types should always return native-endian types
assert_equal(np.promote_types('<i8', '<i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>i8'), np.dtype('i8'))
assert_equal(np.promote_types('>i8', '>U16'), np.dtype('U21'))
assert_equal(np.promote_types('<i8', '<U16'), np.dtype('U21'))
assert_equal(np.promote_types('>U16', '>i8'), np.dtype('U21'))
assert_equal(np.promote_types('<U16', '<i8'), np.dtype('U21'))
assert_equal(np.promote_types('<S5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>S5', '>U8'), np.dtype('U8'))
assert_equal(np.promote_types('<U8', '<S5'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>S5'), np.dtype('U8'))
assert_equal(np.promote_types('<U5', '<U8'), np.dtype('U8'))
assert_equal(np.promote_types('>U8', '>U5'), np.dtype('U8'))
assert_equal(np.promote_types('<M8', '<M8'), np.dtype('M8'))
assert_equal(np.promote_types('>M8', '>M8'), np.dtype('M8'))
assert_equal(np.promote_types('<m8', '<m8'), np.dtype('m8'))
assert_equal(np.promote_types('>m8', '>m8'), np.dtype('m8'))
def test_promote_types_strings(self):
assert_equal(np.promote_types('bool', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('b', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('u1', 'S'), np.dtype('S3'))
assert_equal(np.promote_types('u2', 'S'), np.dtype('S5'))
assert_equal(np.promote_types('u4', 'S'), np.dtype('S10'))
assert_equal(np.promote_types('u8', 'S'), np.dtype('S20'))
assert_equal(np.promote_types('i1', 'S'), np.dtype('S4'))
assert_equal(np.promote_types('i2', 'S'), np.dtype('S6'))
assert_equal(np.promote_types('i4', 'S'), np.dtype('S11'))
assert_equal(np.promote_types('i8', 'S'), np.dtype('S21'))
assert_equal(np.promote_types('bool', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('b', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('u1', 'U'), np.dtype('U3'))
assert_equal(np.promote_types('u2', 'U'), np.dtype('U5'))
assert_equal(np.promote_types('u4', 'U'), np.dtype('U10'))
assert_equal(np.promote_types('u8', 'U'), np.dtype('U20'))
assert_equal(np.promote_types('i1', 'U'), np.dtype('U4'))
assert_equal(np.promote_types('i2', 'U'), np.dtype('U6'))
assert_equal(np.promote_types('i4', 'U'), np.dtype('U11'))
assert_equal(np.promote_types('i8', 'U'), np.dtype('U21'))
assert_equal(np.promote_types('bool', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('bool', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('b', 'S1'), np.dtype('S4'))
assert_equal(np.promote_types('b', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u1', 'S1'), np.dtype('S3'))
assert_equal(np.promote_types('u1', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u2', 'S1'), np.dtype('S5'))
assert_equal(np.promote_types('u2', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u4', 'S1'), np.dtype('S10'))
assert_equal(np.promote_types('u4', 'S30'), np.dtype('S30'))
assert_equal(np.promote_types('u8', 'S1'), np.dtype('S20'))
assert_equal(np.promote_types('u8', 'S30'), np.dtype('S30'))
def test_can_cast(self):
assert_(np.can_cast(np.int32, np.int64))
assert_(np.can_cast(np.float64, complex))
assert_(not np.can_cast(complex, float))
assert_(np.can_cast('i8', 'f8'))
assert_(not np.can_cast('i8', 'f4'))
assert_(np.can_cast('i4', 'S11'))
assert_(np.can_cast('i8', 'i8', 'no'))
assert_(not np.can_cast('<i8', '>i8', 'no'))
assert_(np.can_cast('<i8', '>i8', 'equiv'))
assert_(not np.can_cast('<i4', '>i8', 'equiv'))
assert_(np.can_cast('<i4', '>i8', 'safe'))
assert_(not np.can_cast('<i8', '>i4', 'safe'))
assert_(np.can_cast('<i8', '>i4', 'same_kind'))
assert_(not np.can_cast('<i8', '>u4', 'same_kind'))
assert_(np.can_cast('<i8', '>u4', 'unsafe'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'S4'))
assert_(not np.can_cast('b', 'S3'))
assert_(np.can_cast('u1', 'S3'))
assert_(not np.can_cast('u1', 'S2'))
assert_(np.can_cast('u2', 'S5'))
assert_(not np.can_cast('u2', 'S4'))
assert_(np.can_cast('u4', 'S10'))
assert_(not np.can_cast('u4', 'S9'))
assert_(np.can_cast('u8', 'S20'))
assert_(not np.can_cast('u8', 'S19'))
assert_(np.can_cast('i1', 'S4'))
assert_(not np.can_cast('i1', 'S3'))
assert_(np.can_cast('i2', 'S6'))
assert_(not np.can_cast('i2', 'S5'))
assert_(np.can_cast('i4', 'S11'))
assert_(not np.can_cast('i4', 'S10'))
assert_(np.can_cast('i8', 'S21'))
assert_(not np.can_cast('i8', 'S20'))
assert_(np.can_cast('bool', 'S5'))
assert_(not np.can_cast('bool', 'S4'))
assert_(np.can_cast('b', 'U4'))
assert_(not np.can_cast('b', 'U3'))
assert_(np.can_cast('u1', 'U3'))
assert_(not np.can_cast('u1', 'U2'))
import numpy as np
import math
import scipy.integrate as integrate
def W3(r, h):
r = abs(r)/h
C = 8/h**3/math.pi
if r > 1:
return 0
elif r > 1/2:
return C*2*(1-r)**3
else:
return C*(1 - 6*r**2 + 6*r**3)
def func(x,h,z):
return W3(math.sqrt(z**2 + x**2),h)*2*math.pi*x
def integral(hsml, z):
if hsml**2 - z**2 < 0:
return 0
else:
return integrate.quad(func, 0, math.sqrt(hsml**2 - z**2), args=(hsml, z))[0]
np_W3 = np.frompyfunc(W3,2,1)
np_int = np.frompyfunc(integral,2,1)
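# Illustrative check (assumption: not part of the original script). The cubic
# spline kernel W3 is normalised so that its integral over the sphere of
# radius h equals 1; integrating the column integral over z confirms this.
def check_kernel_normalisation(h=1.0):
    total = integrate.quad(lambda z: integral(h, z), -h, h)[0]
    assert abs(total - 1.0) < 1e-3  # ~1 up to quadrature error
    return total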
def Mout(Z, hsml, Vz, M, T, H, flag):
dz = np.abs(np.abs(Z) - H)
dMout = np_int(hsml, dz)*M*np.abs(Vz)
if flag == 0: #cold outflow
dotM_p = np.where((dz < hsml) & (Z > 0) & (Vz > 0) & (T < 1e5), dMout, 0)
dotM_m = np.where((dz < hsml) & (Z < 0) & (Vz < 0) & (T < 1e5), dMout, 0)
else: # hot outflow
dotM_p = np.where((dz < hsml) & (Z > 0) & (Vz > 0) & (T > 1e5), dMout, 0)
dotM_m = np.where((dz < hsml) & (Z < 0) & (Vz < 0) & (T > 1e5), dMout, 0)
dotM = dotM_m + dotM_p
return dotM
def Eout(Z, hsml, Vz, M, U, T, H, flag):
dz = np.abs(np.abs(Z) - H)
E = 0.5*M*(Vz*Vz) + U*M
dEout = np_int(hsml, dz)*E*np.abs(Vz)
if flag == 0: #cold outflow
dotE_p = np.where((dz < hsml) & (Z > 0) & (Vz > 0) & (T < 1e5), dEout, 0)
import pickle
import random
import logging
import cv2
import torch
import numpy as np
from tqdm import tqdm, trange
from imgaug.augmenters import Resize
import os
from natsort import natsorted
import re
import imgaug.augmenters as iaa
from imgaug.augmentables.lines import LineString, LineStringsOnImage
from torchvision.transforms import ToTensor
from lib.lane import Lane
from PIL import Image
from torchvision import transforms
from scipy.interpolate import InterpolatedUnivariateSpline
from torchvision import utils
# class Lane:
# def __init__(self, points=None, invalid_value=-2., metadata=None):
# super(Lane, self).__init__()
# self.curr_iter = 0
# self.points = points
# self.invalid_value = invalid_value
# self.function = InterpolatedUnivariateSpline(points[:, 1], points[:, 0], k=min(3, len(points) - 1))
# self.min_y = points[:, 1].min() - 0.01
# self.max_y = points[:, 1].max() + 0.01
#
# self.metadata = metadata or {}
#
# def __repr__(self):
# return '[Lane]\n' + str(self.points) + '\n[/Lane]'
#
# def __call__(self, lane_ys):
# lane_xs = self.function(lane_ys)
#
# lane_xs[(lane_ys < self.min_y) | (lane_ys > self.max_y)] = self.invalid_value
# return lane_xs
#
# def __iter__(self):
# return self
#
# def __next__(self):
# if self.curr_iter < len(self.points):
# self.curr_iter += 1
# return self.points[self.curr_iter - 1]
# self.curr_iter = 0
# raise StopIteration
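# Illustrative sketch (assumption: not part of the original file). It shows the
# fitting pattern the commented-out Lane class above (and lib.lane.Lane) relies
# on: a spline x = f(y) through the lane points, queried at arbitrary y values.
def _example_lane_spline():
    pts = np.array([[10.0, 0.0], [12.0, 1.0], [15.0, 2.0], [19.0, 3.0]])  # [x, y]
    fit = InterpolatedUnivariateSpline(pts[:, 1], pts[:, 0], k=min(3, len(pts) - 1))
    ys = np.array([0.5, 1.5, 2.5])
    return fit(ys)  # interpolated x positions along the lane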
class Runner:
def __init__(self, cfg, exp, device, test_dataset, test_first_dir, test_second_dir, exp_name, hyper, hyper_param,
video_name, root_path, webcam=False, resume=False, view=None, deterministic=False):
self.cfg = cfg
self.exp = exp
self.device = device
self.resume = resume
self.view = view
self.test_dataset = test_dataset
self.test_first_dir = test_first_dir
self.test_second_dir = test_second_dir
self.logger = logging.getLogger(__name__)
self.dataset_type = hyper_param[3]
self.conf_threshold = hyper_param[0]
self.nms_thres = hyper_param[1]
self.nms_topk = hyper_param[2]
self.root = root_path
self.video_name = video_name
self.hyper = hyper
print(self.root)
self.exp_name = "/{}/{}/".format(exp_name, self.hyper)
self.name = test_first_dir + test_second_dir + test_dataset
print(self.name)
self.log_dir = self.name + self.exp_name # os.path.join(self.name,self.exp_name)
print(self.log_dir)
os.makedirs(self.log_dir, exist_ok=True)
# Fix seeds
torch.manual_seed(cfg['seed'])
np.random.seed(cfg['seed'])
from collections import OrderedDict
from functools import reduce
import numpy as np
import matplotlib.collections as mcoll
import matplotlib.mlab as mlab
import matplotlib.cbook as cbook
from classes.om import ObjectManager
from classes.ui import UIManager
from classes.ui import RepresentationController
from classes.ui import RepresentationView
from app.app_utils import MPL_COLORMAPS
class DensityRepresentationController(RepresentationController):
tid = 'density_representation_controller'
_ATTRIBUTES = OrderedDict()
_ATTRIBUTES['type'] = {
'default_value': 'wiggle', # 'density',
'type': str
}
_ATTRIBUTES['colormap'] = {
'default_value': 'Spectral_r', # 'gray',
'type': str
}
_ATTRIBUTES['interpolation'] = {
'default_value': 'bicubic', # 'none', #'bilinear',
'type': str
}
_ATTRIBUTES['min_density'] = {
'default_value': None,
'type': float
}
_ATTRIBUTES['max_density'] = {
'default_value': None,
'type': float
}
_ATTRIBUTES['density_alpha'] = {
'default_value': 1.0,
'type': float
}
_ATTRIBUTES['linecolor'] = {
'default_value': 'Black',
'type': str
}
_ATTRIBUTES['linewidth'] = {
'default_value': 0.6,
'type': float
}
_ATTRIBUTES['min_wiggle'] = {
'default_value': None,
'type': float
}
_ATTRIBUTES['max_wiggle'] = {
'default_value': None,
'type': float
}
_ATTRIBUTES['wiggle_alpha'] = {
'default_value': 0.5,
'type': float
}
_ATTRIBUTES['fill'] = {
'default_value': None,
'type': str
}
_ATTRIBUTES['fill_color_left'] = {
'default_value': 'Red',
'type': str
}
_ATTRIBUTES['fill_color_right'] = {
'default_value': 'Blue',
'type': str
}
def __init__(self, **state):
super().__init__(**state)
def PostInit(self):
self.subscribe(self.on_change_colormap, 'change.colormap')
self.subscribe(self.on_change_density_alpha,
'change.density_alpha'
)
self.subscribe(self.on_change_wiggle_alpha,
'change.wiggle_alpha'
)
def _get_pg_properties(self):
"""
"""
props = OrderedDict()
props['type'] = {
'pg_property': 'EnumProperty',
'label': 'Plot type',
'options_labels': ['Density', 'Wiggle', 'Both'],
'options_values': ['density', 'wiggle', 'both']
}
props['colormap'] = {
'pg_property': 'MPLColormapsProperty',
'label': 'Colormap',
}
props['interpolation'] = {
'pg_property': 'EnumProperty',
'label': 'Colormap interpolation',
'options_labels': ['none', 'nearest', 'bilinear', 'bicubic',
'spline16', 'spline36', 'hanning', 'hamming',
'hermite', 'kaiser', 'quadric', 'catrom',
'gaussian', 'bessel', 'mitchell', 'sinc',
'lanczos'
]
}
props['min_density'] = {
'pg_property': 'FloatProperty',
'label': 'Colormap min value'
}
props['max_density'] = {
'pg_property': 'FloatProperty',
'label': 'Colormap max value'
}
props['density_alpha'] = {
'pg_property': 'FloatProperty',
'label': 'Colormap alpha'
}
props['linecolor'] = {
'pg_property': 'MPLColorsProperty',
'label': 'Wiggle line color'
}
props['linewidth'] = {
'pg_property': 'FloatProperty',
'label': 'Wiggle line width'
}
props['min_wiggle'] = {
'pg_property': 'FloatProperty',
'label': 'Wiggle min value'
}
props['max_wiggle'] = {
'pg_property': 'FloatProperty',
'label': 'Wiggle max value'
}
props['wiggle_alpha'] = {
'pg_property': 'FloatProperty',
'label': 'Wiggle alpha'
}
props['fill'] = {
'pg_property': 'EnumProperty',
'label': 'Wiggle fill type',
'options_labels': ['None', 'Left', 'Right', 'Both'],
'options_values': [None, 'left', 'right', 'both']
}
props['fill_color_left'] = {
'pg_property': 'MPLColorsProperty',
'label': 'Wiggle left fill color'
}
props['fill_color_right'] = {
'pg_property': 'MPLColorsProperty',
'label': 'Wiggle right fill color'
}
return props
def on_change_density_alpha(self, new_value, old_value):
if new_value >= 0.0 and new_value <= 1.0:
self.view.set_density_alpha(new_value)
else:
self.set_value_from_event('density_alpha', old_value)
def on_change_wiggle_alpha(self, new_value, old_value):
if new_value >= 0.0 and new_value <= 1.0:
self.view.set_wiggle_alpha(new_value)
else:
self.set_value_from_event('wiggle_alpha', old_value)
def on_change_colormap(self, new_value, old_value):
if new_value not in MPL_COLORMAPS:
msg = 'Invalid colormap. Valid values are: {}'.format(MPL_COLORMAPS)
print(msg)
self.set_value_from_event('colormap', old_value)
else:
self.view.set_colormap(new_value)
class DensityRepresentationView(RepresentationView):
tid = 'density_representation_view'
def __init__(self, controller_uid):
super().__init__(controller_uid)
def PostInit(self):
UIM = UIManager()
controller = UIM.get(self._controller_uid)
#
# TODO: find a better place to redefine the colormap
tid, _ = controller.get_data_object_uid()
if tid == 'gather' or tid == 'seismic':
controller.colormap = 'gray_r'
#
controller.subscribe(self._draw, 'change.type')
controller.subscribe(self.set_interpolation, 'change.interpolation')
controller.subscribe(self._draw, 'change.min_density')
controller.subscribe(self._draw, 'change.max_density')
controller.subscribe(self.set_line_width, 'change.linewidth')
controller.subscribe(self.set_line_color, 'change.linecolor')
controller.subscribe(self.fill_between, 'change.fill')
controller.subscribe(self.fill_color_left, 'change.fill_color_left')
controller.subscribe(self.fill_color_right, 'change.fill_color_right')
controller.subscribe(self._draw, 'change.min_wiggle')
controller.subscribe(self._draw, 'change.max_wiggle')
def _draw(self, new_value, old_value):
# Bypass function
self.draw()
def set_colormap(self, colormap):
if self._mplot_objects['density']:
self._mplot_objects['density'].set_cmap(colormap)
toc = self.get_parent_controller()
label = toc.get_label()
if label:
label.set_colormap(colormap)
self.draw_canvas()
def set_interpolation(self, new_value, old_value):
if self._mplot_objects['density']:
self._mplot_objects['density'].set_interpolation(new_value)
self.draw_canvas()
def set_density_alpha(self, alpha):
if self._mplot_objects['density']:
self._mplot_objects['density'].set_alpha(alpha)
self.draw_canvas()
def set_wiggle_alpha(self, alpha):
if len(self._mplot_objects['wiggle']) == 0:
return
for idx in range(0, len(self._mplot_objects['wiggle'])):
mpl_obj = self._mplot_objects['wiggle'][idx]
if mpl_obj is not None:
mpl_obj.set_alpha(alpha)
self.draw_canvas()
def set_line_color(self, new_value, old_value):
for idx_line in range(0, len(self._mplot_objects['wiggle']), 3):
line = self._mplot_objects['wiggle'][idx_line]
line.set_color(new_value)
self.draw_canvas()
def set_line_width(self, new_value, old_value):
for idx_line in range(0, len(self._mplot_objects['wiggle']), 3):
line = self._mplot_objects['wiggle'][idx_line]
line.set_linewidth(new_value)
self.draw_canvas()
def fill_color_left(self, new_value, old_value):
for idx_fill_obj in range(1, len(self._mplot_objects['wiggle']), 3):
fill_mpl_obj = self._mplot_objects['wiggle'][idx_fill_obj]
if fill_mpl_obj:
fill_mpl_obj.set_color(new_value)
self.draw_canvas()
def fill_color_right(self, new_value, old_value):
for idx_fill_obj in range(2, len(self._mplot_objects['wiggle']), 3):
fill_mpl_obj = self._mplot_objects['wiggle'][idx_fill_obj]
if fill_mpl_obj:
fill_mpl_obj.set_color(new_value)
self.draw_canvas()
def get_data_info(self, event):
"""
Returns a string with information about the data currently displayed,
according to the current mouse position.
"""
image = self._mplot_objects.get('density')
if image:
value = image.get_cursor_data(event)
#
# UIM = UIManager()
# controller = UIM.get(self._controller_uid)
toc = self.get_parent_controller()
x_di_uid, x_index_data = toc.get_index_for_dimension(-2)
y_di_uid, y_index_data = toc.get_index_for_dimension(-1)
canvas = self.get_canvas()
xvalue = canvas.inverse_transform(event.xdata,
x_index_data[0],
x_index_data[-1]
)
#
OM = ObjectManager()
x_data_index = OM.get(x_di_uid)
y_data_index = OM.get(y_di_uid)
#
if event.ydata < y_index_data[0] or event.ydata > y_index_data[-1]:
return None
#
msg = x_data_index.name + ': {:0.2f}'.format(xvalue) + ', ' \
+ y_data_index.name + ': {:0.2f}'.format(event.ydata)
msg += ', Value: {:0.2f}'.format(value)
return '[' + msg + ']'
else:
msg = ''
return '[' + msg + ']'
# raise Exception('Handle get_data_info for Wiggle.')
def draw(self):
self.clear()
self._mplot_objects['density'] = None
self._mplot_objects['wiggle'] = []
#
UIM = UIManager()
controller = UIM.get(self._controller_uid)
toc = self.get_parent_controller()
#
data = toc.get_filtered_data(dimensions_desired=2)
x_di_uid, x_index_data = toc.get_index_for_dimension(-2)
y_di_uid, y_index_data = toc.get_index_for_dimension(-1)
#
OM = ObjectManager()
xdata_index = OM.get(x_di_uid)
ydata_index = OM.get(y_di_uid)
#
canvas = self.get_canvas()
toc_uid = UIM._getparentuid(self._controller_uid)
track_controller_uid = UIM._getparentuid(toc_uid)
track_controller = UIM.get(track_controller_uid)
#
xlim_min, xlim_max = canvas.get_xlim('plot_axes')
#
if controller.type == 'density' or controller.type == 'both':
# (left, right, bottom, top)
extent = (xlim_min, xlim_max,
np.nanmax(y_index_data), np.nanmin(y_index_data)
)
try:
image = track_controller.append_artist('AxesImage',
cmap=controller.colormap,
interpolation=controller.interpolation,
extent=extent
)
image.set_data(data.T)
image.set_label(self._controller_uid)
if image.get_clip_path() is None:
# image does not already have clipping set,
# clip to axes patch
image.set_clip_path(image.axes.patch)
if controller.min_density is None:
controller.set_value_from_event('min_density',
np.nanmin(data))
""" Test functions for linalg module
"""
import os
import sys
import itertools
import traceback
import textwrap
import subprocess
import pytest
import numpy as np
from numpy import array, single, double, csingle, cdouble, dot, identity, matmul
from numpy import multiply, atleast_2d, inf, asarray
from numpy import linalg
from numpy.linalg import matrix_power, norm, matrix_rank, multi_dot, LinAlgError
from numpy.linalg.linalg import _multi_dot_matrix_chain_order
from numpy.testing import (
assert_, assert_equal, assert_raises, assert_array_equal,
assert_almost_equal, assert_allclose, suppress_warnings,
assert_raises_regex, HAS_LAPACK64,
)
from numpy.testing._private.utils import requires_memory
def consistent_subclass(out, in_):
# For ndarray subclass input, our output should have the same subclass
# (non-ndarray input gets converted to ndarray).
return type(out) is (type(in_) if isinstance(in_, np.ndarray)
else np.ndarray)
old_assert_almost_equal = assert_almost_equal
def assert_almost_equal(a, b, single_decimal=6, double_decimal=12, **kw):
if asarray(a).dtype.type in (single, csingle):
decimal = single_decimal
else:
decimal = double_decimal
old_assert_almost_equal(a, b, decimal=decimal, **kw)
def get_real_dtype(dtype):
return {single: single, double: double,
csingle: single, cdouble: double}[dtype]
def get_complex_dtype(dtype):
return {single: csingle, double: cdouble,
csingle: csingle, cdouble: cdouble}[dtype]
def get_rtol(dtype):
# Choose a safe rtol
if dtype in (single, csingle):
return 1e-5
else:
return 1e-11
# used to categorize tests
all_tags = {
'square', 'nonsquare', 'hermitian', # mutually exclusive
'generalized', 'size-0', 'strided' # optional additions
}
class LinalgCase:
def __init__(self, name, a, b, tags=set()):
"""
A bundle of arguments to be passed to a test case, with an identifying
name, the operands a and b, and a set of tags to filter the tests
"""
assert_(isinstance(name, str))
self.name = name
self.a = a
self.b = b
self.tags = frozenset(tags) # prevent shared tags
def check(self, do):
"""
Run the function `do` on this test case, expanding arguments
"""
do(self.a, self.b, tags=self.tags)
def __repr__(self):
return f'<LinalgCase: {self.name}>'
def apply_tag(tag, cases):
"""
Add the given tag (a string) to each of the cases (a list of LinalgCase
objects)
"""
assert tag in all_tags, "Invalid tag"
for case in cases:
case.tags = case.tags | {tag}
return cases
#
# Base test cases
#
np.random.seed(1234)
CASES = []
# square test cases
CASES += apply_tag('square', [
LinalgCase("single",
array([[1., 2.], [3., 4.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("double",
array([[1., 2.], [3., 4.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_2",
array([[1., 2.], [3., 4.]], dtype=double),
array([[2., 1., 4.], [3., 4., 6.]], dtype=double)),
LinalgCase("csingle",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("cdouble",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_2",
array([[1. + 2j, 2 + 3j], [3 + 4j, 4 + 5j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j, 1 + 3j], [1 - 2j, 1 - 3j, 1 - 6j]], dtype=cdouble)),
LinalgCase("0x0",
np.empty((0, 0), dtype=double),
np.empty((0,), dtype=double),
tags={'size-0'}),
LinalgCase("8x8",
np.random.rand(8, 8),
np.random.rand(8)),
LinalgCase("1x1",
np.random.rand(1, 1),
np.random.rand(1)),
LinalgCase("nonarray",
[[1, 2], [3, 4]],
[2, 1]),
])
# non-square test-cases
CASES += apply_tag('nonsquare', [
LinalgCase("single_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=single),
array([2., 1.], dtype=single)),
LinalgCase("single_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=single),
array([2., 1., 3.], dtype=single)),
LinalgCase("double_nsq_1",
array([[1., 2., 3.], [3., 4., 6.]], dtype=double),
array([2., 1.], dtype=double)),
LinalgCase("double_nsq_2",
array([[1., 2.], [3., 4.], [5., 6.]], dtype=double),
array([2., 1., 3.], dtype=double)),
LinalgCase("csingle_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=csingle),
array([2. + 1j, 1. + 2j], dtype=csingle)),
LinalgCase("csingle_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=csingle),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=csingle)),
LinalgCase("cdouble_nsq_1",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([2. + 1j, 1. + 2j], dtype=cdouble)),
LinalgCase("cdouble_nsq_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([2. + 1j, 1. + 2j, 3. - 3j], dtype=cdouble)),
LinalgCase("cdouble_nsq_1_2",
array(
[[1. + 1j, 2. + 2j, 3. - 3j], [3. - 5j, 4. + 9j, 6. + 2j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("cdouble_nsq_2_2",
array(
[[1. + 1j, 2. + 2j], [3. - 3j, 4. - 9j], [5. - 4j, 6. + 8j]], dtype=cdouble),
array([[2. + 1j, 1. + 2j], [1 - 1j, 2 - 2j], [1 - 1j, 2 - 2j]], dtype=cdouble)),
LinalgCase("8x11",
np.random.rand(8, 11),
np.random.rand(8)),
LinalgCase("1x5",
np.random.rand(1, 5),
np.random.rand(1)),
LinalgCase("5x1",
np.random.rand(5, 1),
np.random.rand(5)),
LinalgCase("0x4",
np.random.rand(0, 4),
np.random.rand(0),
tags={'size-0'}),
LinalgCase("4x0",
np.random.rand(4, 0),
np.random.rand(4),
tags={'size-0'}),
])
# hermitian test-cases
CASES += apply_tag('hermitian', [
LinalgCase("hsingle",
array([[1., 2.], [2., 1.]], dtype=single),
None),
LinalgCase("hdouble",
array([[1., 2.], [2., 1.]], dtype=double),
None),
LinalgCase("hcsingle",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=csingle),
None),
LinalgCase("hcdouble",
array([[1., 2 + 3j], [2 - 3j, 1]], dtype=cdouble),
None),
LinalgCase("hempty",
np.empty((0, 0), dtype=double),
None,
tags={'size-0'}),
LinalgCase("hnonarray",
[[1, 2], [2, 1]],
None),
LinalgCase("matrix_b_only",
array([[1., 2.], [2., 1.]]),
None),
LinalgCase("hmatrix_1x1",
np.random.rand(1, 1),
None),
])
#
# Gufunc test cases
#
def _make_generalized_cases():
new_cases = []
for case in CASES:
if not isinstance(case.a, np.ndarray):
continue
a = np.array([case.a, 2 * case.a, 3 * case.a])
if case.b is None:
b = None
else:
b = np.array([case.b, 7 * case.b, 6 * case.b])
new_case = LinalgCase(case.name + "_tile3", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
a = np.array([case.a] * 2 * 3).reshape((3, 2) + case.a.shape)
if case.b is None:
b = None
else:
b = np.array([case.b] * 2 * 3).reshape((3, 2) + case.b.shape)
new_case = LinalgCase(case.name + "_tile213", a, b,
tags=case.tags | {'generalized'})
new_cases.append(new_case)
return new_cases
CASES += _make_generalized_cases()
#
# Generate stride combination variations of the above
#
def _stride_comb_iter(x):
"""
Generate cartesian product of strides for all axes
"""
if not isinstance(x, np.ndarray):
yield x, "nop"
return
stride_set = [(1,)] * x.ndim
stride_set[-1] = (1, 3, -4)
if x.ndim > 1:
stride_set[-2] = (1, 3, -4)
if x.ndim > 2:
stride_set[-3] = (1, -4)
for repeats in itertools.product(*tuple(stride_set)):
new_shape = [abs(a * b) for a, b in zip(x.shape, repeats)]
slices = tuple([slice(None, None, repeat) for repeat in repeats])
# new array with different strides, but same data
xi = np.empty(new_shape, dtype=x.dtype)
xi.view(np.uint32).fill(0xdeadbeef)
xi = xi[slices]
xi[...] = x
xi = xi.view(x.__class__)
assert_(np.all(xi == x))
yield xi, "stride_" + "_".join(["%+d" % j for j in repeats])
# generate also zero strides if possible
if x.ndim >= 1 and x.shape[-1] == 1:
s = list(x.strides)
s[-1] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0"
if x.ndim >= 2 and x.shape[-2] == 1:
s = list(x.strides)
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_x"
if x.ndim >= 2 and x.shape[:-2] == (1, 1):
s = list(x.strides)
s[-1] = 0
s[-2] = 0
xi = np.lib.stride_tricks.as_strided(x, strides=s)
yield xi, "stride_xxx_0_0"
def _make_strided_cases():
new_cases = []
for case in CASES:
for a, a_label in _stride_comb_iter(case.a):
for b, b_label in _stride_comb_iter(case.b):
new_case = LinalgCase(case.name + "_" + a_label + "_" + b_label, a, b,
tags=case.tags | {'strided'})
new_cases.append(new_case)
return new_cases
CASES += _make_strided_cases()
#
# Test different routines against the above cases
#
class LinalgTestCase:
TEST_CASES = CASES
def check_cases(self, require=set(), exclude=set()):
"""
Run func on each of the cases with all of the tags in require, and none
of the tags in exclude
"""
for case in self.TEST_CASES:
# filter by require and exclude
if case.tags & require != require:
continue
if case.tags & exclude:
continue
try:
case.check(self.do)
except Exception as e:
msg = f'In test case: {case!r}\n\n'
msg += traceback.format_exc()
raise AssertionError(msg) from e
class LinalgSquareTestCase(LinalgTestCase):
def test_sq_cases(self):
self.check_cases(require={'square'},
exclude={'generalized', 'size-0'})
def test_empty_sq_cases(self):
self.check_cases(require={'square', 'size-0'},
exclude={'generalized'})
class LinalgNonsquareTestCase(LinalgTestCase):
def test_nonsq_cases(self):
self.check_cases(require={'nonsquare'},
exclude={'generalized', 'size-0'})
def test_empty_nonsq_cases(self):
self.check_cases(require={'nonsquare', 'size-0'},
exclude={'generalized'})
class HermitianTestCase(LinalgTestCase):
def test_herm_cases(self):
self.check_cases(require={'hermitian'},
exclude={'generalized', 'size-0'})
def test_empty_herm_cases(self):
self.check_cases(require={'hermitian', 'size-0'},
exclude={'generalized'})
class LinalgGeneralizedSquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_sq_cases(self):
self.check_cases(require={'generalized', 'square'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_sq_cases(self):
self.check_cases(require={'generalized', 'square', 'size-0'})
class LinalgGeneralizedNonsquareTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_nonsq_cases(self):
self.check_cases(require={'generalized', 'nonsquare', 'size-0'})
class HermitianGeneralizedTestCase(LinalgTestCase):
@pytest.mark.slow
def test_generalized_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian'},
exclude={'size-0'})
@pytest.mark.slow
def test_generalized_empty_herm_cases(self):
self.check_cases(require={'generalized', 'hermitian', 'size-0'},
exclude={'none'})
def dot_generalized(a, b):
a = asarray(a)
if a.ndim >= 3:
if a.ndim == b.ndim:
# matrix x matrix
new_shape = a.shape[:-1] + b.shape[-1:]
elif a.ndim == b.ndim + 1:
# matrix x vector
new_shape = a.shape[:-1]
else:
raise ValueError("Not implemented...")
r = np.empty(new_shape, dtype=np.common_type(a, b))
for c in itertools.product(*map(range, a.shape[:-2])):
r[c] = dot(a[c], b[c])
return r
else:
return dot(a, b)
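# Hedged illustration of the helper above: for a with shape (5, 2, 2) and b
# with shape (5, 2, 2) it computes dot(a[i], b[i]) for each leading index i
# (equivalent to np.matmul(a, b)); for b with shape (5, 2) the result collapses
# to a stack of matrix-vector products with shape (5, 2).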
def identity_like_generalized(a):
a = asarray(a)
if a.ndim >= 3:
r = np.empty(a.shape, dtype=a.dtype)
r[...] = identity(a.shape[-2])
return r
else:
return identity(a.shape[0])
class SolveCases(LinalgSquareTestCase, LinalgGeneralizedSquareTestCase):
    # kept apart from TestSolve so it can be reused for testing with matrices.
def do(self, a, b, tags):
x = linalg.solve(a, b)
assert_almost_equal(b, dot_generalized(a, x))
assert_(consistent_subclass(x, b))
class TestSolve(SolveCases):
@pytest.mark.parametrize('dtype', [single, double, csingle, cdouble])
def test_types(self, dtype):
x = np.array([[1, 0.5], [0.5, 1]], dtype=dtype)
assert_equal(linalg.solve(x, x).dtype, dtype)
def test_0_size(self):
class ArraySubclass(np.ndarray):
pass
# Test system of 0x0 matrices
a = np.arange(8).reshape(2, 2, 2)
b = np.arange(6).reshape(1, 2, 3).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0, :]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0, :])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
# Test errors for non-square and only b's dimension being 0
assert_raises(linalg.LinAlgError, linalg.solve, a[:, 0:0, 0:1], b)
assert_raises(ValueError, linalg.solve, a, b[:, 0:0, :])
# Test broadcasting error
b = np.arange(6).reshape(1, 3, 2) # broadcasting error
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
# Test zero "single equations" with 0x0 matrices.
b = np.arange(2).reshape(1, 2).view(ArraySubclass)
expected = linalg.solve(a, b)[:, 0:0]
result = linalg.solve(a[:, 0:0, 0:0], b[:, 0:0])
assert_array_equal(result, expected)
assert_(isinstance(result, ArraySubclass))
b = np.arange(3).reshape(1, 3)
assert_raises(ValueError, linalg.solve, a, b)
assert_raises(ValueError, linalg.solve, a[0:0], b[0:0])
assert_raises(ValueError, linalg.solve, a[:, 0:0, 0:0], b)
def test_0_size_k(self):
# test zero multiple equation (K=0) case.
class ArraySubclass(np.ndarray):
pass
a = np.arange(4).reshape(1, 2, 2)
b = np.arange(6).reshape(3, 2, 1).view(ArraySubclass)
        expected = linalg.solve(a, b)
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
from xgboost import XGBClassifier
from sklearn.model_selection import KFold
from category_encoders import CountEncoder
from sklearn.pipeline import Pipeline
from sklearn.metrics import log_loss
import matplotlib.pyplot as plt
from sklearn.multioutput import MultiOutputClassifier
import os
import warnings
warnings.filterwarnings('ignore')
SEED = 777
NFOLDS = 5
DATA_DIR = 'data/'
np.random.seed(SEED)
train = pd.read_csv(DATA_DIR + 'train_features.csv')
targets = pd.read_csv(DATA_DIR + 'train_targets_scored.csv')
test = pd.read_csv(DATA_DIR + 'test_features.csv')
sub = pd.read_csv(DATA_DIR + 'sample_submission.csv')
# drop id col
X = train.iloc[:, 1:].to_numpy()
X_test = test.iloc[:, 1:].to_numpy()
y = targets.iloc[:, 1:].to_numpy()
for i in range(len(X[:, 1])):
if X[i, 1] == 24:
X[i, 1] = 0
if X[i, 1] == 48:
X[i, 1] = 1
if X[i, 1] == 72:
X[i, 1] = 2
for i in range(len(X[:, 2])):
if X[i, 2] == 'D1':
X[i, 2] = 0
if X[i, 2] == 'D2':
X[i, 2] = 1
for i in range(len(X_test[:, 1])):
if X_test[i, 1] == 24:
X_test[i, 1] = 0
if X_test[i, 1] == 48:
X_test[i, 1] = 1
if X_test[i, 1] == 72:
X_test[i, 1] = 2
for i in range(len(X_test[:, 2])):
if X_test[i, 2] == 'D1':
X_test[i, 2] = 0
if X_test[i, 2] == 'D2':
X_test[i, 2] = 1
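# Equivalent vectorised form (illustrative sketch, not part of the original
# script): the loops above encode cp_time {24, 48, 72} -> {0, 1, 2} and
# cp_dose {'D1', 'D2'} -> {0, 1}; with pandas this could instead be written as
#   train['cp_time'].map({24: 0, 48: 1, 72: 2})
#   train['cp_dose'].map({'D1': 0, 'D2': 1})
# before converting to numpy.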
classifier = MultiOutputClassifier(XGBClassifier(tree_method='gpu_hist'))
clf = Pipeline([('encode', CountEncoder(cols=[0, 2])),
('classify', classifier)
])
params = {'classify__estimator__colsample_bytree': 0.6522,
'classify__estimator__gamma': 3.6975,
'classify__estimator__learning_rate': 0.0503,
'classify__estimator__max_delta_step': 2.0706,
'classify__estimator__max_depth': 10,
'classify__estimator__min_child_weight': 31.5800,
'classify__estimator__n_estimators': 166,
'classify__estimator__subsample': 0.8639
}
_ = clf.set_params(**params)
oof_preds = np.zeros(y.shape)
test_preds = np.zeros((test.shape[0], y.shape[1]))
oof_losses = []
kf = KFold(n_splits=NFOLDS, shuffle=True, random_state=SEED)
# drop the cp_type column to make the dataframe in one datatype
X_test = X_test[:, 1:]
for fn, (trn_idx, val_idx) in enumerate(kf.split(X, y)):
print('Starting fold: ', fn)
X_train, X_val = X[trn_idx], X[val_idx]
y_train, y_val = y[trn_idx], y[val_idx]
# drop where cp_type==ctl_vehicle (baseline)
ctl_mask = X_train[:, 0] == 'ctl_vehicle'
X_train = X_train[~ctl_mask, :]
y_train = y_train[~ctl_mask]
# drop the cp_type column to make the dataframe in one datatype
X_train = X_train[:, 1:]
X_val = X_val[:, 1:]
# X_test = X_test[:, 1:]
clf.fit(X_train.astype('float'), y_train)
val_preds = clf.predict_proba(X_val.astype('float')) # list of preds per class
val_preds = np.array(val_preds)[:, :, 1].T # take the positive class
oof_preds[val_idx] = val_preds
loss = log_loss(np.ravel(y_val), np.ravel(val_preds))
oof_losses.append(loss)
preds = clf.predict_proba(X_test.astype('float'))
preds = np.array(preds)[:, :, 1].T # take the positive class
test_preds += preds / NFOLDS
print(oof_losses)
print('Mean OOF loss across folds', np.mean(oof_losses))
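# Possible next step (illustrative sketch, not in the original excerpt): the
# fold-averaged test probabilities are in `test_preds`, so a submission could
# be assembled roughly as
#   sub.iloc[:, 1:] = test_preds
#   sub.loc[test['cp_type'] == 'ctl_vehicle', sub.columns[1:]] = 0
#   sub.to_csv('submission.csv', index=False)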
import os
from collections import deque
import dmc2gym
import gym
import numpy as np
import torch
import torch.nn.functional as F
import torchvision.transforms.functional as TF
from numpy.random import randint
def make_env(domain_name, task_name, seed=0, episode_length=1000, frame_stack=3, action_repeat=4, image_size=100,
mode='train'):
"""Make environment for experiments"""
assert mode in {'train', 'color_easy', 'color_hard', 'video_easy', 'video_hard'}, \
f'specified mode "{mode}" is not supported'
env = dmc2gym.make(
domain_name=domain_name,
task_name=task_name,
seed=seed,
visualize_reward=False,
from_pixels=True,
height=image_size,
width=image_size,
episode_length=episode_length,
frame_skip=action_repeat
)
env = VideoWrapper(env, mode, seed)
env = FrameStack(env, frame_stack)
env = ColorWrapper(env, mode, seed)
return env
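# Hedged usage sketch (assumes dmc2gym and the DeepMind Control Suite are
# installed):
#   env = make_env('walker', 'walk', seed=1, mode='train')
#   obs = env.reset()  # stacked frames, shape (3 * frame_stack, 100, 100)
#   obs, reward, done, info = env.step(env.action_space.sample())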
class ColorWrapper(gym.Wrapper):
"""Wrapper for the color experiments"""
prefix = "custom_vendor/data"
def __init__(self, env, mode, seed=None):
assert isinstance(env, FrameStack), 'wrapped env must be a framestack'
gym.Wrapper.__init__(self, env)
self._max_episode_steps = env._max_episode_steps
self._mode = mode
        self._random_state = np.random.RandomState(seed)
# -*- coding: UTF-8 -*-
"""Definitions for `Ensembler` class."""
import gc
import sys
import time
import numpy as np
import scipy
from astrocats.catalog.model import MODEL
from astrocats.catalog.quantity import QUANTITY
from emcee.autocorr import AutocorrError
from mosfit.mossampler import MOSSampler
from mosfit.samplers.sampler import Sampler
from mosfit.utils import calculate_WAIC, pretty_num
class Ensembler(Sampler):
"""Fit transient events with the provided model."""
_MAX_ACORC = 5
_REPLACE_AGE = 20
def __init__(
self, fitter, model=None, iterations=2000, burn=None, post_burn=None,
num_temps=1, num_walkers=None, convergence_criteria=None,
convergence_type='psrf', gibbs=False, fracking=True,
frack_step=20, **kwargs):
"""Initialize `Ensembler` class."""
super(Ensembler, self).__init__(
fitter, num_walkers=num_walkers, **kwargs)
self._model = model
self._iterations = iterations
self._burn = burn
self._post_burn = post_burn
self._num_temps = num_temps
self._cc = convergence_criteria
self._ct = convergence_type
self._gibbs = gibbs
self._fracking = fracking
self._frack_step = frack_step
self._upload_model = None
self._WAIC = None
def append_output(self, modeldict):
"""Append output from the ensembler to the model description."""
self._WAIC = None
if self._iterations > 0:
self._WAIC = calculate_WAIC(self._scores)
modeldict[MODEL.SCORE] = {
QUANTITY.VALUE: str(self._WAIC),
QUANTITY.KIND: 'WAIC'
}
modeldict[MODEL.CONVERGENCE] = []
if self._psrf < np.inf:
modeldict[MODEL.CONVERGENCE].append(
{
QUANTITY.VALUE: str(self._psrf),
QUANTITY.KIND: 'psrf'
}
)
if self._acor and self._aacort > 0:
acortimes = '<' if self._aa < self._MAX_ACORC else ''
            acortimes += str(int(float(self._emi - self._ams) / self._actc))
modeldict[MODEL.CONVERGENCE].append(
{
QUANTITY.VALUE: str(acortimes),
QUANTITY.KIND: 'autocorrelationtimes'
}
)
modeldict[MODEL.STEPS] = str(self._emi)
def prepare_output(self, check_upload_quality, upload):
"""Prepare output for writing to disk and uploading."""
prt = self._printer
if check_upload_quality:
if self._WAIC is None:
self._upload_model = False
elif self._WAIC is not None and self._WAIC < 0.0:
if upload:
prt.message('no_ul_waic', ['' if self._WAIC is None
else pretty_num(self._WAIC)])
self._upload_model = False
if len(self._all_chain):
self._pout = self._all_chain[:, :, -1, :]
self._lnprobout = self._all_lnprob[:, :, -1]
self._lnlikeout = self._all_lnlike[:, :, -1]
else:
self._pout = self._p
self._lnprobout = self._lnprob
self._lnlikeout = self._lnlike
weight = 1.0 / (self._nwalkers * self._ntemps)
self._weights = np.full_like(self._lnlikeout, weight)
# Here, we append to the vector of walkers from the full chain based
# upon the value of acort (the autocorrelation timescale).
if self._acor and self._aacort > 0 and self._aa == self._MAX_ACORC:
actc0 = int(np.ceil(self._aacort))
            for i in range(1, int(float(self._emi - self._ams) / actc0)):
self._pout = np.concatenate(
(self._all_chain[:, :, -i * self._actc, :], self._pout),
axis=1)
self._lnprobout = np.concatenate(
(self._all_lnprob[:, :, -i * self._actc],
self._lnprobout), axis=1)
self._lnlikeout = np.concatenate(
(self._all_lnlike[:, :, -i * self._actc],
self._lnlikeout), axis=1)
self._weights = np.full_like(self._lnlikeout, weight)
def run(self, walker_data):
"""Use ensemble sampling to determine posteriors."""
from mosfit.fitter import draw_walker, frack, ln_likelihood, ln_prior
prt = self._printer
self._emcee_est_t = 0.0
self._bh_est_t = 0.0
if self._burn is not None:
self._burn_in = min(self._burn, self._iterations)
elif self._post_burn is not None:
self._burn_in = max(self._iterations - self._post_burn, 0)
else:
self._burn_in = int(np.round(self._iterations / 2))
self._ntemps, ndim = (
self._num_temps, self._model._num_free_parameters)
if self._num_walkers:
self._nwalkers = self._num_walkers
else:
self._nwalkers = 2 * ndim
test_walker = self._iterations > 0
self._lnprob = None
self._lnlike = None
pool_size = max(self._pool.size, 1)
# Derived so only half a walker redrawn with Gaussian distribution.
redraw_mult = 0.5 * np.sqrt(
2) * scipy.special.erfinv(float(
self._nwalkers - 1) / self._nwalkers)
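        # Hedged sketch of the reasoning behind redraw_mult: sqrt(2)*erfinv(q)
        # is the two-sided standard-normal quantile enclosing a fraction q of
        # the mass, so with q = (nwalkers - 1)/nwalkers roughly 1/(2*nwalkers)
        # of Gaussian-distributed scores fall below the resulting cut, i.e.
        # about half of one walker per ensemble is expected to be redrawn.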
prt.message('nmeas_nfree', [self._model._num_measurements, ndim])
if test_walker:
if self._model._num_measurements <= ndim:
prt.message('too_few_walkers', warning=True)
if self._nwalkers < 10 * ndim:
prt.message('want_more_walkers', [10 * ndim, self._nwalkers],
warning=True)
p0 = [[] for x in range(self._ntemps)]
# Generate walker positions based upon loaded walker data, if
# available.
walkers_pool = []
walker_weights = []
nmodels = len(set([x[0] for x in walker_data]))
wp_extra = 0
while len(walkers_pool) < len(walker_data):
appended_walker = False
for walk in walker_data:
if (len(walkers_pool) + wp_extra) % nmodels != walk[0]:
continue
new_walk = np.full(self._model._num_free_parameters, None)
for k, key in enumerate(self._model._free_parameters):
param = self._model._modules[key]
walk_param = walk[1].get(key)
if walk_param is None or 'value' not in walk_param:
continue
if param:
val = param.fraction(walk_param['value'])
if not np.isnan(val):
new_walk[k] = val
walkers_pool.append(new_walk)
walker_weights.append(walk[2])
appended_walker = True
if not appended_walker:
wp_extra += 1
# Make sure weights are normalized.
if None not in walker_weights:
totw = np.sum(walker_weights)
walker_weights = [x / totw for x in walker_weights]
# Draw walker positions. This is either done from the priors or from
# loaded walker data. If some parameters are not available from the
# loaded walker data they will be drawn from their priors instead.
pool_len = len(walkers_pool)
for i, pt in enumerate(p0):
dwscores = []
while len(p0[i]) < self._nwalkers:
prt.status(
self,
desc='drawing_walkers',
iterations=[
i * self._nwalkers + len(p0[i]) + 1,
self._nwalkers * self._ntemps])
if self._pool.size == 0 or pool_len:
self._p, score = draw_walker(
test_walker, walkers_pool,
replace=pool_len < self._ntemps * self._nwalkers,
weights=walker_weights)
p0[i].append(self._p)
dwscores.append(score)
else:
nmap = min(self._nwalkers -
len(p0[i]), max(self._pool.size, 10))
dws = self._pool.map(draw_walker, [test_walker] * nmap)
p0[i].extend([x[0] for x in dws])
dwscores.extend([x[1] for x in dws])
if self._fitter._draw_above_likelihood is not False:
self._fitter._draw_above_likelihood = np.mean(dwscores)
prt.message('initial_draws', inline=True)
self._p = list(p0)
self._emi = 0
self._acor = None
self._aacort = -1
self._aa = 0
self._psrf = np.inf
self._all_chain = np.array([])
self._scores = np.ones((self._ntemps, self._nwalkers)) * -np.inf
tft = 0.0 # Total self._fracking time
sli = 1.0 # Keep track of how many times chain halved
s_exception = None
kmat = None
ages = np.zeros((self._ntemps, self._nwalkers), dtype=int)
oldp = self._p
max_chunk = 1000
kmat_chunk = 5
iter_chunks = int(np.ceil(float(self._iterations) / max_chunk))
iter_arr = [max_chunk if xi < iter_chunks - 1 else
self._iterations - max_chunk * (iter_chunks - 1)
for xi, x in enumerate(range(iter_chunks))]
# Make sure a chunk separation is located at self._burn_in
chunk_is = sorted(set(
np.concatenate(([0, self._burn_in], np.cumsum(iter_arr)))))
iter_arr = np.diff(chunk_is)
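        # Worked example (hedged): with iterations=2500, burn_in=1250 and
        # max_chunk=1000, cumsum(iter_arr) is [1000, 2000, 2500], chunk_is
        # becomes [0, 1000, 1250, 2000, 2500], and the re-split iter_arr is
        # [1000, 250, 750, 500], so one chunk boundary lands exactly at the
        # end of burn-in.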
# The argument of the for loop runs emcee, after each iteration of
# emcee the contents of the for loop are executed.
converged = False
exceeded_walltime = False
ici = 0
try:
if self._iterations > 0:
sampler = MOSSampler(
self._ntemps, self._nwalkers, ndim, ln_likelihood,
ln_prior, pool=self._pool)
st = time.time()
while (self._iterations > 0 and (
self._cc is not None or ici < len(iter_arr))):
slr = int(np.round(sli))
ic = (max_chunk if self._cc is not None else
iter_arr[ici])
if exceeded_walltime:
break
if (self._cc is not None and converged and
self._emi > self._iterations):
break
for li, (
self._p, self._lnprob, self._lnlike) in enumerate(
sampler.sample(
self._p, iterations=ic, gibbs=self._gibbs if
self._emi >= self._burn_in else True)):
if (self._fitter._maximum_walltime is not False and
time.time() - self._fitter._start_time >
self._fitter._maximum_walltime):
prt.message('exceeded_walltime', warning=True)
exceeded_walltime = True
break
self._emi = self._emi + 1
emim1 = self._emi - 1
messages = []
# Increment the age of each walker if their positions are
# unchanged.
for ti in range(self._ntemps):
for wi in range(self._nwalkers):
if np.array_equal(self._p[ti][wi], oldp[ti][wi]):
ages[ti][wi] += 1
else:
ages[ti][wi] = 0
# Record then reset sampler proposal/acceptance counts.
accepts = list(
np.mean(sampler.nprop_accepted / sampler.nprop,
axis=1))
                    sampler.nprop = np.zeros(
                        (sampler.ntemps, sampler.nwalkers), dtype=float)
                    sampler.nprop_accepted = np.zeros(
                        (sampler.ntemps, sampler.nwalkers), dtype=float)
# During self._burn-in only, redraw any walkers with scores
# significantly worse than their peers, or those that are
# stale (i.e. remained in the same position for a long
# time).
if emim1 <= self._burn_in:
pmedian = [np.median(x) for x in self._lnprob]
pmead = [np.mean([abs(y - pmedian) for y in x])
for x in self._lnprob]
redraw_count = 0
bad_redraws = 0
for ti, tprob in enumerate(self._lnprob):
for wi, wprob in enumerate(tprob):
if (wprob <= pmedian[ti] -
max(redraw_mult * pmead[ti],
float(self._nwalkers)) or
np.isnan(wprob) or
ages[ti][wi] >= self._REPLACE_AGE):
redraw_count = redraw_count + 1
dxx = np.random.normal(
scale=0.01, size=ndim)
tar_x = np.array(
self._p[np.random.randint(
self._ntemps)][
np.random.randint(self._nwalkers)])
# Reflect if out of bounds.
new_x = np.clip(np.where(
np.where(tar_x + dxx < 1.0,
tar_x + dxx,
tar_x - dxx) > 0.0,
tar_x + dxx, tar_x - dxx), 0.0, 1.0)
new_like = ln_likelihood(new_x)
new_prob = new_like + ln_prior(new_x)
if new_prob > wprob or np.isnan(wprob):
self._p[ti][wi] = new_x
self._lnlike[ti][wi] = new_like
self._lnprob[ti][wi] = new_prob
else:
bad_redraws = bad_redraws + 1
if redraw_count > 0:
messages.append(
'{:.0%} redraw, {}/{} success'.format(
redraw_count /
(self._nwalkers * self._ntemps),
redraw_count - bad_redraws, redraw_count))
oldp = self._p.copy()
# Calculate the autocorrelation time.
low = 10
asize = 0.5 * (emim1 - self._burn_in) / low
if asize >= 0 and self._ct == 'acor':
acorc = max(
1, min(self._MAX_ACORC,
int(np.floor(0.5 * self._emi / low))))
self._aacort = -1.0
self._aa = 0
self._ams = self._burn_in
cur_chain = (np.concatenate(
(self._all_chain,
sampler.chain[:, :, :li + 1:slr, :]),
axis=2) if len(self._all_chain) else
sampler.chain[:, :, :li + 1:slr, :])
for a in range(acorc, 1, -1):
ms = self._burn_in
if ms >= self._emi - low:
break
try:
acorts = sampler.get_autocorr_time(
chain=cur_chain, low=low, c=a,
min_step=int(np.round(float(ms) / sli)),
max_walkers=5, fast=True)
acort = max([
max(x)
for x in acorts
])
except AutocorrError:
continue
else:
self._aa = a
self._aacort = acort * sli
self._ams = ms
break
self._acor = [self._aacort, self._aa, self._ams]
self._actc = int(np.ceil(self._aacort / sli))
                        actn = int(
                            float(self._emi - self._ams) / self._actc)
if (self._cc is not None and
actn >= self._cc and
self._emi > self._iterations):
prt.message('converged')
converged = True
break
# Calculate the PSRF (Gelman-Rubin statistic).
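                    # For reference (hedged, standard Gelman-Rubin form): with
                    # W the mean within-walker variance and B/n the
                    # between-walker variance of a parameter chain of length n,
                    # PSRF = sqrt(((n - 1)/n * W + B/n) / W); values near 1
                    # indicate the walkers sample the same distribution.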
if li > 1 and self._emi > self._burn_in + 2:
cur_chain = (np.concatenate(
(self._all_chain,
sampler.chain[:, :, :li + 1:slr, :]),
axis=2) if len(self._all_chain) else
sampler.chain[:, :, :li + 1:slr, :])
vws = np.zeros((self._ntemps, ndim))
for ti in range(self._ntemps):
for xi in range(ndim):
vchain = cur_chain[
ti, :, int(np.floor(
self._burn_in / sli)):, xi]
vws[ti][xi] = self.psrf(vchain)
self._psrf = np.max(vws)
if np.isnan(self._psrf):
self._psrf = np.inf
if (self._ct == 'psrf' and
self._cc is not None and
self._psrf < self._cc and
self._emi > self._iterations):
prt.message('converged')
converged = True
break
if self._cc is not None:
self._emcee_est_t = -1.0
else:
self._emcee_est_t = float(
time.time() - st - tft) / self._emi * (
self._iterations - self._emi
) + tft / self._emi * max(
0, self._burn_in - self._emi)
# Perform self._fracking if we are still in the self._burn
# in phase and iteration count is a multiple of the frack
# step.
frack_now = (self._fracking and self._frack_step != 0 and
self._emi <= self._burn_in and
self._emi % self._frack_step == 0)
self._scores = [np.array(x) for x in self._lnprob]
if emim1 % kmat_chunk == 0:
sout = self._model.run_stack(
self._p[np.unravel_index(
np.argmax(self._lnprob), self._lnprob.shape)],
root='objective')
kmat = sout.get('kmat')
kdiag = sout.get('kdiagonal')
variance = sout.get('obandvs', sout.get('variance'))
if kdiag is not None and kmat is not None:
kmat[np.diag_indices_from(kmat)] += kdiag
elif kdiag is not None and kmat is None:
kmat = np.diag(kdiag + variance)
prt.status(
self,
desc='fracking' if frack_now else
('burning' if self._emi < self._burn_in
else 'walking'),
scores=self._scores,
kmat=kmat,
accepts=accepts,
iterations=[self._emi, None if
self._cc is not None else
self._iterations],
acor=self._acor,
psrf=[self._psrf, self._burn_in],
messages=messages,
make_space=emim1 == 0,
convergence_type=self._ct,
convergence_criteria=self._cc)
if s_exception:
break
if not frack_now:
continue
# Fracking starts here
sft = time.time()
ijperms = [[x, y] for x in range(self._ntemps)
for y in range(self._nwalkers)]
ijprobs = np.array([
1.0
# self._lnprob[x][y]
for x in range(self._ntemps) for y in range(
self._nwalkers)
])
ijprobs -= max(ijprobs)
                    ijprobs = [np.exp(0.1 * x) for x in ijprobs]
import numpy as np
from ..Fourier.FFT import FFT
from scipy.signal import detrend
from .GetWindows import GetWindows
from ..LombScargle.LombScargle import LombScargle
from .DetectGaps import DetectGaps
from ..CrossPhase.CrossPhase import CrossPhase
from ..Tools.PolyDetrend import PolyDetrend
from ..Tools.RemoveStep import RemoveStep
def Spectrogram(t,v,wind,slip,Freq=None,Method='FFT',WindowFunction=None,
Param=None,Detrend=True,FindGaps=True,GoodData=None,
Quiet=True,LenW=None,Threshold=0.0,Fudge=False,
OneSided=True,Tax=None,Steps=None):
'''
Creates a spectogram using a sliding window.
Inputs
======
t : time array in seconds
v : array of values the same length as t. If using crossphase,
this should be a list or tuple containing two arrays.
wind : sliding window length in seconds
slip : difference in time between the start of one window and the
next - when slip < wind, each window will have an overlap,
when slip > wind, there will be gaps where some data will be
unused and when slip == wind, each window is adjacent in time.
Freq : a list of frequencies (Hz) to solve for - only does anything
when using L-S
Method : Currently either 'FFT' or 'LS'
WindowFunction : Select a window function to apply to the data before
the transform, the options are: 'none','cosine-bell','hamming',
'triangle','welch','blackman','nuttall','blackman-nuttall',
'flat-top','cosine','gaussian'
Param : This parameter is used to alter some of the window functions
(see WindowFunctions.py).
Detrend : This will linearly detrend each time window before the
transform.
FindGaps : This tells the routine to scan for data gaps, when set
to False - the data are assumed to be perfect.
GoodData : This can be set to a boolean array which tells the DetectGaps
function which data points are good (True) or bad (False),
if set to None, then any non-finite data is assumed to be bad.
Quiet : When set to True, the function produces no stdout output; when
False, stdout shows the progress.
LenW : This can be set to an integer value in order to force a specific
window length (the number of elements, as opposed to the length
in time defined using wind)
Threshold: If set to a value above 0, then all values which
correspond to frequencies where the amplitude is less than
Threshold are set to 0, effectively removing noise from the
spectra.
Fudge: (LS Only!)
This applies a fudge for when f == Nyquist frequency, because
small floating point numbers have relatively large errors.
This should only be needed if intending to reproduce a
two-sided FFT (also, if doing this then divide A by 2 and P
by 4).
OneSided: (FFT Only!)
This should be set to remove the negative frequencies in
the second half of the spectra. In doing so, the amplitudes
are doubled and the powers are quadrupled.
Tax : (LS only)
An array of times at the centre of each bin.
Returns
=======
Nw : Total number of time windows in the output array
LenW : Length of a time window (number of elements)
Freq : Array of frequencies in Hz.
numpy.recarray :
Stores the output of the transform under the following fields:
Tspec : Time in seconds of the middle of each time window
Pow : Power at each frequency in each window, shape (Nw,LenW)
Pha : Phase at each frequency in each window, shape (Nw,LenW)
Amp : Amplitude at each frequency in each window, shape (Nw,LenW)
Real : Real component at each frequency in each window, shape (Nw,LenW)
Imag : Imaginary component at each frequency in each window, shape (Nw,LenW)
'''
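	# Hedged usage sketch (not part of the original source): for an evenly
	# sampled series t in seconds and signal v,
	#   Nw, LenW, Freq, spec = Spectrogram(t, v, wind=3600.0, slip=600.0,
	#                                      Method='FFT')
	# would return spec.Pow with shape (Nw, Freq.size - 1) and spec.Tspec
	# holding the centre time of each window.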
isLS = 'LS' in Method
isCP = 'CP' in Method
#check that the frequencies exist if we are using LS
#if Freq is None and isLS:
#find out the length of the array and
Tlen = np.size(t)
if Tlen <= 1:
return (0,0,0,0,0,0,0,0)
if isLS:
#we need frequencies here, so we will assume that the data are
#evenly spaced and that we can use the FFT frequencies
dt,ct = np.unique((t[1:]-t[:-1]),return_counts=True)
Res = dt[ct.argmax()]
else:
Res = t[1] - t[0]
#detect and gaps in the input data
if FindGaps:
ngd,Ti0,Ti1 = DetectGaps(v,GoodData)
else:
ngd = 1
Ti0 = np.array([0])
Ti1 = np.array([Tlen-1])
#find the number of windows
Nw,LenW,Nwind = GetWindows(t,wind,slip,ngd,Ti0,Ti1,LenW)
#find the number of frequencies
if Freq is None or not isLS:
Freq = np.arange(LenW+1,dtype='float32')/(LenW*Res)
if OneSided or isLS:
Freq = Freq[:LenW//2 + 1]
elif not Freq is None and isLS:
df = Freq[-1] - Freq[-2]
Freq = np.append(Freq,Freq[-1] + np.abs(df))
Nf = Freq.size - 1
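	# Worked example (hedged): with Res = 1 s and wind = 3600 s, LenW = 3600,
	# the frequency step is 1/(LenW*Res) ~ 2.78e-4 Hz, the one-sided axis ends
	# at the Nyquist frequency 1/(2*Res) = 0.5 Hz, and Nf = LenW//2 = 1800.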
#check if we have a predefined time axis
if isLS and not Tax is None:
Nw = Tax.size
ngd = 1
Nwind = np.array([Nw])
CustTax = True
else:
CustTax = False
#create the output arrays
dtype = [ ('Tspec','float64'), #mid point in time of the current window
('Pow','float32',(Nf,)), #Power spectra
('Pha','float32',(Nf,)), #phase spectra
('Amp','float32',(Nf,)), #Amplitude
('Real','float32',(Nf,)), #Real components of spectra
('Imag','float32',(Nf)), #Imaginary components of spectra
('Size','int32'), #Number of valid (finite) values used to create spectrum
('Good','float32'), #Fraction of good data
('Var','float32'),] #Variance
out = np.recarray(Nw,dtype=dtype)
out.fill(np.nan)
out.nV = 0.0
	#loop through each good section of the time series and FFT/L-S
nd=0
pos=0
for i in range(0,ngd):
if nd > 0:
#this bit adds a load of NaNs in a gap in the middle of two good sections
out.Tspec[pos] = (out.Tspec[pos-1] + t[Ti0[i]] + wind/2.0)/2.0
pos+=1
if Nwind[i] > 0:
if CustTax:
if isCP:
#good = np.where(np.isfinite(v[0]) & np.isfinite(v[1]))[0]
Tv0 = v[0]#[good]
Tv1 = v[1]#[good]
nTv = Tv0.size
else:
#good = np.where(np.isfinite(v))[0]
Tv = v#[good]
nTv = Tv.size
Tt = t#[good]
out.Tspec = Tax
if not Steps is None:
S = Steps
else:
#calculate the number of elements in this section and create
#an array of the indices to use
ng = Ti1[i]-Ti0[i]+1
good = np.arange(ng) + Ti0[i]
#copy the subarrays for time and v
if isCP:
Tv0 = v[0][good]
Tv1 = v[1][good]
nTv = Tv0.size
else:
Tv = v[good]
nTv = Tv.size
Tt = t[good]
if not Steps is None:
S = Steps[good]
#output time array
Tax = np.arange(Nwind[i],dtype='float64')*slip + wind/2.0 + Tt[0]
out.Tspec[pos:pos+Nwind[i]] = Tax
#loop through each window
for j in range(0,Nwind[i]):
#indices for this current window
if CustTax:
#for a custom time axis - use all point within 0.5*window
#of the midpoint of each element of the time axis
inds = np.where((Tt >= (out.Tspec[j] - wind/2.0)) & (Tt < (out.Tspec[j] + wind/2.0)))[0]
elif isLS:
#for when we use LS but not a custom time axis, use
#all elements starting from from Ti0[i]+slip*j until
#upto window later
inds = np.where((Tt >= (t[Ti0[i]] + slip*j)) & (Tt < (t[Ti0[i]] + slip*j + wind)))[0]
else:
#otherwise (FFT) everything should be perfectly evenly
#spaced, all windows have the same number of elements
use0 = np.int32(j*slip/Res)
inds = use0 + np.arange(LenW)
#check for good and bad values
if isCP:
badvals = (np.isfinite(Tv0[inds]) == False) | (np.isfinite(Tv1[inds]) == False)
else:
badvals = (np.isfinite(Tv[inds]) == False)
goodvals = badvals == False
gd = np.sum(goodvals)
#select only good values - unless doing FFT where all
#values should be good already
if isLS:
use = inds[np.where(goodvals)[0]]
else:
use = inds
#this shouldn't really happen, but if the length of the array
#doesn't match the indices, or there are dodgy values
bad = False
if use.size == 0:
bad = True
				elif np.max(use) >= nTv:
					bad = True
from __future__ import absolute_import, division, print_function
import inspect
from datetime import datetime
import numpy as np
import pandas as pd
import pytest
import xarray as xr
import xarray.plot as xplt
from xarray import DataArray
from xarray.coding.times import _import_cftime
from xarray.plot.plot import _infer_interval_breaks
from xarray.plot.utils import (
_build_discrete_cmap, _color_palette, _determine_cmap_params,
import_seaborn, label_from_attrs)
from . import (
assert_array_equal, assert_equal, raises_regex, requires_cftime,
requires_matplotlib, requires_matplotlib2, requires_seaborn)
# import mpl and change the backend before other mpl imports
try:
import matplotlib as mpl
import matplotlib.pyplot as plt
except ImportError:
pass
@pytest.mark.flaky
@pytest.mark.skip(reason='maybe flaky')
def text_in_fig():
'''
Return the set of all text in the figure
'''
return {t.get_text() for t in plt.gcf().findobj(mpl.text.Text)}
def find_possible_colorbars():
# nb. this function also matches meshes from pcolormesh
return plt.gcf().findobj(mpl.collections.QuadMesh)
def substring_in_axes(substring, ax):
'''
Return True if a substring is found anywhere in an axes
'''
alltxt = set([t.get_text() for t in ax.findobj(mpl.text.Text)])
for txt in alltxt:
if substring in txt:
return True
return False
def easy_array(shape, start=0, stop=1):
'''
Make an array with desired shape using np.linspace
shape is a tuple like (2, 3)
'''
a = np.linspace(start, stop, num=np.prod(shape))
return a.reshape(shape)
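# Worked example (hedged): easy_array((2, 3)) is np.linspace(0, 1, num=6)
# reshaped to (2, 3), i.e. [[0.0, 0.2, 0.4], [0.6, 0.8, 1.0]].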
@requires_matplotlib
class PlotTestCase(object):
@pytest.fixture(autouse=True)
def setup(self):
yield
# Remove all matplotlib figures
plt.close('all')
def pass_in_axis(self, plotmethod):
fig, axes = plt.subplots(ncols=2)
plotmethod(ax=axes[0])
assert axes[0].has_data()
@pytest.mark.slow
def imshow_called(self, plotmethod):
plotmethod()
images = plt.gca().findobj(mpl.image.AxesImage)
return len(images) > 0
def contourf_called(self, plotmethod):
plotmethod()
paths = plt.gca().findobj(mpl.collections.PathCollection)
return len(paths) > 0
class TestPlot(PlotTestCase):
@pytest.fixture(autouse=True)
def setup_array(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_label_from_attrs(self):
da = self.darray.copy()
assert '' == label_from_attrs(da)
da.name = 'a'
da.attrs['units'] = 'a_units'
da.attrs['long_name'] = 'a_long_name'
da.attrs['standard_name'] = 'a_standard_name'
assert 'a_long_name [a_units]' == label_from_attrs(da)
da.attrs.pop('long_name')
assert 'a_standard_name [a_units]' == label_from_attrs(da)
da.attrs.pop('units')
assert 'a_standard_name' == label_from_attrs(da)
da.attrs['units'] = 'a_units'
da.attrs.pop('standard_name')
assert 'a [a_units]' == label_from_attrs(da)
da.attrs.pop('units')
assert 'a' == label_from_attrs(da)
def test1d(self):
self.darray[:, 0, 0].plot()
with raises_regex(ValueError, 'None'):
self.darray[:, 0, 0].plot(x='dim_1')
def test_1d_x_y_kw(self):
z = np.arange(10)
da = DataArray(np.cos(z), dims=['z'], coords=[z], name='f')
xy = [[None, None],
[None, 'z'],
['z', None]]
f, ax = plt.subplots(3, 1)
for aa, (x, y) in enumerate(xy):
da.plot(x=x, y=y, ax=ax.flat[aa])
with raises_regex(ValueError, 'cannot'):
da.plot(x='z', y='z')
with raises_regex(ValueError, 'None'):
da.plot(x='f', y='z')
with raises_regex(ValueError, 'None'):
da.plot(x='z', y='f')
def test_2d_line(self):
with raises_regex(ValueError, 'hue'):
self.darray[:, :, 0].plot.line()
self.darray[:, :, 0].plot.line(hue='dim_1')
self.darray[:, :, 0].plot.line(x='dim_1')
self.darray[:, :, 0].plot.line(y='dim_1')
self.darray[:, :, 0].plot.line(x='dim_0', hue='dim_1')
self.darray[:, :, 0].plot.line(y='dim_0', hue='dim_1')
with raises_regex(ValueError, 'cannot'):
self.darray[:, :, 0].plot.line(x='dim_1', y='dim_0', hue='dim_1')
def test_2d_line_accepts_legend_kw(self):
self.darray[:, :, 0].plot.line(x='dim_0', add_legend=False)
assert not plt.gca().get_legend()
plt.cla()
self.darray[:, :, 0].plot.line(x='dim_0', add_legend=True)
assert plt.gca().get_legend()
# check whether legend title is set
assert (plt.gca().get_legend().get_title().get_text()
== 'dim_1')
def test_2d_line_accepts_x_kw(self):
self.darray[:, :, 0].plot.line(x='dim_0')
assert plt.gca().get_xlabel() == 'dim_0'
plt.cla()
self.darray[:, :, 0].plot.line(x='dim_1')
assert plt.gca().get_xlabel() == 'dim_1'
def test_2d_line_accepts_hue_kw(self):
self.darray[:, :, 0].plot.line(hue='dim_0')
assert (plt.gca().get_legend().get_title().get_text()
== 'dim_0')
plt.cla()
self.darray[:, :, 0].plot.line(hue='dim_1')
assert (plt.gca().get_legend().get_title().get_text()
== 'dim_1')
def test_2d_before_squeeze(self):
a = DataArray(easy_array((1, 5)))
a.plot()
def test2d_uniform_calls_imshow(self):
assert self.imshow_called(self.darray[:, :, 0].plot.imshow)
@pytest.mark.slow
def test2d_nonuniform_calls_contourf(self):
a = self.darray[:, :, 0]
a.coords['dim_1'] = [2, 1, 89]
assert self.contourf_called(a.plot.contourf)
def test2d_1d_2d_coordinates_contourf(self):
sz = (20, 10)
depth = easy_array(sz)
a = DataArray(
easy_array(sz),
dims=['z', 'time'],
coords={
'depth': (['z', 'time'], depth),
'time': np.linspace(0, 1, sz[1])
})
a.plot.contourf(x='time', y='depth')
def test3d(self):
self.darray.plot()
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot)
def test__infer_interval_breaks(self):
assert_array_equal([-0.5, 0.5, 1.5], _infer_interval_breaks([0, 1]))
assert_array_equal([-0.5, 0.5, 5.0, 9.5, 10.5],
_infer_interval_breaks([0, 1, 9, 10]))
assert_array_equal(
pd.date_range('20000101', periods=4) - np.timedelta64(12, 'h'),
_infer_interval_breaks(pd.date_range('20000101', periods=3)))
# make a bounded 2D array that we will center and re-infer
xref, yref = np.meshgrid(np.arange(6), np.arange(5))
cx = (xref[1:, 1:] + xref[:-1, :-1]) / 2
cy = (yref[1:, 1:] + yref[:-1, :-1]) / 2
x = _infer_interval_breaks(cx, axis=1)
x = _infer_interval_breaks(x, axis=0)
y = _infer_interval_breaks(cy, axis=1)
y = _infer_interval_breaks(y, axis=0)
np.testing.assert_allclose(xref, x)
np.testing.assert_allclose(yref, y)
# test that ValueError is raised for non-monotonic 1D inputs
with pytest.raises(ValueError):
_infer_interval_breaks(np.array([0, 2, 1]), check_monotonic=True)
def test_geo_data(self):
# Regression test for gh2250
# Realistic coordinates taken from the example dataset
lat = np.array([[16.28, 18.48, 19.58, 19.54, 18.35],
[28.07, 30.52, 31.73, 31.68, 30.37],
[39.65, 42.27, 43.56, 43.51, 42.11],
[50.52, 53.22, 54.55, 54.50, 53.06]])
lon = np.array([[-126.13, -113.69, -100.92, -88.04, -75.29],
[-129.27, -115.62, -101.54, -87.32, -73.26],
[-133.10, -118.00, -102.31, -86.42, -70.76],
[-137.85, -120.99, -103.28, -85.28, -67.62]])
data = np.sqrt(lon ** 2 + lat ** 2)
da = DataArray(data, dims=('y', 'x'),
coords={'lon': (('y', 'x'), lon),
'lat': (('y', 'x'), lat)})
da.plot(x='lon', y='lat')
ax = plt.gca()
assert ax.has_data()
da.plot(x='lat', y='lon')
ax = plt.gca()
assert ax.has_data()
def test_datetime_dimension(self):
nrow = 3
ncol = 4
time = pd.date_range('2000-01-01', periods=nrow)
a = DataArray(
easy_array((nrow, ncol)),
coords=[('time', time), ('y', range(ncol))])
a.plot()
ax = plt.gca()
assert ax.has_data()
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
d.coords['z'] = list('abcd')
g = d.plot(x='x', y='y', col='z', col_wrap=2, cmap='cool')
assert_array_equal(g.axes.shape, [2, 2])
for ax in g.axes.flat:
assert ax.has_data()
with raises_regex(ValueError, '[Ff]acet'):
d.plot(x='x', y='y', col='z', ax=plt.gca())
with raises_regex(ValueError, '[Ff]acet'):
d[0].plot(x='x', y='y', col='z', ax=plt.gca())
@pytest.mark.slow
@requires_matplotlib2
def test_subplot_kws(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
d.coords['z'] = list('abcd')
g = d.plot(
x='x',
y='y',
col='z',
col_wrap=2,
cmap='cool',
subplot_kws=dict(facecolor='r'))
for ax in g.axes.flat:
# mpl V2
assert ax.get_facecolor()[0:3] == \
mpl.colors.to_rgb('r')
@pytest.mark.slow
def test_plot_size(self):
self.darray[:, 0, 0].plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(figsize=(13, 5))
assert tuple(plt.gcf().get_size_inches()) == (13, 5)
self.darray.plot(size=5)
assert plt.gcf().get_size_inches()[1] == 5
self.darray.plot(size=5, aspect=2)
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
with raises_regex(ValueError, 'cannot provide both'):
self.darray.plot(ax=plt.gca(), figsize=(3, 4))
with raises_regex(ValueError, 'cannot provide both'):
self.darray.plot(size=5, figsize=(3, 4))
with raises_regex(ValueError, 'cannot provide both'):
self.darray.plot(size=5, ax=plt.gca())
with raises_regex(ValueError, 'cannot provide `aspect`'):
self.darray.plot(aspect=1)
@pytest.mark.slow
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = d.plot(x='x', y='y', col='columns', row='rows')
assert_array_equal(g.axes.shape, [3, 2])
for ax in g.axes.flat:
assert ax.has_data()
with raises_regex(ValueError, '[Ff]acet'):
d.plot(x='x', y='y', col='columns', ax=plt.gca())
class TestPlot1D(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
d = [0, 1.1, 0, 2]
self.darray = DataArray(
d, coords={'period': range(len(d))}, dims='period')
self.darray.period.attrs['units'] = 's'
def test_xlabel_is_index_name(self):
self.darray.plot()
assert 'period [s]' == plt.gca().get_xlabel()
def test_no_label_name_on_x_axis(self):
self.darray.plot(y='period')
assert '' == plt.gca().get_xlabel()
def test_no_label_name_on_y_axis(self):
self.darray.plot()
assert '' == plt.gca().get_ylabel()
def test_ylabel_is_data_name(self):
self.darray.name = 'temperature'
self.darray.attrs['units'] = 'degrees_Celsius'
self.darray.plot()
assert 'temperature [degrees_Celsius]' == plt.gca().get_ylabel()
def test_xlabel_is_data_name(self):
self.darray.name = 'temperature'
self.darray.attrs['units'] = 'degrees_Celsius'
self.darray.plot(y='period')
assert 'temperature [degrees_Celsius]' == plt.gca().get_xlabel()
def test_format_string(self):
self.darray.plot.line('ro')
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.line)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray([1, 2, 3], {'letter': ['a', 'b', 'c']}, dims='letter')
with raises_regex(TypeError, r'[Pp]lot'):
a.plot.line()
def test_primitive_returned(self):
p = self.darray.plot.line()
assert isinstance(p[0], mpl.lines.Line2D)
@pytest.mark.slow
def test_plot_nans(self):
self.darray[1] = np.nan
self.darray.plot.line()
def test_x_ticks_are_rotated_for_time(self):
time = pd.date_range('2000-01-01', '2000-01-10')
a = DataArray(np.arange(len(time)), [('t', time)])
a.plot.line()
rotation = plt.gca().get_xticklabels()[0].get_rotation()
assert rotation != 0
def test_xyincrease_false_changes_axes(self):
self.darray.plot.line(xincrease=False, yincrease=False)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[1] - xlim[0], ylim[1] - ylim[0]
assert all(x < 0 for x in diffs)
def test_slice_in_title(self):
self.darray.coords['d'] = 10
self.darray.plot.line()
title = plt.gca().get_title()
assert 'd = 10' == title
class TestPlotHistogram(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
self.darray = DataArray(easy_array((2, 3, 4)))
def test_3d_array(self):
self.darray.plot.hist()
def test_xlabel_uses_name(self):
self.darray.name = 'testpoints'
self.darray.attrs['units'] = 'testunits'
self.darray.plot.hist()
assert 'testpoints [testunits]' == plt.gca().get_xlabel()
def test_title_is_histogram(self):
self.darray.plot.hist()
assert 'Histogram' == plt.gca().get_title()
def test_can_pass_in_kwargs(self):
nbins = 5
self.darray.plot.hist(bins=nbins)
assert nbins == len(plt.gca().patches)
def test_can_pass_in_axis(self):
self.pass_in_axis(self.darray.plot.hist)
def test_primitive_returned(self):
h = self.darray.plot.hist()
assert isinstance(h[-1][0], mpl.patches.Rectangle)
@pytest.mark.slow
def test_plot_nans(self):
self.darray[0, 0, 0] = np.nan
self.darray.plot.hist()
@requires_matplotlib
class TestDetermineCmapParams(object):
@pytest.fixture(autouse=True)
def setUp(self):
self.data = np.linspace(0, 1, num=100)
def test_robust(self):
cmap_params = _determine_cmap_params(self.data, robust=True)
assert cmap_params['vmin'] == np.percentile(self.data, 2)
assert cmap_params['vmax'] == np.percentile(self.data, 98)
assert cmap_params['cmap'] == 'viridis'
assert cmap_params['extend'] == 'both'
assert cmap_params['levels'] is None
assert cmap_params['norm'] is None
def test_center(self):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params['vmax'] - 0.5 == 0.5 - cmap_params['vmin']
assert cmap_params['cmap'] == 'RdBu_r'
assert cmap_params['extend'] == 'neither'
assert cmap_params['levels'] is None
assert cmap_params['norm'] is None
def test_cmap_sequential_option(self):
with xr.set_options(cmap_sequential='magma'):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params['cmap'] == 'magma'
def test_cmap_sequential_explicit_option(self):
with xr.set_options(cmap_sequential=mpl.cm.magma):
cmap_params = _determine_cmap_params(self.data)
assert cmap_params['cmap'] == mpl.cm.magma
def test_cmap_divergent_option(self):
with xr.set_options(cmap_divergent='magma'):
cmap_params = _determine_cmap_params(self.data, center=0.5)
assert cmap_params['cmap'] == 'magma'
def test_nan_inf_are_ignored(self):
cmap_params1 = _determine_cmap_params(self.data)
data = self.data
data[50:55] = np.nan
data[56:60] = np.inf
cmap_params2 = _determine_cmap_params(data)
assert cmap_params1['vmin'] == cmap_params2['vmin']
assert cmap_params1['vmax'] == cmap_params2['vmax']
@pytest.mark.slow
def test_integer_levels(self):
data = self.data + 1
# default is to cover full data range but with no guarantee on Nlevels
for level in np.arange(2, 10, dtype=int):
cmap_params = _determine_cmap_params(data, levels=level)
assert cmap_params['vmin'] == cmap_params['levels'][0]
assert cmap_params['vmax'] == cmap_params['levels'][-1]
assert cmap_params['extend'] == 'neither'
# with min max we are more strict
cmap_params = _determine_cmap_params(
data, levels=5, vmin=0, vmax=5, cmap='Blues')
assert cmap_params['vmin'] == 0
assert cmap_params['vmax'] == 5
assert cmap_params['vmin'] == cmap_params['levels'][0]
assert cmap_params['vmax'] == cmap_params['levels'][-1]
assert cmap_params['cmap'].name == 'Blues'
assert cmap_params['extend'] == 'neither'
assert cmap_params['cmap'].N == 4
assert cmap_params['norm'].N == 5
cmap_params = _determine_cmap_params(
data, levels=5, vmin=0.5, vmax=1.5)
assert cmap_params['cmap'].name == 'viridis'
assert cmap_params['extend'] == 'max'
cmap_params = _determine_cmap_params(data, levels=5, vmin=1.5)
assert cmap_params['cmap'].name == 'viridis'
assert cmap_params['extend'] == 'min'
cmap_params = _determine_cmap_params(
data, levels=5, vmin=1.3, vmax=1.5)
assert cmap_params['cmap'].name == 'viridis'
assert cmap_params['extend'] == 'both'
def test_list_levels(self):
data = self.data + 1
orig_levels = [0, 1, 2, 3, 4, 5]
# vmin and vmax should be ignored if levels are explicitly provided
cmap_params = _determine_cmap_params(
data, levels=orig_levels, vmin=0, vmax=3)
assert cmap_params['vmin'] == 0
assert cmap_params['vmax'] == 5
assert cmap_params['cmap'].N == 5
assert cmap_params['norm'].N == 6
for wrap_levels in [list, np.array, pd.Index, DataArray]:
cmap_params = _determine_cmap_params(
data, levels=wrap_levels(orig_levels))
assert_array_equal(cmap_params['levels'], orig_levels)
def test_divergentcontrol(self):
neg = self.data - 0.1
pos = self.data
# Default with positive data will be a normal cmap
cmap_params = _determine_cmap_params(pos)
assert cmap_params['vmin'] == 0
assert cmap_params['vmax'] == 1
assert cmap_params['cmap'] == "viridis"
# Default with negative data will be a divergent cmap
cmap_params = _determine_cmap_params(neg)
assert cmap_params['vmin'] == -0.9
assert cmap_params['vmax'] == 0.9
assert cmap_params['cmap'] == "RdBu_r"
# Setting vmin or vmax should prevent this only if center is false
cmap_params = _determine_cmap_params(neg, vmin=-0.1, center=False)
assert cmap_params['vmin'] == -0.1
assert cmap_params['vmax'] == 0.9
assert cmap_params['cmap'] == "viridis"
cmap_params = _determine_cmap_params(neg, vmax=0.5, center=False)
assert cmap_params['vmin'] == -0.1
assert cmap_params['vmax'] == 0.5
assert cmap_params['cmap'] == "viridis"
# Setting center=False too
cmap_params = _determine_cmap_params(neg, center=False)
assert cmap_params['vmin'] == -0.1
assert cmap_params['vmax'] == 0.9
assert cmap_params['cmap'] == "viridis"
# However, I should still be able to set center and have a div cmap
cmap_params = _determine_cmap_params(neg, center=0)
assert cmap_params['vmin'] == -0.9
assert cmap_params['vmax'] == 0.9
assert cmap_params['cmap'] == "RdBu_r"
# Setting vmin or vmax alone will force symmetric bounds around center
cmap_params = _determine_cmap_params(neg, vmin=-0.1)
assert cmap_params['vmin'] == -0.1
assert cmap_params['vmax'] == 0.1
assert cmap_params['cmap'] == "RdBu_r"
cmap_params = _determine_cmap_params(neg, vmax=0.5)
assert cmap_params['vmin'] == -0.5
assert cmap_params['vmax'] == 0.5
assert cmap_params['cmap'] == "RdBu_r"
cmap_params = _determine_cmap_params(neg, vmax=0.6, center=0.1)
assert cmap_params['vmin'] == -0.4
assert cmap_params['vmax'] == 0.6
assert cmap_params['cmap'] == "RdBu_r"
# But this is only true if vmin or vmax are negative
cmap_params = _determine_cmap_params(pos, vmin=-0.1)
assert cmap_params['vmin'] == -0.1
assert cmap_params['vmax'] == 0.1
assert cmap_params['cmap'] == "RdBu_r"
cmap_params = _determine_cmap_params(pos, vmin=0.1)
assert cmap_params['vmin'] == 0.1
assert cmap_params['vmax'] == 1
assert cmap_params['cmap'] == "viridis"
cmap_params = _determine_cmap_params(pos, vmax=0.5)
assert cmap_params['vmin'] == 0
assert cmap_params['vmax'] == 0.5
assert cmap_params['cmap'] == "viridis"
# If both vmin and vmax are provided, output is non-divergent
cmap_params = _determine_cmap_params(neg, vmin=-0.2, vmax=0.6)
assert cmap_params['vmin'] == -0.2
assert cmap_params['vmax'] == 0.6
assert cmap_params['cmap'] == "viridis"
def test_norm_sets_vmin_vmax(self):
vmin = self.data.min()
vmax = self.data.max()
for norm, extend in zip([mpl.colors.LogNorm(),
mpl.colors.LogNorm(vmin + 1, vmax - 1),
mpl.colors.LogNorm(None, vmax - 1),
mpl.colors.LogNorm(vmin + 1, None)],
['neither', 'both', 'max', 'min']):
test_min = vmin if norm.vmin is None else norm.vmin
test_max = vmax if norm.vmax is None else norm.vmax
cmap_params = _determine_cmap_params(self.data, norm=norm)
assert cmap_params['vmin'] == test_min
assert cmap_params['vmax'] == test_max
assert cmap_params['extend'] == extend
assert cmap_params['norm'] == norm
@requires_matplotlib
class TestDiscreteColorMap(object):
@pytest.fixture(autouse=True)
def setUp(self):
x = np.arange(start=0, stop=10, step=2)
y = np.arange(start=9, stop=-7, step=-3)
xy = np.dstack(np.meshgrid(x, y))
distance = np.linalg.norm(xy, axis=2)
self.darray = DataArray(distance, list(zip(('y', 'x'), (y, x))))
self.data_min = distance.min()
self.data_max = distance.max()
@pytest.mark.slow
def test_recover_from_seaborn_jet_exception(self):
pal = _color_palette('jet', 4)
assert type(pal) == np.ndarray
assert len(pal) == 4
@pytest.mark.slow
def test_build_discrete_cmap(self):
for (cmap, levels, extend, filled) in [('jet', [0, 1], 'both', False),
('hot', [-4, 4], 'max', True)]:
ncmap, cnorm = _build_discrete_cmap(cmap, levels, extend, filled)
assert ncmap.N == len(levels) - 1
assert len(ncmap.colors) == len(levels) - 1
assert cnorm.N == len(levels)
assert_array_equal(cnorm.boundaries, levels)
assert max(levels) == cnorm.vmax
assert min(levels) == cnorm.vmin
if filled:
assert ncmap.colorbar_extend == extend
else:
assert ncmap.colorbar_extend == 'max'
@pytest.mark.slow
def test_discrete_colormap_list_of_levels(self):
for extend, levels in [('max', [-1, 2, 4, 8, 10]),
('both', [2, 5, 10, 11]),
('neither', [0, 5, 10, 15]),
('min', [2, 5, 10, 15])]:
for kind in ['imshow', 'pcolormesh', 'contourf', 'contour']:
primitive = getattr(self.darray.plot, kind)(levels=levels)
assert_array_equal(levels, primitive.norm.boundaries)
assert max(levels) == primitive.norm.vmax
assert min(levels) == primitive.norm.vmin
if kind != 'contour':
assert extend == primitive.cmap.colorbar_extend
else:
assert 'max' == primitive.cmap.colorbar_extend
assert len(levels) - 1 == len(primitive.cmap.colors)
@pytest.mark.slow
def test_discrete_colormap_int_levels(self):
for extend, levels, vmin, vmax in [('neither', 7, None, None),
('neither', 7, None, 20),
('both', 7, 4, 8),
('min', 10, 4, 15)]:
for kind in ['imshow', 'pcolormesh', 'contourf', 'contour']:
primitive = getattr(self.darray.plot, kind)(
levels=levels, vmin=vmin, vmax=vmax)
assert levels >= \
len(primitive.norm.boundaries) - 1
if vmax is None:
assert primitive.norm.vmax >= self.data_max
else:
assert primitive.norm.vmax >= vmax
if vmin is None:
assert primitive.norm.vmin <= self.data_min
else:
assert primitive.norm.vmin <= vmin
if kind != 'contour':
assert extend == primitive.cmap.colorbar_extend
else:
assert 'max' == primitive.cmap.colorbar_extend
assert levels >= len(primitive.cmap.colors)
def test_discrete_colormap_list_levels_and_vmin_or_vmax(self):
levels = [0, 5, 10, 15]
primitive = self.darray.plot(levels=levels, vmin=-3, vmax=20)
assert primitive.norm.vmax == max(levels)
assert primitive.norm.vmin == min(levels)
def test_discrete_colormap_provided_boundary_norm(self):
norm = mpl.colors.BoundaryNorm([0, 5, 10, 15], 4)
primitive = self.darray.plot.contourf(norm=norm)
np.testing.assert_allclose(primitive.levels, norm.boundaries)
class Common2dMixin(object):
"""
Common tests for 2d plotting go here.
These tests assume that a staticmethod for `self.plotfunc` exists.
Should have the same name as the method.
"""
@pytest.fixture(autouse=True)
def setUp(self):
da = DataArray(easy_array((10, 15), start=-1),
dims=['y', 'x'],
coords={'y': np.arange(10),
'x': np.arange(15)})
# add 2d coords
ds = da.to_dataset(name='testvar')
x, y = np.meshgrid(da.x.values, da.y.values)
ds['x2d'] = DataArray(x, dims=['y', 'x'])
ds['y2d'] = DataArray(y, dims=['y', 'x'])
ds.set_coords(['x2d', 'y2d'], inplace=True)
# set darray and plot method
self.darray = ds.testvar
# Add CF-compliant metadata
self.darray.attrs['long_name'] = 'a_long_name'
self.darray.attrs['units'] = 'a_units'
self.darray.x.attrs['long_name'] = 'x_long_name'
self.darray.x.attrs['units'] = 'x_units'
self.darray.y.attrs['long_name'] = 'y_long_name'
self.darray.y.attrs['units'] = 'y_units'
self.plotmethod = getattr(self.darray.plot, self.plotfunc.__name__)
def test_label_names(self):
self.plotmethod()
assert 'x_long_name [x_units]' == plt.gca().get_xlabel()
assert 'y_long_name [y_units]' == plt.gca().get_ylabel()
def test_1d_raises_valueerror(self):
with raises_regex(ValueError, r'DataArray must be 2d'):
self.plotfunc(self.darray[0, :])
def test_3d_raises_valueerror(self):
a = DataArray(easy_array((2, 3, 4)))
if self.plotfunc.__name__ == 'imshow':
pytest.skip()
with raises_regex(ValueError, r'DataArray must be 2d'):
self.plotfunc(a)
def test_nonnumeric_index_raises_typeerror(self):
a = DataArray(easy_array((3, 2)), coords=[['a', 'b', 'c'], ['d', 'e']])
with raises_regex(TypeError, r'[Pp]lot'):
self.plotfunc(a)
def test_can_pass_in_axis(self):
self.pass_in_axis(self.plotmethod)
def test_xyincrease_defaults(self):
# With default settings the axis must be ordered regardless
# of the coords order.
self.plotfunc(DataArray(easy_array((3, 2)), coords=[[1, 2, 3],
[1, 2]]))
bounds = plt.gca().get_ylim()
assert bounds[0] < bounds[1]
bounds = plt.gca().get_xlim()
assert bounds[0] < bounds[1]
# Inverted coords
self.plotfunc(DataArray(easy_array((3, 2)), coords=[[3, 2, 1],
[2, 1]]))
bounds = plt.gca().get_ylim()
assert bounds[0] < bounds[1]
bounds = plt.gca().get_xlim()
assert bounds[0] < bounds[1]
def test_xyincrease_false_changes_axes(self):
self.plotmethod(xincrease=False, yincrease=False)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 14, xlim[1] - 0, ylim[0] - 9, ylim[1] - 0
assert all(abs(x) < 1 for x in diffs)
def test_xyincrease_true_changes_axes(self):
self.plotmethod(xincrease=True, yincrease=True)
xlim = plt.gca().get_xlim()
ylim = plt.gca().get_ylim()
diffs = xlim[0] - 0, xlim[1] - 14, ylim[0] - 0, ylim[1] - 9
assert all(abs(x) < 1 for x in diffs)
def test_x_ticks_are_rotated_for_time(self):
time = pd.date_range('2000-01-01', '2000-01-10')
a = DataArray(
np.random.randn(2, len(time)), [('xx', [1, 2]), ('t', time)])
a.plot(x='t')
rotation = plt.gca().get_xticklabels()[0].get_rotation()
assert rotation != 0
def test_plot_nans(self):
x1 = self.darray[:5]
x2 = self.darray.copy()
x2[5:] = np.nan
clim1 = self.plotfunc(x1).get_clim()
clim2 = self.plotfunc(x2).get_clim()
assert clim1 == clim2
@pytest.mark.filterwarnings('ignore::UserWarning')
@pytest.mark.filterwarnings('ignore:invalid value encountered')
def test_can_plot_all_nans(self):
# regression test for issue #1780
self.plotfunc(DataArray(np.full((2, 2), np.nan)))
@pytest.mark.filterwarnings('ignore: Attempting to set')
def test_can_plot_axis_size_one(self):
if self.plotfunc.__name__ not in ('contour', 'contourf'):
self.plotfunc(DataArray(np.ones((1, 1))))
def test_disallows_rgb_arg(self):
with pytest.raises(ValueError):
# Always invalid for most plots. Invalid for imshow with 2D data.
self.plotfunc(DataArray(np.ones((2, 2))), rgb='not None')
def test_viridis_cmap(self):
cmap_name = self.plotmethod(cmap='viridis').get_cmap().name
assert 'viridis' == cmap_name
def test_default_cmap(self):
cmap_name = self.plotmethod().get_cmap().name
assert 'RdBu_r' == cmap_name
cmap_name = self.plotfunc(abs(self.darray)).get_cmap().name
assert 'viridis' == cmap_name
@requires_seaborn
def test_seaborn_palette_as_cmap(self):
cmap_name = self.plotmethod(levels=2, cmap='husl').get_cmap().name
assert 'husl' == cmap_name
def test_can_change_default_cmap(self):
cmap_name = self.plotmethod(cmap='Blues').get_cmap().name
assert 'Blues' == cmap_name
def test_diverging_color_limits(self):
artist = self.plotmethod()
vmin, vmax = artist.get_clim()
assert round(abs(-vmin - vmax), 7) == 0
def test_xy_strings(self):
self.plotmethod('y', 'x')
ax = plt.gca()
assert 'y_long_name [y_units]' == ax.get_xlabel()
assert 'x_long_name [x_units]' == ax.get_ylabel()
def test_positional_coord_string(self):
self.plotmethod(y='x')
ax = plt.gca()
assert 'x_long_name [x_units]' == ax.get_ylabel()
assert 'y_long_name [y_units]' == ax.get_xlabel()
self.plotmethod(x='x')
ax = plt.gca()
assert 'x_long_name [x_units]' == ax.get_xlabel()
assert 'y_long_name [y_units]' == ax.get_ylabel()
def test_bad_x_string_exception(self):
with raises_regex(ValueError, 'x and y must be coordinate variables'):
self.plotmethod('not_a_real_dim', 'y')
with raises_regex(ValueError,
'x must be a dimension name if y is not supplied'):
self.plotmethod(x='not_a_real_dim')
with raises_regex(ValueError,
'y must be a dimension name if x is not supplied'):
self.plotmethod(y='not_a_real_dim')
self.darray.coords['z'] = 100
def test_coord_strings(self):
# 1d coords (same as dims)
assert {'x', 'y'} == set(self.darray.dims)
self.plotmethod(y='y', x='x')
def test_non_linked_coords(self):
# plot with coordinate names that are not dimensions
self.darray.coords['newy'] = self.darray.y + 150
# Normal case, without transpose
self.plotfunc(self.darray, x='x', y='newy')
ax = plt.gca()
assert 'x_long_name [x_units]' == ax.get_xlabel()
assert 'newy' == ax.get_ylabel()
# ax limits might change between plotfuncs
# simply ensure that these high coords were passed over
assert np.min(ax.get_ylim()) > 100.
def test_non_linked_coords_transpose(self):
# plot with coordinate names that are not dimensions,
# and with transposed y and x axes
# This used to raise an error with pcolormesh and contour
# https://github.com/pydata/xarray/issues/788
self.darray.coords['newy'] = self.darray.y + 150
self.plotfunc(self.darray, x='newy', y='x')
ax = plt.gca()
assert 'newy' == ax.get_xlabel()
assert 'x_long_name [x_units]' == ax.get_ylabel()
# ax limits might change between plotfuncs
# simply ensure that these high coords were passed over
assert np.min(ax.get_xlim()) > 100.
def test_default_title(self):
a = DataArray(easy_array((4, 3, 2)), dims=['a', 'b', 'c'])
a.coords['c'] = [0, 1]
a.coords['d'] = u'foo'
self.plotfunc(a.isel(c=1))
title = plt.gca().get_title()
assert 'c = 1, d = foo' == title or 'd = foo, c = 1' == title
def test_colorbar_default_label(self):
self.plotmethod(add_colorbar=True)
assert ('a_long_name [a_units]' in text_in_fig())
def test_no_labels(self):
self.darray.name = 'testvar'
self.darray.attrs['units'] = 'test_units'
self.plotmethod(add_labels=False)
alltxt = text_in_fig()
for string in ['x_long_name [x_units]',
'y_long_name [y_units]',
'testvar [test_units]']:
assert string not in alltxt
def test_colorbar_kwargs(self):
# replace label
self.darray.attrs.pop('long_name')
self.darray.attrs['units'] = 'test_units'
# check default colorbar label
self.plotmethod(add_colorbar=True)
alltxt = text_in_fig()
assert 'testvar [test_units]' in alltxt
self.darray.attrs.pop('units')
self.darray.name = 'testvar'
self.plotmethod(add_colorbar=True, cbar_kwargs={'label': 'MyLabel'})
alltxt = text_in_fig()
assert 'MyLabel' in alltxt
assert 'testvar' not in alltxt
# you can use mapping types as well
self.plotmethod(
add_colorbar=True, cbar_kwargs=(('label', 'MyLabel'), ))
alltxt = text_in_fig()
assert 'MyLabel' in alltxt
assert 'testvar' not in alltxt
# change cbar ax
fig, (ax, cax) = plt.subplots(1, 2)
self.plotmethod(
ax=ax,
cbar_ax=cax,
add_colorbar=True,
cbar_kwargs={
'label': 'MyBar'
})
assert ax.has_data()
assert cax.has_data()
alltxt = text_in_fig()
assert 'MyBar' in alltxt
assert 'testvar' not in alltxt
# note that there are two ways to achieve this
fig, (ax, cax) = plt.subplots(1, 2)
self.plotmethod(
ax=ax,
add_colorbar=True,
cbar_kwargs={
'label': 'MyBar',
'cax': cax
})
assert ax.has_data()
assert cax.has_data()
alltxt = text_in_fig()
assert 'MyBar' in alltxt
assert 'testvar' not in alltxt
# see that no colorbar is respected
self.plotmethod(add_colorbar=False)
assert 'testvar' not in text_in_fig()
# check that error is raised
pytest.raises(
ValueError,
self.plotmethod,
add_colorbar=False,
cbar_kwargs={
'label': 'label'
})
def test_verbose_facetgrid(self):
a = easy_array((10, 15, 3))
d = DataArray(a, dims=['y', 'x', 'z'])
g = xplt.FacetGrid(d, col='z')
g.map_dataarray(self.plotfunc, 'x', 'y')
for ax in g.axes.flat:
assert ax.has_data()
def test_2d_function_and_method_signature_same(self):
func_sig = inspect.getcallargs(self.plotfunc, self.darray)
method_sig = inspect.getcallargs(self.plotmethod)
del method_sig['_PlotMethods_obj']
del func_sig['darray']
assert func_sig == method_sig
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'])
g = self.plotfunc(d, x='x', y='y', col='z', col_wrap=2)
assert_array_equal(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
assert ax.has_data()
if x == 0:
assert 'y' == ax.get_ylabel()
else:
assert '' == ax.get_ylabel()
if y == 1:
assert 'x' == ax.get_xlabel()
else:
assert '' == ax.get_xlabel()
# Inferring labels
g = self.plotfunc(d, col='z', col_wrap=2)
assert_array_equal(g.axes.shape, [2, 2])
for (y, x), ax in np.ndenumerate(g.axes):
assert ax.has_data()
if x == 0:
assert 'y' == ax.get_ylabel()
else:
assert '' == ax.get_ylabel()
if y == 1:
assert 'x' == ax.get_xlabel()
else:
assert '' == ax.get_xlabel()
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
def test_convenient_facetgrid_4d(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = self.plotfunc(d, x='x', y='y', col='columns', row='rows')
assert_array_equal(g.axes.shape, [3, 2])
for ax in g.axes.flat:
assert ax.has_data()
@pytest.mark.filterwarnings('ignore:This figure includes')
def test_facetgrid_map_only_appends_mappables(self):
a = easy_array((10, 15, 2, 3))
d = DataArray(a, dims=['y', 'x', 'columns', 'rows'])
g = self.plotfunc(d, x='x', y='y', col='columns', row='rows')
expected = g._mappables
g.map(lambda: plt.plot(1, 1))
actual = g._mappables
assert expected == actual
def test_facetgrid_cmap(self):
# Regression test for GH592
data = (np.random.random(size=(20, 25, 12)) + np.linspace(-3, 3, 12))
d = DataArray(data, dims=['x', 'y', 'time'])
fg = d.plot.pcolormesh(col='time')
# check that all color limits are the same
assert len(set(m.get_clim() for m in fg._mappables)) == 1
# check that all colormaps are the same
assert len(set(m.get_cmap().name for m in fg._mappables)) == 1
def test_cmap_and_color_both(self):
with pytest.raises(ValueError):
self.plotmethod(colors='k', cmap='RdBu')
def test_colormap_error_norm_and_vmin_vmax(self):
norm = mpl.colors.LogNorm(0.1, 1e1)
with pytest.raises(ValueError):
self.darray.plot(norm=norm, vmin=2)
with pytest.raises(ValueError):
self.darray.plot(norm=norm, vmax=2)
@pytest.mark.slow
class TestContourf(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contourf)
@pytest.mark.slow
def test_contourf_called(self):
# Having both statements ensures the test works properly
assert not self.contourf_called(self.darray.plot.imshow)
assert self.contourf_called(self.darray.plot.contourf)
def test_primitive_artist_returned(self):
artist = self.plotmethod()
assert isinstance(artist, mpl.contour.QuadContourSet)
@pytest.mark.slow
def test_extend(self):
artist = self.plotmethod()
assert artist.extend == 'neither'
self.darray[0, 0] = -100
self.darray[-1, -1] = 100
artist = self.plotmethod(robust=True)
assert artist.extend == 'both'
self.darray[0, 0] = 0
self.darray[-1, -1] = 0
artist = self.plotmethod(vmin=-0, vmax=10)
assert artist.extend == 'min'
artist = self.plotmethod(vmin=-10, vmax=0)
assert artist.extend == 'max'
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
assert 'x2d' == ax.get_xlabel()
assert 'y2d' == ax.get_ylabel()
@pytest.mark.slow
def test_levels(self):
artist = self.plotmethod(levels=[-0.5, -0.4, 0.1])
assert artist.extend == 'both'
artist = self.plotmethod(levels=3)
assert artist.extend == 'neither'
@pytest.mark.slow
class TestContour(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.contour)
def test_colors(self):
# matplotlib cmap.colors gives an rgbA ndarray
# when seaborn is used, instead we get an rgb tuple
def _color_as_tuple(c):
return tuple(c[:3])
# with single color, we don't want rgb array
artist = self.plotmethod(colors='k')
assert artist.cmap.colors[0] == 'k'
artist = self.plotmethod(colors=['k', 'b'])
assert (_color_as_tuple(artist.cmap.colors[1]) ==
(0.0, 0.0, 1.0))
artist = self.darray.plot.contour(
levels=[-0.5, 0., 0.5, 1.], colors=['k', 'r', 'w', 'b'])
assert (_color_as_tuple(artist.cmap.colors[1]) ==
(1.0, 0.0, 0.0))
assert (_color_as_tuple(artist.cmap.colors[2]) ==
(1.0, 1.0, 1.0))
# the last color is now under "over"
assert (_color_as_tuple(artist.cmap._rgba_over) ==
(0.0, 0.0, 1.0))
def test_cmap_and_color_both(self):
with pytest.raises(ValueError):
self.plotmethod(colors='k', cmap='RdBu')
def list_of_colors_in_cmap_deprecated(self):
with pytest.raises(Exception):
self.plotmethod(cmap=['k', 'b'])
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
assert 'x2d' == ax.get_xlabel()
assert 'y2d' == ax.get_ylabel()
def test_single_level(self):
# this used to raise an error, but not anymore since
# add_colorbar defaults to false
self.plotmethod(levels=[0.1])
self.plotmethod(levels=1)
class TestPcolormesh(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.pcolormesh)
def test_primitive_artist_returned(self):
artist = self.plotmethod()
assert isinstance(artist, mpl.collections.QuadMesh)
def test_everything_plotted(self):
artist = self.plotmethod()
assert artist.get_array().size == self.darray.size
@pytest.mark.slow
def test_2d_coord_names(self):
self.plotmethod(x='x2d', y='y2d')
# make sure labels came out ok
ax = plt.gca()
assert 'x2d' == ax.get_xlabel()
assert 'y2d' == ax.get_ylabel()
def test_dont_infer_interval_breaks_for_cartopy(self):
# Regression for GH 781
ax = plt.gca()
# Simulate a Cartopy Axis
setattr(ax, 'projection', True)
artist = self.plotmethod(x='x2d', y='y2d', ax=ax)
assert isinstance(artist, mpl.collections.QuadMesh)
# Let cartopy handle the axis limits and artist size
assert artist.get_array().size <= self.darray.size
@pytest.mark.slow
class TestImshow(Common2dMixin, PlotTestCase):
plotfunc = staticmethod(xplt.imshow)
@pytest.mark.slow
def test_imshow_called(self):
# Having both statements ensures the test works properly
assert not self.imshow_called(self.darray.plot.contourf)
assert self.imshow_called(self.darray.plot.imshow)
def test_xy_pixel_centered(self):
self.darray.plot.imshow(yincrease=False)
assert np.allclose([-0.5, 14.5], plt.gca().get_xlim())
assert np.allclose([9.5, -0.5], plt.gca().get_ylim())
def test_default_aspect_is_auto(self):
self.darray.plot.imshow()
assert 'auto' == plt.gca().get_aspect()
@pytest.mark.slow
def test_cannot_change_mpl_aspect(self):
with raises_regex(ValueError, 'not available in xarray'):
self.darray.plot.imshow(aspect='equal')
# with numbers we fall back to fig control
self.darray.plot.imshow(size=5, aspect=2)
assert 'auto' == plt.gca().get_aspect()
assert tuple(plt.gcf().get_size_inches()) == (10, 5)
@pytest.mark.slow
def test_primitive_artist_returned(self):
artist = self.plotmethod()
assert isinstance(artist, mpl.image.AxesImage)
@pytest.mark.slow
@requires_seaborn
def test_seaborn_palette_needs_levels(self):
with pytest.raises(ValueError):
self.plotmethod(cmap='husl')
def test_2d_coord_names(self):
with raises_regex(ValueError, 'requires 1D coordinates'):
self.plotmethod(x='x2d', y='y2d')
def test_plot_rgb_image(self):
DataArray(
easy_array((10, 15, 3), start=0),
dims=['y', 'x', 'band'],
).plot.imshow()
assert 0 == len(find_possible_colorbars())
def test_plot_rgb_image_explicit(self):
DataArray(
easy_array((10, 15, 3), start=0),
dims=['y', 'x', 'band'],
).plot.imshow(
y='y', x='x', rgb='band')
assert 0 == len(find_possible_colorbars())
def test_plot_rgb_faceted(self):
DataArray(
easy_array((2, 2, 10, 15, 3), start=0),
dims=['a', 'b', 'y', 'x', 'band'],
).plot.imshow(
row='a', col='b')
assert 0 == len(find_possible_colorbars())
def test_plot_rgba_image_transposed(self):
# We can handle the color axis being in any position
DataArray(
easy_array((4, 10, 15), start=0),
dims=['band', 'y', 'x'],
).plot.imshow()
def test_warns_ambigious_dim(self):
arr = DataArray(easy_array((3, 3, 3)), dims=['y', 'x', 'band'])
with pytest.warns(UserWarning):
arr.plot.imshow()
# but doesn't warn if dimensions specified
arr.plot.imshow(rgb='band')
arr.plot.imshow(x='x', y='y')
def test_rgb_errors_too_many_dims(self):
arr = DataArray(easy_array((3, 3, 3, 3)), dims=['y', 'x', 'z', 'band'])
with pytest.raises(ValueError):
arr.plot.imshow(rgb='band')
def test_rgb_errors_bad_dim_sizes(self):
arr = DataArray(easy_array((5, 5, 5)), dims=['y', 'x', 'band'])
with pytest.raises(ValueError):
arr.plot.imshow(rgb='band')
def test_normalize_rgb_imshow(self):
for kwds in (
dict(vmin=-1), dict(vmax=2),
dict(vmin=-1, vmax=1), dict(vmin=0, vmax=0),
dict(vmin=0, robust=True), dict(vmax=-1, robust=True),
):
da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4))
arr = da.plot.imshow(**kwds).get_array()
assert 0 <= arr.min() <= arr.max() <= 1, kwds
def test_normalize_rgb_one_arg_error(self):
da = DataArray(easy_array((5, 5, 3), start=-0.6, stop=1.4))
# If passed one bound that implies all out of range, error:
for kwds in [dict(vmax=-1), dict(vmin=2)]:
with pytest.raises(ValueError):
da.plot.imshow(**kwds)
# If passed two that's just moving the range, *not* an error:
for kwds in [dict(vmax=-1, vmin=-1.2), dict(vmin=2, vmax=2.1)]:
da.plot.imshow(**kwds)
def test_imshow_rgb_values_in_valid_range(self):
da = DataArray(np.arange(75, dtype='uint8').reshape((5, 5, 3)))
_, ax = plt.subplots()
out = da.plot.imshow(ax=ax).get_array()
assert out.dtype == np.uint8
assert (out[..., :3] == da.values).all() # Compare without added alpha
@pytest.mark.filterwarnings('ignore:Several dimensions of this array')
def test_regression_rgb_imshow_dim_size_one(self):
# Regression: https://github.com/pydata/xarray/issues/1966
da = DataArray(easy_array((1, 3, 3), start=0.0, stop=1.0))
da.plot.imshow()
def test_origin_overrides_xyincrease(self):
da = DataArray(easy_array((3, 2)), coords=[[-2, 0, 2], [-1, 1]])
da.plot.imshow(origin='upper')
assert plt.xlim()[0] < 0
assert plt.ylim()[1] < 0
plt.clf()
da.plot.imshow(origin='lower')
assert plt.xlim()[0] < 0
assert plt.ylim()[0] < 0
class TestFacetGrid(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
d = easy_array((10, 15, 3))
self.darray = DataArray(
d, dims=['y', 'x', 'z'], coords={
'z': ['a', 'b', 'c']
})
self.g = xplt.FacetGrid(self.darray, col='z')
@pytest.mark.slow
def test_no_args(self):
self.g.map_dataarray(xplt.contourf, 'x', 'y')
# Don't want colorbar labeled with 'None'
alltxt = text_in_fig()
assert 'None' not in alltxt
for ax in self.g.axes.flat:
assert ax.has_data()
@pytest.mark.slow
def test_names_appear_somewhere(self):
self.darray.name = 'testvar'
self.g.map_dataarray(xplt.contourf, 'x', 'y')
for k, ax in zip('abc', self.g.axes.flat):
assert 'z = {0}'.format(k) == ax.get_title()
alltxt = text_in_fig()
assert self.darray.name in alltxt
for label in ['x', 'y']:
assert label in alltxt
@pytest.mark.slow
def test_text_not_super_long(self):
self.darray.coords['z'] = [100 * letter for letter in 'abc']
g = xplt.FacetGrid(self.darray, col='z')
g.map_dataarray(xplt.contour, 'x', 'y')
alltxt = text_in_fig()
maxlen = max(len(txt) for txt in alltxt)
assert maxlen < 50
t0 = g.axes[0, 0].get_title()
assert t0.endswith('...')
@pytest.mark.slow
def test_colorbar(self):
vmin = self.darray.values.min()
vmax = self.darray.values.max()
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, 'x', 'y')
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
assert np.allclose(expected, clim)
assert 1 == len(find_possible_colorbars())
@pytest.mark.slow
def test_empty_cell(self):
g = xplt.FacetGrid(self.darray, col='z', col_wrap=2)
g.map_dataarray(xplt.imshow, 'x', 'y')
bottomright = g.axes[-1, -1]
assert not bottomright.has_data()
assert not bottomright.get_visible()
@pytest.mark.slow
def test_norow_nocol_error(self):
with raises_regex(ValueError, r'[Rr]ow'):
xplt.FacetGrid(self.darray)
@pytest.mark.slow
def test_groups(self):
self.g.map_dataarray(xplt.imshow, 'x', 'y')
upperleft_dict = self.g.name_dicts[0, 0]
upperleft_array = self.darray.loc[upperleft_dict]
z0 = self.darray.isel(z=0)
assert_equal(upperleft_array, z0)
@pytest.mark.slow
def test_float_index(self):
self.darray.coords['z'] = [0.1, 0.2, 0.4]
g = xplt.FacetGrid(self.darray, col='z')
g.map_dataarray(xplt.imshow, 'x', 'y')
@pytest.mark.slow
def test_nonunique_index_error(self):
self.darray.coords['z'] = [0.1, 0.2, 0.2]
with raises_regex(ValueError, r'[Uu]nique'):
xplt.FacetGrid(self.darray, col='z')
@pytest.mark.slow
def test_robust(self):
z = np.zeros((20, 20, 2))
darray = DataArray(z, dims=['y', 'x', 'z'])
darray[:, :, 1] = 1
darray[2, 0, 0] = -1000
darray[3, 0, 0] = 1000
g = xplt.FacetGrid(darray, col='z')
g.map_dataarray(xplt.imshow, 'x', 'y', robust=True)
# Color limits should be 0, 1
# The largest number displayed in the figure should be less than 21
numbers = set()
alltxt = text_in_fig()
for txt in alltxt:
try:
numbers.add(float(txt))
except ValueError:
pass
largest = max(abs(x) for x in numbers)
assert largest < 21
@pytest.mark.slow
def test_can_set_vmin_vmax(self):
vmin, vmax = 50.0, 1000.0
expected = np.array((vmin, vmax))
self.g.map_dataarray(xplt.imshow, 'x', 'y', vmin=vmin, vmax=vmax)
for image in plt.gcf().findobj(mpl.image.AxesImage):
clim = np.array(image.get_clim())
assert np.allclose(expected, clim)
@pytest.mark.slow
def test_can_set_norm(self):
norm = mpl.colors.SymLogNorm(0.1)
self.g.map_dataarray(xplt.imshow, 'x', 'y', norm=norm)
for image in plt.gcf().findobj(mpl.image.AxesImage):
assert image.norm is norm
@pytest.mark.slow
def test_figure_size(self):
assert_array_equal(self.g.fig.get_size_inches(), (10, 3))
g = xplt.FacetGrid(self.darray, col='z', size=6)
assert_array_equal(g.fig.get_size_inches(), (19, 6))
g = self.darray.plot.imshow(col='z', size=6)
assert_array_equal(g.fig.get_size_inches(), (19, 6))
g = xplt.FacetGrid(self.darray, col='z', size=4, aspect=0.5)
assert_array_equal(g.fig.get_size_inches(), (7, 4))
g = xplt.FacetGrid(self.darray, col='z', figsize=(9, 4))
assert_array_equal(g.fig.get_size_inches(), (9, 4))
with raises_regex(ValueError, "cannot provide both"):
g = xplt.plot(self.darray, row=2, col='z', figsize=(6, 4), size=6)
with raises_regex(ValueError, "Can't use"):
g = xplt.plot(self.darray, row=2, col='z', ax=plt.gca(), size=6)
@pytest.mark.slow
def test_num_ticks(self):
nticks = 99
maxticks = nticks + 1
self.g.map_dataarray(xplt.imshow, 'x', 'y')
self.g.set_ticks(max_xticks=nticks, max_yticks=nticks)
for ax in self.g.axes.flat:
xticks = len(ax.get_xticks())
yticks = len(ax.get_yticks())
assert xticks <= maxticks
assert yticks <= maxticks
assert xticks >= nticks / 2.0
assert yticks >= nticks / 2.0
@pytest.mark.slow
def test_map(self):
assert self.g._finalized is False
self.g.map(plt.contourf, 'x', 'y', Ellipsis)
assert self.g._finalized is True
self.g.map(lambda: None)
@pytest.mark.slow
def test_map_dataset(self):
g = xplt.FacetGrid(self.darray.to_dataset(name='foo'), col='z')
g.map(plt.contourf, 'x', 'y', 'foo')
alltxt = text_in_fig()
for label in ['x', 'y']:
assert label in alltxt
# everything has a label
assert 'None' not in alltxt
# colorbar can't be inferred automatically
assert 'foo' not in alltxt
assert 0 == len(find_possible_colorbars())
g.add_colorbar(label='colors!')
assert 'colors!' in text_in_fig()
assert 1 == len(find_possible_colorbars())
@pytest.mark.slow
def test_set_axis_labels(self):
g = self.g.map_dataarray(xplt.contourf, 'x', 'y')
g.set_axis_labels('longitude', 'latitude')
alltxt = text_in_fig()
for label in ['longitude', 'latitude']:
assert label in alltxt
@pytest.mark.slow
def test_facetgrid_colorbar(self):
a = easy_array((10, 15, 4))
d = DataArray(a, dims=['y', 'x', 'z'], name='foo')
d.plot.imshow(x='x', y='y', col='z')
assert 1 == len(find_possible_colorbars())
d.plot.imshow(x='x', y='y', col='z', add_colorbar=True)
assert 1 == len(find_possible_colorbars())
d.plot.imshow(x='x', y='y', col='z', add_colorbar=False)
assert 0 == len(find_possible_colorbars())
@pytest.mark.slow
def test_facetgrid_polar(self):
# test if polar projection in FacetGrid does not raise an exception
self.darray.plot.pcolormesh(
col='z',
subplot_kws=dict(projection='polar'),
sharex=False,
sharey=False)
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
class TestFacetGrid4d(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
a = easy_array((10, 15, 3, 2))
darray = DataArray(a, dims=['y', 'x', 'col', 'row'])
darray.coords['col'] = np.array(
['col' + str(x) for x in darray.coords['col'].values])
darray.coords['row'] = np.array(
['row' + str(x) for x in darray.coords['row'].values])
self.darray = darray
@pytest.mark.slow
def test_default_labels(self):
g = xplt.FacetGrid(self.darray, col='col', row='row')
assert (2, 3) == g.axes.shape
g.map_dataarray(xplt.imshow, 'x', 'y')
# Rightmost column should be labeled
for label, ax in zip(self.darray.coords['row'].values, g.axes[:, -1]):
assert substring_in_axes(label, ax)
# Top row should be labeled
for label, ax in zip(self.darray.coords['col'].values, g.axes[0, :]):
assert substring_in_axes(label, ax)
@pytest.mark.filterwarnings('ignore:tight_layout cannot')
class TestFacetedLinePlots(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
self.darray = DataArray(np.random.randn(10, 6, 3, 4),
dims=['hue', 'x', 'col', 'row'],
coords=[range(10), range(6),
range(3), ['A', 'B', 'C', 'C++']],
name='<NAME> the 1st')
self.darray.hue.name = 'huename'
self.darray.hue.attrs['units'] = 'hunits'
self.darray.x.attrs['units'] = 'xunits'
self.darray.col.attrs['units'] = 'colunits'
self.darray.row.attrs['units'] = 'rowunits'
def test_facetgrid_shape(self):
g = self.darray.plot(row='row', col='col', hue='hue')
assert g.axes.shape == (len(self.darray.row), len(self.darray.col))
g = self.darray.plot(row='col', col='row', hue='hue')
assert g.axes.shape == (len(self.darray.col), len(self.darray.row))
def test_unnamed_args(self):
g = self.darray.plot.line('o--', row='row', col='col', hue='hue')
lines = [q for q in g.axes.flat[0].get_children()
if isinstance(q, mpl.lines.Line2D)]
# passing 'o--' as argument should set marker and linestyle
assert lines[0].get_marker() == 'o'
assert lines[0].get_linestyle() == '--'
def test_default_labels(self):
g = self.darray.plot(row='row', col='col', hue='hue')
# Rightmost column should be labeled
for label, ax in zip(self.darray.coords['row'].values, g.axes[:, -1]):
assert substring_in_axes(label, ax)
# Top row should be labeled
for label, ax in zip(self.darray.coords['col'].values, g.axes[0, :]):
assert substring_in_axes(str(label), ax)
# Leftmost column should have array name
for ax in g.axes[:, 0]:
assert substring_in_axes(self.darray.name, ax)
def test_test_empty_cell(self):
g = self.darray.isel(row=1).drop('row').plot(col='col',
hue='hue',
col_wrap=2)
bottomright = g.axes[-1, -1]
assert not bottomright.has_data()
assert not bottomright.get_visible()
def test_set_axis_labels(self):
g = self.darray.plot(row='row', col='col', hue='hue')
g.set_axis_labels('longitude', 'latitude')
alltxt = text_in_fig()
assert 'longitude' in alltxt
assert 'latitude' in alltxt
def test_both_x_and_y(self):
with pytest.raises(ValueError):
self.darray.plot.line(row='row', col='col',
x='x', y='hue')
def test_axes_in_faceted_plot(self):
with pytest.raises(ValueError):
self.darray.plot.line(row='row', col='col',
x='x', ax=plt.axes())
def test_figsize_and_size(self):
with pytest.raises(ValueError):
self.darray.plot.line(row='row', col='col',
x='x', size=3, figsize=4)
def test_wrong_num_of_dimensions(self):
with pytest.raises(ValueError):
self.darray.plot(row='row', hue='hue')
self.darray.plot.line(row='row', hue='hue')
class TestDatetimePlot(PlotTestCase):
@pytest.fixture(autouse=True)
def setUp(self):
'''
Create a DataArray with a time-axis that contains datetime objects.
'''
month = np.arange(1, 13, 1)
data = np.sin(2 * np.pi * month / 12.0)
darray = DataArray(data, dims=['time'])
darray.coords['time'] = np.array([datetime(2017, m, 1) for m in month])
self.darray = darray
def test_datetime_line_plot(self):
# test if line plot raises no Exception
self.darray.plot.line()
@requires_seaborn
def test_import_seaborn_no_warning():
# GH1633
with pytest.warns(None) as record:
import_seaborn()
assert len(record) == 0
@requires_matplotlib
def test_plot_seaborn_no_import_warning():
# GH1633
with pytest.warns(None) as record:
_color_palette('Blues', 4)
assert len(record) == 0
@requires_cftime
def test_plot_cftime_coordinate_error():
cftime = _import_cftime()
time = cftime.num2date(np.arange(5), units='days since 0001-01-01',
calendar='noleap')
data = DataArray(np.arange(5), coords=[time], dims=['time'])
with raises_regex(TypeError,
'requires coordinates to be numeric or dates'):
data.plot()
@requires_cftime
def test_plot_cftime_data_error():
cftime = _import_cftime()
data = cftime.num2date(np.arange(5), units='days since 0001-01-01',
calendar='noleap')
data = DataArray(data, coords=[np.arange(5)], dims=['x'])
with raises_regex(NotImplementedError, 'cftime.datetime'):
data.plot()
test_da_list = [DataArray(easy_array((10, ))),
DataArray(easy_array((10, 3))),
DataArray(easy_array((10, 3, 2)))]
@requires_matplotlib
class TestAxesKwargs(object):
@pytest.mark.parametrize('da', test_da_list)
@pytest.mark.parametrize('xincrease', [True, False])
def test_xincrease_kwarg(self, da, xincrease):
plt.clf()
da.plot(xincrease=xincrease)
assert plt.gca().xaxis_inverted() == (not xincrease)
@pytest.mark.parametrize('da', test_da_list)
@pytest.mark.parametrize('yincrease', [True, False])
def test_yincrease_kwarg(self, da, yincrease):
plt.clf()
da.plot(yincrease=yincrease)
assert plt.gca().yaxis_inverted() == (not yincrease)
@pytest.mark.parametrize('da', test_da_list)
@pytest.mark.parametrize('xscale', ['linear', 'log', 'logit', 'symlog'])
def test_xscale_kwarg(self, da, xscale):
plt.clf()
da.plot(xscale=xscale)
assert plt.gca().get_xscale() == xscale
@pytest.mark.parametrize('da', [DataArray(easy_array((10, ))),
DataArray(easy_array((10, 3)))])
@pytest.mark.parametrize('yscale', ['linear', 'log', 'logit', 'symlog'])
def test_yscale_kwarg(self, da, yscale):
plt.clf()
da.plot(yscale=yscale)
assert plt.gca().get_yscale() == yscale
@pytest.mark.parametrize('da', test_da_list)
def test_xlim_kwarg(self, da):
plt.clf()
expected = (0.0, 1000.0)
da.plot(xlim=[0, 1000])
assert plt.gca().get_xlim() == expected
@pytest.mark.parametrize('da', test_da_list)
def test_ylim_kwarg(self, da):
plt.clf()
da.plot(ylim=[0, 1000])
expected = (0.0, 1000.0)
assert plt.gca().get_ylim() == expected
@pytest.mark.parametrize('da', test_da_list)
def test_xticks_kwarg(self, da):
plt.clf()
da.plot(xticks=np.arange(5))
expected = np.arange(5)
# the requested ticks should be present on the x-axis
assert np.all(np.isin(expected, plt.gca().get_xticks()))
"""
Classes that implement SafeOpt.
Authors: - <NAME> (befelix at inf dot ethz dot ch)
- <NAME> (carion dot nicolas at gmail dot com)
"""
from __future__ import print_function, absolute_import, division
try:  # Sequence moved to collections.abc in Python 3.3; the bare import fails on 3.10+
    from collections.abc import Sequence
except ImportError:  # pragma: no cover - legacy Python
    from collections import Sequence
from functools import partial
import numpy as np
from scipy.spatial.distance import cdist
from scipy.special import expit
from scipy.stats import norm
from builtins import range
from .utilities import (plot_2d_gp, plot_3d_gp, plot_contour_gp,
linearly_spaced_combinations)
from .swarm import SwarmOptimization
import logging
__all__ = ['SafeOpt', 'SafeOptSwarm']
class GaussianProcessOptimization(object):
"""
Base class for GP optimization.
Handles common functionality.
Parameters
----------
gp: GPy Gaussian process
fmin : float or list of floats
Safety threshold for the function value. If multiple safety constraints
are used this can also be a list of floats (the first one is always
the one for the values, can be set to None if not wanted).
beta: float or callable
A constant or a function of the time step that scales the confidence
interval of the acquisition function.
threshold: float or list of floats
The algorithm will not try to expand any points that are below this
threshold. This makes the algorithm stop expanding points eventually.
If a list, this represents the stopping criterion for all the gps.
This ignores the scaling factor.
scaling: list of floats or "auto"
A list used to scale the GP uncertainties to compensate for
different input sizes. This should be set to the maximal variance of
each kernel. You should probably leave this to "auto" unless your
kernel is non-stationary.
"""
def __init__(self, gp, fmin, beta=2, num_contexts=0, threshold=0,
scaling='auto'):
"""Initialization, see `GaussianProcessOptimization`."""
super(GaussianProcessOptimization, self).__init__()
if isinstance(gp, list):
self.gps = gp
else:
self.gps = [gp]
self.gp = self.gps[0]
self.fmin = fmin
if not isinstance(self.fmin, list):
self.fmin = [self.fmin] * len(self.gps)
self.fmin = np.atleast_1d(np.asarray(self.fmin).squeeze())
if hasattr(beta, '__call__'):
# Beta is a function of t
self.beta = beta
else:
# Assume that beta is a constant
self.beta = lambda t: beta
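# For example (a sketch, not from the original code): a time-dependent schedule such as
# beta=lambda t: 2 * np.log(2 * t + 1) widens the confidence intervals as more
# measurements arrive, while a plain beta=2 keeps them fixed.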
if scaling == 'auto':
dummy_point = np.zeros((1, self.gps[0].input_dim))
self.scaling = [gpm.kern.Kdiag(dummy_point)[0] for gpm in self.gps]
self.scaling = np.sqrt(np.asarray(self.scaling))
else:
self.scaling = np.asarray(scaling)
if self.scaling.shape[0] != len(self.gps):
raise ValueError("The number of scaling values should be "
"equal to the number of GPs")
self.threshold = threshold
self._parameter_set = None
self.bounds = None
self.num_samples = 0
self.num_contexts = num_contexts
self._x = None
self._y = None
self._get_initial_xy()
@property
def x(self):
return self._x
@property
def y(self):
return self._y
@property
def data(self):
"""Return the data within the GP models."""
return self._x, self._y
@property
def t(self):
"""Return the time step (number of measurements)."""
return self._x.shape[0]
def _get_initial_xy(self):
"""Get the initial x/y data from the GPs."""
self._x = self.gp.X
y = [self.gp.Y]
for gp in self.gps[1:]:
if np.allclose(self._x, gp.X):
y.append(gp.Y)
else:
raise NotImplementedError('The GPs have different measurements.')
self._y = np.concatenate(y, axis=1)
def plot(self, n_samples, axis=None, figure=None, plot_3d=False,
**kwargs):
"""
Plot the current state of the optimization.
Parameters
----------
n_samples: int
How many samples to use for plotting
axis: matplotlib axis
The axis on which to draw (does not get cleared first)
figure: matplotlib figure
Ignored if axis is already defined
plot_3d: boolean
If set to true shows a 3D plot for 2 dimensional data
"""
# Fix contexts to their current values
if self.num_contexts > 0 and 'fixed_inputs' not in kwargs:
kwargs.update(fixed_inputs=self.context_fixed_inputs)
true_input_dim = self.gp.kern.input_dim - self.num_contexts
if true_input_dim == 1 or plot_3d:
inputs = np.zeros((n_samples ** true_input_dim, self.gp.input_dim))
inputs[:, :true_input_dim] = linearly_spaced_combinations(
self.bounds[:true_input_dim],
n_samples)
if not isinstance(n_samples, Sequence):
n_samples = [n_samples] * len(self.bounds)
axes = []
if self.gp.input_dim - self.num_contexts == 1:
# 2D plots with uncertainty
for gp, fmin in zip(self.gps, self.fmin):
if fmin == -np.inf:
fmin = None
ax = plot_2d_gp(gp, inputs, figure=figure, axis=axis,
fmin=fmin, **kwargs)
axes.append(ax)
else:
if plot_3d:
for gp in self.gps:
plot_3d_gp(gp, inputs, figure=figure, axis=axis, **kwargs)
else:
for gp in self.gps:
plot_contour_gp(gp,
[np.linspace(self.bounds[0][0],
self.bounds[0][1],
n_samples[0]),
np.linspace(self.bounds[1][0],
self.bounds[1][1],
n_samples[1])],
figure=figure,
axis=axis)
def _add_context(self, x, context):
"""Add the context to a vector.
Parameters
----------
x : ndarray
context : ndarray
Returns
-------
x_extended : ndarray
"""
context = np.atleast_2d(context)
num_contexts = context.shape[1]
x2 = np.empty((x.shape[0], x.shape[1] + num_contexts), dtype=float)
x2[:, :x.shape[1]] = x
x2[:, x.shape[1]:] = context
return x2
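# Shape example (illustrative): with x of shape (n, d) and a single context of shape
# (1, c), the returned x_extended has shape (n, d + c); the context columns are
# broadcast to every row of x.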
def _add_data_point(self, gp, x, y, context=None):
"""Add a data point to a particular GP.
This should only be called on its own if you know what you're doing.
This does not update the global data stores self.x and self.y.
Parameters
----------
x: 2d-array
y: 2d-array
context: array_like
The context(s) used for the data points
gp: instance of GPy.model.GPRegression
If specified, determines the GP to which we add the data point
to. Note that this should only be used if that data point is going
to be removed again.
"""
if context is not None:
x = self._add_context(x, context)
gp.set_XY(np.vstack([gp.X, x]),
np.vstack([gp.Y, y]))
def add_new_data_point(self, x, y, context=None):
"""
Add a new function observation to the GPs.
Parameters
----------
x: 2d-array
y: 2d-array
context: array_like
The context(s) used for the data points.
"""
x = np.atleast_2d(x)
y = np.atleast_2d(y)
if self.num_contexts:
x = self._add_context(x, context)
for i, gp in enumerate(self.gps):
not_nan = ~np.isnan(y[:, i])
if np.any(not_nan):
# Add data to GP (context already included in x)
self._add_data_point(gp, x[not_nan, :], y[not_nan, [i]])
# Update global data stores
self._x = np.concatenate((self._x, x), axis=0)
self._y = np.concatenate((self._y, y), axis=0)
def _remove_last_data_point(self, gp):
"""Remove the last data point of a specific GP.
This does not update global data stores, self.x and self.y.
Parameters
----------
gp: Instance of GPy.models.GPRegression
The gp that the last data point should be removed from
"""
gp.set_XY(gp.X[:-1, :], gp.Y[:-1, :])
def remove_last_data_point(self):
"""Remove the data point that was last added to the GP."""
last_y = self._y[-1]
for gp, yi in zip(self.gps, last_y):
if not np.isnan(yi):
gp.set_XY(gp.X[:-1, :], gp.Y[:-1, :])
self._x = self._x[:-1, :]
self._y = self._y[:-1, :]
class SafeOpt(GaussianProcessOptimization):
"""A class for Safe Bayesian Optimization.
This class implements the `SafeOpt` algorithm. It uses a Gaussian
process model in order to determine parameter combinations that are safe
with high probability. Based on these, it aims to both expand the set of
safe parameters and to find the optimal parameters within the safe set.
Parameters
----------
gp: GPy Gaussian process
A Gaussian process which is initialized with safe, initial data points.
If a list of GPs then the first one is the value, while all the
other ones are safety constraints.
parameter_set: 2d-array
List of parameters
fmin: list of floats
Safety threshold for the function value. If multiple safety constraints
are used this can also be a list of floats (the first one is always
the one for the values, can be set to None if not wanted)
lipschitz: list of floats
The Lipschitz constant of the system, if None the GP confidence
intervals are used directly.
beta: float or callable
A constant or a function of the time step that scales the confidence
interval of the acquisition function.
threshold: float or list of floats
The algorithm will not try to expand any points that are below this
threshold. This makes the algorithm stop expanding points eventually.
If a list, this represents the stopping criterion for all the gps.
This ignores the scaling factor.
scaling: list of floats or "auto"
A list used to scale the GP uncertainties to compensate for
different input sizes. This should be set to the maximal variance of
each kernel. You should probably leave this to "auto" unless your
kernel is non-stationary.
Examples
--------
>>> from safeopt import SafeOpt
>>> from safeopt import linearly_spaced_combinations
>>> import GPy
>>> import numpy as np
Define a Gaussian process prior over the performance
>>> x = np.array([[0.]])
>>> y = np.array([[1.]])
>>> gp = GPy.models.GPRegression(x, y, noise_var=0.01**2)
>>> bounds = [[-1., 1.]]
>>> parameter_set = linearly_spaced_combinations([[-1., 1.]],
... num_samples=100)
Initialize the Bayesian optimization and get new parameters to evaluate
>>> opt = SafeOpt(gp, parameter_set, fmin=[0.])
>>> next_parameters = opt.optimize()
Add a new data point with the parameters and the performance to the GP. The
performance has normally been determined through an external function call.
>>> performance = np.array([[1.]])
>>> opt.add_new_data_point(next_parameters, performance)
"""
def __init__(self, gp, parameter_set, fmin, lipschitz=None, beta=2,
num_contexts=0, threshold=0, scaling='auto'):
"""Initialization, see `SafeOpt`."""
super(SafeOpt, self).__init__(gp,
fmin=fmin,
beta=beta,
num_contexts=num_contexts,
threshold=threshold,
scaling=scaling)
if self.num_contexts > 0:
context_shape = (parameter_set.shape[0], self.num_contexts)
self.inputs = np.hstack((parameter_set,
np.zeros(context_shape,
dtype=parameter_set.dtype)))
self.parameter_set = self.inputs[:, :-self.num_contexts]
else:
self.inputs = self.parameter_set = parameter_set
self.liptschitz = lipschitz
if self.liptschitz is not None:
if not isinstance(self.liptschitz, list):
self.liptschitz = [self.liptschitz] * len(self.gps)
self.liptschitz = np.atleast_1d(
np.asarray(self.liptschitz).squeeze())
# Value intervals
self.Q = np.empty((self.inputs.shape[0], 2 * len(self.gps)),
dtype=float)  # np.float was removed from NumPy; the builtin float is equivalent
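# Layout of Q (descriptive note): column 2*i holds the lower confidence bound and
# column 2*i + 1 the upper confidence bound of GP i; both are filled in by
# update_confidence_intervals().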
# Safe set
self.S = np.zeros(self.inputs.shape[0], dtype=bool)
# Switch to use confidence intervals for safety
if lipschitz is None:
self._use_lipschitz = False
else:
self._use_lipschitz = True
# Set of expanders and maximizers
self.G = self.S.copy()
self.M = self.S.copy()
@property
def use_lipschitz(self):
"""
Boolean that determines whether to use the Lipschitz constant.
By default this is set to False, which means the adapted SafeOpt
algorithm is used, that uses the GP confidence intervals directly.
If set to True, the `self.lipschitz` parameter is used to compute
the safe and expanders sets.
"""
return self._use_lipschitz
@use_lipschitz.setter
def use_lipschitz(self, value):
if value and self.liptschitz is None:
raise ValueError('Lipschitz constant not defined')
self._use_lipschitz = value
@property
def parameter_set(self):
"""Discrete parameter samples for Bayesian optimization."""
return self._parameter_set
@parameter_set.setter
def parameter_set(self, parameter_set):
self._parameter_set = parameter_set
# Plotting bounds (min, max value)
self.bounds = list(zip(np.min(self._parameter_set, axis=0),
np.max(self._parameter_set, axis=0)))
self.num_samples = [len(np.unique(self._parameter_set[:, i]))
for i in range(self._parameter_set.shape[1])]
@property
def context_fixed_inputs(self):
"""Return the fixed inputs for the current context."""
n = self.gp.input_dim - 1
nc = self.num_contexts
if nc > 0:
contexts = self.inputs[0, -self.num_contexts:]
return list(zip(range(n, n - nc, -1), contexts))
@property
def context(self):
"""Return the current context variables."""
if self.num_contexts:
return self.inputs[0, -self.num_contexts:]
@context.setter
def context(self, context):
"""Set the current context and update confidence intervals.
Parameters
----------
context: ndarray
New context that should be applied to the input parameters
"""
if self.num_contexts:
if context is None:
raise ValueError('Need to provide value for context.')
self.inputs[:, -self.num_contexts:] = context
def update_confidence_intervals(self, context=None):
"""Recompute the confidence intervals form the GP.
Parameters
----------
context: ndarray
Array that contains the context used to compute the sets
"""
beta = self.beta(self.t)
# Update context to current setting
self.context = context
# Iterate over all functions
for i in range(len(self.gps)):
# Evaluate acquisition function
mean, var = self.gps[i].predict_noiseless(self.inputs)
mean = mean.squeeze()
std_dev = np.sqrt(var.squeeze())
# Update confidence intervals
self.Q[:, 2 * i] = mean - beta * std_dev
self.Q[:, 2 * i + 1] = mean + beta * std_dev
def compute_safe_set(self):
"""Compute only the safe set based on the current confidence bounds."""
# Update safe set
self.S[:] = np.all(self.Q[:, ::2] > self.fmin, axis=1)
def compute_sets(self, full_sets=False):
"""
Compute the safe set of points, based on current confidence bounds.
Parameters
----------
context: ndarray
Array that contains the context used to compute the sets
full_sets: boolean
Whether to compute the full set of expanders or whether to omit
computations that are not relevant for running SafeOpt
(This option is only useful for plotting purposes)
"""
beta = self.beta(self.t)
# Update safe set
self.compute_safe_set()
# Reference to confidence intervals
l, u = self.Q[:, :2].T
if not np.any(self.S):
self.M[:] = False
self.G[:] = False
return
# Set of possible maximisers
# Maximizers: safe upper bound above best, safe lower bound
self.M[:] = False
self.M[self.S] = u[self.S] >= np.max(l[self.S])
max_var = np.max(u[self.M] - l[self.M]) / self.scaling[0]
# Optimistic set of possible expanders
l = self.Q[:, ::2]
u = self.Q[:, 1::2]
self.G[:] = False
# For the run of the algorithm we do not need to calculate the
# full set of potential expanders:
# We can skip the ones already in M and ones that have lower
# variance than the maximum variance in M, max_var or the threshold.
# Amongst the remaining ones we only need to find the
# potential expander with maximum variance
if full_sets:
s = self.S
else:
# skip points in M, they will already be evaluated
s = np.logical_and(self.S, ~self.M)
# Remove points with a variance that is too small
s[s] = (np.max((u[s, :] - l[s, :]) / self.scaling, axis=1) >
max_var)
s[s] = np.any(u[s, :] - l[s, :] > self.threshold * beta, axis=1)
if not np.any(s):
# no need to evaluate any points as expanders in G, exit
return
def sort_generator(array):
"""Return the sorted array, largest element first."""
return array.argsort()[::-1]
# set of safe expanders
G_safe = np.zeros(np.count_nonzero(s), dtype=bool)
if not full_sets:
# Sort, element with largest variance first
sort_index = sort_generator(np.max(u[s, :] - l[s, :],
axis=1))
else:
# Sort index is just an enumeration of all safe states
sort_index = range(len(G_safe))
for index in sort_index:
if self.use_lipschitz:
# Distance between current index point and all other unsafe
# points
d = cdist(self.inputs[s, :][[index], :],
self.inputs[~self.S, :])
# Check if expander for all GPs
for i in range(len(self.gps)):
# Skip evaluation if 'no' safety constraint
if self.fmin[i] == -np.inf:
continue
# Safety: u - L * d >= fmin
G_safe[index] =\
np.any(u[s, i][index] - self.liptschitz[i] * d >=
self.fmin[i])
# Stop evaluating if not expander according to one
# safety constraint
if not G_safe[index]:
break
else:
# Check if expander for all GPs
for i, gp in enumerate(self.gps):
# Skip evaluation if 'no' safety constraint
if self.fmin[i] == -np.inf:
continue
# Add safe point with its max possible value to the gp
self._add_data_point(gp=gp,
x=self.parameter_set[s, :][index, :],
y=u[s, i][index],
context=self.context)
# Prediction of previously unsafe points based on that
mean2, var2 = gp.predict_noiseless(self.inputs[~self.S])
# Remove the fake data point from the GP again
self._remove_last_data_point(gp=gp)
mean2 = mean2.squeeze()
var2 = var2.squeeze()
l2 = mean2 - beta * np.sqrt(var2)
# If any unsafe lower bound is suddenly above fmin then
# the point is an expander
G_safe[index] = np.any(l2 >= self.fmin[i])
# Break if one safety GP is not an expander
if not G_safe[index]:
break
# Since we sorted by uncertainty and only the most
# uncertain element gets picked by SafeOpt anyways, we can
# stop after we found the first one
if G_safe[index] and not full_sets:
break
# Update safe set (if full_sets is False this is at most one point)
self.G[s] = G_safe
def get_new_query_point(self, ucb=False):
"""
Compute a new point at which to evaluate the function.
Parameters
----------
ucb: bool
If True the safe-ucb criteria is used instead.
Returns
-------
x: np.array
The next parameters that should be evaluated.
"""
if not np.any(self.S):
raise EnvironmentError('There are no safe points to evaluate.')
if ucb:
max_id = np.argmax(self.Q[self.S, 1])
x = self.inputs[self.S, :][max_id, :]
else:
# Get lower and upper bounds
l = self.Q[:, ::2]
u = self.Q[:, 1::2]
MG = np.logical_or(self.M, self.G)
value = np.max((u[MG] - l[MG]) / self.scaling, axis=1)
x = self.inputs[MG, :][np.argmax(value), :]
if self.num_contexts:
return x[:-self.num_contexts]
else:
return x
def optimize(self, context=None, ucb=False):
"""Run Safe Bayesian optimization and get the next parameters.
Parameters
----------
context: ndarray
A vector containing the current context
ucb: bool
If True the safe-ucb criteria is used instead.
Returns
-------
x: np.array
The next parameters that should be evaluated.
"""
# Update confidence intervals based on current estimate
self.update_confidence_intervals(context=context)
# Update the sets
if ucb:
self.compute_safe_set()
else:
self.compute_sets()
return self.get_new_query_point(ucb=ucb)
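# Typical optimization loop (sketch; evaluate_system is a user-supplied, hypothetical
# callback, not part of this class):
#     x_next = opt.optimize()
#     y_next = evaluate_system(x_next)        # measure performance/safety values
#     opt.add_new_data_point(x_next, y_next)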
def get_maximum(self, context=None):
"""
Return the current estimate for the maximum.
Parameters
----------
context: ndarray
A vector containing the current context
Returns
-------
x - ndarray
Location of the maximum
y - 0darray
Maximum value
Notes
-----
Uses the current context and confidence intervals!
Run update_confidence_intervals first if you recently added a new data
point.
"""
self.update_confidence_intervals(context=context)
# Compute the safe set (that's cheap anyways)
self.compute_safe_set()
# Return nothing if there are no safe points
if not np.any(self.S):
    return None
import os
import ee
import geemap
import json
import requests
import numpy as np
import pandas as pd
import matplotlib.pylab as plt
from datetime import datetime
from datetime import timedelta
import rasterio as rio
from rasterio import plot
from rasterio import warp
try:
ee.Initialize()
except:
ee.Authenticate()
ee.Initialize()
class dataCollector:
def __init__(self, beam=None, oaurl=None, track=None, date=None, latlims=None, lonlims=None, verbose=False):
if (beam is None) or ((oaurl is None) and (None in [track, date, latlims, lonlims])):
raise Exception('''Please specify a beam and
- either: an OpenAltimetry API url,
- or: a track, date, latitude limits and longitude limits.''')
else:
if oaurl is not None:
url = oaurl
tofind = '&beamName='
ids = url.find(tofind)
while ids>-1:
url = url.replace(url[ids:ids+len(tofind)+4],'')
ids = url.find(tofind)
iprod = url.find('/atl')
url = url.replace(url[iprod:iprod+6],'/atlXX')
url += tofind + beam + '&client=jupyter'
idate = url.find('date=') + len('date=')
date = url[idate:idate+10]
itrack = url.find('trackId=') + len('trackId=')
trackend = url[itrack:].find('&')
track = int(url[itrack:itrack+trackend])
bb = []
for s in ['minx=', 'maxx=', 'miny=', 'maxy=']:
ids = url.find(s) + len(s)
ide = url[ids:].find('&')
bb.append(float(url[ids:ids+ide]))
lonlims = bb[:2]
latlims = bb[2:]
elif None not in [track, date, latlims, lonlims]:
url = 'https://openaltimetry.org/data/api/icesat2/atlXX?'
url += 'date={date}&minx={minx}&miny={miny}&maxx={maxx}&maxy={maxy}&trackId={track}&beamName={beam}'.format(
date=date,minx=lonlims[0],miny=latlims[0],maxx=lonlims[1],maxy=latlims[1],track=track,beam=beam)
url += '&outputFormat=json&client=jupyter'
self.url = url
self.date = date
self.track = track
self.beam = beam
self.latlims = latlims
self.lonlims = lonlims
if verbose:
print('OpenAltimetry API URL:', self.url)
print('Date:', self.date)
print('Track:', self.track)
print('Beam:', self.beam)
print('Latitude limits:', self.latlims)
print('Longitude limits:', self.lonlims)
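# Usage example (a sketch; the track, date and bounding-box values are hypothetical):
#     dc = dataCollector(beam='gt2l', track=736, date='2019-06-01',
#                        latlims=[63.4, 63.6], lonlims=[-145.8, -145.6])
#     dc.requestData(verbose=True)
#     dc.plotData(title='my ICESat-2 transect')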
def requestData(self, verbose=False):
if verbose:
print('---> requesting ATL03 data...',end='')
product = 'atl03'
request_url = self.url.replace('atlXX',product)
data = requests.get(request_url).json()
lat, lon, h, confs = [], [], [], []
for beam in data:
for confidence in beam['series']:
for p in confidence['data']:
confs.append(confidence['name'])
lat.append(p[0])
lon.append(p[1])
h.append(p[2])
self.atl03 = pd.DataFrame(list(zip(lat,lon,h,confs)), columns = ['lat','lon','h','conf'])
if verbose:
print(' Done.')
print('---> requesting ATL06 data...',end='')
product = 'atl06'
request_url = self.url.replace('atlXX',product)
data = requests.get(request_url).json()
self.atl06 = pd.DataFrame(data['series'][0]['lat_lon_elev'], columns = ['lat','lon','h'])
if verbose:
print(' Done.')
print('---> requesting ATL07 data...',end='')
product = 'atl07'
request_url = self.url.replace('atlXX',product)
data = requests.get(request_url).json()
self.atl07 = pd.DataFrame(data['series'][0]['lat_lon_elev'], columns = ['lat','lon','h'])
if verbose:
print(' Done.')
print('---> requesting ATL08 data...',end='')
product = 'atl08'
request_url = self.url.replace('atlXX',product)
data = requests.get(request_url).json()
self.atl08 = pd.DataFrame(data['series'][0]['lat_lon_elev_canopy'], columns = ['lat','lon','h','canopy'])
if verbose:
print(' Done.')
################################################################################################
def plotData(self,ax=None,title='some Data I found on OpenAltimetry',plot_atl07=True,plot_atl08=True):
# get data if not already there
if 'atl03' not in vars(self).keys():
print('Data has not yet been requested from OpenAltimetry yet. Doing this now.')
self.requestData(verbose=True)
axes_not_specified = ax is None
# create the figure and axis
if axes_not_specified:
fig, ax = plt.subplots(figsize=[10,6])
atl03 = ax.scatter(self.atl03.lat, self.atl03.h, s=2, color='black', alpha=0.2, label='ATL03')
atl06, = ax.plot(self.atl06.lat, self.atl06.h, label='ATL06')
if plot_atl07 == True:
atl07, = ax.plot(self.atl07.lat, self.atl07.h, label='ATL07')
if plot_atl08 == True:
atl08, = ax.plot(self.atl08.lat, self.atl08.h, label='ATL08', linestyle='--')
heights = self.atl03.h[self.atl03.conf != 'Noise']
y_min1 = np.min(heights)
y_max1 = np.max(heights)
if plot_atl08 == True:
maxprods = np.nanmax((self.atl06.h.max(), self.atl08.h.max()))
minprods = np.nanmin((self.atl06.h.min(), self.atl08.h.min()))
else:
maxprods = np.nanmax(self.atl06.h.max())
minprods = np.nanmin((self.atl06.h.min(), self.atl07.h.min()))
hrange = maxprods - minprods
y_min2 = minprods - hrange * 0.5
y_max2 = maxprods + hrange * 0.5
y_min = np.nanmin((y_min1, y_min2))
y_max = np.nanmax((y_max1, y_max2))
x_min = self.atl08.lat.min()
x_max = self.atl08.lat.max()
ax.set_xlim((x_min, x_max))
ax.set_ylim((y_min, y_max))
# label the axes
ax.set_title(title)
ax.set_xlabel('latitude')
ax.set_ylabel('elevation in meters')
# add a legend
ax.legend(loc='lower right')
# add some text to provide info on what is plotted
info = 'ICESat-2 track {track:d}-{beam:s} on {date:s}\n({lon:.4f}E, {lat:.4f}N)'.format(track=self.track,
beam=self.beam,
date=self.date,
lon=np.mean(self.lonlims),
lat=np.mean(self.latlims))
infotext = ax.text(0.03, 0.03, info,
horizontalalignment='left',
verticalalignment='bottom',
transform=ax.transAxes,
fontsize=7,
bbox=dict(edgecolor=None, facecolor='white', alpha=0.9, linewidth=0))
if axes_not_specified:
fig.tight_layout()
return fig
else:
return ax
################################################################################################
def plotData_hv(self):
import holoviews as hv
from holoviews import opts
hv.extension('bokeh', 'matplotlib')
confdict = {'Noise': -1.0, 'Buffer': 0.0, 'Low': 1.0, 'Medium': 2.0, 'High': 3.0}
self.atl03['conf_num'] = [confdict[x] for x in self.atl03.conf]
self.atl08['canopy_h'] = self.atl08.h + self.atl08.canopy
atl03scat = hv.Scatter(self.atl03, 'lat', vdims=['h', 'conf_num'], label='ATL03')\
.opts(color='conf_num', alpha=1, cmap='dimgray_r')
atl06line = hv.Curve(self.atl06, 'lat', 'h', label='ATL06')\
.opts(color='r', alpha=0.5, line_width=3)
atl08line = hv.Curve(self.atl08, 'lat', 'h', label='ATL08')\
.opts(color='b', alpha=1, line_width=1)
atl08scat = hv.Scatter(self.atl08, 'lat', 'canopy_h', label='ATL08 Canopy')
atl08scat = atl08scat.opts(alpha=1, color='g', size=4)
hrange = self.atl06.h.max() - self.atl06.h.min()
overlay = (atl03scat * atl06line * atl08line * atl08scat).opts(
height=500,
width=800,
xlabel='latitude',
ylabel='elevation',
title='ICESat-2 track %d %s on %s' % (self.track,self.beam.upper(),self.date),
legend_position='bottom_right',
ylim=(self.atl06.h.min()-hrange, self.atl06.h.max()+hrange),
xlim=(self.atl06.lat.min(), self.atl06.lat.max())
)
return overlay
################################################################################################
def makeGEEmap(self, days_buffer=25):
# get data if not already there
if 'atl03' not in vars(self).keys():
print('Data has not yet been requested from OpenAltimetry yet. Doing this now.')
self.requestData(verbose=True)
def dist_latlon2meters(lat1, lon1, lat2, lon2):
# returns the distance between two lat/lon coordinate points along the earth's surface in meters
R = 6371000
def deg2rad(deg):
return deg * (np.pi/180)
dlat = deg2rad(lat2-lat1)
dlon = deg2rad(lon2-lon1)
a = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(deg2rad(lat1)) * np.cos(deg2rad(lat2)) * np.sin(dlon/2) * np.sin(dlon/2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
return R * c
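# This is the haversine great-circle distance with Earth radius R = 6371 km; as a
# quick sanity check, one degree of longitude at the equator comes out to roughly
# 111.2 km (2 * pi * R / 360).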
lat1, lat2 = self.atl08.lat[0], self.atl08.lat.iloc[-1]
lon1, lon2 = self.atl08.lon[0], self.atl08.lon.iloc[-1]
center_lat = (lat1 + lat2) / 2
center_lon = (lon1 + lon2) / 2
ground_track_length = dist_latlon2meters(lat1, lon1, lat2, lon2)
print('The ground track is %d meters long.' % np.round(ground_track_length))
collection_name1 = 'COPERNICUS/S2_SR' # Sentinel-2 earth engine collection
# https://developers.google.com/earth-engine/datasets/catalog/COPERNICUS_S2_SR
collection_name2 = 'LANDSAT/LC08/C01/T2' # Landsat 8 earth engine collection
# https://developers.google.com/earth-engine/datasets/catalog/LANDSAT_LC08_C01_T2
# Note: Landsat 8 ingestion into Earth Engine seems to not have reached Antarctica yet, so using raw scenes...
# the point of interest (center of the track) as an Earth Engine Geometry
point_of_interest = ee.Geometry.Point(center_lon, center_lat)
def query_scenes(self, days_buffer):
# get the dates
datetime_requested = datetime.strptime(self.date, '%Y-%m-%d')
search_start = (datetime_requested - timedelta(days=days_buffer)).strftime('%Y-%m-%d')
search_end = (datetime_requested + timedelta(days=days_buffer)).strftime('%Y-%m-%d')
print('Search for imagery from {start:s} to {end:s}.'.format(start=search_start, end=search_end))
# the collection to query:
# 1) merge Landsat 8 and Sentinel-2 collections
# 2) filter by acquisition date
# 3) filter by the point of interest
# 4) sort by acquisition date
collection = ee.ImageCollection(collection_name1) \
.merge(ee.ImageCollection(collection_name2)) \
.filterDate(search_start, search_end) \
.filterBounds(point_of_interest) \
.sort('system:time_start')
info = collection.getInfo()
n_imgs = len(info['features'])
print('--> Number of scenes found within +/- %d days of ICESat-2 overpass: %d' % (days_buffer, n_imgs))
return (collection, info, n_imgs)
# query collection for initial days_buffer
collection, info, n_imgs = query_scenes(self, days_buffer)
# if query returns more than 20 images, try to narrow it down
tries = 0
while (n_imgs > 20) & (tries<5):
print('----> This is too many. Narrowing it down...')
days_buffer = np.round(days_buffer * 15 / n_imgs)
collection, info, n_imgs = query_scenes(self, days_buffer)
n_imgs = len(info['features'])
tries += 1
# if query returns no images, then return
if n_imgs < 1:
print('NO SCENES FOUND. Try to widen your search by including more dates.')
return
# region of interest around the ground track (use this area to scale visualization factors)
buffer_around_center_meters = ground_track_length/2
region_of_interest = point_of_interest.buffer(buffer_around_center_meters)
# make an earth engine feature collection from the ground track so we can show it on the map
ground_track_coordinates = list(zip(self.atl08.lon, self.atl08.lat))
ground_track_projection = 'EPSG:4326' # <-- this specifies that our data longitude/latitude in degrees [https://epsg.io/4326]
gtx_feature = ee.FeatureCollection(ee.Geometry.LineString(coords=ground_track_coordinates,
proj=ground_track_projection,
geodesic=True))
Map = geemap.Map(center=(40, -100), zoom=4)
Map.add_basemap('HYBRID')
for i, feature in enumerate(info['features']):
# get the relevant info
thisDate = datetime.fromtimestamp(feature['properties']['system:time_start']/1e3)
dtstr = thisDate.strftime('%Y-%m-%d')
dt = (thisDate - datetime.strptime(self.date, '%Y-%m-%d')).days
ID = feature['id']
rel = 'before' if dt<0 else 'after'
print('%02d: %s (%3d days %s ICESat-2 overpass): %s' % (i, dtstr, np.abs(dt), rel, ID))
# get image by id, and normalize rgb range
image_id = feature['id']
thisScene = ee.Image(image_id)
rgb = thisScene.select('B4', 'B3', 'B2')
rgbmax = rgb.reduce(ee.Reducer.max()).reduceRegion(reducer=ee.Reducer.max(), geometry=region_of_interest, bestEffort=True, maxPixels=1e6)
rgbmin = rgb.reduce(ee.Reducer.min()).reduceRegion(reducer=ee.Reducer.min(), geometry=region_of_interest, bestEffort=True, maxPixels=1e6)
rgb = rgb.unitScale(ee.Number(rgbmin.get('min')), ee.Number(rgbmax.get('max'))).clamp(0.0, 1.0)
# if the image is Landsat 8, then pan-sharpen the image
if 'LANDSAT' in ID:
pan = thisScene.select('B8').unitScale(ee.Number(rgbmin.get('min')), ee.Number(rgbmax.get('max'))).clamp(0.0, 1.0)
huesat = rgb.rgbToHsv().select('hue', 'saturation')
rgb = ee.Image.cat(huesat, pan).hsvToRgb().clamp(0.0, 1.0)
# make the image uint8
rgb = rgb.multiply(255).uint8()
# add to map (only show the first layer, then can toggle others on in map)
show_layer = True if i==0 else False
Map.addLayer(rgb, name='%02d: %d days, %s'%(i,dt,ID), shown=show_layer)
# show ground track on map, and center on our region of interest
Map.addLayer(gtx_feature, {'color': 'red'}, 'ground track')
Map.centerObject(region_of_interest,zoom=11)
return Map
################################################################################################
def plotDataAndMap(self, scene_id, crs='EPSG:3857', title='ICESat-2 Data'):
from utils.curve_intersect import intersection
# get data if not already there
if 'atl03' not in vars(self).keys():
print('Data has not yet been requested from OpenAltimetry yet. Doing this now.')
self.requestData(verbose=True)
# plot the ICESat-2 data
fig = plt.figure(figsize=[12,5])
ax_data = fig.add_subplot(122)
self.plotData(ax_data, title=title)
# get the image and plot
ax_img = fig.add_subplot(121)
def dist_latlon2meters(lat1, lon1, lat2, lon2):
# returns the distance between two lat/lon coordinate points along the earth's surface in meters
R = 6371000
def deg2rad(deg):
return deg * (np.pi/180)
dlat = deg2rad(lat2-lat1)
dlon = deg2rad(lon2-lon1)
a = np.sin(dlat/2) * np.sin(dlat/2) + np.cos(deg2rad(lat1)) * np.cos(deg2rad(lat2)) * np.sin(dlon/2) * np.sin(dlon/2)
c = 2 * np.arctan2(np.sqrt(a), np.sqrt(1-a))
return R * c
lat1, lat2 = self.atl08.lat[0], self.atl08.lat.iloc[-1]
lon1, lon2 = self.atl08.lon[0], self.atl08.lon.iloc[-1]
center_lat = (lat1 + lat2) / 2
center_lon = (lon1 + lon2) / 2
ground_track_length = dist_latlon2meters(lat1, lon1, lat2, lon2)
# the point of interest (center of the track) as an Earth Engine Geometry
point_of_interest = ee.Geometry.Point(center_lon, center_lat)
# region of interest around the ground track (use this area to scale visualization factors)
buffer_around_center_meters = ground_track_length*0.52
region_of_interest = point_of_interest.buffer(buffer_around_center_meters)
thisScene = ee.Image(scene_id)
info = thisScene.getInfo()
# get the relevant info
thisDate = datetime.fromtimestamp(info['properties']['system:time_start']/1e3)
dtstr = thisDate.strftime('%Y-%m-%d')
download_folder = 'downloads/'
download_filename = '%s%s-8bitRGB.tif' % (download_folder, scene_id.replace('/', '-'))
if os.path.exists(download_filename):
print('This file already exists, not downloading again: %s' % download_filename)
else:
# get image by id, and normalize rgb range
rgb = thisScene.select('B4', 'B3', 'B2')
rgbmax = rgb.reduce(ee.Reducer.max()).reduceRegion(reducer=ee.Reducer.max(), geometry=region_of_interest, bestEffort=True, maxPixels=1e6)
rgbmin = rgb.reduce(ee.Reducer.min()).reduceRegion(reducer=ee.Reducer.min(), geometry=region_of_interest, bestEffort=True, maxPixels=1e6)
rgb = rgb.unitScale(ee.Number(rgbmin.get('min')), ee.Number(rgbmax.get('max'))).clamp(0.0, 1.0)
# if the image is Landsat 8, then pan-sharpen the image
if 'LANDSAT' in scene_id:
pan = thisScene.select('B8').unitScale(ee.Number(rgbmin.get('min')), ee.Number(rgbmax.get('max'))).clamp(0.0, 1.0)
huesat = rgb.rgbToHsv().select('hue', 'saturation')
rgb = ee.Image.cat(huesat, pan).hsvToRgb().clamp(0.0, 1.0)
# make the image uint8
rgb = rgb.multiply(255).uint8()
rgb_info = rgb.getInfo()
downloadURL = rgb.getDownloadUrl({'name': 'mySatelliteImage',
'crs': crs,
'scale': rgb_info['bands'][0]['crs_transform'][0],
'region': region_of_interest,
'filePerBand': False,
'format': 'GEO_TIFF'})
response = requests.get(downloadURL)
if not os.path.exists(download_folder): os.makedirs(download_folder)
with open(download_filename, 'wb') as fd:
fd.write(response.content)
print('Downloaded %s' % download_filename)
img = rio.open(download_filename)
plot.show(img, ax=ax_img)
# get the graticule right
latlon_bbox = warp.transform(img.crs, {'init': 'epsg:4326'},
[img.bounds[i] for i in [0,2,2,0,0]],
[img.bounds[i] for i in [1,1,3,3,1]])
min_lat = np.min(latlon_bbox[1])
max_lat = np.max(latlon_bbox[1])
min_lon = np.min(latlon_bbox[0])
max_lon = np.max(latlon_bbox[0])
latdiff = max_lat-min_lat
londiff = max_lon-min_lon
diffs = np.array([0.0001, 0.0002, 0.00025, 0.0004, 0.0005,
0.001, 0.002, 0.0025, 0.004, 0.005,
0.01, 0.02, 0.025, 0.04, 0.05, 0.1, 0.2, 0.25, 0.4, 0.5, 1, 2])
latstep = np.min(diffs[diffs>latdiff/8])
lonstep = np.min(diffs[diffs>londiff/8])
minlat = np.floor(min_lat/latstep)*latstep
maxlat = np.ceil(max_lat/latstep)*latstep
minlon = np.floor(min_lon/lonstep)*lonstep
maxlon = np.ceil(max_lon/lonstep)*lonstep
# plot meridians and parallels
xl = (img.bounds.left, img.bounds.right)
yl = (img.bounds.bottom, img.bounds.top)
meridians = | np.arange(minlon,maxlon, step=lonstep) | numpy.arange |
from dataTool import ReadLabels, ReadXYZ, VisualizePointCloudClassesAsync, modelPath, DataTool
from imports import *
import math
import numpy as np
from time import time
import tensorflow as tf
from tensorflow.keras.models import Model
from tensorflow.keras.utils import Sequence
from tensorflow.keras.layers import Input, BatchNormalization, Dense, Dropout, InputLayer
from sklearn.neighbors import KDTree
from sklearn.metrics import confusion_matrix
from PIL import Image, ImageEnhance, ImageOps
import random
# from notify_run import Notify
class Const:
@staticmethod
def IsWindowsMachine():
if os.path.isdir("C:/Program Files"):
return True
else:
return False
if os.path.isdir("C:/Program Files"):
batchSize = 8
else:
batchSize = 16 #25
#Placeholders
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names
testFiles = []
excludeFiles = []
Paths = Paths.Semantic3D
epochs = 100
pointComponents = 3
featureComponents = 3 #rgb
classCount = 0
npoints = 8192
blocksize = 8
test_step = 0.5
name = ""
#Algorithm configuration
noFeature = False
Fusion = False
Scale = False
Rotate = False
Mirror = False
Jitter = False
FtrAugment = False
logsPath = "./logs"
### MODEL CONFIG
pl = 64
### MODEL CONFIG
def BuildSpecDict(self):
return {"noFeature" : self.noFeature,
"Fusion" : self.Fusion,
"Scale" : self.Scale,
"Rotate" : self.Rotate,
"Mirror" : self.Mirror,
"Jitter" : self.Jitter,
"FtrAugment" : False if self.noFeature else self.FtrAugment,
}
def Name(self, UID = ""):
modelName = self.name
modelName += f"({len(self.TrainFiles())}&{len(self.TestFiles())})"
for spec, value in self.BuildSpecDict().items():
if(value == True):
modelName += f"({spec})"
if(UID != ""):
modelName += f"_{UID}"
return modelName
@staticmethod
def RemoveUID(name : str):
return name.replace(f"_{Const.ParseModelUID(name)}", "")
@staticmethod
def UID():
import uuid
return uuid.uuid4().hex
@staticmethod
def ParseModelConfig(file):
config = Paths.FileName(file).split("_")[0].replace("("," ").replace(")","").replace("vox ","").split(" ")
const = None
if(config[0] == NPM3D.name):
const = NPM3D()
if(config[0] == Semantic3D.name):
const = Semantic3D()
for conf in config[1:]:
if conf == "noFeature" or conf == "NOCOL":
const.noFeature = True
elif conf == "Fusion":
const.Fusion = True
elif conf == "Scale":
const.Scale = True
elif conf == "Rotate":
const.Rotate = True
elif conf == "Mirror":
const.Mirror = True
elif conf == "Jitter":
const.Jitter = True
elif conf == "FtrAugment":
const.FtrAugment = True
return const
@staticmethod
def ParseModelUID(file):
parts = Paths.FileName(file).split("_")
if(len(parts) >= 2):
return parts[1]
else:
return None
@staticmethod
def ParseModelName(file, withUID = True):
parts = Paths.FileName(file, withoutExt = False).split("_")
name = parts[0]
if(withUID and len(parts) > 1):
name += "_"+parts[1]
return name
def TestFiles(self):
return Paths.JoinPaths(self.Paths.processedTrain, self.testFiles)
def TrainFiles(self):
return Paths.GetFiles(self.Paths.processedTrain, excludeFiles = self.TestFiles()+self.excludeFiles)
class Semantic3D(Const):
pointComponents = 3
featureComponents = 3 #rgb
classCount = Label.Semantic3D.Count-1
classNames = Label.Semantic3D.Names
test_step = 0.8
name = "Sem3D"
Paths = Paths.Semantic3D
testFiles = [
"untermaederbrunnen_station3_xyz_intensity_rgb_voxels.npy",
"domfountain_station1_xyz_intensity_rgb_voxels.npy",
]
excludeFiles = []
fileNames = {"birdfountain_station1_xyz_intensity_rgb" : "birdfountain1",
"castleblatten_station1_intensity_rgb" : "castleblatten1",
"castleblatten_station5_xyz_intensity_rgb" : "castleblatten5",
"marketplacefeldkirch_station1_intensity_rgb" : "marketsquarefeldkirch1",
"marketplacefeldkirch_station4_intensity_rgb" : "marketsquarefeldkirch4",
"marketplacefeldkirch_station7_intensity_rgb" : "marketsquarefeldkirch7",
"sg27_station3_intensity_rgb" : "sg27_3",
"sg27_station6_intensity_rgb" : "sg27_6",
"sg27_station8_intensity_rgb" : "sg27_8",
"sg27_station10_intensity_rgb" : "sg27_10",
"sg28_station2_intensity_rgb" : "sg28_2",
"sg28_station5_xyz_intensity_rgb" : "sg28_5",
"stgallencathedral_station1_intensity_rgb" : "stgallencathedral1",
"stgallencathedral_station3_intensity_rgb" : "stgallencathedral3",
"stgallencathedral_station6_intensity_rgb" : "stgallencathedral6",
"MarketplaceFeldkirch_Station4_rgb_intensity-reduced" : "marketsquarefeldkirch4-reduced",
"sg27_station10_rgb_intensity-reduced" : "sg27_10-reduced",
"sg28_Station2_rgb_intensity-reduced" : "sg28_2-reduced",
"StGallenCathedral_station6_rgb_intensity-reduced" : "stgallencathedral6-reduced",
}
class Curbs(Const):
pointComponents = 3
featureComponents = 3
classCount = 2
classNames = Label.Curbs.Names
test_step = 0.5
name = "Curbs"
Paths = Paths.Curbs
if os.path.isdir("C:/Program Files"):
batchSize = 8
else:
batchSize = 25
testFiles = [
"park_extracted.npy",
"Jelskio_str_trimmed.npy",
]
excludeFiles = [
"powerlines_dataset"
]
def FilterCurbAndLineFiles(self, files):
return [file for file in files if not file.endswith("_curbs.npy") and not file.endswith("_lines.npy")]
def TestFiles(self):
return self.FilterCurbAndLineFiles(super(Curbs, self).TestFiles())
def TrainFiles(self):
return self.FilterCurbAndLineFiles(super(Curbs, self).TrainFiles())
class NPM3D(Const):
pointComponents = 3
featureComponents = 1
classCount = Label.NPM3D.Count-1
classNames = Label.NPM3D.Names
test_step = 0.5
name = "NPM3D"
Paths = Paths.NPM3D
testFiles = [
# "Lille1_1_0.npy",
# "Lille1_1_1.npy",
# "Lille1_1_2.npy",
# "Lille1_1_3.npy",
# "Lille1_1_4.npy",
# "Lille1_1_5.npy",
# "Lille1_1_6.npy",
# "Lille1_1_7.npy",
# "Lille1_1_8.npy",
# "Lille1_2_0.npy",
# "Lille1_2_1.npy",
"Lille2_0.npy",
"Lille2_1.npy",
"Lille2_2.npy",
"Lille2_8.npy",
"Lille2_9.npy",
# "Paris_0.npy",
# "Paris_1.npy",
]
excludeFiles = [
# "Lille1_1_7.npy",
# "Lille1_2_2.npy",
"Lille2_10.npy",
# "Paris_2.npy",
]
class WeightsMul(tf.keras.layers.Layer):
def __init__(self, shape, lowBound, highBound, **kwargs):
super(WeightsMul, self).__init__(**kwargs)
self.shape = shape
self.lowBound = lowBound
self.highBound = highBound
def build(self, input_shape):
init = tf.random_uniform_initializer(self.lowBound, self.highBound)
self.vars = self.add_weight(shape=(self.shape),
initializer = init,
trainable = True, dtype=tf.float32)
def call(self, inputs):
return tf.matmul(inputs, self.vars)
def get_config(self):
config = super(WeightsMul, self).get_config()
config.update({'shape': self.shape, 'lowBound': self.lowBound, 'highBound': self.highBound})
return config
class GatherNDLayer(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(GatherNDLayer, self).__init__(**kwargs)
def call(self, array, indices):
return tf.gather_nd(array, indices, batch_dims=1)
def get_config(self):
config = super(GatherNDLayer, self).get_config()
return config
class SubstractCenters(tf.keras.layers.Layer):
def __init__(self, dim, n_centers, **kwargs):
super(SubstractCenters, self).__init__(**kwargs)
self.dim = dim
self.n_centers = n_centers
def build(self, input_shape):
center_data = np.zeros((self.dim, self.n_centers))
for i in range(self.n_centers):
coord = np.random.rand(self.dim)*2 - 1
while (coord**2).sum() > 1:
coord = np.random.rand(self.dim)*2 - 1
center_data[:,i] = coord
self.centers = self.add_weight(shape = (center_data.shape),
initializer = tf.constant_initializer(center_data),
trainable = True, dtype=tf.float32)
def call(self, points):
return points - self.centers
def get_config(self):
config = super(SubstractCenters, self).get_config()
config.update({'dim': self.dim, 'n_centers': self.n_centers})
return config
class UnitBallNormalize(tf.keras.layers.Layer):
def __init__(self, **kwargs):
super(UnitBallNormalize, self).__init__(**kwargs)
def call(self, points):
maxi = tf.sqrt(tf.reduce_max(tf.reduce_sum(tf.square(tf.stop_gradient(points)), axis = 3), axis = 2))
maxi = tf.where(tf.equal(maxi, 0.0), tf.constant(1.0), maxi)
points = points / tf.expand_dims(tf.expand_dims(maxi, 2), 3)
return points
def get_config(self):
config = super(UnitBallNormalize, self).get_config()
return config
def PtConv(fts, points, K, next_pts, in_features, out_features, n_centers = 16):
next_pts_ = None
if isinstance(next_pts, int) and points.get_shape()[1] != next_pts:
# convolution with reduction
indices, next_pts_ = KDTreeSampleLayer(K, next_pts)(points)
elif (next_pts is None) or (isinstance(next_pts, int) and points.get_shape()[1] == next_pts):
# convolution without reduction
indices = KDTreeLayer(K)(points, points)
next_pts_ = points
else:
# convolution with up sampling or projection on given points
indices = KDTreeLayer(K)(points, next_pts)
next_pts_ = next_pts
if next_pts is None or isinstance(next_pts, int):
next_pts = next_pts_
    # get the features and point coordinates associated with the indices
pts = GatherNDLayer()(points, indices)
if fts is None:
features = tf.expand_dims(tf.ones_like(pts[:,:,:,0]), 3)
else:
features = GatherNDLayer()(fts, indices)
# center the neighborhoods
pts = pts - tf.expand_dims(next_pts,2)
# normalize to unit ball, or not
pts = UnitBallNormalize()(pts)
# compute the distances
dists = SubstractCenters(3, n_centers)(tf.expand_dims(pts, 4))
dShape = dists.shape
dists = tf.reshape(dists, (-1, dShape[1], dShape[2], dShape[3]*dShape[4]))
dists = DenseInitialized(2*n_centers, activation="relu")(dists)
dists = DenseInitialized(n_centers, activation="relu")(dists)
dists = DenseInitialized(n_centers, activation="relu")(dists)
# compute features
    fs = features.shape # [batch, points, K (neighbours), in_features]
ds = dists.shape
features = tf.transpose(features,[0, 1, 3, 2])
features = tf.reshape(features, (-1, features.shape[2], features.shape[3])) #features.shape[0]*features.shape[1]
dists = tf.reshape(dists, (-1, dists.shape[2], dists.shape[3])) #dists.shape[0]*dists.shape[1]
features = tf.matmul(features, dists)
features = tf.reshape(features, (-1, ds[1], features.shape[1]*features.shape[2]))
bound = math.sqrt(3.0) * math.sqrt(2.0 / (in_features + out_features))
features = WeightsMul([in_features * n_centers, out_features], -bound, bound)(features)
features = features / fs[2]
# normalization and activation
features = BatchNormalization(epsilon = 1e-05, momentum=0.9)(features)
features = tf.nn.relu(features)
return features, next_pts
def LinearInitializer(k):
k = np.sqrt(1.0/float(k))
return tf.random_uniform_initializer(k*-1, k)
def DenseInitialized(out_features, activation = None, name = None):
def DenseInit(x):
return Dense(out_features,
kernel_initializer = tf.initializers.lecun_normal(),
bias_initializer = tf.initializers.lecun_normal(),
activation = activation,
name = name,
)(x)
return DenseInit
def CreateModel(classCount, ftsComp, in_fts = None, in_pts = None, returnFeatures = False, noColor = False, applySoftmax = True):
print("Creating new model...")
if(in_fts is None and in_pts is None):
in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32) #points
if(noColor):
in_fts = None
else:
            in_fts = Input(shape=(Const.npoints, ftsComp), dtype=tf.float32) # features
if(noColor):
in_fts = None
pl = Const.pl
### Down Sample
x0, _ = PtConv(in_fts, in_pts, K = 16, next_pts = None, in_features = ftsComp, out_features = pl)
x1, pts1 = PtConv(x0, in_pts, K = 16, next_pts = 2048, in_features = pl, out_features = pl)
x2, pts2 = PtConv(x1, pts1, K = 16, next_pts = 1024, in_features = pl, out_features = pl)
x3, pts3 = PtConv(x2, pts2, K = 16, next_pts = 256, in_features = pl, out_features = pl)
x4, pts4 = PtConv(x3, pts3, K = 8, next_pts = 64, in_features = pl, out_features = pl*2)
x5, pts5 = PtConv(x4, pts4, K = 8, next_pts = 16, in_features = pl*2, out_features = pl*2)
x6, pts6 = PtConv(x5, pts5, K = 4, next_pts = 8, in_features = pl*2, out_features = pl*2)
## Up Sample
x5d, _ = PtConv(x6, pts6, K = 4, next_pts = pts5, in_features = pl*2, out_features = pl*2)
x5d = tf.concat([x5d, x5], axis = 2)
x4d, _ = PtConv(x5d, pts5, K = 4, next_pts = pts4, in_features = pl*4, out_features = pl*2)
x4d = tf.concat([x4d, x4], axis = 2)
x3d, _ = PtConv(x4d, pts4, K = 4, next_pts = pts3, in_features = pl*4, out_features = pl)
x3d = tf.concat([x3d, x3], axis = 2)
x2d, _ = PtConv(x3d, pts3, K = 8, next_pts = pts2, in_features = pl*2, out_features = pl)
x2d = tf.concat([x2d, x2], axis = 2)
x1d, _ = PtConv(x2d, pts2, K = 8, next_pts = pts1, in_features = pl*2, out_features = pl)
x1d = tf.concat([x1d, x1], axis = 2)
x0d, _ = PtConv(x1d, pts1, K = 8, next_pts = in_pts, in_features = pl*2, out_features = pl)
x0d = tf.concat([x0d, x0], axis = 2)
### Output layer
out_labels = Dropout(rate=0.5)(x0d)
out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]))
out_labels = DenseInitialized(classCount)(out_labels)
out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))
if(applySoftmax):
out_labels = tf.nn.softmax(out_labels)
if(noColor):
inputList = [in_pts]
else:
inputList = [in_fts, in_pts]
if(returnFeatures):
return Model(inputList, [x0d, out_labels], name ="model")
model = Model(inputList, out_labels, name ="model")
model = CompileModel(model, classCount)
# print(model.summary())
return model
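# Illustrative usage (assumption: this helper is not part of the original training code
# and is never called on import). It mirrors how TrainModel() further down instantiates
# the network for the Semantic3D configuration (RGB features, class 0 filtered out).
def ExampleCreateSemantic3DModel():
    cfg = Semantic3D()
    return CreateModel(cfg.classCount, cfg.featureComponents, noColor=cfg.noFeature)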
def ModifyModelOutput(model, classCount):
dropoutLayer = model.layers[len(model.layers)-5] #take output of the drop out layer
out_labels = dropoutLayer.output
out_labels = tf.reshape(out_labels, (-1, out_labels.shape[2]), name = "lbl_reshape_1")
out_labels = DenseInitialized(classCount, name = "lbl_dense")(out_labels)
out_labels = tf.reshape(out_labels, (-1, dropoutLayer.input.shape[1], out_labels.shape[1]), name = "lbl_reshape_2")
out_labels = tf.nn.softmax(out_labels, name = "lbl_softmax")
return Model(model.inputs, out_labels, name ="model")
def ReadModel(modelPath):
if(not modelPath.endswith(".h5")):
modelPath += ".h5"
if(not os.path.exists(modelPath)):
if(os.path.exists(os.path.join("." , "data", modelPath))):
modelPath = os.path.join("." , "data", modelPath)
elif(os.path.exists(os.path.join("." , "data", Const.ParseModelName(modelPath, False)))):
file = os.path.basename(modelPath)
folder = os.path.join("." , "data", Const.ParseModelName(modelPath, False))
modelPath = os.path.join(folder, file)
elif(os.path.exists(os.path.join("." , "data", Const.ParseModelName(modelPath)))):
file = os.path.basename(modelPath)
folder = os.path.join("." , "data", Const.ParseModelName(modelPath))
modelPath = os.path.join(folder, file)
if(not os.path.exists(modelPath)):
raise FileNotFoundError
model = tf.keras.models.load_model(modelPath, compile=False,
custom_objects={'NearestNeighborsLayer': NearestNeighborsLayer,
'SampleNearestNeighborsLayer': SampleNearestNeighborsLayer,
'SubstractCenters': SubstractCenters,
'WeightsMul': WeightsMul,
'GatherNDLayer':GatherNDLayer,
'UnitBallNormalize':UnitBallNormalize,
'KDTreeSampleLayer':KDTreeSampleLayer,
'KDTreeLayer':KDTreeLayer,
})
PrintToLog("{} model loaded".format(modelPath))
return model
def LatestModel(path):
if(Const.ParseModelUID(path) is None):
folders = [os.path.join("." , "data",folder) for folder in os.listdir(os.path.join("." , "data"))
if os.path.isdir(os.path.join("." , "data",folder))
and path == Const.RemoveUID(Const.ParseModelName(folder))
and len(Paths.GetFiles(os.path.join("." , "data",folder), findExtesions=".h5")) > 0]
path = max(folders, key=os.path.getctime)
else:
path = os.path.join("." , "data", Const.ParseModelName(path))
try:
latestModel = max(Paths.GetFiles(path, findExtesions=".h5"), key=os.path.getctime)
except:
print(f"No model found in: {path}")
latestModel = None
return latestModel
import re
def ModelValMIOU(path):
result = re.findall("val\((.+)\)", path)
return float(result[0])
def HighestValMIOUModel(path):
if(not os.path.isdir(path)):
path = os.path.join("." , "data", os.path.basename(path).split("_")[0])
latestModel = max(Paths.GetFiles(path, findExtesions=".h5"), key=ModelValMIOU)
return latestModel
def LoadModel(modelPath, consts):
model = ReadModel(modelPath)
modified = False
if(model.output.shape[2] != consts.classCount):
print("Model output {} classes changed to {}".format(model.output.shape[2], consts.classCount))
modified = True
model = ModifyModelOutput(model, consts.classCount)
model = CompileModel(model, consts.classCount)
# model.summary()
return model, modified
def ReadModelConfig(path):
Model = ReadModel(path)
modelConfig = Const.ParseModelConfig(path)
return Model, modelConfig
def CreateModelCopy(Model, modelConfig, in_pts, in_RGB):
inputFeatures = 1 if modelConfig.noFeature else modelConfig.featureComponents
newModel = CreateModel(modelConfig.classCount, inputFeatures, in_RGB, in_pts, noColor=modelConfig.noFeature, returnFeatures=True, applySoftmax=False)
if(Model != None):
for new_layer, layer in zip(newModel.layers, Model.layers):
new_layer.set_weights(layer.get_weights())
return newModel
def FuseModels(modelPaths, consts):
fusionModel = None
    assert(modelPaths is None or len(modelPaths) == 2)
print("Model fusion")
if(not modelPaths is None):
ModelA, modelAConfig = ReadModelConfig(modelPaths[0])
ModelB, modelBConfig = ReadModelConfig(modelPaths[1])
else:
consts.noFeature = False
modelAConfig = consts
consts.noFeature = True
modelBConfig = consts
in_RGB = None
if(not modelAConfig.noFeature or not modelBConfig.noFeature):
in_RGB = Input(shape=(Const.npoints, consts.featureComponents), dtype=tf.float32, name = "In_RGB") #features
in_pts = Input(shape=(Const.npoints, Const.pointComponents), dtype=tf.float32, name = "In_pts") #points
newModelA = CreateModelCopy(ModelA, modelAConfig, in_pts, in_RGB)
newModelB = CreateModelCopy(ModelB, modelBConfig, in_pts, in_RGB)
x = tf.concat((newModelA.output[0], newModelB.output[0]), axis = 2) #fuse features from both models
x1, _ = PtConv(x, in_pts, K = 16, next_pts = Const.npoints, in_features = 2*128, out_features = 96)
x2, _ = PtConv(x1, in_pts, K = 16, next_pts = Const.npoints, in_features = 96, out_features = 48)
x0d = tf.concat([x2, newModelA.output[1], newModelB.output[1]], axis = 2)
out_labels = tf.reshape(x0d, (-1, x0d.shape[2]))
out_labels = Dropout(rate=0.5)(out_labels)
out_labels = DenseInitialized(consts.classCount)(out_labels)
out_labels = tf.reshape(out_labels, (-1, x0d.shape[1], out_labels.shape[1]))
out_labels = tf.nn.softmax(out_labels)
fusionModel = Model([in_pts] if in_RGB is None else [in_RGB, in_pts], out_labels, name ="model")
nontrainableNames = [x.name for x in newModelA.layers] + [x.name for x in newModelB.layers]
# nontrainableNames = [x.name for x in newModelA.layers]
count = 0
for i, layer in enumerate(fusionModel.layers):
if(layer.name in nontrainableNames):
layer.trainable = False
count += 1
PrintToLog(f"{len(fusionModel.layers)-count}/{len(fusionModel.layers)} layers are trainable.")
fusionModel = CompileModel(fusionModel, consts.classCount)
# fusionModel.summary()
return fusionModel
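# Illustrative usage (assumption: the checkpoint file names below are hypothetical).
# Fusion combines a colour model with a geometry-only (noFeature) model trained on the
# same dataset; this helper is only documentation and is not called anywhere in this file.
def ExampleFuseModels():
    consts = Semantic3D()
    consts.Fusion = True
    rgb_checkpoint = "Sem3D_rgb_model.h5"  # hypothetical path
    nocolor_checkpoint = "Sem3D_noFeature_model.h5"  # hypothetical path
    return FuseModels([rgb_checkpoint, nocolor_checkpoint], consts)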
class MIOU(tf.keras.metrics.Metric):
def __init__(self, classCount, name='miou', **kwargs):
super(MIOU, self).__init__(name=name, **kwargs)
self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)
self.classCount = classCount
def update_state(self, y_true, y_pred, sample_weight=None):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)
confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)
self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))
def result(self):
union = tf.linalg.diag_part(self.cm)
rowSum = tf.math.reduce_sum(self.cm, axis = 0)
colSum = tf.math.reduce_sum(self.cm, axis = 1)
intersection = (colSum + rowSum - union)
intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)
iou = union / intersection
miou = tf.expand_dims(tf.convert_to_tensor(tf.reduce_sum(iou) / tf.cast(iou.shape[0], dtype=np.float64)), 0)
return tf.concat((tf.expand_dims(miou,1), tf.cast(tf.expand_dims(iou,1), tf.float64)), 0)
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))
def moving_miou_metric(classCount):
def moving_iou(y_true, y_pred):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, classCount]), axis= 1)
cm = tf.math.confusion_matrix(TrueLbl, PredLbl, classCount)
union = tf.linalg.diag_part(cm)
rowSum = tf.math.reduce_sum(cm, axis = 0)
colSum = tf.math.reduce_sum(cm, axis = 1)
intersection = (colSum + rowSum - union)+1
iou = union / intersection
return tf.reduce_sum(iou) / tf.cast(tf.math.maximum(iou.shape[0], 1), dtype=np.float64)
return moving_iou
class IOU(tf.keras.metrics.Metric):
def __init__(self, classCount, classIndex, name='iou', **kwargs):
super(IOU, self).__init__(name=name, **kwargs)
self.cm = self.add_weight(name=name, shape = (classCount, classCount), initializer='zeros', dtype = tf.int64)
self.classCount = classCount
self.classIndex = classIndex
def update_state(self, y_true, y_pred, sample_weight=None):
TrueLbl = tf.argmax(tf.reshape(y_true, [-1, self.classCount]), axis= 1)
PredLbl = tf.argmax(tf.reshape(y_pred, [-1, self.classCount]), axis= 1)
confusion_matrix = tf.math.confusion_matrix(TrueLbl, PredLbl, self.classCount)
self.cm.assign_add(tf.cast(confusion_matrix, tf.int64))
def result(self):
union = tf.linalg.diag_part(self.cm)
rowSum = tf.math.reduce_sum(self.cm, axis = 0)
colSum = tf.math.reduce_sum(self.cm, axis = 1)
intersection = (colSum + rowSum - union)
intersection = tf.where(tf.equal(intersection, tf.constant(0, dtype=tf.int64)), tf.constant(1, dtype=tf.int64), intersection)
iou = union / intersection
return tf.cast(tf.expand_dims(iou, 1)[self.classIndex], tf.float64)
def reset_states(self):
# The state of the metric will be reset at the start of each epoch.
self.cm.assign(tf.zeros((self.classCount, self.classCount), dtype=tf.int64))
def weighted_categorical_crossentropy(weights):
# weights = [0.9,0.05,0.04,0.01]
def wcce(y_true, y_pred):
Kweights = tf.constant(weights)
y_true = tf.cast(y_true, y_pred.dtype)
return tf.keras.losses.categorical_crossentropy(y_true, y_pred) * tf.math.reduce_sum(y_true * Kweights, axis=-1)
return wcce
def CompileModel(model, classCount):
model.compile(
optimizer = tf.keras.optimizers.Adam(learning_rate=1e-3, epsilon = 1e-8),
loss = tf.keras.losses.CategoricalCrossentropy(),
# loss = weighted_categorical_crossentropy([0.7, 5]),
metrics= [IOU(classCount, 0, name="other"), IOU(classCount, 1, name="curb")] if classCount == 2 else [MIOU(classCount)]
)
return model
class IOUPerClass(tf.keras.callbacks.Callback):
def __init__(self, plot_path, classNames, firstEpoch = 0, metric = "miou"):
self.metric = metric
self.epoch = firstEpoch
self.classCount = len(classNames)
self.classNames = classNames
self.path = plot_path
print(f"IOU logs path: {self.path}")
self.writers = []
self.val_writers = []
ioupath = os.path.join(plot_path, "iou")
os.makedirs(ioupath, exist_ok=True)
for i in range(self.classCount):
path = os.path.join(ioupath, classNames[i])
os.makedirs(path, exist_ok=True)
self.writers.append(tf.summary.create_file_writer(path))
path = os.path.join(ioupath, "val_"+classNames[i])
os.makedirs(path, exist_ok=True)
self.val_writers.append(tf.summary.create_file_writer(path))
# print("Writer path: ", path)
self.InitializeMIOUWriter()
def InitializeMIOUWriter(self):
mioupath = os.path.join(self.path, "miou")
os.makedirs(mioupath, exist_ok=True)
path = os.path.join(mioupath, "miou")
os.makedirs(path, exist_ok=True)
self.miou_writer = tf.summary.create_file_writer(path)
path = os.path.join(mioupath, "val_miou")
os.makedirs(path, exist_ok=True)
self.val_miou_writer = tf.summary.create_file_writer(path)
def WriteLog(self, writer, metric, logs, epoch, tag = "miou"):
value = logs.get(metric)
if(value is None):
print(f"Failed getting {metric} log")
return False
with writer.as_default():
tf.summary.scalar(tag, value[0][0], step=epoch)
writer.flush()
def WriteLogs(self, writers, metric, logs, epoch, tag = "iou"):
metrix = logs.get(metric)
if(metrix is None):
print(f"Failed getting {metric} log")
return False
iou = [i[0] for i in metrix[len(metrix)-self.classCount:]]
for i in range(len(iou)):
with writers[i].as_default():
tf.summary.scalar(tag, iou[i], step=epoch)
writers[i].flush()
def on_epoch_end(self, batch, logs=None):
self.WriteLogs(self.writers, self.metric, logs, self.epoch)
self.WriteLogs(self.val_writers, "val_"+self.metric, logs, self.epoch)
self.WriteLog(self.miou_writer, self.metric, logs, self.epoch)
self.WriteLog(self.val_miou_writer, "val_"+self.metric, logs, self.epoch)
self.epoch += 1
logSaveDir = ""
def WriteToLog(msg):
if(os.path.isdir(logSaveDir)):
logFile = open(logSaveDir+f"/training.log", "a")
logFile.write(msg+"\n")
logFile.close()
def PrintToLog(msg):
print(msg)
WriteToLog(msg)
class ModelSaveCallback(tf.keras.callbacks.Callback):
def __init__(self, saveDir, trainingSteps, metric = "accuracy", modelNamePrefix = "", sendNotifications = False):
super().__init__()
self.saveDir = saveDir
self.metric = metric
self.modelNamePrefix = modelNamePrefix
self.epoch = 0
self.trainingSteps = trainingSteps
self.sendNotifications = sendNotifications
if(self.sendNotifications):
self.notifyDevice = Notify()
os.makedirs(self.saveDir, exist_ok=True)
WriteToLog(f"Training: {modelNamePrefix}")
def on_epoch_end(self, epoch, logs=None):
self.epoch = epoch + 1
if(len(logs) > 0):
miou = logs.get(self.metric)[0]*100
val_metric = "val_"+self.metric
val_miou = logs.get(val_metric)[0]*100
SaveModel(self.saveDir, epoch, self.model, miou, val_miou, self.modelNamePrefix)
message = "Ep: {0}. {1}: {2:.3}%. {3}: {4:.3}%".format(self.epoch, self.metric, miou, val_metric, val_miou)
WriteToLog(message)
f = open("demofile3.txt", "w")
f.write("Woops! I have deleted the content!")
f.close()
if(self.sendNotifications):
try:
self.notifyDevice.send(self.modelNamePrefix + " " + message)
except:
print("notifyDevice error")
# def on_batch_end(self, batch, logs=None):
# progress = batch/self.trainingSteps * 100
# if(progress % 10 == 0):
# try:
# message = "Ep. {0} {1}% done. {2}: {3:.3}%".format(self.epoch+1, int(progress), self.metric, logs.get(self.metric)*100)
# self.notifyDevice.send(message)
# except:
# print("notifyDevice error")
def ParseEpoch(modelPath):
filename = os.path.basename(modelPath)
return int(filename.split("_")[2])
def GetValidationData(testFiles, consts, batchesCount = 100, newDataGeneration = False):
print("Gathering validation data...")
print(f"Test files: {testFiles}")
if(newDataGeneration):
PrintToLog("Use TestSequence for validation.")
assert(len(testFiles) == 1)
seq = TestSequence(testFiles[0], consts, test = True)
else:
PrintToLog("Use TrainSequence for validation.")
seq = TrainSequence(testFiles, batchesCount, consts, dataAugmentation = False)
if not consts.noFeature:
ftsList = np.zeros((0, consts.npoints, consts.featureComponents), np.float32)
ptsList = np.zeros((0, consts.npoints, 3), np.float32)
lbsList = np.zeros((0, consts.npoints, consts.classCount), np.uint8)
if(newDataGeneration):
indexes = np.arange(min(batchesCount, len(seq)))
np.random.shuffle(indexes)
else:
indexes = range(batchesCount)
for i in indexes:
if consts.noFeature:
if(newDataGeneration):
ptslbl = seq.__getitem__(i)
else:
pts, lbl = seq.__getitem__(i)
ptslbl = [pts[0], lbl]
ptsList = np.concatenate((ptsList, ptslbl[0]), 0)
lbsList = np.concatenate((lbsList, ptslbl[1]), 0)
else:
if(newDataGeneration):
ftsptslbl = seq.__getitem__(i)
else:
ftspts, lbl = seq.__getitem__(i)
ftsptslbl = [ftspts[0], ftspts[1], lbl]
ftsList = np.concatenate((ftsList, ftsptslbl[0]), 0)
ptsList = np.concatenate((ptsList, ftsptslbl[1]), 0)
lbsList = np.concatenate((lbsList, ftsptslbl[2]), 0)
PrintToLog(f"Generated {len(lbsList)} validation samples.")
if consts.noFeature:
return (ptsList, lbsList)
else:
return ([ftsList, ptsList], lbsList)
def TrainModel(trainFiles, testFiles, consts : Const, modelPath = None, saveDir = Paths.dataPath, classes = None, first_epoch = 0, epochs = None, sendNotifications = False):
model = None
modelName = None
if(modelPath != None):
if(not isinstance(modelPath, list)):
modelName = Const.ParseModelName(modelPath)
if(consts.Name() != Const.RemoveUID(modelName)):
modelName = consts.Name(consts.UID())
logSaveDir = saveDir + f"/{modelName}/"
if(isinstance(modelPath, list)):
model = FuseModels(modelPath, consts)
else:
model, modified = LoadModel(modelPath, consts)
if(not modified):
first_epoch = ParseEpoch(modelPath) +1
else:
if(consts.Fusion):
model = FuseModels(None, consts)
else:
model = CreateModel(consts.classCount, 1 if consts.noFeature else consts.featureComponents, noColor=consts.noFeature)
if(modelName is None or modelName == ""):
modelName = consts.Name(consts.UID())
logSaveDir = saveDir + f"/{modelName}/"
PrintToLog("Train {} on {} files. Test on {} files".format(modelName, len(trainFiles), len(testFiles)))
PrintToLog("Validate on :" + str(testFiles))
trainingSteps = int((1000*16)/consts.batchSize) if not Const.IsWindowsMachine() else int(10)
PrintToLog("Batch size: {}, trainingSteps: {}".format(consts.batchSize, trainingSteps))
logsPath = os.path.join(consts.logsPath, Const.RemoveUID(modelName))
os.makedirs(logsPath, exist_ok=True)
callbacks_list = []
callbacks_list.append(ModelSaveCallback(logSaveDir, trainingSteps, "curb", modelNamePrefix = modelName, sendNotifications=sendNotifications))
# callbacks_list.append(IOUPerClass(logsPath, consts.classNames[1:], first_epoch+1))
# callbacks_list.append(tf.keras.callbacks.TensorBoard(log_dir=logsPath, update_freq="batch", histogram_freq=0, profile_batch = 0)) # tensorboard 2.0.2
seq = TrainSequence(trainFiles, trainingSteps, consts)
validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)
validationData = None if len(testFiles) == 0 else GetValidationData(testFiles, consts, validationSteps)
if(epochs is None):
epochs = 20 if consts.Fusion else 100
model.fit(seq, validation_data = validationData, epochs = epochs, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300, callbacks=callbacks_list, initial_epoch = first_epoch)
def EvaluateModels(modelsList, testFiles, consts, x = None, y = None):
if(x is None or y is None):
validationSteps = int(((150 if not Const.IsWindowsMachine() else 10) * 16)/consts.batchSize)
x, y = GetValidationData(testFiles, consts, validationSteps, newDataGeneration = False)
for file in modelsList:
model, _ = LoadModel(file, consts)
metrics = model.evaluate(x, y, batch_size = consts.batchSize, workers = consts.batchSize, max_queue_size = 300)
# print(f"miou: {metrics[2][0][0]*100:.3}")
def SaveModel(saveDir, epoch, model, train_score, val_score=0, modelNamePrefix = ""):
if(modelNamePrefix != ""):
modelNamePrefix += "_"
fileName = saveDir+"/{0}{1}{2}{3}.h5".format(modelNamePrefix, epoch, f"_train({train_score:.3})", f"_val({val_score:.3})" if val_score != 0 else "")
if(not os.path.isdir(saveDir)):
os.mkdir(saveDir)
if(os.path.exists(fileName)):
os.remove(fileName)
model.save(fileName, include_optimizer=False)
def RotatePointCloud(batch_data):
""" Randomly rotate the point clouds to augument the dataset
rotation is per shape based along up direction
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, rotated batch of point clouds
"""
rotation_angle = np.random.uniform() * 2 * np.pi
cosval = np.cos(rotation_angle)
sinval = np.sin(rotation_angle)
rotation_matrix = np.array([[cosval, sinval, 0],
[-sinval, cosval, 0],
[0, 0, 1],])
return np.dot(batch_data, rotation_matrix)
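# Quick sanity check (illustrative only, not part of the original pipeline): the rotation
# above is about the vertical axis, so per-point xy-norms and z-coordinates are preserved.
def _check_rotation_preserves_geometry(n_points=128):
    pts = np.random.rand(n_points, 3)
    rotated = RotatePointCloud(pts)
    assert np.allclose(np.linalg.norm(pts[:, :2], axis=1),
                       np.linalg.norm(rotated[:, :2], axis=1))
    assert np.allclose(pts[:, 2], rotated[:, 2])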
def JitterRGB(features):
features = features.astype(np.uint8)
assert(np.max(features) > 1)
img = Image.fromarray(np.expand_dims(features,0), mode="RGB")
low = 0.4
high = 1.6
#1 is baseline
img = ImageEnhance.Brightness(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Color(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Contrast(img).enhance(np.random.uniform(low, high))
img = ImageEnhance.Sharpness(img).enhance(np.random.uniform(low, high))
if(np.random.uniform(low, high) > 1):
img = ImageOps.equalize(img)
if(np.random.uniform(low, high) > 1):
img = ImageOps.autocontrast(img)
new_features = np.array(img).reshape((-1, 3))
return new_features
def JitterReflectance(features, sigma=40): #input [0; 255]
assert(features.shape[1] == 1)
randJitters = np.random.randint(-sigma, sigma, size = features.shape)
features += randJitters
features = np.clip(features, 0, 255)
return features
def JitterPoints(points, sigma=0.01):
""" Randomly jitter points. jittering is per point.
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, jittered batch of point clouds
"""
C = 3
assert(points.shape[1] == C)
randJitters = np.random.uniform(-sigma, sigma, size = points.shape)
return points + randJitters
def Mirror(points, axis, min = True):
if(min):
axisValue = np.amin(points[:,axis])
else:
axisValue = np.amax(points[:,axis])
distances = np.abs(points[:, axis] - axisValue)
newpoints = np.array(points, copy=True)
newpoints[:,axis] = newpoints[:,axis] + distances*(-2 if min else 2)
return newpoints
def MirrorPoints(points):
assert(len(points.shape) == 2 and points.shape[1] == 3)
mirrorDirection = random.choice(["xMin", "xMax", "yMin", "yMax", ""])
if(mirrorDirection == "xMin"):
points = Mirror(points, 0, min = True)
elif(mirrorDirection == "xMax"):
points = Mirror(points, 0, min = False)
elif(mirrorDirection == "yMin"):
points = Mirror(points, 1, min = True)
elif(mirrorDirection == "yMax"):
points = Mirror(points, 1, min = False)
return points
def ScalePoints(points, sigma = 0.02):
""" Scale up or down random by small percentage
Input:
BxNx3 array, original batch of point clouds
Return:
BxNx3 array, scaled batch of point clouds
"""
assert(points.shape[1]==3)
scale = np.random.uniform(1-sigma, 1+sigma)
scale_matrix = np.array([[scale, 0, 0],
[0, scale, 0],
[0, 0, scale]])
scaled = np.dot(points, scale_matrix)
return scaled
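# Minimal sketch (assumption: mirrors the augmentation order used by TrainSequence below,
# but is not called by it) of how the helpers above can be chained on one block of points
# with optional uint8 RGB features in [0, 255].
def ExampleAugmentBlock(points, features=None):
    points = MirrorPoints(points)
    points = RotatePointCloud(points)
    points = ScalePoints(points, sigma=0.02)
    points = JitterPoints(points, sigma=0.01)
    if features is not None:
        features = JitterRGB(features)
    return points, features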
class TrainSequence(Sequence):
def __init__(self, filelist, iteration_number, consts : Const, dataAugmentation = True):
self.filelist = filelist
self.ptsList = [np.load(file) for file in self.filelist]
self.ptsList = sorted(self.ptsList, key=len)
self.ptsListCount = np.cumsum([len(pts) for pts in self.ptsList])
self.cts = consts
self.dataAugmentation = dataAugmentation
self.iterations = iteration_number
def __len__(self):
return int(self.iterations)
def PickRandomPoint(self, lbl):
lblIdx = []
while True:
randClass = random.randint(0, self.cts.classCount-1)
lblIdx = np.where(lbl == randClass)[0]
if(len(lblIdx) >= 2):
break
return lblIdx[random.randint(0, len(lblIdx)-1)]
def __getitem__(self, _):
if not self.cts.noFeature:
ftsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.featureComponents), np.float32)
ptsList = np.zeros((self.cts.batchSize, self.cts.npoints, 3), np.float32)
lbsList = np.zeros((self.cts.batchSize, self.cts.npoints, self.cts.classCount), np.uint8)
for i in range(self.cts.batchSize):
# load the data
ptIdx = random.randint(0, self.ptsListCount[-1])
pts = self.ptsList[np.argmax(self.ptsListCount >= ptIdx)]
# if(self.cts.featureComponents == 1):
# keepPts = (pts[:, 4] != 0)
# else:
# keepPts = (pts[:, 6] != 0)
# pts = pts[keepPts]
# get the features
if(self.cts.featureComponents == 1):
if not self.cts.noFeature:
fts = np.expand_dims(pts[:,3], 1).astype(np.float32)
lbs = pts[:,4].astype(int)
else:
if not self.cts.noFeature:
fts = pts[:,3:6].astype(np.float32)
lbs = pts[:,6].astype(int)
if(np.min(lbs) == 1):
lbs -= 1 #class 0 is filtered out
# get the point coordinates
pts = pts[:, :3]
# pick a random point
pt_id = random.randint(0, pts.shape[0]-1)
pt = pts[pt_id]
# create the mask
mask_x = np.logical_and(pts[:,0]<pt[0]+self.cts.blocksize/2, pts[:,0]>pt[0]-self.cts.blocksize/2)
mask_y = np.logical_and(pts[:,1]<pt[1]+self.cts.blocksize/2, pts[:,1]>pt[1]-self.cts.blocksize/2)
mask = np.logical_and(mask_x, mask_y)
temppts = pts[mask]
templbs = lbs[mask]
if not self.cts.noFeature:
tempfts = fts[mask]
# random selection
choice = np.random.choice(temppts.shape[0], self.cts.npoints, replace=True)
temppts = temppts[choice]
if not self.cts.noFeature:
tempfts = tempfts[choice]
templbs = templbs[choice]
encodedLbs = np.zeros((len(templbs), self.cts.classCount))
encodedLbs[np.arange(len(templbs)),templbs] = 1
templbs = encodedLbs
# if self.dataAugmentation:
# dt = DataTool()
# dt.VisualizePointCloudAsync([temppts], [tempfts/255])
# data augmentation
if self.dataAugmentation:
if(self.cts.Mirror):
temppts = MirrorPoints(temppts)
if(self.cts.Rotate):
temppts = RotatePointCloud(temppts)
if(self.cts.Scale):
temppts = ScalePoints(temppts, sigma = 0.02)
if(self.cts.Jitter):
temppts = JitterPoints(temppts, sigma = 0.01)
if(not self.cts.noFeature and self.cts.FtrAugment):
if(self.cts.featureComponents == 3):
tempfts = JitterRGB(tempfts)
elif(self.cts.featureComponents == 1):
tempfts = JitterReflectance(tempfts)
if(not self.cts.noFeature):
tempfts = tempfts.astype(np.float32)
tempfts = tempfts/255 # - 0.5
# if self.dataAugmentation:
# # visualize data
# dt = DataTool()
# dt.VisualizePointCloud([temppts], [tempfts], windowName = "Augmented")
# linePoints = np.where(templbs[:, 1] == 1)[0]
# DataTool().VisualizePointCloud([np.delete(temppts, linePoints, axis=0), temppts[linePoints]], [[0,0,1], [1,0,0]], windowName="Sampled")
if not self.cts.noFeature:
ftsList[i] = np.expand_dims(tempfts, 0)
ptsList[i] = np.expand_dims(temppts, 0)
lbsList[i] = np.expand_dims(templbs, 0)
if self.cts.noFeature:
return [ptsList], lbsList
else: # works for RGB and fusion models
return [ftsList, ptsList], lbsList
class TestSequence(Sequence):
def __init__(self, filename, consts, splitDataSetToParts = -1, windowsMachineCap = True, test = False):
self.filename = filename
self.batchSize = consts.batchSize
self.npoints = consts.npoints
self.nocolor = consts.noFeature
self.bs = consts.blocksize
self.featureComponents = consts.featureComponents
self.fusion = consts.Fusion
self.test = test
if(self.test):
self.classCount = consts.classCount
self.lbl = []
if(self.filename.endswith(".ply")):
from plyfile import PlyData
plydata = PlyData.read(self.filename)
x = plydata["vertex"].data["x"].astype(np.float32)
y = plydata["vertex"].data["y"].astype(np.float32)
z = plydata["vertex"].data["z"].astype(np.float32)
fts = plydata["vertex"].data["reflectance"].astype(np.float32)
self.xyzrgb = np.concatenate((np.expand_dims(x,1), np.expand_dims(y,1), np.expand_dims(z,1), np.expand_dims(fts, 1)), axis=1)
elif(self.filename.endswith(".npy")):
xyzftsl = np.load(self.filename)
if(xyzftsl.shape[1] == 5):
self.xyzrgb = xyzftsl[:, :4]
if(self.test):
self.lbl = xyzftsl[:, 4] - 1
else: #if(xyzftsl.shape[1] == 7):
self.xyzrgb = xyzftsl[:, :6]
if(self.test):
self.lbl = xyzftsl[:, 6] - 1
elif(self.filename.endswith(".las")):
from dataTool import ReadXYZRGB
xyz, rgb = ReadXYZRGB(self.filename)
self.xyzrgb = np.concatenate((xyz, rgb), 1)
print("Test_step:", consts.test_step)
step = consts.test_step
discretized = ((self.xyzrgb[:,:2]).astype(float)/step).astype(int)
self.allpts = np.unique(discretized, axis=0)
        self.allpts = self.allpts.astype(float)*step  # np.float was removed in recent NumPy versions
if(consts.IsWindowsMachine() and windowsMachineCap):
self.allpts = self.allpts[:115] #small sample for testing
self.splitDataSetToParts = splitDataSetToParts
if(self.splitDataSetToParts != -1):
self.ptIndex = 0
else:
self.pts = self.allpts
self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)
self.sparseCubes = 0
self.sparseCubesPtCount = 0
def LenParts(self):
if(self.splitDataSetToParts != -1):
return math.ceil(len(self.allpts)/self.splitDataSetToParts)
else:
return 1
def NextPart(self):
if(self.splitDataSetToParts <= 0):
return False
if(self.ptIndex >= len(self.allpts)):
return False
self.nextIndex = np.min([self.ptIndex+self.splitDataSetToParts, len(self.allpts)])
self.pts = self.allpts[self.ptIndex : self.nextIndex]
self.ptIndex = self.nextIndex
self.idxList = np.zeros((len(self.pts), self.npoints), np.int64)
return True
def __len__(self):
return math.ceil(len(self.pts)/self.batchSize)
def compute_mask(self, pt, bs):
# build the mask
mask_x = np.logical_and(self.xyzrgb[:,0]<pt[0]+bs/2, self.xyzrgb[:,0]>pt[0]-bs/2)
mask_y = np.logical_and(self.xyzrgb[:,1]<pt[1]+bs/2, self.xyzrgb[:,1]>pt[1]-bs/2)
mask = np.logical_and(mask_x, mask_y)
return mask
def __getitem__(self, index):
size = min(self.batchSize, len(self.pts) - (index * self.batchSize))
if not self.nocolor:
ftsList = np.zeros((size, self.npoints, self.featureComponents), np.float32)
ptsList = | np.zeros((size, self.npoints, 3), np.float32) | numpy.zeros |
import numpy as np
class NaiveBayes:
def fit(self,X,y):
n_samples, n_features = X.shape
self._classes = np.unique(y)
n_classes = len(self._classes)
self._mean = np.zeros((n_classes, n_features),dtype=np.float64)
self._var = np.zeros((n_classes, n_features), dtype=np.float64)
self._priors = np.zeros(n_classes, dtype=np.float64)
        # index by class position so labels are not required to be 0..n_classes-1
        for idx, c in enumerate(self._classes):
            X_c = X[y == c]
            self._mean[idx, :] = X_c.mean(axis=0)
            self._var[idx, :] = X_c.var(axis=0)
            self._priors[idx] = X_c.shape[0] / float(n_samples)
def predict(self,X):
y_pred = [self._predict(x) for x in X]
return y_pred
    def _predict(self, x):
        posteriors = []
        for idx, c in enumerate(self._classes):
            prior = np.log(self._priors[idx])
            # work in log space: sum of log-likelihoods, not a raw product added to a log prior
            class_conditional = np.sum(np.log(self.calculate(idx, x)))
            posterior = prior + class_conditional
            posteriors.append(posterior)
        return self._classes[np.argmax(posteriors)]
    def calculate(self, class_idx, x):
        # Gaussian class-conditional likelihood (assumed implementation; the original
        # method body was not included in this file)
        mean = self._mean[class_idx]
        var = self._var[class_idx]
        numerator = np.exp(-((x - mean) ** 2) / (2 * var))
        denominator = np.sqrt(2 * np.pi * var)
        return numerator / denominator
#!/usr/bin/env python3
# -*- coding: utf-8 -*-
# BCDI: tools for pre(post)-processing Bragg coherent X-ray diffraction imaging data
# (c) 07/2017-06/2019 : CNRS UMR 7344 IM2NP
# (c) 07/2019-present : DESY PHOTON SCIENCE
# authors:
# <NAME>, <EMAIL>
import datetime
import gc
import sys
import time
import tkinter as tk
from tkinter import filedialog
import numpy as np
from matplotlib import pyplot as plt
from skimage.feature import peak_local_max
import bcdi.graph.graph_utils as gu
import bcdi.postprocessing.postprocessing_utils as pu
import bcdi.simulation.simulation_utils as simu
import bcdi.utils.utilities as util
from bcdi.experiment.detector import create_detector
from bcdi.graph.colormap import ColormapFactory
helptext = """
Calculate the position of the Bragg peaks for a mesocrystal given the lattice type,
the unit cell parameter and beamline-related parameters. Assign 3D Gaussians to each
lattice point and rotate the unit cell in order to maximize the cross-correlation of
the simulated data with the experimental data. The experimental data should be sparse
(using a photon threshold), and Bragg peak maxima must be clearly identifiable.
Laboratory frame convention (CXI): z downstream, y vertical up, x outboard.
Reciprocal space basis: qx downstream, qz vertical up, qy outboard.
"""
datadir = "D:/data/P10_March2020_CDI/test_april/data/align_06_00248/pynx/"
savedir = "D:/data/P10_March2020_CDI/test_april/data/align_06_00248/simu/"
comment = "" # should start with _
################
# sample setup #
################
unitcell = "fcc" # supported unit cells: 'cubic', 'bcc', 'fcc', 'bct'
# It can be a number or tuple of numbers depending on the unit cell.
unitcell_ranges = [22.9, 22.9] # in nm, values of the unit cell parameters to test
# cubic, FCC or BCC unit cells: [start, stop].
# BCT unit cell: [start1, stop1, start2, stop2] (stop is included)
unitcell_step = 0.05 # in nm
#########################
# unit cell orientation #
#########################
angles_ranges = [
-45,
-45,
-45,
45,
-45,
45,
] # [start, stop, start, stop, start, stop], in degrees
# ranges to span for the rotation around qx downstream, qz vertical up and
# qy outboard respectively (stop is included)
angular_step = 5 # in degrees
#######################
# beamline parameters #
#######################
sdd = 4.95 # in m, sample to detector distance
energy = 8250 # in ev X-ray energy
##################
# detector setup #
##################
detector = "Eiger4M" # "Eiger2M" or "Maxipix" or "Eiger4M"
direct_beam = (
1303,
1127,
) # tuple of int (vertical, horizontal): position of the direct beam in pixels
# this parameter is important for gridding the data onto the laboratory frame
roi_detector = []
# [direct_beam[0] - 972, direct_beam[0] + 972,
# direct_beam[1] - 883, direct_beam[1] + 883]
# [Vstart, Vstop, Hstart, Hstop], leave [] to use the full detector
binning = [4, 4, 4] # binning of the detector
##########################
# peak detection options #
##########################
photon_threshold = 1000 # intensity below this value will be set to 0
min_distance = 50 # minimum distance between Bragg peaks in pixels
peak_width = 0 # the total width will be (2*peak_width+1)
###########
# options #
###########
kernel_length = 11 # width of the 3D gaussian window
debug = True # True to see more plots
correct_background = False # True to create a 3D background
bckg_method = "normalize" # 'subtract' or 'normalize'
##################################
# end of user-defined parameters #
##################################
#######################
# Initialize detector #
#######################
detector = create_detector(name=detector, binning=binning, roi=roi_detector)
###################
# define colormap #
###################
bad_color = "1.0" # white background
my_cmap = ColormapFactory(bad_color=bad_color).cmap
plt.ion()
###################################
# load experimental data and mask #
###################################
root = tk.Tk()
root.withdraw()
file_path = filedialog.askopenfilename(
initialdir=datadir, title="Select the data to fit", filetypes=[("NPZ", "*.npz")]
)
data = np.load(file_path)["data"]
nz, ny, nx = data.shape
print(
"Sparsity of the data:",
str("{:.2f}".format((data == 0).sum() / (nz * ny * nx) * 100)),
"%",
)
try:
file_path = filedialog.askopenfilename(
initialdir=datadir, title="Select the mask", filetypes=[("NPZ", "*.npz")]
)
mask = np.load(file_path)["mask"]
data[np.nonzero(mask)] = 0
del mask
gc.collect()
except FileNotFoundError:
pass
try:
file_path = filedialog.askopenfilename(
initialdir=datadir, title="Select q values", filetypes=[("NPZ", "*.npz")]
)
exp_qvalues = np.load(file_path)
qvalues_flag = True
except FileNotFoundError:
exp_qvalues = None
qvalues_flag = False
##########################
# apply photon threshold #
##########################
data[data < photon_threshold] = 0
print(
"Sparsity of the data after photon threshold:",
str("{:.2f}".format((data == 0).sum() / (nz * ny * nx) * 100)),
"%",
)
######################
# calculate q values #
######################
if unitcell == "bct":
pivot, _, q_values, _, _ = simu.lattice(
energy=energy,
sdd=sdd,
direct_beam=direct_beam,
detector=detector,
unitcell=unitcell,
unitcell_param=[unitcell_ranges[0], unitcell_ranges[2]],
euler_angles=[0, 0, 0],
offset_indices=True,
)
else:
pivot, _, q_values, _, _ = simu.lattice(
energy=energy,
sdd=sdd,
direct_beam=direct_beam,
detector=detector,
unitcell=unitcell,
unitcell_param=unitcell_ranges[0],
euler_angles=[0, 0, 0],
offset_indices=True,
)
nbz, nby, nbx = len(q_values[0]), len(q_values[1]), len(q_values[2])
comment = (
comment
+ str(nbz)
+ "_"
+ str(nby)
+ "_"
+ str(nbx)
+ "_"
+ str(binning[0])
+ "_"
+ str(binning[1])
+ "_"
+ str(binning[2])
)
if (nbz != nz) or (nby != ny) or (nbx != nx):
print(
"The experimental data and calculated q values have different shape,"
' check "roi_detector" parameter!'
)
sys.exit()
print("Origin of the reciprocal space at pixel", pivot)
##########################
# plot experimental data #
##########################
if debug:
gu.multislices_plot(
data,
sum_frames=True,
title="data",
vmin=0,
vmax=np.log10(data).max(),
scale="log",
plot_colorbar=True,
cmap=my_cmap,
is_orthogonal=True,
reciprocal_space=True,
)
if qvalues_flag:
gu.contour_slices(
data,
q_coordinates=(exp_qvalues["qx"], exp_qvalues["qz"], exp_qvalues["qy"]),
sum_frames=True,
title="Experimental data",
levels=np.linspace(0, np.log10(data.max()) + 1, 20, endpoint=False),
scale="log",
plot_colorbar=False,
is_orthogonal=True,
reciprocal_space=True,
)
else:
gu.contour_slices(
data,
q_coordinates=q_values,
sum_frames=True,
title="Experimental data",
levels=np.linspace(0, np.log10(data.max()) + 1, 20, endpoint=False),
scale="log",
plot_colorbar=False,
is_orthogonal=True,
reciprocal_space=True,
)
################################################
# remove background from the experimental data #
################################################
if correct_background:
file_path = filedialog.askopenfilename(
initialdir=datadir,
title="Select the 1D background file",
filetypes=[("NPZ", "*.npz")],
)
avg_background = np.load(file_path)["background"]
distances = np.load(file_path)["distances"]
if qvalues_flag:
data = util.remove_avg_background(
array=data,
avg_background=avg_background,
avg_qvalues=distances,
q_values=(exp_qvalues["qx"], exp_qvalues["qz"], exp_qvalues["qy"]),
method=bckg_method,
)
else:
print("Using calculated q values for background subtraction")
data = util.remove_avg_background(
array=data,
q_values=q_values,
avg_background=avg_background,
avg_qvalues=distances,
method=bckg_method,
)
np.savez_compressed(datadir + "data-background_" + comment + ".npz", data=data)
gu.multislices_plot(
data,
sum_frames=True,
title="Background subtracted data",
vmin=0,
vmax=np.log10(data).max(),
scale="log",
plot_colorbar=True,
cmap=my_cmap,
is_orthogonal=True,
reciprocal_space=True,
)
#############################################
# find Bragg peaks in the experimental data #
#############################################
density_map = np.copy(data)
# find peaks
local_maxi = peak_local_max(
density_map, exclude_border=False, min_distance=min_distance, indices=True
)
nb_peaks = local_maxi.shape[0]
print("Number of Bragg peaks isolated:", nb_peaks)
print("Bragg peaks positions:")
print(local_maxi)
density_map[:] = 0
for idx in range(nb_peaks):
piz, piy, pix = local_maxi[idx]
density_map[
piz - peak_width : piz + peak_width + 1,
piy - peak_width : piy + peak_width + 1,
pix - peak_width : pix + peak_width + 1,
] = 1
nonzero_indices = np.nonzero(density_map)
bragg_peaks = density_map[
nonzero_indices
] # 1D array of length: nb_peaks*(2*peak_width+1)**3
if debug:
gu.multislices_plot(
density_map,
sum_frames=True,
title="Bragg peaks positions",
slice_position=pivot,
vmin=0,
vmax=1,
scale="linear",
cmap=my_cmap,
is_orthogonal=True,
reciprocal_space=True,
)
plt.pause(0.1)
#########################
# define the peak shape #
#########################
peak_shape = pu.blackman_window(
shape=(kernel_length, kernel_length, kernel_length), normalization=100
)
#####################################
# define the list of angles to test #
#####################################
# np.linspace requires an integer num, so round and cast explicitly
angles_qx = np.linspace(
    start=angles_ranges[0],
    stop=angles_ranges[1],
    num=max(1, int(np.rint((angles_ranges[1] - angles_ranges[0]) / angular_step)) + 1),
)
angles_qz = np.linspace(
    start=angles_ranges[2],
    stop=angles_ranges[3],
    num=max(1, int(np.rint((angles_ranges[3] - angles_ranges[2]) / angular_step)) + 1),
)
angles_qy = np.linspace(
    start=angles_ranges[4],
    stop=angles_ranges[5],
    num=max(1, int(np.rint((angles_ranges[5] - angles_ranges[4]) / angular_step)) + 1),
)
nb_angles = len(angles_qx) * len(angles_qz) * len(angles_qy)
print("Number of angles to test: ", nb_angles)
####################################################
# loop over rotation angles and lattice parameters #
####################################################
start = time.time()
if unitcell == "bct":
    a_values = np.linspace(
        start=unitcell_ranges[0],
        stop=unitcell_ranges[1],
        num=max(
            1, int(np.rint((unitcell_ranges[1] - unitcell_ranges[0]) / unitcell_step)) + 1
        ),
    )
    c_values = np.linspace(
        start=unitcell_ranges[2],
        stop=unitcell_ranges[3],
        num=max(
            1, int(np.rint((unitcell_ranges[3] - unitcell_ranges[2]) / unitcell_step)) + 1
        ),
    )
nb_lattices = len(a_values) * len(c_values)
print("Number of lattice parameters to test: ", nb_lattices)
print("Total number of iterations: ", nb_angles * nb_lattices)
corr = np.zeros(
(len(angles_qx), len(angles_qz), len(angles_qy), len(a_values), len(c_values))
)
for idz, alpha in enumerate(angles_qx):
for idy, beta in enumerate(angles_qz):
for idx, gamma in enumerate(angles_qy):
for idw, a in enumerate(a_values):
for idv, c in enumerate(c_values):
_, _, _, rot_lattice, _ = simu.lattice(
energy=energy,
sdd=sdd,
direct_beam=direct_beam,
detector=detector,
unitcell=unitcell,
unitcell_param=(a, c),
euler_angles=(alpha, beta, gamma),
offset_indices=False,
)
# peaks in the format [[h, l, k], ...]:
# CXI convention downstream , vertical up, outboard
# assign the peak shape to each lattice point
struct_array = simu.assign_peakshape(
array_shape=(nbz, nby, nbx),
lattice_list=rot_lattice,
peak_shape=peak_shape,
pivot=pivot,
)
# calculate the correlation between experimental data
# and simulated data
corr[idz, idy, idx, idw, idv] = np.multiply(
bragg_peaks, struct_array[nonzero_indices]
).sum()
else:
    a_values = np.linspace(
        start=unitcell_ranges[0],
        stop=unitcell_ranges[1],
        num=max(
            1, int(np.rint((unitcell_ranges[1] - unitcell_ranges[0]) / unitcell_step)) + 1
        ),
    )
nb_lattices = len(a_values)
print("Number of lattice parameters to test: ", nb_lattices)
print("Total number of iterations: ", nb_angles * nb_lattices)
corr = np.zeros((len(angles_qx), len(angles_qz), len(angles_qy), len(a_values)))
for idz, alpha in enumerate(angles_qx):
for idy, beta in enumerate(angles_qz):
for idx, gamma in enumerate(angles_qy):
for idw, a in enumerate(a_values):
_, _, _, rot_lattice, _ = simu.lattice(
energy=energy,
sdd=sdd,
direct_beam=direct_beam,
detector=detector,
unitcell=unitcell,
unitcell_param=a,
euler_angles=(alpha, beta, gamma),
offset_indices=False,
)
# peaks in the format [[h, l, k], ...]:
# CXI convention downstream , vertical up, outboard
# assign the peak shape to each lattice point
struct_array = simu.assign_peakshape(
array_shape=(nbz, nby, nbx),
lattice_list=rot_lattice,
peak_shape=peak_shape,
pivot=pivot,
)
# calculate the correlation between experimental data
# and simulated data
corr[idz, idy, idx, idw] = np.multiply(
bragg_peaks, struct_array[nonzero_indices]
).sum()
end = time.time()
print(
"\nTime ellapsed in the loop over angles and lattice parameters:",
str(datetime.timedelta(seconds=int(end - start))),
)
##########################################
# plot the correlation matrix at maximum #
##########################################
comment = comment + "_" + unitcell
if unitcell == "bct": # corr is 5D
piz, piy, pix, piw, piv = np.unravel_index(abs(corr).argmax(), corr.shape)
alpha, beta, gamma = angles_qx[piz], angles_qz[piy], angles_qy[pix]
best_param = a_values[piw], c_values[piv]
text = (
unitcell
+ " unit cell of parameter(s) = {:.2f} nm, {:.2f}".format(
best_param[0], best_param[1]
)
+ " nm"
)
print(
"Maximum correlation for (angle_qx, angle_qz, angle_qy) = "
"{:.2f}, {:.2f}, {:.2f}".format(alpha, beta, gamma)
)
print("Maximum correlation for a", text)
corr_angles = np.copy(corr[:, :, :, piw, piv])
corr_lattice = np.copy(corr[piz, piy, pix, :, :])
vmin = corr_lattice.min()
vmax = 1.1 * corr_lattice.max()
save_lattice = True
if all(corr_lattice.shape[idx] > 1 for idx in range(corr_lattice.ndim)): # 2D
fig, ax = plt.subplots(nrows=1, ncols=1)
plt0 = ax.contourf(
c_values,
a_values,
corr_lattice,
np.linspace(vmin, vmax, 20, endpoint=False),
cmap=my_cmap,
)
plt.colorbar(plt0, ax=ax)
ax.set_ylabel("a parameter (nm)")
ax.set_xlabel("c parameter (nm)")
ax.set_title("Correlation map for lattice parameters")
else: # 1D or 0D
nonzero_dim = np.nonzero(np.asarray(corr_lattice.shape) != 1)[0]
if len(nonzero_dim) == 0: # 0D
print("The unit cell lattice parameters are not scanned")
save_lattice = False
else: # 1D
corr_lattice = np.squeeze(corr_lattice)
labels = ["a parameter (nm)", "c parameter (nm)"]
fig = plt.figure()
if nonzero_dim[0] == 0:
plt.plot(a_values, corr_lattice, ".-r")
else: # index 1
plt.plot(c_values, corr_lattice, ".-r")
plt.xlabel(labels[nonzero_dim[0]])
plt.ylabel("Correlation")
plt.pause(0.1)
if save_lattice:
plt.savefig(
savedir
+ "correlation_lattice_"
+ comment
+ "_param a={:.2f}nm,c={:.2f}nm".format(best_param[0], best_param[1])
+ ".png"
)
else: # corr is 4D
piz, piy, pix, piw = np.unravel_index(abs(corr).argmax(), corr.shape)
alpha, beta, gamma = angles_qx[piz], angles_qz[piy], angles_qy[pix]
best_param = a_values[piw]
text = (
unitcell
+ " unit cell of parameter = "
+ str("{:.2f}".format(best_param))
+ " nm"
)
print(
"Maximum correlation for (angle_qx, angle_qz, angle_qy) = "
"{:.2f}, {:.2f}, {:.2f}".format(alpha, beta, gamma)
)
print("Maximum correlation for a", text)
corr_angles = np.copy(corr[:, :, :, piw])
corr_lattice = np.copy(corr[piz, piy, pix, :])
fig = plt.figure()
plt.plot(a_values, corr_lattice, ".r")
plt.xlabel("a parameter (nm)")
plt.ylabel("Correlation")
plt.pause(0.1)
plt.savefig(
savedir
+ "correlation_lattice_"
+ comment
+ "_param a={:.2f}nm".format(best_param)
+ ".png"
)
vmin = corr_angles.min()
vmax = 1.1 * corr_angles.max()
save_angles = True
if all(corr_angles.shape[idx] > 1 for idx in range(corr_angles.ndim)): # 3D
fig, _, _ = gu.contour_slices(
corr_angles,
(angles_qx, angles_qz, angles_qy),
sum_frames=False,
title="Correlation map for rotation angles",
slice_position=[piz, piy, pix],
plot_colorbar=True,
        levels=np.linspace(vmin, vmax, 20, endpoint=False),
    )
import unittest
from unittest.mock import Mock, MagicMock, patch, call, mock_open
from lattice_mc import init_lattice
from lattice_mc.lattice_site import Site
from lattice_mc.lattice import Lattice
import numpy as np
#TODO write integration tests for creating lattices
class InitLatticeTestCase( unittest.TestCase ):
"""Test for Specific Lattice initialisation routines"""
@patch( 'lattice_mc.lattice_site.Site' )
@patch( 'lattice_mc.lattice.Lattice' )
def test_square_lattice( self, mock_lattice, mock_site ):
mock_site.side_effect = range(2*3)
mock_lattice.return_value = 'foo'
a = 2
b = 3
spacing = 1.0
lattice = init_lattice.square_lattice( a, b, spacing )
expected_site_calls = [ [ 1, np.array([ 0., 0., 0.]), [2, 2, 5, 3], 0.0, 'L' ],
[ 2, np.array([ 1., 0., 0.]), [1, 1, 6, 4], 0.0, 'L' ],
                                [ 3, np.array([ 0., 1., 0.])
import numpy as np
class MemsZonalReconstructor(object):
THRESHOLD_RMS = 0.25 # threshold for wf rms to select actuators outside the specified mask
def __init__(self, cmask, ifs_stroke, ifs):
self._cmask = cmask
self._ifs = ifs
self._check_input_mask()
self._ifs[:].mask = cmask
self._ifs_stroke = ifs_stroke
self._num_of_acts = ifs.shape[0]
self._normalize_influence_function()
self._reset()
def _reset(self):
self._acts_in_pupil = None
self._im = None
self._rec = None
def _compute_all(self):
self._build_valid_actuators_list()
self._build_interaction_matrix()
self._build_reconstruction_matrix_via_pinv()
def _get_svd(self):
self.u, self.s, self.vh = np.linalg.svd(
self.interaction_matrix, full_matrices=False)
def _check_input_mask(self):
'''
Checks cmask dimensions
True if cmask is fully inscribed in the ifs mask
'''
        assert self.mask.shape == self._ifs[
            0].shape, f"cmask does not have the same shape as the ifs mask!\nGot:{self.mask.shape}\nShould be:{self._ifs[0].shape}"
ifs_mask = self._ifs[0].mask
intersection_mask = np.ma.mask_or(ifs_mask, self.mask)
assert (intersection_mask == self.mask).all(
) == True, "input mask is not valid!\nShould be fully inscribed in the ifs mask!"
def _normalize_influence_function(self):
        # normalize to the push-pull stroke or to the maximum recorded in the pixel
self._normalized_ifs = self._ifs[:] / self._ifs_stroke
def _build_interaction_matrix(self):
self._im = np.column_stack([self._normalized_ifs[act][self.mask == False]
for act in self.selected_actuators])
@property
def interaction_matrix(self):
if self._im is None:
self._build_interaction_matrix()
return self._im
def _build_reconstruction_matrix_via_pinv(self):
        self._rec = np.linalg.pinv(self.interaction_matrix)
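        # The reconstructor is the Moore-Penrose pseudo-inverse of the interaction
        # matrix, so it maps wavefront samples inside cmask back to coefficients of
        # the selected actuators' normalized influence functions.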
# <NAME>, Imaging Biomarkers and Computer-Aided Diagnosis Laboratory,
# National Institutes of Health Clinical Center, July 2019
"""Procedure in the demo mode"""
import os
import numpy as np
from time import time
import torch
import nibabel as nib
from tqdm import tqdm
import cv2
from openpyxl import load_workbook
import matplotlib.pyplot as plt
import pandas as pd
from maskrcnn.config import cfg
from maskrcnn.data.datasets.load_ct_img import load_prep_img
from maskrcnn.structures.image_list import to_image_list
from maskrcnn.data.datasets.evaluation.DeepLesion.post_process import post_process_results
from maskrcnn.data.datasets.load_ct_img import windowing, windowing_rev
from maskrcnn.utils.draw import draw_results
def exec_model(model):
"""test model on user-provided data, instead of the preset DeepLesion dataset"""
import_tag_data()
model.eval()
device = torch.device(cfg.MODEL.DEVICE)
#while True:
#info = "Please input the path of a nifti CT volume >> "
#while True:
#path = input(info)
# ------- Zhoubing 100 datasets -------
# for num in range(12):
# if num + 1 < 10:
# img_num = 'img000' + str(num + 1)
# #data_dir = '/nfs/masi/leeh43/MULAN_universal_lesion_analysis/results'
# #img_dir = '_nfs_masi_leeh43_zhoubing100_img_' + img_num + '.nii.gz/'
# #result = os.path.join(data_dir, img_dir + 'results.txt' )
# main_dir = '/nfs/masi/leeh43/zhoubing100/img/'
# img_dir = os.path.join(main_dir, img_num + '.nii.gz')
#
# if num + 1 >= 10 and num + 1 < 100:
# img_num = 'img00' + str(num + 1)
# main_dir = '/nfs/masi/leeh43/zhoubing100/img/'
# img_dir = os.path.join(main_dir, img_num + '.nii.gz')
#
# if num + 1 == 100:
# img_num = 'img0' + str(num + 1)
# main_dir = '/nfs/masi/leeh43/zhoubing100/img/'
# img_dir = os.path.join(main_dir, img_num + '.nii.gz')
# if not os.path.exists(img_dir):
# print('file does not exist!')
# continue
# #try:
# ------- ImageVU B Datasets -------
data_dir = os.path.join('/nfs/masi/tangy5/ImageVU_B_bpr_pipeline/INPUTS/cropped/images')
count = 0
for item in os.listdir(data_dir):
img_dir = os.path.join(data_dir, item)
print('reading image ...')
nifti_data = nib.load(img_dir)
count = count + 1
print('Number of Datasets: %d' % count)
print('Load Image: %s' % img_dir)
#break
#except:
#print('load nifti file error!')
while True:
win_sel = '1' #input('Window to show, 1:soft tissue, 2:lung, 3: bone >> ')
if win_sel not in ['1', '2', '3']:
continue
win_show = [[-175, 275], [-1500, 500], [-500, 1300]]
win_show = win_show[int(win_sel)-1]
break
vol, spacing, slice_intv = load_preprocess_nifti(nifti_data)
slice_num_per_run = max(1, int(float(cfg.TEST.TEST_SLICE_INTV_MM)/slice_intv+.5))
num_total_slice = vol.shape[2]
total_time = 0
imageVU_dir = 'ImageVU_B_result'
output_dir = os.path.join(cfg.RESULTS_DIR,imageVU_dir,img_dir.replace(os.sep, '_'))
if not os.path.exists(output_dir):
os.mkdir(output_dir)
slices_to_process = range(int(slice_num_per_run/2), num_total_slice, slice_num_per_run)
msgs_all = []
print('predicting ...')
for slice_idx in tqdm(slices_to_process):
log_file_s = os.path.join(output_dir, 'slice_' + str(slice_idx) + '_resize_shape.csv')
log_file_c = os.path.join(output_dir, 'slice_' + str(slice_idx) + '_contour_location.csv')
log_file_r = os.path.join(output_dir, 'slice_' + str(slice_idx) + '_recist_location.csv')
log_file_mask = os.path.join(output_dir, 'slice_' + str(slice_idx) + '_mask_c.csv')
mask_list = []
ims, im_np, im_scale, crop, mask_list = get_ims(slice_idx, vol, spacing, slice_intv, mask_list)
im_list = to_image_list(ims, cfg.DATALOADER.SIZE_DIVISIBILITY).to(device)
start_time = time()
with torch.no_grad():
result = model(im_list)
result = [o.to("cpu") for o in result]
df_resize = pd.DataFrame()
df_contours = pd.DataFrame()
df_recists = pd.DataFrame()
df_mask = pd.DataFrame()
shape_0, shape_1 = [], []
cour_list1, cour_list2 = [], []
recist_list1, recist_list2 = [], []
info = {'spacing': spacing, 'im_scale': im_scale}
post_process_results(result[0], info)
total_time += time() - start_time
output_fn = os.path.join(output_dir, '%d.png'%(slice_idx+1))
real_slice_num = slice_idx + 1
#contour_list.append('Slice_'+str(real_slice_num))
#recist_list.append(('Slice_'+str(real_slice_num)))
shape_0.append(im_np.shape[0])
shape_1.append(im_np.shape[1])
overlay, msgs = gen_output(im_np, result[0], info, win_show, cour_list1, cour_list2, recist_list1, recist_list2)
df_resize['Shape_0'] = shape_0
df_resize['Shape_1'] = shape_1
df_contours['list1'] = cour_list1
df_contours['list2'] = cour_list2
df_recists['list1'] = recist_list1
df_mask['c'] = mask_list
df_resize.to_csv(log_file_s, index=False)
df_contours.to_csv(log_file_c, index = False)
df_recists.to_csv(log_file_r, index = False)
df_mask.to_csv(log_file_mask, index = False)
cv2.imwrite(output_fn, overlay)
msgs_all.append('slice %d\r\n' % (slice_idx+1))
for msg in msgs:
msgs_all.append(msg+'\r\n')
msgs_all.append('\r\n')
#np.savetxt(log_file_c, cour_list1, cour_list2, delimiter=',', fmt='%s')
with open(os.path.join(output_dir, 'results.txt'), 'w') as f:
f.writelines(msgs_all)
print('result images and text saved to', output_dir)
print('processing time: %d ms per slice' % int(1000.*total_time/len(slices_to_process)))
def import_tag_data():
cellname = lambda row, col: '%s%d' % (chr(ord('A') + col - 1), row)
fn = os.path.join(cfg.PROGDAT_DIR, '%s_%s.xlsx' % ('test_handlabeled', cfg.EXP_NAME))
wb = load_workbook(fn)
sheet = wb.get_active_sheet()
tags = []
    thresholds = []
    for p in range(2, sheet.max_row):
        tags.append(sheet[cellname(p, 1)].value)
        thresholds.append(float(sheet[cellname(p, 8)].value))
    assert tags == cfg.runtime_info.tag_list
    cfg.runtime_info.tag_sel_val = torch.tensor(thresholds).to(torch.float)
def load_preprocess_nifti(data):
vol = (data.get_data().astype('int32') + 32768).astype('uint16') # to be consistent with png files
# spacing = -data.get_affine()[0,1]
# slice_intv = -data.get_affine()[2,2]
aff = data.get_affine()[:3, :3]
spacing = np.abs(aff[:2, :2]).max()
slice_intv = np.abs(aff[2, 2])
# TODO: Ad-hoc code for normalizing the orientation of the volume.
# The aim is to make vol[:,:,i] an supine right-left slice
# It works for the authors' data, but maybe not suitable for some kinds of nifti files
    if np.abs(aff[0, 0])
import numpy as np
from PTSS import PtssJoint as ptssjnt
from PTSS import Ptss as ptss
from CFNS import CyFns as cfns
# define the 4th order Runge-Kutta algorithm.
class RK4:
def rk4(self, y0, dy, step):
k1 = step * dy
k2 = step * (dy + 1 / 2 * k1)
k3 = step * (dy + 1 / 2 * k2)
k4 = step * (dy + k3)
y1 = y0 + 1 / 6 * (k1 + 2 * k2 + 2 * k3 + k4)
return y1
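# Note: in rk4 above the slope dy is passed in as a fixed value and is not
# re-evaluated at the intermediate stages, unlike the textbook Runge-Kutta scheme
# where the derivative function is called again when forming k2, k3 and k4.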
# list the functions used in the modules
class FNS:
def __init__(self):
self.cfns = cfns()
# ____________________________________________________________________________________________________________
# Common Functions
# threshold at some value
def thresh_fn(self, x, thresh):
return np.sign(x - thresh) * (x - thresh) * self.indic_fn(x - thresh)
# bound within some interval
def bound_fn(self, x, thresh):
rightbd = np.heaviside(thresh - x, 0)
        out = x * rightbd + thresh * ((rightbd + 1) % 2)
leftbd = np.heaviside(out - -thresh, 0)
out = out * leftbd + -thresh * ((leftbd + 1) % 2)
return out
# cutoff outside some interval
def cutoff_fn(self, x, thresh):
rightbd = np.heaviside(x - thresh, 0)
rightout = x * rightbd
leftbd = np.heaviside(-x - thresh, 0)
leftout = x * leftbd
return rightout + leftout
# check at some value
def delta_fn(self, x, a):
if np.all(x == a) == True:
return 1
else:
return 0
# check within some interval
def cond_fn(self, x, a):
out = np.heaviside(a - x, 0) * np.heaviside(x - -a, 0)
return out
# check at some index
def index_fn(self, j, i, b, a):
return 1 - self.delta_fn(j, b) * self.delta_fn(i, a)
# check sign at zero
def indic_fn(self, x):
return np.heaviside(x, 0)
# binary sampling function
def sample_fn(self, x, thresh):
return 1 * self.indic_fn(x - thresh)
# sigmoid sampling function
def sigmoid_fn(self, x, offset, power):
return x**power / (offset**power + x**power)
# enlarge array size
def enlarge(self, x, y, stride, num, type):
# given x is large and y is small and type is number of relevant dimensions
if type == '3':
for a in range(0, num, stride):
for b in range(0, num, stride):
for c in range(0, num, stride):
new_a = a // stride
new_b = b // stride
new_c = c // stride
x[a][b][c] = y[new_a][new_b][new_c]
return x
if type == '2':
for a in range(0, num, stride):
for b in range(0, num, stride):
new_a = a // stride
new_b = b // stride
x[a][b] = y[new_a][new_b]
return x
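    # Note: enlarge writes only every stride-th index of x; the entries in between
    # keep whatever values x already held, so it behaves as a sparse scatter of y
    # into x rather than a fully interpolated upsampling.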
# shrink array size
def shrink(self, x, y, stride, num, type):
# given x is large and y is small and type is number of relevant dimensions
new_num = num // stride
if type == '3':
for a in range(0, new_num):
for b in range(0, new_num):
for c in range(0, new_num):
new_a = self.index_bound(a * stride, num)
new_b = self.index_bound(b * stride, num)
new_c = self.index_bound(c * stride, num)
y[a][b][c] = x[new_a][new_b][new_c]
return y
if type == '2':
for a in range(0, new_num):
for b in range(0, new_num):
new_a = self.index_bound(a * stride, num)
new_b = self.index_bound(b * stride, num)
y[a][b] = x[new_a][new_b]
return y
# bound index
def index_bound(self, x, size):
if x < size:
return x
else:
return size - 1
# ____________________________________________________________________________________________________________
# EYES Module
# bound array index
def retmap_bound(self, x, size):
if x < 0:
return 0
if x > size - 1:
return size - 1
else:
return x
# check if maximal value of array is not at center
def fixate(self, gaz_map, size):
fix_map = np.ones((2, 2 * size, 2 * size))
for s in range(2):
fix_map[s][size, size] = 1
if np.array_equal(fix_map, gaz_map) == True:
return 0
else:
return 1
# compute difference btw agonist and antagonist for learning variables
def diff_mat(self, x, size):
mat = np.zeros((2, 2, 2, size, size))
for s in range(2):
for m in range(2):
mat[s][m] = x[s][m][0] - x[s][m][1], x[s][m][1] - x[s][m][0]
return mat
# check epoch within an interval in the forward direction
def forwd_period(self, t, T, interval):
if (t // interval) * interval + 0 <= t and t < (t // interval) * interval + T:
return 1
else:
return 0
# check epoch within an interval in the backward direction
def backw_period(self, t, T, interval):
if (t // interval + 1) * interval - T <= t and t < (t // interval + 1) * interval + 0:
return 1
else:
return 0
# list epochs within some interval
def intv_period(self, t, interval):
lb = (t // interval) * interval
ub = (t // interval + 1) * interval
return np.arange(lb, ub, 1)
# list epoch-value pairs within some interval
def add_error(self, z, t, interval):
range = self.intv_period(t, interval)
value = [z] * interval
add = [(x, y) for x, y in zip(range, value)]
return add
# check if equal to the zero array
def test_zero(self, x):
if np.array_equal(x, np.zeros(x.shape)):
return 1
else:
return 0
# extract index of maximal value for an array centered at zero
def argmax(self, x, size):
if self.test_zero(x) != 1:
out = np.array(np.unravel_index(np.argmax(x), x.shape)) - size # format is (height, width)
return out
else:
return np.zeros(2)
# populate in a neighborhood around the given index
def arrmax(self, max, size):
ptts = ptss(2 * size, 2 * size)
out = np.zeros((2, 2 * size, 2 * size))
for s in range(2):
b_max, a_max = np.array(max[s], dtype=int)
bound = ptts.ptss_bound(b_max, a_max, 2 * size, 2 * size, '2')
for b in bound[0]:
for a in bound[1]:
out[s][b][a] = ptts.ptss_gradient(b, a, b_max, a_max, '2')
return out
# compute sum btw agonist and antagonist
def sum_mus(self, x):
mus = np.zeros((2))
for s in range(2):
mus[s] = np.sum(x[s])
return mus
# extract agonist
def extract_ang(self, x):
out = np.zeros(3)
for k in range(3):
out[k] = x[k][0]
return out
# convert normalized activity into angle for eye variables
def conv_targ(self, x):
# for eye movement and representation
        ang_rang = 1.0 * np.radians([-45, 45])
#-*- coding:utf-8 -*-
"""
@author: scorpio.lu
@datetime:2020-06-11 15:22
@software: PyCharm
@contact: <EMAIL>
----------
路有敬亭山
----------
"""
import os
import argparse
import sys
from datetime import datetime
import numpy as np
import math
import cv2
from scipy.spatial.distance import cdist
import oneflow as flow
from reid_model import resreid, HS_reid
from data_loader import Market1501
parser = argparse.ArgumentParser(description="flags for person re-identification")
parser.add_argument("--gpu_num_per_node", type=int, default=1, required=False)
parser.add_argument("--model", type=str, default="resreid", required=False, help="resreid or pcbreid")
parser.add_argument("--batch_size", type=int, default=300, required=False)
parser.add_argument("--data_dir", type=str, default='/home/oneflow_reid/person_reid/dataset', required=False, help="dataset directory")
parser.add_argument("-image_height", "--image_height", type=int, default=256, required=False)
parser.add_argument("-image_width", "--image_width", type=int, default=128, required=False)
parser.add_argument("--use_tensorrt", dest="use_tensorrt", action="store_true", default=False, required=False, help="inference with tensorrt")
parser.add_argument("--model_load_dir", type=str, default='/home/oneflow_reid/person_reid/model', required=False, help="model load directory")
parser.add_argument("--log_dir", type=str, default="./output", required=False, help="log info save directory")
args = parser.parse_args()
model={'resreid': resreid, 'HS-reid': HS_reid}
func_config = flow.FunctionConfig()
func_config.default_data_type(flow.float)
flow.config.gpu_device_num(args.gpu_num_per_node)
if args.use_tensorrt:
func_config.use_tensorrt(True)
input_blob = flow.FixedTensorDef((args.batch_size, 3, args.image_height, args.image_width), dtype=flow.float)
#input_blob = flow.MirroredTensorDef((args.batch_size, 3, args.image_height, args.image_width), dtype=flow.float)
def resize_image(img, origin_h, origin_w, image_height, image_width):
w = image_width
h = image_height
resized=np.zeros((3, image_height, image_width), dtype=np.float32)
part=np.zeros((3, origin_h, image_width), dtype = np.float32)
w_scale = (float)(origin_w - 1) / (w - 1)
h_scale = (float)(origin_h - 1) / (h - 1)
for c in range(w):
if c == w-1 or origin_w == 1:
val = img[:, :, origin_w-1]
else:
sx = c * w_scale
ix = int(sx)
dx = sx - ix
val = (1 - dx) * img[:, :, ix] + dx * img[:, :, ix+1]
part[:, :, c] = val
for r in range(h):
sy = r * h_scale
iy = int(sy)
dy = sy - iy
val = (1-dy)*part[:, iy, :]
resized[:, r, :] = val
if r==h-1 or origin_h==1:
continue
resized[:, r, :] = resized[:, r, :] + dy * part[:, iy+1, :]
return resized
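# resize_image above is a manual separable linear resize in CHW layout (one pass
# along the width into `part`, then one pass along the height); it uses an
# align-corners scale convention, (origin - 1) / (target - 1). Assuming OpenCV is
# acceptable, a rough (untested) equivalent would be:
#   resized = cv2.resize(img.transpose(1, 2, 0), (image_width, image_height),
#                        interpolation=cv2.INTER_LINEAR).transpose(2, 0, 1)
# with small border differences due to the different scale convention.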
def batch_image_preprocess(img_paths, img_height, img_width):
result_list = []
base = np.ones([args.image_height, args.image_width])
norm_mean = [base * 0.485, base * 0.456, base * 0.406] # imagenet mean
norm_std = [0.229, 0.224, 0.225] # imagenet std
for img_path in img_paths:
img = cv2.imread(img_path, cv2.IMREAD_COLOR)
img = img.transpose(2, 0, 1).astype(np.float32) # hwc->chw
img = img / 255 # /255 # to tensor
img[[0, 1, 2], :, :] = img[[2, 1, 0], :, :] # bgr2rgb
        w = img_width
h = img_height
origin_h = img.shape[1]
origin_w = img.shape[2]
resize_img = resize_image(img, origin_h, origin_w, h, w)
# normalize
resize_img[0] = (resize_img[0] - norm_mean[0])/ norm_std[0]
resize_img[1] = (resize_img[1] - norm_mean[1]) / norm_std[1]
resize_img[2] = (resize_img[2] - norm_mean[2]) / norm_std[2]
result_list.append(resize_img)
results = np.asarray(result_list).astype(np.float32)
return results
def evaluate(qf, q_pids, q_camids, gf, g_pids, g_camids, max_rank=50):
num_g = len(gf)
num_q = len(qf)
print('Computing distance matrix ...')
dist = cdist(qf, gf).astype(np.float16)
dist = np.power(dist, 2).astype(np.float16)
print('Computing CMC and mAP ...')
if num_g < max_rank:
max_rank = num_g
print('Note: number of gallery samples is quite small, got {}'.format(num_g))
indices = np.argsort(dist, axis=1)
matches = (g_pids[indices] == q_pids[:, np.newaxis]).astype(np.int32)
all_cmc = []
all_AP = []
num_valid_q = 0.
for q_idx in range(num_q):
q_pid = q_pids[q_idx]
q_camid = q_camids[q_idx]
order = indices[q_idx]
remove = (g_pids[order] == q_pid) & (g_camids[order] == q_camid)
keep = np.invert(remove)
raw_cmc = matches[q_idx][keep]
if not np.any(raw_cmc):
continue
cmc = raw_cmc.cumsum()
cmc[cmc > 1] = 1
all_cmc.append(cmc[:max_rank])
num_valid_q += 1.
num_rel = raw_cmc.sum()
tmp_cmc = raw_cmc.cumsum()
tmp_cmc = [x / (i + 1.) for i, x in enumerate(tmp_cmc)]
tmp_cmc = np.asarray(tmp_cmc) * raw_cmc
AP = tmp_cmc.sum() / num_rel
all_AP.append(AP)
assert num_valid_q > 0, 'Error: all query identities do not appear in gallery'
all_cmc = np.asarray(all_cmc).astype(np.float32)
all_cmc = all_cmc.sum(0) / num_valid_q
mAP = np.mean(all_AP)
return all_cmc, mAP
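# Protocol note for evaluate(): gallery entries sharing both the query's person id
# and camera id are removed before ranking (the standard Market-1501 protocol); the
# CMC curve is the cumulative-match flag averaged over valid queries, and mAP is the
# mean of the per-query average precision computed from the cumulated match list.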
@flow.function(func_config)
def reid_eval_job(image=input_blob):
features = resreid(image, trainable=False)
return features
class ReIDInference(object):
def __init__(self):
check_point = flow.train.CheckPoint()
if args.model_load_dir:
assert os.path.isdir(args.model_load_dir)
print("Restoring model from {}.".format(args.model_load_dir))
check_point.load(args.model_load_dir)
else:
print("Init model on demand.")
check_point.init()
snapshot_save_path = os.path.join(args.model_save_dir, "last_snapshot")
if not os.path.exists(snapshot_save_path):
os.makedirs(snapshot_save_path)
print("Saving model to {}.".format(snapshot_save_path))
check_point.save(snapshot_save_path)
def inference(self, imgs):
query_images = batch_image_preprocess(imgs, args.image_height, args.image_width)
batch_times = math.ceil(len(imgs)/args.batch_size)
features = []
for i in range(batch_times):
start = max(0, i*args.batch_size)
end = min((i+1)*args.batch_size, len(query_images))
array = query_images[start:end]
feature = reid_eval_job([array]).get()
features.extend(feature.ndarray_list_[0])
return features
def main():
print("=".ljust(66, "="))
print("Running {}: num_gpu = {}.".format(args.model, args.gpu_num_per_node))
print("=".ljust(66, "="))
for arg in vars(args):
print("{} = {}".format(arg, getattr(args, arg)))
print("-".ljust(66, "-"))
print("Time stamp: {}".format(str(datetime.now().strftime("%Y-%m-%d-%H:%M:%S"))))
flow.env.grpc_use_no_signal()
flow.env.log_dir(args.log_dir)
obj = ReIDInference()
print("Loading data from {}".format(args.data_dir))
dataset = Market1501(root=args.data_dir)
query_img, query_id, query_cam_id = zip(*dataset.query)
gallery_img, gallery_id, gallery_cam_id = zip(*dataset.gallery)
print('extracting query features...')
query_features = obj.inference(query_img)
print('extracting query features done...')
print('extracting gallery features...')
gallery_features = obj.inference(gallery_img)
print('extracting gallery features done...')
    cmc, mAP = evaluate(query_features, np.array(query_id), np.array(query_cam_id),
                        gallery_features, np.array(gallery_id), np.array(gallery_cam_id))
from __future__ import division, absolute_import, print_function
import warnings
import numpy as np
from numpy.testing import (
run_module_suite, TestCase, assert_, assert_equal, assert_almost_equal,
assert_no_warnings, assert_raises, assert_array_equal, suppress_warnings
)
# Test data
_ndat = np.array([[0.6244, np.nan, 0.2692, 0.0116, np.nan, 0.1170],
[0.5351, -0.9403, np.nan, 0.2100, 0.4759, 0.2833],
[np.nan, np.nan, np.nan, 0.1042, np.nan, -0.5954],
[0.1610, np.nan, np.nan, 0.1859, 0.3146, np.nan]])
# Rows of _ndat with nans removed
_rdat = [np.array([0.6244, 0.2692, 0.0116, 0.1170]),
np.array([0.5351, -0.9403, 0.2100, 0.4759, 0.2833]),
np.array([0.1042, -0.5954]),
np.array([0.1610, 0.1859, 0.3146])]
# Rows of _ndat with nans converted to ones
_ndat_ones = np.array([[0.6244, 1.0, 0.2692, 0.0116, 1.0, 0.1170],
[0.5351, -0.9403, 1.0, 0.2100, 0.4759, 0.2833],
[1.0, 1.0, 1.0, 0.1042, 1.0, -0.5954],
[0.1610, 1.0, 1.0, 0.1859, 0.3146, 1.0]])
# Rows of _ndat with nans converted to zeros
_ndat_zeros = np.array([[0.6244, 0.0, 0.2692, 0.0116, 0.0, 0.1170],
[0.5351, -0.9403, 0.0, 0.2100, 0.4759, 0.2833],
[0.0, 0.0, 0.0, 0.1042, 0.0, -0.5954],
[0.1610, 0.0, 0.0, 0.1859, 0.3146, 0.0]])
class TestNanFunctions_MinMax(TestCase):
nanfuncs = [np.nanmin, np.nanmax]
stdfuncs = [np.min, np.max]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalars
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
def test_masked(self):
mat = np.ma.fix_invalid(_ndat)
msk = mat._mask.copy()
for f in [np.nanmin]:
res = f(mat, axis=1)
tgt = f(_ndat, axis=1)
assert_equal(res, tgt)
assert_equal(mat._mask, msk)
assert_(not np.isinf(mat).any())
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
# check that rows of nan are dealt with for subclasses (#4628)
mat[1] = np.nan
for f in self.nanfuncs:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(not np.any(np.isnan(res)))
assert_(len(w) == 0)
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(np.isnan(res[1, 0]) and not np.isnan(res[0, 0])
and not np.isnan(res[2, 0]))
assert_(len(w) == 1, 'no warning raised')
assert_(issubclass(w[0].category, RuntimeWarning))
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = f(mat)
assert_(np.isscalar(res))
assert_(res != np.nan)
assert_(len(w) == 0)
class TestNanFunctions_ArgminArgmax(TestCase):
nanfuncs = [np.nanargmin, np.nanargmax]
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_result_values(self):
for f, fcmp in zip(self.nanfuncs, [np.greater, np.less]):
for row in _ndat:
with suppress_warnings() as sup:
sup.filter(RuntimeWarning, "invalid value encountered in")
ind = f(row)
val = row[ind]
# comparing with NaN is tricky as the result
# is always false except for NaN != NaN
assert_(not np.isnan(val))
assert_(not fcmp(val, row).any())
assert_(not np.equal(val, row[:ind]).any())
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
assert_raises(ValueError, f, mat, axis=axis)
assert_raises(ValueError, f, np.nan)
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
assert_raises(ValueError, f, mat, axis=axis)
for axis in [1]:
res = f(mat, axis=axis)
assert_equal(res, np.zeros(0))
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_IntTypes(TestCase):
int_types = (np.int8, np.int16, np.int32, np.int64, np.uint8,
np.uint16, np.uint32, np.uint64)
mat = np.array([127, 39, 93, 87, 46])
def integer_arrays(self):
for dtype in self.int_types:
yield self.mat.astype(dtype)
def test_nanmin(self):
tgt = np.min(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmin(mat), tgt)
def test_nanmax(self):
tgt = np.max(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmax(mat), tgt)
def test_nanargmin(self):
tgt = np.argmin(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmin(mat), tgt)
def test_nanargmax(self):
tgt = np.argmax(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanargmax(mat), tgt)
def test_nansum(self):
tgt = np.sum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nansum(mat), tgt)
def test_nanprod(self):
tgt = np.prod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanprod(mat), tgt)
def test_nancumsum(self):
tgt = np.cumsum(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumsum(mat), tgt)
def test_nancumprod(self):
tgt = np.cumprod(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nancumprod(mat), tgt)
def test_nanmean(self):
tgt = np.mean(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanmean(mat), tgt)
def test_nanvar(self):
tgt = np.var(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat), tgt)
tgt = np.var(mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanvar(mat, ddof=1), tgt)
def test_nanstd(self):
tgt = np.std(self.mat)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat), tgt)
tgt = np.std(self.mat, ddof=1)
for mat in self.integer_arrays():
assert_equal(np.nanstd(mat, ddof=1), tgt)
class SharedNanFunctionsTestsMixin(object):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
for f in self.nanfuncs:
f(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for axis in [None, 0, 1]:
tgt = rf(mat, axis=axis, keepdims=True)
res = nf(mat, axis=axis, keepdims=True)
assert_(res.ndim == tgt.ndim)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.zeros(3)
tgt = rf(mat, axis=1)
res = nf(mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_dtype_from_dtype(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=np.dtype(c), axis=1).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=np.dtype(c), axis=None).dtype.type
res = nf(mat, dtype=np.dtype(c), axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_char(self):
mat = np.eye(3)
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
with suppress_warnings() as sup:
if nf in {np.nanstd, np.nanvar} and c in 'FDG':
# Giving the warning is a small bug, see gh-8000
sup.filter(np.ComplexWarning)
tgt = rf(mat, dtype=c, axis=1).dtype.type
res = nf(mat, dtype=c, axis=1).dtype.type
assert_(res is tgt)
# scalar case
tgt = rf(mat, dtype=c, axis=None).dtype.type
res = nf(mat, dtype=c, axis=None).dtype.type
assert_(res is tgt)
def test_dtype_from_input(self):
codes = 'efdgFDG'
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
for c in codes:
mat = np.eye(3, dtype=c)
tgt = rf(mat, axis=1).dtype.type
res = nf(mat, axis=1).dtype.type
assert_(res is tgt, "res %s, tgt %s" % (res, tgt))
# scalar case
tgt = rf(mat, axis=None).dtype.type
res = nf(mat, axis=None).dtype.type
assert_(res is tgt)
def test_result_values(self):
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
tgt = [rf(d) for d in _rdat]
res = nf(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_scalar(self):
for f in self.nanfuncs:
assert_(f(0.) == 0.)
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
res = f(mat, axis=0)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (1, 3))
res = f(mat, axis=1)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 1))
res = f(mat)
assert_(np.isscalar(res))
class TestNanFunctions_SumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nansum, np.nanprod]
stdfuncs = [np.sum, np.prod]
def test_allnans(self):
# Check for FutureWarning
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
res = np.nansum([np.nan]*3, axis=None)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check scalar
res = np.nansum(np.nan)
assert_(res == 0, 'result is not 0')
assert_(len(w) == 0, 'warning raised')
# Check there is no warning for not all-nan
np.nansum([0]*3, axis=None)
assert_(len(w) == 0, 'unwanted warning raised')
def test_empty(self):
for f, tgt_value in zip([np.nansum, np.nanprod], [0, 1]):
mat = np.zeros((0, 3))
tgt = [tgt_value]*3
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = []
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = tgt_value
res = f(mat, axis=None)
assert_equal(res, tgt)
class TestNanFunctions_CumSumProd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nancumsum, np.nancumprod]
stdfuncs = [np.cumsum, np.cumprod]
def test_allnans(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
# Unlike other nan-functions, sum/prod/cumsum/cumprod don't warn on all nan input
with assert_no_warnings():
res = f([np.nan]*3, axis=None)
tgt = tgt_value*np.ones((3))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((3))' % (tgt_value))
# Check scalar
res = f(np.nan)
tgt = tgt_value*np.ones((1))
assert_(np.array_equal(res, tgt), 'result is not %s * np.ones((1))' % (tgt_value))
# Check there is no warning for not all-nan
f([0]*3, axis=None)
def test_empty(self):
for f, tgt_value in zip(self.nanfuncs, [0, 1]):
mat = np.zeros((0, 3))
tgt = tgt_value*np.ones((0, 3))
res = f(mat, axis=0)
assert_equal(res, tgt)
tgt = mat
res = f(mat, axis=1)
assert_equal(res, tgt)
tgt = np.zeros((0))
res = f(mat, axis=None)
assert_equal(res, tgt)
def test_keepdims(self):
for f, g in zip(self.nanfuncs, self.stdfuncs):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = f(mat, axis=axis, out=None)
res = g(mat, axis=axis, out=None)
assert_(res.ndim == tgt.ndim)
for f in self.nanfuncs:
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
rs = np.random.RandomState(0)
d[rs.rand(*d.shape) < 0.5] = np.nan
res = f(d, axis=None)
assert_equal(res.shape, (1155,))
for axis in np.arange(4):
res = f(d, axis=axis)
assert_equal(res.shape, (3, 5, 7, 11))
def test_matrices(self):
# Check that it works and that type and
# shape are preserved
mat = np.matrix(np.eye(3))
for f in self.nanfuncs:
for axis in np.arange(2):
res = f(mat, axis=axis)
assert_(isinstance(res, np.matrix))
assert_(res.shape == (3, 3))
res = f(mat)
assert_(res.shape == (1, 3*3))
def test_result_values(self):
for axis in (-2, -1, 0, 1, None):
tgt = np.cumprod(_ndat_ones, axis=axis)
res = np.nancumprod(_ndat, axis=axis)
assert_almost_equal(res, tgt)
tgt = np.cumsum(_ndat_zeros,axis=axis)
res = np.nancumsum(_ndat, axis=axis)
assert_almost_equal(res, tgt)
def test_out(self):
mat = np.eye(3)
for nf, rf in zip(self.nanfuncs, self.stdfuncs):
resout = np.eye(3)
for axis in (-2, -1, 0, 1):
tgt = rf(mat, axis=axis)
res = nf(mat, axis=axis, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
class TestNanFunctions_MeanVarStd(TestCase, SharedNanFunctionsTestsMixin):
nanfuncs = [np.nanmean, np.nanvar, np.nanstd]
stdfuncs = [np.mean, np.var, np.std]
def test_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
assert_raises(TypeError, f, _ndat, axis=1, dtype=dtype)
def test_out_dtype_error(self):
for f in self.nanfuncs:
for dtype in [np.bool_, np.int_, np.object_]:
out = np.empty(_ndat.shape[0], dtype=dtype)
assert_raises(TypeError, f, _ndat, axis=1, out=out)
def test_ddof(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in [0, 1]:
tgt = [rf(d, ddof=ddof) for d in _rdat]
res = nf(_ndat, axis=1, ddof=ddof)
assert_almost_equal(res, tgt)
def test_ddof_too_big(self):
nanfuncs = [np.nanvar, np.nanstd]
stdfuncs = [np.var, np.std]
dsize = [len(d) for d in _rdat]
for nf, rf in zip(nanfuncs, stdfuncs):
for ddof in range(5):
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
sup.filter(np.ComplexWarning)
tgt = [ddof >= d for d in dsize]
res = nf(_ndat, axis=1, ddof=ddof)
assert_equal(np.isnan(res), tgt)
if any(tgt):
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 0)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for f in self.nanfuncs:
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(f(np.nan)))
assert_(len(w) == 2)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for f in self.nanfuncs:
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(f(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(f(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
class TestNanFunctions_Median(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanmedian(ndat)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.median(mat, axis=axis, out=None, overwrite_input=False)
res = np.nanmedian(mat, axis=axis, out=None, overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanmedian(d, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanmedian(d, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanmedian(d, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanmedian(d, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.median(mat, axis=1)
res = np.nanmedian(nan_mat, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.median(mat, axis=None)
res = np.nanmedian(nan_mat, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanmedian(nan_mat, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_small_large(self):
# test the small and large code paths, current cutoff 400 elements
for s in [5, 20, 51, 200, 1000]:
d = np.random.randn(4, s)
# Randomly set some elements to NaN:
w = np.random.randint(0, d.size, size=d.size // 5)
d.ravel()[w] = np.nan
d[:,0] = 1. # ensure at least one good value
# use normal median without nans to compare
tgt = []
for x in d:
nonan = np.compress(~np.isnan(x), x)
tgt.append(np.median(nonan, overwrite_input=True))
assert_array_equal(np.nanmedian(d, axis=-1), tgt)
def test_result_values(self):
tgt = [np.median(d) for d in _rdat]
res = np.nanmedian(_ndat, axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with suppress_warnings() as sup:
sup.record(RuntimeWarning)
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
if axis is None:
assert_(len(sup.log) == 1)
else:
assert_(len(sup.log) == 3)
# Check scalar
assert_(np.isnan(np.nanmedian(np.nan)))
if axis is None:
assert_(len(sup.log) == 2)
else:
assert_(len(sup.log) == 4)
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanmedian(mat, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanmedian(mat, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_(np.nanmedian(0.) == 0.)
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.nanmedian, d, axis=-5)
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, -5))
assert_raises(np.AxisError, np.nanmedian, d, axis=4)
assert_raises(np.AxisError, np.nanmedian, d, axis=(0, 4))
assert_raises(ValueError, np.nanmedian, d, axis=(1, 1))
def test_float_special(self):
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
for inf in [np.inf, -np.inf]:
a = np.array([[inf, np.nan], [np.nan, np.nan]])
assert_equal(np.nanmedian(a, axis=0), [inf, np.nan])
assert_equal(np.nanmedian(a, axis=1), [inf, np.nan])
assert_equal(np.nanmedian(a), inf)
# minimum fill value check
a = np.array([[np.nan, np.nan, inf],
[np.nan, np.nan, inf]])
assert_equal(np.nanmedian(a), inf)
assert_equal(np.nanmedian(a, axis=0), [np.nan, np.nan, inf])
assert_equal(np.nanmedian(a, axis=1), inf)
# no mask path
a = np.array([[inf, inf], [inf, inf]])
assert_equal(np.nanmedian(a, axis=1), inf)
a = np.array([[inf, 7, -inf, -9],
[-10, np.nan, np.nan, 5],
[4, np.nan, np.nan, inf]],
dtype=np.float32)
if inf > 0:
assert_equal(np.nanmedian(a, axis=0), [4., 7., -inf, 5.])
assert_equal(np.nanmedian(a), 4.5)
else:
assert_equal(np.nanmedian(a, axis=0), [-10., 7., -inf, -9.])
assert_equal(np.nanmedian(a), -2.5)
assert_equal(np.nanmedian(a, axis=-1), [-1., -2.5, inf])
for i in range(0, 10):
for j in range(1, 10):
a = np.array([([np.nan] * i) + ([inf] * j)] * 2)
assert_equal(np.nanmedian(a), inf)
assert_equal(np.nanmedian(a, axis=1), inf)
assert_equal(np.nanmedian(a, axis=0),
([np.nan] * i) + [inf] * j)
a = np.array([([np.nan] * i) + ([-inf] * j)] * 2)
assert_equal(np.nanmedian(a), -inf)
assert_equal(np.nanmedian(a, axis=1), -inf)
assert_equal(np.nanmedian(a, axis=0),
([np.nan] * i) + [-inf] * j)
class TestNanFunctions_Percentile(TestCase):
def test_mutation(self):
# Check that passed array is not modified.
ndat = _ndat.copy()
np.nanpercentile(ndat, 30)
assert_equal(ndat, _ndat)
def test_keepdims(self):
mat = np.eye(3)
for axis in [None, 0, 1]:
tgt = np.percentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
res = np.nanpercentile(mat, 70, axis=axis, out=None,
overwrite_input=False)
assert_(res.ndim == tgt.ndim)
d = np.ones((3, 5, 7, 11))
# Randomly set some elements to NaN:
w = np.random.random((4, 200)) * np.array(d.shape)[:, None]
w = w.astype(np.intp)
d[tuple(w)] = np.nan
with suppress_warnings() as sup:
sup.filter(RuntimeWarning)
res = np.nanpercentile(d, 90, axis=None, keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 3), keepdims=True)
assert_equal(res.shape, (1, 5, 7, 1))
res = np.nanpercentile(d, 90, axis=(1,), keepdims=True)
assert_equal(res.shape, (3, 1, 7, 11))
res = np.nanpercentile(d, 90, axis=(0, 1, 2, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 1, 1))
res = np.nanpercentile(d, 90, axis=(0, 1, 3), keepdims=True)
assert_equal(res.shape, (1, 1, 7, 1))
def test_out(self):
mat = np.random.rand(3, 3)
nan_mat = np.insert(mat, [0, 2], np.nan, axis=1)
resout = np.zeros(3)
tgt = np.percentile(mat, 42, axis=1)
res = np.nanpercentile(nan_mat, 42, axis=1, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
# 0-d output:
resout = np.zeros(())
tgt = np.percentile(mat, 42, axis=None)
res = np.nanpercentile(nan_mat, 42, axis=None, out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
res = np.nanpercentile(nan_mat, 42, axis=(0, 1), out=resout)
assert_almost_equal(res, resout)
assert_almost_equal(res, tgt)
def test_result_values(self):
tgt = [np.percentile(d, 28) for d in _rdat]
res = np.nanpercentile(_ndat, 28, axis=1)
assert_almost_equal(res, tgt)
# Transpose the array to fit the output convention of numpy.percentile
tgt = np.transpose([np.percentile(d, (28, 98)) for d in _rdat])
res = np.nanpercentile(_ndat, (28, 98), axis=1)
assert_almost_equal(res, tgt)
def test_allnans(self):
mat = np.array([np.nan]*9).reshape(3, 3)
for axis in [None, 0, 1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 60, axis=axis)).all())
if axis is None:
assert_(len(w) == 1)
else:
assert_(len(w) == 3)
assert_(issubclass(w[0].category, RuntimeWarning))
# Check scalar
assert_(np.isnan(np.nanpercentile(np.nan, 60)))
if axis is None:
assert_(len(w) == 2)
else:
assert_(len(w) == 4)
assert_(issubclass(w[0].category, RuntimeWarning))
def test_empty(self):
mat = np.zeros((0, 3))
for axis in [0, None]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_(np.isnan(np.nanpercentile(mat, 40, axis=axis)).all())
assert_(len(w) == 1)
assert_(issubclass(w[0].category, RuntimeWarning))
for axis in [1]:
with warnings.catch_warnings(record=True) as w:
warnings.simplefilter('always')
assert_equal(np.nanpercentile(mat, 40, axis=axis), np.zeros([]))
assert_(len(w) == 0)
def test_scalar(self):
assert_equal(np.nanpercentile(0., 100), 0.)
a = np.arange(6)
r = np.nanpercentile(a, 50, axis=0)
assert_equal(r, 2.5)
assert_(np.isscalar(r))
def test_extended_axis_invalid(self):
d = np.ones((3, 5, 7, 11))
assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=-5)
        assert_raises(np.AxisError, np.nanpercentile, d, q=5, axis=(0, -5))
#!/usr/bin/env python
# coding: utf-8
DESCRIPTION="This script do sparse encoding."
#from memory_profiler import profile
import numpy as np
import argparse
import logging
import random
from sklearn.decomposition import sparse_encode
import sys
from os.path import dirname
sys.path.append(dirname(__file__))
from my_target_counter import TargetCounter
logger = logging.getLogger(__file__)
def my_sparse_encode(X,alpha,method,n_references):
refs = list(range(X.shape[0]))
random.shuffle(refs)
if 0<=n_references:
# use limited number of samples for space coding (if n_references is directed)
refs = refs[:n_references]
X_sparse = np.zeros((X.shape[0],len(refs)))
references = np.array([X[i] for i in refs])
for i in range(X.shape[0]):
if i in refs:
idx = refs.index(i)
x_sps = sparse_encode([X[i]],\
np.r_[references[:idx],np.array([[0]*X.shape[1]]),references[idx+1:]],\
algorithm=method,\
alpha=alpha,\
n_jobs=1)[0]
X_sparse[i] = x_sps
else:
X_sparse[i] = sparse_encode([X[i]],references,algorithm=method,alpha=alpha,n_jobs=1)[0]
    X_sparse = np.array(X_sparse)
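    # When a sample also belongs to the reference set, its own row of the dictionary
    # is replaced by a zero vector before encoding, so every sample is coded only
    # over the other references and is never trivially reconstructed from itself.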
""" Tools for calculating Gradient Descent for ||Ax-b||. """
import matplotlib.pyplot as plt
import numpy as np
def main():
################################################################################
# TODO(student): Input Variables
    A = np.array([[1, 0], [0, 1]])
import os
import re
import sys
sys.path.append('.')
import cv2
import math
import time
import scipy
import argparse
import matplotlib
import numpy as np
import pylab as plt
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from collections import OrderedDict
from scipy.ndimage.morphology import generate_binary_structure
from scipy.ndimage.filters import gaussian_filter, maximum_filter
from lib.network.rtpose_vgg import get_model
from lib.network import im_transform
from lib.config import update_config, cfg
from evaluate.coco_eval import get_outputs, handle_paf_and_heat
from lib.utils.common import Human, BodyPart, CocoPart, CocoColors, CocoPairsRender, draw_humans
from lib.utils.paf_to_pose import paf_to_pose_cpp
def compare(pose1,pose2):
diff = np.mean(abs(pose1-pose2))
return diff
def homography(P,Q,R,S,b):
A= np.zeros((8,8))
A[0,0:3]=P
A[1,3:6]=P
A[2,0:3]=Q
A[3,3:6]=Q
A[4,0:3]=R
A[5,3:6]=R
A[6,0:3]=S
A[7,3:6]=S
for j in range(0,4):
A[2*j,6:8]= -b[2*j] * A[2*j,0:2]
A[2*j+1,6:8]= -b[2*j+1] * A[2*j+1,3:5]
#print(A)
#Calculate the homography
    h = np.dot(np.linalg.inv(A), b)
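    # A stacks the eight DLT equations from the four correspondences with h33 fixed
    # to 1 (assuming P, Q, R, S are homogeneous [x, y, 1] source points and b holds
    # the stacked target coordinates), so solving A.h = b gives the remaining eight
    # homography entries.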
import logging
import os
import random
import time
import warnings
from collections import OrderedDict
from contextlib import contextmanager
from pathlib import Path
from typing import List, Optional
import cv2
import numpy as np
import pandas as pd
import torch
import torch.nn as nn
import torch.nn.functional as F
import torch.optim as optim
import torch.utils.data as data
from IPython.display import Audio
from sklearn.metrics import average_precision_score, f1_score
from sklearn.model_selection import StratifiedKFold
import librosa
import librosa.display as display
import soundfile as sf
import utils
from catalyst.dl import Callback, CallbackOrder, State
class DFTBase(nn.Module):
def __init__(self):
"""Base class for DFT and IDFT matrix"""
super(DFTBase, self).__init__()
def dft_matrix(self, n):
(x, y) = np.meshgrid(np.arange(n), np.arange(n))
omega = np.exp(-2 * np.pi * 1j / n)
W = np.power(omega, x * y)
return W
def idft_matrix(self, n):
(x, y) = np.meshgrid(np.arange(n), np.arange(n))
        omega = np.exp(2 * np.pi * 1j / n)
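        # The (I)DFT matrices are built as W[x, y] = omega ** (x * y) on an n x n
        # index grid; note that dft_matrix applies no 1/n normalization factor.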
import os
import timeit
from typing import List
import numpy as np
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_almost_equal
import pytest
from scipy.special import gamma
import arch.univariate.recursions_python as recpy
CYTHON_COVERAGE = os.environ.get("ARCH_CYTHON_COVERAGE", "0") in ("true", "1", "True")
try:
import arch.univariate.recursions as rec_cython
missing_extension = False
except ImportError:
missing_extension = True
if missing_extension:
rec = recpy
else:
rec = rec_cython
try:
import numba # noqa
missing_numba = False
except ImportError:
missing_numba = True
pytestmark = pytest.mark.filterwarnings("ignore::arch.compat.numba.PerformanceWarning")
class Timer(object):
def __init__(
self,
first,
first_name,
second,
second_name,
model_name,
setup,
repeat=5,
number=10,
) -> None:
self.first_code = first
self.second_code = second
self.setup = setup
self.first_name = first_name
self.second_name = second_name
self.model_name = model_name
self.repeat = repeat
self.number = number
self._run = False
self.times: List[float] = []
self._codes = [first, second]
self.ratio = np.inf
def display(self):
if not self._run:
self.time()
self.ratio = self.times[0] / self.times[1]
title = self.model_name + " timing"
print("\n" + title)
print("-" * len(title))
print(self.first_name + ": " + "{:0.3f} ms".format(1000 * self.times[0]))
print(self.second_name + ": " + "{:0.3f} ms".format(1000 * self.times[1]))
if self.ratio < 1:
print(
"{0} is {1:0.1f}% faster".format(
self.first_name, 100 * (1 / self.ratio - 1)
)
)
else:
print(
"{0} is {1:0.1f}% faster".format(
self.second_name, 100 * (self.ratio - 1)
)
)
print(
self.first_name
+ "/"
+ self.second_name
+ " Ratio: {:0.3f}\n".format(self.ratio)
)
def time(self):
self.times = []
for code in self._codes:
timer = timeit.Timer(code, setup=self.setup)
self.times.append(min(timer.repeat(self.repeat, self.number)))
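# Hypothetical usage of Timer (the code strings below are illustrative, not taken
# from this file): pass two timeit code strings plus a shared setup string, then
# call display() to print both timings and their ratio, e.g.
#   Timer("rec.garch_recursion(parameters, fresids, sresids, sigma2, 1, 1, 1, nobs, backcast, var_bounds)",
#         "cython", "recpy.garch_recursion(...)", "python", "GARCH(1,1)", setup).display()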
class TestRecursions(object):
@classmethod
def setup_class(cls):
cls.nobs = 1000
cls.rng = RandomState(12345)
cls.resids = cls.rng.standard_normal(cls.nobs)
cls.sigma2 = np.zeros_like(cls.resids)
var = cls.resids.var()
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
cls.var_bounds = np.ones((cls.nobs, 2)) * var_bounds
cls.backcast = 1.0
cls.timer_setup = """
import numpy as np
import arch.univariate.recursions as rec
import arch.univariate.recursions_python as recpy
nobs = 10000
resids = np.random.standard_normal(nobs)
sigma2 = np.zeros_like(resids)
var = resids.var()
backcast = 1.0
var_bounds = np.array([var / 1000000.0, var * 1000000.0])
var_bounds = np.ones((nobs, 2)) * var_bounds
"""
def test_garch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
fresids = resids ** 2.0
sresids = np.sign(resids)
recpy.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_numba = sigma2.copy()
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
sigma2_python = sigma2.copy()
rec.garch_recursion(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([0.1, -0.4, 0.3, 0.2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 3, 2])
recpy.garch_recursion_python(
parameters,
fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 0.4, 0.3, 0.2])
mod_fresids = fresids.copy()
mod_fresids[:1] = np.inf
recpy.garch_recursion_python(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.garch_recursion(
parameters,
mod_fresids,
sresids,
sigma2,
1,
1,
1,
nobs,
backcast,
self.var_bounds,
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_harch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
lags = np.array([1, 5, 22], dtype=np.int32)
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.harch_recursion(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
recpy.harch_recursion_python(
parameters, resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
parameters = np.array([0.1, 4e8, 3, 2])
mod_resids = resids.copy()
mod_resids[:10] = np.inf
recpy.harch_recursion_python(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
rec.harch_recursion(
parameters, mod_resids, sigma2, lags, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
def test_arch(self):
nobs, resids = self.nobs, self.resids
sigma2, backcast = self.sigma2, self.backcast
parameters = np.array([0.1, 0.4, 0.3, 0.2])
p = 3
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_python = sigma2.copy()
recpy.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
sigma2_numba = sigma2.copy()
rec.arch_recursion(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert_almost_equal(sigma2_numba, sigma2)
assert_almost_equal(sigma2_python, sigma2)
parameters = np.array([-0.1, -0.4, 0.3, 0.2])
recpy.arch_recursion_python(
parameters, resids, sigma2, p, nobs, backcast, self.var_bounds
)
assert np.all(sigma2 >= self.var_bounds[:, 0])
assert np.all(sigma2 <= 2 * self.var_bounds[:, 1])
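# --- Illustrative sketch (not part of the test suite above) --------------------
# The tests above only assert that each recursion keeps the conditional variance
# inside `var_bounds`.  A minimal GARCH(1,1)-style recursion with the same
# clamping idea is sketched below for orientation; the real
# `arch.univariate.recursions` functions differ (ARCH/GARCH orders, the
# asymmetric `sresids` term, Numba/Cython acceleration, and a softer upper
# clamp), so treat this purely as a reference for the invariant being tested.
def _garch11_recursion_sketch(parameters, fresids, sigma2, backcast, var_bounds):
    """parameters = (omega, alpha, beta); fresids = squared residuals."""
    omega, alpha, beta = parameters
    nobs = fresids.shape[0]
    for t in range(nobs):
        if t == 0:
            sigma2[t] = omega + (alpha + beta) * backcast
        else:
            sigma2[t] = omega + alpha * fresids[t - 1] + beta * sigma2[t - 1]
        # clamp into the admissible band, mirroring the asserts in the tests
        lower, upper = var_bounds[t]
        if sigma2[t] < lower:
            sigma2[t] = lower
        elif sigma2[t] > 2 * upper:
            sigma2[t] = 2 * upper
    return sigma2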
import numpy as np
import cv2
import warnings
warnings.filterwarnings('ignore')
import matplotlib
matplotlib.use('Qt5Agg')
import matplotlib.pyplot as plt
import os
import scipy
import imageio
from scipy.ndimage import gaussian_filter1d, gaussian_filter
from sklearn import linear_model
from sklearn.model_selection import train_test_split
from matplotlib.colors import ListedColormap
import statsmodels.api as sm
import pandas as pd
from statsmodels.stats.anova import AnovaRM
from helper_code.registration_funcs import model_arena, get_arena_details
from helper_code.processing_funcs import speed_colors
from helper_code.analysis_funcs import *
from important_code.shuffle_test import permutation_test, permutation_correlation
plt.rcParams.update({'font.size': 30})
def plot_traversals(self):
''' plot all traversals across the arena '''
# initialize parameters
sides = ['back', 'front']
# sides = ['back']
types = ['spontaneous'] #, 'evoked']
fast_color = np.array([.5, 1, .5])
slow_color = np.array([1, .9, .9])
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
edge_vector_color = np.array([.98, .9, .6])**4
homing_vector_color = np.array([0, 0, 0])
non_escape_color = np.array([0,0,0])
condition_colors = [[.5,.5,.5], [.3,.5,.8], [0,.7,1]]
time_thresh = 15 #20 for ev comparison
speed_thresh = 2
p = 0
HV_cutoff = .681 # .5 for exploratory analysis
# initialize figures
fig, fig2, fig3, ax, ax2, ax3 = initialize_figures_traversals(self) #, types = len(types)+1)
# initialize lists for stats
all_data = []
all_conditions = []
edge_vector_time_all = np.array([])
# loop over spontaneous vs evoked
for t, type in enumerate(types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
strategies = [0, 0, 0]
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# initialize edginess
all_traversals_edgy = {}
all_traversals_homy = {}
proportion_edgy = {}
for s in sides:
all_traversals_edgy[s] = []
all_traversals_homy[s] = []
proportion_edgy[s] = []
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse in the experiment
for i, mouse in enumerate(self.analysis[experiment][condition]['back traversal']):
mouse_data = []
print(mouse)
# loop over back and front sides
for s, start in enumerate(sides):
if start == 'front' and type == 'evoked': continue
# find all the paths across the arena
traversal = self.analysis[experiment][condition][start + ' traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
if traversal[t*5]:
x_end_loc = np.array([x_loc[-1] * scaling_factor for x_loc in np.array(traversal[t * 5 + 0])[:, 0]])
if traversal[4] < 10: continue
number_of_edge_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) > HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) ) / min(traversal[4], time_thresh) * time_thresh
# print(traversal[4])
number_of_homing_vectors = np.sum((np.array(traversal[t*5+3]) < speed_thresh) * \
(np.array(traversal[t*5+2]) < HV_cutoff) * \
# (abs(x_end_loc - 50) < 30) * \
(np.array(traversal[t*5+1]) < time_thresh*30*60) )/ min(traversal[4], time_thresh) * time_thresh
all_traversals_edgy[start].append( number_of_edge_vectors )
all_traversals_homy[start].append(number_of_homing_vectors)
# print(number_of_edge_vectors)
mouse_data.append(number_of_edge_vectors)
# get the time of edge vectors
if condition == 'obstacle' and 'wall' in experiment:
edge_vector_idx = ( (np.array(traversal[t * 5 + 3]) < speed_thresh) * (np.array(traversal[t * 5 + 2]) > HV_cutoff) )
edge_vector_time = np.array(traversal[t*5+1])[edge_vector_idx] / 30 / 60
edge_vector_time_all = np.concatenate((edge_vector_time_all, edge_vector_time))
# prop_edgy = np.sum((np.array(traversal[t*5 + 3]) < speed_thresh) * \
# (np.array(traversal[t*5 + 2]) > HV_cutoff) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60)) / \
# np.sum((np.array(traversal[t * 5 + 3]) < speed_thresh) * \
# (np.array(traversal[t * 5 + 1]) < time_thresh * 30 * 60))
else:
all_traversals_edgy[start].append(0)
all_traversals_homy[start].append(0)
# if np.isnan(prop_edgy): prop_edgy = .5
# prop_edgy = prop_edgy / .35738
# proportion_edgy[start].append(prop_edgy)
traversal_coords = np.array(traversal[t*5+0])
pre_traversal = np.array(traversal[10])
else:
# all_traversals_edginess[start].append(0)
continue
m += .5
# loop over all paths
show = False
if show and traversal:
for trial in range(traversal_coords.shape[0]):
# make sure it qualifies
if traversal[t * 5 + 3][trial] > speed_thresh: continue
if traversal[t*5+1][trial] > time_thresh*30*60: continue
if not len(pre_traversal[0][0]): continue
# if abs(traversal_coords[trial][0][-1]*scaling_factor - 50) > 30: continue
# downsample to get even coverage
# if c == 2 and np.random.random() > (59 / 234): continue
# if c == 1 and np.random.random() > (59 / 94): continue
if traversal[t*5+2][trial]> HV_cutoff: plot_color = edge_vector_color
else: plot_color = homing_vector_color
display_traversal(scaling_factor, traversal_coords, pre_traversal, trial, path_ax, plot_color)
if mouse_data:
# all_data.append(mouse_data)
all_conditions.append(c)
# save image
path_fig.savefig(os.path.join(self.summary_plots_folder, self.labels[c] + ' traversals.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot the data
if type == 'spontaneous' and len(sides) > 1:
plot_number_edgy = np.array(all_traversals_edgy['front']).astype(float) + np.array(all_traversals_edgy['back']).astype(float)
plot_number_homy = np.array(all_traversals_homy['front']).astype(float) + np.array(all_traversals_homy['back']).astype(float)
print(np.sum(plot_number_edgy + plot_number_homy))
# plot_proportion_edgy = (np.array(proportion_edgy['front']).astype(float) + np.array(proportion_edgy['back']).astype(float)) / 2
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
all_data.append(plot_number_edgy)
else:
plot_number_edgy = np.array(all_traversals_edgy[sides[0]]).astype(float)
plot_number_homy = np.array(all_traversals_homy[sides[0]]).astype(float)
plot_proportion_edgy = plot_number_edgy / (plot_number_edgy + plot_number_homy)
# plot_proportion_edgy = np.array(proportion_edgy[sides[0]]).astype(float)
for i, (plot_data, ax0) in enumerate(zip([plot_number_edgy, plot_number_homy], [ax, ax3])): #, plot_proportion_edgy , ax2
print(plot_data)
print(np.sum(plot_data))
# plot each trial
# scatter_axis = scatter_the_axis( (p*4/3+.5/3), plot_data)
ax0.scatter(np.ones_like(plot_data)* (p*4/3+.5/3)* 3 - .2, plot_data, color=[0,0,0, .4], edgecolors='none', s=25, zorder=99)
# do kde
# if i==0: bw = .5
# else: bw = .02
bw = .5
kde = fit_kde(plot_data, bw=bw)
plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=.3, color=[.5, .5, .5], violin=False, clip=True)
ax0.plot([4 * p + -.2, 4 * p + -.2], [np.percentile(plot_data, 25), np.percentile(plot_data, 75)], color = [0,0,0])
ax0.plot([4 * p + -.4, 4 * p + -.0], [np.percentile(plot_data, 50), np.percentile(plot_data, 50)], color = [1,1,1], linewidth = 2)
# else:
# # kde = fit_kde(plot_data, bw=.03)
# # plot_kde(ax0, kde, plot_data, z=4 * p + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=True)
# bp = ax0.boxplot([plot_data, [0, 0]], positions=[4 * p + -.2, -10], showfliers=False, zorder=99)
# ax0.set_xlim([-1, 4 * len(self.experiments) - 1])
p+=1
# plot a stacked bar of strategies
# fig3 = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
# fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
# make timing hist
plt.figure()
bins = np.arange(0,22.5,2.5)
plt.hist(edge_vector_time_all, bins = bins, color = [0,0,0], weights = np.ones_like(edge_vector_time_all) / 2.5 / m) #condition_colors[c])
plt.ylim([0,2.1])
plt.show()
# # save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Traversal # EVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig3.savefig(os.path.join(self.summary_plots_folder, 'Traversal # HVS comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
group_A = [[d] for d in all_data[0]]
group_B = [[d] for d in all_data[2]]
permutation_test(group_A, group_B, iterations = 100000, two_tailed = False)
group_A = [[d] for d in all_data[2]]
group_B = [[d] for d in all_data[1]]
permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Traversal proportion edgy.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
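# --- Illustrative sketch (hypothetical helper, not the imported one) -----------
# `permutation_test` is imported from important_code.shuffle_test and its
# internals are not shown in this file.  A generic two-sample permutation test
# on per-mouse lists of trial values could look roughly like this; the real
# helper may pool trials, resample, or compute its statistic differently.
def _permutation_test_sketch(group_A, group_B, iterations=10000, two_tailed=True):
    import numpy as np
    # collapse each mouse's trials to a single mean so mice are the unit of analysis
    a = np.array([np.mean(m) for m in group_A])
    b = np.array([np.mean(m) for m in group_B])
    observed = np.mean(a) - np.mean(b)
    pooled = np.concatenate([a, b])
    count = 0
    for _ in range(iterations):
        np.random.shuffle(pooled)
        diff = np.mean(pooled[:len(a)]) - np.mean(pooled[len(a):])
        if two_tailed:
            count += abs(diff) >= abs(observed)
        else:
            count += diff >= observed
    p_value = count / iterations
    print('permutation p = {:.4f}'.format(p_value))
    return p_value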
def plot_speed_traces(self, speed = 'absolute'):
''' plot the speed traces '''
max_speed = 60
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
RT, end_idx, scaling_factor, speed_traces, subgoal_speed_traces, time, time_axis, trial_num = \
initialize_variables(number_of_trials, self,sub_experiments)
# create custom colormap
colormap = speed_colormap(scaling_factor, max_speed, n_bins=256, v_min=0, v_max=max_speed)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
for trial in range(len(self.analysis[experiment][condition]['speed'][mouse])):
if trial > 2: continue
trial_num = fill_in_trial_data(RT, condition, end_idx, experiment, mouse, scaling_factor, self,
speed_traces, subgoal_speed_traces, time, trial, trial_num)
# print some useful metrics
print_metrics(RT, end_idx, number_of_mice, number_of_trials)
# put the speed traces on the plot
fig = show_speed_traces(colormap, condition, end_idx, experiment, number_of_trials, speed, speed_traces, subgoal_speed_traces, time_axis, max_speed)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder,'Speed traces - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('done')
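# --- Illustrative sketch (hypothetical stand-in) --------------------------------
# `speed_colormap` comes from the shared helper modules and is not defined in
# this file.  A simple stand-in that maps speeds (cm/s) onto colors via a
# matplotlib colormap might look like this; the real helper likely builds a
# custom ListedColormap rather than reusing a built-in one.
def _speed_colormap_sketch(v_min=0, v_max=60):
    import matplotlib.cm as cm
    import matplotlib.colors as mcolors
    norm = mcolors.Normalize(vmin=v_min, vmax=v_max)
    mappable = cm.ScalarMappable(norm=norm, cmap='plasma')
    return mappable  # mappable.to_rgba(speed) returns an RGBA color for a speed value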
def plot_escape_paths(self):
''' plot the escape paths '''
# initialize parameters
edge_vector_color = [np.array([1, .95, .85]), np.array([.98, .9, .6])**4]
homing_vector_color = [ np.array([.725, .725, .725]), np.array([0, 0, 0])]
non_escape_color = np.array([0,0,0])
fps = 30
escape_duration = 18 #6 #9 for food # 18 for U
min_distance_to_shelter = 30
HV_cutoff = 0.681 #.75 #.7
# initialize all data for stats
all_data = [[], [], [], []]
all_conditions = []
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# initialize the arena
arena, arena_color, scaling_factor, obstacle = initialize_arena(self, sub_experiments, sub_conditions)
# more arena stuff for this analysis type
arena_reference = arena_color.copy()
arena_color[arena_reference == 245] = 255
get_arena_details(self, experiment=sub_experiments[0])
shelter_location = [s / scaling_factor / 10 for s in self.shelter_location]
# initialize strategy array
strategies = np.array([0,0,0])
path_ax, path_fig = get_arena_plot(obstacle, sub_conditions, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 9
else:
escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# color based on visual vs tactile obst avoidance
# if mouse == 'CA7190' or mouse == 'CA3210' or mouse == 'CA3155' or mouse == 'CA8100':
# edge_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# homing_vector_color = [np.array([.6, .4, .99]),np.array([.6, .4, .99])]
# else:
# edge_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# homing_vector_color = [np.array([.8, .95, 0]),np.array([.8, .95, 0])]
# show escape paths
show_escape_paths(HV_cutoff, arena, arena_color, arena_reference, c, condition, edge_vector_color, escape_duration, experiment, fps,
homing_vector_color, min_distance_to_shelter, mouse, non_escape_color, scaling_factor, self, shelter_location, strategies, path_ax,
determine_strategy = False) #('dark' in experiment and condition=='obstacle'))
# save image
# scipy.misc.imsave(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
imageio.imwrite(os.path.join(self.summary_plots_folder, 'Escape paths - ' + self.labels[c] + '.png'), arena_color[:,:,::-1])
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
path_fig.savefig(os.path.join(self.summary_plots_folder, 'Escape plot - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# plot a stacked bar of strategies
fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.png'), format='png', bbox_inches = 'tight', pad_inches = 0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Escape categories - ' + self.labels[c] + '.eps'), format='eps', bbox_inches = 'tight', pad_inches = 0)
plt.show()
print('escape')
# strategies = np.array([4,5,0])
# fig = plot_strategies(strategies, homing_vector_color, non_escape_color, edge_vector_color)
# plt.show()
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Trajectory by previous edge-vectors 2.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# group_A = [[0],[1],[0,0,0],[0,0],[0,1],[1,0],[0,0,0]]
# group_B = [[1,0,0],[0,0,0,0],[0,0,0],[1,0,0],[0,0,0]]
# permutation_test(group_B, group_A, iterations = 10000, two_tailed = False)
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1]]
# obstacle_exp = [[0,1],[0,0,0,0,1],[0,1],[0]]
open_field = [[1,0,0,0,0],[0,0,0,0,0],[0,0,0,0],[1,0,0,0,0,0],[0,0,0,0,0,0],[0,0,0,0,0,0,0,0]]
# U_shaped = [[0,1],[1,1], [1,1], [0,0,1], [0,0,0], [0], [1], [0], [0,1], [0,1,0,0], [0,0,0]]
# permutation_test(open_field, obstacle, iterations = 10000, two_tailed = False)
# do same edgy homing then stop to both
obstacle = [[0],[1],[0,0,0],[0,0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0,0],[0,0,0],[1,0,0],[0,0,0],[0,0,1]] #stop at 3 trials
# do same edgy homing then stop to both --> exclude non escapes
obstacle = [[0],[1],[0,0,0],[0],[0,1],[1],[0,0,0], [1], [1], [0,0,0]]
open_field = [[1],[0,0],[0,0,0],[1,0,0],[0,0,0],[0,1]] #stop at 3 trials
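# --- Illustrative sketch (hypothetical, for orientation only) -------------------
# The 'edginess' values used in plot_edginess below are read from self.analysis
# and are computed elsewhere in the processing pipeline.  Conceptually they
# score where the escape path sits between the straight homing vector to the
# shelter (0) and the straight line to the obstacle edge (1); one plausible
# formalization, evaluated where the path crosses a fixed y position
# (e.g. traj_loc = 40), is sketched below.  The real computation may normalize
# or interpolate differently.
def _edginess_sketch(x_path, y_path, start, shelter, edge, y_eval=40):
    import numpy as np
    x_start, y_start = start
    x_shelter, y_shelter = shelter
    x_edge, y_edge = edge
    # x position of the escape path where it is closest to y_eval
    x_at_eval = x_path[np.argmin(np.abs(np.asarray(y_path) - y_eval))]
    # x positions of the start->shelter and start->edge lines at y_eval
    x_hv = x_start + (x_shelter - x_start) * (y_eval - y_start) / (y_shelter - y_start)
    x_ev = x_start + (x_edge - x_start) * (y_eval - y_start) / (y_edge - y_start)
    return (x_at_eval - x_hv) / (x_ev - x_hv)  # 0 -> homing vector, 1 -> edge vector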
def plot_edginess(self):
''' plot escape-path edginess (homing-vector vs edge-vector trajectories) by condition '''
# initialize parameters
fps = 30
escape_duration = 12 #9 #6
HV_cutoff = .681 #.681
ETD = 10 #10
traj_loc = 40
edge_vector_color = np.array([.98, .9, .6])**5
edge_vector_color = np.array([.99, .94, .6]) ** 3
# edge_vector_color = np.array([.99, .95, .6]) ** 5
homing_vector_color = np.array([0, 0, 0])
# homing_vector_color = np.array([.85, .65, .8])
# edge_vector_color = np.array([.65, .85, .7])
# colors for diff conditions
colors = [np.array([.7, 0, .3]), np.array([0, .8, .5])]
colors = [np.array([.3,.3,.3]), np.array([1, .2, 0]), np.array([0, .8, .4]), np.array([0, .7, .9])]
colors = [np.array([.3, .3, .3]), np.array([1, .2, 0]), np.array([.7, 0, .7]), np.array([0, .7, .9]), np.array([0,1,0])]
# colors = [np.array([0, 0, 0]), np.array([0, 0, 0]),np.array([0, 0, 0]), np.array([0, 0, 0])]
offset = [0,.2, .2, 0]
# initialize figures
fig, fig2, fig3, fig4, _, ax, ax2, ax3 = initialize_figures(self)
# initialize all data for stats
all_data = [[],[],[],[]]
all_conditions = []
mouse_ID = []; m = 1
dist_data_EV_other_all = []
delta_ICs, delta_x_end = [], []
time_to_shelter, was_escape = [], []
repetitions = 1
for rand_select in range(repetitions):
m = -1
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
num_trials_total = 0
num_trials_escape = 0
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
t_total = 0
# initialize array to fill in with each trial's data
edginess, end_idx, time_since_down, time_to_shelter, time_to_shelter_all, prev_edginess, scaling_factor, time_in_center, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
mouse_ID_trial = edginess.copy()
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment or ('off' in experiment and condition == 'no obstacle') or 'quick' in experiment:
escape_duration = 18
elif 'food' in experiment:
escape_duration = 12
else: escape_duration = 12
# elif 'up' in experiment and 'probe' in condition:
# escape_duration = 12
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
m+=1
# initialize mouse data for stats
mouse_data = [[],[],[],[]]
print(mouse)
skip_mouse = False
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
prev_homings = []
x_edges_used = []
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
# impose conditions
if 'food' in experiment:
if t > 12: continue
if condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
num_trials_total += 1
elif 'void' in experiment:
if t > 5: continue
else:
if t>2: continue
# if trial > 2: continue
num_trials_total += 1
# if trial!=2: continue
# if 'off' in experiment and trial: continue
# if trial < 3 and 'wall down' in experiment: continue
# if condition == 'obstacle' and not 'non' in experiment and \
# self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
# if c == 0 and not (trial > 0): continue
# if c == 1 and not (trial): continue
# if c == 2 and not (trial == 0): continue
# if trial and ('lights on off' in experiment and not 'baseline' in experiment): continue
if 'Square' in experiment:
HV_cutoff = .56
HV_cutoff = 0  # note: overrides the value set on the previous line
y_idx = self.analysis[experiment][condition]['path'][mouse][trial][1]
if y_idx[0] * scaling_factor > 50: continue
else:
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# print(y_start)
# print(x_start)
if y_start > 25: continue
if abs(x_start-50) > 30: continue
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
RT = self.analysis[experiment][condition]['RT'][mouse][trial]
if np.isnan(end_idx[trial_num]) or (end_idx[trial_num] > escape_duration * fps):
# if not ('up' in experiment and 'probe' in condition and not np.isnan(RT)):
# mouse_data[3].append(0)
continue
''' check for previous edgy homings '''
# if 'dark' in experiment or True:
# num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial)
# # print(num_prev_edge_vectors)
# if num_prev_edge_vectors and c: continue
# if not num_prev_edge_vectors and not c: continue
# if num_prev_edge_vectors < 3 and (c==0): continue
# if num_prev_edge_vectors > 0 and c < 4: continue
# if t>1 and c == 2: continue
# if num_prev_edge_vectors >= 2: print('prev edgy homing'); continue
# if x_edge in x_edges_used: print('prev edgy escape'); continue
#
# print('-----------' + mouse + '--------------')
#
# if self.analysis[experiment][condition]['edginess'][mouse][trial] <= HV_cutoff:
# print(' HV ')
# else:
# print(' EDGY ')
# # edgy trial has occurred
# print('EDGY TRIAL ' + str(trial))
# x_edges_used.append(x_edge)
#
# # select only *with* prev homings
# if not num_prev_edge_vectors:
# if not x_edge in x_edges_used:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] > HV_cutoff:
# x_edges_used.append(x_edge)
# continue
# print(t)
num_trials_escape += 1
# add data
edginess[trial_num] = self.analysis[experiment][condition]['edginess'][mouse][trial]
time_since_down[trial_num] = np.sqrt((x_start - 50)**2 + (y_start - 50)**2 )# self.analysis[experiment][condition]['start angle'][mouse][trial]
print(edginess[trial_num])
if 'Square' in experiment:
if edginess[trial_num] <=-.3: # and False: #.15
edginess[trial_num] = np.nan
continue
# edginess to current edge as opposed to specific edge
if (('moves left' in experiment and condition == 'no obstacle') \
or ('moves right' in experiment and condition== 'obstacle')): # and False:
if edginess[trial_num] <= -0: # and False:
edginess[trial_num] = np.nan
continue
edginess[trial_num] = edginess[trial_num] - 1
# shelter edginess
if False:
y_pos = self.analysis[experiment][condition]['path'][mouse][trial][1][:int(end_idx[trial_num])] * scaling_factor
x_pos = self.analysis[experiment][condition]['path'][mouse][trial][0][:int(end_idx[trial_num])] * scaling_factor
# get the latter phase traj
y_pos_1 = 55
y_pos_2 = 65
x_pos_1 = x_pos[np.argmin(abs(y_pos - y_pos_1))]
x_pos_2 = x_pos[np.argmin(abs(y_pos - y_pos_2))]
#where does it end up
slope = (y_pos_2 - y_pos_1) / (x_pos_2 - x_pos_1)
intercept = y_pos_1 - x_pos_1 * slope
x_pos_proj = (80 - intercept) / slope
# compared to
x_pos_shelter_R = 40 #40.5 # defined as mean of null dist
# if 'long' in self.labels[c]:
# x_pos_shelter_R += 18
# compute the metric
shelter_edginess = (x_pos_proj - x_pos_shelter_R) / 18
edginess[trial_num] = -shelter_edginess
# if condition == 'obstacle' and 'left' in experiment:edginess[trial_num] = -edginess[trial_num] # for putting conditions together
# get previous edginess #TEMPORARY COMMENT
# if not t:
# SH_data = self.analysis[experiment][condition]['prev homings'][mouse][-1]
# time_to_shelter.append(np.array(SH_data[2]))
# was_escape.append(np.array(SH_data[4]))
if False: # or True:
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, delta_ICs, delta_x_end)
print(prev_edginess[trial_num])
print(trial + 1)
print('')
# get time in center
# time_in_center[trial_num] = self.analysis[experiment][condition]['time exploring obstacle'][mouse][trial]
# time_in_center[trial_num] = num_PORHVs
# if num_PORHVs <= 1:
# edginess[trial_num] = np.nan
# continue
# if (prev_edginess[trial_num] < HV_cutoff and not t) or skip_mouse:
# edginess[trial_num] = np.nan
# skip_mouse = True
# continue
''' qualify by prev homings '''
# if prev_edginess[trial_num] < .4: # and c:
# edginess[trial_num] = np.nan
# prev_edginess[trial_num] = np.nan
# continue
num_prev_edge_vectors, x_edge = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD = 10)
# print(str(num_prev_edge_vectors) + ' EVs')
#
# if not num_prev_edge_vectors >= 1 and c ==0:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if not num_prev_edge_vectors < 1 and c ==1:
# edginess[trial_num] = np.nan
# t+=1
# continue
# print(num_prev_edge_vectors)
# if num_prev_edge_vectors !=0 and c==3:
# edginess[trial_num] = np.nan
# t+=1
# continue
# if num_prev_edge_vectors != 1 and c == 2:
# edginess[trial_num] = np.nan
# t += 1
# continue
# if num_prev_edge_vectors != 2 and num_prev_edge_vectors != 3 and c ==1:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# if num_prev_edge_vectors < 4 and c ==0:
# edginess[trial_num] = np.nan
# t += 1
# continue
#
# print(trial + 1)
# print(prev_edginess[trial_num])
# print(edginess[trial_num])
# print('')
# print(t)
# get time since obstacle removal?
# time_since_down[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment]['probe']['start time'][mouse][0]
# add data for stats
mouse_data[0].append(int(edginess[trial_num] > HV_cutoff))
mouse_data[1].append(edginess[trial_num])
mouse_data[2].append(prev_edginess[trial_num])
mouse_data[3].append(self.analysis[experiment][condition]['start time'][mouse][trial] - self.analysis[experiment][condition]['start time'][mouse][0])
mouse_ID_trial[trial_num] = m
t += 1
t_total += 1
#append data for stats
if mouse_data[0]:
all_data[0].append(mouse_data[0])
all_data[1].append(mouse_data[1])
all_data[2].append(mouse_data[2])
all_data[3].append(mouse_data[3])
all_conditions.append(c)
mouse_ID.append(m); m+= 1
else:
print(mouse)
print('0 trials')
# get prev homings
time_to_shelter_all.append(time_to_shelter)
dist_data_EV_other_all = np.append(dist_data_EV_other_all, dist_to_other_SH[edginess > HV_cutoff])
# print(t_total)
''' plot edginess by condition '''
# get the data
# data = abs(edginess)
data = edginess
plot_data = data[~np.isnan(data)]
# print(np.percentile(plot_data, 25))
# print(np.percentile(plot_data, 50))
# print(np.percentile(plot_data, 75))
# print(np.mean(plot_data > HV_cutoff))
# plot each trial
scatter_axis = scatter_the_axis(c, plot_data)
ax.scatter(scatter_axis[plot_data>HV_cutoff], plot_data[plot_data>HV_cutoff], color=edge_vector_color[::-1], s=15, zorder = 99)
ax.scatter(scatter_axis[plot_data<=HV_cutoff], plot_data[plot_data<=HV_cutoff], color=homing_vector_color[::-1], s=15, zorder = 99)
bp = ax.boxplot([plot_data, [0,0]], positions = [3 * c - .2, -10], showfliers=False, zorder=99)
plt.setp(bp['boxes'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['whiskers'], color=[.5,.5,.5], linewidth = 2)
plt.setp(bp['medians'], linewidth=2)
ax.set_xlim([-1, 3 * len(self.experiments) - 1])
# ax.set_ylim([-.1, 1.15])
ax.set_ylim([-.1, 1.3])
#do kde
try:
if 'Square' in experiment:
kde = fit_kde(plot_data, bw=.06)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=.8, color=[.5,.5,.5], violin=False, clip=False, cutoff = HV_cutoff+0.0000001, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
ax.set_ylim([-1.5, 1.5])
else:
kde = fit_kde(plot_data, bw=.04)
plot_kde(ax, kde, plot_data, z=3*c + .3, vertical=True, normto=1.3, color=[.5,.5,.5], violin=False, clip=True, cutoff = HV_cutoff, cutoff_colors = [homing_vector_color[::-1], edge_vector_color[::-1]])
except: pass
# plot the polar plot or initial trajectories
# plt.figure(fig4.number)
fig4 = plt.figure(figsize=( 5, 5))
# ax4 = plt.subplot(1,len(self.experiments),len(self.experiments) - c, polar=True)
ax4 = plt.subplot(1, 1, 1, polar=True)
plt.axis('off')
ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
ax4.set_xlim([-np.pi / 2 - .1, 0])
# ax4.set_xlim([-np.pi - .1, 0])
mean_value_color = max(0, min(1, np.mean(plot_data)))
mean_value_color = np.sum(plot_data > HV_cutoff) / len(plot_data)
mean_value = np.mean(plot_data)
value_color = mean_value_color * edge_vector_color[::-1] + (1 - mean_value_color) * homing_vector_color[::-1]
ax4.arrow(mean_value + 3 * np.pi / 2, 0, 0, 1.9, color=[abs(v)**1 for v in value_color], alpha=1, width = 0.05, linewidth=2)
ax4.plot([0, 0 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
ax4.plot([0, 1 + 3 * np.pi / 2], [0, 2.25], color=[.5,.5,.5], alpha=1, linewidth=1, linestyle = '--')
# ax4.plot([0, -1 + 3 * np.pi / 2], [0, 2.25], color=[.5, .5, .5], alpha=1, linewidth=1, linestyle='--')
scatter_axis_EV = scatter_the_axis_polar(plot_data[plot_data > HV_cutoff], 2.25, 0) #0.05
scatter_axis_HV = scatter_the_axis_polar(plot_data[plot_data <= HV_cutoff], 2.25, 0)
ax4.scatter(plot_data[plot_data > HV_cutoff] + 3 * np.pi/2, scatter_axis_EV, s = 30, color=edge_vector_color[::-1], alpha = .8, edgecolors = None)
ax4.scatter(plot_data[plot_data <= HV_cutoff] + 3 * np.pi/2, scatter_axis_HV, s = 30, color=homing_vector_color[::-1], alpha=.8, edgecolors = None)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.png'), format='png', transparent=True, bbox_inches='tight', pad_inches=0)
fig4.savefig(os.path.join(self.summary_plots_folder, 'Angle comparison - ' + self.labels[c] + '.eps'), format='eps', transparent=True, bbox_inches='tight', pad_inches=0)
# print(len(plot_data))
if len(plot_data) > 1 and False: # or True:
''' plot the correlation '''
# do both prev homings and time in center # np.array(time_since_down) # 'Time since removal'
for plot_data_corr, fig_corr, ax_corr, data_label in zip([prev_edginess, time_in_center], [fig2, fig3], [ax2, ax3], ['Prior homings','Exploration']): #
plot_data_corr = plot_data_corr[~np.isnan(data)]
# plot data
ax_corr.scatter(plot_data_corr, plot_data, color=colors[c], s=60, alpha=1, edgecolors=colors[c]/2, linewidth=1) #color=[.5, .5, .5] #edgecolors=[.2, .2, .2]
# do correlation
r, p = scipy.stats.pearsonr(plot_data_corr, plot_data)
print(r, p)
# do linear regression
plot_data_corr, prediction = do_linear_regression(plot_data, plot_data_corr)
# plot linear regression
ax_corr.plot(plot_data_corr, prediction['Pred'].values, color=colors[c], linewidth=1, linestyle='--', alpha=.7) #color=[.0, .0, .0]
ax_corr.fill_between(plot_data_corr, prediction['lower'].values, prediction['upper'].values, color=colors[c], alpha=.075) #color=[.2, .2, .2]
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.png'), format='png')
fig_corr.savefig(os.path.join(self.summary_plots_folder, 'Edginess by ' + data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
# test correlation and stats thru permutation test
# data_x = list(np.array(all_data[2])[np.array(all_conditions) == c])
# data_y = list(np.array(all_data[1])[np.array(all_conditions) == c])
# permutation_correlation(data_x, data_y, iterations=10000, two_tailed=False, pool_all = True)
print(num_trials_escape)
print(num_trials_total)
print(num_trials_escape / num_trials_total)
# save the plot
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, 'Edginess comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig5.savefig(os.path.join(self.summary_plots_folder, 'Angle dist comparison.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
time_to_shelter_all = np.concatenate(list(flatten(time_to_shelter_all))).astype(float)
np.percentile(time_to_shelter_all, 25)
np.percentile(time_to_shelter_all, 75)
group_A = list(np.array(all_data[0])[np.array(all_conditions) == 2])
group_B = list(np.array(all_data[0])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
group_A = list(np.array(all_data[1])[(np.array(all_conditions) == 1) + (np.array(all_conditions) == 2)])
group_B = list(np.array(all_data[1])[np.array(all_conditions) == 3])
permutation_test(group_A, group_B, iterations = 10000, two_tailed = False)
import pandas
df = pandas.DataFrame(data={"mouse_id": mouse_ID, "condition": all_conditions, "x-data": all_data[2], "y-data": all_data[1]})
df.to_csv("./Foraging Path Types.csv", sep=',', index=False)
group_B = list(flatten(np.array(all_data[0])[np.array(all_conditions) == 1]))
np.sum(group_B) / len(group_B)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess < HV_cutoff]), 75)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 50)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 25)
np.percentile(abs(time_since_down[edginess > HV_cutoff]), 75)
group_A = [[d] for d in abs(time_since_down[edginess > HV_cutoff])]
group_B = [[d] for d in abs(time_since_down[edginess < HV_cutoff])]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
WE = np.concatenate(was_escape)
TTS_spont = np.concatenate(time_to_shelter)[~WE]
TTS_escape = np.concatenate(time_to_shelter)[WE]
trials = np.array(list(flatten(all_data[3])))
edgy = np.array(list(flatten(all_data[0])))
np.mean(edgy[trials == 0])
np.mean(edgy[trials == 1])
np.mean(edgy[trials == 2])
np.mean(edgy[trials == 3])
np.mean(edgy[trials == 4])
np.mean(edgy[trials == 5])
np.mean(edgy[trials == 6])
np.mean(edgy[trials == 7])
np.mean(edgy[trials == 8])
np.mean(edgy[trials == 9])
np.mean(edgy[trials == 10])
np.mean(edgy[trials == 11])
np.mean(edgy[trials == 12])
np.mean(edgy[trials == 13])
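# --- Illustrative sketch (hypothetical helper) ----------------------------------
# The block of np.mean(...) calls above inspects, value by value, the fraction
# of edge-vector escapes for each entry of `trials`.  The same summary can be
# produced in one pass; pandas is imported locally to keep the sketch
# self-contained.
def _edgy_by_trial_sketch(trials, edgy):
    import pandas as pd
    return pd.Series(edgy).groupby(pd.Series(trials)).mean()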
'''
TRADITIONAL METRICS
'''
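# --- Illustrative sketch (hypothetical stand-in) ---------------------------------
# `fit_kde` / `plot_kde` come from the shared analysis helpers and are used by
# several plotting functions in this file but are not defined here.  A rough
# stand-in using scipy, assuming `bw` is meant as an absolute bandwidth (the
# real helper may interpret it as a scaling factor instead):
def _fit_kde_sketch(values, bw=0.04, n_grid=200):
    import numpy as np
    from scipy.stats import gaussian_kde
    values = np.asarray(values, dtype=float)
    kde = gaussian_kde(values, bw_method=bw / values.std(ddof=1))
    grid = np.linspace(values.min(), values.max(), n_grid)
    return grid, kde(grid)  # evaluate on a grid, e.g. to draw a half-violin outline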
def plot_metrics_by_strategy(self):
''' plot escape metrics (reaction time, speed, path efficiency, duration) broken down by strategy '''
# initialize parameters
edge_vector_color = np.array([1, .95, .85])
homing_vector_color = np.array([.725, .725, .725])
non_escape_color = np.array([0,0,0])
ETD = 10#0
traj_loc = 40
fps = 30
# escape_duration = 12 #12 #9 #12 9 for food 12 for dark
HV_cutoff = .681 #.65
edgy_cutoff = .681
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, duration_RT, duration, prev_edginess, edginess, _, _, _, _, \
_, _, _, _, _, scaling_factor, time, trial_num, trials, edginess, avg_speed, avg_speed_RT, peak_speed, RT, escape_speed, strategy = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
mouse_id = efficiency.copy()
m = 0
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
print(mouse)
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop across all trials
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
if 'food' in experiment: escape_duration = 9
else: escape_duration = 12
trial_num += 1
# impose conditions - escape duration
end_time = self.analysis[experiment][condition]['end time'][mouse][trial]
if np.isnan(end_time) or (end_time > (escape_duration * fps)): continue
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# needs to start at top
if y_start > 25: continue
if abs(x_start - 50) > 30: continue
# get the strategy used
# edgy_escape = self.analysis[experiment][condition]['edginess'][mouse][trial] > edgy_cutoff
# is it a homing vector
# strategy_code = 0
# TEMPORARY COMMENTING
# if not edgy_escape:
# if self.analysis[experiment][condition]['edginess'][mouse][trial] < HV_cutoff: strategy_code = 0 # homing vector
# else: continue
# else:
# get the strategy used -- NUMBER OF PREVIOUS EDGE VECTOR HOMINGS
time_to_shelter, SR = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, [], [],
scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
if t > 2: continue
# if c == 0 and trial: continue
# if c == 1 and trial != 2: continue
t+=1
# if prev_edginess[trial_num] >= HV_cutoff: strategy_code = 1 # path learning
# elif prev_edginess[trial_num] < HV_cutoff: strategy_code = 2 # map-based
# else: continue
# how many prev homings to that edge: if 0, then map-based, if >1, then PL
if len(self.analysis[experiment]['probe']['start time'][mouse]):
edge_time = self.analysis[experiment]['probe']['start time'][mouse][0] - 1
else: edge_time = 19
edge_time = np.min((edge_time, self.analysis[experiment][condition]['start time'][mouse][trial]))
# print(edge_time)
num_edge_vectors, _ = get_num_edge_vectors(self, experiment, condition, mouse, trial, ETD=ETD, time_threshold=edge_time, other_side = False)
num_edge_vectors = get_num_homing_vectors(self, experiment, condition, mouse, trial, spontaneous = False, time_threshold = edge_time)  # note: overwrites the edge-vector count from the line above with the homing-vector count
print(num_edge_vectors)
# if 'wall up' in experiment and 'no' in condition: num_edge_vectors = 0
# print(num_edge_vectors)
if False or True:
if num_edge_vectors == 1:
strategy_code = 1
# print('EV -- ' + mouse + ' - trial ' + str(trial))
elif num_edge_vectors == 0:
strategy_code = 0
# print('NO EV -- ' + mouse + ' - trial ' + str(trial))
else: continue
else:
strategy_code = 0
strategy[trial_num] = strategy_code
# add data for each metric
RT[trial_num] = self.analysis[experiment][condition]['RT'][mouse][trial]
avg_speed[trial_num] = np.mean(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps : 10*fps+int(end_time)]) * scaling_factor * 30
avg_speed_RT[trial_num] = np.mean(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps + int(RT[trial_num]*30) : 10*fps+int(end_time)]) * scaling_factor * 30
peak_speed[trial_num] = np.max(self.analysis[experiment][condition]['speed'][mouse][trial][10*fps : 10*fps+int(end_time)])*fps*scaling_factor
escape_speed[trial_num] = self.analysis[experiment][condition]['optimal path length'][mouse][trial] * scaling_factor / (end_time/30)
efficiency[trial_num] = np.min((1, self.analysis[experiment][condition]['optimal path length'][mouse][trial] / \
self.analysis[experiment][condition]['full path length'][mouse][trial]))
efficiency_RT[trial_num] = np.min((1, self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / \
self.analysis[experiment][condition]['RT path length'][mouse][trial]))
duration_RT[trial_num] = (end_time / fps - RT[trial_num]) / self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / scaling_factor * 100
duration[trial_num] = end_time / fps / self.analysis[experiment][condition]['optimal path length'][mouse][trial] / scaling_factor * 100
# duration[trial_num] = trial
# duration_RT[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial]
avg_speed[trial_num] = self.analysis[experiment][condition]['time exploring far (pre)'][mouse][trial] / 60  # note: overwrites avg_speed with 'time exploring far (pre)' / 60
# add data for stats
mouse_id[trial_num] = m
m+=1
# for metric, data in zip(['Reaction time', 'Peak speed', 'Avg speed', 'Path efficiency - RT','Duration - RT', 'Duration'],\
# [RT, peak_speed, avg_speed_RT, efficiency_RT, duration_RT, duration]):
# for metric, data in zip(['Reaction time', 'Avg speed', 'Path efficiency - RT'], #,'Peak speed', 'Duration - RT', 'Duration'], \
# [RT, avg_speed_RT, efficiency_RT]): #peak_speed, , duration_RT, duration
for metric, data in zip(['Path efficiency - RT'], [efficiency_RT]):
# for metric, data in zip([ 'Duration - RT'],
# [ duration_RT]):
# for metric, data in zip(['trial', 'time', 'time exploring back'],
# [duration, duration_RT, avg_speed]):
# format data
x_data = strategy[~np.isnan(data)]
y_data = data[~np.isnan(data)]
if not c: OF_data = y_data
# make figure
fig, ax = plt.subplots(figsize=(11, 9))
plt.axis('off')
# ax.margins(0, 0)
ax.xaxis.set_major_locator(plt.NullLocator())
ax.yaxis.set_major_locator(plt.NullLocator())
# ax.set_title(metric)
if 'Reaction time' in metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [2, 2], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [3, 3], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [4, 4], linestyle='--', color=[.5, .5, .5, .5])
elif 'Peak speed' in metric:
ax.plot([-.75, 3], [40, 40], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [80, 80], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [120, 120], linestyle='--', color=[.5, .5, .5, .5])
elif 'Avg speed' in metric:
ax.plot([-.75, 3], [25, 25], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [50, 50], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [75, 75], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
elif 'Path efficiency' in metric:
ax.plot([-.75, 3], [.5,.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [.75, .75], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
elif 'Duration' in metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [10, 10], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [5, 5], linestyle='--', color=[.5, .5, .5, .5])
elif 'time' == metric:
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [10, 10], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [20, 20], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [30, 30], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [40, 40], linestyle='--', color=[.5, .5, .5, .5])
elif 'exploring' in metric:
ax.plot([-.75, 3], [2.5, 2.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [5.0, 5.0], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [7.5, 7.5], linestyle='--', color=[.5, .5, .5, .5])
ax.plot([-.75, 3], [0, 0], linestyle='--', color=[.5, .5, .5, .5])
#initialize stats array
stats_data = [[], [], []]
# go thru each strategy
for s in [0,1,2]:
# format data
if not np.sum(x_data==s): continue
plot_data = y_data[x_data==s]
median = np.percentile(plot_data, 50);
third_quartile = np.percentile(plot_data, 75);
first_quartile = np.percentile(plot_data, 25)
# print(first_quartile)
# print(median)
# print(third_quartile)
# if 'Reaction' in metric: print(str(first_quartile), str(median), str(third_quartile))
IQR = third_quartile - first_quartile
# remove outliers
if not metric == 'trial':
outliers = abs(plot_data - median) > 2*IQR
# plot_data = plot_data[~outliers]
# plot all data
ax.scatter(np.ones_like(plot_data)*s, plot_data, color=[0,0,0], s=30, zorder = 99)
# plot kde
if 'efficiency' in metric: bw_factor = .02
elif 'speed' in metric or metric == 'time': bw_factor = .04  # 'efficiency' already handled above
elif 'exploring' in metric: bw_factor = .06
elif 'Duration' in metric: bw_factor = .07
else: bw_factor = .09
kde = fit_kde(plot_data, bw=np.median(y_data)*bw_factor)
plot_kde(ax, kde, plot_data, z= s + .1, vertical=True, normto=.4, color=[.75, .75, .75], violin=False, clip=True)
# plot errorbar
ax.errorbar(s - .15, median, yerr=np.array([[median - first_quartile], [third_quartile - median]]), color=[0, 0, 0], capsize=10, capthick=3, alpha=1, linewidth=3)
ax.scatter(s - .15, median, color=[0, 0, 0], s=175, alpha=1)
# print(len(plot_data))
# get mouse ids for stats
mouse_id_stats = mouse_id[~np.isnan(data)]
mouse_id_stats = mouse_id_stats[x_data==s]
if not metric == 'trial': mouse_id_stats = mouse_id_stats[~outliers]
# for m in np.unique(mouse_id_stats):
# stats_data[s].append( list(plot_data[mouse_id_stats==m]) )
print(metric)
# for ss in [[0,1]]: #, [0,2], [1,2]]:
# group_A = stats_data[ss[0]]
# group_B = stats_data[ss[1]]
# permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
# save figure
fig.savefig(os.path.join(self.summary_plots_folder, metric + ' - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
fig.savefig(os.path.join(self.summary_plots_folder, metric + ' - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
plt.show()
plt.close('all')
# note: the comparisons below reference efficiency arrays (tr1_eff, tr3_eff, OF_eff)
# that appear to have been collected interactively across runs of this function;
# they are kept here as a record of the stats that were run
group_A = [[e] for e in tr1_eff]
group_B = [[e] for e in tr3_eff]
group_C = [[e] for e in OF_eff]
permutation_test(group_A, group_B, iterations=10000, two_tailed=True)
permutation_test(group_A, group_C, iterations=10000, two_tailed=True)
permutation_test(group_B, group_C, iterations=10000, two_tailed=True)
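# --- Illustrative sketch ----------------------------------------------------------
# The outlier rule used inside plot_metrics_by_strategy above flags values more
# than two interquartile ranges from the median; it is restated here as a
# standalone helper (hypothetical name) purely to make the rule explicit.
def _iqr_outliers_sketch(values):
    import numpy as np
    q1, med, q3 = np.percentile(values, [25, 50, 75])
    iqr = q3 - q1
    return np.abs(values - med) > 2 * iqr  # boolean mask of flagged outliers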
'''
DIST OF TURN ANGLES
'''
# def plot_metrics_by_strategy(self):
# ''' plot the escape paths '''
#
# ETD = 10
# traj_loc = 40
#
# fps = 30
# escape_duration = 12
#
# colors = [[.3,.3,.3,.5], [.5,.5,.8, .5]]
#
# # make figure
# fig, ax = plt.subplots(figsize=(11, 9))
# fig2, ax2 = plt.subplots(figsize=(11, 9))
# # plt.axis('off')
# # ax.margins(0, 0)
# # ax.xaxis.set_major_locator(plt.NullLocator())
# # ax.yaxis.set_major_locator(plt.NullLocator())
# all_angles_pre = []
# all_angles_escape = []
#
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# shape = self.analysis[sub_experiments[0]]['obstacle']['shape']
# scaling_factor = 100 / shape[0]
# turn_angles_pre = []
# turn_angles_escape = []
#
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# print(mouse)
# # control analysis
# if self.analysis_options['control'] and not mouse=='control': continue
# if not self.analysis_options['control'] and mouse=='control': continue
# # loop across all trials
# t = 0
# for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
# # impose conditions - escape duration
# end_time = self.analysis[experiment][condition]['end time'][mouse][trial]
# if np.isnan(end_time) or (end_time > (escape_duration * fps)): continue
#
#
# ## COMMENT ONE OR THE OTHER IF TESTING PRE OR ESCAPE
# #pre
# # if trial < 2: continue
# # if t: continue
#
# # escape
# if t > 2: continue
#
# # skip certain trials
# y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
# x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# # needs to start at top
# if y_start > 25: continue
# if abs(x_start - 50) > 30: continue
#
# turn_angles_pre.append(list(abs(np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3])))) # >145
# turn_angles_escape.append(abs(self.analysis[experiment][condition]['movement'][mouse][trial][2])) # >145
# #
# # turn_angles_pre.append(list(np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3])))
# # turn_angles_escape.append(self.analysis[experiment][condition]['movement'][mouse][trial][2])
#
# t += 1
#
#
#
# # format data
# hist_data_pre = np.array(list(flatten(turn_angles_pre)))
# hist_data_escape = np.array(list(flatten(turn_angles_escape)))
#
# # for permutation test
# # all_angles_pre.append(turn_angles_pre)
# # all_angles_escape.append([[tae] for tae in turn_angles_escape])
#
# ax.set_title('Prior movement angles')
# ax2.set_title('Escape movement angles')
# ax.plot([0, 0], [0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax.plot([90, 90],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax.plot([180, 180],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([0, 0], [0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([90, 90],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
# ax2.plot([180, 180],[0, .4], linestyle='--', color=[.5, .5, .5, .5])
#
# # format data
# bin_width = 30
# hist_pre, n, _ = ax.hist(hist_data_pre, bins=np.arange(-0, 180+bin_width, bin_width), color=colors[c], weights = np.ones_like(hist_data_pre) * 1/ len(hist_data_pre))
# hist_escape, n, _ = ax2.hist(hist_data_escape, bins=np.arange(-0, 180+bin_width, bin_width), color=colors[c], weights = np.ones_like(hist_data_escape) * 1/ len(hist_data_escape))
#
# count_pre, n = np.histogram(hist_data_pre, bins=np.arange(-0, 180+bin_width, bin_width))
# count_escape, n = np.histogram(hist_data_escape, bins=np.arange(-0, 180+bin_width, bin_width))
#
# # for chi squared
# all_angles_pre.append(count_pre)
# all_angles_escape.append(count_escape)
#
#
# # save figure
# fig.savefig(os.path.join(self.summary_plots_folder, 'Prior Angle dist.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'Prior Angle dist.eps'), format='eps', bbox_inches='tight', pad_inches=0)
# # save figure
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Escape Angle dist.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig2.savefig(os.path.join(self.summary_plots_folder, 'Escape Angle dist.eps'), format='eps', bbox_inches='tight', pad_inches=0)
#
# plt.show()
#
#
# scipy.stats.chi2_contingency(all_angles_pre)
# scipy.stats.chi2_contingency(all_angles_escape)
#
#
# group_A = all_angles_pre[0]
# group_B = all_angles_pre[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# group_A = all_angles_escape[0]
# group_B = all_angles_escape[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# plt.close('all')
#
# '''
# DIST OF EDGE VECTORS
# '''
# def plot_metrics_by_strategy(self):
# ''' plot the escape paths '''
#
# ETD = 10
# traj_loc = 40
#
# fps = 30
# escape_duration = 12
#
# dist_thresh = 5
# time_thresh = 20
#
# colors = [[.3,.3,.3,.5], [.5,.5,.8, .5]]
#
# # make figure
# fig1, ax1 = plt.subplots(figsize=(11, 9))
# fig2, ax2 = plt.subplots(figsize=(11, 9))
# # plt.axis('off')
# # ax.margins(0, 0)
# # ax.xaxis.set_major_locator(plt.NullLocator())
# # ax.yaxis.set_major_locator(plt.NullLocator())
# all_EVs = []
# all_HVs = []
#
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# shape = self.analysis[sub_experiments[0]]['obstacle']['shape']
# scaling_factor = 100 / shape[0]
# EVs = []
# HVs = []
# edge_vector_time_exp = []
#
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['speed']):
# print(mouse)
# # control analysis
# if self.analysis_options['control'] and not mouse=='control': continue
# if not self.analysis_options['control'] and mouse=='control': continue
# # just take the last trial
# trial = len(self.analysis[experiment][condition]['start time'][mouse])-1
# if trial < 0:
# if condition == 'obstacle':
# condition_use = 'no obstacle'
# trial = 0
# elif condition == 'no obstacle':
# condition_use = 'obstacle'
# trial = len(self.analysis[experiment][condition]['start time'][mouse])-1
# if mouse == 'CA7220': trial = 1 #compensate for extra vid
# else: condition_use = condition
#
# # get the prev homings
# SH_data = self.analysis[experiment][condition_use]['prev homings'][mouse][trial]
#
# # get their start time
# homing_time = np.array(SH_data[3])
# edge_vector_time_exp.append(list(homing_time))
#
# # get their x value
# SH_x = np.array(SH_data[0])
#
# # only use spontaneous
# stim_evoked = np.array(SH_data[4])
# SH_x = SH_x[~stim_evoked]
# homing_time = homing_time[~stim_evoked]
#
# # normalize to 20 min
# SH_x = SH_x[homing_time < time_thresh] / np.min((time_thresh, self.analysis[experiment][condition_use]['start time'][mouse][trial])) * 20
#
# # get number of edge vectors
# num_edge_vectors = np.sum(abs(SH_x - 25) < dist_thresh) + np.sum(abs(SH_x - 75) < dist_thresh)
# num_homing_vectors = np.sum(abs(SH_x - 50) < dist_thresh)
# print(num_edge_vectors)
#
#
# # get the prev anti homings
# anti_SH_data = self.analysis[experiment][condition_use]['prev anti-homings'][mouse][trial]
#
# # get their start time
# homing_time = np.array(anti_SH_data[3])
# edge_vector_time_exp.append(list(homing_time))
#
# # get their x value
# anti_SH_x = np.array(anti_SH_data[0])
#
# # limit to 20 min
# anti_SH_x = anti_SH_x[homing_time < time_thresh] / np.min((time_thresh, self.analysis[experiment][condition_use]['start time'][mouse][trial])) * 20
#
# # get number of edge vectors
# num_anti_edge_vectors = np.sum(abs(anti_SH_x - 25) < dist_thresh) + np.sum(abs(anti_SH_x - 75) < dist_thresh)
# num_anti_homing_vectors = np.sum(abs(anti_SH_x - 50) < dist_thresh)
# print(num_anti_edge_vectors)
#
# # append to list
# EVs.append(num_edge_vectors + num_anti_edge_vectors )
# HVs.append(num_edge_vectors + num_anti_edge_vectors - (num_homing_vectors + num_anti_homing_vectors))
# print(EVs)
# all_EVs.append(EVs)
# all_HVs.append(HVs)
#
# # make timing hist
# plt.figure()
# plt.hist(list(flatten(edge_vector_time_exp)), bins=np.arange(0, 22.5, 2.5)) #, color=condition_colors[c])
#
# # plot EVs and HVs
# for plot_data, ax, fig in zip([EVs, HVs], [ax1, ax2], [fig1, fig2]):
#
# scatter_axis = scatter_the_axis(c * 4 / 3 + .5 / 3, plot_data)
# ax.scatter(scatter_axis, plot_data, color=[0, 0, 0], s=25, zorder=99)
# # do kde
# kde = fit_kde(plot_data, bw=.5)
# plot_kde(ax, kde, plot_data, z=4 * c + .8, vertical=True, normto=1.2, color=[.5, .5, .5], violin=False, clip=False) # True)
#
# # save figure
# fig.savefig(os.path.join(self.summary_plots_folder, 'EV dist - ' + self.labels[c] + '.png'), format='png', bbox_inches='tight', pad_inches=0)
# fig.savefig(os.path.join(self.summary_plots_folder, 'EV dist - ' + self.labels[c] + '.eps'), format='eps', bbox_inches='tight', pad_inches=0)
#
#
# plt.show()
#
#
# group_A = all_EVs[1]
# group_B = all_EVs[2]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# group_A = all_HVs[0]
# group_B = all_HVs[1]
# permutation_test(group_A, group_B, iterations = 10000, two_tailed = True)
#
# plt.close('all')
'''
PREDICTION PLOTS, BY TURN ANGLE OR EXPLORATION/EDGINESS
|
|
v
'''
def plot_prediction(self):
by_angle_not_edginess = False
if by_angle_not_edginess:
# initialize parameters
fps = 30
escape_duration = 12
ETD = 10 #4
traj_loc = 40
# initialize figures
fig1, ax1, fig2, ax2, fig3, ax3 = initialize_figures_prediction(self)
plt.close(fig2); plt.close(fig3)
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
mouse_trial_list = []
IC_x_all, IC_y_all, IC_angle_all, IC_time_all, turn_angles_all = [], [], [], [], []
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, x_pred, y_pred, angle_pred, time_pred, mean_pred, initial_body_angle, initial_x, initial_y, x_edge, _, \
_, _, _, _, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# initialize array to fill in with each trial's data
edginess, end_idx, angle_turned, _, _, prev_edginess, scaling_factor, _, trial_num, _, _, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
for shuffle_time in [False, True]:
angle_turned_all, x_pred_all, y_pred_all, angle_pred_all, time_pred_all, mean_pred_all = [], [], [], [], [], []
num_repeats = shuffle_time * 499 + 1 #* 19
                    num_repeats = shuffle_time * 19 + 1  # overrides the 499-repeat setting on the line above
prediction_scores_all = []
for r in range(num_repeats):
trial_num = -1
# loop over each experiment and condition
for e, (experiment_real, condition_real) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse_real in enumerate(self.analysis[experiment_real][condition_real]['start time']):
if self.analysis_options['control'] and not mouse_real=='control': continue
if not self.analysis_options['control'] and mouse_real=='control': continue
# loop over each trial
prev_homings = []
t = 0
for trial_real in range(len(self.analysis[experiment_real][condition_real]['end time'][mouse_real])):
trial_num += 1
# impose conditions
if t > 2: continue
end_idx[trial_num] = self.analysis[experiment_real][condition_real]['end time'][mouse_real][trial_real]
if np.isnan(end_idx[trial_num]): continue
if (end_idx[trial_num] > escape_duration * fps): continue
# skip certain trials
y_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][1][0] * scaling_factor
x_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue
# use different data if shuffle:
# if shuffle_time:
# experiment, condition, mouse, trial = mouse_trial_list[np.random.randint(len(mouse_trial_list))]
# else:
# experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' just use real mouse '''
experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
''' control ICs, real escape '''
# # get the angle turned during the escape
angle_turned[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][2]
# angle_turned[trial_num] = abs(self.analysis[experiment_real][condition_real]['edginess'][mouse_real][trial_real])
# get the angle turned, delta x, delta y, and delta phi of previous homings
bout_start_angle = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
bout_start_position = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0]
start_time = self.analysis[experiment_real][condition_real]['start time'][mouse_real][trial_real]
# get initial conditions and endpoint quantities
IC_x = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][0][-ETD:])
IC_y = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][1][-ETD:])
IC_angle = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][2][-ETD:])
IC_time = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][3][-ETD:])
turn_angles = np.array(self.analysis[experiment][condition]['prev movements'][mouse][trial][3][-ETD:])
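                                    # Added note: these are the initial conditions (ICs) of up to the last ETD prior homing
                                    # movements -- start x, start y, body angle, start time -- plus the turn angle each one
                                    # produced; they are compared to the current trial's ICs below to weight the turn angles.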
# MOE = 10
# x_edge_trial = self.analysis[experiment][condition]['x edge'][mouse][trial]
# SH_x = np.array(self.analysis[experiment][condition]['prev homings'][mouse][trial][0][-ETD:])
# if x_edge_trial > 50 and np.sum(SH_x > 25 + MOE):
# IC_x = IC_x[SH_x > 25 + MOE]
# IC_y = IC_y[SH_x > 25 + MOE]
# IC_angle = IC_angle[SH_x > 25 + MOE]
# IC_time = IC_time[SH_x > 25 + MOE]
# turn_angles = turn_angles[SH_x > 25 + MOE]
# elif np.sum(SH_x > 75 - MOE):
# IC_x = IC_x[SH_x > 75 - MOE]
# IC_y = IC_y[SH_x > 75 - MOE]
# IC_angle = IC_angle[SH_x > 75 - MOE]
# IC_time = IC_time[SH_x > 75 - MOE]
# turn_angles = turn_angles[SH_x > 75 - MOE]
if not shuffle_time: # gather previous movements
IC_x_all = np.concatenate((IC_x_all, IC_x))
IC_y_all = np.concatenate((IC_y_all, IC_y))
IC_angle_all = np.concatenate((IC_angle_all, IC_angle))
IC_time_all = np.concatenate((IC_time_all, IC_time))
turn_angles_all = np.concatenate((turn_angles_all, turn_angles))
else:
# sample randomly from these movements
random_idx = np.random.choice(len(IC_x_all), len(IC_x_all), replace = False)
IC_x = IC_x_all[random_idx]
IC_y = IC_y_all[random_idx]
IC_angle = IC_angle_all[random_idx]
IC_time = IC_time_all[random_idx]
turn_angles = turn_angles_all[random_idx]
# calculate difference in ICs
delta_x = abs( np.array(IC_x - bout_start_position[0]) )
delta_y = abs( np.array(IC_y - bout_start_position[1]) )
delta_angle = abs( np.array(IC_angle - bout_start_angle) )
delta_angle[delta_angle > 180] = 360 - delta_angle[delta_angle > 180]
delta_time = start_time - np.array(IC_time)
''' prediction data -- angle turned is a function of prev movement and ICs '''
x_weights = (1 / (delta_x+.0001)) / np.sum(1/(delta_x+.0001))
y_weights = (1 / (delta_y+.0001)) / np.sum(1 / (delta_y+.0001))
angle_weights = (1 / (delta_angle+.0001)) / np.sum(1 / (delta_angle+.0001))
time_weights = (1 / (delta_time+.0001)) / np.sum(1 / (delta_time+.0001))
x_pred[trial_num] = np.sum(turn_angles * x_weights)
y_pred[trial_num] = np.sum(turn_angles * y_weights)
angle_pred[trial_num] = np.sum(turn_angles * angle_weights)
time_pred[trial_num] = np.sum(turn_angles * time_weights) * 0
mean_pred[trial_num] = np.mean(turn_angles) * 0
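                                    # Weighting sketch (my reading of the lines above): each prior turn angle is weighted by
                                    # 1 / (IC difference + 1e-4), normalized to sum to 1, so homings launched from similar
                                    # positions/angles dominate the prediction; e.g. delta_x = [1, 9] gives x-weights of
                                    # roughly [0.9, 0.1]. Note time_pred and mean_pred are multiplied by 0 here, so only the
                                    # x/y/angle-weighted predictions carry information downstream.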
# try mean pred is the *closest* angle to real
# x_pred[trial_num] = 0
# y_pred[trial_num] = 0
# angle_pred[trial_num] = 0
# time_pred[trial_num] = 0
# mean_pred[trial_num] = turn_angles[np.argmin( abs(turn_angles - angle_turned[trial_num]) )]
# ''' turn angle prediction to edginess prediction '''
if not shuffle_time:
edginess[trial_num] = abs(self.analysis[experiment][condition]['edginess'][mouse][trial])
initial_body_angle[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][1]
initial_x[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0][0]
initial_y[trial_num] = self.analysis[experiment_real][condition_real]['movement'][mouse_real][trial_real][0][1]
x_edge[trial_num] = self.analysis[experiment][condition]['x edge'][mouse][trial_real]
# add mouse and trial to list of mice and trials
if not shuffle_time:
mouse_trial_list.append([experiment, condition, mouse, trial])
t+=1
''' concatenate??... '''
# angle_turned_all = np.concatenate((angle_turned_all, angle_turned))
#
# x_pred_all = np.concatenate((x_pred_all, x_pred))
# y_pred_all = np.concatenate((y_pred_all, y_pred))
# angle_pred_all = np.concatenate((angle_pred_all, angle_pred))
# time_pred_all = np.concatenate((time_pred_all, time_pred ))
# mean_pred_all = np.concatenate((mean_pred_all, mean_pred ))
#
#
# IC_angle_array = np.ones((len(angle_turned_all[~np.isnan(angle_turned_all)]), 5))
# angle_metrics = [x_pred_all[~np.isnan(angle_turned_all)], y_pred_all[~np.isnan(angle_turned_all)], angle_pred_all[~np.isnan(angle_turned_all)], \
# time_pred_all[~np.isnan(angle_turned_all)], mean_pred_all[~np.isnan(angle_turned_all)]]
# for i, angle_metric in enumerate(angle_metrics): #
# IC_angle_array[:, i] = angle_metric
#
# # get the data
# predict_data_y_all = [ angle_turned_all[~np.isnan(angle_turned_all)].reshape(-1, 1)] # for the movements input data
''' don't concatenate... '''
IC_angle_array = np.ones((len(angle_turned[~np.isnan(angle_turned)]), 5))
angle_metrics = [x_pred[~np.isnan(angle_turned)], y_pred[~np.isnan(angle_turned)],
angle_pred[~np.isnan(angle_turned)], \
time_pred[~np.isnan(angle_turned)], mean_pred[~np.isnan(angle_turned)]]
for i, angle_metric in enumerate(angle_metrics): #
IC_angle_array[:, i] = angle_metric
# get the data
predict_data_y_all_angle = [angle_turned[~np.isnan(angle_turned)].reshape(-1, 1)] # for the movements input data
predict_data_y_all_edgy = [edginess[~np.isnan(edginess)].reshape(-1, 1)] # for the movements input data
data_y_labels = ['angle']
predict_data_x_all = [IC_angle_array] # turn angles
predict_data_y_all = predict_data_y_all_angle # angles
''' predict edginess from turn angle '''
predict_edginess = True
if predict_edginess:
if not shuffle_time:
initial_body_angle = initial_body_angle[~np.isnan(initial_body_angle)].reshape(-1, 1)
initial_x = initial_x[~np.isnan(initial_x)].reshape(-1, 1)
initial_y = initial_y[~np.isnan(initial_y)].reshape(-1, 1)
x_edge = x_edge[~np.isnan(x_edge)].reshape(-1, 1)
# create the model
LR = linear_model.Ridge(alpha=.1)
# train the model
LR.fit(predict_data_x_all[0], predict_data_y_all_angle[0])
print(LR.score(predict_data_x_all[0], predict_data_y_all_angle[0]))
# get the model prediction
# model_prediction = LR.predict(predict_data_x_all[0])
model_prediction = predict_data_y_all_angle[0]
# predict body angles after turn
predicted_body_angle = initial_body_angle[~np.isnan(initial_body_angle)].reshape(-1, 1) - model_prediction
predicted_body_angle[predicted_body_angle >180] = predicted_body_angle[predicted_body_angle >180] - 360
predicted_body_angle[(predicted_body_angle > 0) * (predicted_body_angle < 90)] = -1 # super edgy to the right
predicted_body_angle[(predicted_body_angle > 0) * (predicted_body_angle > 90)] = 1 # super edgy to the right
# predict position at y = 40; set reasonable boundaries
x_at_40 = np.maximum(15 * np.ones_like(initial_x), np.minimum(90 * np.ones_like(initial_x),
initial_x - (40 - initial_y) / np.tan(np.deg2rad(predicted_body_angle)) ))
# get edginess
y_pos_end = 86.5; x_pos_end = 50; y_edge = 50
slope = (y_pos_end - initial_y) / (x_pos_end - (initial_x+.0001))
intercept = initial_y - initial_x * slope
distance_to_line = abs(40 - slope * x_at_40 - intercept) / np.sqrt((-slope) ** 2 + (1) ** 2)
homing_vector_at_center = (40 - intercept) / slope
# do line from starting position to edge position
slope = (y_edge - initial_y) / (x_edge - initial_x)
intercept = initial_y - initial_x * slope
distance_to_edge = abs(40 - slope * x_at_40 - intercept) / np.sqrt((-slope) ** 2 + (1) ** 2)
# compute the max possible deviation
edge_vector_at_center = (40 - intercept) / slope
line_to_edge_offset = abs(homing_vector_at_center - edge_vector_at_center) # + 5
# get index at center point (wall location)
# prev_edginess = np.maximum(np.zeros_like(distance_to_line), np.minimum(1.2*np.ones_like(distance_to_line),
# (distance_to_line - distance_to_edge + line_to_edge_offset) / (2 * line_to_edge_offset) ))
prev_edginess = abs((distance_to_line - distance_to_edge + line_to_edge_offset) / (2 * line_to_edge_offset))
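                            # Sketch of the conversion above (my reading): the predicted turn is applied to the initial body
                            # angle, positive resulting angles are clamped to -1 (below 90 deg) or +1 (above 90 deg), and an
                            # x-position at y = 40 is extrapolated along that heading. Edginess is then that point's distance
                            # to the straight line from the start toward (50, 86.5) (presumably the shelter) versus its
                            # distance to the line toward the obstacle edge (x_edge, 50), normalized by the separation of the
                            # two lines at y = 40 -- so ~0 is a homing-vector path and ~1 an edge-vector path.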
predict_data_x_all = [prev_edginess] # predicted prev edginess #scipy.stats.zscore(
predict_data_y_all = predict_data_y_all_edgy # edginess
# edgy input colors
input_colors = [ [[0, .6, .4], [.5,.5,.5]], [[0, .6, .4], [.5,.5,.5]], [[.6, 0, .4], [.5,.5,.5]] ]
# split the data for cross val
num_trials = 1000 - 985 * shuffle_time #985
                        # loop across angle prediction and traj prediction
for i, (fig, ax, predict_data_x) in enumerate(zip([fig1, fig2, fig3],[ax1, ax2, ax3], predict_data_x_all)):
# get prediction data
predict_data_y = predict_data_y_all[i]
# get color
color = input_colors[i][int(shuffle_time)]
# initialize prediction arrays
prediction_scores = np.zeros(num_trials)
for j in range(num_trials):
test_size = 0.5
# test_size = 0.25
# if shuffle_time: test_size = 0.25
# get x-val set
X_train, X_test, y_train, y_test = train_test_split(predict_data_x, \
predict_data_y, test_size=test_size, random_state=j)
# create the model
LR = linear_model.Ridge(alpha = .1) # .15, .5
# train the model
LR.fit(X_train, y_train)
# get the score
prediction_scores[j] = LR.score(X_test, y_test)
# exclude super negative ones
# prediction_scores = prediction_scores[prediction_scores > np.percentile(prediction_scores, 10)]
# put into larger array
prediction_scores_all = np.concatenate((prediction_scores_all, prediction_scores))
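                            # Added note: Ridge.score returns the coefficient of determination R^2 on the held-out split, so
                            # prediction_scores_all pools a distribution of cross-validated R^2 values over random splits and,
                            # when shuffle_time is True, over shuffled repeats -- which appears to serve as a chance-level baseline.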
print(np.median(prediction_scores_all))
# exclude super negative ones
# prediction_scores_all = prediction_scores_all[prediction_scores_all > np.percentile(prediction_scores_all, 5)]
#do kde
kde = fit_kde(prediction_scores_all, bw=.03) # .04
plot_kde(ax, kde, prediction_scores_all, z = 0, vertical=False, color=color, violin=False, clip=False) # True)
#plt.show()
fig.savefig(os.path.join(self.summary_plots_folder,'Predictions of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.png'), format='png')
fig.savefig(os.path.join(self.summary_plots_folder,'Predictions of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
print('hi')
else:
'''
            PREDICTION PLOTS, BY EDGINESS OR BY **EXPLORATION**
'''
fps = 30
escape_duration = 12
ETD = 10 #4
traj_loc = 40
# mean_types = ['even', 'space', 'angle'] #, 'time', 'shelter time']
mean_types = ['space', 'angle', 'shelter time'] #, 'escape']
mean_type = 'even'
mean_colors = [[0, .6, .4], [0, .6, .8], [0, .6, .8], [.4, 0, 1] ]
mean_colors = [[0, .6, .4], [.4, 0, .8], [0, .6, .8], [.5, .5, .5]]
# initialize figures
fig1, ax1, fig2, ax2, fig3, ax3 = initialize_figures_prediction(self)
for m, mean_type in enumerate(mean_types):
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
mouse_trial_list = []
# initialize array to fill in with each trial's data
edginess, end_idx, angle_turned, _, _, prev_edginess, scaling_factor, _, trial_num, prev_movement_and_ICs, data_y_for_prev_movement, dist_to_SH, dist_to_other_SH = \
initialize_variable_edginess(number_of_trials, self, sub_experiments)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_front_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre, \
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge, time_exploring_other_edge, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
for shuffle_time in [False]:
num_repeats = shuffle_time * 19 + 1
for r in range(num_repeats):
trial_num = -1
# loop over each experiment and condition
for e, (experiment_real, condition_real) in enumerate(zip(sub_experiments, sub_conditions)):
# loop over each mouse
for i, mouse_real in enumerate(self.analysis[experiment_real][condition_real]['start time']):
if self.analysis_options['control'] and not mouse_real=='control': continue
if not self.analysis_options['control'] and mouse_real=='control': continue
# loop over each trial
prev_homings = []
t = 0
for trial_real in range(len(self.analysis[experiment_real][condition_real]['end time'][mouse_real])):
trial_num += 1
# impose conditions
if t > 2: continue
end_idx[trial_num] = self.analysis[experiment_real][condition_real]['end time'][mouse_real][trial_real]
if np.isnan(end_idx[trial_num]): continue
if (end_idx[trial_num] > escape_duration * fps): continue
# skip certain trials
y_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][1][0] * scaling_factor
x_start = self.analysis[experiment_real][condition_real]['path'][mouse_real][trial_real][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue
# use different data if shuffle:
if shuffle_time:
experiment, condition, mouse, trial = mouse_trial_list[np.random.randint(len(mouse_trial_list))]
else:
experiment, condition, mouse, trial = experiment_real, condition_real, mouse_real, trial_real
# just add real data for edginess etc
if not shuffle_time:
# add data
edginess[trial_num] = abs(self.analysis[experiment][condition]['edginess'][mouse][trial])
# get previous edginess
time_to_shelter, SR = get_prev_edginess(ETD, condition_real, experiment_real, mouse_real, prev_edginess, dist_to_SH,
dist_to_other_SH, scaling_factor, self, traj_loc, trial_real, trial_num, edginess,
[], [], mean = mean_type, get_initial_conditions=True)
# _, _, prev_edginess_all, elig_idx = get_all_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH, scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
# add data
fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV,
num_prev_homings_front_EV, num_prev_homings_other_EV,
num_prev_homings_HV,
time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post,
time_exploring_obstacle_pre,
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge,
time_exploring_other_edge,
self, time, trial, trial_num, trials, edginess, t)
# add mouse and trial to list of mice and trials
if not shuffle_time:
mouse_trial_list.append([experiment, condition, mouse, trial])
t+=1
# format mean prior trajectory
if not shuffle_time:
prev_edginess = prev_edginess[~np.isnan(edginess)]
exploration_array = np.ones((len(edginess[~np.isnan(edginess)]), 2))
exploration_metrics = [time_exploring_far_pre[~np.isnan(edginess)], time_exploring_far_post[~np.isnan(edginess)]]
for i, exploration_metric in enumerate(exploration_metrics): #
exploration_array[:, i] = exploration_metric
if shuffle_time: # regress out other variable
m = (((np.mean(prev_edginess) * np.mean(exploration_array[:, i])) - np.mean(prev_edginess * exploration_array[:, i])) /
((np.mean(prev_edginess) ** 2) - np.mean(prev_edginess ** 2)))
regressed_data = exploration_array[:, i] - prev_edginess * m
exploration_array[:, i] = regressed_data
if shuffle_time: # regress out exploration from mean prior traj
for exploration_metric in exploration_metrics:
m = (((np.mean(exploration_metric) * np.mean(prev_edginess)) - np.mean(exploration_metric * prev_edginess)) /
((np.mean(exploration_metric) ** 2) - np.mean(exploration_metric ** 2)))
regressed_data = prev_edginess - exploration_array[:, 0] * m
prev_edginess = regressed_data
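                                        # Added note: m above is the ordinary least-squares slope, since
                                        # (E[x]E[y] - E[xy]) / (E[x]^2 - E[x^2]) = cov(x, y) / var(x); subtracting m * x from y
                                        # removes the component of y that is linearly explained by x (used here to regress one
                                        # predictor out of the other before scoring).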
# get the data
predict_data_y_all = [ edginess[~np.isnan(edginess)].reshape(-1, 1), # for the EXPLORATION input data
edginess[~np.isnan(edginess)].reshape(-1, 1)] # for the mean edginess input data
# turn_angle_for_prev_movement ] # for the movements input data
data_y_labels = ['exploration','trajectory'] #, 'angle']
predict_data_x_all = [exploration_array, # exploration data
prev_edginess.reshape(-1, 1)]#, # mean prev edginess
# prev_movements_and_ICs_array] # all prev homing movements
# edgy input colors
input_colors = [ [[0, .6, .4], [.5,.5,.5]], [[0, .6, .4], [.5,.5,.5]], [[.6, 0, .4], [.5,.5,.5]] ]
# split the data for cross val
num_trials = 1000
                            # loop across angle prediction and traj prediction
for i, (fig, ax, predict_data_x) in enumerate(zip([fig1, fig2, fig3],[ax1, ax2, ax3], predict_data_x_all)):
# get prediction data
predict_data_y = predict_data_y_all[i]
# get color
color = input_colors[i][int(shuffle_time)]
# color = mean_colors[m]
# initialize prediction arrays
prediction_scores = np.zeros(num_trials)
for j in range(num_trials):
test_size = 0.5
if shuffle_time and i==2:
test_size = .025
# get x-val set
X_train, X_test, y_train, y_test = train_test_split(predict_data_x, \
predict_data_y, test_size=test_size, random_state=j)
# create the model
# LR = linear_model.LinearRegression()
# if i:
# LR = linear_model.LogisticRegression()
# else:
LR = linear_model.Ridge(alpha = .1) # .15, .5
# train the model
# try:
LR.fit(X_train, y_train)
# except:
# print('i=h')
# print(LR.coef_)
# get the score
prediction_scores[j] = LR.score(X_test, y_test)
print(data_y_labels[i])
print(np.median(prediction_scores))
# exclude super negative ones
prediction_scores = prediction_scores[prediction_scores > np.percentile(prediction_scores, 10)]
# plot the scores
# ax.scatter(prediction_scores, np.zeros_like(prediction_scores), color=color, s=20, alpha = .1)
#do kde
kde = fit_kde(prediction_scores, bw=.04) # .04
plot_kde(ax, kde, prediction_scores, z = 0, vertical=False, color=color, violin=False, clip=False) # True)
fig.savefig(os.path.join(self.summary_plots_folder,'Prediction of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.png'), format='png')
                                fig.savefig(os.path.join(self.summary_plots_folder,'Prediction of ' + data_y_labels[i] + ' - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
print('hi')
# # get the correlation
# r, p = scipy.stats.pearsonr(exploration_array[:, 0], edginess)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# m = (((np.mean(prev_edginess) * np.mean(exploration_array[:, 0])) - np.mean(prev_edginess * exploration_array[:, 0])) /
# ((np.mean(prev_edginess) ** 2) - np.mean(prev_edginess ** 2)))
#
# regressed_data = exploration_array[:, 0] - prev_edginess * m
# r, p = scipy.stats.pearsonr(prev_edginess, regressed_data)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# # get the correlation after regressing out prev edginess
# r, p = scipy.stats.pearsonr(regressed_data, edginess)
# print('r = ' + str(np.round(r, 3)) + '\n= ' + str(np.round(p, 3)))
# #
# def plot_efficiency(self):
# # initialize parameters
# fps = 30
# traj_loc = 40
# escape_duration = 12 # 12 #6
# HV_cutoff = .681
# ETD = 10
# # ax2, fig2, ax3, fig3 = initialize_figures_efficiency(self)
# efficiency_data = [[], [], [], []]
# duration_data = [[], [], [], []]
# # initialize arrays for stats
# efficiency_data_all = []
# duration_data_all = []
# prev_homings_data_all = []
# all_conditions = []
# mouse_ID = [];
# m = 1
# data_condition = ['naive', 'experienced']
# # data_condition = ['food','escape']
# # data_condition = ['OR - EV', 'OR - HV', 'OF']
# fig1, ax1 = plt.subplots(figsize=(13, 5))
#
# colors = [[1,0,0],[0,0,0]]
# kde_colors = [ [1, .4, .4], [.75, .75, .75]]
#
# # loop over experiments and conditions
# for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
# # extract experiments from nested list
# sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# # get the number of trials
# number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
# number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# # initialize array to fill in with each trial's data
# efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring, distance_exploring, time_exploring_obstacle, time_exploring_far, \
# scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
# initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# # loop over each experiment and condition
# for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
# if 'void' in experiment or 'dark' in experiment:
# escape_duration = 12
# # loop over each mouse
# for i, mouse in enumerate(self.analysis[experiment][condition]['full path length']):
# # initialize arrays for stats
# efficiency_data_mouse = []
# duration_data_mouse = []
# prev_homings_data_mouse = []
# # control analysis
# if self.analysis_options['control'] and not mouse == 'control': continue
# if not self.analysis_options['control'] and mouse == 'control': continue
# # loop over each trial
# t = 0
# for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
#
# trial_num += 1
# if t > 2 and not 'food' in experiment and not 'void' in experiment: continue
#
# if t > 8: continue
# # print(t)
# # impose coniditions
# end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
# if (end_idx[trial_num] > escape_duration * fps) or np.isnan(end_idx[trial_num]): continue
# # skip certain trials
# y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
# x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
# if y_start > 25: continue
# if abs(x_start - 50) > 25: continue # 25
#
# # get prev edginess
# _, _ = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
# scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
#
# # only do predict edgy:
# # if c == 0:
# # if prev_edginess[trial_num] <= HV_cutoff and 'down' in experiment: continue
# # elif c == 1:
# # if prev_edginess[trial_num] > HV_cutoff and 'down' in experiment: continue
#
# # add data
# fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_HV,
# time_exploring, distance_exploring, time_exploring_obstacle, time_exploring_far,
# self, time, trial, trial_num, trials, edginess, t)
#
# # normalize end idx to
# RT = self.analysis[experiment][condition]['RT'][mouse][trial]
# if not RT:
# print(RT)
# continue
# RT_all[trial_num] = RT
#
# avg_speed[trial_num] = self.analysis[experiment][condition]['RT path length'][mouse][trial] * scaling_factor / (
# (end_idx[trial_num] - RT) / fps)
# # avg_speed[trial_num] = self.analysis[experiment][condition]['full path length'][mouse][trial] * scaling_factor / (end_idx[trial_num] / fps)
#
# end_idx[trial_num] = (end_idx[trial_num] / fps) / self.analysis[experiment][condition]['optimal path length'][mouse][
# trial] / scaling_factor * 100
#
# # add data for stats
# efficiency_data_mouse.append(efficiency[trial_num])
# # duration_data_mouse.append(end_idx[trial_num]) #TEMP COMMENTING
# duration_data_mouse.append(RT)
# prev_homings_data_mouse.append(num_prev_homings_EV[trial_num])
#
# t += 1
#
# # append data for stats
# if efficiency_data_mouse:
# efficiency_data_all.append(efficiency_data_mouse)
# duration_data_all.append(duration_data_mouse)
# prev_homings_data_all.append(prev_homings_data_mouse)
# all_conditions.append(data_condition[c])
# mouse_ID.append(m);
# m += 1
#
# # format end ind
# # end_idx = np.array([e/30 for e in end_idx])
# end_idx[np.isnan(efficiency)] = np.nan
# # loop over data to plot
# for i, (data, data_label) in enumerate(zip([efficiency_RT, end_idx, RT_all, avg_speed, edginess],
# ['Efficiency'])): # , 'Duration', 'Reaction Time', 'Speed', 'Trajectory'])): #edginess, 'Trajectory',
# # for i, (data, data_label) in enumerate(zip([edginess], ['Trajectory'])): # edginess, 'Trajectory',
#
# # for i, (data, data_label) in enumerate(zip([edginess, efficiency, end_idx], ['Trajectory', 'Efficiency', 'Duration'])):
# # for x_data, x_data_label in zip([num_prev_homings], ['Prior homings']):
# plot_data = data[~np.isnan(data)]
#
# # for x_data, x_data_label in zip([trials, time, num_prev_homings_EV, num_prev_homings_HV, prev_edginess, time_exploring, distance_exploring, time_exploring_far, time_exploring_obstacle],
# # ['Trials', 'Time', 'Edge vector homings', 'Homing vector homings', 'Mean prior trajectory','Time exploring', 'Distance explored', 'Time exploring far side', 'Time exploring obstacle']):
#
# for x_data, x_data_label in zip([trials, time_exploring], ['trial number']): # , 'Time exploring']):
#
# print('\nCorrelation between ' + data_label + ' and ' + x_data_label)
#
# # only plot escapes
# data_for_box_plot = data[~np.isnan(data)]
# print(len(data_for_box_plot))
# x_data = x_data[~np.isnan(data)]
#
# # get the correlation
# r, p = scipy.stats.pearsonr(x_data, data_for_box_plot)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
#
# # initialize figure
# plt.title(data_label + ' x ' + x_data_label)
# # set up the figure
# # if data_label=='Efficiency': ax1.set_ylim([-.03, 1.03])
# # elif data_label=='Duration': ax1.set_ylim([-.1, 7])
#
# if np.max(x_data) < 5:
# ax1.set_xticks(np.unique(x_data).astype(int))
# else:
# ax1.set_xticks(np.arange(5, 25, 5))
# # ax1.set_xlim([5,20])
#
# # jitter the axis
# scatter_axis = scatter_the_axis_efficiency(plot_data, x_data + c/3 - .2)
# # plot each trial
# ax1.scatter(scatter_axis, plot_data, color=colors[c], s=15, alpha=1, edgecolor=colors[c], linewidth=1)
#
# for x in np.unique(x_data):
# # plot kde
# kde = fit_kde(plot_data[x_data==x], bw=.02) #.2) # .04
# plot_kde(ax1, kde, plot_data[x_data==x], z=x + c/3 - .15, vertical=True, normto=.15, color=kde_colors[c], violin=False, clip=True)
#
# # box and whisker
# bp = ax1.boxplot([plot_data[x_data==x], [0, 0]], positions=[x + c / 3 - .2, -10], showfliers=False, widths = [0.05, .05], zorder=99)
# plt.setp(bp['boxes'], color=[.5, .5, .5], linewidth=2)
# plt.setp(bp['whiskers'], color=[.5, .5, .5], linewidth=2)
# plt.setp(bp['medians'], linewidth=2)
# ax1.set_xlim(.25, 3.75)
# ax1.set_ylim(.5, 1.05)
# # ax1.set_ylim(.95, 1.9)
# ax1.set_xticks([1,2,3])
# ax1.set_xticklabels([1,2,3])
#
#
#
# # # for each trial
# # for x in np.unique(x_data):
# # # plot kde
# # kde = fit_kde(plot_data[x_data>=0], bw=.02) #.2) # .04
# # plot_kde(ax1, kde, plot_data[x_data>=0], z=x + c/3 - .15, vertical=True, normto=.15, color=kde_colors[c], violin=False, clip=True)
# #
# # # box and whisker
# # bp = ax1.boxplot([plot_data[x_data>=0], [0, 0]], positions=[x + c / 3 - .2, -10], showfliers=False, widths = [0.05, .05], zorder=99)
# # plt.setp(bp['boxes'], color=[.5, .5, .5], linewidth=2)
# # plt.setp(bp['whiskers'], color=[.5, .5, .5], linewidth=2)
# # plt.setp(bp['medians'], linewidth=2)
# # ax1.set_xlim(.25, 3.75)
# # ax1.set_ylim(.5, 1.05)
# # # ax1.set_ylim(.95, 1.9)
# # ax1.set_xticks([1,2,3])
# # ax1.set_xticklabels([1,2,3])
# #
# # # jitter the axis
# # scatter_axis = scatter_the_axis_efficiency(plot_data, np.ones_like(plot_data) * (x + c/3 - .2))
# # # plot each trial
# # ax1.scatter(scatter_axis, plot_data, color=colors[c], s=15, alpha=1, edgecolor=colors[c], linewidth=1)
#
#
#
# ax1.plot([-1, 4], [1, 1], linestyle='--', color=[.5, .5, .5, .5])
# # save the plot
# plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.png'), format='png')
# plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
#
# plt.show()
# print('done')
#
#
#
def plot_efficiency(self):
# initialize parameters
fps = 30
traj_loc = 40
escape_duration = 12 #12 #6
HV_cutoff = .681
ETD = 10
# ax2, fig2, ax3, fig3 = initialize_figures_efficiency(self)
efficiency_data = [[],[],[],[]]
duration_data = [[],[],[],[]]
# initialize arrays for stats
efficiency_data_all = []
duration_data_all = []
prev_homings_data_all = []
all_conditions = []
mouse_ID = []; m = 1
# data_condition = ['naive','experienced']
data_condition = ['escape', 'food']
# data_condition = ['OR - EV', 'OR - HV', 'OF']
# data_condition = ['Obstacle removed (no shelter)', 'obstacle removed', 'acute OR', 'obstacle']
colors = [[0,0,0],[1,0,0]]
#
plot_stuff = True
do_traversals = False
# loop over experiments and conditions
for c, (experiment, condition) in enumerate(zip(self.experiments, self.conditions)):
print(' - - - -- - - - -- - - - - - - -- - - - - - - - - -')
# extract experiments from nested list
sub_experiments, sub_conditions = extract_experiments(experiment, condition)
# get the number of trials
number_of_trials = get_number_of_trials(sub_experiments, sub_conditions, self.analysis)
number_of_mice = get_number_of_mice(sub_experiments, sub_conditions, self.analysis)
# initialize array to fill in with each trial's data
efficiency, efficiency_RT, end_idx, num_prev_homings_EV, num_prev_homings_front_EV, num_prev_homings_other_EV, num_prev_homings_HV, time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre,\
time_exploring_obstacle_post,time_exploring_far_pre,time_exploring_far_post, time_exploring_edge, time_exploring_other_edge, scaling_factor, time, trial_num, trials, edginess, prev_edginess, dist_to_SH, dist_to_other_SH, RT_all, avg_speed, _ = \
initialize_variables_efficiency(number_of_trials, self, sub_experiments)
# loop over each experiment and condition
for e, (experiment, condition) in enumerate(zip(sub_experiments, sub_conditions)):
if 'void' in experiment or 'dark' in experiment:
escape_duration = 12
if 'food' in experiment: escape_duration = 9
# else:escape_duration = 9
# loop over each mouse
for i, mouse in enumerate(self.analysis[experiment][condition]['start time']):
print(mouse)
# initialize arrays for stats
efficiency_data_mouse = []
duration_data_mouse = []
prev_homings_data_mouse = []
# control analysis
if self.analysis_options['control'] and not mouse=='control': continue
if not self.analysis_options['control'] and mouse=='control': continue
# loop over each trial
t = 0
for trial in range(len(self.analysis[experiment][condition]['end time'][mouse])):
trial_num += 1
if t > 2 and not 'food' in experiment and not 'void' in experiment and not 'dark' in experiment: continue
if 'food' in experiment and condition == 'no obstacle' and self.analysis[experiment][condition]['start time'][mouse][trial] < 20: continue
if t > 8: continue
# if t > 2: continue
# if 'on off' in experiment and trial: continue
# print(t)
                        # impose conditions
end_idx[trial_num] = self.analysis[experiment][condition]['end time'][mouse][trial]
if (end_idx[trial_num] > escape_duration * fps) or np.isnan(end_idx[trial_num]): continue
# skip certain trials
y_start = self.analysis[experiment][condition]['path'][mouse][trial][1][0] * scaling_factor
x_start = self.analysis[experiment][condition]['path'][mouse][trial][0][0] * scaling_factor
if y_start > 25: continue
if abs(x_start-50) > 30: continue #25
# get prev edginess
_, _ = get_prev_edginess(ETD, condition, experiment, mouse, prev_edginess, dist_to_SH, dist_to_other_SH,
scaling_factor, self, traj_loc, trial, trial_num, edginess, [], [])
# only do predict edgy:
# if c == 0:
# if prev_edginess[trial_num] <= HV_cutoff and 'down' in experiment: continue
# elif c == 1:
# if prev_edginess[trial_num] > HV_cutoff and 'down' in experiment: continue
# add data
fill_in_trial_data_efficiency(ETD, condition, efficiency, efficiency_RT, experiment, mouse, num_prev_homings_EV,num_prev_homings_front_EV, num_prev_homings_other_EV,num_prev_homings_HV,
time_exploring_pre, time_exploring_post, distance_exploring_pre, distance_exploring_post, time_exploring_obstacle_pre,
time_exploring_obstacle_post, time_exploring_far_pre, time_exploring_far_post, time_exploring_edge, time_exploring_other_edge,
self, time, trial, trial_num, trials, edginess, t)
# if edginess[trial_num] < HV_cutoff: continue
if do_traversals:
traversal = self.analysis[experiment][condition]['back traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
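                            # Assumed layout (inferred from the indexing below, not verified): the traversal record is a flat
                            # list with 5 entries per traversal type -- offsets 0-4 for spontaneous traversals, 5-9 for
                            # escape-evoked ones -- holding paths, start frames, edginess and durations in that order.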
if traversal:
x_edge = self.analysis[experiment][condition]['x edge'][mouse][trial]
# if x_edge==25: x_edge = 75
# else: x_edge = 25
spont_edge = []
for trav in traversal[0 * 5 + 0]:
spont_edge.append(trav[0][-1]*scaling_factor)
esc_edge = []
for trav in traversal[1 * 5 + 0]:
esc_edge.append(trav[0][-1]*scaling_factor)
num_prev_homings_EV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial]-(15+20*('void' in experiment))) * 30 * 60)) + \
np.sum((np.array(traversal[1 * 5 + 3]) < 1.5) * (abs(np.array(esc_edge)-x_edge) < 25) * \
(np.array(traversal[1 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - (15+20*('void' in experiment))) * 30 * 60))
num_prev_homings_HV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) < HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial]-(15+20*('void' in experiment))) * 30 * 60)) + \
np.sum((np.array(traversal[1 * 5 + 3]) < 1.5) * (abs(np.array(esc_edge)-x_edge) < 25) * \
(np.array(traversal[1 * 5 + 2]) < HV_cutoff) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - (15+20*('void' in experiment))) * 30 * 60))
eligible_homings = ~((np.array(traversal[0 * 5 + 2]) > HV_cutoff) * (abs(np.array(spont_edge)-x_edge) > 40)) * (np.array(traversal[0 * 5 + 3]) < 3) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[0 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - 15) * 30 * 60)
if np.sum(eligible_homings):
mean_homing = np.mean(np.array(traversal[0 * 5 + 2])[eligible_homings])
else: mean_homing = 0
eligible_escapes = ~((np.array(traversal[1 * 5 + 2]) > HV_cutoff) * (abs(np.array(esc_edge) - x_edge) > 40)) * (np.array(traversal[1 * 5 + 3]) < 3) * \
(np.array(traversal[1 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60) * \
(np.array(traversal[1 * 5 + 1]) > (self.analysis[experiment][condition]['start time'][mouse][trial] - 15) * 30 * 60)
if np.sum(eligible_escapes):
mean_escape = np.mean(np.array(traversal[1 * 5 + 2])[eligible_escapes])
else: mean_escape = 0
prev_edginess[trial_num] = ( mean_homing * np.sum(eligible_homings) + mean_escape * np.sum(eligible_escapes) ) / \
(np.sum(eligible_homings) + np.sum(eligible_escapes))
else:
num_prev_homings_EV[trial_num] = 0
# prev_edginess[trial_num] = 0
if np.isnan(prev_edginess[trial_num]):
prev_edginess[trial_num] = 0
traversal = self.analysis[experiment][condition]['front traversal'][mouse]
# get the duration of those paths
# duration = traversal[t*5+3]
if traversal:
x_edge = self.analysis[experiment][condition]['x edge'][mouse][trial]
spont_edge = []
for trav in traversal[0 * 5 + 0]:
spont_edge.append(trav[0][-1]*scaling_factor)
esc_edge = []
for trav in traversal[1 * 5 + 0]:
esc_edge.append(trav[0][-1]*scaling_factor)
num_prev_homings_other_EV[trial_num] = np.sum((np.array(traversal[0 * 5 + 3]) < 1.5) * (abs(np.array(spont_edge)-x_edge) < 25) * \
(np.array(traversal[0 * 5 + 2]) > HV_cutoff) * \
(np.array(traversal[0 * 5 + 1]) < self.analysis[experiment][condition]['start time'][mouse][trial] * 30 * 60))
else:
num_prev_homings_other_EV[trial_num] = 0
# print(mouse)
# print(trial + 1)
# print(num_prev_homings_EV[trial_num])
# print(num_prev_homings_other_EV[trial_num])
# print(edginess[trial_num])
# print('')
# normalize end idx to
RT = self.analysis[experiment][condition]['RT'][mouse][trial]
# if not RT:
# print(RT)
# continue
RT_all[trial_num] = RT
avg_speed[trial_num] = self.analysis[experiment][condition]['RT path length'][mouse][trial] * scaling_factor / ((end_idx[trial_num] - RT) / fps)
# avg_speed[trial_num] = self.analysis[experiment][condition]['full path length'][mouse][trial] * scaling_factor / (end_idx[trial_num] / fps)
time[trial_num] = self.analysis[experiment][condition]['start time'][mouse][trial]
time[trial_num] = (end_idx[trial_num] / fps) / self.analysis[experiment][condition]['optimal path length'][mouse][trial] / scaling_factor * 100
                        time[trial_num] = abs(50 - x_start)  # overrides the two assignments above
end_idx[trial_num] = (end_idx[trial_num] / fps - RT) / self.analysis[experiment][condition]['optimal RT path length'][mouse][trial] / scaling_factor * 100
# add data for stats
efficiency_data_mouse.append([efficiency_RT[trial_num], trial])
duration_data_mouse.append([end_idx[trial_num], trial]) #TEMP COMMENTING #RT
# duration_data_mouse.append(num_prev_homings_EV[trial_num])
prev_homings_data_mouse.append(num_prev_homings_EV[trial_num])
t += 1
# print(trial+1)
#append data for stats
if efficiency_data_mouse:
efficiency_data_all.append(efficiency_data_mouse)
duration_data_all.append(duration_data_mouse)
prev_homings_data_all.append(prev_homings_data_mouse)
all_conditions.append(data_condition[c])
mouse_ID.append(m); m+= 1
# format end ind
# end_idx = np.array([e/30 for e in end_idx])
end_idx[np.isnan(efficiency)] = np.nan
# loop over data to plot
# for i, (data, data_label) in enumerate(zip([edginess, efficiency_RT, end_idx, RT_all, avg_speed], ['Trajectory'])): #,'Efficiency', 'Duration', 'Reaction Time', 'Speed', 'Trajectory'])): #edginess, 'Trajectory',
# for i, (data, data_label) in enumerate(zip([edginess], ['Trajectory'])):
for i, (data, data_label) in enumerate(zip([end_idx], ['RT duration', 'RT duration', 'Efficiency', 'RT'])): # time, , efficiency_RT, RT_all
# for i, (data, data_label) in enumerate(zip([RT_all], ['Reaction time'])):
# for i, (data, data_label) in enumerate(zip([edginess, efficiency, end_idx], ['Trajectory', 'Efficiency', 'Duration'])):
# for x_data, x_data_label in zip([num_prev_homings], ['Prior homings']):
plot_data = data[~np.isnan(data)]
if False or True:
# for x_data, x_data_label in zip([trials, time, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_HV, prev_edginess, time_exploring, distance_exploring, time_exploring_far, time_exploring_obstacle, time_exploring_edge, time_exploring_other_edge],
# ['Trials', 'Time', 'Edge vector homings','Other edge vector homings', 'Homing vector homings', 'Mean prior trajectory','Time exploring', 'Distance explored', 'Time exploring far side', 'Time exploring obstacle', 'Time exploring edge', 'Time exploring other edge']):
# for x_data, x_data_label in zip([trials, time, time_exploring_pre, distance_exploring_pre, time_exploring_post, distance_exploring_post,
# time_exploring_far_pre,time_exploring_far_post, time_exploring_obstacle_pre, time_exploring_obstacle_post, time_exploring_other_edge, time_exploring_edge],
# ['Trials', 'Time', 'Time exploring (pre)', 'Distance explored (pre)', 'Time exploring (post)', 'Distance explored (post)',
# 'Time exploring far side (pre)', 'Time exploring far side (post)', 'Time exploring obstacle (pre)', 'Time exploring obstacle (post)',
# 'Time exploring other edge (pre)', 'Time exploring edge (pre)']):
# num_homings_combined = (num_prev_homings_EV>0).astype(int) - (num_prev_homings_HV>0).astype(int)
# num_homings_combined[num_prev_homings_EV==0] = -1
#
# for x_data, x_data_label in zip([time, num_prev_homings_EV>0, num_prev_homings_EV, num_prev_homings_other_EV, num_prev_homings_other_EV>0,
# num_prev_homings_front_EV, num_prev_homings_front_EV>0, prev_edginess, num_prev_homings_HV, num_prev_homings_HV>2, num_homings_combined],
# ['Time', '1 Edge vector homings', 'Edge vector homings','Other edge vector homings','1 other edge vector homings',
# 'Front edge vectors','1 front edge vector', 'Mean prior trajectory', 'Homing vector homings', '1 Homing vector homing', 'Combined homings']):
# for x_data, x_data_label in zip([trials, num_prev_homings_EV>0, num_prev_homings_EV, prev_edginess], ['trial', '1 Edge vector homings', 'Edge vector homings', 'Mean prior trajectory']):
for x_data, x_data_label in zip([trials], ['trial']): # ,edginess>HV_cutoff #, 'edginess'
print('\nCorrelation between ' + data_label + ' and ' + x_data_label)
# only plot escapes
data_for_box_plot = data[~np.isnan(data)]
x_data = x_data[~np.isnan(data)]
print(np.sum(x_data==0))
# get the correlation
r, p = scipy.stats.pearsonr(x_data, data_for_box_plot)
print('r = ' + str(np.round(r, 3)) + '\np = ' + str(p))
if p < .05: print('SIGGY STATDUST')
# m = (((np.mean(x_data) * np.mean(data_for_box_plot)) - np.mean(x_data * data_for_box_plot)) /
# ((np.mean(x_data) ** 2) - np.mean(x_data ** 2)))
# regressed_data = data_for_box_plot - x_data * m
# r, p = scipy.stats.pearsonr(x_data, regressed_data)
# print('r = ' + str(np.round(r, 3)) + '\np = ' + str(np.round(p, 3)))
if plot_stuff and not np.isnan(r):
fig1, ax1 = plt.subplots(figsize=(15, 15))
# initialize figure
# fig1, ax1 = plt.subplots(figsize=(9, 9))
plt.title(data_label + ' x ' + x_data_label)
# set up the figure
# if data_label=='Efficiency': ax1.set_ylim([-.03, 1.03])
# elif data_label=='Duration': ax1.set_ylim([-.1, 7])
# if np.max(x_data) < 5:
# ax1.set_xticks(np.unique(x_data).astype(int))
# else:
# ax1.set_xticks(np.arange(5, 25, 5))
# ax1.set_xlim([5,20])
# jitter the axis
scatter_axis = scatter_the_axis_efficiency(plot_data, x_data)
# plot each trial
ax1.scatter(scatter_axis, plot_data, color=colors[0], s=40, alpha=1, edgecolor=colors[0], linewidth=1)
# ax1.scatter(scatter_axis[plot_data > HV_cutoff], plot_data[plot_data > HV_cutoff], color=[0,0,0], s=50, alpha=1, edgecolor=[0, 0, 0], linewidth=1)
# do a linear regression
try:
x_data, prediction = do_linear_regression(plot_data, x_data.astype(int))
except:
print('hi')
# # plot kde
kde = fit_kde(plot_data, bw=.02) #.2) # .04
plot_kde(ax1, kde, plot_data, z=c + .1, vertical=True, normto=.3, color=[.75, .75, .75], violin=False, clip=True) # True)
# plot the linear regression
# ax1.plot(x_data, prediction['Pred'].values, color=colors[0], linewidth=1, linestyle='--', alpha=.7)
# ax1.fill_between(x_data, prediction['lower'].values, prediction['upper'].values, color=colors[0], alpha=.05) # 6
# save the plot
plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.png'), format='png')
plt.savefig(os.path.join(self.summary_plots_folder, data_label + ' by ' + x_data_label + ' - ' + self.labels[c] + '.eps'), format='eps')
# plt.show()
# plt.close()
# plot the boxplot
# if data_label == 'Efficiency':
# ax, fig = ax2, fig2
# efficiency_data[c] = plot_data
# elif data_label == 'Duration':
# ax, fig = ax3, fig3
# duration_data[c] = plot_data
# else: continue
# scatter_axis = scatter_the_axis_efficiency(plot_data, np.ones_like(plot_data)*c)
# ax.scatter(scatter_axis, plot_data, color=[0, 0, 0], s=40, alpha=1, edgecolor=[0, 0, 0], linewidth=1)
# # plot kde
# kde = fit_kde(plot_data, bw=.02) #.2) # .04
# plot_kde(ax, kde, plot_data, z=c + .1, vertical=True, normto=.3, color=[.75, .75, .75], violin=False, clip=True) # True)
# # plot errorbar
# median = np.percentile(plot_data, 50)
# third_quartile = np.percentile(plot_data, 75)
# first_quartile = np.percentile(plot_data, 25)
# ax.errorbar(c - .2, median, yerr=np.array([[median - first_quartile], [third_quartile - median]]), color=[0,0,0], capsize=10, capthick=3, alpha=1, linewidth=3)
# ax.scatter(c - .2, median, color=[0,0,0], s=175, alpha=1)
# # save the plot
# fig.savefig(os.path.join(self.summary_plots_folder, data_label + ' comparison - ' + self.labels[c] + '.png'), format='png')
# fig.savefig(os.path.join(self.summary_plots_folder, data_label + ' comparison - ' + self.labels[c] + '.eps'), format='eps')
plt.show()
# test correlation and stats thru permutation test
data_x = prev_homings_data_all
data_y = efficiency_data_all
# permutation_correlation(data_x, data_y, iterations=10000, two_tailed=False, pool_all=False)
#
# # do t test
# t, p = scipy.stats.ttest_ind(efficiency_data[0], efficiency_data[1], equal_var=False)
# print('Efficiency: ' + str(p))
# print(np.mean(efficiency_data[0]))
# print(np.mean(efficiency_data[1]))
#
# t, p = scipy.stats.ttest_ind(duration_data[0], duration_data[1], equal_var=False)
# print('Duration: ' + str(p))
# print(np.mean(duration_data[0]))
# print(np.mean(duration_data[1]))
#
efficiency_0 = []
efficiency_more = []
for m, mouse_data in enumerate(efficiency_data_all):
EV_array = np.array(duration_data_all[m])
            efficiency_array = np.array(mouse_data)
import os
import numpy as np
import qtawesome as qta
from qtpy.QtCore import Qt
from qtpy.QtWidgets import QDialog, QMessageBox, QDialogButtonBox, QFileDialog
from common import parse_file, np_to_str
from ui.preferences import Ui_preferencesDialog
DISPLAY_SMOOTH_GRAPHS = 'display/smooth_graphs'
STYLE_MATPLOTLIB_THEME_DEFAULT = 'beq_dark'
STYLE_MATPLOTLIB_THEME = 'style/matplotlib_theme'
LOGGING_LEVEL = 'logging/level'
SCREEN_GEOMETRY = 'screen/geometry'
SCREEN_WINDOW_STATE = 'screen/window_state'
SYSTEM_CHECK_FOR_UPDATES = 'system/check_for_updates'
SYSTEM_CHECK_FOR_BETA_UPDATES = 'system/check_for_beta_updates'
RECORDER_TARGET_FS = 'recorder/target/fs'
RECORDER_TARGET_SAMPLES_PER_BATCH = 'recorder/target/samples_per_batch'
RECORDER_TARGET_ACCEL_ENABLED = 'recorder/target/accel_enabled'
RECORDER_TARGET_ACCEL_SENS = 'recorder/target/accel_sens'
RECORDER_TARGET_GYRO_ENABLED = 'recorder/target/gyro_enabled'
RECORDER_TARGET_GYRO_SENS = 'recorder/target/gyro_sens'
RECORDER_SAVED_IPS = 'recorder/saved_ips'
BUFFER_SIZE = 'buffer/size'
ANALYSIS_RESOLUTION = 'analysis/resolution'
ANALYSIS_TARGET_FS = 'analysis/target_fs'
ANALYSIS_WINDOW_DEFAULT = 'Default'
ANALYSIS_AVG_WINDOW = 'analysis/avg_window'
ANALYSIS_PEAK_WINDOW = 'analysis/peak_window'
ANALYSIS_DETREND = 'analysis/detrend'
ANALYSIS_HPF_RTA = 'analysis/hpfrta'
CHART_MAG_MIN = 'chart/mag_min'
CHART_MAG_MAX = 'chart/mag_max'
CHART_FREQ_MIN = 'chart/freq_min'
CHART_FREQ_MAX = 'chart/freq_max'
CHART_SPECTRO_SCALE_FACTOR = 'chart/spectro/scale_factor'
CHART_SPECTRO_SCALE_ALGO = 'chart/spectro/scale_algo'
SUM_X_SCALE = 'sum/x_scale'
SUM_Y_SCALE = 'sum/y_scale'
SUM_Z_SCALE = 'sum/z_scale'
WAV_DOWNLOAD_DIR = 'wav/download_dir'
SNAPSHOT_GROUP = 'snapshot'
RTA_TARGET = 'rta/target'
RTA_HOLD_SECONDS = 'rta/hold_secs'
RTA_SMOOTH_WINDOW = 'rta/smooth_window'
RTA_SMOOTH_POLY = 'rta/smooth_poly'
DEFAULT_PREFS = {
ANALYSIS_RESOLUTION: 1.0,
ANALYSIS_TARGET_FS: 1000,
ANALYSIS_AVG_WINDOW: ANALYSIS_WINDOW_DEFAULT,
ANALYSIS_PEAK_WINDOW: ANALYSIS_WINDOW_DEFAULT,
ANALYSIS_DETREND: 'constant',
ANALYSIS_HPF_RTA: False,
BUFFER_SIZE: 30,
CHART_MAG_MIN: 40,
CHART_MAG_MAX: 120,
CHART_FREQ_MIN: 1,
CHART_FREQ_MAX: 125,
CHART_SPECTRO_SCALE_FACTOR: '8x',
CHART_SPECTRO_SCALE_ALGO: 'Lanczos',
DISPLAY_SMOOTH_GRAPHS: True,
RECORDER_TARGET_FS: 500,
RECORDER_TARGET_SAMPLES_PER_BATCH: 8,
RECORDER_TARGET_ACCEL_ENABLED: True,
RECORDER_TARGET_ACCEL_SENS: 4,
RECORDER_TARGET_GYRO_ENABLED: False,
RECORDER_TARGET_GYRO_SENS: 500,
RTA_HOLD_SECONDS: 10.0,
RTA_SMOOTH_WINDOW: 31,
RTA_SMOOTH_POLY: 7,
SUM_X_SCALE: 2.2,
SUM_Y_SCALE: 2.4,
SUM_Z_SCALE: 1.0,
STYLE_MATPLOTLIB_THEME: STYLE_MATPLOTLIB_THEME_DEFAULT,
SYSTEM_CHECK_FOR_UPDATES: True,
SYSTEM_CHECK_FOR_BETA_UPDATES: False,
WAV_DOWNLOAD_DIR: os.path.join(os.path.expanduser('~'), 'Music'),
}
TYPES = {
ANALYSIS_RESOLUTION: float,
ANALYSIS_TARGET_FS: int,
ANALYSIS_HPF_RTA: bool,
BUFFER_SIZE: int,
CHART_MAG_MIN: int,
CHART_MAG_MAX: int,
CHART_FREQ_MIN: int,
CHART_FREQ_MAX: int,
DISPLAY_SMOOTH_GRAPHS: bool,
RECORDER_TARGET_FS: int,
RECORDER_TARGET_SAMPLES_PER_BATCH: int,
RECORDER_TARGET_ACCEL_ENABLED: bool,
RECORDER_TARGET_ACCEL_SENS: int,
RECORDER_TARGET_GYRO_ENABLED: bool,
RECORDER_TARGET_GYRO_SENS: int,
RTA_HOLD_SECONDS: float,
RTA_SMOOTH_POLY: int,
RTA_SMOOTH_WINDOW: int,
SUM_X_SCALE: float,
SUM_Y_SCALE: float,
SUM_Z_SCALE: float,
SYSTEM_CHECK_FOR_UPDATES: bool,
SYSTEM_CHECK_FOR_BETA_UPDATES: bool,
}
singleton = None
class Preferences:
def __init__(self, settings):
self.__settings = settings
global singleton
singleton = self
def has(self, key):
'''
checks for existence of a value.
:param key: the key.
:return: True if we have a value.
'''
return self.get(key) is not None
def get(self, key, default_if_unset=True):
'''
Gets the value, if any.
:param key: the settings key.
:param default_if_unset: if true, return a default value.
:return: the value.
'''
default_value = DEFAULT_PREFS.get(key, None) if default_if_unset is True else None
value_type = TYPES.get(key, None)
if value_type is not None:
return self.__settings.value(key, defaultValue=default_value, type=value_type)
else:
return self.__settings.value(key, defaultValue=default_value)
def enter(self, key):
self.__settings.beginGroup(key)
def get_children(self):
return self.__settings.childKeys()
def get_child_groups(self):
return self.__settings.childGroups()
def exit(self):
self.__settings.endGroup()
def get_all(self, prefix):
'''
Get all values with the given prefix.
:param prefix: the prefix.
:return: the values, if any.
'''
self.__settings.beginGroup(prefix)
try:
return set(filter(None.__ne__, [self.__settings.value(x) for x in self.__settings.childKeys()]))
finally:
self.__settings.endGroup()
def set(self, key, value):
'''
sets a new value.
:param key: the key.
:param value: the value.
'''
if value is None:
self.__settings.remove(key)
else:
self.__settings.setValue(key, value)
def clear_all(self, prefix):
''' clears all under the given group '''
self.__settings.beginGroup(prefix)
self.__settings.remove('')
self.__settings.endGroup()
def clear(self, key):
'''
Removes the stored value.
:param key: the key.
'''
self.set(key, None)
def reset(self):
'''
Resets all preferences.
'''
self.__settings.clear()
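# Usage sketch (illustrative comment, not part of the original module): Preferences wraps a QSettings
# store, falling back to DEFAULT_PREFS and coercing through TYPES where a type is registered (QSettings
# can otherwise hand back strings, e.g. with INI storage).
#
#     from qtpy.QtCore import QSettings
#     prefs = Preferences(QSettings('acme', 'vibe-analyser'))  # organisation/app names are placeholders
#     fs = prefs.get(RECORDER_TARGET_FS)         # -> 500 until a value has been stored
#     prefs.set(RECORDER_TARGET_FS, 1000)
#     prefs.clear(RECORDER_TARGET_FS)            # next get() returns the default again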
class PreferencesDialog(QDialog, Ui_preferencesDialog):
'''
Allows user to set some basic preferences.
'''
def __init__(self, preferences, style_root, recorder_store, spectro, parent=None):
super(PreferencesDialog, self).__init__(parent)
self.__style_root = style_root
self.__recorder_store = recorder_store
self.setupUi(self)
self.__preferences = preferences
self.__spectro = spectro
self.__should_clear_target = False
self.__new_target = None
self.buttonBox.button(QDialogButtonBox.RestoreDefaults).clicked.connect(self.__reset)
self.checkForUpdates.setChecked(self.__preferences.get(SYSTEM_CHECK_FOR_UPDATES))
self.checkForBetaUpdates.setChecked(self.__preferences.get(SYSTEM_CHECK_FOR_BETA_UPDATES))
self.xScale.setValue(self.__preferences.get(SUM_X_SCALE))
self.yScale.setValue(self.__preferences.get(SUM_Y_SCALE))
self.zScale.setValue(self.__preferences.get(SUM_Z_SCALE))
self.magMin.setValue(self.__preferences.get(CHART_MAG_MIN))
self.magMax.setValue(self.__preferences.get(CHART_MAG_MAX))
self.highpassRTA.setChecked(self.__preferences.get(ANALYSIS_HPF_RTA))
self.init_combo(ANALYSIS_DETREND, self.detrend, lambda a: f"{a[0].upper()}{a[1:]}")
self.magMin.valueChanged['int'].connect(self.__balance_mag)
self.magMax.valueChanged['int'].connect(self.__balance_mag)
self.freqMin.setValue(self.__preferences.get(CHART_FREQ_MIN))
self.freqMax.setValue(self.__preferences.get(CHART_FREQ_MAX))
self.freqMin.valueChanged['int'].connect(self.__balance_freq)
self.freqMax.valueChanged['int'].connect(self.__balance_freq)
self.wavSaveDir.setText(self.__preferences.get(WAV_DOWNLOAD_DIR))
self.spectroScaleAlgo.setCurrentText(self.__preferences.get(CHART_SPECTRO_SCALE_ALGO))
self.spectroScaleFactor.setCurrentText(self.__preferences.get(CHART_SPECTRO_SCALE_FACTOR))
self.wavSaveDirPicker.setIcon(qta.icon('fa5s.folder-open'))
self.addRecorderButton.setIcon(qta.icon('fa5s.plus'))
self.deleteRecorderButton.setIcon(qta.icon('fa5s.times'))
enable_delete = False
if self.__preferences.get(RECORDER_SAVED_IPS) is not None:
ips = self.__preferences.get(RECORDER_SAVED_IPS).split('|')
for ip in ips:
self.recorders.addItem(ip)
enable_delete = True
else:
self.recorderIP.setFocus(Qt.OtherFocusReason)
self.deleteRecorderButton.setEnabled(enable_delete)
self.addRecorderButton.setEnabled(False)
self.__reset_target_buttons()
self.clearTarget.setIcon(qta.icon('fa5s.times', color='red'))
self.loadTarget.setIcon(qta.icon('fa5s.folder-open'))
self.createTarget.setIcon(qta.icon('fa5s.bezier-curve'))
self.createTarget.setToolTip('Draw a target curve')
self.createTarget.clicked.connect(self.__create_target)
def __reset_target_buttons(self):
has_target = self.__preferences.has(RTA_TARGET)
self.clearTarget.setEnabled(has_target)
self.targetSet.setChecked(has_target)
def __create_target(self):
from model.target import CreateTargetDialog
dialog = CreateTargetDialog(self, self.__preferences, fs=self.__preferences.get(RECORDER_TARGET_FS))
dialog.exec()
self.__reset_target_buttons()
def __balance_mag(self, val):
keep_range(self.magMin, self.magMax, 10)
def __balance_freq(self, val):
keep_range(self.freqMin, self.freqMax, 10)
def validate_ip(self, ip):
valid_ip = self.__is_valid_ip(ip)
existing_ip = self.recorders.findText(ip, Qt.MatchExactly)
self.addRecorderButton.setEnabled(valid_ip and existing_ip == -1)
def add_recorder(self):
self.recorders.addItem(self.recorderIP.text())
self.recorderIP.clear()
def delete_recorder(self):
idx = self.recorders.currentIndex()
if idx > -1:
self.recorders.removeItem(idx)
self.deleteRecorderButton.setEnabled(self.recorders.count() > 0)
def clear_target(self):
'''
Clears any RTA target.
'''
self.__should_clear_target = True
self.__new_target = None
self.targetSet.setChecked(False)
def load_target(self):
'''
Allows user to select an FRD file to set the target.
'''
parsers = {'frd': self.__parse_frd, 'txt': self.__parse_frd}
_, data = parse_file('FRD (*.frd *.txt)', 'Load Target', parsers)
self.__new_target = data
self.targetSet.setChecked(data is not None)
@staticmethod
def __is_valid_ip(ip):
''' checks if the string is a valid ip:port. '''
tokens = ip.split(':')
if len(tokens) == 2:
ip_tokens = tokens[0].split('.')
if len(ip_tokens) == 4:
try:
first, *nums = [int(i) for i in ip_tokens]
if 0 < first <= 255:
if all(0 <= i <= 255 for i in nums):
return 0 < int(tokens[1]) < 65536
except ValueError:
pass
return False
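# Editor's note (illustrative examples, not in the original source):
# __is_valid_ip('192.168.1.10:8080') -> True
# __is_valid_ip('192.168.1.10') -> False (a port is required)
# __is_valid_ip('192.168.1.10:0') -> False (port must be in 1-65535)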
def __reset(self):
'''
Reset all settings
'''
result = QMessageBox.question(self,
'Reset Preferences?',
f"All preferences will be restored to their default values. This action is irreversible.\nAre you sure you want to continue?",
QMessageBox.Yes | QMessageBox.No,
QMessageBox.No)
if result == QMessageBox.Yes:
self.__preferences.reset()
self.alert_on_change('Defaults Restored')
self.reject()
def init_combo(self, key, combo, translater=lambda a: a):
'''
Initialises a combo box from either settings or a default value.
:param key: the settings key.
:param combo: the combo box.
:param translater: a lambda to translate from the stored value to the display name.
'''
stored_value = self.__preferences.get(key)
idx = -1
if stored_value is not None:
idx = combo.findText(translater(stored_value))
if idx != -1:
combo.setCurrentIndex(idx)
def accept(self):
'''
Saves the updated preferences and applies recorder and target changes.
'''
self.__preferences.set(SYSTEM_CHECK_FOR_UPDATES, self.checkForUpdates.isChecked())
self.__preferences.set(SYSTEM_CHECK_FOR_BETA_UPDATES, self.checkForBetaUpdates.isChecked())
self.__preferences.set(SUM_X_SCALE, self.xScale.value())
self.__preferences.set(SUM_Y_SCALE, self.yScale.value())
self.__preferences.set(SUM_Z_SCALE, self.zScale.value())
self.__preferences.set(WAV_DOWNLOAD_DIR, self.wavSaveDir.text())
self.__preferences.set(CHART_MAG_MIN, self.magMin.value())
self.__preferences.set(CHART_MAG_MAX, self.magMax.value())
self.__preferences.set(CHART_FREQ_MIN, self.freqMin.value())
self.__preferences.set(CHART_FREQ_MAX, self.freqMax.value())
self.__preferences.set(CHART_SPECTRO_SCALE_ALGO, self.spectroScaleAlgo.currentText())
self.__preferences.set(CHART_SPECTRO_SCALE_FACTOR, self.spectroScaleFactor.currentText())
self.__preferences.set(ANALYSIS_DETREND, self.detrend.currentText().lower())
self.__preferences.set(ANALYSIS_HPF_RTA, self.highpassRTA.isChecked())
# TODO would be nicer to be able to listen to specific values
self.__spectro.update_scale()
if self.recorders.count() > 0:
ips = [self.recorders.itemText(i) for i in range(self.recorders.count())]
self.__preferences.set(RECORDER_SAVED_IPS, '|'.join(ips))
self.__recorder_store.load(ips)
else:
self.__preferences.clear(RECORDER_SAVED_IPS)
self.__recorder_store.clear()
if self.__should_clear_target is True:
self.__preferences.clear(RTA_TARGET)
if self.__new_target is not None:
self.__preferences.set(RTA_TARGET, self.__new_target)
QDialog.accept(self)
def pick_save_dir(self):
dir_name = QFileDialog.getExistingDirectory(self, 'Export WAV', self.wavSaveDir.text(),
QFileDialog.ShowDirsOnly)
if len(dir_name) > 0:
self.wavSaveDir.setText(dir_name)
@staticmethod
def alert_on_change(title, text='Change will not take effect until the application is restarted',
icon=QMessageBox.Warning):
msg_box = QMessageBox()
msg_box.setText(text)
msg_box.setIcon(icon)
msg_box.setWindowTitle(title)
msg_box.exec()
@staticmethod
def __parse_frd(file_name):
'''
Reads an FRD file and converts it into x,y vals but returns the raw txt (i.e. we validate the data on load).
:param file_name: the file.
:return: file_name, the frd as an ndarray in str format.
'''
if file_name is not None:
comment_char = None
with open(file_name) as f:
c = f.read(1)
if not c.isalnum():
comment_char = c
f, m = np.genfromtxt(file_name, comments=comment_char, unpack=True, usecols=(0, 1))
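# --- Editor's addition: a minimal, hedged sketch of the FRD parse above. The in-memory
# --- file and its values are hypothetical; np.genfromtxt accepts any file-like object.
import io
import numpy as np

_frd_example = io.StringIO("* hypothetical comment line\n20 -3.1\n25 -2.9\n")
freq, mag = np.genfromtxt(_frd_example, comments='*', unpack=True, usecols=(0, 1))
# freq -> array([20., 25.]), mag -> array([-3.1, -2.9])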
from spada.io.error import SpadaError
from itertools import combinations
import numpy as np
class GeneExpression:
def __init__(self, txs, ctrl_ids, case_ids):
self._allTxs = txs
self._storedTxs = []
self._top_ctrl = (None, 0)
self._top_case = (None, 0)
self._expressionCtrl = np.array([])
self._expressionCase = np.array([])
self._matchedExpressionCtrl = np.array([])
self._dPSI = np.array([])
self._wtdPSI = np.array([])
self._dExp = np.array([])
self._wtdExp = np.array([])
self._idsCase = case_ids
self._idsCtrl = ctrl_ids
self._complete = False
def addTx(self, tx, expressionCtrl, expressionCase):
if self._expressionCtrl.size == 0:
self._expressionCtrl = np.empty(shape = (0, expressionCtrl.shape[1]))
self._expressionCtrl = np.vstack((self._expressionCtrl, expressionCtrl))
medianCtrl = np.median(expressionCtrl)
if medianCtrl > self._top_ctrl[1]:
self._top_ctrl = (tx, medianCtrl)
if self._expressionCase.size == 0:
self._expressionCase = np.empty(shape = (0, expressionCase.shape[1]))
self._expressionCase = np.vstack((self._expressionCase, expressionCase))
medianCase = np.median(expressionCase)
if medianCase > self._top_case[1]:
self._top_case = (tx, medianCase)
self._storedTxs.append(tx)
@property
def isComplete(self):
if not self._complete and not self._allTxs.difference(self._storedTxs):
self._matchedExpressionCtrl = self.matchExpressions(self._expressionCtrl)
psiCtrl = self.computePSI(self._matchedExpressionCtrl)
psiCase = self.computePSI(self._expressionCase)
self._dPSI = psiCase - psiCtrl
self._wtdPSI = self.computeExpectedDelta(expression = self._expressionCtrl)
geneMatchedExpressionCtrl = self._matchedExpressionCtrl.sum(axis = 0)
geneExpressionCtrl = self._expressionCtrl.sum(axis = 0)
geneExpressionCase = self._expressionCase.sum(axis = 0)
self._dExp = geneExpressionCase - geneMatchedExpressionCtrl
self._wtdExp = self.computeExpectedDelta(psi = geneExpressionCtrl)
self._complete = True
return self._complete
def computePSI(self, expression, nan_rm = False):
if expression.shape == (0,):
raise SpadaError("Expression empty.")
psi = expression / expression.sum(axis = 0)
if nan_rm:
nancols = np.where(np.isnan(psi))[1]
psi = np.delete(psi, nancols, axis = 1)
return psi
def computeExpectedDelta(self, expression = None, psi = np.array([]), nan_rm = True):
if psi.shape == (0,):
psi = self.computePSI(expression, nan_rm)
expectedDelta = [ abs(a - b) for a, b in combinations(psi.T, 2) ]
expectedDelta = np.array(expectedDelta)
expectedDelta = expectedDelta.T
return expectedDelta
def matchExpressions(self, expressionCtrl):
mask_case = [ i in self._idsCtrl for i in self._idsCase ]
mask_ctrl = [ self._idsCtrl.index(i) for i in self._idsCase if i in self._idsCtrl ]
median = np.median(expressionCtrl, axis=1)
matched = np.tile(median, (len(self._idsCase),1)).T
matched[:,mask_case] = expressionCtrl[:,mask_ctrl]
return matched
def cutoff(self, wt, percent):
if wt.shape == (0,):
cutoff = np.inf
else:
cutoff = np.percentile(wt, percent, axis = 1)
cutoff = cutoff.reshape(cutoff.shape[0], 1)
return cutoff
def detectSwitches(self, minExpression = 0.1, pSplicing = 95, pDE = 95):
switches = {}
if self.isComplete:
bigChange = abs(self._dPSI) > self.cutoff(self._wtdPSI, pSplicing)
notDE = abs(self._dExp) < self.cutoff(np.expand_dims(self._wtdExp, 0), pDE)
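# --- Editor's addition: a small, hedged illustration of the PSI computation used by
# --- GeneExpression.computePSI (per-sample transcript fraction); the values are made up.
import numpy as np

expr = np.array([[4.0, 1.0],   # transcript A counts in two samples
                 [6.0, 3.0]])  # transcript B counts in the same samples
psi = expr / expr.sum(axis=0)  # columns sum to 1 -> [[0.4, 0.25], [0.6, 0.75]]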
#!/usr/bin/env python
# -*- coding: utf-8 -*-
import numpy as np
import matplotlib.pyplot as plt
from pywt import WaveletPacket2D
import pywt.data
arr = pywt.data.aero()
wp2 = WaveletPacket2D(arr, 'db2', 'symmetric', maxlevel=2)
# Show original figure
plt.imshow(arr, interpolation="nearest", cmap=plt.cm.gray)
path = ['d', 'v', 'h', 'a']
# Show level 1 nodes
fig = plt.figure()
for i, p2 in enumerate(path):
ax = fig.add_subplot(2, 2, i + 1)
ax.imshow(np.sqrt(np.abs(wp2[p2].data)), origin='image',
interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(p2)
# Show level 2 nodes
for p1 in path:
fig = plt.figure()
for i, p2 in enumerate(path):
ax = fig.add_subplot(2, 2, i + 1)
p1p2 = p1 + p2
ax.imshow(np.sqrt(np.abs(wp2[p1p2].data)), origin='image',
interpolation="nearest", cmap=plt.cm.gray)
ax.set_title(p1p2)
fig = plt.figure()
i = 1
for row in wp2.get_level(2, 'freq'):
for node in row:
ax = fig.add_subplot(len(row), len(row), i)
ax.set_title("%s=(%s row, %s col)" % (
(node.path,) + wp2.expand_2d_path(node.path)))
ax.imshow(np.sqrt(np.abs(node.data)), origin='image', interpolation="nearest", cmap=plt.cm.gray)
## UNCOMMENTING THESE TWO LINES WILL FORCE KERAS/TF TO RUN ON CPU
#import os
#os.environ['CUDA_VISIBLE_DEVICES'] = '-1'
import tensorflow as tf
from tensorflow.python.keras.models import Sequential
from tensorflow.python.keras.callbacks import ModelCheckpoint
from tensorflow.python.keras.models import model_from_json
from tensorflow.python.keras.layers import ZeroPadding3D, Dense, Activation,Conv3D,MaxPooling3D,AveragePooling3D,Flatten,Dropout
from tensorflow.keras import utils
import numpy as np
import matplotlib.pyplot as plt
import cv2
import os
import scipy.io
import generate_trend
RESULTS_PATH = "results"
try:
open(RESULTS_PATH, 'w').close()
except OSError:
print(f"Invalid path: {RESULTS_PATH}")
exit()
# CONSTANTS
NB_VIDEOS_BY_CLASS_TRAIN = 200
NB_VIDEOS_BY_CLASS_VALIDATION = 200
# Tendencies (linear, 2nd order, 3rd order)
TENDANCIES_MIN = (-3,-1,-1)
TENDANCIES_MAX = (3,1,1)
TENDANCIES_ORDER = (1,2,3)
LENGTH_VIDEO = 60
IMAGE_WIDTH = 25
IMAGE_HEIGHT = 25
IMAGE_CHANNELS = 1
SAMPLING = 1 / 30
t = np.linspace(0, LENGTH_VIDEO * SAMPLING - SAMPLING, LENGTH_VIDEO)
# coefficients for the fitted-ppg method
a0 = 0.440240602542388
a1 = -0.334501803331783
b1 = -0.198990393984879
a2 = -0.050159136439220
b2 = 0.099347477830878
w = 2 * np.pi
HEART_RATES = np.linspace(55, 240, 75)
NB_CLASSES = len(HEART_RATES)
# prepare labels and label categories
labels = np.zeros(NB_CLASSES + 1)
for i in range(NB_CLASSES + 1):
labels[i] = i
labels_cat = utils.to_categorical(labels)
EPOCHS = 5000
CONTINUE_TRAINING = False
SAVE_ALL_MODELS = False
train_loss = []
val_loss = []
train_acc = []
val_acc = []
# 1. DEFINE OR LOAD MODEL / WEIGHTS
if (CONTINUE_TRAINING == False):
init_batch_nb = 0
model = Sequential()
model.add(Conv3D(filters=32, kernel_size=(58,20,20), input_shape=(LENGTH_VIDEO, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS)))
model.add(MaxPooling3D(pool_size=(2,2,2)))
model.add(Activation('relu'))
model.add(Dropout(0.2))
model.add(Flatten())
model.add(Dense(512, activation='relu'))
model.add(Dropout(0.2))
model.add(Dense(NB_CLASSES + 1, activation='softmax'))
else:
# load model
model = model_from_json(open('../../model_conv3D.json').read())
model.load_weights('../../weights_conv3D.h5')
# load statistics
dummy = np.loadtxt('../../statistics_loss_acc.txt')
init_batch_nb = dummy.shape[0]
train_loss = dummy[:,0].tolist()
train_acc = dummy[:,1].tolist()
val_loss = dummy[:,2].tolist()
val_acc = dummy[:,3].tolist()
model.summary()
model.compile(loss='categorical_crossentropy', optimizer='adam', metrics=['accuracy'])
data = {}
# 2. GENERATE VALIDATION DATA
xvalidation = np.zeros(shape=((NB_CLASSES + 1) * NB_VIDEOS_BY_CLASS_VALIDATION, LENGTH_VIDEO, IMAGE_HEIGHT, IMAGE_WIDTH, IMAGE_CHANNELS))
yvalidation = np.zeros(shape=((NB_CLASSES + 1) * NB_VIDEOS_BY_CLASS_VALIDATION, NB_CLASSES + 1))
c = 0
# for each frequency
for i_freq in range(len(HEART_RATES)):
for i_videos in range(NB_VIDEOS_BY_CLASS_VALIDATION):
t2 = t + (np.random.randint(low=0, high=33) * SAMPLING) # phase. 33 corresponds to a full phase shift for HR=55 bpm
signal = a0 + a1 * np.cos(t2 * w * HEART_RATES[i_freq] / 60) + b1 * np.sin(t2 * w * HEART_RATES[i_freq] / 60) + a2 * np.cos(2 * t2 * w * HEART_RATES[i_freq] / 60) + b2 * np.sin(2 * t2 * w * HEART_RATES[i_freq] / 60)
signal = signal - np.min(signal)
signal = signal / np.max(signal)
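# editor's note: after subtracting the minimum and dividing by the new maximum,
# the synthetic PPG template lies in the [0, 1] range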
import os
import sys
import glob
import cv2
import numpy as np
import _pickle as cPickle
from tqdm import tqdm
sys.path.append('../lib')
from align import align_nocs_to_depth
from utils import load_depth
def create_img_list(data_dir):
""" Create train/val/test data list for CAMERA and Real. """
# # CAMERA dataset
# for subset in ['train', 'val']:
# img_list = []
# img_dir = os.path.join(data_dir, 'CAMERA', subset)
# folder_list = [name for name in os.listdir(img_dir) if os.path.isdir(os.path.join(img_dir, name))]
# for i in range(10*len(folder_list)):
# folder_id = int(i) // 10
# img_id = int(i) % 10
# img_path = os.path.join(subset, '{:05d}'.format(folder_id), '{:04d}'.format(img_id))
# img_list.append(img_path)
# with open(os.path.join(data_dir, 'CAMERA', subset+'_list_all.txt'), 'w') as f:
# for img_path in img_list:
# f.write("%s\n" % img_path)
# Real dataset
for subset in ['train', 'test']:
img_list = []
img_dir = os.path.join(data_dir, 'Real', subset)
folder_list = [name for name in sorted(os.listdir(img_dir)) if os.path.isdir(os.path.join(img_dir, name))]
for folder in folder_list:
img_paths = glob.glob(os.path.join(img_dir, folder, '*_color.png'))
img_paths = sorted(img_paths)
for img_full_path in img_paths:
img_name = os.path.basename(img_full_path)
img_ind = img_name.split('_')[0]
img_path = os.path.join(subset, folder, img_ind)
img_list.append(img_path)
with open(os.path.join(data_dir, 'Real', subset+'_list_all.txt'), 'w') as f:
for img_path in img_list:
f.write("%s\n" % img_path)
print('Write all data paths to file done!')
def process_data(img_path, depth):
""" Load instance masks for the objects in the image. """
mask_path = img_path + '_mask.png'
mask = cv2.imread(mask_path)[:, :, 2]
mask = np.array(mask, dtype=np.int32)
all_inst_ids = sorted(list(np.unique(mask)))
assert all_inst_ids[-1] == 255
del all_inst_ids[-1] # remove background
num_all_inst = len(all_inst_ids)
h, w = mask.shape
coord_path = img_path + '_coord.png'
coord_map = cv2.imread(coord_path)[:, :, :3]
coord_map = coord_map[:, :, (2, 1, 0)]
# flip z axis of coord map
coord_map = np.array(coord_map, dtype=np.float32) / 255
coord_map[:, :, 2] = 1 - coord_map[:, :, 2]
class_ids = []
instance_ids = []
model_list = []
masks = np.zeros([h, w, num_all_inst], dtype=np.uint8)
coords = np.zeros((h, w, num_all_inst, 3), dtype=np.float32)
bboxes = np.zeros((num_all_inst, 4), dtype=np.int32)
meta_path = img_path + '_meta.txt'
with open(meta_path, 'r') as f:
i = 0
for line in f:
line_info = line.strip().split(' ')
inst_id = int(line_info[0])
cls_id = int(line_info[1])
# background objects and non-existing objects
if cls_id == 0 or (inst_id not in all_inst_ids):
continue
if len(line_info) == 3:
model_id = line_info[2] # Real scanned objs
else:
model_id = line_info[3] # CAMERA objs
# remove one mug instance in CAMERA train due to improper model
if model_id == 'b9be7cfe653740eb7633a2dd89cec754':
continue
# process foreground objects
inst_mask = np.equal(mask, inst_id)
# bounding box
horizontal_indicies = np.where(np.any(inst_mask, axis=0))[0]
vertical_indicies = np.where(np.any(inst_mask, axis=1))[0]
assert horizontal_indicies.shape[0], print(img_path)
x1, x2 = horizontal_indicies[[0, -1]]
y1, y2 = vertical_indicies[[0, -1]]
# x2 and y2 should not be part of the box. Increment by 1.
x2 += 1
y2 += 1
# object occupies full image, rendering error, happens in CAMERA dataset
if np.any(np.logical_or((x2-x1) > 600, (y2-y1) > 440)):
return None, None, None, None, None, None
# not enough valid depth observation
final_mask = np.logical_and(inst_mask, depth > 0)
if np.sum(final_mask) < 64:
continue
class_ids.append(cls_id)
instance_ids.append(inst_id)
model_list.append(model_id)
masks[:, :, i] = inst_mask
coords[:, :, i, :] = np.multiply(coord_map, np.expand_dims(inst_mask, axis=-1))
bboxes[i] = np.array([y1, x1, y2, x2])
i += 1
# no valid foreground objects
if i == 0:
return None, None, None, None, None, None
masks = masks[:, :, :i]
coords = np.clip(coords[:, :, :i, :], 0, 1)
bboxes = bboxes[:i, :]
return masks, coords, class_ids, instance_ids, model_list, bboxes
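# --- Editor's addition: a hedged, self-contained sketch of the bbox-from-mask logic used in
# --- process_data above. The helper name and the toy mask are illustrative only.
def _bbox_from_mask_demo():
    m = np.zeros((5, 6), dtype=bool)
    m[1:3, 2:5] = True
    xs = np.where(np.any(m, axis=0))[0]   # occupied columns -> [2, 3, 4]
    ys = np.where(np.any(m, axis=1))[0]   # occupied rows    -> [1, 2]
    x1, x2 = xs[[0, -1]]
    y1, y2 = ys[[0, -1]]
    return y1, x1, y2 + 1, x2 + 1         # (1, 2, 3, 5); x2/y2 are exclusive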
def annotate_camera_train(data_dir):
""" Generate gt labels for CAMERA train data. """
camera_train = open(os.path.join(data_dir, 'CAMERA', 'train_list_all.txt')).read().splitlines()
intrinsics = np.array([[577.5, 0, 319.5], [0, 577.5, 239.5], [0, 0, 1]])
# meta info for re-label mug category
with open(os.path.join(data_dir, 'obj_models/mug_meta.pkl'), 'rb') as f:
mug_meta = cPickle.load(f)
valid_img_list = []
for img_path in tqdm(camera_train):
img_full_path = os.path.join(data_dir, 'CAMERA', img_path)
all_exist = os.path.exists(img_full_path + '_color.png') and \
os.path.exists(img_full_path + '_coord.png') and \
os.path.exists(img_full_path + '_depth.png') and \
os.path.exists(img_full_path + '_mask.png') and \
os.path.exists(img_full_path + '_meta.txt')
if not all_exist:
continue
depth = load_depth(img_full_path)
masks, coords, class_ids, instance_ids, model_list, bboxes = process_data(img_full_path, depth)
if instance_ids is None:
continue
# Umeyama alignment of GT NOCS map with depth image
scales, rotations, translations, error_messages, _ = \
align_nocs_to_depth(masks, coords, depth, intrinsics, instance_ids, img_path)
if error_messages:
continue
# re-label for mug category
for i in range(len(class_ids)):
if class_ids[i] == 6:
T0 = mug_meta[model_list[i]][0]
s0 = mug_meta[model_list[i]][1]
T = translations[i] - scales[i] * rotations[i] @ T0
s = scales[i] / s0
scales[i] = s
translations[i] = T
# write results
gts = {}
gts['class_ids'] = class_ids # int list, 1 to 6
gts['bboxes'] = bboxes # np.array, [[y1, x1, y2, x2], ...]
gts['scales'] = scales.astype(np.float32) # np.array, scale factor from NOCS model to depth observation
gts['rotations'] = rotations.astype(np.float32) # np.array, R
gts['translations'] = translations.astype(np.float32) # np.array, T
gts['instance_ids'] = instance_ids # int list, start from 1
gts['model_list'] = model_list # str list, model id/name
with open(img_full_path + '_label.pkl', 'wb') as f:
cPickle.dump(gts, f)
valid_img_list.append(img_path)
# write valid img list to file
with open(os.path.join(data_dir, 'CAMERA/train_list.txt'), 'w') as f:
for img_path in valid_img_list:
f.write("%s\n" % img_path)
def annotate_real_train(data_dir):
""" Generate gt labels for Real train data through PnP. """
real_train = open(os.path.join(data_dir, 'Real/train_list_all.txt')).read().splitlines()
intrinsics = np.array([[591.0125, 0, 322.525], [0, 590.16775, 244.11084], [0, 0, 1]])
# scale factors for all instances
scale_factors = {}
path_to_size = glob.glob(os.path.join(data_dir, 'obj_models/real_train', '*_norm.txt'))
for inst_path in sorted(path_to_size):
instance = os.path.basename(inst_path).split('.')[0]
bbox_dims = np.loadtxt(inst_path)
scale_factors[instance] = np.linalg.norm(bbox_dims)
# meta info for re-label mug category
with open(os.path.join(data_dir, 'obj_models/mug_meta.pkl'), 'rb') as f:
mug_meta = cPickle.load(f)
valid_img_list = []
for img_path in tqdm(real_train):
img_full_path = os.path.join(data_dir, 'Real', img_path)
all_exist = os.path.exists(img_full_path + '_color.png') and \
os.path.exists(img_full_path + '_coord.png') and \
os.path.exists(img_full_path + '_depth.png') and \
os.path.exists(img_full_path + '_mask.png') and \
os.path.exists(img_full_path + '_meta.txt')
if not all_exist:
continue
depth = load_depth(img_full_path)
masks, coords, class_ids, instance_ids, model_list, bboxes = process_data(img_full_path, depth)
if instance_ids is None:
continue
# compute pose
num_insts = len(class_ids)
scales = np.zeros(num_insts)
rotations = np.zeros((num_insts, 3, 3))
translations = np.zeros((num_insts, 3))
for i in range(num_insts):
s = scale_factors[model_list[i]]
mask = masks[:, :, i]
idxs = np.where(mask)
coord = coords[:, :, i, :]
coord_pts = s * (coord[idxs[0], idxs[1], :] - 0.5)
coord_pts = coord_pts[:, :, None]
img_pts = np.array([idxs[1], idxs[0]]).transpose()
img_pts = img_pts[:, :, None].astype(float)
distCoeffs = np.zeros((4, 1)) # no distortion
retval, rvec, tvec = cv2.solvePnP(coord_pts, img_pts, intrinsics, distCoeffs)
assert retval
R, _ = cv2.Rodrigues(rvec)
T = np.squeeze(tvec)
"""This module contains methods to randomly sample, either uniformly or using
LHS, test problems for a fixed budget (inclusive of the initial training data).
"""
import numpy as np
from pyDOE2 import lhs
from .. import test_problems
def perform_LHS_runs(problem_name, budget=250, n_exp_start=1, n_exp_end=51):
"""Generates the LHS samples for a fixed budget (inclusive of training).
Generates a fixed budget of LHS locations (inclusive of those in the
training data) for optimisation runs in [n_exp_start, n_exp_end]. This
function is not for the PitzDaily test problem because it has a
constraint function and therefore needs to be uniformly sampled from to
make sure each sample generated does not violate the constraint; instead,
``perform_uniform_runs`` can be used.
The results of the runs will be stored in the 'results' directory with the name:
'{problem_name:}_{run_no:}_{budget:}_LHS.npz'
Parameters
----------
problem_name : string
Test problem name to perform the optimisation run on. This will
attempt to import problem_name from the test_problems module.
budget : int
Total number of expensive evaluations to carry out. Note that
the budget includes points in the initial training data.
n_exp_start : int
Starting number of the experiment.
n_exp_end : int
Ending number (inclusive) of the experiment.
"""
exp_nos = np.arange(n_exp_start, n_exp_end + 1)
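# Editor's sketch (assumption, not the module's actual body): for each run number a
# `budget`-sized LHS design could be drawn with the imported pyDOE2 helper, e.g.
#   np.random.seed(run_no)
#   X = lhs(n_dims, samples=budget, criterion='maximin')   # unit-cube samples
# and then rescaled to the problem's bounds before evaluation.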
# -*- coding: utf-8 -*-
"""
For testing netneurotools.stats functionality
"""
import itertools
import numpy as np
import pytest
from netneurotools import datasets, stats
@pytest.mark.xfail
def test_permtest_1samp():
assert False
# n1, n2, n3 = 10, 15, 20
# rs = np.random.RandomState(1234)
# rvn1 = rs.normal(loc=8, scale=10, size=(n1, n2, n3))
# t1, p1 = stats.permtest_1samp(rvn1, 1, axis=0)
def test_permtest_rel():
dr, pr = -0.0005, 0.4175824175824176
dpr = ([dr, -dr], [pr, pr])
rvs1 = np.linspace(1, 100, 100)
rvs2 = np.linspace(1.01, 99.989, 100)
rvs1_2D = np.array([rvs1, rvs2])
rvs2_2D = np.array([rvs2, rvs1])
import struct
import socket
import pickle
import json
from torch.optim import SGD, Adam, AdamW
import sys
import time
import random
import numpy as np # linear algebra
import pandas as pd # data processing, CSV file I/O (e.g. pd.read_csv)
import matplotlib.pyplot as plt
#import seaborn as sns
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.utils.data import Dataset, DataLoader
from torch.autograd import Variable
from sklearn.model_selection import train_test_split
from sklearn.metrics import precision_recall_curve
from sklearn.metrics import classification_report, confusion_matrix
from sklearn.metrics import accuracy_score, auc, f1_score, precision_score, recall_score, roc_auc_score
from sklearn.preprocessing import MinMaxScaler
import Metrics
import wfdb
import ast
import math
import os.path
import utils
import Models
#np.set_printoptions(threshold=np.inf)
cwd = os.path.dirname(os.path.abspath(__file__))
mlb_path = os.path.join(cwd, "..","Benchmark", "output", "mlb.pkl")
scaler_path = os.path.join(cwd, "..","Benchmark", "output", "standard_scaler.pkl")
ptb_path = os.path.join(cwd, "..", "server", "../server/PTB-XL", "ptb-xl/")
import wandb
wandb.init(project="non-IID,clean", entity="split-learning-medical")
client_num = 1
num_classes = 2
pretrain_this_client = 0
simultrain_this_client = 0
pretrain_epochs = 50
IID = 0
f = open('parameter_client.json', )
data = json.load(f)
# set parameters fron json file
#epoch = data["training_epochs"]
lr = data["learningrate"]
batchsize = data["batchsize"]
batch_concat = data["batch_concat"]
host = data["host"]
port = data["port"]
max_recv = data["max_recv"]
autoencoder = data["autoencoder"]
detailed_output = data["detailed_output"]
count_flops = data["count_flops"]
plots = data["plots"]
autoencoder_train = data["autoencoder_train"]
deactivate_train_after_num_epochs = data["deactivate_train_after_num_epochs"]
grad_encode = data["grad_encode"]
train_gradAE_active = data["train_gradAE_active"]
deactivate_grad_train_after_num_epochs = data["deactivate_grad_train_after_num_epochs"]
wandb.init(config={
"learning_rate": lr,
#"epochs": epoch,
"batch_size": batchsize,
"autoencoder": autoencoder
})
wandb.config.update({"learning_rate": lr, "PC: ": 2})
def print_json():
print("learningrate: ", lr)
print("grad_encode: ", grad_encode)
print("gradAE_train: ", train_gradAE_active)
print("deactivate_grad_train_after_num_epochs: ", deactivate_grad_train_after_num_epochs)
#print("Getting the metadata epoch: ", epoch)
print("Getting the metadata host: ", host)
print("Getting the metadata port: ", port)
print("Getting the metadata batchsize: ", batchsize)
print("Autoencoder: ", autoencoder)
print("detailed_output: ", detailed_output)
print("count_flops: ", count_flops)
print("plots: ", plots)
print("autoencoder_train: ", autoencoder_train)
print("deactivate_train_after_num_epochs: ", deactivate_train_after_num_epochs)
# load data from json file
class PTB_XL(Dataset):
def __init__(self, stage=None):
self.stage = stage
if self.stage == 'train':
global X_train
global y_train
self.y_train = y_train
self.X_train = X_train
if self.stage == 'val':
global y_val
global X_val
self.y_val = y_val
self.X_val = X_val
if self.stage == 'test':
global y_test
global X_test
self.y_test = y_test
self.X_test = X_test
if self.stage == 'raw':
global y_raw
global X_raw
self.y_raw = y_raw
self.X_raw = X_raw
def __len__(self):
if self.stage == 'train':
return len(self.y_train)
if self.stage == 'val':
return len(self.y_val)
if self.stage == 'test':
return len(self.y_test)
if self.stage == 'raw':
return len(self.y_raw)
def __getitem__(self, idx):
if self.stage == 'train':
sample = self.X_train[idx].transpose((1, 0)), self.y_train[idx]
if self.stage == 'val':
sample = self.X_val[idx].transpose((1, 0)), self.y_val[idx]
if self.stage == 'test':
sample = self.X_test[idx].transpose((1, 0)), self.y_test[idx]
if self.stage == 'raw':
sample = self.X_raw[idx].transpose((1, 0)), self.y_raw[idx]
return sample
def init():
train_dataset = PTB_XL('train')
val_dataset = PTB_XL('val')
if IID:
train_1, rest1 = torch.utils.data.random_split(train_dataset, [3853, 15414], generator=torch.Generator().manual_seed(42))
train_2, rest2 = torch.utils.data.random_split(rest1, [3853, 11561], generator=torch.Generator().manual_seed(42))
train_3, rest3 = torch.utils.data.random_split(rest2, [3853, 7708], generator=torch.Generator().manual_seed(42))
train_4, train_5 = torch.utils.data.random_split(rest3, [3853, 3855], generator=torch.Generator().manual_seed(42))
if client_num == 1: train_dataset = train_1
if client_num == 2: train_dataset = train_2
if client_num == 3: train_dataset = train_3
if client_num == 4: train_dataset = train_4
if client_num == 5: train_dataset = train_5
if pretrain_this_client:
raw_dataset = PTB_XL('raw')
print("len raw dataset", len(raw_dataset))
pretrain_dataset, no_dataset = torch.utils.data.random_split(raw_dataset, [963, 18304],
generator=torch.Generator().manual_seed(42))
print("pretrain_dataset length: ", len(pretrain_dataset))
global pretrain_loader
pretrain_loader = torch.utils.data.DataLoader(pretrain_dataset, batch_size=batchsize, shuffle=True)
if simultrain_this_client:
raw_dataset = PTB_XL('raw')
print("len raw dataset", len(raw_dataset))
pretrain_dataset, no_dataset = torch.utils.data.random_split(raw_dataset, [963, 18304],
generator=torch.Generator().manual_seed(42))
print("len train dataset", len(train_dataset))
train_dataset = torch.utils.data.ConcatDataset((pretrain_dataset, train_dataset))
print("len mixed-train dataset", len(train_dataset))
print("train_dataset length: ", len(train_dataset))
print("val_dataset length: ", len(train_dataset))
global train_loader
global val_loader
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batchsize, shuffle=True)
"""
def new_split():
global train_loader
global val_loader
train_dataset, val_dataset = torch.utils.data.random_split(training_dataset,
[size_train_dataset,
len(training_dataset) - size_train_dataset])
print("train_dataset size: ", size_train_dataset)
print("val_dataset size: ", len(training_dataset) - size_train_dataset)
train_loader = torch.utils.data.DataLoader(train_dataset, batch_size=batchsize, shuffle=True)
val_loader = torch.utils.data.DataLoader(val_dataset, batch_size=batchsize, shuffle=True)
"""
if count_flops: #Does not work on the Jetson Nano yet. The amount of FLOPs doesn't depend on the architecture. Measuring FLOPs on the PC and JetsonNano would result in the same outcome.
# The paranoid switch prevents the FLOPs count
# Solution: sudo sh -c 'echo 1 >/proc/sys/kernel/perf_event_paranoid'
# Needs to be done after every restart of the PC
from ptflops import get_model_complexity_info
from pypapi import events, papi_high as high
def str_to_number(label):
a = np.zeros(5)
if not label:
return a
for i in label:
if i == 'NORM':
a[0] = 1
if i == 'MI':
a[1] = 1
if i == 'STTC':
a[2] = 1
if i == 'HYP':
a[3] = 1
if i == 'CD':
a[4] = 1
return a
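# editor's example: str_to_number(['NORM', 'MI']) -> array([1., 1., 0., 0., 0.]),
# i.e. a 5-way multi-hot encoding over (NORM, MI, STTC, HYP, CD)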
#send/recieve system:
def send_msg(sock, getid, content):
"""
pickles the content (creates a bytestream), adds a length header and sends the message via the tcp port
:param sock: socket
:param getid: id of the request, used by the server to dispatch the handler
:param content: content to send via tcp port
"""
msg = [getid, content] # add getid
msg = pickle.dumps(msg)
msg = struct.pack('>I', len(msg)) + msg # add 4-byte length in network byte order
#print("communication overhead send: ", sys.getsizeof(msg), " bytes")
global data_send_per_epoch
data_send_per_epoch += sys.getsizeof(msg)
sock.sendall(msg)
def recieve_msg(sock):
"""
receives the message with a helper function and unpickles it; the result is the [getid, content] list
:param sock: socket
"""
msg = recv_msg(sock) # receive client message from socket
msg = pickle.loads(msg)
return msg
def recieve_request(sock):
"""
receives the message with a helper function, unpickles it and separates the getid from the actual message content
:param sock: socket
"""
msg = recv_msg(sock) # receive client message from socket
msg = pickle.loads(msg)
getid = msg[0]
content = msg[1]
handle_request(sock, getid, content)
def recv_msg(sock):
"""
gets the message length (which corresponds to the first
4 bytes of the received bytestream) with the recvall function
:param sock: socket
:return: returns the data retrieved from the recvall function
"""
# read message length and unpack it into an integer
raw_msglen = recvall(sock, 4)
if not raw_msglen:
return None
msglen = struct.unpack('>I', raw_msglen)[0]
#print("Message length:", msglen)
global data_recieved_per_epoch
data_recieved_per_epoch += msglen
# read the message data
return recvall(sock, msglen)
def recvall(sock, n):
"""
returns the data from a received bytestream; helper function
to receive n bytes or return None if EOF is hit
:param sock: socket
:param n: length in bytes (number of bytes)
:return: message
"""
#
data = b''
while len(data) < n:
if detailed_output:
print("Start function sock.recv")
packet = sock.recv(n - len(data))
if not packet:
return None
data += packet
# print("Daten: ", data)
return data
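# Editor's note (illustrative, not part of the original client): every message on the wire is
# a 4-byte big-endian length prefix (struct.pack('>I', len(payload))) followed by the pickled
# [getid, content] list. recv_msg() reads the prefix and recvall() loops until exactly that
# many payload bytes have arrived, so a hypothetical loopback round trip looks like
#   send_msg(sock_a, 0, {'hello': 1})
#   getid, content = pickle.loads(recv_msg(sock_b))   # -> 0, {'hello': 1}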
def handle_request(sock, getid, content):
"""
executes the requested function, depending on the getid, and passes the received message
:param sock: socket
:param getid: id of the function that should be executed when the message is received
:param content: message content
"""
#print("request mit id:", getid)
switcher = {
0: initialize_model,
1: train_epoch,
2: val_stage,
3: test_stage,
}
switcher.get(getid, "invalid request recieved")(sock, content)
def serverHandler(conn):
while True:
recieve_request(conn)
def grad_postprocessing(grad):
grad_new = grad.numpy()
for a in range(64):
#scaler.fit(grad[a])
grad_new[a] = scaler.inverse_transform(grad[a])
grad_new = torch.DoubleTensor(grad_new).to(device)
return grad_new
def train_epoch(s, pretraining):
#new_split() #new random dist between train and val
loss_grad_total = 0
global epoch
epoch += 1
flops_forward_epoch, flops_encoder_epoch, flops_backprop_epoch, flops_rest, flops_send = 0,0,0,0,0
#Specify AE configuration
train_active = 0 #default: AE is pretrained
train_grad_active = 0
if epoch < deactivate_train_after_num_epochs:
if autoencoder_train:
train_active = 1
if epoch < deactivate_grad_train_after_num_epochs:
if train_gradAE_active:
train_grad_active = 1
global data_send_per_epoch, data_recieved_per_epoch, data_send_per_epoch_total, data_recieved_per_epoch_total
data_send_per_epoch, data_recieved_per_epoch = 0, 0
correct_train, total_train, train_loss = 0, 0, 0
batches_aborted, total_train_nr, total_val_nr, total_test_nr = 0, 0, 0, 0
hamming_epoch, precision_epoch, recall_epoch, f1_epoch, auc_train = 0, 0, 0, 0, 0
#encoder_grad_server = 0
epoch_start_time = time.time()
loader = pretrain_loader if pretraining else train_loader
for b, batch in enumerate(loader):
if count_flops:
x = high.read_counters()
#print("batch: ", b)
# print("FLOPs dataloader: ", x)
# if b % 100 == 0:
# print("batch ", b, " / ", total_batch)
forward_time = time.time()
active_training_time_batch_client = 0
start_time_batch_forward = time.time()
# define labels and data per batch
x_train, label_train = batch
x_train = x_train.to(device)
# x_train = x_train.to(device)
label_train = label_train.double().to(device)
if len(x_train) != 64:
break
if count_flops:
x = high.read_counters()
flops_rest += x[0] # reset Flop Counter
optimizer.zero_grad() # sets gradients to 0 - start for backprop later
client_output_backprop = client(x_train)
client_output_train = client_output_backprop.detach().clone()
if count_flops:
x = high.read_counters()
#print("FLOPs forward: ", x)
flops_forward_epoch += x[0]
client_output_train_without_ae_send = 0
if autoencoder:
if train_active:
optimizerencode.zero_grad()
# client_output_train_without_ae = client_output_train.clone().detach().requires_grad_(False)
client_encoded = encode(client_output_train)
client_output_send = client_encoded.detach().clone()
if train_active:
client_output_train_without_ae_send = client_output_train.detach().clone()
else:
client_output_send = client_output_train.detach().clone()
# client_output_send = encode(client_output_train)
if count_flops:
x = high.read_counters()
flops_encoder_epoch += x[0]
global encoder_grad_server
msg = {
'client_output_train': client_output_send,
'client_output_train_without_ae': client_output_train_without_ae_send,
'label_train': label_train, # concat_labels,
'batch_concat': batch_concat,
'batchsize': batchsize,
'train_active': train_active,
'encoder_grad_server': encoder_grad_server,
'train_grad_active': train_grad_active,
'grad_encode': grad_encode
}
active_training_time_batch_client += time.time() - start_time_batch_forward
if detailed_output:
print("Send the message to server")
send_msg(s, 0, msg)
# while concat_counter_recv < concat_counter_send:
msg = recieve_msg(s)
# print("msg: ", msg)
if pretraining == 0:
wandb.log({"dropout_threshold": msg["dropout_threshold"]}, commit=False)
# decode grad:
client_grad_without_encode = msg["client_grad_without_encode"]
client_grad = msg["grad_client"]
global scaler
scaler = msg["scaler"]
if msg["grad_encode"]:
if train_grad_active:
# print("train_active")
optimizer_grad_decoder.zero_grad()
client_grad = Variable(client_grad, requires_grad=True)
client_grad_decode = grad_decoder(client_grad)
if train_grad_active:
loss_grad_autoencoder = error_grad_autoencoder(client_grad_without_encode, client_grad_decode)
loss_grad_total += loss_grad_autoencoder.item()
loss_grad_autoencoder.backward()
encoder_grad_server = client_grad.grad.detach().clone()#
optimizer_grad_decoder.step()
# print("loss_grad_autoencoder: ", loss_grad_autoencoder)
else:
encoder_grad_server = 0
client_grad_decode = grad_postprocessing(client_grad_decode.detach().clone().cpu())
else:
if msg["client_grad_abort"] == 0:
client_grad_decode = client_grad.detach().clone()
#else:
# client_grad = "abort"
encoder_grad_server = 0
start_time_batch_backward = time.time()
encoder_grad = msg["encoder_grad"]
if client_grad == "abort":
# print("client_grad: ", client_grad)
train_loss_add, add_correct_train, add_total_train = msg["train_loss"], msg["add_correct_train"], \
msg["add_total_train"]
correct_train += add_correct_train
total_train_nr += 1
total_train += add_total_train
train_loss += train_loss_add
batches_aborted += 1
output_train = msg["output_train"]
# print("train_loss: ", train_loss/total_train_nr)
# meter.update(output_train, label_train, train_loss/total_train_nr)
pass
else:
if train_active:
client_encoded.backward(encoder_grad)
optimizerencode.step()
# concat_tensors[concat_counter_recv].to(device)
# concat_tensors[concat_counter_recv].backward(client_grad)
# client_output_backprob.to(device)
# if b % 1000 == 999:
# print("Backprop with: ", client_grad)
if count_flops:
x = high.read_counters() # reset counter
flops_rest += x[0]
flops_send += x[0]
client_output_backprop.backward(client_grad_decode)
optimizer.step()
if count_flops:
x = high.read_counters()
# print("FLOPs backprob: ", x)
flops_backprop_epoch += x[0]
train_loss_add, add_correct_train, add_total_train = msg["train_loss"], msg["add_correct_train"], \
msg["add_total_train"]
correct_train += add_correct_train
total_train_nr += 1
total_train += add_total_train
train_loss += train_loss_add
output_train = msg["output_train"]
# print("train_loss: ", train_loss/total_train_nr)
# meter.update(output_train, label_train, train_loss/total_train_nr)
# wandb.watch(client, log_freq=100)
output = torch.round(output_train)
# if np.sum(label.cpu().detach().numpy()[0]) > 1:
# if np.sum(output.cpu().detach().numpy()[0] > 1):
# print("output[0]: ", output.cpu().detach().numpy()[0])
# print("label [0]: ", label.cpu().detach().numpy()[0])
#if (total_train_nr % 100 == 0):
# print("output[0]: ", output.cpu().detach().numpy()[0])
# print("label [0]: ", label_train.cpu().detach().numpy()[0])
#global batches_abort_rate_total
#batches_abort_rate_total.append(batches_aborted / total_train_nr)
active_training_time_batch_client += time.time() - start_time_batch_backward
#active_training_time_batch_server = msg["active_trtime_batch_server"]
#active_training_time_epoch_client += active_training_time_batch_client
#active_training_time_epoch_server += active_training_time_batch_server
#
try:
roc_auc = roc_auc_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),average='micro')
auc_train += roc_auc
except:
# print("auc_train_exception: ")
# print("label: ", label)
# print("output: ", output)
pass
hamming_epoch += Metrics.Accuracy(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu())
# accuracy_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu())
precision_epoch += precision_score(label_train.detach().clone().cpu(),
torch.round(output).detach().clone().cpu(),
average='micro', zero_division=0)
# recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
recall_epoch += recall_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),
average='micro', zero_division=0)
# f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
f1_epoch += f1_score(label_train.detach().clone().cpu(), torch.round(output).detach().clone().cpu(),
average='micro', zero_division=0)
epoch_endtime = time.time() - epoch_start_time
if pretraining:
status_epoch_train = "epoch: {}, AUC_train: {:.4f}, Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, trainingtime for epoch: {:.6f}s, batches abortrate:{:.2f}, train_loss: {:.4f} ".format(
epoch, auc_train / total_train_nr, hamming_epoch / total_train_nr, precision_epoch / total_train_nr,
recall_epoch / total_train_nr,
f1_epoch / total_train_nr, epoch_endtime, batches_aborted / total_train_nr,
train_loss / total_train_nr)
print("status_epoch_pretrain: ", status_epoch_train)
else:
flops_client_forward_total.append(flops_forward_epoch)
flops_client_encoder_total.append(flops_encoder_epoch)
flops_client_backprop_total.append(flops_backprop_epoch)
print("data_send_per_epoch: ", data_send_per_epoch / 1000000, " MegaBytes")
print("data_recieved_per_epoch: ", data_recieved_per_epoch / 1000000, "MegaBytes")
data_send_per_epoch_total.append(data_send_per_epoch)
data_recieved_per_epoch_total.append(data_recieved_per_epoch)
status_epoch_train = "epoch: {}, AUC_train: {:.4f}, Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, trainingtime for epoch: {:.6f}s, batches abortrate:{:.2f}, train_loss: {:.4f} ".format(
epoch, auc_train / total_train_nr, hamming_epoch / total_train_nr, precision_epoch / total_train_nr,
recall_epoch / total_train_nr,
f1_epoch / total_train_nr, epoch_endtime, batches_aborted / total_train_nr,
train_loss / total_train_nr)
print("status_epoch_train: ", status_epoch_train)
if count_flops:
print("MegaFLOPS_forward_epoch", flops_forward_epoch / 1000000)
print("MegaFLOPS_encoder_epoch", flops_encoder_epoch / 1000000)
print("MegaFLOPS_backprop_epoch", flops_backprop_epoch / 1000000)
print("MegaFLOPS_rest", flops_rest / 1000000)
print("MegaFLOPS_send", flops_send / 1000000)
wandb.log({"Batches Abortrate": batches_aborted / total_train_nr,
"MegaFLOPS Client Encoder": flops_encoder_epoch / 1000000,
"MegaFLOPS Client Forward": flops_forward_epoch / 1000000,
"MegaFLOPS Client Backprop": flops_backprop_epoch / 1000000},
commit=False)
global auc_train_log
auc_train_log = auc_train / total_train_nr
global accuracy_train_log
accuracy_train_log = hamming_epoch / total_train_nr
global batches_abort_rate_total
batches_abort_rate_total.append(batches_aborted / total_train_nr)
initial_weights = client.state_dict()
send_msg(s, 2, initial_weights)
msg = 0
send_msg(s, 3, msg)
def val_stage(s, pretraining=0):
total_val_nr, val_loss_total, correct_val, total_val = 0, 0, 0, 0
val_losses, val_accs = [], []
hamming_epoch, precision_epoch, recall_epoch, f1_epoch, accuracy, auc_val = 0, 0, 0, 0, 0, 0
val_time = time.time()
with torch.no_grad():
for b_t, batch_t in enumerate(val_loader):
x_val, label_val = batch_t
x_val, label_val = x_val.to(device), label_val.double().to(device)
optimizer.zero_grad()
output_val = client(x_val, drop=False)
client_output_val = output_val.clone().detach().requires_grad_(True)
if autoencoder:
client_output_val = encode(client_output_val)
msg = {'client_output_val/test': client_output_val,
'label_val/test': label_val,
}
if detailed_output:
print("The msg is:", msg)
send_msg(s, 1, msg)
if detailed_output:
print("294: send_msg success!")
msg = recieve_msg(s)
if detailed_output:
print("296: recieve_msg success!")
correct_val_add = msg["correct_val/test"]
val_loss = msg["val/test_loss"]
output_val_server = msg["output_val/test_server"]
val_loss_total += val_loss
correct_val += correct_val_add
total_val_add = len(label_val)
total_val += total_val_add
total_val_nr += 1
try:
roc_auc = roc_auc_score(label_val.detach().clone().cpu(), torch.round(output_val_server).detach().clone().cpu(), average='micro')
auc_val += roc_auc
except:
# print("auc_train_exception: ")
# print("label: ", label)
# print("output: ", output)
pass
output_val_server = torch.round(output_val_server)
hamming_epoch += Metrics.Accuracy(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu())
#accuracy_score(label_val.detach().clone().cpu(),
# torch.round(output_val_server).detach().clone().cpu())
precision_epoch += precision_score(label_val.detach().clone().cpu(),
output_val_server.detach().clone().cpu(),
average='micro', zero_division=0)
# recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
recall_epoch += recall_score(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu(),
average='micro', zero_division=0)
# f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
f1_epoch += f1_score(label_val.detach().clone().cpu(), output_val_server.detach().clone().cpu(),
average='micro', zero_division=0)
status_epoch_val = "epoch: {},AUC_val: {:.4f} ,Accuracy: {:.4f}, Precision: {:.4f}, Recall: {:.4f}, F1: {:.4f}, val_loss: {:.4f}".format(
epoch, auc_val / total_val_nr, hamming_epoch / total_val_nr, precision_epoch / total_val_nr,
recall_epoch / total_val_nr,
f1_epoch / total_val_nr, val_loss_total / total_val_nr)
print("status_epoch_val: ", status_epoch_val)
if pretraining == 0:
wandb.log({"Loss_val": val_loss_total / total_val_nr,
"Accuracy_val_micro": hamming_epoch / total_val_nr,
"F1_val": f1_epoch / total_val_nr,
"AUC_val": auc_val / total_val_nr,
"AUC_train": auc_train_log,
"Accuracy_train_micro": accuracy_train_log})
send_msg(s, 3, 0)
def test_stage(s, epoch):
loss_test = 0.0
correct_test, total_test = 0, 0
hamming_epoch = 0
precision_epoch = 0
recall_epoch = 0
f1_epoch = 0
total_test_nr = 0
with torch.no_grad():
for b_t, batch_t in enumerate(val_loader):
x_test, label_test = batch_t
x_test, label_test = x_test.to(device), label_test.double().to(device)
optimizer.zero_grad()
output_test = client(x_test, drop=False)
client_output_test = output_test.clone().detach().requires_grad_(True)
if autoencoder:
client_output_test = encode(client_output_test)
msg = {'client_output_val/test': client_output_test,
'label_val/test': label_test,
}
if detailed_output:
print("The msg is:", msg)
send_msg(s, 1, msg)
if detailed_output:
print("294: send_msg success!")
msg = recieve_msg(s)
if detailed_output:
print("296: recieve_msg success!")
correct_test_add = msg["correct_val/test"]
test_loss = msg["val/test_loss"]
output_test_server = msg["output_val/test_server"]
loss_test += test_loss
correct_test += correct_test_add
total_test_add = len(label_test)
total_test += total_test_add
total_test_nr += 1
output_test_server = torch.round(output_test_server)
hamming_epoch += Metrics.Accuracy(label_test.detach().clone().cpu(), output_test_server.detach().clone().cpu())
#accuracy_score(label_test.detach().clone().cpu(),
#torch.round(output_test_server).detach().clone().cpu())
precision_epoch += precision_score(label_test.detach().clone().cpu(),
output_test_server.detach().clone().cpu(),
average='micro')
# recall_epoch += Plots.Recall(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
recall_epoch += recall_score(label_test.detach().clone().cpu(),
output_test_server.detach().clone().cpu(),
average='micro')
# f1_epoch += Plots.F1Measure(label_train.detach().clone().cpu(), output.detach().clone().cpu()).item()
f1_epoch += f1_score(label_test.detach().clone().cpu(),
output_test_server.detach().clone().cpu(),
average='micro')
status_test = "test: hamming_epoch: {:.4f}, precision_epoch: {:.4f}, recall_epoch: {:.4f}, f1_epoch: {:.4f}".format(
hamming_epoch / total_test_nr, precision_epoch / total_test_nr, recall_epoch / total_test_nr,
f1_epoch / total_test_nr)
print("status_test: ", status_test)
global data_send_per_epoch_total
global data_recieved_per_epoch_total
global batches_abort_rate_total
data_transfer_per_epoch = 0
average_dismissal_rate = 0
total_flops_forward = 0
total_flops_encoder = 0
total_flops_backprob = 0
for data in data_send_per_epoch_total:
data_transfer_per_epoch += data
for data in data_recieved_per_epoch_total:
data_transfer_per_epoch += data
for data in batches_abort_rate_total:
average_dismissal_rate += data
for flop in flops_client_forward_total:
total_flops_forward += flop
for flop in flops_client_encoder_total:
total_flops_encoder += flop
for flop in flops_client_backprop_total:
total_flops_backprob += flop
total_flops = total_flops_backprob + total_flops_encoder + total_flops_forward
print("total FLOPs forward: ", total_flops_forward)
print("total FLOPs encoder: ", total_flops_encoder)
print("total FLOPs backprob: ", total_flops_backprob)
print("total FLOPs client: ", total_flops)
print("Average data transfer/epoch: ", data_transfer_per_epoch / epoch / 1000000, " MB")
print("Average dismissal rate: ", average_dismissal_rate / epoch)
wandb.config.update({"Average data transfer/epoch (MB): ": data_transfer_per_epoch / epoch / 1000000,
"Average dismissal rate: ": average_dismissal_rate / epoch,
"total_MegaFLOPS_forward": total_flops_forward/1000000, "total_MegaFLOPS_encoder": total_flops_encoder/1000000,
"total_MegaFLOPS_backprob": total_flops_backprob/1000000, "total_MegaFLOPS": total_flops/1000000})
msg = 0
send_msg(s, 3, msg)
def initialize_model(s, msg):
"""
if the newly connected client is not the first connected client,
the initial weights are fetched from the server
:param s: socket
:param msg: state dict with the initial weights, or 0 if there is nothing to load
"""
#msg = recieve_msg(s)
if msg == 0:
#print("msg == 0")
pass
else:
print("msg != 0")
client.load_state_dict(msg, strict=False)
print("model successfully initialized")
#print("start_training")
# start_training(s)
#train_epoch(s)
def initIID():
global X_train, X_val, y_val, y_train, y_test, X_test
sampling_frequency = 100
datafolder = ptb_path
task = 'superdiagnostic'
outputfolder = mlb_path
# Load PTB-XL data
data, raw_labels = utils.load_dataset(datafolder, sampling_frequency)
# Preprocess label data
labels = utils.compute_label_aggregations(raw_labels, datafolder, task)
# Select relevant data and convert to one-hot
data, labels, Y, _ = utils.select_data(data, labels, task, min_samples=0, outputfolder=outputfolder)
input_shape = data[0].shape
print(input_shape)
# 1-9 for training
X_train = data[labels.strat_fold < 10]
y_train = Y[labels.strat_fold < 10]
# 10 for validation
X_val = data[labels.strat_fold == 10]
y_val = Y[labels.strat_fold == 10]
# X_test = data[labels.strat_fold == 10]
# y_test = Y[labels.strat_fold == 10]
num_classes = 5 # <=== number of classes in the finetuning dataset
input_shape = [1000, 12] # <=== shape of samples, [None, 12] in case of different lengths
print(X_train.shape, y_train.shape, X_val.shape, y_val.shape) # , X_test.shape, y_test.shape)
import pickle
standard_scaler = pickle.load(open(scaler_path, "rb"))
X_train = utils.apply_standardizer(X_train, standard_scaler)
X_val = utils.apply_standardizer(X_val, standard_scaler)
global X_raw, y_raw
X_raw = X_train
y_raw = y_train
def init_nonIID():
global X_train, X_val, y_val, y_train, y_test, X_test
norm, mi, sttc, hyp, cd = [],[],[],[],[]
for a in range(len(y_train)):
if label_class(y_train[a], 0):
sttc.append(X_train[a])
if label_class(y_train[a], 1):
hyp.append(X_train[a])
if label_class(y_train[a], 2):
mi.append(X_train[a])
if label_class(y_train[a], 3):
norm.append(X_train[a])
if label_class(y_train[a], 4):
cd.append(X_train[a])
"""
print("norm shape: ", len(norm))
print("mi shape: ", len(mi))
print("sttc shape: ", len(sttc))
print("hyp shape: ", len(hyp))
print("cd shape: ", len(cd))
print("norm label: ", label_norm[0])
print("mi label: ", label_mi[0])
print("sttc label: ", label_sttc[0])
print("hyp label: ", label_hyp[0])
print("cd label: ", label_cd[0])
print("norm label: ", len(label_norm))
print("mi label: ", len(label_mi))
print("sttc label: ", len(label_sttc))
print("hyp label: ", len(label_hyp))
print("cd label: ", len(label_cd))
"""
if client_num == 1:
if num_classes == 1:
print("Client number: ", client_num, " Class norm")
X_train = norm
y_train = label_norm
if num_classes == 2:
print("Client number: ", client_num, " Class norm, mi")
X_train = np.concatenate((norm, mi), axis=0)
y_train = np.concatenate((label_norm, label_mi), axis=0)
if num_classes == 3:
print("Client number: ", client_num, " Class norm, mi, sttc")
X_train = np.concatenate((norm, mi), axis=0)
X_train = np.concatenate((X_train, sttc), axis=0)
y_train = np.concatenate((label_norm, label_mi), axis=0)
y_train = np.concatenate((y_train, label_sttc), axis=0)
if client_num == 2:
if num_classes == 1:
print("Client number: ", client_num, " Class mi")
X_train = mi
y_train = label_mi
if num_classes == 2:
print("Client number: ", client_num, " Class mi, sttc")
X_train = np.concatenate((mi, sttc), axis=0)
y_train = np.concatenate((label_mi, label_sttc), axis=0)
from gym import Env
from gym.utils import seeding
from gym import spaces
import numpy as np
import keras.backend as K
class BaseEnv(Env):
metadata = {'render.modes': ['human', 'ansi']}
def __init__(self, action_mapping):
self._seed()
self.verbose = 0
self.viewer = None
self.batch_size = 32
self.optimizer = None
self.model = None
self.current_step = 0
self.action_mapping = action_mapping
self.action_space = action_mapping.action_space
bounds = float('inf')
self.observation_space = spaces.Box(-bounds, bounds, (4,))
self.viewer = None
self.best = None
self.evaluate_test = False
Env.__init__(self)
def _seed(self, seed=None):
self.np_random, seed = seeding.np_random(seed)
return [seed]
def create_model(self):
pass
def create_optimizer(self):
pass
def loss_scale(self, loss):
return -np.log(loss)
def _step(self, action):
self.action_mapping.step(self.optimizer, action)
loss_before = self.losses(self.data_val)
if self.best is None:
self.best = loss_before
self.model.fit(self.data_train[0], self.data_train[1],
validation_data=(self.data_val[0], self.data_val[1]),
nb_epoch=1, verbose=self.verbose, batch_size=self.batch_size)
loss_after = self.losses(self.data_val)
self.current_step += 1
observation = self._observation()
if (loss_after > 1e10) or (not np.all(np.isfinite(observation))):
# Copyright 2020-2022 OpenDR European Project
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
# OS Libraries
import os
import os.path
import datetime
# Data Structure Libraries
from collections import deque
# ROS Libraries
import rospy
# ROS Messages
from std_msgs.msg import Header
from sensor_msgs.msg import Image
from gmapping.msg import doubleMap, mapModel
# Math Libraries
import numpy as np
import numpy.ma as ma
from cv_bridge import CvBridge
import matplotlib
import matplotlib.pyplot as plt
# Project Libraries
from fmp_slam_eval.map_colorizer import MapColorizer
from fmp_slam_eval.enums import DiscreteStates as DiSt
from map_simulator.utils import map_msg_to_numpy, map_msg_extent, mkdir_p
# Use non-interactive plotting back-end due to issues with rospy.spin()
matplotlib.use('SVG')
class FMPPlotter:
"""
Class for plotting/coloring different statistics from the Full Map Posterior distribution
and publishing them as images or saving them in files.
"""
def __init__(self):
"""
Constructor
"""
rospy.init_node('fmp_plot')
# Object for pseudo-coloring and plotting the maps
self._map_colorizer = MapColorizer()
self._sub_topic_map_model = "map_model"
self._sub_topic_fmp_alpha = "fmp_alpha"
self._sub_topic_fmp_beta = "fmp_beta"
self._map_model = None
        # TODO: these two guys:
# do_img_raw = rospy.get_param("~img_raw" , False)
# do_img_fmp = rospy.get_param("~img_fmp" , False)
do_img_stat = rospy.get_param("~img_stat", False)
do_img_mlm = rospy.get_param("~img_mlm", False)
do_img_para = rospy.get_param("~img_para", False)
self._pub_img = rospy.get_param("~pub_img", False)
self._topic_prefix = rospy.get_param("~pub_topic_prefix", "/fmp_img/")
self._save_img = rospy.get_param("~save_img", False)
self._resolution = rospy.get_param("~resolution", 300)
timestamp = datetime.datetime.now().strftime('%y%m%d_%H%M%S')
path_prefix = rospy.get_param("~path_prefix", "exp")
default_path = os.path.join(os.path.join(os.path.expanduser('~')), 'Desktop')
default_path = os.path.join(default_path, 'FMP_img')
default_path = os.path.join(default_path, path_prefix + "_" + timestamp)
save_dir = rospy.get_param("~save_dir", default_path)
save_dir = os.path.expanduser(save_dir)
save_dir = os.path.expandvars(save_dir)
save_dir = os.path.normpath(save_dir)
self._save_dir = save_dir
# Image config dictionary
sub_img_stat_mean_cfg = {"key": "mean", "dir": os.path.join("stats", "mean"), "file_prefix": "mean",
"topic": "stats/mean", "calc_f": self._calc_mean}
sub_img_stat_var_cfg = {"key": "var", "dir": os.path.join("stats", "var"), "file_prefix": "var",
"topic": "stats/var", "calc_f": self._calc_var}
img_stat_cfg = {"do": do_img_stat, "img": [sub_img_stat_mean_cfg, sub_img_stat_var_cfg]}
sub_img_mlm_cfg = {"key": "mlm", "dir": "mlm", "file_prefix": "mlm",
"topic": "mlm", "calc_f": self._calc_mlm}
img_mlm_cfg = {"do": do_img_mlm, "img": [sub_img_mlm_cfg]}
sub_img_par_alpha_cfg = {"key": "alpha", "dir": os.path.join("param", "alpha"), "file_prefix": "alpha",
"topic": "param/alpha", "calc_f": self._calc_para_alpha}
sub_img_par_beta_cfg = {"key": "beta", "dir": os.path.join("param", "beta"), "file_prefix": "beta",
"topic": "param/beta", "calc_f": self._calc_para_beta}
img_par_cfg = {"do": do_img_para, "img": [sub_img_par_alpha_cfg, sub_img_par_beta_cfg]}
self._img_cfg = {
"stat": img_stat_cfg,
"mlm": img_mlm_cfg,
"par": img_par_cfg
}
fmp_param_sub_required = False
# Queues for storing messages
self._alpha_beta_dict = {}
self._alpha_beta_queue = deque()
# Max and Min dictionaries for stabilizing the color scales for continuous values
self._max_values = {}
self._min_values = {}
# Create Publishers
self._publishers = {}
for img_set_key, img_set_cfg in self._img_cfg.items():
fmp_param_sub_required = fmp_param_sub_required or img_set_cfg['do']
if self._pub_img and img_set_cfg['do']:
for img_cfg in img_set_cfg['img']:
key = img_cfg['key']
topic = self._topic_prefix + img_cfg['topic']
self._publishers[key] = rospy.Publisher(topic, Image, latch=True, queue_size=1)
something_to_do = (self._pub_img or self._save_img) and fmp_param_sub_required
# Don't start the node if not needed...
if not something_to_do:
rospy.logerr("Nothing to do here! Why though?!?")
rospy.logdebug("Setting values:")
rospy.logdebug("\tpub_img: {}, save_img: {}".format(self._pub_img, self._save_img))
rospy.logdebug("\tdo_img_stat: {}, do_img_mlm: {}, do_img_para: {}".format(do_img_stat,
do_img_mlm, do_img_para))
rospy.logdebug("\tsomething_to_do: {}".format(something_to_do))
rospy.signal_shutdown('Nothing to do')
return
# Create Subscribers
# To map model
rospy.Subscriber(self._sub_topic_map_model, mapModel, self._map_model_callback)
# To alpha and beta parameters (if publishing or saving images, and at least one image is generated)
if (self._pub_img or self._save_img) and fmp_param_sub_required:
rospy.Subscriber(self._sub_topic_fmp_alpha, doubleMap, self._map2d_alpha_callback, queue_size=1)
rospy.Subscriber(self._sub_topic_fmp_beta, doubleMap, self._map2d_beta_callback, queue_size=1)
# Create save path if not exists
if self._save_img and fmp_param_sub_required:
if not os.path.exists(self._save_dir):
mkdir_p(self._save_dir)
self._busy = False # Thread lock flag for plot_from_queue
rospy.Timer(rospy.Duration(1), self._plot_from_queue)
rospy.spin()
def _plot_from_queue(self, event):
"""
Function called periodically to check if there are any maps in the queue to be plotted.
While there are still alpha and beta maps stored in the queue, it will plot the configured images.
:param event: Caller event. Unused except for logging.
:return: None
"""
if self._busy:
rospy.loginfo("Another thread is already plotting. Caller: {}".format(event))
else:
self._busy = True
while self._alpha_beta_queue:
seq = self._alpha_beta_queue.popleft()
self._plot(seq, self._alpha_beta_dict[seq])
del self._alpha_beta_dict[seq]
self._busy = False
def _plot(self, seq, dic):
"""
Generates the desired images and plots for a given sequence of alpha and beta maps.
:param seq: (int) Sequence number of the received maps
        :param dic: (dict) Dictionary containing the alpha and beta maps, as well as their prior values.
It should be formatted as:
dic = {'alpha': {'prior': (int), 'map': (2D np.ndarray)},
'beta' : {'prior': (int), 'map': (2D np.ndarray)}}
:return: None
"""
if not self._pub_img and not self._save_img:
return
extent_a = dic['alpha']['extent']
extent_b = dic['beta']['extent']
if extent_a != extent_b:
raise ValueError("Map extent of alpha {} differs from beta {}!".format(extent_a, extent_b))
self._map_colorizer.set_wm_extent(extent_a)
alpha = dic['alpha']['map'] + dic['alpha']['prior']
beta = dic['beta']['map'] + dic['beta']['prior']
for img_set_key, img_set_cfg in self._img_cfg.items():
if img_set_cfg['do']:
rospy.loginfo('Plotting %s', img_set_key)
for img_cfg in img_set_cfg['img']:
img_key = img_cfg['key']
img_calc = img_cfg['calc_f']
rospy.loginfo("\tComputing continuous and discrete images for %s.", img_key)
# Compute the images to plot using the configured calculation_function ('calc_f')
img_cont, img_disc, ds_list, v_min, v_max, occ, log_scale = img_calc(alpha, beta)
self._map_colorizer.set_disc_state_list(ds_list)
self._map_colorizer.set_cont_bounds(img_cont, v_min=v_min, v_max=v_max, occupancy_map=occ,
log_scale=log_scale)
rgba_img = self._map_colorizer.colorize(img_cont, img_disc)
del img_cont
del img_disc
if self._save_img:
path = os.path.join(self._save_dir, img_cfg['dir'])
if not os.path.exists(path):
mkdir_p(path)
filename = img_cfg['file_prefix'] + '_s' + str(seq)
raw_filename = 'raw_' + filename + '.png'
filename = filename + '.svg'
mlp_path = os.path.join(path, filename)
raw_path = os.path.join(path, raw_filename)
fig, ax = plt.subplots(figsize=[20, 20])
ax.imshow(rgba_img, extent=extent_a)
self._map_colorizer.draw_cb_cont(fig)
if ds_list:
self._map_colorizer.draw_cb_disc(fig)
rospy.loginfo("\t\tSaving image %s to %s.", img_key, mlp_path)
plt.savefig(mlp_path, bbox_inches='tight', dpi=self._resolution)
plt.close()
del fig
del ax
rospy.loginfo("\t\tSaving image %s to %s.", img_key, raw_path)
plt.imsave(raw_path, rgba_img, vmin=0, vmax=1)
plt.close()
rospy.loginfo("\t\tImages saved.")
if self._pub_img:
publisher = self._publishers[img_key]
rospy.loginfo("\t\tGenerating image message to %s.", img_key)
rgba_img = 255 * rgba_img
rgba_img = rgba_img.astype(np.uint8)
image_msg_head = Header()
image_msg_head.seq = seq
image_msg_head.stamp = rospy.Time.now()
image_msg_head.frame_id = 'map'
br = CvBridge()
image_msg = br.cv2_to_imgmsg(rgba_img, encoding="rgba8")
del rgba_img
image_msg.header = image_msg_head
publisher.publish(image_msg)
del image_msg
rospy.loginfo("\t\tImage published.")
def _map_model_callback(self, msg):
"""
Method called when receiving a map model type. It just sets the local field with the message's value.
:param msg: (gmapping.mapModel) An integer stating the type of map model used by the SLAM algorithm and some
constants for comparisons.
:return: None
"""
mm = msg.map_model
mm_str = ''
if mm == mapModel.REFLECTION_MODEL:
mm_str = 'Reflection Model'
elif mm == mapModel.DECAY_MODEL:
mm_str = 'Exponential Decay Model'
else:
rospy.logerr('No idea what kind of model %d is! Going with Reflection Model.', mm)
mm = mapModel.REFLECTION_MODEL
rospy.loginfo("Received Map Model: (%d, %s)", mm, mm_str)
self._map_model = mm
def _add_to_dict(self, a_b, msg):
"""
Adds the received map and prior to the object's buffer dictionary.
:param a_b: (string) Indicates which of the parameters has been received: "alpha"|"beta"
:param msg: (gmapping.doubleMap) Double Map message containing the prior and map parameters.
:return: None
"""
seq = msg.header.seq
map_dict = {
a_b: {
'map': map_msg_to_numpy(msg),
'extent': map_msg_extent(msg),
'prior': msg.param
}
}
if a_b == 'alpha':
b_a = 'beta'
else:
b_a = 'alpha'
rospy.loginfo('Received msg for {} with seq {}'.format(a_b, seq))
if seq in self._alpha_beta_dict:
self._alpha_beta_dict[seq][a_b] = map_dict[a_b]
if b_a in self._alpha_beta_dict[seq]:
rospy.loginfo('Collected alpha/beta info for seq {}'.format(seq))
self._alpha_beta_queue.append(seq)
else:
self._alpha_beta_dict[seq] = map_dict
def _map2d_alpha_callback(self, msg):
"""
Method called when receiving a map with the alpha parameters of the full posterior map distribution.
It adds the received map to the buffer dictionary until both parameter maps have been received.
:param msg: (gmapping.doubleMap) A floating point gmapping map message.
:return: None
"""
self._add_to_dict('alpha', msg)
def _map2d_beta_callback(self, msg):
"""
Method called when receiving a map with the beta parameters of the full posterior map distribution.
It adds the received map to the buffer dictionary until both parameter maps have been received.
:param msg: (gmapping.doubleMap) A floating point gmapping map message.
:return: None
"""
self._add_to_dict('beta', msg)
def _calc_mean(self, alpha, beta):
"""
Takes the alpha and beta parameter maps and computes the mean depending on the mapping model used.
:param alpha: (nd.array) A 2D array containing the alpha parameters of the PDF of the map posterior.
:param beta: (nd.array) A 2D array containing the beta parameters of the PDF of the map posterior.
:return: (tuple) A tuple consisting of:
* means (ma.array),
* special-case discrete-valued means (ma.array),
* list of special discrete states (list)
* minimum continuous value (float) for color map scaling
* maximum continuous value (float) for color map scaling
* whether the map represents occupancy (bool)
* whether the color scale should be logarithmic (bool)
"""
shape = alpha.shape
v_min = 0
occ = True
if self._map_model == mapModel.DECAY_MODEL:
numerator = alpha
denominator = beta
undef_mask = (denominator == 0)
zero_mask = (numerator == 0)
all_mask = np.logical_or(undef_mask, zero_mask)
numerator = ma.masked_array(numerator)
numerator[all_mask] = ma.masked
means = ma.divide(numerator, denominator)
means_ds = ma.zeros(shape, dtype=np.int8)
means_ds[undef_mask] = DiSt.UNDEFINED.value
means_ds[zero_mask] = DiSt.ZERO.value
means_ds[~all_mask] = ma.masked
ds_list = [DiSt.UNDEFINED, DiSt.ZERO]
v_max = None
log_scale = True
elif self._map_model == mapModel.REFLECTION_MODEL:
denominator = alpha + beta
undef_mask = (denominator == 0)
numerator = ma.masked_array(alpha)
numerator[undef_mask] = ma.masked
means = ma.divide(numerator, denominator)
means_ds = ma.zeros(shape, dtype=np.int8)
means_ds[undef_mask] = DiSt.UNDEFINED.value
means_ds[~undef_mask] = ma.masked
ds_list = [DiSt.UNDEFINED]
v_max = 1
log_scale = False
else:
means = ma.ones(shape)
means_ds = None
ds_list = []
v_max = None
log_scale = False
rospy.logerr('No valid map model defined!')
return means, means_ds, ds_list, v_min, v_max, occ, log_scale
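    # Note (added): under the exponential decay model the per-cell posterior is
    # Gamma(alpha, beta), whose mean is alpha / beta; under the reflection model
    # it is Beta(alpha, beta), whose mean is alpha / (alpha + beta). The masks
    # above only flag cells where those ratios are undefined or exactly zero.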
def _calc_var(self, alpha, beta):
"""
Takes the alpha and beta parameter maps and computes the variance depending on the mapping model used.
:param alpha: (nd.array) A 2D array containing the alpha parameters of the PDF of the map posterior.
:param beta: (nd.array) A 2D array containing the beta parameters of the PDF of the map posterior.
:return: (tuple) A tuple consisting of:
* variances (ma.array),
* special-case discrete-valued variances (ma.array),
* list of special discrete states (list)
* minimum continuous value (float) for color map scaling
* maximum continuous value (float) for color map scaling
* whether the map represents occupancy (bool)
* whether the color scale should be logarithmic (bool)
"""
shape = alpha.shape
v_min = 0
occ = False
if self._map_model == mapModel.DECAY_MODEL:
numerator = alpha
denominator = np.multiply(beta, beta)
undef_mask = (denominator == 0)
zero_mask = (numerator == 0)
all_mask = np.logical_or(undef_mask, zero_mask)
numerator = ma.masked_array(numerator)
numerator[all_mask] = ma.masked
variances = ma.divide(numerator, denominator)
vars_ds = ma.zeros(shape, dtype=np.int8)
vars_ds[undef_mask] = DiSt.UNDEFINED.value
vars_ds[zero_mask] = DiSt.ZERO.value
vars_ds[~all_mask] = ma.masked
ds_list = [DiSt.UNDEFINED, DiSt.ZERO]
v_max = None
log_scale = True
elif self._map_model == mapModel.REFLECTION_MODEL:
a_plus_b = alpha + beta
numerator = np.multiply(alpha, beta)
denominator = np.multiply(np.multiply(a_plus_b, a_plus_b), (a_plus_b + 1))
undef_mask = (denominator == 0)
numerator = ma.masked_array(numerator)
numerator[undef_mask] = ma.masked
variances = ma.divide(numerator, denominator)
vars_ds = ma.zeros(shape, dtype=np.int8)
vars_ds[undef_mask] = DiSt.UNDEFINED.value
vars_ds[~undef_mask] = ma.masked
ds_list = [DiSt.UNDEFINED]
v_max = None
log_scale = False
else:
variances = ma.ones(shape)
vars_ds = None
ds_list = []
v_max = 1
log_scale = False
rospy.logerr('No valid map model defined!')
return variances, vars_ds, ds_list, v_min, v_max, occ, log_scale
def _calc_mlm(self, alpha, beta):
"""
Takes the alpha and beta parameter maps and computes the most-likely map depending on the mapping model used.
:param alpha: (nd.array) A 2D array containing the alpha parameters of the PDF of the map posterior.
:param beta: (nd.array) A 2D array containing the beta parameters of the PDF of the map posterior.
:return: (tuple) A tuple consisting of:
* most-likely map values (ma.array),
* special-case discrete-valued most-likely map values (ma.array),
* list of special discrete states (list)
* minimum continuous value (float) for color map scaling
* maximum continuous value (float) for color map scaling
* whether the map represents occupancy (bool)
* whether the color scale should be logarithmic (bool)
"""
shape = alpha.shape
numerator = ma.masked_array(alpha - 1)
v_min = 0
if self._map_model == mapModel.REFLECTION_MODEL:
denominator = alpha + beta - 2
undef_mask = (denominator == 0)
n_undef_mask = ~undef_mask
unif_mask = np.logical_and(alpha == 1, beta == 1)
unif_mask = np.logical_and(unif_mask, n_undef_mask)
bimod_mask = np.logical_and(alpha < 1, beta < 1)
bimod_mask = np.logical_and(bimod_mask, n_undef_mask)
mask = np.logical_or(undef_mask, unif_mask)
mask = np.logical_or(mask, bimod_mask)
numerator[mask] = ma.masked
mlm = ma.divide(numerator, denominator)
mlm_ds = ma.zeros(shape, dtype=np.int8)
mlm_ds[~mask] = ma.masked
mlm_ds[unif_mask] = DiSt.UNIFORM.value
mlm_ds[undef_mask] = DiSt.UNDEFINED.value
mlm_ds[bimod_mask] = DiSt.BIMODAL.value
ds_list = [DiSt.UNDEFINED, DiSt.UNIFORM, DiSt.BIMODAL]
v_max = 1
log_scale = False
elif self._map_model == mapModel.DECAY_MODEL:
denominator = beta
undef_mask = np.logical_or(denominator == 0, alpha < 1)
n_undef_mask = ~undef_mask
zero_mask = np.logical_and(numerator == 0, n_undef_mask)
            all_mask = np.logical_or(undef_mask, zero_mask)
#!/usr/bin/env python
"""
TODO: Modify module doc.
"""
from __future__ import division
__author__ = "<NAME>"
__copyright__ = "Copyright 2013, The Materials Virtual Lab"
__version__ = "0.1"
__maintainer__ = "<NAME>"
__email__ = "<EMAIL>"
__date__ = "4/12/14"
import inspect
import itertools
import numpy as np
from pymatgen import Lattice
import matplotlib.pyplot as plt
from mpl_toolkits.mplot3d import Axes3D
from pymatgen.util.plotting_utils import get_publication_quality_plot
class SpaceGroupVisualizer(object):
def __init__(self):
pass
def plot(self, sg):
cs = sg.crystal_system
params = {
"a": 10,
"b": 12,
"c": 14,
"alpha": 20,
"beta": 30,
"gamma": 40
}
cs = "rhombohedral" if cs == "Trigonal" else cs
func = getattr(Lattice, cs.lower())
kw = {k: params[k] for k in inspect.getargspec(func).args}
lattice = func(**kw)
global plt
fig = plt.figure(figsize=(10, 10))
#ax = fig.add_subplot(111, projection='3d')
for i in range(2):
plt.plot([0, lattice.matrix[i][0]], [0, lattice.matrix[i][1]],
'k-')
plt.plot([lattice.matrix[0][0], lattice.matrix[0][0]],
[0, lattice.matrix[1][1]],
'k-')
plt.plot([0, lattice.matrix[0][0]],
[lattice.matrix[1][1], lattice.matrix[1][1]],
'k-')
        l = np.arange(0, 0.02, 0.02 / 100)
import math
import numpy as np
from genetic_algorithm.ga import GA
def objective_1():
cfg = {}
cfg["name"] = "Sphere-2D"
cfg["dimension"] = 2
cfg["obj_func"] = lambda x: np.sum(np.power(x, 2))
cfg["fitness_func"] = lambda x: np.sum( | np.power(x, 2) | numpy.power |
#!/usr/bin/env python
"""Computes the raw detections using the DPM.
Additionally, estimates 3D pose for each detection."""
import itertools
import os
import argparse
import logging
import math
from collections import namedtuple
from nyc3dcars import SESSION, Photo, Detection, Model, VehicleType, IMAGE_DIR
from sqlalchemy import func
from sqlalchemy.orm import joinedload
import numpy
import scipy.misc
from celery.task import task
import pygeo
import pydro.io
import pydro.features
def in_range(val, low, high):
"""Checks if angle is within a certain range."""
low -= 1e-5
high += 1e-5
twopi = 2 * math.pi
low = (low % twopi + twopi) % twopi
val = (val % twopi + twopi) % twopi
high = (high % twopi + twopi) % twopi
while high < low:
high += 2 * math.pi
while val < low:
val += 2 * math.pi
return val < high
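# Added sketch: a minimal check of in_range's wrap-around behaviour. The angle
# values below are arbitrary assumptions, not quantities from the detection
# pipeline; the helper only illustrates that bounds are compared modulo 2*pi.
def _in_range_example():
    assert in_range(math.radians(350), math.radians(-30), math.radians(30))
    assert not in_range(math.radians(90), math.radians(-30), math.radians(30))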
def compute_car_pose(photo, bbox, angle, vehicle_types):
"""Compute 3D pose for 2D bounding box."""
camera_rotation = numpy.array([[photo.r11, photo.r12, photo.r13],
[photo.r21, photo.r22, photo.r23],
[photo.r31, photo.r32, photo.r33]])
camera_position = - \
camera_rotation.T.dot([[photo.t1], [photo.t2], [photo.t3]])
# Small correction factor computed from NYC3DCars annotation results.
dataset_correction = numpy.array([
[photo.dataset.t1],
[photo.dataset.t2],
[photo.dataset.t3],
])
camera_position += dataset_correction
# Just approximate it for this first calculation and correct it later.
vehicle_height = 1.445
det_focal = photo.focal
det_height = photo.height
det_width = photo.width
det_bottom = bbox.y2 * det_height
det_top = bbox.y1 * det_height
det_middle = (bbox.x1 + bbox.x2) / 2 * det_width
new_dir = numpy.array([[(det_middle - det_width / 2) / det_focal],
[(det_height / 2 - det_bottom) / det_focal],
[-1]])
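    # (Added note) Pinhole-camera range estimate: the detection's pixel height
    # (det_bottom - det_top) divided by the focal length approximates
    # vehicle_height / distance, so inverting that ratio gives the distance
    # along the viewing ray computed next.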
distance = vehicle_height / ((det_height / 2 - det_top) / det_focal - (
det_height / 2 - det_bottom) / det_focal)
car_position_wrt_camera = distance * new_dir
car_position = camera_rotation.T.dot(car_position_wrt_camera)
car_ecef = car_position + camera_position
car_lla = pygeo.ECEFToLLA(car_ecef.T)
car_enu = pygeo.LLAToENU(car_lla).reshape((3, 3))
middle_x = (bbox.x1 + bbox.x2) / 2
middle_y = (bbox.y1 + bbox.y2) / 2
left_ray = numpy.array(
[[(bbox.x1 * photo.width - det_width / 2) / det_focal],
[(det_height / 2 - middle_y * photo.height) / det_focal],
[-1]])
left_ray_enu = car_enu.T.dot(camera_rotation.T.dot(left_ray))
right_ray = numpy.array(
[[(bbox.x2 * photo.width - det_width / 2) / det_focal],
[(det_height / 2 - middle_y * photo.height) / det_focal],
[-1]])
right_ray_enu = car_enu.T.dot(camera_rotation.T.dot(right_ray))
middle_ray = numpy.array(
[[(middle_x * photo.width - det_width / 2) / det_focal],
[(det_height / 2 - middle_y * photo.height) / det_focal],
[-1]])
middle_ray_enu = car_enu.T.dot(camera_rotation.T.dot(middle_ray))
top_ray = numpy.array(
[[(middle_x * photo.width - det_width / 2) / det_focal],
[(det_height / 2 - bbox.y1 * photo.height) / det_focal],
[-1]])
top_ray_enu = car_enu.T.dot(camera_rotation.T.dot(top_ray))
bottom_ray = numpy.array(
[[(middle_x * photo.width - det_width / 2) / det_focal],
[(det_height / 2 - bbox.y2 * photo.height) / det_focal],
[-1]])
bottom_ray_enu = car_enu.T.dot(camera_rotation.T.dot(bottom_ray))
middle_angle = math.atan2(middle_ray_enu[1], middle_ray_enu[0])
right_angle = math.atan2(right_ray_enu[1], right_ray_enu[0])
left_angle = math.atan2(left_ray_enu[1], left_ray_enu[0])
if not angle:
total_angle = middle_angle
else:
total_angle = middle_angle + angle
for vehicle_type in vehicle_types:
half_width = 0.3048 * vehicle_type.tight_width / 2
half_length = 0.3048 * vehicle_type.tight_length / 2
height = 0.3048 * vehicle_type.tight_height
pointa = numpy.array([[half_width], [half_length]])
pointb = numpy.array([[half_width], [-half_length]])
pointc = numpy.array([[-half_width], [-half_length]])
        pointd = numpy.array([[-half_width], [half_length]])
import numpy as np
import pandas as pd
import matplotlib.pyplot as plt
import torch
import torch.tensor as ts
import mdn_base
from data_handler import Data_Handler as DH
from misc_mixture_density_network import Mixture_Density_Network as MDN
from myfilter import KalmanFilter, model_CV, fill_diag
print("Program: evaluation\n")
data_path = './data/eval_data1.csv' # eval_data1 or eval_data2
model_path = './model/Model_CASE_p5m20_512_256_256_128_64'
df = pd.read_csv(data_path) # for test
data = df.to_numpy()[:,:-2]
labels = df.to_numpy()[:,-2:]
N = data.shape[0]
print('There are {} samples.'.format(N))
# Don't change these parameters
maxT = 20
past = 5 # take the past instants
num_gaus = 10 # the number of Gaussian components
layer_param = [512, 256, 256, 128, 64] # Body architecture (MLP layers)
data_shape = (1, past*2+4)
label_shape = (1, 2)
# Load the MDN model
myMDN = MDN(data_shape, label_shape, num_gaus=num_gaus, layer_param=layer_param, verbose=False)
myMDN.build_Network()
model = myMDN.model
model.load_state_dict(torch.load(model_path))
model.eval()
Loss_MDN = []
Loss1_MDN = []
Loss_KF = []
# fig,ax = plt.subplots()
for idx in range(N):
if (idx%2000 == 0):
print("\r{}/{} ".format(idx,N), end='')
sample = data[idx,:] # list: [x_h, y_h, x_t, y_t, T, type]
label = labels[idx,:]
### Kalman filter
X0 = np.array([[sample[0],sample[1],0,0]]).transpose()
kf_model = model_CV(X0, Ts=1)
P0 = fill_diag((1,1,1,1))
Q = fill_diag((1,1,1,1))
R = fill_diag((1,1))
KF = KalmanFilter(kf_model,P0,Q,R)
Y = [np.array(sample[2:4]), np.array(sample[4:6]),
np.array(sample[6:8]), np.array(sample[8:10]), np.array(sample[10:12])]
for kf_i in range(len(Y)+int(sample[-2])):
if kf_i<len(Y):
KF.one_step(np.array([[0]]), np.array(Y[kf_i]).reshape(2,1))
else:
KF.predict(np.array([[0]]),evolve_P=False)
KF.append_state(KF.X)
### MDN
beta_test = ts(np.tile(np.array([sample]), (1,1)).astype(np.float32))
alp, mu, sigma = model(beta_test)
alp1, mu1, sigma1 = mdn_base.take_mainCompo(alp, mu, sigma, main=1)
alp, mu, sigma = mdn_base.take_mainCompo(alp, mu, sigma, main=3)
alp, mu, sigma = mdn_base.take_goodCompo(alp, mu, sigma, thre=0.1)
### Loss
_, loss_KF = mdn_base.loss_MaDist(ts([1]), ts([KF.X[:2].reshape(-1)]),
ts([[KF.P[0,0],KF.P[1,1]]]), ts(label))
Loss_KF.append(loss_KF.detach().float().item())
_, loss_MDN = mdn_base.loss_MaDist(alp[0],mu[0],sigma[0],ts(label))
Loss_MDN.append(loss_MDN.detach().float().item())
_, loss1_MDN = mdn_base.loss_MaDist(alp1[0],mu1[0],sigma1[0],ts(label))
Loss1_MDN.append(loss1_MDN.detach().float().item())
if idx==5000:
fig, ax1 = plt.subplots()
h1 = ax1.hist(np.array(Loss_KF)[np.array(Loss_KF)<10], bins=20, alpha=0.5, label='KF')
h2 = ax1.hist(np.array(Loss_MDN)[np.array(Loss_MDN)<10], bins=20, alpha=0.5, label='MDN')
        h3 = ax1.hist(np.array(Loss1_MDN)[np.array(Loss1_MDN)<10], bins=20, alpha=0.5)
import warnings
import pytest
import numpy as np
import os,sys,inspect
currentdir = os.path.dirname(os.path.abspath(inspect.getfile(inspect.currentframe())))
parentdir = os.path.dirname(currentdir)
sys.path.insert(0,parentdir)
from ADPYNE.AutoDiff import AutoDiff, vectorize
import ADPYNE.elemFunctions as ef
# helper function tests
def test_convertNonArray_array():
AD = AutoDiff(np.array([[1,2]]),1)
assert np.all(np.equal(AD.val, np.array([[1,2]])))
def test_convertNonArray_num():
AD = AutoDiff(1,1)
assert np.all(np.equal(AD.val, np.array([[1]])))
def test_calcJacobian_array():
AD = AutoDiff(1,2)
assert np.all(np.equal(AD.jacobian, np.array([[1]])))
def test_calcJacobian_array_withJ():
AD = AutoDiff(1,1,1,0,np.array([[1]]))
assert np.all(np.equal(AD.jacobian, np.array([[1]])))
def test_calcJacobian_vector():
AD = AutoDiff(4, np.array([[2, 1]]).T, n=2, k=1)
assert np.all(np.equal(AD.jacobian, np.array([[1, 0]])))
AD = AutoDiff(3, np.array([[1, 2]]).T, n=2, k=2)
assert np.all(np.equal(AD.jacobian, np.array([[0, 1]])))
def test_calcDerivative():
AD = AutoDiff(4, 2, n=4, k=3)
assert np.all(np.equal(AD.der, np.array([[0, 0, 2, 0]])))
# addition tests
def test_add_ad_results():
# single input cases
# positive numbers
x = AutoDiff(5, 2)
f = x + x
assert f.val == 10
assert f.der == 4
assert f.jacobian == 2
# negative numbers
y = AutoDiff(-5, 2)
f = y + y
assert f.val == -10
assert f.der == 4
assert f.jacobian == 2
def test_add_vector_results():
x = AutoDiff(np.array([[3],[1]]), np.array([[2, 1]]).T, 2, 1)
y = AutoDiff(np.array([[2],[-3]]), np.array([[3, 2]]).T, 2, 2)
f = x + y
assert np.all(f.val == np.array([[5], [-2]]))
assert np.all(f.der == np.array([[2, 3], [1, 2]]))
assert np.all(f.jacobian == np.array([[1, 1], [1, 1]]))
def test_add_constant_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = x + 3
assert f.val == 8
assert f.der == 2
assert f.jacobian == 1
# negative numbers
x = AutoDiff(-5, 2)
f = x + 3
assert f.val == -2
assert f.der == 2
assert f.jacobian == 1
def test_add_constant_vector_results():
x = AutoDiff(np.array([[1, 3]]).T, np.array([[2, 1]]).T, 2, 1)
f = x + 3
assert np.all(f.val == np.array([[4, 6]]).T)
assert np.all(f.der == np.array([[2, 0], [1, 0]]))
assert np.all(f.jacobian == np.array([[1, 0], [1, 0]]))
# reverse addition tests
def test_radd_constant_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = 3 + x
assert f.val == 8
assert f.der == 2
assert f.jacobian == 1
# negative numbers
x = AutoDiff(-5, 2)
f = 3 + x
assert f.val == -2
assert f.der == 2
assert f.jacobian == 1
def test_radd_constant_vector_results():
x = AutoDiff(np.array([[1, 3]]).T, np.array([[2, 1]]).T, 2, 1)
f = 3 + x
assert np.all(f.val == np.array([[4, 6]]).T)
assert np.all(f.der == np.array([[2, 0], [1, 0]]))
assert np.all(f.jacobian == np.array([[1, 0], [1, 0]]))
# subtraction tests
def test_sub_ad_results():
# single input cases
# positive numbers
x = AutoDiff(5, 2)
f = x - x
assert f.val == 0
assert f.der == 0
assert f.jacobian == 0
# negative numbers
y = AutoDiff(-5, 2)
f = y - y
assert f.val == 0
assert f.der == 0
assert f.jacobian == 0
def test_sub_vector_results():
x = AutoDiff([3, 1], [2, 1], 2, 1)
y = AutoDiff([2, -3], [3, 2], 2, 2)
f = x - y
assert np.all(f.val == np.array([[1], [4]]))
assert np.all(f.der == np.array([[2, -3], [1, -2]]))
assert np.all(f.jacobian == np.array([[1, -1], [1, -1]]))
def test_sub_constant_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = x - 3
assert f.val == 2
assert f.der == 2
assert f.jacobian == 1
# negative numbers
x = AutoDiff(-5, 2)
f = x - 3
assert f.val == -8
assert f.der == 2
assert f.jacobian == 1
def test_sub_constant_vector_results():
x = AutoDiff([1, 3], [2, 1], 2, 1)
f = x - 3
assert np.all(f.val == np.array([[-2, 0]]).T)
assert np.all(f.der == np.array([[2, 0], [1, 0]]))
assert np.all(f.jacobian == np.array([[1, 0], [1, 0]]))
# reverse subtraction tests
def test_rsub_constant_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = 3 - x
assert f.val == -2
assert f.der == 2
assert f.jacobian == 1
# negative numbers
x = AutoDiff(-5, 2)
f = 3 - x
assert f.val == 8
assert f.der == 2
assert f.jacobian == 1
def test_rsub_constant_vector_results():
x = AutoDiff([1, 3], [2, 1], 2, 1)
f = 3 - x
assert np.all(f.val == np.array([[2, 0]]).T)
assert np.all(f.der == np.array([[2, 0], [1, 0]]))
assert np.all(f.jacobian == np.array([[1, 0], [1, 0]]))
# multiplication tests
def test_mul_ad_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = x * x
assert f.val == 25
assert f.der == 20
assert f.jacobian == 10
# negative numbers
x = AutoDiff(-5, 2)
f = x * x
assert f.val == 25
assert f.der == -20
assert f.jacobian == -10
def test_mul_vector_results():
x = AutoDiff([3, 1], [2, 1], 2, 1)
y = AutoDiff([2, -3], [1, 2], 2, 2)
f = x*y
assert np.all(f.val == np.array([[6, -3]]).T)
assert np.all(f.der == np.array([[4, 3], [-3, 2]]))
assert np.all(f.jacobian == np.array([[2, 3], [-3, 1]]))
def test_mul_constant_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = x * 3
assert f.val == 15
assert f.der == 6
assert f.jacobian == 3
# negative numbers
x = AutoDiff(-5, 2)
f = x * 3
assert f.val == -15
assert f.der == 6
assert f.jacobian == 3
def test_mul_constant_vector_results():
x = AutoDiff([3, 1], [2, 1], 2, 1)
f = x * 3
assert np.all(f.val == np.array([[9, 3]]).T)
assert np.all(f.der == np.array([[6, 0], [3, 0]]))
assert np.all(f.jacobian == np.array([[3, 0], [3, 0]]))
# reverse multiplication tests
def test_rmul_constant_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = 3 * x
assert f.val == 15
assert f.der == 6
assert f.jacobian == 3
# negative numbers
x = AutoDiff(-5, 2)
f = 3 * x
assert f.val == -15
assert f.der == 6
assert f.jacobian == 3
def test_rmul_constant_vector_results():
x = AutoDiff([3, 1], [2, 1], 2, 1)
f = 3 * x
assert np.all(f.val == np.array([[9, 3]]).T)
assert np.all(f.der == np.array([[6, 0], [3, 0]]))
assert np.all(f.jacobian == np.array([[3, 0], [3, 0]]))
# division tests
def test_truediv_ad_results():
# single input case
# positive numbers
x = AutoDiff(5, 2)
f = x / x
assert f.val == 1
assert f.der == 0
assert f.jacobian == 0
# negative numbers
x = AutoDiff(-5, 2)
f = x / x
assert f.val == 1
assert f.der == 0
assert f.jacobian == 0
def test_truediv_vector_results():
x = AutoDiff([3, 1], [2, 1], 2, 1)
y = AutoDiff([2, -3], [1, 2], 2, 2)
f = x/y
assert np.all(f.val == np.array([[3/2, -1/3]]).T)
assert np.all(f.der == np.array([[1, -0.75], [-1/3, -2/9]]))
assert np.all(f.jacobian == np.array([[0.5, -0.75], [-1/3, -1/9]]))
def test_truediv_constant_results():
# single input case
# positive numbers
x = AutoDiff(9, 6)
f = x / 3
assert f.val == 3
assert f.der == 2
assert f.jacobian == (1/3)
# negative numbers
x = AutoDiff(-9, 6)
f = x / 3
assert f.val == -3
assert f.der == 2
assert f.jacobian == (1/3)
def test_truediv_constant_vector_results():
x = AutoDiff([-9, 3], [2, 1], 2, 1)
f = x / 3
assert np.all(f.val == np.array([[-3, 1]]).T)
assert np.all(f.der == np.array([[2/3, 0], [1/3, 0]]))
assert np.all(f.jacobian == np.array([[1/3, 0], [1/3, 0]]))
# reverse division tests
def test_rtruediv_constant_results():
# single input case
# positive numbers
x = AutoDiff(3, 2)
f = 6 / x
assert f.val == 2
assert f.der == -4/3
assert f.jacobian == -2/3
# negative numbers
x = AutoDiff(-3, 2)
f = 6 / x
assert f.val == -2
assert f.der == -4/3
assert f.jacobian == -2/3
def test_rtruediv_constant_vector_results():
x = AutoDiff([-9, 3], [2, 1], 1, 1)
f = 3 / x
assert np.all(f.val == np.array([[-1/3, 1]]).T)
assert np.all(f.der == np.array([[-3*((-9)**(-2))*2], [-3*((3)**(-2))*1]]))
assert np.all(f.jacobian == np.array([[-3*((-9)**(-2))*1], [-3*((3)**(-2))*1]]))
# power tests
def test_pow_ad_results():
x = AutoDiff(2, 1)
f = x**x
assert f.val == 4
assert f.der == 4 + np.log(16)
assert f.jacobian == 4 + np.log(16)
def test_rpow_vector_results():
x = AutoDiff([4, 3], [2, 1], 2, 1)
y = AutoDiff([2, 1], [1, 3], 2, 2)
f = x**y
assert np.all(f.val == np.array([[4**2, 3**1]]).T)
assert np.all(f.der == np.array([[2*(4**(2-1))*2, (4**2) * np.log(4) * 1], [1*(3**(1-1))*1, (3**1) * np.log(3)*3]]))
assert np.all(f.jacobian == np.array([[2*(4**(2-1))*1, (4**2) * np.log(4) * 1], [1*(3**(1-1))*1, (3**1) * np.log(3)*1]]))
def test_pow_constant_results():
# positive numbers
x = AutoDiff(5, 2)
f = x**3
assert f.val == 125
assert f.der == 150
assert f.jacobian == 75
# negative numbers
x = AutoDiff(-5, 2)
f = x**3
assert f.val == -125
assert f.der == 150
assert f.jacobian == 75
def test_pow_constant_vector_results():
x = AutoDiff([4, 3], [2, 1], 1, 1)
f = x**3
assert np.all(f.val == np.array([[4**3, 3**3]]).T)
assert np.all(f.der == np.array([[3*(4**2)*2], [3*(3**2)*1]]))
assert np.all(f.jacobian == np.array([[3*(4**2)*1], [3*(3**2)*1]]))
# reverse power tests
def test_rpow_constant_results():
x = AutoDiff(5, 2)
f = 3**x
assert f.val == 243
assert f.der == 486 * np.log(3)
assert f.jacobian == 243 * np.log(3)
def test_rpow_constant_vector_results():
x = AutoDiff([4, 3], [2, 1], 1, 1)
f = 3**x
assert np.all(f.val == np.array([[3**(4), 3**3]]).T)
assert np.all(f.der == np.array([[(3**(4))*2 * np.log(3)], [(3**(3))*1 * np.log(3)]]))
assert np.all(f.jacobian == np.array([[(3**(4))*1 * np.log(3)], [(3**(3))*1 * np.log(3)]]))
# positive tests
def test_pos_results():
# positive numbers
x = AutoDiff(5, 2)
f = + x
assert f.val == 5
assert f.der == 2
assert f.jacobian == 1
# negative numbers
y = AutoDiff(-5, 2)
f = + y
assert f.val == -5
assert f.der == 2
assert f.jacobian == 1
def test_pos_vector_results():
x = AutoDiff([4, 3], [2, 1], 1, 1)
f = + x
assert np.all(f.val == np.array([[4, 3]]).T)
assert np.all(f.der == np.array([[2], [1]]))
assert np.all(f.jacobian == np.array([[1], [1]]))
y = AutoDiff([-4, -3], [2, 1], 1, 1)
g = + y
assert np.all(g.val == np.array([[-4, -3]]).T)
assert np.all(g.der == np.array([[2], [1]]))
assert np.all(g.jacobian == np.array([[1], [1]]))
# negation tests
def test_neg_results():
# positive numbers
x = AutoDiff(5, 2)
f = - x
assert f.val == -5
assert f.der == -2
assert f.jacobian == -1
# negative numbers
y = AutoDiff(-5, 2)
f = - y
assert f.val == 5
assert f.der == -2
assert f.jacobian == -1
def test_neg_vector_results():
x = AutoDiff([4, 3], [2, 1], 1, 1)
f = - x
assert np.all(f.val == np.array([[-4, -3]]).T)
assert np.all(f.der == np.array([[-2], [-1]]))
assert np.all(f.jacobian == np.array([[-1], [-1]]))
y = AutoDiff([-4, -3], [2, 1], 1, 1)
g = - y
assert np.all(g.val == np.array([[4, 3]]).T)
assert np.all(g.der == np.array([[-2], [-1]]))
assert np.all(g.jacobian == np.array([[-1], [-1]]))
def test_neg_constant_results():
x = 3
f = - x
assert f == -3
# absolute value tests
def test_abs_results():
# positive numbers
x = AutoDiff(5, 2)
f = abs(x)
assert f.val == 5
assert f.der == 2
assert f.jacobian == 1
# negative numbers
y = AutoDiff(-5, 2)
f = abs(y)
assert f.val == 5
assert f.der == -2
assert f.jacobian == -1
def test_abs_vector_results():
x = AutoDiff([4, 3], [2, 1], 1, 1)
f = abs(x)
assert np.all(f.val == np.array([[4, 3]]).T)
assert np.all(f.der == np.array([[2], [1]]))
assert np.all(f.jacobian == np.array([[1], [1]]))
y = AutoDiff([-4, -3], [2, 1], 1, 1)
g = abs(y)
assert np.all(g.val == np.array([[4, 3]]).T)
assert np.all(g.der == np.array([[-2], [-1]]))
assert np.all(g.jacobian == np.array([[-1], [-1]]))
def test_abs_constant_results():
x = -3
f = abs(x)
assert f == 3
# Comparison tests
def test_eq_results():
x = AutoDiff(5, 2)
y = AutoDiff(5, 2)
z = AutoDiff(5, 1)
assert x == y
assert (x == z) == False
def test_eq_vector_results():
w = AutoDiff([4, 5], [2, 1], 1, 1)
x = AutoDiff([4, 3], [2, 1], 1, 1)
y = AutoDiff([4, 3], [2, 1], 1, 1)
z = AutoDiff([4, 5], [2, 1], 1, 1)
assert np.all(x == y)
assert np.all(x==z) == False
assert np.all(w==x) == False
def test_eq_constant():
x = AutoDiff(5, 2)
assert (x == 5) == False
def test_ne_results():
x = AutoDiff(5, 2)
y = AutoDiff(5, 2)
z = AutoDiff(5, 1)
assert x != z
assert (x != y) == False
def test_neq_vector_results():
w = AutoDiff([4, 5], [2, 1], 1, 1)
x = AutoDiff([4, 3], [2, 1], 1, 1)
y = AutoDiff([4, 3], [2, 1], 1, 1)
z = AutoDiff([4, 5], [2, 1], 1, 1)
assert np.all(x != y) == False
	assert np.all(x != z) == False
import pickle
import os
from typing import Set
import torch
import torch.nn
import numpy as np
# from numpy import random
import random
import scipy.signal
from collections import deque
import matplotlib.pyplot as plt
#from running_state import ZFilter
import math
import logging
def set_random_seed(seed: int, using_cuda: bool = False) -> None:
"""
Seed the different random generators
:param seed: (int)
:param using_cuda: (bool)
"""
# Seed python RNG
random.seed(seed)
# Seed numpy RNG
np.random.seed(seed)
# seed the RNG for all devices (both CPU and CUDA)
torch.manual_seed(seed)
if using_cuda:
torch.cuda.manual_seed(seed)
# Deterministic operations for CuDNN, it may impact performances
torch.backends.cudnn.deterministic = True
# torch.backends.cudnn.benchmark = False
def dump_pickle(saved_fn, variable):
with open(saved_fn, 'wb') as ff:
pickle.dump(variable, ff)
def load_pickle(fn):
if not os.path.exists(fn):
print(fn, " notexist")
return
with open(fn, "rb") as f:
lookup = pickle.load(f)
# print(fn)
return lookup
# InfoGail related:
def discount(x, gamma):
assert x.ndim >= 1
#print("discount filter:", x)
#print("::::::::", scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1])
return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]
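# Added sketch: a toy check (reward values and gamma are arbitrary assumptions)
# that the lfilter trick above matches the explicit recursion
# G_t = x_t + gamma * G_{t+1}.
def _discount_example():
    x = np.array([1.0, 2.0, 3.0])
    gamma = 0.9
    expected = np.array([1.0 + gamma * (2.0 + gamma * 3.0), 2.0 + gamma * 3.0, 3.0])
    assert np.allclose(discount(x, gamma), expected)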
# ZFilter
def gauss_prob_np(mu, logstd, x):
std = np.exp(logstd)
var = np.square(std)
gp = np.exp(-np.square(x - mu)/(2*var)) / ((2*np.pi)**.5 * std)
return np.prod(gp, axis=1)
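# Added sketch: at the mean of a unit-variance Gaussian each dimension
# contributes 1/sqrt(2*pi) to the factorized density, so over two dimensions
# the product is 1/(2*pi). The zero-valued inputs are illustrative assumptions.
def _gauss_prob_np_example():
    val = gauss_prob_np(np.zeros((1, 2)), np.zeros((1, 2)), np.zeros((1, 2)))
    assert np.allclose(val, 1.0 / (2 * np.pi))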
def gauss_prob(mu, logstd, x):
std = torch.exp(logstd)
var = torch.square(std)
gp = torch.exp(-torch.square(x - mu)/(2*var)) / ((2*np.pi)**.5 * std)
return torch.reduce_prod(gp, [1])
def normal_entropy(std):
var = std.pow(2)
entropy = 0.5 + 0.5 * torch.log(2 * var * math.pi)
return entropy.sum(1, keepdim=True)
def normal_log_density(x, mean, log_std, std):
# pylint: disable=not-callable
var = std.pow(2)
    torch_pi = 2 * torch.asin(torch.tensor(1.))  # asin(1) = pi/2, doubled to get pi
log_density = -(x - mean).pow(2) / (
2 * var) - 0.5 * torch.log(2 * torch_pi) - log_std
return log_density.sum(1, keepdim=True)
# def normal_log_density(x, mean, log_std, std):
# var = std.pow(2)
# log_density = -(x - mean).pow(2) / (
# 2 * var) - 0.5 * math.log(2 * math.pi) - log_std
# return log_density.sum(1, keepdim=True)
def normal_log_density_fixedstd(x, mean):
    std = torch.from_numpy(np.array([2, 2]))
import numpy as np
import csv
import math
import matplotlib.pyplot as plt
import pandas as pd
import random
plt.ion()
class Waypoints:
file_mapping = {
"offroad_1": 'Offroad_1.csv',
"offroad_2": 'Offroad_2.csv',
"offroad_3": 'Offroad_3.csv',
"offroad_4": 'Offroad_4.csv',
"offroad_5": 'Offroad_5.csv',
"offroad_6": 'Offroad_6.csv',
"offroad_7": 'Offroad_7.csv',
"offroad_8": 'Offroad_8.csv'
}
def __init__(self, city_name):
try:
self.raw_waypoints = pd.read_csv("carla_game/waypoints/" + self.file_mapping[city_name.lower()])
except:
self.raw_waypoints = pd.read_csv(self.file_mapping[city_name.lower()])
self.city_name = city_name
self.city_num = int(self.city_name[-1])
#process cm to m
self.point_columns_labels = []
for col in self.raw_waypoints.columns:
if '_id' not in str(col):
self.point_columns_labels.append(str(col))
self.raw_waypoints[self.point_columns_labels] /= 100
nparray = self.raw_waypoints[self.point_columns_labels].to_numpy()
self.total_min = np.min(nparray)
self.total_max = np.max(nparray)
#nums
self.points_num = len(self.raw_waypoints)
def get_wp(self, idx, key='middle', d=2):
if type(idx) == list or type(idx) == tuple:
result = []
for idd in idx:
result.append(self.get_wp(idd))
return result
else:
point = self.raw_waypoints.iloc[idx]
data = []
for xyz in ['.x', '.y', '.z']:
data.append(point[key+xyz])
data = data[:d]
return data
def get_init_pos(self):
index = random.randint(0, self.points_num - 1)
point = self.raw_waypoints.iloc[index]
idxs = self.get_nearest_waypoints_idx(index)
prev, next = idxs[random.randint(0, len(idxs) - 1)]
yaw = get_degree(self.get_wp(prev[-1]), self.get_wp(next[0]))
init_pos = (point["middle.x"], point["middle.y"], point["middle.z"], yaw)
paths = self.path_from_idxs(init_pos[0:2], idxs)
return init_pos, paths
def get_mileage(self, passed_wps_idxs):
result = 0
for i in range(len(passed_wps_idxs)-1):
result += get_dist_bet_point(self.get_wp(passed_wps_idxs[i]), self.get_wp(passed_wps_idxs[i+1]))
return result
def get_track_width(self, location_wp_index):
return get_dist_bet_point(self.get_wp(location_wp_index, key='side1'), self.get_wp(location_wp_index, key='side2'))
def get_nearest_waypoints_idx(self, location_wp_index, k=10):
raise NotImplementedError
def get_all_wps(self):
result = []
for i in range(self.points_num):
result.append(self.get_wp(i))
result.append(self.get_wp(i, key='side1'))
result.append(self.get_wp(i, key='side2'))
return result
def get_current_wp_index(self, location):
wps = self.raw_waypoints[["middle.x", "middle.y"]].values
return find_nearest_waypoints(wps, location, 1)[0]
def path_from_idxs(self, location, idxs):
paths = []
for prev, next in idxs:
temp = {
"prev_wps": np.asarray(self.get_wp(prev)),
"next_wps": np.asarray(self.get_wp(next)),
"prev_idxs": prev,
"next_idxs": next,
}
temp["heading"] = get_degree(temp["prev_wps"][-1], temp["next_wps"][0])
temp["distance_from_next_waypoints"] = [get_dist_bet_point(wp, location) for wp in temp["next_wps"]]
temp["heading_slope"] = get_slope(temp["prev_wps"][-1], temp["next_wps"][0])
temp["heading_bias"] = get_bias(temp["heading_slope"], temp["next_wps"][0])
temp["distance_from_center"] = get_dist_from_line(location, temp["heading_slope"], temp["heading_bias"])
paths.append(temp)
return paths
def get_paths(self, location, location_wp_index, prev_location_wp_index):
idxs = self.get_prev_next_waypoints_idx(location_wp_index, prev_location_wp_index)
return self.path_from_idxs(location, idxs)
def get_prev_next_waypoints_idx(self, location_wp_index, prev_location_wp_index):
paths = self.get_nearest_waypoints_idx(location_wp_index)
if any([prev_location_wp_index in prev for prev, next in paths]):
pass
elif any([prev_location_wp_index in next for prev, next in paths]):
# reverse paths
for i in range(len(paths)):
prev, next = paths[i]
paths[i] = list(reversed(next)), list(reversed(prev))
'''
else:
raise RuntimeError("Worng location_wp_index, prev_location_wp_index : {}, {}".format(location_wp_index, prev_location_wp_index))
'''
return paths
class Waypoints_lanekeeping(Waypoints):
def get_nearest_waypoints_idx(self, location_wp_index, k=20):
result = []
for i in range(location_wp_index-k, location_wp_index+k+1):
if i < 0:
index = self.points_num + i
else:
index = i
index = index % self.points_num
result.append(index)
return [[result[:k], result[k+1:]]]
class Waypoints_forked(Waypoints):
def __init__(self, city_name):
super(Waypoints_forked, self).__init__(city_name)
self.groups_num = len(set(self.raw_waypoints["group_id"]))
# gather indexs by path
self.wp_idxs_by_path = []
for gid in range(self.groups_num):
temp = []
for i in range(self.points_num):
point = self.raw_waypoints.iloc[i]
if point["group_id"] == gid:
temp.append(i)
self.wp_idxs_by_path.append(temp)
def get_nearest_waypoints_idx(self, location_wp_index):
for path in self.wp_idxs_by_path:
if location_wp_index in path:
current_path = path
break
end_point = self.raw_waypoints.iloc[current_path[-1]]
start_point = self.raw_waypoints.iloc[current_path[0]]
front_paths = []
end_paths = []
#get available paths.
for i in range(self.points_num):
if end_point["inter_id"] == self.raw_waypoints.iloc[i]["inter_id"]\
and end_point["group_id"] != self.raw_waypoints.iloc[i]["group_id"]:
for path in self.wp_idxs_by_path:
if i in path:
temp_path = path
if path[-1] == i:
temp_path.reverse()
elif path[0] == i:
pass
else:
print(current_path, path, i, end_point["inter_id"])
assert False, "invaild waypoints csv"
front_paths.append(temp_path)
elif start_point["inter_id"] == self.raw_waypoints.iloc[i]["inter_id"]\
and start_point["group_id"] != self.raw_waypoints.iloc[i]["group_id"]:
for path in self.wp_idxs_by_path:
if i in path:
temp_path = path
if path[0] == i:
temp_path.reverse()
elif path[-1] == i:
pass
else:
print(current_path, path, i, start_point["inter_id"])
assert False, "invaild waypoints csv"
end_paths.append(temp_path)
#set points seq through heading
current_idx = current_path.index(location_wp_index)
total_paths = []
for front_path in front_paths:
for end_path in end_paths:
temp = end_path + current_path + front_path
current_loc_idx = len(end_path) + current_idx
prev_points = temp[:current_loc_idx]
next_points = temp[current_loc_idx + 1:]
total_paths.append([prev_points, next_points])
#remove overlap
for i in range(len(total_paths)):
total_paths[i] = list(total_paths[i])
total_paths[i][0] = tuple(total_paths[i][0])
total_paths[i][1] = tuple(total_paths[i][1])
total_paths[i] = tuple(total_paths[i])
total_paths = list(set(tuple(total_paths)))
return total_paths
def get_waypoints_manager(city_name):
if int(city_name[-1]) > 4:
return Waypoints_forked(city_name)
else:
return Waypoints_lanekeeping(city_name)
class Animator:
def __init__(self, figsize=(10, 10), lims=(-400, 400)):
self.fig, self.ax = plt.subplots(figsize=figsize)
self.ax.set_xlim(lims)
# for legend, expand y max limit
self.ax.set_ylim([lims[0], lims[1]+70])
self.points_controller = {}
self.linear_controller = {}
def plot_points(self, dictt):
'''
dictt[key] = [array, dotsize]
'''
for key in dictt:
if key in self.points_controller.keys():
self.points_controller[key].set_data(dictt[key][0][:, 1], dictt[key][0][:, 0])
else:
self.points_controller[key] = plot_points(* [self.ax]+dictt[key]+[key])
def plot_linears(self, dictt):
'''
dictt[key] = [slope, bias, minv, maxv]
'''
for key in dictt:
if key in self.linear_controller.keys():
x, y = get_dots_from_linear(*dictt[key])
self.linear_controller[key].set_data(y, x)
else:
self.linear_controller[key] = plot_linear(* [self.ax]+dictt[key]+[key])
def update(self):
self.ax.legend(fontsize=10, loc='upper left')
self.fig.canvas.draw()
def __del__(self):
plt.close(self.fig)
def plot_points(ax, array, dotsize, label):
data_setter = ax.plot(
array[:, 1],
array[:, 0],
marker='o',
linestyle='',
markersize=dotsize,
label=label
)
return data_setter[0]
def get_dots_from_linear(slope, bias, minv, maxv):
linear = lambda x: x * slope + bias
width = maxv - minv
x = np.linspace(minv, maxv, width)
y = linear(x)
return x, y
def plot_linear(ax, slope, bias, minv, maxv, label=''):
x, y = get_dots_from_linear(slope, bias, minv, maxv)
return ax.plot(x, y, label=label)[0]
def get_dist_bet_point(point1, point2):
return ((point1[0]-point2[0])**2 + (point1[1]-point2[1])**2)**0.5
def get_dist_from_line(point, slope, b):
x, y = point[0], point[1]
ax, by, c = slope, -1, b
return abs(ax*x + by*y + c)/(ax**2 + by**2)**(1/2)
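# Added sketch: quick sanity check of the point-to-line distance above; the
# point (0, 1) lies 1/sqrt(2) away from the line y = x (slope 1, bias 0).
def _dist_from_line_example():
    assert abs(get_dist_from_line((0.0, 1.0), 1.0, 0.0) - 1.0 / 2 ** 0.5) < 1e-9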
def get_slope(point1, point2):
return (point1[1] - point2[1])/(point1[0] - point2[0])
def get_vertical_slope(point1, point2):
return -1/get_slope(point1, point2)
def get_bias(slope, point):
b = -slope*point[0] + point[1]
return b
def sign(num):
if num==0:
return 0
result = int(num/abs(num))
assert result==1 or result==-1, "sign error | num:{}, result:{}".format(num, result)
return result
def find_nearest_waypoints(waypoints, location, k):
num_wps = len(waypoints)
repeated_location = np.repeat(np.expand_dims(location, 0), num_wps, axis=0)
mse = np.sum((repeated_location - waypoints)**2, axis = 1)
idx = np.argpartition(mse, k)
return idx[:k]
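# Added sketch: toy check (coordinates are arbitrary assumptions) that the
# squared-distance argpartition above returns the indices of the k closest
# waypoints, in no particular order.
def _find_nearest_waypoints_example():
    wps = np.array([[0.0, 0.0], [10.0, 10.0], [1.0, 1.0]])
    idx = find_nearest_waypoints(wps, np.array([0.5, 0.5]), 2)
    assert set(idx) == {0, 2}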
def load_waypoints(path):
txts = []
with open(path,'r') as f:
reader = csv.reader(f)
for txt in reader:
txts.append(txt)
x_idx = txts[0].index('location.x')
y_idx = txts[0].index('location.y')
waypoints = np.array([[i[x_idx], i[y_idx]] for i in txts[1:]], dtype=np.float32)
return waypoints
def get_vector_from_degree(degree):
radian = degree / 180 * 3.14
return np.array([math.cos(radian), math.sin(radian)])
def linear_transform(basis_vector, vector):
transformer = np.zeros((2, 2))
transformer[0][0] = basis_vector[0]
transformer[0][1] = basis_vector[1]
transformer[1][0] = -basis_vector[1]
transformer[1][1] = basis_vector[0]
    transformer = np.linalg.inv(transformer)
"""
Copyright 2015 <NAME>
Licensed under the Apache License, Version 2.0 (the "License");
you may not use this file except in compliance with the License.
You may obtain a copy of the License at
http://www.apache.org/licenses/LICENSE-2.0
Unless required by applicable law or agreed to in writing, software
distributed under the License is distributed on an "AS IS" BASIS,
WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
See the License for the specific language governing permissions and
limitations under the License.
"""
import heapq
import random
import mmh3
import numpy as np
from sklearn.feature_extraction import FeatureHasher
from .feature_extraction import *
COUNTS_FEATURE_TYPE = "allele-counts"
CATEGORIES_FEATURE_TYPE = "genotype-categories"
RESERVOIR_SAMPLING = "reservoir"
FEATURE_HASHING = "feature-hashing"
BOTTOMK_SKETCHING = "bottom-k"
class FeatureHashingAccumulator(object):
def __init__(self, n_features, n_samples):
self.n_features = n_features
self.n_samples = n_samples
def transform(self, stream):
feature_columns = dict()
for feature_idx, ((chrom, pos, gt), column) in enumerate(stream, start=1):
feature_name = "{}_{}_{}".format(chrom, pos, gt)
# this will cause collisions. that's okay -- we want that.
hash_ = abs(mmh3.hash(feature_name)) % self.n_features
# allows for sparse storage
if hash_ in feature_columns:
feature_columns[hash_] += np.array(column)
else:
feature_columns[hash_] = np.array(column)
if feature_idx % 10000 == 0:
print("Chunk", feature_idx // 10000, len(feature_columns))
# need to transpose, otherwise we get (n_features, n_individuals) instead
feature_matrix = np.array(list(feature_columns.values())).T
print(feature_matrix.shape)
return feature_matrix
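# Added sketch: minimal illustration of the accumulator's input format; the
# (chrom, pos, gt) keys and column values below are made-up assumptions. The
# stream is a sequence of ((chrom, pos, gt), column) pairs, and columns whose
# hashed feature names collide are summed into the same bucket, yielding a
# matrix with one row per sample.
def _feature_hashing_example():
    acc = FeatureHashingAccumulator(n_features=8, n_samples=3)
    stream = [(("1", 100, "0/1"), [0, 1, 2]),
              (("2", 200, "1/1"), [1, 0, 1])]
    matrix = acc.transform(stream)
    assert matrix.shape[0] == 3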
class BottomKAccumulator(object):
"""
Online sampling of columns using bottom-k sketching
"""
def __init__(self, n_features):
self.n_features = n_features
def transform(self, stream):
feature_columns = []
for feature_idx, ((chrom, pos, gt), column) in enumerate(stream):
feature_name = "{}_{}_{}".format(chrom, pos, gt)
# Python's built-in heap is a min heap, so it
# will keep the largest elements. In practice,
# this probably doesn't matter but we are going
# to negate the hashes anyway so it keeps the
# smallest elements. Note that for signed ints,
# 0 replaces one of the positive values so we can
# convert any positive value to negative without an
# overflow but not the other way around
# Also, mmh3.hash returns a 32-bit signed int
hash_ = abs(mmh3.hash(feature_name))
# we use the feature_idx to break ties since numpy arrays
# are not comparable (sortable)
if len(feature_columns) < self.n_features:
heapq.heappush(feature_columns,
(hash_, feature_idx, column))
else:
heapq.heapreplace(feature_columns,
(hash_, feature_idx, column))
if feature_idx % 10000 == 0:
print("Chunk", feature_idx // 10000, len(feature_columns))
# drop the hash and feature idx
feature_columns = [column for _, _, column in feature_columns]
# need to transpose, otherwise we get (n_features, n_individuals) instead
        feature_matrix = np.array(feature_columns).T
        return feature_matrix
"""
====================================================================
Plot the decision surfaces of ensembles of trees on the iris dataset
====================================================================
Plot the decision surfaces of forests of randomized trees trained on pairs of
features of the iris dataset.
This plot compares the decision surfaces learned by a decision tree classifier
(first column), by a random forest classifier (second column), by an extra-
trees classifier (third column) and by an AdaBoost classifier (fourth column).
In the first row, the classifiers are built using the sepal width and
the sepal length features only, on the second row using the petal length and
sepal length only, and on the third row using the petal width and the
petal length only.
In descending order of quality, when trained (outside of this example) on all
4 features using 30 estimators and scored using 10 fold cross validation,
we see::
ExtraTreesClassifier() # 0.95 score
RandomForestClassifier() # 0.94 score
AdaBoost(DecisionTree(max_depth=3)) # 0.94 score
DecisionTree(max_depth=None) # 0.94 score
Increasing `max_depth` for AdaBoost lowers the standard deviation of
the scores (but the average score does not improve).
See the console's output for further details about each model.
In this example you might try to:
1) vary the ``max_depth`` for the ``DecisionTreeClassifier`` and
``AdaBoostClassifier``, perhaps try ``max_depth=3`` for the
``DecisionTreeClassifier`` or ``max_depth=None`` for ``AdaBoostClassifier``
2) vary ``n_estimators``
It is worth noting that RandomForests and ExtraTrees can be fitted in parallel
on many cores as each tree is built independently of the others. AdaBoost's
samples are built sequentially and so do not use multiple cores.
"""
print(__doc__)
import numpy as np
import matplotlib.pyplot as plt
from matplotlib.colors import ListedColormap
from sklearn.datasets import load_iris
from sklearn.ensemble import (RandomForestClassifier, ExtraTreesClassifier,
AdaBoostClassifier)
from sklearn.tree import DecisionTreeClassifier
# Parameters
n_classes = 3
n_estimators = 30
cmap = plt.cm.RdYlBu
plot_step = 0.02 # fine step width for decision surface contours
plot_step_coarser = 0.5 # step widths for coarse classifier guesses
RANDOM_SEED = 13 # fix the seed on each iteration
# Load data
iris = load_iris()
plot_idx = 1
models = [DecisionTreeClassifier(max_depth=None),
RandomForestClassifier(n_estimators=n_estimators),
ExtraTreesClassifier(n_estimators=n_estimators),
AdaBoostClassifier(DecisionTreeClassifier(max_depth=3),
n_estimators=n_estimators)]
for pair in ([0, 1], [0, 2], [2, 3]):
for model in models:
# We only take the two corresponding features
X = iris.data[:, pair]
y = iris.target
# Shuffle
idx = np.arange(X.shape[0])
np.random.seed(RANDOM_SEED)
        np.random.shuffle(idx)
"""
CBMA methods from the multilevel kernel density analysis (MKDA) family
"""
import logging
import multiprocessing as mp
import numpy as np
import nibabel as nib
from tqdm.auto import tqdm
from scipy import ndimage, special
from nilearn.masking import apply_mask, unmask
from statsmodels.sandbox.stats.multicomp import multipletests
from .kernel import MKDAKernel, KDAKernel
from ...results import MetaResult
from .base import CBMAEstimator
from .kernel import KernelTransformer
from ...stats import null_to_p, p_to_z, one_way, two_way
from ...due import due
from ... import references
LGR = logging.getLogger(__name__)
@due.dcite(references.MKDA, description='Introduces MKDA.')
class MKDADensity(CBMAEstimator):
r"""
Multilevel kernel density analysis- Density analysis [1]_.
Parameters
----------
kernel_estimator : :obj:`nimare.meta.cbma.base.KernelTransformer`, optional
Kernel with which to convolve coordinates from dataset. Default is
MKDAKernel.
**kwargs
Keyword arguments. Arguments for the kernel_estimator can be assigned
here, with the prefix '\kernel__' in the variable name.
References
----------
.. [1] Wager, <NAME>., <NAME>, and <NAME>. "Meta-analysis
of functional neuroimaging data: current and future directions." Social
cognitive and affective neuroscience 2.2 (2007): 150-158.
https://doi.org/10.1093/scan/nsm015
"""
def __init__(self, kernel_estimator=MKDAKernel, **kwargs):
kernel_args = {k.split('kernel__')[1]: v for k, v in kwargs.items()
if k.startswith('kernel__')}
if not issubclass(kernel_estimator, KernelTransformer):
raise ValueError('Argument "kernel_estimator" must be a '
'KernelTransformer')
kwargs = {k: v for k, v in kwargs.items() if not k.startswith('kernel__')}
for k in kwargs.keys():
LGR.warning('Keyword argument "{0}" not recognized'.format(k))
self.kernel_estimator = kernel_estimator(**kernel_args)
self.mask = None
self.dataset = None
self.results = None
def _fit(self, dataset):
"""
Perform MKDA density meta-analysis on dataset.
Parameters
----------
dataset : :obj:`nimare.dataset.Dataset`
Dataset to analyze.
"""
self.dataset = dataset
self.mask = dataset.masker.mask_img
ma_values = self.kernel_estimator.transform(dataset, masked=True)
# Weight each SCM by square root of sample size
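        # (Added note) Three cases are handled below: sqrt(n) weights normalized
        # to sum to one when only sample sizes are known; the same weights with
        # fixed-effects ('ffx') studies additionally down-weighted by 0.75 when
        # the inference type is also known; and uniform weights of one otherwise.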
ids_df = self.dataset.coordinates.groupby('id').first()
if 'n' in ids_df.columns and 'inference' not in ids_df.columns:
ids_n = ids_df['n'].astype(float).values
weight_vec = np.sqrt(ids_n)[:, None] / np.sum(np.sqrt(ids_n))
elif 'n' in ids_df.columns and 'inference' in ids_df.columns:
ids_n = ids_df['n'].astype(float).values
ids_inf = ids_df['inference'].map({'ffx': 0.75,
'rfx': 1.}).values
weight_vec = ((np.sqrt(ids_n)[:, None] * ids_inf[:, None]) /
np.sum(np.sqrt(ids_n) * ids_inf))
else:
weight_vec = np.ones((ma_values.shape[0], 1))
self.weight_vec = weight_vec
ma_values *= self.weight_vec
of_values = np.sum(ma_values, axis=0)
images = {'of': of_values}
return images
def _run_fwe_permutation(self, params):
iter_ijk, iter_df, conn, voxel_thresh = params
iter_ijk = np.squeeze(iter_ijk)
iter_df[['i', 'j', 'k']] = iter_ijk
iter_ma_maps = self.kernel_estimator.transform(iter_df, mask=self.mask, masked=True)
iter_ma_maps *= self.weight_vec
iter_of_map = np.sum(iter_ma_maps, axis=0)
iter_max_value = np.max(iter_of_map)
iter_of_map = unmask(iter_of_map, self.mask)
vthresh_iter_of_map = iter_of_map.get_data().copy()
vthresh_iter_of_map[vthresh_iter_of_map < voxel_thresh] = 0
labeled_matrix = ndimage.measurements.label(vthresh_iter_of_map, conn)[0]
clust_sizes = [np.sum(labeled_matrix == val) for val in np.unique(labeled_matrix)]
clust_sizes = clust_sizes[1:] # First cluster is zeros in matrix
if clust_sizes:
iter_max_cluster = np.max(clust_sizes)
else:
iter_max_cluster = 0
return iter_max_value, iter_max_cluster
def _fwe_correct_permutation(self, result, voxel_thresh=0.01, n_iters=1000,
n_cores=-1):
of_map = result.get_map('of', return_type='image')
null_ijk = np.vstack(np.where(self.mask.get_data())).T
if n_cores <= 0:
n_cores = mp.cpu_count()
elif n_cores > mp.cpu_count():
LGR.warning(
'Desired number of cores ({0}) greater than number '
'available ({1}). Setting to {1}.'.format(n_cores,
mp.cpu_count()))
n_cores = mp.cpu_count()
vthresh_of_map = of_map.get_data().copy()
vthresh_of_map[vthresh_of_map < voxel_thresh] = 0
rand_idx = np.random.choice(
null_ijk.shape[0],
size=(self.dataset.coordinates.shape[0], n_iters))
rand_ijk = null_ijk[rand_idx, :]
iter_ijks = np.split(rand_ijk, rand_ijk.shape[1], axis=1)
iter_df = self.dataset.coordinates.copy()
conn = np.ones((3, 3, 3))
# Define parameters
iter_conn = [conn] * n_iters
iter_dfs = [iter_df] * n_iters
iter_voxel_thresh = [voxel_thresh] * n_iters
params = zip(iter_ijks, iter_dfs, iter_conn, iter_voxel_thresh)
if n_cores == 1:
perm_results = []
for pp in tqdm(params, total=n_iters):
perm_results.append(self._run_fwe_permutation(pp))
else:
with mp.Pool(n_cores) as p:
perm_results = list(tqdm(p.imap(self._run_fwe_permutation, params),
total=n_iters))
perm_max_values, perm_clust_sizes = zip(*perm_results)
# Cluster-level FWE
labeled_matrix, n_clusters = ndimage.measurements.label(vthresh_of_map, conn)
cfwe_map = np.zeros(self.mask.shape)
for i_clust in range(1, n_clusters + 1):
clust_size = np.sum(labeled_matrix == i_clust)
clust_idx = np.where(labeled_matrix == i_clust)
cfwe_map[clust_idx] = -np.log(null_to_p(
clust_size, perm_clust_sizes, 'upper'))
cfwe_map[np.isinf(cfwe_map)] = -np.log(np.finfo(float).eps)
cfwe_map = apply_mask(nib.Nifti1Image(cfwe_map, self.mask.affine),
self.mask)
# Voxel-level FWE
vfwe_map = apply_mask(of_map, self.mask)
for i_vox, val in enumerate(vfwe_map):
vfwe_map[i_vox] = -np.log(null_to_p(val, perm_max_values, 'upper'))
vfwe_map[np.isinf(vfwe_map)] = -np.log(np.finfo(float).eps)
vthresh_of_map = apply_mask(nib.Nifti1Image(vthresh_of_map,
of_map.affine),
self.mask)
images = {'vthresh': vthresh_of_map,
'logp_level-cluster': cfwe_map,
'logp_level-voxel': vfwe_map}
return images
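# Minimal usage sketch for MKDADensity (assumptions: `dset` is a
# nimare.dataset.Dataset and the CBMAEstimator base class exposes a public
# fit() wrapper around _fit(); `r` is a kernel radius argument for MKDAKernel):
#   meta = MKDADensity(kernel__r=10)
#   result = meta.fit(dset)
#   of_img = result.get_map('of', return_type='image')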
@due.dcite(references.MKDA, description='Introduces MKDA.')
class MKDAChi2(CBMAEstimator):
r"""
Multilevel kernel density analysis- Chi-square analysis [1]_.
Parameters
----------
prior : float, optional
Uniform prior probability of each feature being active in a map in
the absence of evidence from the map. Default: 0.5
kernel_estimator : :obj:`nimare.meta.cbma.base.KernelTransformer`, optional
Kernel with which to convolve coordinates from dataset. Default is
MKDAKernel.
**kwargs
Keyword arguments. Arguments for the kernel_estimator can be assigned
        here, with the prefix 'kernel__' in the variable name.
References
----------
    .. [1] Wager, Tor D., Martin Lindquist, and Lauren Kaplan. "Meta-analysis
of functional neuroimaging data: current and future directions." Social
cognitive and affective neuroscience 2.2 (2007): 150-158.
https://doi.org/10.1093/scan/nsm015
"""
def __init__(self, prior=0.5, kernel_estimator=MKDAKernel, **kwargs):
kernel_args = {k.split('kernel__')[1]: v for k, v in kwargs.items()
if k.startswith('kernel__')}
if not issubclass(kernel_estimator, KernelTransformer):
raise ValueError('Argument "kernel_estimator" must be a '
'KernelTransformer')
kwargs = {k: v for k, v in kwargs.items() if not k.startswith('kernel__')}
for k in kwargs.keys():
LGR.warning('Keyword argument "{0}" not recognized'.format(k))
self.kernel_estimator = kernel_estimator(**kernel_args)
self.prior = prior
def fit(self, dataset, dataset2):
"""
Fit Estimator to datasets.
Parameters
----------
dataset, dataset2 : :obj:`nimare.dataset.Dataset`
Dataset objects to analyze.
Returns
-------
:obj:`nimare.base.base.MetaResult`
Results of Estimator fitting.
"""
self._validate_input(dataset)
self._validate_input(dataset2)
maps = self._fit(dataset, dataset2)
self.results = MetaResult(self, dataset.masker.mask_img, maps)
return self.results
def _fit(self, dataset, dataset2):
self.dataset = dataset
self.dataset2 = dataset2
self.mask = dataset.masker.mask_img
ma_maps1 = self.kernel_estimator.transform(self.dataset, mask=self.mask, masked=True)
ma_maps2 = self.kernel_estimator.transform(self.dataset2, mask=self.mask, masked=True)
# Calculate different count variables
n_selected = ma_maps1.shape[0]
n_unselected = ma_maps2.shape[0]
n_mappables = n_selected + n_unselected
# Transform MA maps to 1d arrays
ma_maps_all = np.vstack((ma_maps1, ma_maps2))
n_selected_active_voxels = np.sum(ma_maps1, axis=0)
n_unselected_active_voxels = np.sum(ma_maps2, axis=0)
# Nomenclature for variables below: p = probability,
# F = feature present, g = given, U = unselected, A = activation.
# So, e.g., pAgF = p(A|F) = probability of activation
# in a voxel if we know that the feature is present in a study.
pF = (n_selected * 1.0) / n_mappables
pA = np.array(np.sum(ma_maps_all, axis=0) / n_mappables).squeeze()
# Conditional probabilities
pAgF = n_selected_active_voxels * 1.0 / n_selected
pAgU = n_unselected_active_voxels * 1.0 / n_unselected
pFgA = pAgF * pF / pA
# Recompute conditionals with uniform prior
pAgF_prior = self.prior * pAgF + (1 - self.prior) * pAgU
pFgA_prior = pAgF * self.prior / pAgF_prior
# One-way chi-square test for consistency of activation
pAgF_chi2_vals = one_way(np.squeeze(n_selected_active_voxels),
n_selected)
pAgF_p_vals = special.chdtrc(1, pAgF_chi2_vals)
pAgF_sign = np.sign(n_selected_active_voxels -
np.mean(n_selected_active_voxels))
pAgF_z = p_to_z(pAgF_p_vals, tail='two') * pAgF_sign
# Two-way chi-square for specificity of activation
cells = np.squeeze(
np.array([[n_selected_active_voxels, n_unselected_active_voxels],
[n_selected - n_selected_active_voxels,
n_unselected - n_unselected_active_voxels]]).T)
pFgA_chi2_vals = two_way(cells)
pFgA_p_vals = special.chdtrc(1, pFgA_chi2_vals)
pFgA_p_vals[pFgA_p_vals < 1e-240] = 1e-240
pFgA_sign = np.sign(pAgF - pAgU).ravel()
pFgA_z = p_to_z(pFgA_p_vals, tail='two') * pFgA_sign
images = {
'pA': pA,
'pAgF': pAgF,
'pFgA': pFgA,
('pAgF_given_pF=%0.2f' % self.prior): pAgF_prior,
('pFgA_given_pF=%0.2f' % self.prior): pFgA_prior,
'consistency_z': pAgF_z,
'specificity_z': pFgA_z,
'consistency_chi2': pAgF_chi2_vals,
'specificity_chi2': pFgA_chi2_vals,
'consistency_p': pAgF_p_vals,
'specificity_p': pFgA_p_vals,
}
return images
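    # Usage sketch for the chi-square analysis (assumption: ds1 and ds2 are
    # nimare Datasets holding the two contrasting sets of studies):
    #   meta = MKDAChi2(prior=0.5)
    #   result = meta.fit(ds1, ds2)
    #   z = result.get_map('consistency_z', return_type='array')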
def _run_fwe_permutation(self, params):
iter_df1, iter_df2, iter_ijk1, iter_ijk2 = params
iter_ijk1 = np.squeeze(iter_ijk1)
iter_ijk2 = np.squeeze(iter_ijk2)
iter_df1[['i', 'j', 'k']] = iter_ijk1
iter_df2[['i', 'j', 'k']] = iter_ijk2
temp_ma_maps1 = self.kernel_estimator.transform(iter_df1, self.mask, masked=True)
temp_ma_maps2 = self.kernel_estimator.transform(iter_df2, self.mask, masked=True)
n_selected = temp_ma_maps1.shape[0]
n_unselected = temp_ma_maps2.shape[0]
n_selected_active_voxels = np.sum(temp_ma_maps1, axis=0)
n_unselected_active_voxels = np.sum(temp_ma_maps2, axis=0)
# Conditional probabilities
# pAgF = n_selected_active_voxels * 1.0 / n_selected
# pAgU = n_unselected_active_voxels * 1.0 / n_unselected
# One-way chi-square test for consistency of activation
pAgF_chi2_vals = one_way(np.squeeze(n_selected_active_voxels),
n_selected)
iter_pAgF_chi2 = np.max(pAgF_chi2_vals)
# Two-way chi-square for specificity of activation
cells = np.squeeze(
np.array([[n_selected_active_voxels, n_unselected_active_voxels],
[n_selected - n_selected_active_voxels,
n_unselected - n_unselected_active_voxels]]).T)
pFgA_chi2_vals = two_way(cells)
iter_pFgA_chi2 = np.max(pFgA_chi2_vals)
return iter_pAgF_chi2, iter_pFgA_chi2
def _fwe_correct_permutation(self, result, voxel_thresh=0.01, n_iters=5000,
n_cores=-1):
null_ijk = np.vstack(np.where(self.mask.get_data())).T
pAgF_chi2_vals = result.get_map('consistency_chi2', return_type='array')
pFgA_chi2_vals = result.get_map('specificity_chi2', return_type='array')
pAgF_z_vals = result.get_map('consistency_z', return_type='array')
pFgA_z_vals = result.get_map('specificity_z', return_type='array')
pAgF_sign = np.sign(pAgF_z_vals)
pFgA_sign = np.sign(pFgA_z_vals)
if n_cores <= 0:
n_cores = mp.cpu_count()
elif n_cores > mp.cpu_count():
LGR.warning(
'Desired number of cores ({0}) greater than number '
'available ({1}). Setting to {1}.'.format(n_cores,
mp.cpu_count()))
n_cores = mp.cpu_count()
iter_df1 = self.dataset.coordinates.copy()
iter_df2 = self.dataset2.coordinates.copy()
iter_dfs1 = [iter_df1] * n_iters
iter_dfs2 = [iter_df2] * n_iters
rand_idx1 = np.random.choice(null_ijk.shape[0],
size=(iter_df1.shape[0], n_iters))
rand_ijk1 = null_ijk[rand_idx1, :]
        iter_ijks1 = np.split(rand_ijk1, rand_ijk1.shape[1], axis=1)
"""
Tools for DESI spectroperfectionism extractions implemented for a CPU
"""
import sys
import numpy as np
from numpy.polynomial.legendre import legvander, legval
from numpy.polynomial import hermite_e as He
import scipy.special
import numba
#-------------------------------------------------------------------------
def evalcoeffs(psfdata, wavelengths, specmin=0, nspec=None):
'''
evaluate PSF coefficients parameterized as Legendre polynomials
Args:
psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
wavelengths: 1D array of wavelengths
Options:
specmin: first spectrum to include
nspec: number of spectra to include (default: all)
Returns a dictionary params[paramname] = value[nspec, nwave]
The Gauss Hermite coefficients are treated differently:
params['GH'] = value[i,j,nspec,nwave]
The dictionary also contains scalars with the recommended spot size
2*(HSIZEX, HSIZEY)+1 and Gauss-Hermite degrees GHDEGX, GHDEGY
(which is also derivable from the dimensions of params['GH'])
'''
if nspec is None:
nspec = psfdata['PSF']['COEFF'].shape[1]
p = dict(WAVE=wavelengths)
#- Evaluate X and Y which have different dimensionality from the
#- PSF coefficients (and might have different WAVEMIN, WAVEMAX)
meta = psfdata['XTRACE'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
p['X'] = legval(ww, psfdata['XTRACE']['X'][specmin:specmin+nspec].T)
meta = psfdata['YTRACE'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
p['Y'] = legval(ww, psfdata['YTRACE']['Y'][specmin:specmin+nspec].T)
#- Evaluate the remaining PSF coefficients with a shared dimensionality
#- and WAVEMIN, WAVEMAX
meta = psfdata['PSF'].meta
wavemin, wavemax = meta['WAVEMIN'], meta['WAVEMAX']
ww = (wavelengths - wavemin) * (2.0 / (wavemax - wavemin)) - 1.0
L = np.polynomial.legendre.legvander(ww, meta['LEGDEG'])
nparam = psfdata['PSF']['COEFF'].shape[0]
ndeg = psfdata['PSF']['COEFF'].shape[2]
nwave = L.shape[0]
nghx = meta['GHDEGX']+1
nghy = meta['GHDEGY']+1
p['GH'] = np.zeros((nghx, nghy, nspec, nwave))
for name, coeff in zip(psfdata['PSF']['PARAM'], psfdata['PSF']['COEFF']):
name = name.strip()
coeff = coeff[specmin:specmin+nspec]
if name.startswith('GH-'):
i, j = map(int, name.split('-')[1:3])
p['GH'][i,j] = L.dot(coeff.T).T
else:
p[name] = L.dot(coeff.T).T
#- Include some additional keywords that we'll need
for key in ['HSIZEX', 'HSIZEY', 'GHDEGX', 'GHDEGY']:
p[key] = meta[key]
return p
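# Minimal usage sketch (assumption: `psfdata` comes from this package's
# io.read_psf() for a Gauss-Hermite PSF file, with wavelengths in Angstroms):
#   wavelengths = np.arange(5760.0, 7620.0, 0.8)
#   p = evalcoeffs(psfdata, wavelengths, specmin=0, nspec=25)
#   p['X'].shape   # (25, nwave) x trace positions
#   p['GH'].shape  # (GHDEGX+1, GHDEGY+1, 25, nwave)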
def calc_pgh(ispec, wavelengths, psfparams):
'''
Calculate pixelated Gauss Hermite for all wavelengths of a single spectrum
Args:
ispec : integer spectrum number
wavelengths : array of wavelengths to evaluate
psfparams : dictionary of PSF parameters returned by evalcoeffs
returns pGHx, pGHy
where pGHx[ghdeg+1, nwave, nbinsx] contains the pixel-integrated
Gauss-Hermite polynomial for all degrees at all wavelengths across
    nbinsx bins spanning the PSF spot, and similarly for pGHy. The core
PSF will then be evaluated as
PSFcore = sum_ij c_ij outer(pGHy[j], pGHx[i])
'''
#- shorthand
p = psfparams
#- spot size (ny,nx)
nx = 2*p['HSIZEX']+1
ny = 2*p['HSIZEY']+1
nwave = len(wavelengths)
# print('Spot size (ny,nx) = {},{}'.format(ny, nx))
# print('nwave = {}'.format(nwave))
#- x and y edges of bins that span the center of the PSF spot
xedges = np.repeat(np.arange(nx+1) - nx//2 - 0.5, nwave).reshape(nx+1, nwave)
yedges = np.repeat(np.arange(ny+1) - ny//2 - 0.5, nwave).reshape(ny+1, nwave)
#- Shift to be relative to the PSF center and normalize
#- by the PSF sigma (GHSIGX, GHSIGY).
#- Note: x,y = 0,0 is center of pixel 0,0 not corner
#- Dimensions: xedges[nx+1, nwave], yedges[ny+1, nwave]
dx = (p['X'][ispec]+0.5)%1 - 0.5
dy = (p['Y'][ispec]+0.5)%1 - 0.5
xedges = ((xedges - dx)/p['GHSIGX'][ispec])
yedges = ((yedges - dy)/p['GHSIGY'][ispec])
# print('xedges.shape = {}'.format(xedges.shape))
# print('yedges.shape = {}'.format(yedges.shape))
#- Degree of the Gauss-Hermite polynomials
ghdegx = p['GHDEGX']
ghdegy = p['GHDEGY']
#- Evaluate the Hermite polynomials at the pixel edges
#- HVx[ghdegx+1, nwave, nx+1]
#- HVy[ghdegy+1, nwave, ny+1]
HVx = He.hermevander(xedges, ghdegx).T
HVy = He.hermevander(yedges, ghdegy).T
# print('HVx.shape = {}'.format(HVx.shape))
# print('HVy.shape = {}'.format(HVy.shape))
#- Evaluate the Gaussians at the pixel edges
#- Gx[nwave, nx+1]
#- Gy[nwave, ny+1]
Gx = np.exp(-0.5*xedges**2).T / np.sqrt(2. * np.pi) # (nwave, nedges)
Gy = np.exp(-0.5*yedges**2).T / np.sqrt(2. * np.pi)
# print('Gx.shape = {}'.format(Gx.shape))
# print('Gy.shape = {}'.format(Gy.shape))
#- Combine into Gauss*Hermite
GHx = HVx * Gx
GHy = HVy * Gy
#- Integrate over the pixels using the relationship
# Integral{ H_k(x) exp(-0.5 x^2) dx} = -H_{k-1}(x) exp(-0.5 x^2) + const
#- pGHx[ghdegx+1, nwave, nx]
#- pGHy[ghdegy+1, nwave, ny]
pGHx = np.zeros((ghdegx+1, nwave, nx))
pGHy = np.zeros((ghdegy+1, nwave, ny))
pGHx[0] = 0.5 * np.diff(scipy.special.erf(xedges/np.sqrt(2.)).T)
pGHy[0] = 0.5 * np.diff(scipy.special.erf(yedges/np.sqrt(2.)).T)
pGHx[1:] = GHx[:ghdegx,:,0:nx] - GHx[:ghdegx,:,1:nx+1]
pGHy[1:] = GHy[:ghdegy,:,0:ny] - GHy[:ghdegy,:,1:ny+1]
# print('pGHx.shape = {}'.format(pGHx.shape))
# print('pGHy.shape = {}'.format(pGHy.shape))
return pGHx, pGHy
@numba.jit(nopython=True)
def multispot(pGHx, pGHy, ghc):
    '''
    Combine pixelated Gauss-Hermite x/y terms into 2D PSF spots:
    spots[iwave] = sum_ij ghc[i,j,iwave] * outer(pGHy[j,iwave], pGHx[i,iwave])
    '''
nx = pGHx.shape[-1]
ny = pGHy.shape[-1]
nwave = pGHx.shape[1]
spots = np.zeros((nwave, ny, nx))
for iwave in range(nwave):
for i in range(pGHx.shape[0]):
px = pGHx[i,iwave]
for j in range(0, pGHy.shape[0]):
py = pGHy[j,iwave]
c = ghc[i,j,iwave]
#- c * outer(py, px)
for iy in range(len(py)):
for ix in range(len(px)):
spots[iwave, iy, ix] += c * py[iy] * px[ix]
return spots
def get_spots(specmin, nspec, wavelengths, psfdata):
'''Calculate PSF spots for the specified spectra and wavelengths
Args:
specmin: first spectrum to include
nspec: number of spectra to evaluate spots for
wavelengths: 1D array of wavelengths
psfdata: PSF data from io.read_psf() of Gauss Hermite PSF file
Returns:
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
'''
nwave = len(wavelengths)
p = evalcoeffs(psfdata, wavelengths, specmin, nspec)
nx = 2*p['HSIZEX']+1
ny = 2*p['HSIZEY']+1
spots = np.zeros((nspec, nwave, ny, nx))
for ispec in range(nspec):
pGHx, pGHy = calc_pgh(ispec, wavelengths, p)
spots[ispec] = multispot(pGHx, pGHy, p['GH'][:,:,ispec,:])
#- ensure positivity and normalize
#- TODO: should this be within multispot itself?
spots = spots.clip(0.0)
norm = np.sum(spots, axis=(2,3)) #- norm[nspec, nwave] = sum over each spot
spots = (spots.T / norm.T).T #- transpose magic for numpy array broadcasting
#- Define corners of spots
#- extra 0.5 is because X and Y are relative to center of pixel not edge
xc = np.floor(p['X'] - p['HSIZEX'] + 0.5).astype(int)
yc = np.floor(p['Y'] - p['HSIZEY'] + 0.5).astype(int)
corners = (xc, yc)
return spots, corners, p
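# Usage sketch building on evalcoeffs (same `psfdata` assumption as above):
#   spots, corners, p = get_spots(0, 25, wavelengths, psfdata)
#   spots.shape       # (25, nwave, ny, nx); each spot is clipped >= 0 and sums to 1
#   xc, yc = corners  # integer lower-left pixel corner of each spot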
@numba.jit
def get_xyrange(ispec, nspec, iwave, nwave, spots, corners):
"""
Find xy ranges that these spectra cover
Args:
ispec: starting spectrum index
nspec: number of spectra
iwave: starting wavelength index
nwave: number of wavelengths
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
Returns (xmin, xmax, ymin, ymax)
spots[ispec:ispec+nspec,iwave:iwave+nwave] touch pixels[ymin:ymax,xmin:xmax]
"""
ny, nx = spots.shape[2:4]
xc = corners[0][ispec:ispec+nspec, iwave:iwave+nwave]
yc = corners[1][ispec:ispec+nspec, iwave:iwave+nwave]
xmin = np.min(xc)
xmax = np.max(xc) + nx
ymin = np.min(yc)
ymax = np.max(yc) + ny
return xmin, xmax, ymin, ymax
@numba.jit
def projection_matrix(ispec, nspec, iwave, nwave, spots, corners):
'''
Create the projection matrix A for p = Af
Args:
ispec: starting spectrum index
nspec: number of spectra
iwave: starting wavelength index
nwave: number of wavelengths
spots: 4D array[ispec, iwave, ny, nx] of PSF spots
corners: (xc,yc) where each is 2D array[ispec,iwave] lower left corner of spot
Returns (A[iy, ix, ispec, iwave], (xmin, xmax, ymin, ymax))
Cast to 2D for using with linear algebra:
nypix, nxpix, nspec, nwave = A.shape
A2D = A.reshape((nypix*nxpix, nspec*nwave))
pix1D = A2D.dot(flux1D)
'''
ny, nx = spots.shape[2:4]
xc, yc = corners
xmin, xmax, ymin, ymax = get_xyrange(ispec, nspec, iwave, nwave, spots, corners)
A = np.zeros((ymax-ymin,xmax-xmin,nspec,nwave))
for i in range(nspec):
for j in range(nwave):
ixc = xc[ispec+i, iwave+j] - xmin
iyc = yc[ispec+i, iwave+j] - ymin
A[iyc:iyc+ny, ixc:ixc+nx, i, j] = spots[ispec+i,iwave+j]
return A, (xmin, xmax, ymin, ymax)
def get_spec_padding(ispec, nspec, bundlesize):
"""
Calculate padding needed for boundary spectra
Args:
ispec: starting spectrum index
nspec: number of spectra to extract (not including padding)
bundlesize: size of fiber bundles; padding not needed on their edges
returns specmin, nspecpad
"""
#- if not at upper boundary, extract one additional spectrum
if (ispec+nspec) % bundlesize == 0:
nspecpad = nspec
else:
nspecpad = nspec + 1
#- if not at lower boundary, start one lower and extract one more
if ispec % bundlesize == 0:
specmin = ispec
else:
specmin = ispec-1
nspecpad += 1
assert nspecpad <= nspec+2
assert specmin >= ispec-1
assert specmin+nspecpad <= ispec+nspec+1
return specmin, nspecpad
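# Worked example: with bundlesize=25, ispec=10, nspec=5 neither edge touches a
# bundle boundary, so one spectrum is padded on each side and the function
# returns specmin=9, nspecpad=7; at a bundle edge (ispec=0, nspec=5) only the
# upper side is padded, giving specmin=0, nspecpad=6.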
def get_resolution_diags(R, ndiag, ispec, nspec, nwave, wavepad):
"""Returns the diagonals of R in a form suited for creating scipy.sparse.dia_matrix
Args:
R: dense resolution matrix
ndiag: number of diagonal elements to keep in the resolution matrix
ispec: starting spectrum index relative to padding
nspec: number of spectra to extract (not including padding)
nwave: number of wavelengths to extract (not including padding)
wavepad: number of extra wave bins to extract (and discard) on each end
Returns:
Rdiags (nspec, 2*ndiag+1, nwave): resolution matrix diagonals
"""
nwavetot = 2*wavepad + nwave
Rdiags = np.zeros( (nspec, 2*ndiag+1, nwave) )
#- TODO: check indexing
for i in np.arange(ispec, ispec+nspec):
#- subregion of R for this spectrum
ii = slice(nwavetot*i, nwavetot*(i+1))
Rx = R[ii, ii]
#- subregion of non-padded wavelengths for this spectrum
for j in range(wavepad,wavepad+nwave):
# Rdiags dimensions [nspec, 2*ndiag+1, nwave]
Rdiags[i-ispec, :, j-wavepad] = Rx[j-ndiag:j+ndiag+1, j]
return Rdiags
def ex2d_padded(image, imageivar, patch, spots, corners, pixpad_frac, regularize, model, psferr):
"""
    Extract a patch with border padding, but only return results for the patch

    Args:
        image: full image (not trimmed to a particular xy range)
        imageivar: image inverse variance (same dimensions as image)
        patch: patch specification object providing ispec, bspecmin,
            nspectra_per_patch, iwave, nwavestep, wavepad and bundlesize
        spots: array[nspec, nwave, ny, nx] pre-evaluated PSF spots
        corners: tuple of arrays xcorners[nspec, nwave], ycorners[nspec, nwave]
        pixpad_frac: fraction of the y padding from the padded wavelengths to retain
        regularize: regularization parameter passed to ex2d_patch
        model: if True, include the patch model image in the output
        psferr: fractional PSF model error used when computing chi2pix

    Returns a dict with flux, ivar, Rdiags, modelimage, xyslice,
    pixmask_fraction and chi2pix for the non-padded core of the patch
"""
ispec = patch.ispec - patch.bspecmin
nspec = patch.nspectra_per_patch
iwave = patch.iwave
nwave = patch.nwavestep
wavepad = patch.wavepad
specmin, nspecpad = get_spec_padding(ispec, nspec, patch.bundlesize)
#- Total number of wavelengths to be extracted, including padding
nwavetot = nwave+2*wavepad
#- Get the projection matrix for the full wavelength range with padding
A4, xyrange = projection_matrix(specmin, nspecpad,
iwave-wavepad, nwave+2*wavepad, spots, corners)
xmin, xmax, ypadmin, ypadmax = xyrange
#- But we only want to use the pixels covered by the original wavelengths
#- TODO: this unnecessarily also re-calculates xranges
xlo, xhi, ymin, ymax = get_xyrange(specmin, nspecpad, iwave, nwave, spots, corners)
ypadlo = int((ymin - ypadmin) * (1 - pixpad_frac))
ypadhi = int((ymax - ypadmin) + (ypadmax - ymax) * (pixpad_frac))
A4 = A4[ypadlo:ypadhi]
#- Number of image pixels in y and x
ny, nx = A4.shape[0:2]
ymin = ypadmin+ypadlo
ymax = ypadmin+ypadhi
#- Check dimensions
assert A4.shape[2] == nspecpad
assert A4.shape[3] == nwave + 2*wavepad
#- Diagonals of R in a form suited for creating scipy.sparse.dia_matrix
ndiag = spots.shape[2]//2
specslice = np.s_[ispec-specmin:ispec-specmin+nspec,wavepad:wavepad+nwave]
if (0 <= ymin) & (ymin+ny <= image.shape[0]):
xyslice = np.s_[ymin:ymin+ny, xmin:xmin+nx]
patchpixels = image[xyslice]
patchivar = imageivar[xyslice]
fx, ivarfx, R = ex2d_patch(patchpixels, patchivar, A4, regularize=regularize)
#- Select the non-padded spectra x wavelength core region
specflux = fx[specslice]
specivar = ivarfx[specslice]
#- Diagonals of R in a form suited for creating scipy.sparse.dia_matrix
Rdiags = get_resolution_diags(R, ndiag, ispec-specmin, nspec, nwave, wavepad)
else:
#- TODO: this zeros out the entire patch if any of it is off the edge
#- of the image; we can do better than that
fx = np.zeros((nspecpad, nwavetot))
specflux = np.zeros((nspec, nwave))
specivar = np.zeros((nspec, nwave))
Rdiags = np.zeros( (nspec, 2*ndiag+1, nwave) )
# xyslice = np.s_[
# max(0, ymin):min(ymin+ny, image.shape[0]),
# max(0, xmin):min(xmin+nx, image.shape[1])
# ]
xyslice = None
patchivar = np.zeros((ny, nx))
patchpixels = np.zeros((ny, nx))
if np.any(np.isnan(specflux)):
raise RuntimeError('Found NaN in extracted flux')
Apadded = A4.reshape(ny*nx, nspecpad*nwavetot)
Apatch = A4[:, :, ispec-specmin:ispec-specmin+nspec, wavepad:wavepad+nwave]
Apatch = Apatch.reshape(ny*nx, nspec*nwave)
pixmask_fraction = Apatch.T.dot(patchivar.ravel() == 0)
pixmask_fraction = pixmask_fraction.reshape(nspec, nwave)
modelpadded = Apadded.dot(fx.ravel()).reshape(ny, nx)
modelivar = (modelpadded*psferr + 1e-32)**-2
ii = (modelivar > 0 ) & (patchivar > 0)
totpix_ivar = np.zeros((ny, nx))
totpix_ivar[ii] = 1.0 / (1.0/modelivar[ii] + 1.0/patchivar[ii])
#- Weighted chi2 of pixels that contribute to each flux bin;
#- only use unmasked pixels and avoid dividing by 0
chi = (patchpixels - modelpadded)*np.sqrt(totpix_ivar)
psfweight = Apadded.T.dot(totpix_ivar.ravel() > 0)
bad = psfweight == 0
#- Compute chi2pix and reshape
chi2pix = (Apadded.T.dot(chi.ravel()**2) * ~bad) / (psfweight + bad)
chi2pix = chi2pix.reshape(nspecpad, nwavetot)[specslice]
if model:
modelimage = Apatch.dot(specflux.ravel()).reshape(ny, nx)
else:
modelimage = None
#- TODO: add chi2pix, pixmask_fraction, optionally modelimage; see specter
result = dict(
flux = specflux,
ivar = specivar,
Rdiags = Rdiags,
modelimage = modelimage,
xyslice = xyslice,
pixmask_fraction = pixmask_fraction,
chi2pix = chi2pix,
)
return result
#- Simplest form of A.T.dot( Diag(w).dot(A) )
def dotdot1(A, w):
'''
return A.T.dot( Diag(w).dot(A) ) = (A.T * w).dot(A)
'''
return (A.T * w).dot(A)
#- 2x faster than dotdot1 by using sparse arrays
def dotdot2(A, w):
'''
return A.T.dot( Diag(w).dot(A) ) when A is sparse
'''
import scipy.sparse
W = scipy.sparse.spdiags(data=w, diags=[0,], m=len(w), n=len(w))
Ax = scipy.sparse.csc_matrix(A)
return Ax.T.dot(W.dot(Ax)).toarray()
#- 3x faster than dotdot1 by using numba and sparsity
@numba.jit(nopython=True)
def dotdot3(A, w):
'''
return A.T.dot( Diag(w).dot(A) ) when A is sparse using numba
'''
n, m = A.shape
B = np.zeros((m,m))
for i in range(n):
for j1 in range(m):
Aw = w[i] * A[i,j1]
if Aw != 0.0:
for j2 in range(j1, m):
tmp = Aw * A[i,j2]
B[j1, j2] += tmp
#- fill in other half
for j1 in range(m-1):
for j2 in range(j1+1, m):
B[j2, j1] = B[j1, j2]
return B
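#- Quick, illustrative equivalence check for the three implementations
#- (hypothetical shapes; not part of the original module):
#   A = np.random.rand(50, 10); A[A < 0.7] = 0.0   # sparse-ish test matrix
#   w = np.random.rand(50)
#   assert np.allclose(dotdot1(A, w), dotdot2(A, w))
#   assert np.allclose(dotdot1(A, w), dotdot3(A, w))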
@numba.jit(nopython=True)
def dotall(p, w, A):
'''Compute icov, y and fluxweight in the same loop(s)
icov = A^T W A
y = A^T W p
fluxweight = (A^T W).sum(axis=1)
Arguments:
        p: 1D array of pixel values
        w: 1D array of pixel inverse variances (weights)
A: projection matrix
Returns:
icov, y, fluxweight
'''
n, m = A.shape
icov = np.zeros((m,m))
y = np.zeros(m)
fluxweight = np.zeros(m)
for i in range(n):
for j1 in range(m):
Aw = w[i] * A[i,j1]
if Aw != 0.0:
for j2 in range(j1, m):
tmp = Aw * A[i,j2]
icov[j1, j2] += tmp
fluxweight[j1] += Aw
y[j1] += Aw * p[i]
#- fill in other half
for j1 in range(m-1):
for j2 in range(j1+1, m):
icov[j2, j1] = icov[j1, j2]
return icov, y, fluxweight
def deconvolve(pixel_values, pixel_ivar, A, regularize=0, debug=False):
"""Calculate the weighted linear least-squares flux solution for an observed trace.
Args:
pixel_values (ny*nx,): 1D array of pixel values
pixel_ivar (ny*nx,): 1D array of pixel inverse variances to use for weighting
A (ny*nx, nspec*nwave): projection matrix that transforms a 1D spectrum into a 2D image
Returns:
deconvolved (nspec*nwave): the best-fit 1D array of flux values
iCov (nspec*nwave, nspec*nwave): the correlated inverse covariance matrix of the deconvolved flux
"""
#- Set up the equation to solve (B&S eq 4)
iCov, y, fluxweight = dotall(pixel_values, pixel_ivar, A)
#- Add a weak flux=0 prior to avoid singular matrices
#- TODO: review this; compare to specter
minweight = 1e-4*np.max(fluxweight)
ibad = fluxweight < minweight
lambda_squared = regularize*regularize*np.ones_like(y)
lambda_squared[ibad] = minweight - fluxweight[ibad]
if np.any(lambda_squared):
iCov += np.diag(lambda_squared)
#- Solve the linear least-squares problem.
deconvolved = scipy.linalg.solve(iCov, y)
return deconvolved, iCov
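#- Usage sketch tying the pieces together (assumptions: A4/xyrange come from
#- projection_matrix above, patchpixels/patchivar are the corresponding image
#- cutouts, and decorrelate_noise is defined just below):
#   ny, nx, nspec, nwave = A4.shape
#   A2D = A4.reshape(ny*nx, nspec*nwave)
#   flux, iCov = deconvolve(patchpixels.ravel(), patchivar.ravel(), A2D)
#   fluxivar, R = decorrelate_noise(iCov)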
def decorrelate_noise(iCov, debug=False):
"""Calculate the decorrelated errors and resolution matrix via BS Eq 10-13
Args:
iCov (nspec*nwave, nspec*nwave): the inverse covariance matrix
Returns:
        ivar (nspec*nwave,): uncorrelated flux inverse variances
        R (nspec*nwave, nspec*nwave): resolution matrix
"""
# Calculate the matrix square root of iCov to diagonalize the flux errors.
u, v = np.linalg.eigh(iCov)
# Check that all eigenvalues are positive.
assert not debug or np.all(u > 0), 'Found some negative iCov eigenvalues.'
# Check that the eigenvectors are orthonormal so that vt.v = 1
assert not debug or np.allclose(np.eye(len(u)), v.T.dot(v))
Q = (v * np.sqrt(u)).dot(v.T)
# Check BS eqn.10
assert not debug or np.allclose(iCov, Q.dot(Q))
#- Calculate the corresponding resolution matrix and diagonal flux errors. (BS Eq 11-13)
s = np.sum(Q, axis=1)
R = Q/s[:, np.newaxis]
ivar = s**2
# Check BS eqn.14
assert not debug or np.allclose(iCov, R.T.dot(np.diag(ivar).dot(R)))
return ivar, R
def decorrelate_blocks(iCov, block_size, debug=False):
"""Calculate the decorrelated errors and resolution matrix via BS Eq 19
Args:
iCov (nspec*nwave, nspec*nwave): the inverse covariance matrix
block_size (int): size of the block corresponding to a single spectrum (i.e. nwave)
Returns:
        ivar (nspec*nwave,): uncorrelated flux inverse variances
        R (nspec*nwave, nspec*nwave): resolution matrix
"""
size = iCov.shape[0]
assert not debug or size % block_size == 0
#- Invert iCov (B&S eq 17)
u, v = np.linalg.eigh((iCov + iCov.T)/2.)
assert not debug or np.all(u > 0), 'Found some negative iCov eigenvalues.'
# Check that the eigenvectors are orthonormal so that vt.v = 1
assert not debug or np.allclose(np.eye(len(u)), v.T.dot(v))
if debug:
threshold = 10.0 * sys.float_info.epsilon
maxval = np.max(u)
minval = maxval * threshold
i = u > minval
if np.any(~i):
            raise RuntimeError(f'Eigenvalue below minval {minval}: {u[~i]}')
# u = np.clip(u, minval, None)
C = (v * (1.0/u)).dot(v.T)
#- Calculate C^-1 = QQ (B&S eq 17-19)
Q = np.zeros_like(iCov)
#- Proceed one block at a time
for i in range(0, size, block_size):
s = np.s_[i:i+block_size, i:i+block_size]
#- Invert this block
bu, bv = np.linalg.eigh(C[s])
        assert not debug or np.all(bu > 0)
from tqdm import tqdm
from taskinit import ms, tb, qa
from taskinit import iatool
from taskinit import cltool
from delmod_cli import delmod_cli as delmod
from clearcal_cli import clearcal_cli as clearcal
from suncasa.utils import mstools as mstl
from suncasa.utils import helioimage2fits as hf
import shutil, os
import sunpy.coordinates.ephemeris as eph
import numpy as np
from gaincal_cli import gaincal_cli as gaincal
from applycal_cli import applycal_cli as applycal
from flagdata_cli import flagdata_cli as flagdata
from flagmanager_cli import flagmanager_cli as flagmanager
from uvsub_cli import uvsub_cli as uvsub
from split_cli import split_cli as split
from tclean_cli import tclean_cli as tclean
from ft_cli import ft_cli as ft
from suncasa.utils import mstools as mstl
# def ant_trange(vis):
# ''' Figure out nominal times for tracking of old EOVSA antennas, and return time
# range in CASA format
# '''
# import eovsa_array as ea
# from astropy.time import Time
# # Get the Sun transit time, based on the date in the vis file name (must have UDByyyymmdd in the name)
# aa = ea.eovsa_array()
# date = vis.split('UDB')[-1][:8]
# slashdate = date[:4] + '/' + date[4:6] + '/' + date[6:8]
# aa.date = slashdate
# sun = aa.cat['Sun']
# mjd_transit = Time(aa.next_transit(sun).datetime(), format='datetime').mjd
# # Construct timerange based on +/- 3h55m from transit time (when all dishes are nominally tracking)
# trange = Time(mjd_transit - 0.1632, format='mjd').iso[:19] + '~' + Time(mjd_transit + 0.1632, format='mjd').iso[:19]
# trange = trange.replace('-', '/').replace(' ', '/')
# return trange
def ant_trange(vis):
''' Figure out nominal times for tracking of old EOVSA antennas, and return time
range in CASA format
'''
import eovsa_array as ea
from astropy.time import Time
from taskinit import ms
# Get timerange from the visibility file
# msinfo = dict.fromkeys(['vis', 'scans', 'fieldids', 'btimes', 'btimestr', 'inttimes', 'ras', 'decs', 'observatory'])
ms.open(vis)
# metadata = ms.metadata()
scans = ms.getscansummary()
sk = np.sort(scans.keys())
vistrange = np.array([scans[sk[0]]['0']['BeginTime'], scans[sk[-1]]['0']['EndTime']])
# Get the Sun transit time, based on the date in the vis file name (must have UDByyyymmdd in the name)
aa = ea.eovsa_array()
date = vis.split('UDB')[-1][:8]
slashdate = date[:4] + '/' + date[4:6] + '/' + date[6:8]
aa.date = slashdate
sun = aa.cat['Sun']
mjd_transit = Time(aa.next_transit(sun).datetime(), format='datetime').mjd
# Construct timerange limits based on +/- 3h55m from transit time (when all dishes are nominally tracking)
# and clip the visibility range not to exceed those limits
mjdrange = np.clip(vistrange, mjd_transit - 0.1632, mjd_transit + 0.1632)
trange = Time(mjdrange[0], format='mjd').iso[:19] + '~' + Time(mjdrange[1], format='mjd').iso[:19]
trange = trange.replace('-', '/').replace(' ', '/')
return trange
def gaussian2d(x, y, amplitude, x0, y0, sigma_x, sigma_y, theta):
x0 = float(x0)
y0 = float(y0)
a = (np.cos(theta) ** 2) / (2 * sigma_x ** 2) + (np.sin(theta) ** 2) / (2 * sigma_y ** 2)
b = -(np.sin(2 * theta)) / (4 * sigma_x ** 2) + (np.sin(2 * theta)) / (4 * sigma_y ** 2)
c = (np.sin(theta) ** 2) / (2 * sigma_x ** 2) + (np.cos(theta) ** 2) / (2 * sigma_y ** 2)
g = amplitude * np.exp(- (a * ((x - x0) ** 2) + 2 * b * (x - x0) * (y - y0) + c * ((y - y0) ** 2)))
return g
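# Illustrative evaluation of the 2D Gaussian on a small grid (values are
# arbitrary, only meant to show the call signature):
#   x = y = (np.arange(31) - 15) * 2.0
#   xx, yy = np.meshgrid(x, y)
#   g = gaussian2d(xx, yy, 1.0, 0.0, 0.0, 10.0, 5.0, np.pi / 6)
#   g = g / np.nansum(g)  # normalise before using as a convolution kernel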
def writediskxml(dsize, fdens, freq, xmlfile='SOLDISK.xml'):
import xml.etree.ElementTree as ET
# create the file structure
sdk = ET.Element('SOLDISK')
sdk_dsize = ET.SubElement(sdk, 'item')
sdk_fdens = ET.SubElement(sdk, 'item')
sdk_freqs = ET.SubElement(sdk, 'item')
sdk_dsize.set('disk_size', ','.join(dsize))
sdk_fdens.set('flux_dens', ','.join(['{:.1f}Jy'.format(s) for s in fdens]))
sdk_freqs.set('freq', ','.join(freq))
# create a new XML file with the results
mydata = ET.tostring(sdk)
if os.path.exists(xmlfile):
os.system('rm -rf {}'.format(xmlfile))
with open(xmlfile, 'w') as sf:
sf.write(mydata)
return xmlfile
def readdiskxml(xmlfile):
import astropy.units as u
import xml.etree.ElementTree as ET
tree = ET.parse(xmlfile)
root = tree.getroot()
diskinfo = {}
for elem in root:
d = elem.attrib
for k, v in d.items():
v_ = v.split(',')
v_ = [u.Unit(f).to_string().split(' ') for f in v_]
diskinfo[k] = []
for val, uni in v_:
diskinfo[k].append(float(val))
diskinfo[k] = np.array(diskinfo[k]) * u.Unit(uni)
return diskinfo
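# Round-trip sketch (illustrative values only; the real disk sizes and fluxes
# come from the fitting routines below):
#   xmlfile = writediskxml(dsize=['16.7arcmin'], fdens=[50.0], freq=['1.4GHz'])
#   diskinfo = readdiskxml(xmlfile)
#   diskinfo['disk_size']  # astropy Quantity array in arcmin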
def image_adddisk(eofile, diskinfo, edgeconvmode='frommergeddisk', caltbonly=False):
'''
:param eofile:
:param diskxmlfile:
:param edgeconvmode: available mode: frommergeddisk,frombeam
:return:
'''
from sunpy import map as smap
from suncasa.utils import plot_mapX as pmX
from scipy import constants
import astropy.units as u
from sunpy import io as sio
dsize = diskinfo['disk_size']
fdens = diskinfo['flux_dens']
freqs = diskinfo['freq']
eomap = smap.Map(eofile)
eomap_ = pmX.Sunmap(eomap)
header = eomap.meta
bmaj = header['bmaj'] * 3600 * u.arcsec
bmin = header['bmin'] * 3600 * u.arcsec
cell = (header['cdelt1'] * u.Unit(header['cunit1']) + header['cdelt2'] * u.Unit(header['cunit2'])) / 2.0
bmsize = (bmaj + bmin) / 2.0
data = eomap.data # remember the data order is reversed due to the FITS convension
keys = header.keys()
values = header.values()
mapx, mapy = eomap_.map2wcsgrids(cell=False)
mapx = mapx[:-1, :-1]
mapy = mapy[:-1, :-1]
rdisk = np.sqrt(mapx ** 2 + mapy ** 2)
k_b = constants.k
c_l = constants.c
const = 2. * k_b / c_l ** 2
pix_area = (cell.to(u.rad).value) ** 2
jy_to_si = 1e-26
factor2 = 1.
faxis = keys[values.index('FREQ')][-1]
if caltbonly:
edgeconvmode = ''
if edgeconvmode == 'frommergeddisk':
nul = header['CRVAL' + faxis] + header['CDELT' + faxis] * (1 - header['CRPIX' + faxis])
nuh = header['CRVAL' + faxis] + header['CDELT' + faxis] * (header['NAXIS' + faxis] - header['CRPIX' + faxis])
## get the frequency range of the image
nu_bound = (np.array([nul, nuh]) + 0.5 * np.array([-1, 1]) * header['CDELT' + faxis]) * u.Unit(
header['cunit' + faxis])
nu_bound = nu_bound.to(u.GHz)
## get the frequencies of the disk models
fidxs = np.logical_and(freqs > nu_bound[0], freqs < nu_bound[1])
ny, nx = rdisk.shape
freqs_ = freqs[fidxs]
fdens_ = fdens[fidxs] / 2.0 # divide by 2 because fdens is 2x solar flux density
dsize_ = dsize[fidxs]
fdisk_ = np.empty((len(freqs_), ny, nx))
fdisk_[:] = np.nan
for fidx, freq in enumerate(freqs_):
fdisk_[fidx, ...][rdisk <= dsize_[fidx].value] = 1.0
# nu = header['CRVAL' + faxis] + header['CDELT' + faxis] * (1 - header['CRPIX' + faxis])
factor = const * freq.to(u.Hz).value ** 2 # SI unit
jy2tb = jy_to_si / pix_area / factor * factor2
fdisk_[fidx, ...] = fdisk_[fidx, ...] / np.nansum(fdisk_[fidx, ...]) * fdens_[fidx].value
fdisk_[fidx, ...] = fdisk_[fidx, ...] * jy2tb
# # fdisk_[np.isnan(fdisk_)] = 0.0
tbdisk = np.nanmean(fdisk_, axis=0)
tbdisk[np.isnan(tbdisk)] = 0.0
sig2fwhm = 2.0 * np.sqrt(2 * np.log(2))
x0, y0 = 0, 0
sigx, sigy = bmaj.value / sig2fwhm, bmin.value / sig2fwhm
theta = -(90.0 - header['bpa']) * u.deg
x = (np.arange(31) - 15) * cell.value
y = (np.arange(31) - 15) * cell.value
x, y = np.meshgrid(x, y)
kernel = gaussian2d(x, y, 1.0, x0, y0, sigx, sigy, theta.to(u.radian).value)
kernel = kernel / np.nansum(kernel)
from scipy import signal
tbdisk = signal.fftconvolve(tbdisk, kernel, mode='same')
else:
nu = header['CRVAL' + faxis] + header['CDELT' + faxis] * (1 - header['CRPIX' + faxis])
freqghz = nu / 1.0e9
factor = const * nu ** 2 # SI unit
jy2tb = jy_to_si / pix_area / factor * factor2
p_dsize = np.poly1d(np.polyfit(freqs.value, dsize.value, 15))
p_fdens = np.poly1d(
np.polyfit(freqs.value, fdens.value, 15)) / 2. # divide by 2 because fdens is 2x solar flux density
if edgeconvmode == 'frombeam':
from scipy.special import erfc
factor_erfc = 2.0 ## erfc function ranges from 0 to 2
fdisk = erfc((rdisk - p_dsize(freqghz)) / bmsize.value) / factor_erfc
else:
fdisk = np.zeros_like(rdisk)
fdisk[rdisk <= p_dsize(freqghz)] = 1.0
fdisk = fdisk / np.nansum(fdisk) * p_fdens(freqghz)
tbdisk = fdisk * jy2tb
tb_disk = np.nanmax(tbdisk)
if caltbonly:
return tb_disk
else:
datanew = data + tbdisk
# datanew[np.isnan(data)] = 0.0
header['TBDISK'] = tb_disk
header['TBUNIT'] = 'K'
eomap_disk = smap.Map(datanew, header)
nametmp = eofile.split('.')
nametmp.insert(-1, 'disk')
outfits = '.'.join(nametmp)
datanew = datanew.astype(np.float32)
if os.path.exists(outfits):
os.system('rm -rf {}'.format(outfits))
sio.write_file(outfits, datanew, header)
return eomap_disk, tb_disk, outfits
def read_ms(vis):
''' Read a CASA ms file and return a dictionary of amplitude, phase, uvdistance,
uvangle, frequency (GHz) and time (MJD). Currently only returns the XX IF channel.
vis Name of the visibility (ms) folder
'''
ms.open(vis)
spwinfo = ms.getspectralwindowinfo()
nspw = len(spwinfo.keys())
for i in range(nspw):
print('Working on spw', i)
ms.selectinit(datadescid=0, reset=True)
ms.selectinit(datadescid=i)
if i == 0:
spw = ms.getdata(['amplitude', 'phase', 'u', 'v', 'axis_info'], ifraxis=True)
xxamp = spw['amplitude']
xxpha = spw['phase']
fghz = spw['axis_info']['freq_axis']['chan_freq'][:, 0] / 1e9
band = np.ones_like(fghz) * i
mjd = spw['axis_info']['time_axis']['MJDseconds'] / 86400.
uvdist = np.sqrt(spw['u'] ** 2 + spw['v'] ** 2)
uvang = np.angle(spw['u'] + 1j * spw['v'])
else:
spw = ms.getdata(['amplitude', 'phase', 'axis_info'], ifraxis=True)
xxamp = np.concatenate((xxamp, spw['amplitude']), 1)
xxpha = np.concatenate((xxpha, spw['phase']), 1)
fg = spw['axis_info']['freq_axis']['chan_freq'][:, 0] / 1e9
fghz = np.concatenate((fghz, fg))
band = np.concatenate((band, np.ones_like(fg) * i))
ms.close()
return {'amp': xxamp, 'phase': xxpha, 'fghz': fghz, 'band': band, 'mjd': mjd, 'uvdist': uvdist, 'uvangle': uvang}
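# Usage sketch (assumption: `vis` is a calibrated EOVSA measurement set; the
# axis ordering follows CASA's ms.getdata with ifraxis=True):
#   out = read_ms('UDB20190901.ms')
#   out['amp'].shape  # (npol, nchan, nbaseline, ntime)
#   out['fghz']       # concatenated channel frequencies in GHz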
def im2cl(imname, clname, convol=True, verbose=False):
if os.path.exists(clname):
os.system('rm -rf {}'.format(clname))
ia = iatool()
ia.open(imname)
ia2 = iatool()
ia2.open(imname.replace('.model', '.image'))
bm = ia2.restoringbeam()
bmsize = (qa.convert(qa.quantity(bm['major']), 'arcsec')['value'] +
qa.convert(qa.quantity(bm['minor']), 'arcsec')['value']) / 2.0
if convol:
im2 = ia.sepconvolve(types=['gaussian', 'gaussian'], widths="{0:}arcsec {0:}arcsec".format(2.5*bmsize),
overwrite=True)
ia2.done()
else:
im2 = ia
cl = cltool()
srcs = im2.findsources(point=False, cutoff=0.3, width=int(np.ceil(bmsize/2.5)))
# srcs = ia.findsources(point=False, cutoff=0.1, width=5)
if verbose:
for k, v in srcs.iteritems():
if k.startswith('comp'):
## note: Stokes I to XX
print(srcs[k]['flux']['value'])
# srcs[k]['flux']['value'] = srcs[k]['flux']['value'] / 2.0
cl.fromrecord(srcs)
cl.rename(clname)
cl.done()
ia.done()
im2.done()
def fit_diskmodel(out, bidx, rstn_flux, uvfitrange=[1, 150], angle_tolerance=np.pi / 2, doplot=True):
''' Given the result returned by read_ms(), plots the amplitude vs. uvdistance
separately for polar and equatorial directions rotated for P-angle, then overplots
a disk model for a disk enlarged by eqfac in the equatorial direction, and polfac
in the polar direction. Also requires the RSTN flux spectrum for the date of the ms,
determined from (example for 2019-09-01):
import rstn
frq, flux = rstn.rd_rstnflux(t=Time('2019-09-01'))
rstn_flux = rstn.rstn2ant(frq, flux, out['fghz']*1000, t=Time('2019-09-01'))
'''
from util import bl2ord, lobe
import matplotlib.pylab as plt
import sun_pos
from scipy.special import j1
import scipy.constants
mperns = scipy.constants.c / 1e9 # speed of light in m/ns
# Rotate uv angle for P-angle
pa, b0, r = sun_pos.get_pb0r(out['mjd'][0], arcsec=True)
uvangle = lobe(out['uvangle'] - pa * np.pi / 180.)
a = 2 * r * np.pi ** 2 / (180. * 3600.) # Initial scale for z, uses photospheric radius of the Sun
if doplot: f, ax = plt.subplots(3, 1)
uvmin, uvmax = uvfitrange
uvdeq = []
uvdpol = []
ampeq = []
amppol = []
zeq = []
zpol = []
# Loop over antennas 1-4
antmax = 7
at = angle_tolerance
for i in range(4):
fidx, = np.where(out['band'] == bidx) # Array of frequency indexes for channels in this band
for j, fi in enumerate(fidx):
amp = out['amp'][0, fi, bl2ord[i, i + 1:antmax]].flatten() / 10000. # Convert to sfu
# Use only non-zero amplitudes
good, = np.where(amp != 0)
amp = amp[good]
uva = uvangle[bl2ord[i, i + 1:antmax]].flatten()[good]
# Equatorial points are within +/- pi/8 of solar equator
eq, = np.where(np.logical_or(np.abs(uva) < at / 2, np.abs(uva) >= np.pi - at / 2))
# Polar points are within +/- pi/8 of solar pole
pol, = np.where(np.logical_and(np.abs(uva) >= np.pi / 2 - at / 2, np.abs(uva) < np.pi / 2 + at / 2))
uvd = out['uvdist'][bl2ord[i, i + 1:antmax]].flatten()[good] * out['fghz'][fi] / mperns # Wavelengths
# Add data for this set of baselines to global arrays
uvdeq.append(uvd[eq])
uvdpol.append(uvd[pol])
ampeq.append(amp[eq])
amppol.append(amp[pol])
zeq.append(uvd[eq])
zpol.append(uvd[pol])
uvdeq = np.concatenate(uvdeq)
uvdpol = np.concatenate(uvdpol)
uvdall = np.concatenate((uvdeq, uvdpol))
ampeq = np.concatenate(ampeq)
amppol = np.concatenate(amppol)
ampall = np.concatenate((ampeq, amppol))
zeq = np.concatenate(zeq)
zpol = np.concatenate(zpol)
zall = np.concatenate((zeq, zpol))
# These indexes are for a restricted uv-range to be fitted
ieq, = np.where(np.logical_and(uvdeq > uvmin, uvdeq <= uvmax))
ipol, = np.where(np.logical_and(uvdpol > uvmin, uvdpol <= uvmax))
iall, = np.where(np.logical_and(uvdall > uvmin, uvdall <= uvmax))
if doplot:
# Plot all of the data points
ax[0].plot(uvdeq, ampeq, 'k+')
ax[1].plot(uvdpol, amppol, 'k+')
ax[2].plot(uvdall, ampall, 'k+')
# Overplot the fitted data points in a different color
ax[0].plot(uvdeq[ieq], ampeq[ieq], 'b+')
ax[1].plot(uvdpol[ipol], amppol[ipol], 'b+')
ax[2].plot(uvdall[iall], ampall[iall], 'b+')
# Minimize ratio of points to model
ntries = 300
solfac = np.linspace(1.0, 1.3, ntries)
d2m_eq = np.zeros(ntries, np.float)
d2m_pol = np.zeros(ntries, np.float)
d2m_all = np.zeros(ntries, np.float)
    sfac = np.zeros(ntries, np.float)
import pytest
from xarray import DataArray
import scipy.stats as st
from numpy import (
argmin,
array,
concatenate,
dot,
exp,
eye,
kron,
nan,
reshape,
sqrt,
zeros,
)
from numpy.random import RandomState
from numpy.testing import assert_allclose, assert_array_equal
from pandas import DataFrame
from limix.qc import normalise_covariance
from limix.qtl import scan
from limix.stats import linear_kinship, multivariate_normal as mvn
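# `vec` and `unvec` are used by the multi-trait tests below but are neither
# imported nor defined in this excerpt; minimal column-major (Fortran-order)
# implementations of the standard vec operator and its inverse are assumed here.
def vec(x):
    return reshape(x, (-1,), order="F")


def unvec(x, shape):
    return reshape(x, shape, order="F")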
def _test_qtl_scan_st(lik):
random = RandomState(0)
n = 30
ncovariates = 3
M = random.randn(n, ncovariates)
v0 = random.rand()
v1 = random.rand()
G = random.randn(n, 4)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = random.randn(ncovariates)
alpha = random.randn(G.shape[1])
m = M @ beta + G @ alpha
y = mvn(random, m, v0 * K + v1 * eye(n))
idx = [[0, 1], 2, [3]]
if lik == "poisson":
y = random.poisson(exp(y))
elif lik == "bernoulli":
y = random.binomial(1, 1 / (1 + exp(-y)))
elif lik == "probit":
y = random.binomial(1, st.norm.cdf(y))
elif lik == "binomial":
ntrials = random.randint(0, 30, len(y))
y = random.binomial(ntrials, 1 / (1 + exp(-y)))
lik = (lik, ntrials)
r = scan(G, y, lik=lik, idx=idx, K=K, M=M, verbose=False)
str(r)
str(r.stats.head())
str(r.effsizes["h2"].head())
str(r.h0.trait)
str(r.h0.likelihood)
str(r.h0.lml)
str(r.h0.effsizes)
str(r.h0.variances)
def test_qtl_scan_st():
_test_qtl_scan_st("normal")
_test_qtl_scan_st("poisson")
_test_qtl_scan_st("bernoulli")
_test_qtl_scan_st("probit")
_test_qtl_scan_st("binomial")
def test_qtl_scan_three_hypotheses_mt():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A0 = random.randn(ntraits, 1)
A1 = random.randn(ntraits, 2)
A01 = concatenate((A0, A1), axis=1)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A01.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A01, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, A0=A0, A1=A1, verbose=False)
str(r)
def test_qtl_scan_two_hypotheses_mt():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A0 = random.randn(ntraits, 1)
A1 = random.randn(ntraits, 2)
A01 = concatenate((A0, A1), axis=1)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A01.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A01, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, A1=A1, verbose=False)
str(r)
def test_qtl_scan_two_hypotheses_mt_A0A1_none():
random = RandomState(0)
n = 30
ntraits = 2
ncovariates = 3
A = random.randn(ntraits, ntraits)
A = A @ A.T
M = random.randn(n, ncovariates)
C0 = random.randn(ntraits, ntraits)
C0 = C0 @ C0.T
C1 = random.randn(ntraits, ntraits)
C1 = C1 @ C1.T
G = random.randn(n, 4)
A1 = eye(ntraits)
K = random.randn(n, n + 1)
K = normalise_covariance(K @ K.T)
beta = vec(random.randn(ntraits, ncovariates))
alpha = vec(random.randn(A1.shape[1], G.shape[1]))
m = kron(A, M) @ beta + kron(A1, G) @ alpha
Y = unvec(mvn(random, m, kron(C0, K) + kron(C1, eye(n))), (n, -1))
Y = DataArray(Y, dims=["sample", "trait"], coords={"trait": ["WA", "Cx"]})
idx = [[0, 1], 2, [3]]
r = scan(G, Y, idx=idx, K=K, M=M, A=A, verbose=False)
df = r.effsizes["h2"]
df = df[df["test"] == 0]
assert_array_equal(df["trait"], ["WA"] * 3 + ["Cx"] * 3 + [None] * 4)
assert_array_equal(
df["env"], [None] * 6 + ["env1_WA", "env1_WA", "env1_Cx", "env1_Cx"]
)
str(r)
def test_qtl_scan_lmm():
random = RandomState(0)
nsamples = 50
G = random.randn(50, 100)
K = linear_kinship(G[:, 0:80], verbose=False)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 68:70]
result = scan(X, y, lik="normal", K=K, M=M, verbose=False)
pv = result.stats["pv20"]
ix_best_snp = argmin(array(pv))
M = concatenate((M, X[:, [ix_best_snp]]), axis=1)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, atol=1e-6)
def test_qtl_scan_lmm_nokinship():
random = RandomState(0)
nsamples = 50
G = random.randn(50, 100)
K = linear_kinship(G[:, 0:80], verbose=False)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 68:70]
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"].values
assert_allclose(pv[:2], [8.159539103135342e-05, 0.10807353641893498], atol=1e-5)
def test_qtl_scan_lmm_repeat_samples_by_index():
random = RandomState(0)
nsamples = 30
samples = ["sample{}".format(i) for i in range(nsamples)]
G = random.randn(nsamples, 100)
G = DataFrame(data=G, index=samples)
K = linear_kinship(G.values[:, 0:80], verbose=False)
K = DataFrame(data=K, index=samples, columns=samples)
y0 = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
y1 = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
y = concatenate((y0, y1))
y = DataFrame(data=y, index=samples + samples)
M = G.values[:, :5]
X = G.values[:, 68:70]
M = DataFrame(data=M, index=samples)
X = DataFrame(data=X, index=samples)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv.values[0], 0.9920306566395604, rtol=1e-6)
ix_best_snp = argmin(array(result.stats["pv20"]))
M = concatenate((M, X.loc[:, [ix_best_snp]]), axis=1)
M = DataFrame(data=M, index=samples)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, rtol=1e-6)
assert_allclose(pv.values[0], 0.6684700834450028, rtol=1e-6)
X.sort_index(inplace=True, ascending=False)
X = DataFrame(X.values, index=X.index.values)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[ix_best_snp], 1.0, rtol=1e-6)
assert_allclose(pv.values[0], 0.6684700834450028, rtol=1e-6)
def test_qtl_scan_lmm_different_samples_order():
random = RandomState(0)
nsamples = 50
samples = ["sample{}".format(i) for i in range(nsamples)]
G = random.randn(nsamples, 100)
G = DataFrame(data=G, index=samples)
K = linear_kinship(G.values[:, 0:80], verbose=False)
K = DataFrame(data=K, index=samples, columns=samples)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
y = DataFrame(data=y, index=samples)
M = G.values[:, :5]
X = G.values[:, 68:70]
M = DataFrame(data=M, index=samples)
X = DataFrame(data=X, index=samples)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv.values[1], 0.10807353644788478, rtol=1e-6)
X.sort_index(inplace=True, ascending=False)
X = DataFrame(X.values, index=X.index.values)
result = scan(X, y, "normal", K, M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv.values[1], 0.10807353644788478, rtol=1e-6)
def test_qtl_scan_glmm_binomial():
random = RandomState(0)
nsamples = 25
X = random.randn(nsamples, 2)
G = random.randn(nsamples, 100)
K = dot(G, G.T)
ntrials = random.randint(1, 100, nsamples)
z = dot(G, random.randn(100)) / sqrt(100)
successes = zeros(len(ntrials), int)
for i, nt in enumerate(ntrials):
for _ in range(nt):
successes[i] += int(z[i] + 0.5 * random.randn() > 0)
result = scan(X, successes, ("binomial", ntrials), K, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv, [0.9315770010211236, 0.8457015828837173], atol=1e-6, rtol=1e-6)
def test_qtl_scan_glmm_wrong_dimensions():
random = RandomState(0)
nsamples = 25
X = random.randn(nsamples, 2)
G = random.randn(nsamples, 100)
K = dot(G, G.T)
ntrials = random.randint(1, 100, nsamples)
z = dot(G, random.randn(100)) / sqrt(100)
successes = zeros(len(ntrials), int)
for i, nt in enumerate(ntrials):
for _ in range(nt):
successes[i] += int(z[i] + 0.5 * random.randn() > 0)
M = random.randn(49, 2)
scan(X, successes, ("binomial", ntrials), K, M=M, verbose=False)
def test_qtl_scan_glmm_bernoulli():
random = RandomState(0)
nsamples = 25
X = random.randn(nsamples, 2)
G = random.randn(nsamples, 100)
K = dot(G, G.T)
ntrials = random.randint(1, 2, nsamples)
z = dot(G, random.randn(100)) / sqrt(100)
successes = zeros(len(ntrials), int)
for i, nt in enumerate(ntrials):
for _ in range(nt):
successes[i] += int(z[i] + 0.5 * random.randn() > 0)
result = scan(X, successes, "bernoulli", K, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv, [0.3399326545917558, 0.8269454251659921], rtol=1e-5)
def test_qtl_scan_glmm_bernoulli_nokinship():
random = RandomState(0)
nsamples = 25
X = random.randn(nsamples, 2)
G = random.randn(nsamples, 100)
ntrials = random.randint(1, 2, nsamples)
z = dot(G, random.randn(100)) / sqrt(100)
successes = zeros(len(ntrials), int)
for i, nt in enumerate(ntrials):
for _ in range(nt):
successes[i] += int(z[i] + 0.5 * random.randn() > 0)
result = scan(X, successes, "bernoulli", verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv, [0.3399067917883736, 0.8269568797830423], rtol=1e-5)
def test_qtl_scan_lm():
random = RandomState(0)
nsamples = 25
G = random.randn(nsamples, 100)
y = dot(G, random.randn(100)) / sqrt(100) + 0.2 * random.randn(nsamples)
M = G[:, :5]
X = G[:, 5:]
result = scan(X, y, "normal", M=M, verbose=False)
pv = result.stats["pv20"]
assert_allclose(pv[:2], [0.02625506841465465, 0.9162689001409643], rtol=1e-5)
def test_qtl_scan_gmm_binomial():
random = RandomState(0)
nsamples = 25
X = random.randn(nsamples, 2)
ntrials = random.randint(1, nsamples, nsamples)
z = dot(X, random.randn(2))
successes = zeros(len(ntrials), int)
for i in range(len(ntrials)):
for _ in range(ntrials[i]):
successes[i] += int(z[i] + 0.5 * random.randn() > 0)
result = scan(X, successes, ("binomial", ntrials), verbose=False)
pv = result.stats["pv20"]
assert_allclose(
pv, [2.4604711379400065e-06, 0.01823278752006871], rtol=1e-5, atol=1e-5
)
def test_qtl_finite():
    random = RandomState(0)
from __future__ import absolute_import, print_function, division
import theano
import theano.tensor as T
from theano import function, shared
from theano.tests import unittest_tools as utt
from theano.tensor.nnet.ConvTransp3D import convTransp3D, ConvTransp3D
from theano.tensor.nnet.ConvGrad3D import convGrad3D, ConvGrad3D
from theano.tensor.nnet.Conv3D import conv3D, Conv3D
from theano.tests.unittest_tools import attr
from nose.plugins.skip import SkipTest
import numpy as N
from six.moves import xrange
import copy
import theano.sparse
if theano.sparse.enable_sparse:
from scipy import sparse
floatX = theano.config.floatX
# TODO: each individual test method should seed rng with utt.fetch_seed()
# as it is right now, setUp does the seeding, so if you run just
# a subset of the tests they will do different things than if you
# run all of them
class DummyConv3D:
"""A dummy version of Conv3D passed to verify_grad
Stores a fixed stride, since stride is not differentiable
Exposes only one scalar argument, which is used as the position
along a parametrically defined line, with 0 being at VwbVals
Direction of the line is chosen randomly at construction
The reason for locking the inputs to lie on this line is so that the
verify_grad will not need to test hundreds of variables. Disadvantage
    is we can't be certain that all of them are correct, advantage is that
this random projection lets us test lots of variables very quickly """
def __init__(self, rng, VWbVals, d):
"""
param: rng Random number generator used to pick direction of the
line
param: VWbVals tuple containing values to test V,W,b around
param: d shared variable for d, the stride
"""
self.V, self.W, self.b = VWbVals
self.dV = shared(rng.uniform(-1, 1,
self.V.get_value(borrow=True).shape))
self.dW = shared(rng.uniform(-1, 1,
self.W.get_value(borrow=True).shape))
self.db = shared(rng.uniform(-1, 1,
self.b.get_value(borrow=True).shape))
self.d = d
def __call__(self, t):
output = conv3D(self.V + t * self.dV, self.W + t * self.dW,
self.b + t * self.db, self.d)
return output
class DummyConvGrad3D:
def __init__(self, rng, VdHvals, d, WShape):
"""
param: rng Random number generator used to pick direction of the
line
param: VWbVals tuple containing values to test V,W,b around
param: d shared variable for d, the stride
"""
self.V, self.dCdH = VdHvals
self.dV = shared(rng.uniform(-1, 1,
self.V.get_value(borrow=True).shape))
self.ddCdH = shared(rng.uniform(
-1, 1, self.dCdH.get_value(borrow=True).shape))
self.d = d
self.WShape = WShape
def __call__(self, t):
output = convGrad3D(self.V + t * self.dV, self.d, self.WShape,
self.dCdH + t * self.ddCdH)
return output
class DummyConvTransp3D:
def __init__(self, rng, WbHvals, d, RShape):
"""
param: rng Random number generator used to pick direction of the
line
param: VWbVals tuple containing values to test V,W,b around
param: d shared variable for d, the stride
"""
self.W, self.b, self.H = WbHvals
self.dW = rng.uniform(-1, 1, self.W.get_value(borrow=True).shape)
self.db = rng.uniform(-1, 1, self.b.get_value(borrow=True).shape)
self.dH = rng.uniform(-1, 1, self.H.get_value(borrow=True).shape)
        self.dW, self.db = shared(self.dW), shared(self.db)
self.dH = shared(self.dH)
self.d = d
self.RShape = RShape
def __call__(self, t):
output = convTransp3D(self.W + t * self.dW, self.b + t * self.db,
self.d, self.H + t * self.dH, self.RShape)
return output
class TestConv3D(utt.InferShapeTester):
def setUp(self):
super(TestConv3D, self).setUp()
utt.seed_rng()
self.rng = N.random.RandomState(utt.fetch_seed())
mode = copy.copy(theano.compile.mode.get_default_mode())
mode.check_py_code = False
self.W = shared(N.ndarray(shape=(1, 1, 1, 1, 1), dtype=floatX))
self.W.name = 'W'
        self.b = shared(N.zeros(1, dtype=floatX))
import torch
import argparse
import os
import json
import data_def
import numpy as np
from pprint import pprint
from tqdm import tqdm
from data_utils import *
import data_loader_util
import reprocess_labels_utils
import spatial_transforms
from PIL import Image
DEFAULT_VIDEO_PATH = '../data/video_small'
DEFAULT_LABEL_PATH = '../data/label_aligned'
DEFAULT_SEGMENTED_LABEL_PATH = '../data/label_cropped/annotation.json'
DEFAULT_CLIP_LENGTH = 200 # frames
DATALOADER_NUM_WORKERS = 1
TRAIN_FILENAMES_MINI = [
'russell_stable3.mp4',
]
TRAIN_FILENAMES = [
'kevin_random_moves_quick.mp4',
'kevin_random_moves_quick_2.mp4',
'kevin_random_moves_quick_3.mp4',
'kevin_random_moves_quick_4.mp4',
'kevin_rotate_1.mp4',
'kevin_simple_shuffle_1.mp4',
'kevin_single_moves_2.mp4',
'kevin_single_solve_1.mp4',
'kevin_solve_play_1.mp4',
'kevin_solve_play_10.mp4',
'kevin_solve_play_11.mp4',
'kevin_solve_play_12.mp4',
'kevin_solve_play_13.mp4',
'kevin_solve_play_2.mp4',
'kevin_solve_play_3.mp4',
'kevin_solve_play_7.mp4',
'kevin_solve_play_8.mp4',
'kevin_solve_play_9.mp4',
'russell_scramble0.mp4',
'russell_scramble1.mp4',
'russell_scramble3.mp4',
'russell_scramble4.mp4',
'russell_scramble5.mp4',
'russell_scramble7.mp4',
'russell_stable0.mp4',
'russell_stable1.mp4',
'russell_stable2.mp4',
'russell_stable3.mp4',
'zhouheng_cfop_solve.mp4',
'zhouheng_long_solve_1.mp4',
'zhouheng_long_solve_2.mp4',
'zhouheng_long_solve_3.mp4',
'zhouheng_oll_algorithm.mp4',
'zhouheng_pll_algorithm_fast.mp4',
'zhouheng_rotation.mp4',
'zhouheng_scramble_01.mp4',
'zhouheng_scramble_03.mp4',
'zhouheng_weird_turns.mp4',
]
DEV_FILENAMES = [
'kevin_single_moves_1.mp4',
'kevin_solve_play_6.mp4',
'zhouheng_scramble_02.mp4',
'russell_scramble2.mp4',
]
TEST_FILENAMES = [
'zhouheng_long_solve_5.mp4',
'kevin_solve_play_5.mp4',
'zhouheng_long_solve_4.mp4',
'russell_scramble6.mp4',
]
EXCLUDE_FILENAMES = [
# 'kevin_solve_play_1.mp4',
# 'kevin_solve_play_10.mp4',
# 'kevin_solve_play_11.mp4',
# 'kevin_solve_play_12.mp4',
# 'kevin_solve_play_13.mp4',
# 'kevin_solve_play_2.mp4',
# 'kevin_solve_play_3.mp4',
# 'kevin_solve_play_7.mp4',
# 'kevin_solve_play_8.mp4',
# 'kevin_solve_play_9.mp4',
# 'kevin_solve_play_6.mp4',
# 'kevin_solve_play_5.mp4',
]
def get_train_data(B, L, verbose_init=True, shorten_factor=1, spatial_augment=True):
train_dataset = VideoDataset(
DEFAULT_VIDEO_PATH,
DEFAULT_LABEL_PATH,
clip_length=L,
verbose_init=verbose_init,
video_filenames=TRAIN_FILENAMES,
shorten_factor=shorten_factor,
spatial_augment=spatial_augment,
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=B, num_workers=DATALOADER_NUM_WORKERS,
)
return train_dataset, train_loader
def get_dev_data(B, L, verbose_init=True):
dev_dataset = VideoDatasetNoSample(
DEFAULT_VIDEO_PATH,
DEFAULT_LABEL_PATH,
clip_length=L,
verbose_init=verbose_init,
video_filenames=DEV_FILENAMES
)
dev_loader = torch.utils.data.DataLoader(
dev_dataset, batch_size=B, num_workers=DATALOADER_NUM_WORKERS,
)
return dev_dataset, dev_loader
def get_eval_dev_data(video_filenames, overlap_length=15):
dev_dataset = VideoDatasetOverlapped(
DEFAULT_VIDEO_PATH,
DEFAULT_LABEL_PATH,
video_filenames=video_filenames,
overlap_length=overlap_length
)
dev_loader = torch.utils.data.DataLoader(
dev_dataset, batch_size=1, num_workers=DATALOADER_NUM_WORKERS,
)
return dev_dataset, dev_loader
def get_segmented_train_data(B, L, spatial_transform=None, temporal_shift=False, verbose_init=True):
print("\nGetting TRAIN data...")
train_dataset = VideoDatasetSegmented(
DEFAULT_VIDEO_PATH,
DEFAULT_SEGMENTED_LABEL_PATH,
spatial_transform=spatial_transform,
temporal_shift=temporal_shift,
verbose_init=verbose_init,
min_accepted_frames=L,
video_filenames=TRAIN_FILENAMES
# video_filenames=TRAIN_FILENAMES_MINI
)
train_loader = torch.utils.data.DataLoader(
train_dataset, batch_size=B, num_workers=DATALOADER_NUM_WORKERS,
)
return train_dataset, train_loader
def get_segmented_dev_data(B, L, verbose_init=True):
print("\nGetting DEV data...")
dev_dataset = VideoDatasetSegmented(
DEFAULT_VIDEO_PATH,
DEFAULT_SEGMENTED_LABEL_PATH,
verbose_init=verbose_init,
min_accepted_frames=L,
video_filenames=DEV_FILENAMES
# video_filenames=TRAIN_FILENAMES_MINI
)
dev_loader = torch.utils.data.DataLoader(
dev_dataset, batch_size=B, num_workers=DATALOADER_NUM_WORKERS,
)
return dev_dataset, dev_loader
def get_segmented_eval_data(L, verbose_init=True):
print("\nGetting TEST data...")
test_dataset = VideoDatasetSegmented(
DEFAULT_VIDEO_PATH,
DEFAULT_SEGMENTED_LABEL_PATH,
verbose_init=verbose_init,
min_accepted_frames=L,
video_filenames=TEST_FILENAMES
# video_filenames=TRAIN_FILENAMES_MINI
)
test_loader = torch.utils.data.DataLoader(
test_dataset, batch_size=5, num_workers=DATALOADER_NUM_WORKERS,
)
return test_dataset, test_loader
class VideoDataset(torch.utils.data.Dataset):
def __init__(
self,
video_path,
label_path,
clip_length=DEFAULT_CLIP_LENGTH,
verbose_init=False,
video_filenames=None,
shorten_factor=1,
spatial_augment=True,
):
self.clip_length = clip_length
self.spatial_augment = spatial_augment
if video_filenames is None:
video_filenames = data_loader_util.walk_video_filenames(video_path)
json_filenames = data_loader_util.get_json_filenames(video_filenames)
for f in json_filenames:
assert os.path.exists(
os.path.join(label_path, f)
), f'Found video, but [{f}] does not exist.'
# load json labels
self.label_dicts = data_loader_util.load_json_labels(
label_path, json_filenames)
# load video specs
self.video_specs = [
get_video_specs(os.path.join(video_path, f))
for f in video_filenames
]
self.num_videos = len(self.video_specs)
# check that all videos have same height and width
assert len(set([spec['height'] for spec in self.video_specs])) == 1, \
'videos are of different height'
assert len(set([spec['width'] for spec in self.video_specs])) == 1, \
'videos are of different width'
data_loader_util.populate_segmentable_ranges(
self.label_dicts, self.video_specs, self.clip_length
)
# check that sampleable segment frames are at least clip_length in each video
for spec in self.video_specs:
assert spec['max_seg_frame'] - spec['min_seg_frame'] >= clip_length,\
f'{spec["filename"]} does not have at least {clip_length} frames within sample-able range'
# when sampling, randomly choose a video proportional to its (num_frames - clip_length + 1)
# such that any valid sampleable segments in the dataset has an equal chance of selected
total_num_valid_segments = [
num_valid_segments(
spec['min_seg_frame'], spec['max_seg_frame'], clip_length
)
for spec in self.video_specs
]
self.random_video_p = np.array(
total_num_valid_segments) / np.sum(total_num_valid_segments)
# since we're randomly sampling clip_length frames per sample, we say that the length of
# this dataset is however many expected sample needed to span number of total frames
self.len = int(np.ceil(sum(total_num_valid_segments) /
clip_length / shorten_factor))
self.random_state = None
if verbose_init:
print('VideoDataset __init__')
print('clip_length:', clip_length)
print(
'frame size:', self.video_specs[0]['height'], 'x', self.video_specs[0]['width']
)
print('video files:')
pprint(video_filenames)
# print('json files:', json_filenames)
def __len__(self):
return self.len
def __getitem__(self, idx):
if self.random_state is None:
            self.random_state = np.random.RandomState()
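            # created lazily here (rather than in __init__) so that each
            # DataLoader worker process ends up with its own random stream
            # when sampling clips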
import matplotlib, zipfile
matplotlib.use('agg')
import sys, numpy as np, matplotlib.pyplot as plt, os, tools21cm as t2c, matplotlib.gridspec as gridspec
from sklearn.metrics import matthews_corrcoef
from glob import glob
from tensorflow.keras.models import load_model
from tqdm import tqdm
from config.net_config import NetworkConfig, PredictionConfig
from utils.other_utils import RotateCube
from utils_network.metrics import iou, iou_loss, dice_coef, dice_coef_loss, balanced_cross_entropy, phi_coef
from utils_network.prediction import SegUnet21cmPredict
from myutils.utils import OrderNdimArray
title_a = '\t\t _ _ _ _ _ \n\t\t| | | | \ | | | | \n\t\t| | | | \| | ___| |_ \n\t\t| | | | . ` |/ _ \ __|\n\t\t| |__| | |\ | __/ |_ \n\t\t \____/|_| \_|\___|\__|\n'
title_b = ' _____ _ _ _ ___ __ \n| __ \ | (_) | | |__ \/_ | \n| |__) | __ ___ __| |_ ___| |_ ___ ) || | ___ _ __ ___ \n| ___/ `__/ _ \/ _` | |/ __| __/ __| / / | |/ __| `_ ` _ \ \n| | | | | __/ (_| | | (__| |_\__ \ / /_ | | (__| | | | | |\n|_| |_| \___|\__,_|_|\___|\__|___/ |____||_|\___|_| |_| |_|\n'
print(title_a+'\n'+title_b)
config_file = sys.argv[1]
conf = PredictionConfig(config_file)
PATH_OUT = conf.path_out
PATH_INPUT = conf.path+conf.pred_data
print(' PATH_INPUT = %s' %PATH_INPUT)
if(PATH_INPUT[-3:] == 'zip'):
    ZIPFILE = True
    PATH_IN_ZIP = PATH_INPUT[PATH_INPUT.rfind('/')+1:-4]+'/'
    PATH_UNZIP = PATH_INPUT[:PATH_INPUT.rfind('/')+1]
else:
    ZIPFILE = False
MAKE_PLOTS = True
# load model
avail_metrics = {'binary_accuracy':'binary_accuracy', 'iou':iou, 'dice_coef':dice_coef, 'iou_loss':iou_loss, 'dice_coef_loss':dice_coef_loss, 'phi_coef':phi_coef, 'mse':'mse', 'mae':'mae', 'binary_crossentropy':'binary_crossentropy', 'balanced_cross_entropy':balanced_cross_entropy}
MODEL_EPOCH = conf.best_epoch
METRICS = [avail_metrics[m] for m in np.append(conf.loss, conf.metrics)]
cb = {func.__name__:func for func in METRICS if not isinstance(func, str)}
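# custom metrics/losses (anything in METRICS that is not a plain string) must be
# handed back to Keras via custom_objects when reloading the saved model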
model = load_model('%smodel-sem21cm_ep%d.h5' %(PATH_OUT+'checkpoints/', MODEL_EPOCH), custom_objects=cb)
try:
os.makedirs(PATH_OUT+'predictions')
except:
pass
PATH_OUT += 'predictions/pred_tobs1200/'
print(' PATH_OUTPUT = %s' %PATH_OUT)
try:
os.makedirs(PATH_OUT+'data')
os.makedirs(PATH_OUT+'plots')
except:
pass
if(os.path.exists('%sastro_data.txt' %PATH_OUT)):
astr_data = np.loadtxt('%sastro_data.txt' %PATH_OUT, unpack=True)
restarts = astr_data[6:].argmin(axis=1)
if(all(int(np.mean(restarts)) == restarts)):
restart = int(np.mean(restarts))
print(' Restart from idx=%d' %restart)
    else:
        raise ValueError(' Restart points do not match.')
phicoef_seg, phicoef_err, phicoef_sp, xn_mask, xn_seg, xn_err, xn_sp, b0_true, b1_true, b2_true, b0_seg, b1_seg, b2_seg, b0_sp, b1_sp, b2_sp = astr_data[6:]
astr_data = astr_data[:6]
else:
if(ZIPFILE):
with zipfile.ZipFile(PATH_INPUT, 'r') as myzip:
astr_data = np.loadtxt(myzip.open('%sastro_params.txt' %PATH_IN_ZIP), unpack=True)
else:
astr_data = np.loadtxt('%sastro_params.txt' %PATH_INPUT, unpack=True)
restart = 0
phicoef_seg = np.zeros(astr_data.shape[1])
phicoef_err = np.zeros_like(phicoef_seg)
phicoef_sp = np.zeros_like(phicoef_seg)
xn_mask = np.zeros_like(phicoef_seg)
xn_seg = np.zeros_like(phicoef_seg)
xn_err = np.zeros_like(phicoef_seg)
xn_sp = np.zeros_like(phicoef_sp)
b0_true = np.zeros_like(phicoef_sp)
b1_true = np.zeros_like(phicoef_sp)
b2_true = np.zeros_like(phicoef_sp)
b0_sp = np.zeros_like(phicoef_sp)
b1_sp = np.zeros_like(phicoef_sp)
b2_sp = np.zeros_like(phicoef_sp)
b0_seg = np.zeros_like(phicoef_sp)
b1_seg = np.zeros_like(phicoef_sp)
b2_seg = np.zeros_like(phicoef_sp)
params = {'HII_DIM':128, 'DIM':384, 'BOX_LEN':256}
my_ext = [0, params['BOX_LEN'], 0, params['BOX_LEN']]
zc = (astr_data[1,:] < 7.5) + (astr_data[1,:] > 8.3)
c1 = (astr_data[5,:]<=0.25)*(astr_data[5,:]>=0.15)*zc
c2 = (astr_data[5,:]<=0.55)*(astr_data[5,:]>=0.45)*zc
c3 = (astr_data[5,:]<=0.85)*(astr_data[5,:]>=0.75)*zc
indexes = astr_data[0,:]
new_idx = indexes[c1+c2+c3].astype(int)
#for i in tqdm(range(restart, astr_data.shape[1])):
print(new_idx)
for new_i in tqdm(range(3, new_idx.size)):
i = new_idx[new_i]
z = astr_data[1,i]
zeta = astr_data[2,i]
Rmfp = astr_data[3,i]
Tvir = astr_data[4,i]
xn = astr_data[5,i]
#print('z = %.3f x_n =%.3f zeta = %.3f R_mfp = %.3f T_vir = %.3f' %(z, xn, zeta, Rmfp, Tvir))
if(ZIPFILE):
with zipfile.ZipFile(PATH_INPUT, 'r') as myzip:
f = myzip.extract(member='%simage_21cm_i%d.bin' %(PATH_IN_ZIP+'data/', i), path=PATH_UNZIP)
dT3 = t2c.read_cbin(f)
f = myzip.extract(member='%smask_21cm_i%d.bin' %(PATH_IN_ZIP+'data/', i), path=PATH_UNZIP)
mask_xn = t2c.read_cbin(f)
os.system('rm -r %s/' %(PATH_UNZIP+PATH_IN_ZIP))
else:
dT3 = t2c.read_cbin('%simage_21cm_i%d.bin' %(PATH_INPUT+'data/', i))
mask_xn = t2c.read_cbin('%smask_21cm_i%d.bin' %(PATH_INPUT+'data/', i))
# Calculate Betti number
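    # Betti numbers of the binary field: b0 counts connected regions,
    # b1 counts tunnels and b2 counts enclosed cavities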
b0_true[i] = t2c.betti0(data=mask_xn)
b1_true[i] = t2c.betti1(data=mask_xn)
b2_true[i] = t2c.betti2(data=mask_xn)
xn_mask[i] = np.mean(mask_xn)
plt.rcParams['font.size'] = 20
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
plt.rcParams['xtick.top'] = False
plt.rcParams['ytick.right'] = False
plt.rcParams['axes.linewidth'] = 1.2
ls = 22
# -------- predict with SegUnet 3D --------
    print(' calculating prediction for data i = %d...' %i)
X_tta = SegUnet21cmPredict(unet=model, x=dT3, TTA=True)
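    # X_tta stacks one prediction per test-time augmentation; the rounded mean
    # gives the binary segmentation and the per-voxel std its uncertainty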
X_seg = np.round(np.mean(X_tta, axis=0))
X_seg_err = np.std(X_tta, axis=0)
# calculate Matthew coef and mean neutral fraction
phicoef_seg[i] = matthews_corrcoef(mask_xn.flatten(), X_seg.flatten())
xn_seg[i] = np.mean(X_seg)
# calculate errors
phicoef_tta = np.zeros(X_tta.shape[0])
xn_tta = np.zeros(X_tta.shape[0])
for k in tqdm(range(len(X_tta))):
xn_tta[k] = np.mean(np.round(X_tta[k]))
phicoef_tta[k] = matthews_corrcoef(mask_xn.flatten(), np.round(X_tta[k]).flatten())
xn_err[i] = np.std(xn_tta)
phicoef_err[i] = np.std(phicoef_tta)
# Calculate Betti number
b0_seg[i] = t2c.betti0(data=X_seg)
b1_seg[i] = t2c.betti1(data=X_seg)
b2_seg[i] = t2c.betti2(data=X_seg)
# --------------------------------------------
# -------- predict with Super-Pixel --------
labels = t2c.slic_cube(dT3.astype(dtype='float64'), n_segments=5000, compactness=0.1, max_iter=20, sigma=0, min_size_factor=0.5, max_size_factor=3, cmap=None)
superpixel_map = t2c.superpixel_map(dT3, labels)
X_sp = 1-t2c.stitch_superpixels(dT3, labels, bins='knuth', binary=True, on_superpixel_map=True)
# calculate Matthew coef and mean neutral fraction
phicoef_sp[i] = matthews_corrcoef(mask_xn.flatten(), X_sp.flatten())
xn_sp[i] = np.mean(X_sp)
# Calculate Betti number
b0_sp[i] = t2c.betti0(data=X_sp)
b1_sp[i] = t2c.betti1(data=X_sp)
b2_sp[i] = t2c.betti2(data=X_sp)
# --------------------------------------------
if(i in new_idx and MAKE_PLOTS):
plt.rcParams['xtick.direction'] = 'out'
plt.rcParams['ytick.direction'] = 'out'
plt.rcParams['figure.figsize'] = [20, 10]
idx = params['HII_DIM']//2
# Plot visual comparison
fig, axs = plt.subplots(figsize=(20,10), ncols=3, sharey=True, sharex=True)
(ax0, ax1, ax2) = axs
ax0.set_title('Super-Pixel ($r_{\phi}=%.3f$)' %phicoef_sp[i], size=ls)
ax0.imshow(X_sp[:,:,idx], origin='lower', cmap='jet', extent=my_ext)
ax0.contour(mask_xn[:,:,idx], colors='lime', levels=[0.5], extent=my_ext)
ax0.set_xlabel('x [Mpc]'), ax0.set_ylabel('y [Mpc]')
ax1.set_title('SegU-Net ($r_{\phi}=%.3f$)' %phicoef_seg[i], size=ls)
ax1.imshow(X_seg[:,:,idx], origin='lower', cmap='jet', extent=my_ext)
ax1.contour(mask_xn[:,:,idx], colors='lime', levels=[0.5], extent=my_ext)
ax1.set_xlabel('x [Mpc]')
ax2.set_title('SegUNet Pixel-Error', size=ls)
        im = ax2.imshow(X_seg_err[:,:,idx], origin='lower', cmap='jet', extent=my_ext)
fig.colorbar(im, label=r'$\sigma_{std}$', ax=ax2, pad=0.02, cax=fig.add_axes([0.905, 0.25, 0.02, 0.51]))
ax2.set_xlabel('x [Mpc]')
plt.subplots_adjust(hspace=0.1, wspace=0.01)
for ax in axs.flat: ax.label_outer()
plt.savefig('%svisual_comparison_i%d.png' %(PATH_OUT+'plots/', i), bbox_inches='tight'), plt.clf()
# Plot BSD-MFP of the prediction
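        # mean-free-path bubble size distributions: each call returns the radii R
        # and the normalised size distribution R dP/dR plotted below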
mfp_pred_ml = t2c.bubble_stats.mfp(X_seg, xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_pred_sp = t2c.bubble_stats.mfp(X_sp, xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_true = t2c.bubble_stats.mfp(mask_xn, xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_tta = np.zeros((X_tta.shape[0], 2, 128))
for j in tqdm(range(0, X_tta.shape[0])):
mfp_pred_ml1, mfp_pred_ml2 = t2c.bubble_stats.mfp(np.round(X_tta[j]), xth=0.5, boxsize=params['BOX_LEN'], iterations=2000000, verbose=False, upper_lim=False, bins=None, r_min=None, r_max=None)
mfp_tta[j,0] = mfp_pred_ml1
mfp_tta[j,1] = mfp_pred_ml2
plt.rcParams['xtick.direction'] = 'in'
plt.rcParams['ytick.direction'] = 'in'
compare_ml = (mfp_pred_ml[1]/mfp_true[1])
compare_ml_tta = (mfp_tta[:,1,:]/mfp_true[1])
compare_sp = (mfp_pred_sp[1]/mfp_true[1])
fig, ax0 = plt.subplots(figsize=(12, 9))
        gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1.8]) # set height ratios for subplots
ax0 = plt.subplot(gs[0])
ax0.set_title('$z=%.3f$\t$x_n=%.3f$\t$r_{\phi}=%.3f$' %(z, xn_mask[i], phicoef_seg[i]), fontsize=ls)
ax0.fill_between(mfp_pred_ml[0], np.min(mfp_tta[:,1,:], axis=0), np.max(mfp_tta[:,1,:], axis=0), color='tab:blue', alpha=0.2)
ax0.loglog(mfp_pred_ml[0], mfp_pred_ml[1], '-', color='tab:blue', label='SegUNet', lw=2)
ax0.loglog(mfp_pred_sp[0], mfp_pred_sp[1], '-', color='tab:orange', label='Super-Pixel', lw=2)
        ax0.loglog(mfp_true[0], mfp_true[1], 'k--', label='Ground truth', lw=2)
ax0.legend(loc=0, borderpad=0.5)
ax0.tick_params(axis='both', length=7, width=1.2)
ax0.tick_params(axis='both', which='minor', length=5, width=1.2)
ax0.set_ylabel('RdP/dR', size=18), ax0.set_xlabel('R (Mpc)')
ax1 = plt.subplot(gs[1], sharex=ax0)
ax1.loglog(mfp_true[0], compare_ml, '-', lw=2)
ax1.loglog(mfp_true[0], compare_sp, '-', lw=2)
ax1.loglog(mfp_true[0], np.ones_like(mfp_true[0]), 'k--', lw=2)
ax1.fill_between(mfp_true[0], np.min(compare_ml_tta, axis=0), np.max(compare_ml_tta, axis=0), color='tab:blue', alpha=0.2)
ax1.tick_params(axis='both', length=7, width=1.2, labelsize=15)
ax1.set_ylabel('difference (%)', size=15)
ax1.set_xlabel('R (Mpc)', size=18)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
ax1.tick_params(which='minor', axis='both', length=5, width=1.2)
plt.savefig('%sbs_comparison_i%d.png' %(PATH_OUT+'plots/', i), bbox_inches='tight'), plt.clf()
        # Plot dimensionless power spectra of the x field
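        # dimensionless power spectrum: Delta^2(k) = k^3 P(k) / (2 pi^2),
        # hence the k**3/2/np.pi**2 factor applied to every spectrum below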
ps_true, ks_true = t2c.power_spectrum_1d(mask_xn, kbins=20, box_dims=256, binning='log')
ps_pred_sp, ks_pred_sp = t2c.power_spectrum_1d(X_sp, kbins=20, box_dims=256, binning='log')
ps_pred_ml, ks_pred_ml = t2c.power_spectrum_1d(X_seg, kbins=20, box_dims=256, binning='log')
ps_tta = np.zeros((X_tta.shape[0],20))
for k in range(0,X_tta.shape[0]):
ps_tta[k], ks_pred_ml = t2c.power_spectrum_1d(np.round(X_tta[k]), kbins=20, box_dims=256, binning='log')
compare_ml = 100*(ps_pred_ml/ps_true - 1.)
compare_ml_tta = 100*(ps_tta/ps_true - 1.)
compare_sp = 100*(ps_pred_sp/ps_true - 1.)
fig, ax = plt.subplots(figsize=(16, 12))
gs = gridspec.GridSpec(2, 1, height_ratios=[4, 1.8])
ax0 = plt.subplot(gs[0])
ax0.set_title('$z=%.3f$\t$x_n=%.3f$\t$r_{\phi}=%.3f$' %(z, xn_mask[i], phicoef_seg[i]), fontsize=ls)
ax0.fill_between(ks_pred_ml, np.min(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0), np.max(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0), color='tab:blue', alpha=0.2)
ax0.loglog(ks_pred_ml, ps_pred_ml*ks_pred_ml**3/2/np.pi**2, '-', color='tab:blue', label='SegUNet', lw=2)
ax0.loglog(ks_pred_sp, ps_pred_sp*ks_pred_sp**3/2/np.pi**2, '-', color='tab:orange', label='Super-Pixel', lw=2)
        ax0.loglog(ks_true, ps_true*ks_true**3/2/np.pi**2, 'k--', label='Ground truth', lw=2)
ax0.set_yscale('log')
ax1 = plt.subplot(gs[1], sharex=ax0)
ax1.semilogx(ks_true, compare_ml, '-', lw=2)
ax1.semilogx(ks_true, compare_sp, '-', lw=2)
ax1.semilogx(ks_true, np.zeros_like(ks_true), 'k--', lw=2)
ax1.fill_between(ks_true, np.min(compare_ml_tta, axis=0), np.max(compare_ml_tta, axis=0), color='tab:blue', alpha=0.2)
ax1.tick_params(axis='both', length=7, width=1.2, labelsize=15)
ax1.set_xlabel('k (Mpc$^{-1}$)'), ax0.set_ylabel('$\Delta^2_{xx}$')
ax1.set_ylabel('difference (%)', size=15)
ax0.tick_params(axis='both', length=10, width=1.2)
ax0.tick_params(which='minor', axis='both', length=5, width=1.2)
ax1.tick_params(which='minor', axis='both', length=5, width=1.2)
ax0.legend(loc=0, borderpad=0.5)
plt.setp(ax0.get_xticklabels(), visible=False)
plt.subplots_adjust(hspace=0.0)
plt.savefig('%sPk_comparison_i%d.png' %(PATH_OUT+'plots/', i), bbox_inches='tight'), plt.clf()
        ds_data = np.vstack((ks_true,
                             ps_true*ks_true**3/2/np.pi**2,
                             ps_pred_ml*ks_pred_ml**3/2/np.pi**2,
                             np.min(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0),
                             np.max(ps_tta*ks_pred_ml**3/2/np.pi**2, axis=0),
                             ps_pred_sp*ks_pred_sp**3/2/np.pi**2))
        bsd_data = np.vstack((mfp_true[0],
                              mfp_true[1],
                              mfp_pred_ml[1],
                              np.min(mfp_tta[:,1,:], axis=0),
                              np.max(mfp_tta[:,1,:], axis=0),
                              mfp_pred_sp[1]))
np.savetxt('%sds_data_i%d.txt' %(PATH_OUT+'data/', i), ds_data.T, fmt='%.6e', delimiter='\t', header='k [Mpc^-1]\tds_true\tds_seg_mean\tds_err_min\tds_err_max\tds_sp')
np.savetxt('%sbsd_data_i%d.txt' %(PATH_OUT+'data/', i), bsd_data.T, fmt='%.6e', delimiter='\t', header='R [Mpc]\tbs_true\tbs_seg_mean\tb_err_min\tbs_err_max\tbs_sp')
    new_astr_data = np.vstack((astr_data,
                               phicoef_seg, phicoef_err, phicoef_sp,
                               xn_mask, xn_seg, xn_err, xn_sp,
                               b0_true, b1_true, b2_true,
                               b0_seg, b1_seg, b2_seg,
                               b0_sp, b1_sp, b2_sp))
np.savetxt('%sastro_data.txt' %(PATH_OUT), new_astr_data.T, fmt='%d\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d', header='i\tz\teff_f\tRmfp\tTvir\tx_n\tphi_ML\tphi_err phi_SP\txn_mask xn_seg\txn_err\txn_sp\tb0 true b1\tb2\tb0 ML\tb1\tb2\tb0 SP\tb1\tb2')
np.savetxt('%sastro_data_sample.txt' %(PATH_OUT+'data/'), new_astr_data[:,new_idx].T, fmt='%d\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%.3f\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d\t%d', header='i\tz\teff_f\tRmfp\tTvir\tx_n\tphi_ML\tphi_err phi_SP\txn_mask xn_seg\txn_err\txn_sp\tb0 true b1\tb2\tb0 ML\tb1\tb2\tb0 SP\tb1\tb2')
# Plot phi coeff
plt.rcParams['font.size'] = 16
redshift, xfrac, phicoef_seg, phicoef_seg_err, phicoef_sp, xn_mask_true, xn_seg, xn_seg_err, xn_sp = OrderNdimArray(np.loadtxt(PATH_OUT+'astro_data.txt', unpack=True, usecols=(1,5,6,7,8,9,10,11,12)), 1)
print('phi_coef = %.3f +/- %.3f\t(SegUnet)' %(np.mean(phicoef_seg), np.std(phicoef_seg)))
print('phi_coef = %.3f\t\t(Superpixel)' %(np.mean(phicoef_sp)))
fig, (ax0, ax1) = plt.subplots(ncols=2, figsize=(20,8))
#ax0.hlines(y=np.mean(phicoef_seg), xmin=0, xmax=1, ls='--', alpha=0.5)
#ax0.fill_between(x=np.linspace(0, 1, 100), y1=np.mean(phicoef_seg)+np.std(phicoef_seg), y2=np.mean(phicoef_seg)-np.std(phicoef_seg), alpha=0.5, color='lightgray')
# MCC SegUnet
cm = matplotlib.cm.plasma
sc = ax0.scatter(xfrac, phicoef_seg, c=redshift, vmin=7, vmax=9, s=25, cmap=cm, marker='.')
norm = matplotlib.colors.Normalize(vmin=7, vmax=9, clip=True)
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap=cm)
redshift_color = np.array([(mapper.to_rgba(v)) for v in redshift])
for x, y, e, clr in zip(xfrac, phicoef_seg, phicoef_seg_err, redshift_color):
ax0.errorbar(x, y, e, lw=1, marker='o', capsize=3, color=clr)
ax0.set_xlim(xfrac.min()-0.02, xfrac.max()+0.02), ax0.set_xlabel(r'$x_i$')
ax0.set_ylim(-0.02, 1.02), ax0.set_ylabel(r'$r_{\phi}$')
fig.colorbar(sc, ax=ax0, pad=0.01, label=r'$z$')
ax2 = ax0.twinx()
ax2.hist(xfrac, np.linspace(0.09, 0.81, 15), density=True, histtype='step', color='tab:blue', alpha=0.5)
ax2.axes.get_yaxis().set_visible(False)
# MCC comparison
ax1.hlines(y=np.mean(phicoef_seg), xmin=0, xmax=1, ls='--', alpha=0.5, color='tab:blue')
ax1.hlines(y=np.mean(phicoef_sp), xmin=0, xmax=1, ls='--', alpha=0.5, color='tab:orange')
new_x = np.linspace(xfrac.min(), xfrac.max(), 100)
f1 = np.poly1d(np.polyfit(xfrac, phicoef_seg, 10))
ax1.plot(new_x, f1(new_x), label='SegUnet', color='tab:blue')
f2 = np.poly1d(np.polyfit(xfrac, phicoef_sp, 10))
ax1.plot(new_x, f2(new_x), label='Super-Pixel', color='tab:orange')
ax1.set_xlim(xfrac.min()-0.02, xfrac.max()+0.02), ax1.set_xlabel(r'$x_i$')
ax1.set_ylim(-0.02, 1.02), ax1.set_ylabel(r'$r_{\phi}$')
ax1.legend(loc=4)
plt.savefig('%sphi_coef.png' %PATH_OUT, bbox_inches="tight"), plt.clf()
# Plot correlation
fig, (ax0, ax1) = plt.subplots(ncols=2)
ax0.plot(xn_mask_true, xn_mask_true, 'k--')
cm = matplotlib.cm.plasma
sc = ax0.scatter(xn_mask_true, xn_seg, c=redshift, vmin=7, vmax=9, s=25, cmap=cm, marker='.')
norm = matplotlib.colors.Normalize(vmin=7, vmax=9, clip=True)
mapper = matplotlib.cm.ScalarMappable(norm=norm, cmap='plasma')
redshift_color = np.array([(mapper.to_rgba(v)) for v in redshift])
for x, y, e, clr in zip(xn_mask_true, xn_seg, xn_seg_err, redshift_color):
ax0.errorbar(x, y, e, lw=1, marker='o', capsize=3, color=clr)
ax0.set_xlim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax0.set_xlabel(r'$\rm x_{n,\,true}$')
ax0.set_ylim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax0.set_ylabel(r'$\rm x_{n,\,predict}$')
fig.colorbar(sc, ax=ax0, pad=0.01, label=r'$z$')
ax1.plot(xn_mask_true, xn_mask_true, 'k--', label='Ground truth')
ax1.scatter(xn_mask_true, xn_seg, color='tab:blue', marker='o', label='SegUnet')
ax1.scatter(xn_mask_true, xn_sp, color='tab:orange', marker='o', label='Super-Pixel')
ax1.set_xlim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax1.set_xlabel(r'$\rm x_{n,\,true}$')
ax1.set_ylim(xn_mask_true.min()-0.02, xn_mask_true.max()+0.02), ax1.set_ylabel(r'$\rm x_{n,\,predict}$')
plt.legend(loc=4)
plt.savefig('%scorr.png' %PATH_OUT, bbox_inches="tight"), plt.clf()
# Betti numbers plot
fig, (ax0, ax1, ax2) = plt.subplots(ncols=3, figsize=(23,5), sharex=True)
h = np.histogram(xn_mask_true, np.linspace(1e-5, 1., 20), density=True)
new_x = h[1][:-1]+0.5*(h[1][1:]-h[1][:-1])
# Betti 0
f_b0_true = np.array([np.mean(b0_true[(xn_mask_true < h[1][i+1]) * (xn_mask_true >= h[1][i])]) for i in range(h[1].size-1)])
import isopy
import numpy as np
import pytest
# calculate_mass_fractionation_factor, remove_mass_fractionation, add_mass_fractionation
def test_mass_fractionation1():
# Testing with input as isotope array
# Using default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'], seed = 46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated1 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, '105pd')
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor)
assert c_fractionated1.keys == unfractionated.keys
assert c_fractionated1.size == unfractionated.size
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated1 = isopy.tb.remove_mass_fractionation(c_fractionated1, mf_factor, '105pd')
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor)
assert c_unfractionated1.keys == unfractionated.keys
assert c_unfractionated1.size == unfractionated.size
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated1, '108pd/105pd')
np.testing.assert_allclose(c_mf_factor2, mf_factor)
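    # exponential mass-fractionation law: each ratio is scaled by
    # (m_i / m_ref) ** beta, which the loop below checks isotope by isotope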
for key in unfractionated.keys:
mass_diff = mass_ref.get(key/'105pd')
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated1[key], fractionated)
np.testing.assert_allclose(c_unfractionated1[key], unfractionated[key])
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
#Changing reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
unfractionated2 = unfractionated.ratio('105pd')
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated1 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, '105pd', isotope_masses=mass_ref)
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, isotope_masses=mass_ref)
assert c_fractionated1.keys == unfractionated.keys
assert c_fractionated1.size == unfractionated.size
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated1 = isopy.tb.remove_mass_fractionation(c_fractionated1, mf_factor, '105pd', isotope_masses=mass_ref)
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor, isotope_masses=mass_ref)
assert c_unfractionated1.keys == unfractionated.keys
assert c_unfractionated1.size == unfractionated.size
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated1, '108pd/105pd',
isotope_masses=mass_ref, isotope_fractions=fraction_ref)
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key / '105pd')
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated1[key], fractionated)
np.testing.assert_allclose(c_unfractionated1[key], unfractionated[key])
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
# calculate_mass_fractionation_factor, remove_mass_fractionation, add_mass_fractionation
def test_mass_fractionation2():
# Testing with input as ratio array
# Using default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
unfractionated = unfractionated.ratio('105pd')
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor)
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor)
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated2, '108pd/105pd')
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key)
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated2[key], fractionated)
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
# Changing reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
unfractionated = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated = unfractionated * fraction_ref
unfractionated['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated['105pd']
unfractionated = unfractionated.ratio('105pd')
mf_factor = isopy.random(100, (0, 2), seed=47)
c_fractionated2 = isopy.tb.add_mass_fractionation(unfractionated, mf_factor, isotope_masses=mass_ref)
assert c_fractionated2.keys == unfractionated.keys
assert c_fractionated2.size == unfractionated.size
c_unfractionated2 = isopy.tb.remove_mass_fractionation(c_fractionated2, mf_factor, isotope_masses=mass_ref)
assert c_unfractionated2.keys == unfractionated.keys
assert c_unfractionated2.size == unfractionated.size
c_mf_factor2 = isopy.tb.calculate_mass_fractionation_factor(c_fractionated2, '108pd/105pd',
isotope_masses=mass_ref, isotope_fractions=fraction_ref)
np.testing.assert_allclose(c_mf_factor2, mf_factor)
for key in unfractionated.keys:
mass_diff = mass_ref.get(key)
fractionated = unfractionated[key] * (mass_diff ** mf_factor)
np.testing.assert_allclose(c_fractionated2[key], fractionated)
np.testing.assert_allclose(c_unfractionated2[key], unfractionated[key])
class Test_MassIndependentCorrection:
def test_one(self):
# Default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
unfractionated1 = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated1 = unfractionated1 * fraction_ref
unfractionated1['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated1['105pd']
unfractionated2 = unfractionated1.ratio('105pd')
n_unfractionated2 = (unfractionated2 / fraction_ref - 1) * 10000
mf_factor = isopy.random(100, (0, 2), seed=47)
fractionated1 = isopy.tb.add_mass_fractionation(unfractionated2, mf_factor)
fractionated2 = fractionated1.deratio(unfractionated1['105pd'])
self.run(fractionated1, unfractionated2, '108pd/105pd')
self.run(fractionated2, unfractionated2, '108pd/105pd')
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor=10_000)
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor=10_000)
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor='epsilon')
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor='epsilon')
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
unfractionated1 = isopy.random(100, (1, 0.001), keys=isopy.refval.element.isotopes['pd'],
seed=46)
unfractionated1 = unfractionated1 * fraction_ref
unfractionated1['108pd'] = fraction_ref.get('108pd/105pd') * unfractionated1['105pd']
unfractionated2 = unfractionated1.ratio('105pd')
n_unfractionated2 = (unfractionated2 / fraction_ref - 1) * 10000
mf_factor = isopy.random(100, (0, 2), seed=47)
fractionated1 = isopy.tb.add_mass_fractionation(unfractionated2, mf_factor,
isotope_masses=mass_ref)
fractionated2 = fractionated1.deratio(unfractionated1['105pd'])
self.run(fractionated1, unfractionated2, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated2, unfractionated2, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated1, n_unfractionated2, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated2, n_unfractionated2, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
def test_two(self):
# With interference correctionn
        # We won't get an exact match here so we have to lower the tolerance.
# Default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split(), seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor)
for key in fractionated.keys.filter(element_symbol='pd'):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= fractionated['101ru'] * (mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
fractionated[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= fractionated['111cd'] * (mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
fractionated[key] += cd
correct1 = data.copy(element_symbol = 'pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = (correct1 / fraction_ref - 1) * 10_000
self.run(fractionated, correct1, '108pd/105pd')
self.run(fractionated, correct2, '108pd/105pd', factor=1)
self.run(fractionated, correct3, '108pd/105pd', factor=10_000)
self.run(fractionated, correct3, '108pd/105pd', factor='epsilon')
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split(), seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor, isotope_masses=mass_ref)
for key in fractionated.keys.filter(element_symbol='pd'):
if (ru := fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= fractionated['101ru'] * (
mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
fractionated[key] += ru
if (cd := fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= fractionated['111cd'] * (
mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
fractionated[key] += cd
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = (correct1 / fraction_ref - 1) * 10_000
self.run(fractionated, correct1, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct2, '108pd/105pd', factor=1, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
def test_three(self):
# Normalisations
# Default reference values
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(), seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor)
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
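        # normalisation factors: 1e3 = permil/ppt, 1e4 = epsilon, 1e6 = mu/ppm,
        # as exercised by the paired run() calls below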
self.run(fractionated, correct1, '108pd/105pd')
self.run(fractionated, correct2, '108pd/105pd', factor=1)
self.run(fractionated, correct3, '108pd/105pd', factor=1000)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt')
self.run(fractionated, correct3, '108pd/105pd', factor='permil')
self.run(fractionated, correct4, '108pd/105pd', factor=10_000)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon')
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000)
self.run(fractionated, correct5, '108pd/105pd', factor='mu')
self.run(fractionated, correct5, '108pd/105pd', factor='ppm')
# Single value
std1 = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(), seed=48)
std1 = std1 * fraction_ref
rstd1 = std1.ratio('pd105')
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / np.mean(rstd1) - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct2, '108pd/105pd', norm_val=rstd1)
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=rstd1)
std1 = np.mean(std1)
rstd1 = np.mean(rstd1)
self.run(fractionated, correct2, '108pd/105pd', norm_val=rstd1)
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=rstd1)
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=rstd1)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=rstd1)
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=rstd1)
# Multiple
std1 = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(),
seed=48)
std1 = std1 * fraction_ref
rstd1 = std1.ratio('pd105')
std2 = isopy.random(50, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(),
seed=49)
std2 = std2 * fraction_ref
rstd2 = std2.ratio('pd105')
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / (np.mean(rstd1)/2 + np.mean(rstd2)/2) - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct2, '108pd/105pd', norm_val=(rstd1, rstd2))
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=(rstd1, rstd2))
std1 = np.mean(std1)
rstd1 = np.mean(rstd1)
self.run(fractionated, correct2, '108pd/105pd', norm_val=(rstd1, rstd2))
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=(rstd1, rstd2))
std2 = np.mean(std2)
rstd2 = np.mean(rstd2)
self.run(fractionated, correct2, '108pd/105pd', norm_val=(rstd1, rstd2))
self.run(fractionated, correct2, '108pd/105pd', factor=1, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor=1000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', norm_val=(rstd1, rstd2))
self.run(fractionated, correct3, '108pd/105pd', factor='permil', norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='mu', norm_val=(rstd1, rstd2))
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', norm_val=(rstd1, rstd2))
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
mf_factor = isopy.random(100, (0, 2), seed=47)
data = isopy.random(100, (1, 0.1), keys='102pd 104pd 105pd 106pd 108pd 110pd'.split(),
seed=46)
data = data * fraction_ref
data['108pd'] = fraction_ref.get('108pd/105pd') * data['105pd']
fractionated = data.copy()
fractionated = isopy.tb.add_mass_fractionation(fractionated, mf_factor, isotope_masses=mass_ref)
correct1 = data.copy(element_symbol='pd').ratio('105pd')
correct2 = (correct1 / fraction_ref - 1)
correct3 = correct2 * 1000
correct4 = correct2 * 10_000
correct5 = correct2 * 1_000_000
self.run(fractionated, correct1, '108pd/105pd', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct2, '108pd/105pd', factor=1, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor=1000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor='ppt', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct3, '108pd/105pd', factor='permil', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct4, '108pd/105pd', factor=10_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct4, '108pd/105pd', factor='epsilon', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct5, '108pd/105pd', factor=1_000_000, mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct5, '108pd/105pd', factor='mu', mass_ref=mass_ref, fraction_ref=fraction_ref)
self.run(fractionated, correct5, '108pd/105pd', factor='ppm', mass_ref=mass_ref, fraction_ref=fraction_ref)
def run(self, data, correct, mb_ratio, factor = None, mass_ref = None, fraction_ref=None, norm_val = None):
if type(factor) is str:
func = getattr(isopy.tb.internal_normalisation, factor)
factor2 = None
else:
factor2 = factor
func = isopy.tb.internal_normalisation
kwargs = {}
if factor2 is not None: kwargs['extnorm_factor'] = factor2
if mass_ref is not None: kwargs['isotope_masses'] = mass_ref
if fraction_ref is not None: kwargs['isotope_fractions'] = fraction_ref
if norm_val is not None: kwargs['extnorm_value'] = norm_val
corrected = func(data, mb_ratio, **kwargs)
assert corrected.keys == correct.keys - mb_ratio
assert corrected.size == correct.size
assert corrected.ndim == correct.ndim
for key in corrected.keys:
np.testing.assert_allclose(corrected[key], correct[key])
# mass independent correction
if type(factor) is str:
func = getattr(isopy.tb.mass_independent_correction, factor)
factor2 = None
else:
factor2 = factor
func = isopy.tb.mass_independent_correction
kwargs = {}
if factor2 is not None: kwargs['normalisation_factor'] = factor2
if mass_ref is not None: kwargs['isotope_masses'] = mass_ref
if fraction_ref is not None: kwargs['isotope_fractions'] = fraction_ref
if norm_val is not None: kwargs['normalisation_value'] = norm_val
corrected = func(data, mb_ratio, **kwargs)
assert corrected.keys == correct.keys - mb_ratio
assert corrected.size == correct.size
assert corrected.ndim == correct.ndim
for key in corrected.keys:
np.testing.assert_allclose(corrected[key], correct[key])
class Test_IsobaricInterferences:
def test_one(self):
# No mass fractionation factor
# Single interference isotope
# Default reference values
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
base_data = isopy.random(100, (1, 0.01), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split())
base_data = base_data * fraction_ref
data = base_data.copy()
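        # simulate isobaric interferences: every Pd isotope gains a contribution from
        # Ru and Cd scaled by the interfering element's isotope ratio to its
        # monitor isotope (101Ru and 111Cd)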
for key in data.keys.filter(element_symbol='pd'):
data[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data['101ru']
data[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data['111cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '111cd'] = 0
interferences2 = {'ru': ('104pd',), 'cd': ('106pd', '108pd')}
correct2 = base_data.copy()
correct2['101ru', '111cd'] = 0
correct2['102pd'] = data['102pd']
correct2['110pd'] = data['110pd']
self.run(data, data, correct1, correct2, interferences1, interferences2, '105pd')
# Different reference values
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
base_data = isopy.random(100, (1, 0.01), keys='101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd'.split())
base_data = base_data * fraction_ref
data = base_data.copy()
for key in data.keys.filter(element_symbol='pd'):
data[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data['101ru']
data[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data['111cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '111cd'] = 0
interferences2 = {'ru': ('104pd',), 'cd': ('106pd', '108pd')}
correct2 = base_data.copy()
correct2['101ru', '111cd'] = 0
correct2['102pd'] = data['102pd']
correct2['110pd'] = data['110pd']
self.run(data, data, correct1, correct2, interferences1, interferences2, '105pd',
fraction_ref=fraction_ref)
def test_two(self):
# No mass fractionation factor
# Multiple interference isotopes
# Default reference values
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
base_data = isopy.random(100, (1, 0.01), keys='99ru 101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd 112cd'.split())
# 112cd > 111cd, 101ru > 99ru
base_data = base_data * fraction_ref
data1 = base_data.copy()
        data1['99ru', '111cd'] = -1 # so that we don't accidentally make this the largest isotope
for key in data1.keys.filter(key_neq = '<KEY>'.split()):
data1[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data1['101ru']
data1[key] += fraction_ref.get(f'cd{key.mass_number}/cd112', 0) * data1['112cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '112cd'] = 0
correct1['99ru', '111cd'] = -1
interferences2 = {'ru99': ('104pd',), 'cd111': ('106pd', '108pd')}
data2 = base_data.copy()
        data2['ru101', 'cd112'] = -1 # so that we don't accidentally make this the largest isotope
for key in data2.keys.filter(key_neq='ru99 cd111 102pd 110pd'.split()):
data2[key] += fraction_ref.get(f'ru{key.mass_number}/ru99', 0) * data2['99ru']
data2[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data2['111cd']
correct2 = base_data.copy()
correct2['99ru', '111cd'] = 0
correct2['101ru', '112cd'] = -1
self.run(data1, data2, correct1, correct2, interferences1, interferences2, '105pd')
# Different reference values
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
base_data = isopy.random(100, (1, 0.01),
keys='99ru 101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd 112cd'.split())
# 112cd > 111cd, 101ru > 99ru
base_data = base_data * fraction_ref
data1 = base_data.copy()
        data1['99ru', '111cd'] = -1 # so that we don't accidentally make this the largest isotope
for key in data1.keys.filter(key_neq='<KEY>'.split()):
data1[key] += fraction_ref.get(f'ru{key.mass_number}/ru101', 0) * data1['101ru']
data1[key] += fraction_ref.get(f'cd{key.mass_number}/cd112', 0) * data1['112cd']
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '112cd'] = 0
correct1['99ru', '111cd'] = -1
interferences2 = {'ru99': ('104pd',), 'cd111': ('106pd', '108pd')}
data2 = base_data.copy()
        data2['ru101', 'cd112'] = -1 # so that we don't accidentally make this the largest isotope
for key in data2.keys.filter(key_neq='<KEY>'.split()):
data2[key] += fraction_ref.get(f'ru{key.mass_number}/ru99', 0) * data2['99ru']
data2[key] += fraction_ref.get(f'cd{key.mass_number}/cd111', 0) * data2['111cd']
correct2 = base_data.copy()
correct2['99ru', '111cd'] = 0
correct2['101ru', '112cd'] = -1
self.run(data1, data2, correct1, correct2, interferences1, interferences2, '105pd',
fraction_ref=fraction_ref)
def test_three(self):
#Mass fractionation
#Single interference isotope
mass_ref = isopy.refval.isotope.mass_W17
fraction_ref = isopy.refval.isotope.best_measurement_fraction_M16
base_data = isopy.random(100, (1, 0.01),
keys='<KEY>'.split())
mf_factor = isopy.random(100, (0,2))
base_data = base_data * fraction_ref
data = base_data.copy()
for key in data.keys.filter(element_symbol='pd'):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= data['101ru'] * (mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
data[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= data['111cd'] * (mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
data[key] += cd
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '111cd'] = 0
interferences2 = {'ru': ('104pd',), 'cd': ('106pd', '108pd')}
correct2 = base_data.copy()
correct2['101ru', '111cd'] = 0
correct2['102pd'] = data['102pd']
correct2['110pd'] = data['110pd']
self.run(data, data, correct1, correct2, interferences1, interferences2, '105pd',
mf_factor=mf_factor)
#M Multiple interference isotopes
# Different reference values
mass_ref = isopy.refval.isotope.mass_number
fraction_ref = isopy.refval.isotope.initial_solar_system_fraction_L09
base_data = isopy.random(100, (1, 0.01),
keys='99ru 101ru 102pd 104pd 105pd 106pd 108pd 110pd 111cd 112cd'.split())
# 112cd > 111cd, 101ru > 99ru
base_data = base_data * fraction_ref
data1 = base_data.copy()
        data1['99ru', '111cd'] = -1 # so that we don't accidentally make this the largest isotope
for key in data1.keys.filter(key_neq='<KEY>'.split()):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru101', 0)) > 0:
ru *= data1['101ru'] * (mass_ref.get(f'ru{key.mass_number}/ru101', 0) ** mf_factor)
data1[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd112', 0)) > 0:
cd *= data1['cd112'] * (mass_ref.get(f'cd{key.mass_number}/cd112', 0) ** mf_factor)
data1[key] += cd
interferences1 = {'ru': ('102pd', '104pd'), 'cd': ('106pd', '108pd', '110pd')}
correct1 = base_data.copy()
correct1['101ru', '112cd'] = 0
correct1['99ru', '111cd'] = -1
interferences2 = {'ru99': ('104pd',), 'cd111': ('106pd', '108pd')}
data2 = base_data.copy()
        data2['ru101', 'cd112'] = -1 # so that we don't accidentally make this the largest isotope
for key in data2.keys.filter(key_neq='ru99 cd111 102pd 110pd'.split()):
if (ru:=fraction_ref.get(f'ru{key.mass_number}/ru99', 0)) > 0:
ru *= data2['ru99'] * (mass_ref.get(f'ru{key.mass_number}/ru99', 0) ** mf_factor)
data2[key] += ru
if (cd:=fraction_ref.get(f'cd{key.mass_number}/cd111', 0)) > 0:
cd *= data2['111cd'] * (mass_ref.get(f'cd{key.mass_number}/cd111', 0) ** mf_factor)
data2[key] += cd
correct2 = base_data.copy()
correct2['99ru', '111cd'] = 0
correct2['101ru', '112cd'] = -1
self.run(data1, data2, correct1, correct2, interferences1, interferences2, '105pd',
fraction_ref=fraction_ref, mass_ref=mass_ref, mf_factor=mf_factor)
def run(self, data1, data2, correct1, correct2, interferences1, interferences2, denom=None,
mf_factor=None, fraction_ref=None, mass_ref=None):
interferences = isopy.tb.find_isobaric_interferences('pd', data1)
        assert len(interferences) == len(interferences1)
for key in interferences1:
assert key in interferences
assert interferences[key] == interferences1[key]
corrected1 = isopy.tb.remove_isobaric_interferences(data1, interferences,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected1.keys == correct1.keys
assert corrected1.size == correct1.size
for key in corrected1.keys:
np.testing.assert_allclose(corrected1[key], correct1[key])
corrected2 = isopy.tb.remove_isobaric_interferences(data2, interferences2,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected2.keys == correct2.keys
assert corrected2.size == correct2.size
for key in corrected2.keys:
np.testing.assert_allclose(corrected2[key], correct2[key])
# Ratio test data
if denom is not None:
data1 = data1.ratio(denom)
data2 = data2.ratio(denom)
correct1 = correct1.ratio(denom)
correct2 = correct2.ratio(denom)
interferences = isopy.tb.find_isobaric_interferences('pd', data1)
assert len(interferences) == len(interferences1)
for key in interferences1:
assert key in interferences
assert interferences[key] == interferences1[key]
corrected1 = isopy.tb.remove_isobaric_interferences(data1, interferences,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected1.keys == correct1.keys
assert corrected1.size == correct1.size
for key in corrected1.keys:
np.testing.assert_allclose(corrected1[key], correct1[key])
corrected2 = isopy.tb.remove_isobaric_interferences(data2, interferences2,
mf_factor=mf_factor,
isotope_fractions=fraction_ref,
isotope_masses=mass_ref)
assert corrected2.keys == correct2.keys
assert corrected2.size == correct2.size
for key in corrected2.keys:
np.testing.assert_allclose(corrected2[key], correct2[key])
def test_find(self):
interferences = isopy.tb.find_isobaric_interferences('pd', ('ru', 'cd'))
assert len(interferences) == 2
assert 'ru' in interferences
assert interferences['ru'] == ('102Pd', '104Pd')
assert 'cd' in interferences
assert interferences['cd'] == ('106Pd', '108Pd', '110Pd')
interferences = isopy.tb.find_isobaric_interferences('pd', ('ru', 'rh', 'ag', 'cd'))
assert len(interferences) == 2
assert 'ru' in interferences
assert interferences['ru'] == ('102Pd', '104Pd')
assert 'cd' in interferences
assert interferences['cd'] == ('106Pd', '108Pd', '110Pd')
interferences = isopy.tb.find_isobaric_interferences('ce')
assert len(interferences) == 4
assert 'xe' in interferences
assert interferences['xe'] == ('136Ce',)
assert 'ba' in interferences
assert interferences['ba'] == ('136Ce', '138Ce')
assert 'la' in interferences
assert interferences['la'] == ('138Ce', )
assert 'nd' in interferences
assert interferences['nd'] == ('142Ce',)
interferences = isopy.tb.find_isobaric_interferences('138ce')
assert len(interferences) == 2
assert 'ba' in interferences
assert interferences['ba'] == ('138Ce',)
assert 'la' in interferences
assert interferences['la'] == ('138Ce',)
interferences = isopy.tb.find_isobaric_interferences('zn', ('ni', 'ge', 'ba++'))
assert len(interferences) == 3
assert 'ni' in interferences
assert interferences['ni'] == ('64Zn',)
assert 'ge' in interferences
assert interferences['ge'] == ('70Zn',)
assert 'ba++' in interferences
assert interferences['ba++'] == ('66Zn', '67Zn', '68Zn')
class Test_rDelta():
def test_rDelta1(self):
# Data is a single value
data = isopy.refval.isotope.fraction.to_array(element_symbol='pd')
# Dict
reference = isopy.refval.isotope.fraction
correct1 = isopy.zeros(None, data.keys)
correct2 = isopy.ones(None, data.keys)
self.run(data, data, reference, correct1, correct2)
# Single array
reference = isopy.random(100, keys=data.keys)
correct1 = data / np.mean(reference) - 1
correct2 = data / np.mean(reference)
self.run(data, data, reference, correct1, correct2)
self.run(data, data, np.mean(reference), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, reference, correct1, correct2, 10_000)
self.run(data, data, np.mean(reference), correct1, correct2, 10_000)
# Multiple values
reference1 = isopy.random(100, keys=data.keys)
reference2 = isopy.random(100, keys=data.keys)
meanmean = np.mean(reference1)/2 + np.mean(reference2)/2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data, (reference1, reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
# Keys that do not match
data2 = data.copy()
data2['105pd', '106pd'] = np.nan
reference1 = isopy.random(100, keys='101ru 102pd 104pd 105pd 108pd 110pd 111cd'.split())
reference2 = isopy.random(100, keys='101ru 102pd 104pd 106pd 108pd 110pd 111cd'.split())
meanmean = np.mean(reference1) / 2 + np.mean(reference2) / 2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data2, (reference1, reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data2, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
def test_rDelta2(self):
data = isopy.random(100, keys=isopy.refval.element.isotopes['pd'])
data = data * isopy.refval.isotope.fraction
# Dict
reference = isopy.refval.isotope.fraction
correct1 = data / reference - 1
correct2 = data / reference
self.run(data, data, reference, correct1, correct2)
# Single array
reference = isopy.random(100, keys=data.keys)
correct1 = data / np.mean(reference) - 1
correct2 = data / np.mean(reference)
self.run(data, data, reference, correct1, correct2)
self.run(data, data, np.mean(reference), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, reference, correct1, correct2, 10_000)
self.run(data, data, np.mean(reference), correct1, correct2, 10_000)
# Multiple values
reference1 = isopy.random(100, keys=data.keys)
reference2 = isopy.random(100, keys=data.keys)
meanmean = np.mean(reference1)/2 + np.mean(reference2)/2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data, (reference1, reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
# Keys that do not match
data2 = data.copy()
data2['105pd', '106pd'] = np.nan
reference1 = isopy.random(100, keys='101ru 102pd 104pd 105pd 108pd 110pd 111cd'.split())
reference2 = isopy.random(100, keys='101ru 102pd 104pd 106pd 108pd 110pd 111cd'.split())
meanmean = np.mean(reference1) / 2 + np.mean(reference2) / 2
correct1 = data / meanmean - 1
correct2 = data / meanmean
self.run(data, data2, (reference1, reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2)
correct1 = correct1 * 10_000
correct2 = correct2 * 10_000
self.run(data, data2, (reference1, reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), reference2), correct1, correct2, 10_000)
self.run(data, data2, (np.mean(reference1), np.mean(reference2)), correct1, correct2, 10_000)
def test_presets(self):
data = isopy.random(100, keys=isopy.refval.element.isotopes['pd'])
data = data * isopy.refval.isotope.fraction
reference = isopy.refval.isotope.fraction
correct = (data / reference - 1) * 1000
normalised = isopy.tb.rDelta.ppt(data, reference)
denormalised = isopy.tb.inverse_rDelta.ppt(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 1000
normalised = isopy.tb.rDelta.permil(data, reference)
denormalised = isopy.tb.inverse_rDelta.permil(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 10_000
normalised = isopy.tb.rDelta.epsilon(data, reference)
denormalised = isopy.tb.inverse_rDelta.epsilon(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 1_000_000
normalised = isopy.tb.rDelta.mu(data, reference)
denormalised = isopy.tb.inverse_rDelta.mu(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
correct = (data / reference - 1) * 1_000_000
normalised = isopy.tb.rDelta.ppm(data, reference)
denormalised = isopy.tb.inverse_rDelta.ppm(normalised, reference)
self.compare(correct, normalised)
self.compare(data, denormalised)
def run(self, data1, data2, reference_value, correct1, correct2, factor=1):
normalised = isopy.tb.rDelta(data1, reference_value, factor=factor)
assert normalised.keys == data1.keys
assert normalised.size == data1.size
assert normalised.ndim == data1.ndim
for key in normalised.keys:
np.testing.assert_allclose(normalised[key], correct1[key])
denormalised = isopy.tb.inverse_rDelta(normalised, reference_value, factor=factor)
assert denormalised.keys == data1.keys
assert denormalised.size == data1.size
assert denormalised.ndim == data1.ndim
for key in denormalised.keys:
np.testing.assert_allclose(denormalised[key], data2[key])
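# Added note (hedged): with deviations=0 the expected output is the plain ratio
# data / reference scaled by factor (correct2), i.e. without subtracting 1 first.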
normalised = isopy.tb.rDelta(data1, reference_value, factor=factor, deviations=0)
assert normalised.keys == data1.keys
assert normalised.size == data1.size
assert normalised.ndim == data1.ndim
for key in normalised.keys:
np.testing.assert_allclose(normalised[key], correct2[key])
denormalised = isopy.tb.inverse_rDelta(normalised, reference_value, factor=factor, deviations=0)
assert denormalised.keys == data1.keys
assert denormalised.size == data1.size
assert denormalised.ndim == data1.ndim
for key in denormalised.keys:
np.testing.assert_allclose(denormalised[key], data2[key])
def compare(self, correct, calculated):
assert calculated.keys == correct.keys
assert calculated.size == correct.size
assert calculated.ndim == correct.ndim
for key in calculated.keys:
np.testing.assert_allclose(calculated[key], correct[key])
class Test_OutliersLimits:
def test_limits(self):
data = isopy.random(100, (1,1), keys=isopy.refval.element.isotopes['pd'])
median = np.median(data)
mean = np.mean(data)
mad3 = isopy.mad3(data)
sd2 = isopy.sd2(data)
upper = isopy.tb.upper_limit(data)
assert upper == median + mad3
upper = isopy.tb.upper_limit(data, np.mean, isopy.sd2)
assert upper == mean + sd2
upper = isopy.tb.upper_limit.sd2(data)
assert upper == mean + sd2
upper = isopy.tb.upper_limit(data, 1, isopy.sd2)
assert upper == 1 + sd2
upper = isopy.tb.upper_limit(data, np.mean, 1)
assert upper == mean + 1
upper = isopy.tb.upper_limit(data, 1, 1)
assert upper == 2
lower = isopy.tb.lower_limit(data)
assert lower == median - mad3
lower = isopy.tb.lower_limit.sd2(data)
assert lower == mean - sd2
lower = isopy.tb.lower_limit(data, np.mean, isopy.sd2)
assert lower == mean - sd2
lower = isopy.tb.lower_limit(data, 1, isopy.sd2)
assert lower == 1 - sd2
lower = isopy.tb.lower_limit(data, np.mean, 1)
assert lower == mean - 1
lower = isopy.tb.lower_limit(data, 1, 1)
assert lower == 0
def test_find_outliers1(self):
#axis = 0
data = isopy.random(100, (1, 1), keys=isopy.refval.element.isotopes['pd'])
median = np.median(data)
mean = np.mean(data)
mad3 = isopy.mad3(data)
sd = isopy.sd(data)
median_outliers = (data > (median + mad3)) + (data < (median - mad3))
mean_outliers = (data > (mean + sd)) + (data < (mean - sd))
mean_outliers1 = (data > (1 + sd)) + (data < (1 - sd))
mean_outliers2 = (data > (mean + 1)) + (data < (mean - 1))
mean_outliers3 = (data > (1 + 1)) + (data < (1 - 1))
outliers = isopy.tb.find_outliers(data)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], median_outliers[key])
outliers = isopy.tb.find_outliers(data, np.mean, isopy.sd)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], mean_outliers[key])
outliers = isopy.tb.find_outliers.sd(data)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], mean_outliers[key])
outliers = isopy.tb.find_outliers(data, 1, isopy.sd)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], mean_outliers1[key])
outliers = isopy.tb.find_outliers(data, np.mean, 1)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], mean_outliers2[key])
outliers = isopy.tb.find_outliers(data, 1, 1)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], mean_outliers3[key])
# invert
median_outliers = np.invert(median_outliers)
mean_outliers = np.invert(mean_outliers)
mean_outliers1 = np.invert(mean_outliers1)
mean_outliers2 = np.invert(mean_outliers2)
mean_outliers3 = np.invert(mean_outliers3)
outliers = isopy.tb.find_outliers(data, invert=True)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], median_outliers[key])
outliers = isopy.tb.find_outliers(data, np.mean, isopy.sd, invert=True)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
np.testing.assert_allclose(outliers[key], mean_outliers[key])
outliers = isopy.tb.find_outliers.sd(data, invert=True)
assert outliers.keys == data.keys
assert outliers.size == data.size
for key in outliers.keys:
| np.testing.assert_allclose(outliers[key], mean_outliers[key]) | numpy.testing.assert_allclose |
# -*- coding: utf-8 -*-
"""
Special case: binning data with only (x, y) coords.
:copyright: 2022 Agile Geoscience
:license: Apache 2.0
"""
import numpy as np
import scipy.spatial.distance as sd
from scipy.stats import gaussian_kde
from scipy.signal import find_peaks
from scipy.interpolate import interp1d
def nearest_neighbours(x, y, precision=5):
"""
Vector between the 2 closest points. If there's a tie (there
likely will be), we take the first point whose nearest neighbour
lies at the minimal distance (and its first such neighbour).
The precision cut-off is needed to deal with floating-point
imprecision.
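Added illustration (assumed values, not from the original source): for
points (0, 0), (0, 1) and (5, 5) the pairwise distances are 1.0, ~6.40
and ~7.07, so the minimally near pair is (0, 0)-(0, 1) and the returned
vector is built from that pair.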
"""
X = np.array([x, y]).T
# Make the pairwise distance matrix, D.
D = sd.squareform(sd.pdist(X))
D[D==0] = np.inf
D = | np.round(D, decimals=precision) | numpy.round |
# -*- coding: utf-8 -*-
"""
utils
~~~~~
Utility functions.
"""
import numpy as np
def vec_to_beta(vec, d):
"""Convert vector into correlated model beta structure.
Parameters
----------
vec : array_like
Vector that needs to be converted.
d : array_like
Number of covariates for each parameter and outcome. Come from
correlated model.
Returns
-------
:obj: `list` of :obj: `list` of :obj: `numpy.ndarray`
`beta` structure in the correlated model.
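Notes
-----
Added illustration (assumed, not from the original source): if
d = [[2, 3], [1, 4]] then np.sum(d, axis=1) gives [5, 5] covariates per
row, and a vec of length 10 would presumably be split into blocks of
sizes 2, 3, 1 and 4 to form the nested beta structure described above.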
"""
num_covs = | np.sum(d, axis=1) | numpy.sum |
""" PatternDetector: train classifier to find pattern occurrences in time series.
"""
# Authors: <NAME>, 2018.
import sys, os, time
import numpy as np
import pandas as pd
import scipy.stats as sps
from datetime import datetime
from collections import Counter
from tqdm import tqdm
from dtaidistance import dtw
from sklearn.cluster import SpectralClustering
from sklearn.preprocessing import StandardScaler, MinMaxScaler, RobustScaler
from .utils.validation import check_time_series
# -------------
# CLASSES
# -------------
class FingerprintDetector():
""" Find occurrences of a pattern in the time series """
def __init__(self,
features='all',
n_clusters=10,
warping_width=0.1,
alpha=0.5,
detector_type='dtw', # 'dtw', 'feature'
tol=1e-8, verbose=False):
# class parameters
self.features = str(features)
self.n_clusters = int(n_clusters)
self.warping_width = float(warping_width)
self.alpha = float(alpha)
self.detector_type = str(detector_type)
self.tol = float(tol)
self.verbose = bool(verbose)
def fit_predict_fingerprints(self, timestamps, time_series, y):
""" Find all pattern occurrences in the time series
:param timestamps : np.array(), shape (n_samples)
The timestamps of the time series, datetime.datetime objects.
:param time_series : np.array(), shape (n_samples, n_variables)
The measured time series data.
:param y : np.array(), shape (n_samples)
Indicates the user-labeled y of a pattern (0, 1).
:returns exact_locations : np.array(), shape (n_samples)
Exact locations of each pattern: 1 = pattern, 0 = no pattern.
"""
# TODO: make better, this is just to save time on the feature construction
self.fit_fingerprints(timestamps, time_series, y)
return self.exact_locations
def fit_fingerprints(self, timestamps, time_series, y):
""" Fit the model using the time series data.
:param timestamps : np.array(), shape (n_samples)
The timestamps of the time series, datetime.datetime objects.
:param time_series : np.array(), shape (n_samples, n_variables)
The measured time series data.
:param y : np.array(), shape (n_samples)
Indicates the user-labeled y of a pattern (0, 1).
:returns self : object
"""
times, ts, y = check_time_series(timestamps, time_series, y)
n_samples = len(ts)
# 1. determine the exact locations of the known patterns + window size
t = time.time()
ranges = self._find_pattern_ranges(y)
user_labeled_ranges = ranges.copy()
self._average_length = np.mean([len(r) for r in user_labeled_ranges])
self._average_deviation = np.std([len(r) for r in user_labeled_ranges])
patterns = np.array([time_series[ixs] for ixs in ranges])
self.w_size = self._find_window_size(ranges)
# 2. find the shape templates: the raw patterns or feature vectors
if self.detector_type == 'dtw':
# also normalize the patterns
self.shape_templates = []
for i, p in enumerate(patterns):
m, s = np.mean(p), np.std(p)
if s == 0.0:
self.shape_templates.append(p)
else:
self.shape_templates.append((p - m) / s)
self.shape_templates = np.array(self.shape_templates)
elif self.detector_type == 'feature':
# the shape templates are feature vectors
self.pattern_templates = self._find_shape_templates(patterns)
features = self._construct_features_and_labels(times, ts, self.pattern_templates, ranges, labels=False)
self.scaler = StandardScaler()
features = self.scaler.fit_transform(features)
""" TODO: this is a bit ad-hoc! """
labels = np.array([ix[0] for ix in ranges])
self.shape_templates = features[labels, :].copy()
else:
pass
# 3. apply the shape templates to find determine the detection threshold
self.max_threshold = self._determine_fingerprint_threshold(self.shape_templates)
# 4. detect the remaining occurrences using the threshold
if self.detector_type == 'dtw':
segments = np.zeros((int(n_samples-self.w_size)+1, int(self.w_size)), dtype=float)
for i in range(int(n_samples-self.w_size)+1):
segment = ts[i:int(i+self.w_size)]
segments[i, :] = segment
scaler = StandardScaler()
segments = scaler.fit_transform(segments.T).T
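# Added note (hedged): fitting the scaler on segments.T and transposing back
# z-scores each sliding window individually (zero mean / unit variance per row),
# mirroring the per-pattern normalisation applied to the shape templates above.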
elif self.detector_type == 'feature':
segments = features
else:
pass
self.exact_locations = self._find_exact_fingerprint_locations(segments, n_samples, self.w_size, self.shape_templates)
""" FIX: correction if it does not find all labeled ranges """
ix_found = np.where(self.exact_locations > 0.0)[0]
if len(ix_found) < len(user_labeled_ranges):
print('The PatternDetector does not pick up on all given occurrences, only:', len(ix_found), '/', len(user_labeled_ranges))
for r in user_labeled_ranges:
ixr = int((r[-1] + r[0]) / 2)
self.exact_locations[ixr] = 1.0
return self
def predict_fingerprints(self, timestamps, time_series):
""" Compute the anomaly score + predict the label of instances in X.
:returns exact_locations : np.array(), shape (n_samples)
Exact locations of each pattern: 1 = pattern, 0 = no pattern.
"""
times, ts, _ = check_time_series(timestamps, time_series, None)
n_samples = len(ts)
# 1. construct the feature vectors (if needed)
if self.detector_type == 'dtw':
segments = np.zeros((int(n_samples-self.w_size)+1, int(self.w_size)), dtype=float)
for i in range(int(n_samples-self.w_size)+1):
segment = ts[i:int(i+self.w_size)]
segments[i, :] = segment
scaler = StandardScaler()
segments = scaler.fit_transform(segments.T).T
elif self.detector_type == 'feature':
features = self._construct_features_and_labels(times, ts, self.pattern_templates, labels=False)
segments = self.scaler.transform(features)
else:
pass
# 2. predict the occurrences
exact_locations = self._find_exact_fingerprint_locations(segments, n_samples, self.w_size, self.shape_templates)
return exact_locations
def _determine_fingerprint_threshold(self, shapes):
""" Determine the max threshold """
thresholds = []
for i, s1 in enumerate(shapes):
for j, s2 in enumerate(shapes):
if i == j:
continue
else:
if self.detector_type == 'dtw':
d = dtw.distance(s1, s2, use_c=True, window=int(self.warping_width * self.w_size))
elif self.detector_type == 'feature':
d = np.linalg.norm(s1 - s2)
else:
pass
thresholds.append(d)
""" TODO: ad-hoc trimming of outliers """
thresholds = np.sort(np.array(thresholds))
return np.amax(thresholds[:int(0.9 * len(thresholds))])
def _find_exact_fingerprint_locations(self, segments, n, w, shapes):
""" Find the exact locations """
ns = len(segments)
w = int(w)
w1 = int(w / 2) - 1
w2 = w - w1 - 1
# 1. fingerprint detection
pattern_locations = np.zeros(n, dtype=float)
for _, shape in tqdm(enumerate(shapes), disable=not(self.verbose)):
# distance between each segment and a shape (sliding window with w_increment = 1)
dists = np.zeros(n, dtype=float)
for i in range(ns):
v = segments[i, :]
if self.detector_type == 'dtw':
dists[i] = dtw.distance(shape, v, use_c=True, window=int(self.warping_width * w))
elif self.detector_type == 'feature':
dists[i] = np.linalg.norm(shape - v)
else:
pass
new_locations = np.zeros(n, dtype=float)
new_locations[dists < self.max_threshold] = 1.0
pattern_locations += new_locations
pattern_locations = np.minimum(pattern_locations, 1.0)
""" FIX: it is possible that the threshold is too high!!! and everything is considered to be the pattern """
""" THIS IS NOT FOOL-PROOF: it could be that the thresholds on the IF-tests are not strong enough """
correction = False
pattern_ranges = self._find_pattern_ranges(pattern_locations)
if len(pattern_ranges) == 1 and len(pattern_ranges[0]) > self._average_length * 10: # HIGHLY suspicious
correction = True
if abs(len(np.where(pattern_locations > 0.0)[0]) - ns) < self._average_length:
correction = True
if correction:
# no patterns found because it is NOT discriminative enough
exact_locations = np.zeros(n, dtype=float)
return exact_locations
# 2. only keep the center occurrence if multiple detected
range_ixs = np.array([int((ix[0] + ix[-1]) / 2) for ix in pattern_ranges])
remove = []
for i, ix in enumerate(range_ixs):
if i > 1:
if ix - prev_ix < w:
remove.append(ix)
continue
prev_ix = ix
remove = np.array(remove)
locs = np.setdiff1d(range_ixs, remove)
# 3. exact locations + shift with window size / 2
exact_locations = np.zeros(n, dtype=float)
exact_locations[locs] = 1.0
exact_locations = np.pad(exact_locations[:-w+1], (w1, w2), 'constant', constant_values=(0.0, 0.0))
return exact_locations
def _construct_features_and_labels(self, times, ts, shape_templates, ranges=None, labels=True):
""" Construct the feature vectors and labels.
:returns features : array, shape (n_segments, n_features)
Feature vectors constructed from the the time series.
:returns labels : array, shape (n_segments)
Labels for the constructed segments.
"""
# construct the segments
n = len(ts)
segments = np.zeros((int(n-self.w_size)+1, int(self.w_size)), dtype=float)
for i in range(int(n-self.w_size)+1):
segment = ts[i:int(i+self.w_size)]
segments[i, :] = segment
# construct feature vectors and labels
features = self._construct_feature_matrix(segments, times, int(n-self.w_size)+1, shape_templates)
if labels:
labels = self._construct_labeling(ranges, int(n-self.w_size)+1)
return features, labels
else:
return features
def _construct_labeling(self, ranges, n):
""" Construct labels for the segments. Rules:
1. if contained in labeled segment: give it that label
2. if not contained but overlapping: ignore later on
3. if not contained and not overlapping: unlabeled
:returns labels : array, shape (n_segments)
Labels for the constructed segments.
"""
# we ignore all segments that contain only a part of the pattern unless they are fully overlapped by the pattern
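# Worked example (added, hedged): with w_size = 4 and a pattern labelled on
# indices 10-17, segments starting at 10..14 lie fully inside the pattern and get
# label 1, segments starting at 7..9 or 15..17 overlap it only partially and get
# label -1 (ignored later), and all remaining segments keep label 0.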
labeling = np.zeros(n, dtype=float)
pattern_locations = [[ixs[0], ixs[-1]+1] for ixs in ranges]
for _, v in enumerate(pattern_locations):
"""FIX: does not work when the final pattern is too close to the end of the series """
b = v[1] - self.w_size
if b < v[0]:
# pattern is shorter than w_size: every segment fully containing the pattern is pos
pos = np.arange(b, v[0]+1, 1)
ign = np.concatenate((np.arange(v[0]-self.w_size+1, b, 1),
np.arange(v[0]+1, v[1], 1)))
else:
# pattern is longer than w_size: every segment fully contained in the pattern is pos
pos = np.arange(v[0], v[1]-self.w_size+1, 1)
ign = np.concatenate((np.arange(v[0]-self.w_size+1, v[0], 1),
np.arange(v[1]-self.w_size+1, v[1], 1)))
# annotate
labeling[pos.astype(int)] = 1.0
labeling[ign.astype(int)] = -1.0
return labeling
def _construct_feature_matrix(self, segments, times, n, shape_templates):
""" Construct the feature vectors.
:returns features : array, shape (n_segments, n_features)
Feature vectors constructed from the the time series.
"""
if self.features == 'all':
use_features = ['stat', 'time', 'shape']
elif self.features == 'stat_time':
use_features = ['stat', 'time']
elif self.features == 'stat_shape':
use_features = ['stat', 'shape']
elif self.features == 'time_shape':
use_features = ['time', 'shape']
else:
use_features = [self.features] # stat, time, shape
# summary statistics
if 'stat' in use_features:
if self.verbose: print('\tconstructing statistics...')
avg = pd.Series(np.mean(segments, axis=1))
std = pd.Series(np.std(segments, axis=1))
vari = pd.Series(np.var(segments, axis=1))
maxi = pd.Series(np.amax(segments, axis=1))
mini = pd.Series(np.amin(segments, axis=1))
med = pd.Series(np.median(segments, axis=1))
tsum = pd.Series(np.sum(segments, axis=1))
skew = pd.Series(sps.describe(segments, axis=1, bias=False).skewness)
kurt = pd.Series(sps.describe(segments, axis=1, bias=False).kurtosis)
# time features
if 'time' in use_features:
if self.verbose: print('\tconstructing time features...')
time_stamps = np.array([ts.hour for ts in times[:n]])
xhr = pd.Series(np.sin(2 * np.pi * time_stamps / 24))
yhr = pd.Series(np.cos(2 * np.pi * time_stamps / 24))
# shape features
if 'shape' in use_features:
# find nearest euclid function
def _find_nearest_euclid(a, b):
if len(a) >= len(b):
short = b
long = a
else:
short = a
long = b
n1, n2 = len(long), len(short)
d = np.zeros(n1-n2+1, dtype=float)
for i in range(n1-n2+1):
d[i] = np.linalg.norm(long[i:i+n2] - short)
return | np.amin(d) | numpy.amin |
# -*- coding: utf-8 -*-
"""Script to show text from DeepOBS text datasets."""
import os
import sys
import pickle
import numpy as np
import tensorflow as tf
import matplotlib.pyplot as plt
sys.path.insert(
0,
os.path.dirname(
os.path.dirname(os.path.dirname(os.path.abspath(__file__)))
),
)
from deepobs.tensorflow import datasets
import deepobs.config as config
def display_text(dataset_cls, grid_size=5, phase="train"):
"""Display text from a DeepOBS text dataset.
Args:
dataset_cls: The DeepOBS dataset class to display text from. Is assumed to
yield a tuple (x, y) of input and output text.
grid_size (int): Number of rows and columns in the grid of displayed snippets.
phase (str): Text from this phase ('train', 'train_eval', 'valid', 'test') will be
displayed.
"""
tf.reset_default_graph()
dataset = dataset_cls(batch_size=grid_size * grid_size)
x, y = dataset.batch
if phase == "train":
init_op = dataset.train_init_op
elif phase == "train_eval":
init_op = dataset.train_eval_init_op
elif phase == "valid":
init_op = dataset.valid_init_op
elif phase == "test":
init_op = dataset.test_init_op
else:
raise ValueError(
"Choose 'phase' from ['train', 'train_eval', 'valid', 'test']."
)
with tf.Session() as sess:
sess.run(init_op)
x_, y_ = sess.run([x, y])
x_next, y_next = sess.run([x, y]) # Next batch, will be plotted in red
label_dict = load_label_dict(dataset_cls.__name__)
fig = plt.figure()
for i in range(grid_size * grid_size):
axis = fig.add_subplot(grid_size, grid_size, i + 1)
input_txt = "".join([label_dict[char] for char in np.squeeze(x_[i])])
output_txt = "".join([label_dict[char] for char in | np.squeeze(y_[i]) | numpy.squeeze |
from __future__ import absolute_import
from __future__ import division
from __future__ import print_function
import numpy as np
from .analysis_gauss_numerical_integration import gauss_numerical_integration
from .exoplanet_orbit import exoplanet_orbit
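# Background note (added, hedged): the claret integrals below appear to implement
# the four-parameter limb-darkening law
#   I(mu)/I(1) = 1 - a1*(1 - mu**0.5) - a2*(1 - mu) - a3*(1 - mu**1.5) - a4*(1 - mu**2)
# with mu = sqrt(1 - r**2); each integral_r_* gives the analytic radial integral of
# I(r)*r and each integral_r_f_* the numerically integrated overlap term for a transit.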
def integral_r_claret(limb_darkening_coefficients, r):
a1, a2, a3, a4 = limb_darkening_coefficients
mu44 = 1.0 - r * r
mu24 = np.sqrt(mu44)
mu14 = np.sqrt(mu24)
return - (2.0 * (1.0 - a1 - a2 - a3 - a4) / 4) * mu44 \
- (2.0 * a1 / 5) * mu44 * mu14 \
- (2.0 * a2 / 6) * mu44 * mu24 \
- (2.0 * a3 / 7) * mu44 * mu24 * mu14 \
- (2.0 * a4 / 8) * mu44 * mu44
def num_claret(r, limb_darkening_coefficients, rprs, z):
a1, a2, a3, a4 = limb_darkening_coefficients
rsq = r * r
mu44 = 1.0 - rsq
mu24 = np.sqrt(mu44)
mu14 = np.sqrt(mu24)
return ((1.0 - a1 - a2 - a3 - a4) + a1 * mu14 + a2 * mu24 + a3 * mu24 * mu14 + a4 * mu44) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_claret(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_claret, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for zero method
def integral_r_zero(limb_darkening_coefficients, r):
musq = 1 - r * r
return (-1.0 / 6) * musq * 3.0
def num_zero(r, limb_darkening_coefficients, rprs, z):
rsq = r * r
return r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_zero(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_zero, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for linear method
def integral_r_linear(limb_darkening_coefficients, r):
a1 = limb_darkening_coefficients[0]
musq = 1 - r * r
return (-1.0 / 6) * musq * (3.0 + a1 * (-3.0 + 2.0 * np.sqrt(musq)))
def num_linear(r, limb_darkening_coefficients, rprs, z):
a1 = limb_darkening_coefficients[0]
rsq = r * r
return (1.0 - a1 * (1.0 - np.sqrt(1.0 - rsq))) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_linear(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_linear, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for quadratic method
def integral_r_quad(limb_darkening_coefficients, r):
a1, a2 = limb_darkening_coefficients[:2]
musq = 1 - r * r
mu = np.sqrt(musq)
return (1.0 / 12) * (-4.0 * (a1 + 2.0 * a2) * mu * musq + 6.0 * (-1 + a1 + a2) * musq + 3.0 * a2 * musq * musq)
def num_quad(r, limb_darkening_coefficients, rprs, z):
a1, a2 = limb_darkening_coefficients[:2]
rsq = r * r
cc = 1.0 - np.sqrt(1.0 - rsq)
return (1.0 - a1 * cc - a2 * cc * cc) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_quad(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_quad, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# integral definitions for square root method
def integral_r_sqrt(limb_darkening_coefficients, r):
a1, a2 = limb_darkening_coefficients[:2]
musq = 1 - r * r
mu = np.sqrt(musq)
return ((-2.0 / 5) * a2 * np.sqrt(mu) - (1.0 / 3) * a1 * mu + (1.0 / 2) * (-1 + a1 + a2)) * musq
def num_sqrt(r, limb_darkening_coefficients, rprs, z):
a1, a2 = limb_darkening_coefficients[:2]
rsq = r * r
mu = np.sqrt(1.0 - rsq)
return (1.0 - a1 * (1 - mu) - a2 * (1.0 - np.sqrt(mu))) \
* r * np.arccos(np.minimum((-rprs ** 2 + z * z + rsq) / (2.0 * z * r), 1.0))
def integral_r_f_sqrt(limb_darkening_coefficients, rprs, z, r1, r2, precision=3):
return gauss_numerical_integration(num_sqrt, r1, r2, precision, limb_darkening_coefficients, rprs, z)
# dictionaries containing the different methods,
# if you define a new method, include the functions in the dictionary as well
integral_r = {
'claret': integral_r_claret,
'linear': integral_r_linear,
'quad': integral_r_quad,
'sqrt': integral_r_sqrt,
'zero': integral_r_zero
}
integral_r_f = {
'claret': integral_r_f_claret,
'linear': integral_r_f_linear,
'quad': integral_r_f_quad,
'sqrt': integral_r_f_sqrt,
'zero': integral_r_f_zero,
}
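# Illustrative sketch (added; 'mymethod' is a hypothetical name, not part of this
# module): a new limb-darkening law would be registered as
#   integral_r['mymethod'] = integral_r_mymethod
#   integral_r_f['mymethod'] = integral_r_f_mymethod
# using the same (coefficients, r) and (coefficients, rprs, z, r1, r2, precision)
# signatures as the functions above.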
def integral_centred(method, limb_darkening_coefficients, rprs, ww1, ww2):
return (integral_r[method](limb_darkening_coefficients, rprs)
- integral_r[method](limb_darkening_coefficients, 0.0)) * np.abs(ww2 - ww1)
def integral_plus_core(method, limb_darkening_coefficients, rprs, z, ww1, ww2, precision=3):
if len(z) == 0:
return z
rr1 = z * np.cos(ww1) + np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww1)) ** 2, 0))
rr1 = np.clip(rr1, 0, 1)
rr2 = z * np.cos(ww2) + np.sqrt(np.maximum(rprs ** 2 - (z * np.sin(ww2)) ** 2, 0))
rr2 = np.clip(rr2, 0, 1)
w1 = np.minimum(ww1, ww2)
r1 = np.minimum(rr1, rr2)
w2 = np.maximum(ww1, ww2)
r2 = np.maximum(rr1, rr2)
parta = integral_r[method](limb_darkening_coefficients, 0.0) * (w1 - w2)
partb = integral_r[method](limb_darkening_coefficients, r1) * w2
partc = integral_r[method](limb_darkening_coefficients, r2) * (-w1)
partd = integral_r_f[method](limb_darkening_coefficients, rprs, z, r1, r2, precision=precision)
return parta + partb + partc + partd
def integral_minus_core(method, limb_darkening_coefficients, rprs, z, ww1, ww2, precision=3):
if len(z) == 0:
return z
rr1 = z * | np.cos(ww1) | numpy.cos |
"""Utilities to estimate and evaluate Chebyshev coefficients of a function.
Implementation of Newhall, <NAME>. 1989, Celestial Mechanics, 45, p. 305-310
"""
import numpy as np
__all__ = ['chebeval', 'chebfit', 'makeChebMatrix', 'makeChebMatrixOnlyX']
# Evaluation routine.
def chebeval(x, p, interval=(-1., 1.), doVelocity=True, mask=False):
"""Evaluate a Chebyshev series and first derivative at points x.
If p is of length n + 1, this function returns:
y_hat(x) = p_0 * T_0(x*) + p_1 * T_1(x*) + ... + p_n * T_n(x*)
where T_n(x*) are the orthogonal Chebyshev polynomials of the
first kind, defined on the interval [-1, 1] and p_n are the
coefficients. The scaled variable x* is defined on the [-1, 1]
interval such that (x*) = (2*x - a - b)/(b - a), and x is defined
on the [a, b] interval.
Parameters
----------
x: scalar or numpy.ndarray
Points at which to evaluate the polynomial.
p: numpy.ndarray
Chebyshev polynomial coefficients, as returned by chebfit.
interval: 2-element list/tuple
Bounds the x-interval on which the Chebyshev coefficients were fit.
doVelocity: bool
If True, compute the first derivative at points x.
mask: bool
If True, return Nans when the x goes beyond 'interval'.
If False, extrapolate fit beyond 'interval' limits.
Returns
-------
scalar or numpy.ndarray, scalar or numpy.ndarray
Y (position) and velocity values (if computed)
"""
if len(interval) != 2:
raise RuntimeError("interval must have length 2")
intervalBegin = float(interval[0])
intervalEnd = float(interval[-1])
t = 2. * np.array(x, dtype=np.float64) - intervalBegin - intervalEnd
t /= intervalEnd - intervalBegin
y = 0.
v = 0.
y0 = np.ones_like(t)
y1 = t
v0 = np.zeros_like(t)
v1 = np.ones_like(t)
v2 = 4. * t
t = 2. * t
N = len(p)
if doVelocity:
for i in np.arange(0, N, 2):
if i == N - 1:
y1 = 0.
v1 = 0.
j = min(i + 1, N - 1)
y += p[i] * y0 + p[j] * y1
v += p[i] * v0 + p[j] * v1
y2 = t * y1 - y0
y3 = t * y2 - y1
v2 = t * v1 - v0 + 2 * y1
v3 = t * v2 - v1 + 2 * y2
y0 = y2
y1 = y3
v0 = v2
v1 = v3
if mask:
mask = np.where((x < intervalBegin) | (x > intervalEnd), True, False)
y = np.where(mask, np.nan, y)
v = | np.where(mask, np.nan, v) | numpy.where |
import numpy as np
import gym
from gym import spaces
import math
MAX_MARCH = 20
EPSILON = 0.1
DEG_TO_RAD = 0.0174533
WINDOW_SIZE = (200, 300) # Width x Height in pixels
def generate_box(pos=None, size=[10, 25], inside_window=True, color=(255, 255, 255), is_goal=False):
'''
Generate a box with width and height drawn randomly uniformly from size[0] to size[1]
if inside_window is True, we force the box to stay inside the window
'''
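# Hedged illustration (added): with the default size=[10, 25], both width and height
# are drawn uniformly from [10, 25] pixels, e.g. np.random.uniform([10, 10], [25, 25])
# might return array([17.3, 21.9]); pos, color and is_goal configure the rest of the
# box as documented above.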
box_size = | np.random.uniform([size[0], size[0]], [size[1], size[1]]) | numpy.random.uniform |
import numpy as np
import pandas as pd
from collections import Counter
from copy import deepcopy
from queue import Queue
from math import floor, log
from random import randint
from scipy import stats
from sklearn.base import ClassifierMixin
class RandomForest(ClassifierMixin):
def __init__(self, num_trees=10, d=5):
self.num_trees = num_trees
self.d = d
def fit(self, X, y):
self.trees = []
X, y = np.array(X), np.array(y)
shuffle_idx = np.array(range(0, y.size))
np.random.shuffle(shuffle_idx)
X_s, y_s = X[shuffle_idx], y[shuffle_idx]
bag_size = floor(X_s.shape[0]/self.num_trees)
# Bag data and train trees
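# (added note, hedged: each tree receives a disjoint, equal-size slice of the
# shuffled data rather than a bootstrap resample)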
for t in range(0, self.num_trees):
start, end = t*bag_size, min((t + 1)*bag_size, y.size)
X_b, y_b = X_s[start: end], y_s[start: end]
self.trees.append(DecisionTree(self.d))
self.trees[t].fit(X_b, y_b)
def predict(self, X):
votes = [self.trees[t].predict(X) for t in range(0, self.num_trees)]
predictions = stats.mode(np.array(votes))[0][0]
return predictions
class DecisionTree(ClassifierMixin):
def __init__(self, d=None):
self.tree = {}
self.nodes = 0
self.d = d
def fit(self, X, y):
queue = Queue()
queue.put([np.array(X), np.array(y), self.tree])
while not queue.empty():
[X_full, y, subtree] = queue.get()
if self.d is None:
X = X_full
else:
subset_idx = np.array(range(0, X_full.shape[1]))
# Remove attributes randomly
while subset_idx.size > self.d:
subset_idx = np.delete(subset_idx, randint(0, subset_idx.size - 1))
X = X_full[:, subset_idx]
gain_ratios = [self.gain_ratio(x, y) for x in X.T]
max_idx = np.argmax(np.array(gain_ratios))
# If no information, store proportion of y
if len(X) == 0 or gain_ratios[max_idx] <= 0:
subtree['col'] = -1
subtree['p'], subtree['label'] = self.p(y)
continue
x = X[:, max_idx]
categories = set(x)
# If information gain is positive, column & branches of largest gain
subtree['col'] = max_idx
subtree['branches'] = {}
subtree['p'], subtree['label'] = self.p(y)
for category in categories:
rows_c = np.where(x == category)
x_c, y_c = x[rows_c], y[rows_c]
subtree_c = {}
subtree['branches'][category] = subtree_c
# X_c = np.delete(X[rows_c], max_idx, axis=1)
X_c = X_full[rows_c]
self.nodes += 1
queue.put([X_c, y_c, subtree_c])
def prune(self, X, y):
if len(self.tree) == 0:
print("Classifier has not been trained / Classifier's tree is empty")
should_continue = True
iteration = 1
while should_continue:
tree_copy = deepcopy(self.tree)
scores = []
leaves = self.find_leaves()
if len(leaves) == 0:
should_continue = False
continue
for [parent, child] in leaves:
col = parent['col']
parent['col'] = -1
scores.append(self.score(X, y))
parent['col'] = col
# Get current accuracy
scores = np.array(scores)
max_idx = np.argmax(scores)
if scores[max_idx] >= self.score(X, y):
[parent, child] = leaves[max_idx]
parent['col'] = -1
parent['branches'] = {}
else:
should_continue = False
print('Iteration ', iteration, ' is complete')
iteration += 1
return self.score(X, y)
def find_leaves(self):
queue = Queue()
queue.put([None, self.tree])
leaves = []
while not queue.empty():
[parent, child] = queue.get()
if child['col'] > -1:
for bkey, branch in child['branches'].items():
queue.put([child, branch])
else:
if parent is not None:
leaves.append([parent, child])
return leaves
def predict(self, X):
X = np.array(X)
predictions = []
if X.ndim == 1:
X = np.array([X])
for x in X:
tree = self.tree
while tree['col'] > -1:
category = x[tree['col']]
if category in tree['branches']:
tree = tree['branches'][category]
else:
break
predictions.append(tree['label'])
return predictions
def gain_ratio(self, x, y):
# if x.dtype.kind in set('buifc'):
categories = Counter(x)
if len(categories) == 1:
return 0
data_len = len(x)
gain = self.entropy(self.p(y, array=True)) # information gain
iv = 0 #intrinsic value
for category, count in categories.items():
y_c = y[ | np.where(x == category) | numpy.where |
from skimage.feature import greycoprops,greycomatrix
import matplotlib.image as mpimg
import pandas as pd
import numpy as np
import glob
import SimpleITK as sitk
import scipy.misc as sci
# Returns all column names with patch NO., distance and angles
def getGLCMColumnNames(patch=[1,2,3,4,5],distances = [1,3,5],angles=[0,np.pi/4.0,np.pi/2.0, 3*np.pi/4.0]):
glcm_columns = []
for i in range(len(patch)):
for j in range(len(distances)):
for k in range(len(angles)):
glcm_columns.append('GlCM_CONTRAST_{}_{}_{}'.format(patch[i],distances[j],angles[k]))
glcm_columns.append('GlCM_DISSIMILARITY_{}_{}_{}'.format(patch[i],distances[j],angles[k]))
glcm_columns.append('GlCM_HOMOGENEITY_{}_{}_{}'.format(patch[i],distances[j],angles[k]))
glcm_columns.append('GlCM_ASM_{}_{}_{}'.format(patch[i],distances[j],angles[k]))
glcm_columns.append('GlCM_ENERGY_{}_{}_{}'.format(patch[i],distances[j],angles[k]))
glcm_columns.append('GlCM_CORRELATION_{}_{}_{}'.format(patch[i],distances[j],angles[k]))
return glcm_columns
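# Added note (hedged): with the defaults (5 patches x 3 distances x 4 angles x
# 6 GLCM properties) this yields 5*3*4*6 = 360 column names, matching the length
# of the feature vector produced by getGLCMFeatures below.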
# returns the FeatureVector for all distance and angles
def getGLCMFeatures(image, distances=[1,3,5], angles=[0, np.pi / 4.0, np.pi / 2.0, 3 * np.pi / 4.0]):
glcm_feature_vector = []
x = 0
for i in range(0,5):
patch = image[x:x+20][:]
x+=20
glcm = greycomatrix(patch, distances, angles, 2, symmetric=True, normed=True)
for j in range(len(distances)):
for k in range(len(angles)):
contrast = float(greycoprops(glcm, 'contrast')[j, k])
glcm_feature_vector.append(contrast)
dissimilarity = greycoprops(glcm, 'dissimilarity')[j, k]
glcm_feature_vector.append(dissimilarity)
homogeneity = greycoprops(glcm, 'homogeneity')[j, k]
glcm_feature_vector.append(homogeneity)
ASM = greycoprops(glcm, 'ASM')[j, k]
glcm_feature_vector.append(ASM)
energy = greycoprops(glcm, 'energy')[j, k]
glcm_feature_vector.append(energy)
correlation = greycoprops(glcm, 'correlation')[j, k]
glcm_feature_vector.append(correlation)
glcm_feature_vector = np.array(glcm_feature_vector)
return glcm_feature_vector
def preprocess(img):
s_image = sitk.GetImageFromArray(img)
#Inversion and threshold
if(np.max(img) <= 1):
inverted = sitk.InvertIntensity(s_image, maximum=1)
thresh = sitk.BinaryThreshold(inverted, 0.5, 1)
elif( | np.max(img) | numpy.max |
from __future__ import unicode_literals
import Levenshtein
import numpy as np
def representative_sampling(words, k):
dist = distances(words)
medoids, _ = best_of(dist, k)
for m in medoids:
yield words[m]
def distances(words):
# symmetry is wasted
dist = Levenshtein.compare_lists(words, words, 0.0, 0)
return dist
def k_medoids(dist, k, tmax=100):
m, n = dist.shape
# randomly initialize an array of k medoid indices
medoids = np.arange(n)
np.random.shuffle(medoids)
medoids = medoids[:k]
medoids_old = np.copy(medoids)
clusters = {}
for t in xrange(tmax):
# determine clusters, i.e. arrays of data indices
J = | np.argmin(dist[:, medoids], axis=1) | numpy.argmin |
#!/usr/bin/env python
#This code is to plot the result from ImpactZ
#Input : fort.xx
#Output: figures about beam size and emittance
# plots are saved at '/post'
import matplotlib
matplotlib.use('TkAgg')
import matplotlib.pyplot as plt
import tkinter as tk
from tkinter import ttk,filedialog
import time,os,sys
from matplotlib.backends.backend_tkagg import FigureCanvasTkAgg, NavigationToolbar2TkAgg
from matplotlib.figure import Figure
from matplotlib.ticker import MultipleLocator, FormatStrFormatter
from scipy.stats import gaussian_kde
import numpy as np
import ParticlePlot, SlicePlot
_height=300
_width =200
IMPACT_T_ADVANCED_PLOT_TYPE= {'Centroid location (mm)' :2,
'Rms size (mm)' :3,
'Centroid momentum (MC)' :4,
'Rms momentum (MC)' :5,
'Twiss' :6,
'Emittance (mm-mrad)' :7}
IMPACT_T_SciFormatter = FormatStrFormatter('%2.1E')
IMPACT_T_sciMaxLimit = 99999 *2
IMPACT_T_sciMinLimit = 0.0001*2
class AdvancedPlotControlFrame(tk.Toplevel):
"""Output"""
def __init__(self, master=None, cnf={}, **kw):
tk.Toplevel.__init__(self, master, cnf, **kw)
self.title('ImpactT Plot')
self.focus_set()
"""Plot Control"""
self.frame_plotButton = tk.Frame(self)
self.frame_plotButton.grid(column=0, row = 0, pady=5 ,padx=10, sticky="we")
self.frame_radio = tk.Frame(self.frame_plotButton)
self.frame_radio.pack(side='top')
self.plotDirct = tk.IntVar()
self.plotDirct.set(0)
self.frame_radio.x = tk.Radiobutton(self.frame_radio, variable=self.plotDirct,
text="X", value=0)
self.frame_radio.x.pack(side='left')
self.frame_radio.y = tk.Radiobutton(self.frame_radio, variable=self.plotDirct,
text="Y", value=1)
self.frame_radio.y.pack(side='left')
self.frame_radio.z = tk.Radiobutton(self.frame_radio, variable=self.plotDirct,
text="Z", value=2)
self.frame_radio.z.pack(side='left')
self.plotTypeComx = tk.StringVar(self.frame_plotButton,'Rms size (mm)')
self.plotType = ttk.Combobox(self.frame_plotButton,text=self.plotTypeComx,
width = 20,
values=list(IMPACT_T_ADVANCED_PLOT_TYPE.keys()))
self.plotType.pack(side = 'top')
self.plot = tk.Button(self.frame_plotButton,text='plot',command=self.makePlot)
self.plot.pack(fill = 'both',expand =1,side = 'top',padx=10)
self.t = ttk.Separator(self, orient=tk.HORIZONTAL).grid(column=0, row = 1, sticky="we")
self.frame2 = tk.Frame(self, height =_height/5, width = _width)
self.frame2.grid(column=0, row = 2, pady=5 ,padx=10, sticky="nswe")
rowN=0
self.button_overall = tk.Button(self.frame2,text='Overall',
command = self.overallPlot)
self.button_overall.grid(row = rowN, column=0, pady=5 ,padx=5, columnspan = 2, sticky="nswe")
rowN+=1
self.button_emitGrowth = tk.Button(self.frame2,text='EmitGrowth',
command = self.emitGrowthPlot)
self.button_emitGrowth .grid(row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_Ek = tk.Button(self.frame2,text='Kinetic Energy',
command = lambda: self.energyPlot(3,'Kinetic Energy (MeV)'))
self.button_Ek .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
'''
self.button_beta = tk.Button(self.frame2,text='Beta',
command = lambda: self.energyPlot(4,'Beta'))
self.button_beta .grid(row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_gamma = tk.Button(self.frame2,text='Gamma',
command = lambda: self.energyPlot(2,'Gamma'))
self.button_gamma .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
'''
self.button_rmax = tk.Button(self.frame2,text='Rmax',
command = lambda: self.energyPlot(5,'Rmax (mm)'))
self.button_rmax .grid(row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_dw = tk.Button(self.frame2,text='Rms delta E',
command = lambda: self.energyPlot(6,'Rms delta E (MC^2)'))
self.button_dw .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
self.button_Temperature = tk.Button(self.frame2,text='Temperature Plot',
command = self.makeTemperaturePlot)
self.button_Temperature .grid(row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_Loss = tk.Button(self.frame2,text='live Particle #',
command = self.liveParticlePlot)
self.button_Loss .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
self.t = ttk.Separator(self.frame2, orient=tk.HORIZONTAL).grid(column=0, row = rowN, columnspan=2,sticky="we")
rowN+=1
self.max = tk.Button(self.frame2,text='Max amplitude',
command = self.maxPlot)
self.max .grid(row = rowN, column=0, pady=5 ,padx=5, columnspan=2,sticky="nswe")
rowN+=1
self.button_3order = tk.Button(self.frame2,text='3 order parameter',
command = self.make3orderPlot)
self.button_3order .grid(row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_4order = tk.Button(self.frame2,text='4 order parameter',
command = self.make4orderPlot)
self.button_4order .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
self.t = ttk.Separator(self.frame2, orient=tk.HORIZONTAL).grid(column=0, row = rowN, columnspan=2,sticky="we")
rowN+=1
self.button_Particle = tk.Button(self.frame2,text='Phase Space Plot',
command = self.ParticlePlot)
self.button_Particle .grid(row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_ParticleDesity1D = tk.Button(self.frame2,text='Density1D',
command = self.ParticleDensityPlot1D)
self.button_ParticleDesity1D .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
self.button_ParticleDensity = tk.Button(self.frame2,text='Density2D (by Grid)',
command = self.ParticleDensityPlot)
self.button_ParticleDensity .grid( row = rowN, column=0, pady=5 ,padx=5, sticky="nswe")
self.button_ParticleDensity2 = tk.Button(self.frame2,text='Density2D (by Ptc)',
command = self.ParticleDensityPlot2)
self.button_ParticleDensity2 .grid(row = rowN, column=1, pady=5 ,padx=5, sticky="nswe")
rowN+=1
self.t = ttk.Separator(self.frame2, orient=tk.HORIZONTAL).grid(column=0, row = rowN, columnspan=2,sticky="we")
rowN+=1
self.button_SlicePlot = tk.Button(self.frame2,text='Slice plot',
command = self.SlicePlot)
self.button_SlicePlot .grid( row = rowN, column=0, columnspan=2, pady=5 ,padx=5, sticky="nswe")
rowN+=1
def overallPlot(self):
print(self.__class__.__name__)
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=OverallFrame(plotWindow)
l.pack()
def energyPlot(self,y,ylabel):
print(sys._getframe().f_back.f_code.co_name)
plotWindow = tk.Toplevel(self)
plotWindow.title(sys._getframe().f_back.f_code.co_name)
l=PlotFrame(plotWindow,'fort.18',1,y,ylabel)
l.pack()
def emitGrowthPlot(self):
print(sys._getframe().f_back.f_code.co_name)
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=EmitGrowthFrame(plotWindow)
l.pack()
def makeTemperaturePlot(self):
print((self.plotType))
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=TemperatureFrame(plotWindow)
l.pack()
def liveParticlePlot(self):
print(sys._getframe().f_back.f_code.co_name)
plotWindow = tk.Toplevel(self)
plotWindow.title(sys._getframe().f_back.f_code.co_name)
l=PlotFrame(plotWindow,'fort.28',1,4,'Live particle number')
l.pack()
def ParticlePlot(self):
print(self.__class__.__name__)
filename = filedialog.askopenfilename(parent=self)
try:
t=open(filename)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('Phase Space Plot')
l=ParticlePlot.ParticleFrame(plotWindow,filename,1.0,'ImpactT')
l.pack()
def ParticleDensityPlot(self):
print(self.__class__.__name__)
fileName=filedialog.askopenfilename(parent=self)
try:
t=open(fileName)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=ParticlePlot.ParticleDensityFrame_weight2D(plotWindow,fileName,1.0,'ImpactT')
l.pack()
def ParticleDensityPlot1D(self):
print(self.__class__.__name__)
fileName=filedialog.askopenfilename(parent=self)
try:
t=open(fileName)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=ParticlePlot.ParticleDensityFrame_weight1D(plotWindow,fileName,1.0,'ImpactT')
l.pack()
def ParticleDensityPlot2(self):
print(self.__class__.__name__)
fileName=filedialog.askopenfilename(parent=self)
try:
t=open(fileName)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=ParticlePlot.ParticleDensityFrame2D_slow(plotWindow,fileName,1.0,'ImpactT')
l.pack()
def SlicePlot(self):
fileName=filedialog.askopenfilename(parent=self)
try:
t=open(fileName)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('Slice Plot')
l=SlicePlot.SliceBaseFrame(plotWindow,fileName)
l.pack()
def makePlot(self):
print(self.__class__.__name__)
PlotFileName='fort.'+str(self.plotDirct.get()+24)
yx=IMPACT_T_ADVANCED_PLOT_TYPE[self.plotType.get()]
yl=yx if self.plotDirct.get()!=2 else yx-1
plotWindow = tk.Toplevel(self)
plotWindow.title('Plot')
l=PlotFrame(plotWindow,PlotFileName,1,yl,self.plotType.get())
l.pack()
def maxPlot(self):
print(self.__class__.__name__)
filename = 'fort.27'
try:
t=open(filename)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('maxPlot')
l=PlotMaxFrame(plotWindow,filename)
l.pack()
def make3orderPlot(self):
print(self.__class__.__name__)
filename = 'fort.29'
try:
t=open(filename)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('make3orderPlot')
l=Plot3orderFrame(plotWindow,filename)
l.pack()
def make4orderPlot(self):
print(self.__class__.__name__)
filename = 'fort.30'
try:
t=open(filename)
t.close()
except:
return
plotWindow = tk.Toplevel(self)
plotWindow.title('make4orderPlot')
l=Plot4orderFrame(plotWindow,filename)
l.pack()
class PlotBaseFrame(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.fig = Figure(figsize=(7,5), dpi=100)
self.subfig = self.fig.add_subplot(111)
self.canvas = FigureCanvasTkAgg(self.fig, self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)
self.toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
class PlotFrame(tk.Frame):
def __init__(self, parent,PlotFileName,xl,yl,labelY):
tk.Frame.__init__(self, parent)
#LARGE_FONT= ("Verdana", 12)
#label = tk.Label(self, font=LARGE_FONT,
# text='plot '+PlotFileName+
# ' use '+str(xl)+':'+str(yl))
#label.pack(pady=10,padx=10)
try:
fin = open(PlotFileName,'r')
except:
print(( " ERROR! Can't open file '" + PlotFileName + "'"))
return
linesList = fin.readlines()
fin .close()
linesList = [line.split() for line in linesList ]
x = np.array([float(xrt[xl]) for xrt in linesList])
y = np.array([float(xrt[yl]) for xrt in linesList])
if labelY in ['Centroid location (mm)','Rms size (mm)','Rmax (mm)']:
y = y*1.0e3 # unit convert from m to mm
elif labelY in ['Emittance (mm-mrad)']:
y = y*1.0e6 # unit convert from (m-rad) to (mm-mrad)
fig = Figure(figsize=(7,5), dpi=100)
subfig = fig.add_subplot(111)
subfig.plot(x,y)
subfig.set_xlabel('Z (m)')
subfig.set_ylabel(labelY)
xMax = np.max(x)
xMin = np.min(x)
yMax = np.max(y)
yMin = np.min(y)
if (xMax-xMin)>IMPACT_T_sciMaxLimit or (xMax-xMin)<IMPACT_T_sciMinLimit:
subfig.xaxis.set_major_formatter(IMPACT_T_SciFormatter)
if (yMax-yMin)>IMPACT_T_sciMaxLimit or (yMax-yMin)<IMPACT_T_sciMinLimit:
subfig.yaxis.set_major_formatter(IMPACT_T_SciFormatter)
#xmajorFormatter = FormatStrFormatter('%2.2E')
#subfig.yaxis.set_major_formatter(xmajorFormatter)
box = subfig.get_position()
subfig.set_position([box.x0*1.45, box.y0*1.1, box.width, box.height])
canvas = FigureCanvasTkAgg(fig, self)
canvas.show()
canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
toolbar = NavigationToolbar2TkAgg(canvas, self)
toolbar.update()
canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
def quit(self):
self.destroy()
class OverallFrame(tk.Frame):
def __init__(self, parent):
tk.Frame.__init__(self, parent)
self.fig = Figure(figsize=(12,5), dpi=100)
self.subfig = []
self.subfig.append(self.fig.add_subplot(221))
self.subfig.append(self.fig.add_subplot(222))
self.subfig.append(self.fig.add_subplot(223))
self.subfig.append(self.fig.add_subplot(224))
self.canvas = FigureCanvasTkAgg(self.fig, self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)
self.toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.plot()
def plot(self):
picNum = 4
fileList = [[]*2]*picNum
saveName = []
labelList = [[]*2]*picNum
xdataList = [[]*2]*picNum
ydataList = [[]*2]*picNum
xyLabelList = [[]*2]*picNum
xl = 2
saveName.append('sizeX')
fileList[0] = ['fort.24','fort.27']
labelList[0] = ['rms.X','max.X']
xdataList[0] = [xl,xl]
ydataList[0] = [4,3]
xyLabelList[0] = ['z direction (m)','beam size in X (mm)']
saveName.append('sizeY')
fileList[1] = ['fort.25','fort.27']
labelList[1] = ['rms.Y','max.Y']
xdataList[1] = [xl,xl]
ydataList[1] = [4,5]
xyLabelList[1] = ['z direction (m)','beam size in Y (mm)']
saveName.append('sizeZ')
fileList[2] = ['fort.26','fort.27']
labelList[2] = ['rms.Z','max.Z']
xdataList[2] = [xl,xl]
ydataList[2] = [3,7]
xyLabelList[2] = ['z direction (m)','beam size in Z (mm)']
saveName.append('emitXY')
fileList[3] = ['fort.24','fort.25']
labelList[3] = ['emit.nor.X','emit.nor.Y']
xdataList[3] = [xl,xl]
ydataList[3] = [8,8]
xyLabelList[3] = ['z direction (m)','emittance at X and Y (mm*mrad)']
lineType = ['r-','b--']
for i in range(0,picNum):
for j in range(0,2):
try:
fin = open(fileList[i][j],'r')
except:
print("ERROR! Can't open file '" + fileList[i][j] + "'")
return
linesList = fin.readlines()
fin .close()
linesList = [line.split() for line in linesList ]
xId = xdataList[i][j]-1
yId = ydataList[i][j]-1
x = np.array([float(xrt[xId]) for xrt in linesList])
y = np.array([float(xrt[yId]) for xrt in linesList])
if i in range(0,picNum-1):
y=y*1.0e3
elif i == picNum-1:
y=y*1.0e6
self.subfig[i].plot(x, y, lineType[j], linewidth=2, label=labelList[i][j])
self.subfig[i].set_xlabel(xyLabelList[i][0])
self.subfig[i].set_ylabel(xyLabelList[i][1])
box = self.subfig[i].get_position()
self.subfig[i].set_position([box.x0*1.1, box.y0*1.1, box.width, box.height *0.88])
xMax = np.max(x)
xMin = np.min(x)
yMax = np.max(y)
yMin = np.min(y)
if (xMax-xMin)>IMPACT_T_sciMaxLimit or (xMax-xMin)<IMPACT_T_sciMinLimit:
self.subfig[i].xaxis.set_major_formatter(IMPACT_T_SciFormatter)
if (yMax-yMin)>IMPACT_T_sciMaxLimit or (yMax-yMin)<IMPACT_T_sciMinLimit:
self.subfig[i].yaxis.set_major_formatter(IMPACT_T_SciFormatter)
self.subfig[i].legend(loc='upper center', bbox_to_anchor=(0.5, 1.21),fancybox=True, shadow=True, ncol=5)
self.canvas.draw()
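
# Illustrative sketch (not part of the original GUI): a minimal way OverallFrame
# could be hosted in its own Tk window, assuming the IMPACT-T output files
# fort.24-fort.27 are in the working directory and that tkinter is imported as
# tk at module level (as it is used throughout this file). The window title is
# a placeholder.
def _demo_overall_frame():
    root = tk.Tk()
    root.title('IMPACT-T overview (sketch)')
    OverallFrame(root).pack(fill=tk.BOTH, expand=True)
    root.mainloop()
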
class EmitGrowthFrame(PlotBaseFrame):
def __init__(self, parent):
PlotBaseFrame.__init__(self, parent)
self.plot()
def plot(self):
fileList = ['fort.24','fort.25']
xdataList = [2,2]
ydataList = [8,8]
xyLabelList = ['Z (m)','Avg emit growth in X and Y']
lineType = ['r-','b--']
try:
fin1 = open(fileList[0],'r')
except:
print(" ERRPR! Can't open file '" + fileList[0] + "'")
return
try:
fin2 = open(fileList[1],'r')
except:
print(" ERRPR! Can't open file '" + fileList[1] + "'")
return
linesList1 = fin1.readlines()
linesList2 = fin2.readlines()
fin1 .close()
fin2 .close()
linesList1 = [line.split() for line in linesList1 ]
linesList2 = [line.split() for line in linesList2 ]
xId = xdataList[0]-1
yId = ydataList[0]-1
try:
x = [float(xrt[xId]) for xrt in linesList1]
start = (float(linesList1[0][yId]) + float(linesList2[0][yId]))/2
if start < 1.0e-16:
start=1.0e-16
y = [(float(linesList1[k][yId]) + float(linesList2[k][yId]))/2 / start -1 for k in range(len(linesList1))]
except:
print(" ERRPR! Can't read data '" + fileList[1] + "'")
self.subfig.cla()
self.subfig.plot(x, y, lineType[0], linewidth=2, label='emit.growth')
box = self.subfig.get_position()
self.subfig.set_position([box.x0*1.4, box.y0, box.width, box.height])
self.subfig.set_xlabel(xyLabelList[0])
self.subfig.set_ylabel(xyLabelList[1])
self.subfig.legend()
self.canvas.draw()
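
# Illustrative sketch (not part of the original GUI): the curve plotted above is
# the averaged normalized-emittance growth relative to its initial value,
#     growth(z) = (eps_x(z) + eps_y(z)) / (eps_x(0) + eps_y(0)) - 1,
# guarded against a zero initial emittance exactly as in the loop above.
def _emit_growth_sketch(eps_x, eps_y):
    start = max((eps_x[0] + eps_y[0]) / 2, 1.0e-16)
    return (np.asarray(eps_x) + np.asarray(eps_y)) / 2 / start - 1
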
class TemperatureFrame(PlotBaseFrame):
def __init__(self, parent):
PlotBaseFrame.__init__(self, parent)
self.plot()
def plot(self):
arg=['ct','fort.24','fort.25','fort.26']
labelList= ['X','Y','Z']
lineType = ['-','--',':']
col = ['b','g','r']
linew = [2,2,3]
picNum = len(arg) - 1
plotPath = './post'
if os.path.exists(plotPath) == False:
os.makedirs(plotPath)
self.subfig.cla()
for i in range(1,picNum+1):
try:
fin = open(arg[i],'r')
except:
print( " ERRPR! Can't open file '" + arg[i] + "'")
return
linesList = fin.readlines()
fin .close()
linesList = [line.split() for line in linesList ]
x = [float(xrt[0]) for xrt in linesList]
yl=5
if i==3:
yl=4
y = [float(xrt[yl])*float(xrt[yl]) for xrt in linesList]
self.subfig.plot(x, y, color = col[(i-1)],linestyle=lineType[i-1], linewidth=linew[i-1],label=labelList[i-1])
box = self.subfig.get_position()
self.subfig.set_position([box.x0*1.2, box.y0, box.width, box.height])
self.subfig.set_xlabel('T (s)')
self.subfig.set_ylabel('Temperature')
self.subfig.legend()
self.canvas.draw()
class PlotHighOrderBaseFrame(tk.Frame):
ParticleDirec = {'X (mm)' :2,
'Px (MC)' :3,
'Y (mm)' :4,
'Py (MC)' :5,
'Z (mm)' :6,
'Pz (MC)' :7}
data = np.array([])
def __init__(self, parent, PlotFileName):
tk.Frame.__init__(self, parent)
try:
self.data = np.loadtxt(PlotFileName)
except:
print(( " ERROR! Can't open file '" + PlotFileName + "'"))
return
self.data = np.transpose(self.data)
for i in range(0,6,2):
self.data[i] = self.data[i] * 1e3 # from m to mm
self.frame_PlotParticleControl = tk.Frame(self)
self.frame_PlotParticleControl.pack()
self.label_x = tk.Label(self.frame_PlotParticleControl, text="Direction:")
self.label_x.pack(side='left')
self.ppc1Value = tk.StringVar(self.frame_PlotParticleControl,'X (mm)')
self.ppc1 = ttk.Combobox(self.frame_PlotParticleControl,text=self.ppc1Value,
width=6,
values=['X (mm)', 'Px (MC)', 'Y (mm)', 'Py (MC)','Z (mm)','Pz (MC)'])
self.ppc1.pack(fill = 'both',expand =1,side = 'left')
LARGE_FONT= ("Verdana", 12)
self.button_ppc=tk.Button(self.frame_PlotParticleControl)
self.button_ppc["text"] = "Plot"
self.button_ppc["foreground"] = "blue"
self.button_ppc["bg"] = "red"
self.button_ppc["font"] = LARGE_FONT
self.button_ppc["command"] = self.plot
self.button_ppc.pack(fill = 'both',expand =1,side = 'left')
x = 1
y = self.ParticleDirec[self.ppc1.get()]
self.fig = Figure(figsize=(7,5), dpi=100)
self.subfig = self.fig.add_subplot(111)
self.subfig.scatter(self.data[x],self.data[y],s=1)
xmajorFormatter = FormatStrFormatter('%2.2E')
self.subfig.yaxis.set_major_formatter(xmajorFormatter)
box = self.subfig.get_position()
self.subfig.set_position([box.x0*1.4, box.y0, box.width, box.height])
self.canvas = FigureCanvasTkAgg(self.fig, self)
self.canvas.show()
self.canvas.get_tk_widget().pack(side=tk.BOTTOM, fill=tk.BOTH, expand=True)
self.toolbar = NavigationToolbar2TkAgg(self.canvas, self)
self.toolbar.update()
self.canvas._tkcanvas.pack(side=tk.TOP, fill=tk.BOTH, expand=True)
self.plot()
class PlotMaxFrame(PlotHighOrderBaseFrame):
def __init__(self, parent,ifile):
PlotHighOrderBaseFrame.__init__(self, parent, ifile)
def plot(self):
y = self.ParticleDirec[self.ppc1.get()]
self.subfig.cla()
self.subfig.plot(self.data[1],self.data[y])
axis_format_T(self.data[1],self.data[y], self.subfig)
self.subfig.set_xlabel('Z (m)')
        self.subfig.set_ylabel('Max ' + self.ppc1.get())
self.canvas.draw()
class Plot3orderFrame(PlotHighOrderBaseFrame):
def __init__(self, parent,ifile):
PlotHighOrderBaseFrame.__init__(self, parent, ifile)
def plot(self):
y = self.ParticleDirec[self.ppc1.get()]
self.subfig.cla()
self.subfig.plot(self.data[1],self.data[y])
xmajorFormatter = FormatStrFormatter('%2.2E')
self.subfig.yaxis.set_major_formatter(xmajorFormatter)
self.subfig.set_xlabel('Z (m)')
        self.subfig.set_ylabel('cubic root of 3rd ' + self.ppc1.get())
self.canvas.draw()
class Plot4orderFrame(PlotHighOrderBaseFrame):
def __init__(self, parent,ifile):
PlotHighOrderBaseFrame.__init__(self, parent, ifile)
def plot(self):
y = self.ParticleDirec[self.ppc1.get()]
self.subfig.cla()
self.subfig.plot(self.data[1],self.data[y])
#xmajorFormatter = FormatStrFormatter('%2.2E')
#self.subfig.yaxis.set_major_formatter(xmajorFormatter)
self.subfig.set_xlabel('Z (m)')
        self.subfig.set_ylabel('fourth root of 4th ' + self.ppc1.get())
self.canvas.draw()
def axis_format_T(xData,yData,subfig):
    xMax = np.max(xData)
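    # Hedged reconstruction (sketch) of the truncated body: it mirrors the inline
    # logic used in the frames above, switching an axis to the scientific-notation
    # formatter whenever the data range falls outside the IMPACT_T_sci limits.
    xMin = np.min(xData)
    yMax = np.max(yData)
    yMin = np.min(yData)
    if (xMax - xMin) > IMPACT_T_sciMaxLimit or (xMax - xMin) < IMPACT_T_sciMinLimit:
        subfig.xaxis.set_major_formatter(IMPACT_T_SciFormatter)
    if (yMax - yMin) > IMPACT_T_sciMaxLimit or (yMax - yMin) < IMPACT_T_sciMinLimit:
        subfig.yaxis.set_major_formatter(IMPACT_T_SciFormatter)
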
# Authors:
#
# <NAME>
#
# License: BSD 3 clause
import warnings
import itertools
import numpy as np
import numpy.linalg as la
from scipy import sparse, stats
import pytest
from sklearn.utils import gen_batches
from sklearn.utils._testing import assert_almost_equal
from sklearn.utils._testing import assert_array_almost_equal
from sklearn.utils._testing import assert_array_equal
from sklearn.utils._testing import assert_array_less
from sklearn.utils._testing import assert_allclose
from sklearn.utils._testing import assert_allclose_dense_sparse
from sklearn.utils._testing import skip_if_32bit
from sklearn.utils._testing import _convert_container
from sklearn.utils.sparsefuncs import mean_variance_axis
from sklearn.preprocessing import Binarizer
from sklearn.preprocessing import KernelCenterer
from sklearn.preprocessing import Normalizer
from sklearn.preprocessing import normalize
from sklearn.preprocessing import StandardScaler
from sklearn.preprocessing import scale
from sklearn.preprocessing import MinMaxScaler
from sklearn.preprocessing import minmax_scale
from sklearn.preprocessing import QuantileTransformer
from sklearn.preprocessing import quantile_transform
from sklearn.preprocessing import MaxAbsScaler
from sklearn.preprocessing import maxabs_scale
from sklearn.preprocessing import RobustScaler
from sklearn.preprocessing import robust_scale
from sklearn.preprocessing import add_dummy_feature
from sklearn.preprocessing import PowerTransformer
from sklearn.preprocessing import power_transform
from sklearn.preprocessing._data import _handle_zeros_in_scale
from sklearn.preprocessing._data import BOUNDS_THRESHOLD
from sklearn.exceptions import NotFittedError
from sklearn.base import clone
from sklearn.pipeline import Pipeline
from sklearn.model_selection import cross_val_predict
from sklearn.svm import SVR
from sklearn.utils import shuffle
from sklearn import datasets
iris = datasets.load_iris()
# Make some data to be used many times
rng = np.random.RandomState(0)
n_features = 30
n_samples = 1000
offsets = rng.uniform(-1, 1, size=n_features)
scales = rng.uniform(1, 10, size=n_features)
X_2d = rng.randn(n_samples, n_features) * scales + offsets
X_1row = X_2d[0, :].reshape(1, n_features)
X_1col = X_2d[:, 0].reshape(n_samples, 1)
X_list_1row = X_1row.tolist()
X_list_1col = X_1col.tolist()
def toarray(a):
if hasattr(a, "toarray"):
a = a.toarray()
return a
def _check_dim_1axis(a):
return np.asarray(a).shape[0]
def assert_correct_incr(i, batch_start, batch_stop, n, chunk_size,
n_samples_seen):
if batch_stop != n:
assert (i + 1) * chunk_size == n_samples_seen
else:
assert (i * chunk_size + (batch_stop - batch_start) ==
n_samples_seen)
def test_raises_value_error_if_sample_weights_greater_than_1d():
# Sample weights must be either scalar or 1D
n_sampless = [2, 3]
n_featuress = [3, 2]
for n_samples, n_features in zip(n_sampless, n_featuress):
X = rng.randn(n_samples, n_features)
y = rng.randn(n_samples)
scaler = StandardScaler()
        # make sure an error is raised when the sample weights have more
        # than 1 dimension
sample_weight_notOK = rng.randn(n_samples, 1) ** 2
with pytest.raises(ValueError):
scaler.fit(X, y, sample_weight=sample_weight_notOK)
@pytest.mark.parametrize(['Xw', 'X', 'sample_weight'],
[([[1, 2, 3], [4, 5, 6]],
[[1, 2, 3], [1, 2, 3], [4, 5, 6]],
[2., 1.]),
([[1, 0, 1], [0, 0, 1]],
[[1, 0, 1], [0, 0, 1], [0, 0, 1], [0, 0, 1]],
np.array([1, 3])),
([[1, np.nan, 1], [np.nan, np.nan, 1]],
[[1, np.nan, 1], [np.nan, np.nan, 1],
[np.nan, np.nan, 1], [np.nan, np.nan, 1]],
np.array([1, 3])),
])
@pytest.mark.parametrize(
"array_constructor", ["array", "sparse_csr", "sparse_csc"]
)
def test_standard_scaler_sample_weight(
Xw, X, sample_weight, array_constructor):
with_mean = not array_constructor.startswith("sparse")
X = _convert_container(X, array_constructor)
Xw = _convert_container(Xw, array_constructor)
# weighted StandardScaler
yw = np.ones(Xw.shape[0])
scaler_w = StandardScaler(with_mean=with_mean)
scaler_w.fit(Xw, yw, sample_weight=sample_weight)
# unweighted, but with repeated samples
y = np.ones(X.shape[0])
scaler = StandardScaler(with_mean=with_mean)
scaler.fit(X, y)
X_test = [[1.5, 2.5, 3.5], [3.5, 4.5, 5.5]]
assert_almost_equal(scaler.mean_, scaler_w.mean_)
assert_almost_equal(scaler.var_, scaler_w.var_)
assert_almost_equal(scaler.transform(X_test), scaler_w.transform(X_test))
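
# Illustrative sketch (not part of the upstream test suite): the equivalence
# checked above rests on the fact that an integer sample_weight w_i behaves like
# repeating row i exactly w_i times, e.g. for the weighted mean
#     mean_w = sum_i(w_i * x_i) / sum_i(w_i).
def _weighted_mean_equals_repetition_sketch():
    Xw = np.array([[1., 2., 3.], [4., 5., 6.]])
    w = np.array([2., 1.])
    X_rep = np.array([[1., 2., 3.], [1., 2., 3.], [4., 5., 6.]])
    assert_allclose(np.average(Xw, axis=0, weights=w), X_rep.mean(axis=0))
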
def test_standard_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_almost_equal(scaler.mean_, X.ravel())
assert_almost_equal(scaler.scale_, np.ones(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.std(axis=0),
np.zeros_like(n_features))
else:
assert_almost_equal(scaler.mean_, X.mean())
assert_almost_equal(scaler.scale_, X.std())
assert_array_almost_equal(X_scaled.mean(axis=0),
np.zeros_like(n_features))
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert_almost_equal(scaler.mean_, 1.)
assert_almost_equal(scaler.scale_, 1.)
assert_array_almost_equal(X_scaled.mean(axis=0), .0)
assert_array_almost_equal(X_scaled.std(axis=0), .0)
assert scaler.n_samples_seen_ == X.shape[0]
@pytest.mark.parametrize("sparse_constructor",
[None, sparse.csc_matrix, sparse.csr_matrix])
@pytest.mark.parametrize("add_sample_weight", [False, True])
def test_standard_scaler_dtype(add_sample_weight, sparse_constructor):
# Ensure scaling does not affect dtype
rng = np.random.RandomState(0)
n_samples = 10
n_features = 3
if add_sample_weight:
sample_weight = np.ones(n_samples)
else:
sample_weight = None
with_mean = True
for dtype in [np.float16, np.float32, np.float64]:
X = rng.randn(n_samples, n_features).astype(dtype)
if sparse_constructor is not None:
X = sparse_constructor(X)
with_mean = False
scaler = StandardScaler(with_mean=with_mean)
X_scaled = scaler.fit(X, sample_weight=sample_weight).transform(X)
assert X.dtype == X_scaled.dtype
assert scaler.mean_.dtype == np.float64
assert scaler.scale_.dtype == np.float64
@pytest.mark.parametrize("scaler", [
StandardScaler(with_mean=False),
RobustScaler(with_centering=False),
])
@pytest.mark.parametrize("sparse_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
@pytest.mark.parametrize("add_sample_weight", [False, True])
@pytest.mark.parametrize("dtype", [np.float32, np.float64])
@pytest.mark.parametrize("constant", [0, 1., 100.])
def test_standard_scaler_constant_features(
scaler, add_sample_weight, sparse_constructor, dtype, constant):
if (isinstance(scaler, StandardScaler)
and constant > 1
and sparse_constructor is not np.asarray
and add_sample_weight):
# https://github.com/scikit-learn/scikit-learn/issues/19546
pytest.xfail("Computation of weighted variance is numerically unstable"
" for sparse data. See: #19546.")
if isinstance(scaler, RobustScaler) and add_sample_weight:
pytest.skip(f"{scaler.__class__.__name__} does not yet support"
f" sample_weight")
rng = np.random.RandomState(0)
n_samples = 100
n_features = 1
if add_sample_weight:
fit_params = dict(sample_weight=rng.uniform(size=n_samples) * 2)
else:
fit_params = {}
X_array = np.full(shape=(n_samples, n_features), fill_value=constant,
dtype=dtype)
X = sparse_constructor(X_array)
X_scaled = scaler.fit(X, **fit_params).transform(X)
if isinstance(scaler, StandardScaler):
# The variance info should be close to zero for constant features.
assert_allclose(scaler.var_, np.zeros(X.shape[1]), atol=1e-7)
# Constant features should not be scaled (scale of 1.):
assert_allclose(scaler.scale_, np.ones(X.shape[1]))
if hasattr(X_scaled, "toarray"):
assert_allclose(X_scaled.toarray(), X_array)
else:
assert_allclose(X_scaled, X)
if isinstance(scaler, StandardScaler) and not add_sample_weight:
# Also check consistency with the standard scale function.
X_scaled_2 = scale(X, with_mean=scaler.with_mean)
if hasattr(X_scaled_2, "toarray"):
            assert_allclose(X_scaled_2.toarray(), X_scaled.toarray())
        else:
            assert_allclose(X_scaled_2, X_scaled)
def test_scale_1d():
# 1-d inputs
X_list = [1., 3., 5., 0.]
X_arr = np.array(X_list)
for X in [X_list, X_arr]:
X_scaled = scale(X)
assert_array_almost_equal(X_scaled.mean(), 0.0)
assert_array_almost_equal(X_scaled.std(), 1.0)
assert_array_equal(scale(X, with_mean=False, with_std=False), X)
@skip_if_32bit
def test_standard_scaler_numerical_stability():
# Test numerical stability of scaling
    # np.log(1e-5) is taken because its floating point representation
    # was empirically found to cause numerical problems with np.mean & np.std.
x = np.full(8, np.log(1e-5), dtype=np.float64)
# This does not raise a warning as the number of samples is too low
# to trigger the problem in recent numpy
with pytest.warns(None) as record:
scale(x)
assert len(record) == 0
assert_array_almost_equal(scale(x), np.zeros(8))
# with 2 more samples, the std computation run into numerical issues:
x = np.full(10, np.log(1e-5), dtype=np.float64)
warning_message = (
"standard deviation of the data is probably very close to 0"
)
with pytest.warns(UserWarning, match=warning_message):
x_scaled = scale(x)
assert_array_almost_equal(x_scaled, np.zeros(10))
x = np.full(10, 1e-100, dtype=np.float64)
with pytest.warns(None) as record:
x_small_scaled = scale(x)
assert len(record) == 0
assert_array_almost_equal(x_small_scaled, np.zeros(10))
# Large values can cause (often recoverable) numerical stability issues:
x_big = np.full(10, 1e100, dtype=np.float64)
warning_message = (
"Dataset may contain too large values"
)
with pytest.warns(UserWarning, match=warning_message):
x_big_scaled = scale(x_big)
assert_array_almost_equal(x_big_scaled, np.zeros(10))
assert_array_almost_equal(x_big_scaled, x_small_scaled)
with pytest.warns(UserWarning, match=warning_message):
x_big_centered = scale(x_big, with_std=False)
assert_array_almost_equal(x_big_centered, np.zeros(10))
assert_array_almost_equal(x_big_centered, x_small_scaled)
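
# Illustrative sketch (not sklearn code): the instability probed above stems from
# the one-pass textbook formula var = E[x^2] - E[x]^2, which suffers catastrophic
# cancellation when the mean dominates the spread; np.var avoids this by
# subtracting the mean first (a shifted, two-pass computation).
def _naive_variance_cancellation_sketch():
    x = np.full(10, np.log(1e-5), dtype=np.float64)  # constant vector, variance 0
    naive_var = np.mean(x ** 2) - np.mean(x) ** 2    # cancellation-prone formula
    two_pass_var = np.var(x)                         # deviations-from-mean formula
    assert two_pass_var >= 0.0                       # guaranteed non-negative
    assert abs(naive_var) < 1e-10                    # tiny, but may even be negative
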
def test_scaler_2d_arrays():
# Test scaling of 2d array along first axis
rng = np.random.RandomState(0)
n_features = 5
n_samples = 4
X = rng.randn(n_samples, n_features)
    X[:, 0] = 0.0 # first feature is always zero
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert scaler.n_samples_seen_ == n_samples
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has been copied
assert X_scaled is not X
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_scaled = scale(X, axis=1, with_std=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
X_scaled = scale(X, axis=1, with_std=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=1), n_samples * [0.0])
assert_array_almost_equal(X_scaled.std(axis=1), n_samples * [1.0])
# Check that the data hasn't been modified
assert X_scaled is not X
X_scaled = scaler.fit(X).transform(X, copy=False)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is X
X = rng.randn(4, 5)
X[:, 0] = 1.0 # first feature is a constant, non zero feature
scaler = StandardScaler()
X_scaled = scaler.fit(X).transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
assert_array_almost_equal(X_scaled.mean(axis=0), n_features * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is not X
def test_scaler_float16_overflow():
# Test if the scaler will not overflow on float16 numpy arrays
rng = np.random.RandomState(0)
    # float16 has a maximum of ~65504. In the worst case the column sum is about
    # 10 * 200000 = 2,000,000, which is more than enough to overflow the data type
X = rng.uniform(5, 10, [200000, 1]).astype(np.float16)
with np.errstate(over='raise'):
scaler = StandardScaler().fit(X)
X_scaled = scaler.transform(X)
# Calculate the float64 equivalent to verify result
X_scaled_f64 = StandardScaler().fit_transform(X.astype(np.float64))
# Overflow calculations may cause -inf, inf, or nan. Since there is no nan
# input, all of the outputs should be finite. This may be redundant since a
# FloatingPointError exception will be thrown on overflow above.
assert np.all(np.isfinite(X_scaled))
# The normal distribution is very unlikely to go above 4. At 4.0-8.0 the
# float16 precision is 2^-8 which is around 0.004. Thus only 2 decimals are
# checked to account for precision differences.
assert_array_almost_equal(X_scaled, X_scaled_f64, decimal=2)
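
# Illustrative sketch (not sklearn code): the overflow guarded against above is
# easy to reproduce directly -- any float16 intermediate larger than ~65504
# becomes inf, while the same arithmetic in float64 stays finite.
def _float16_overflow_sketch():
    x = np.full(4, 60000.0, dtype=np.float16)
    with np.errstate(over='ignore'):
        doubled = x * np.float16(2)  # 120000 is not representable in float16
    assert np.all(np.isinf(doubled))
    assert np.all(np.isfinite(x.astype(np.float64) * 2))
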
def test_handle_zeros_in_scale():
s1 = np.array([0, 1e-16, 1, 2, 3])
s2 = _handle_zeros_in_scale(s1, copy=True)
assert_allclose(s1, np.array([0, 1e-16, 1, 2, 3]))
assert_allclose(s2, np.array([1, 1, 1, 2, 3]))
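
# Illustrative sketch (not sklearn code): mapping (near-)zero scales to 1, as
# checked above, is what keeps constant features finite after scaling -- dividing
# by the raw scale would blow up, dividing by the patched scale leaves the
# centered column at 0.
def _zero_scale_division_sketch():
    col = np.full(4, 3.0)
    raw_scale = np.array([col.std()])  # [0.] for a constant column
    patched = _handle_zeros_in_scale(raw_scale, copy=True)
    assert patched[0] == 1.0
    assert np.all(np.isfinite((col - col.mean()) / patched[0]))
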
def test_minmax_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler()
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MinMaxScaler().fit(X[batch0])
scaler_incr = MinMaxScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.data_min_,
scaler_incr.data_min_)
assert_array_almost_equal(scaler_batch.data_max_,
scaler_incr.data_max_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.data_range_,
scaler_incr.data_range_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.min_, scaler_incr.min_)
        # Test std until the end of the partial fits
scaler_batch = MinMaxScaler().fit(X)
scaler_incr = MinMaxScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_standard_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = StandardScaler(with_std=False).fit(X)
scaler_incr = StandardScaler(with_std=False)
for batch in gen_batches(n_samples, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_array_almost_equal(scaler_batch.mean_, scaler_incr.mean_)
assert scaler_batch.var_ == scaler_incr.var_ # Nones
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
# Test std after 1 step
batch0 = slice(0, chunk_size)
scaler_incr = StandardScaler().partial_fit(X[batch0])
if chunk_size == 1:
assert_array_almost_equal(np.zeros(n_features, dtype=np.float64),
scaler_incr.var_)
assert_array_almost_equal(np.ones(n_features, dtype=np.float64),
scaler_incr.scale_)
else:
assert_array_almost_equal(np.var(X[batch0], axis=0),
scaler_incr.var_)
assert_array_almost_equal(np.std(X[batch0], axis=0),
scaler_incr.scale_) # no constants
        # Test std until the end of the partial fits
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n_samples, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
assert_array_almost_equal(scaler_batch.var_, scaler_incr.var_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
def test_standard_scaler_partial_fit_numerical_stability():
    # Test if the incremental computation introduces significant errors
    # for large datasets with values of large magnitude
rng = np.random.RandomState(0)
n_features = 2
n_samples = 100
offsets = rng.uniform(-1e15, 1e15, size=n_features)
scales = rng.uniform(1e3, 1e6, size=n_features)
X = rng.randn(n_samples, n_features) * scales + offsets
scaler_batch = StandardScaler().fit(X)
scaler_incr = StandardScaler()
for chunk in X:
scaler_incr = scaler_incr.partial_fit(chunk.reshape(1, n_features))
    # Regardless of absolute values, they must not differ by more than
    # 6 significant digits
tol = 10 ** (-6)
assert_allclose(scaler_incr.mean_, scaler_batch.mean_, rtol=tol)
assert_allclose(scaler_incr.var_, scaler_batch.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler_batch.scale_, rtol=tol)
# NOTE Be aware that for much larger offsets std is very unstable (last
# assert) while mean is OK.
# Sparse input
size = (100, 3)
scale = 1e20
X = rng.randint(0, 2, size).astype(np.float64) * scale
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
for X in [X_csr, X_csc]:
# with_mean=False is required with sparse input
scaler = StandardScaler(with_mean=False).fit(X)
scaler_incr = StandardScaler(with_mean=False)
for chunk in X:
# chunk = sparse.csr_matrix(data_chunks)
scaler_incr = scaler_incr.partial_fit(chunk)
        # Regardless of magnitude, they must not differ by more than
        # 6 significant digits
tol = 10 ** (-6)
assert scaler.mean_ is not None
assert_allclose(scaler_incr.var_, scaler.var_, rtol=tol)
assert_allclose(scaler_incr.scale_, scaler.scale_, rtol=tol)
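
# Illustrative sketch (not the actual sklearn implementation): partial_fit keeps
# the running statistics stable by merging per-chunk sums of squared deviations in
# the spirit of Chan et al., rather than accumulating raw sums of squares.
def _merge_mean_var_sketch(n_a, mean_a, var_a, n_b, mean_b, var_b):
    # merge population statistics of two chunks into the combined statistics
    n = n_a + n_b
    delta = mean_b - mean_a
    mean = mean_a + delta * n_b / n
    m2 = var_a * n_a + var_b * n_b + delta ** 2 * n_a * n_b / n
    return n, mean, m2 / n


def _check_merge_mean_var_sketch():
    a = np.arange(50, dtype=np.float64)
    b = np.arange(70, dtype=np.float64) + 100.
    n, mean, var = _merge_mean_var_sketch(a.size, a.mean(), a.var(),
                                          b.size, b.mean(), b.var())
    ab = np.concatenate([a, b])
    assert_allclose(mean, ab.mean())
    assert_allclose(var, ab.var())
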
@pytest.mark.parametrize("sample_weight", [True, None])
def test_partial_fit_sparse_input(sample_weight):
# Check that sparsity is not destroyed
X = np.array([[1.], [0.], [0.], [5.]])
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
if sample_weight:
sample_weight = rng.rand(X_csc.shape[0])
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
for X in [X_csr, X_csc]:
X_null = null_transform.partial_fit(
X, sample_weight=sample_weight).transform(X)
assert_array_equal(X_null.toarray(), X.toarray())
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.toarray(), X_null.toarray())
assert_array_equal(X_orig.toarray(), X.toarray())
@pytest.mark.parametrize("sample_weight", [True, None])
def test_standard_scaler_transform_with_partial_fit(sample_weight):
# Check some postconditions after applying partial_fit and transform
X = X_2d[:100, :]
if sample_weight:
sample_weight = rng.rand(X.shape[0])
scaler_incr = StandardScaler()
for i, batch in enumerate(gen_batches(X.shape[0], 1)):
X_sofar = X[:(i + 1), :]
chunks_copy = X_sofar.copy()
if sample_weight is None:
scaled_batch = StandardScaler().fit_transform(X_sofar)
scaler_incr = scaler_incr.partial_fit(X[batch])
else:
scaled_batch = StandardScaler().fit_transform(
X_sofar, sample_weight=sample_weight[:i + 1])
scaler_incr = scaler_incr.partial_fit(
X[batch], sample_weight=sample_weight[batch])
scaled_incr = scaler_incr.transform(X_sofar)
assert_array_almost_equal(scaled_batch, scaled_incr)
assert_array_almost_equal(X_sofar, chunks_copy) # No change
right_input = scaler_incr.inverse_transform(scaled_incr)
assert_array_almost_equal(X_sofar, right_input)
zero = np.zeros(X.shape[1])
epsilon = np.finfo(float).eps
assert_array_less(zero, scaler_incr.var_ + epsilon) # as less or equal
assert_array_less(zero, scaler_incr.scale_ + epsilon)
if sample_weight is None:
# (i+1) because the Scaler has been already fitted
assert (i + 1) == scaler_incr.n_samples_seen_
else:
assert (
np.sum(sample_weight[:i + 1]) ==
pytest.approx(scaler_incr.n_samples_seen_)
)
def test_min_max_scaler_iris():
X = iris.data
scaler = MinMaxScaler()
# default params
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 0)
assert_array_almost_equal(X_trans.max(axis=0), 1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# not default params: min=1, max=2
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), 1)
assert_array_almost_equal(X_trans.max(axis=0), 2)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# min=-.5, max=.6
scaler = MinMaxScaler(feature_range=(-.5, .6))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(X_trans.min(axis=0), -.5)
assert_array_almost_equal(X_trans.max(axis=0), .6)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# raises on invalid range
scaler = MinMaxScaler(feature_range=(2, 1))
with pytest.raises(ValueError):
scaler.fit(X)
def test_min_max_scaler_zero_variance_features():
# Check min max scaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
# default params
scaler = MinMaxScaler()
X_trans = scaler.fit_transform(X)
X_expected_0_1 = [[0., 0., 0.5],
[0., 0., 0.0],
[0., 0., 1.0]]
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
X_trans_new = scaler.transform(X_new)
X_expected_0_1_new = [[+0., 1., 0.500],
[-1., 0., 0.083],
[+0., 0., 1.333]]
assert_array_almost_equal(X_trans_new, X_expected_0_1_new, decimal=2)
# not default params
scaler = MinMaxScaler(feature_range=(1, 2))
X_trans = scaler.fit_transform(X)
X_expected_1_2 = [[1., 1., 1.5],
[1., 1., 1.0],
[1., 1., 2.0]]
assert_array_almost_equal(X_trans, X_expected_1_2)
# function interface
X_trans = minmax_scale(X)
assert_array_almost_equal(X_trans, X_expected_0_1)
X_trans = minmax_scale(X, feature_range=(1, 2))
assert_array_almost_equal(X_trans, X_expected_1_2)
def test_minmax_scale_axis1():
X = iris.data
X_trans = minmax_scale(X, axis=1)
assert_array_almost_equal(np.min(X_trans, axis=1), 0)
assert_array_almost_equal(np.max(X_trans, axis=1), 1)
def test_min_max_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MinMaxScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(X_scaled.min(axis=0),
np.zeros(n_features))
assert_array_almost_equal(X_scaled.max(axis=0),
np.zeros(n_features))
else:
assert_array_almost_equal(X_scaled.min(axis=0), .0)
assert_array_almost_equal(X_scaled.max(axis=0), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MinMaxScaler()
X_scaled = scaler.fit(X).transform(X)
assert X_scaled.min() >= 0.
assert X_scaled.max() <= 1.
assert scaler.n_samples_seen_ == X.shape[0]
# Function interface
X_1d = X_1row.ravel()
min_ = X_1d.min()
max_ = X_1d.max()
assert_array_almost_equal((X_1d - min_) / (max_ - min_),
minmax_scale(X_1d, copy=True))
@pytest.mark.parametrize("sample_weight", [True, None])
def test_scaler_without_centering(sample_weight):
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
if sample_weight:
sample_weight = rng.rand(X.shape[0])
with pytest.raises(ValueError):
StandardScaler().fit(X_csr)
with pytest.raises(ValueError):
StandardScaler().fit(X_csc)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
scaler = StandardScaler(with_mean=False).fit(
X, sample_weight=sample_weight)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
scaler_csr = StandardScaler(with_mean=False).fit(
X_csr, sample_weight=sample_weight)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
scaler_csc = StandardScaler(with_mean=False).fit(
X_csc, sample_weight=sample_weight)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.n_samples_seen_,
scaler_csr.n_samples_seen_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(scaler.n_samples_seen_,
scaler_csc.n_samples_seen_)
if sample_weight is None:
assert_array_almost_equal(
X_scaled.mean(axis=0), [0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_var = \
mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_var, X_scaled.var(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
@pytest.mark.parametrize("with_mean", [True, False])
@pytest.mark.parametrize("with_std", [True, False])
@pytest.mark.parametrize("array_constructor",
[np.asarray, sparse.csc_matrix, sparse.csr_matrix])
def test_scaler_n_samples_seen_with_nan(with_mean, with_std,
array_constructor):
X = np.array([[0, 1, 3],
[np.nan, 6, 10],
[5, 4, np.nan],
[8, 0, np.nan]],
dtype=np.float64)
X = array_constructor(X)
if sparse.issparse(X) and with_mean:
pytest.skip("'with_mean=True' cannot be used with sparse matrix.")
transformer = StandardScaler(with_mean=with_mean, with_std=with_std)
transformer.fit(X)
assert_array_equal(transformer.n_samples_seen_, np.array([3, 4, 2]))
def _check_identity_scalers_attributes(scaler_1, scaler_2):
assert scaler_1.mean_ is scaler_2.mean_ is None
assert scaler_1.var_ is scaler_2.var_ is None
assert scaler_1.scale_ is scaler_2.scale_ is None
assert scaler_1.n_samples_seen_ == scaler_2.n_samples_seen_
def test_scaler_return_identity():
    # test that the scaler returns the identity when with_mean and with_std are
    # False
X_dense = np.array([[0, 1, 3],
[5, 6, 0],
[8, 0, 10]],
dtype=np.float64)
X_csr = sparse.csr_matrix(X_dense)
X_csc = X_csr.tocsc()
transformer_dense = StandardScaler(with_mean=False, with_std=False)
X_trans_dense = transformer_dense.fit_transform(X_dense)
transformer_csr = clone(transformer_dense)
X_trans_csr = transformer_csr.fit_transform(X_csr)
transformer_csc = clone(transformer_dense)
X_trans_csc = transformer_csc.fit_transform(X_csc)
assert_allclose_dense_sparse(X_trans_csr, X_csr)
assert_allclose_dense_sparse(X_trans_csc, X_csc)
assert_allclose(X_trans_dense, X_dense)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.partial_fit(X_dense)
transformer_csr.partial_fit(X_csr)
transformer_csc.partial_fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
transformer_dense.fit(X_dense)
transformer_csr.fit(X_csr)
transformer_csc.fit(X_csc)
for trans_1, trans_2 in itertools.combinations([transformer_dense,
transformer_csr,
transformer_csc],
2):
_check_identity_scalers_attributes(trans_1, trans_2)
def test_scaler_int():
# test that scaler converts integer input to floating
# for both sparse and dense matrices
rng = np.random.RandomState(42)
X = rng.randint(20, size=(4, 5))
    X[:, 0] = 0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
null_transform = StandardScaler(with_mean=False, with_std=False, copy=True)
with warnings.catch_warnings(record=True):
X_null = null_transform.fit_transform(X_csr)
assert_array_equal(X_null.data, X_csr.data)
X_orig = null_transform.inverse_transform(X_null)
assert_array_equal(X_orig.data, X_csr.data)
with warnings.catch_warnings(record=True):
scaler = StandardScaler(with_mean=False).fit(X)
X_scaled = scaler.transform(X, copy=True)
assert not np.any(np.isnan(X_scaled))
with warnings.catch_warnings(record=True):
scaler_csr = StandardScaler(with_mean=False).fit(X_csr)
X_csr_scaled = scaler_csr.transform(X_csr, copy=True)
assert not np.any(np.isnan(X_csr_scaled.data))
with warnings.catch_warnings(record=True):
scaler_csc = StandardScaler(with_mean=False).fit(X_csc)
X_csc_scaled = scaler_csc.transform(X_csc, copy=True)
assert not np.any(np.isnan(X_csc_scaled.data))
assert_array_almost_equal(scaler.mean_, scaler_csr.mean_)
assert_array_almost_equal(scaler.var_, scaler_csr.var_)
assert_array_almost_equal(scaler.scale_, scaler_csr.scale_)
assert_array_almost_equal(scaler.mean_, scaler_csc.mean_)
assert_array_almost_equal(scaler.var_, scaler_csc.var_)
assert_array_almost_equal(scaler.scale_, scaler_csc.scale_)
assert_array_almost_equal(
X_scaled.mean(axis=0),
[0., 1.109, 1.856, 21., 1.559], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(
X_csr_scaled.astype(float), 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# Check that X has not been modified (copy)
assert X_scaled is not X
assert X_csr_scaled is not X_csr
X_scaled_back = scaler.inverse_transform(X_scaled)
assert X_scaled_back is not X
assert X_scaled_back is not X_scaled
assert_array_almost_equal(X_scaled_back, X)
X_csr_scaled_back = scaler_csr.inverse_transform(X_csr_scaled)
assert X_csr_scaled_back is not X_csr
assert X_csr_scaled_back is not X_csr_scaled
assert_array_almost_equal(X_csr_scaled_back.toarray(), X)
X_csc_scaled_back = scaler_csr.inverse_transform(X_csc_scaled.tocsc())
assert X_csc_scaled_back is not X_csc
assert X_csc_scaled_back is not X_csc_scaled
assert_array_almost_equal(X_csc_scaled_back.toarray(), X)
def test_scaler_without_copy():
# Check that StandardScaler.fit does not change input
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_copy = X.copy()
StandardScaler(copy=False).fit(X)
assert_array_equal(X, X_copy)
X_csr_copy = X_csr.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csr)
assert_array_equal(X_csr.toarray(), X_csr_copy.toarray())
X_csc_copy = X_csc.copy()
StandardScaler(with_mean=False, copy=False).fit(X_csc)
assert_array_equal(X_csc.toarray(), X_csc_copy.toarray())
def test_scale_sparse_with_mean_raise_exception():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
# check scaling and fit with direct calls on sparse data
with pytest.raises(ValueError):
scale(X_csr, with_mean=True)
with pytest.raises(ValueError):
StandardScaler(with_mean=True).fit(X_csr)
with pytest.raises(ValueError):
scale(X_csc, with_mean=True)
with pytest.raises(ValueError):
StandardScaler(with_mean=True).fit(X_csc)
# check transform and inverse_transform after a fit on a dense array
scaler = StandardScaler(with_mean=True).fit(X)
with pytest.raises(ValueError):
scaler.transform(X_csr)
with pytest.raises(ValueError):
scaler.transform(X_csc)
X_transformed_csr = sparse.csr_matrix(scaler.transform(X))
with pytest.raises(ValueError):
scaler.inverse_transform(X_transformed_csr)
X_transformed_csc = sparse.csc_matrix(scaler.transform(X))
with pytest.raises(ValueError):
scaler.inverse_transform(X_transformed_csc)
def test_scale_input_finiteness_validation():
    # Check that non-finite inputs raise a ValueError
X = [[np.inf, 5, 6, 7, 8]]
with pytest.raises(ValueError, match="Input contains infinity "
"or a value too large"):
scale(X)
def test_robust_scaler_error_sparse():
X_sparse = sparse.rand(1000, 10)
scaler = RobustScaler(with_centering=True)
err_msg = "Cannot center sparse matrices"
with pytest.raises(ValueError, match=err_msg):
scaler.fit(X_sparse)
@pytest.mark.parametrize("with_centering", [True, False])
@pytest.mark.parametrize("with_scaling", [True, False])
@pytest.mark.parametrize("X", [np.random.randn(10, 3),
sparse.rand(10, 3, density=0.5)])
def test_robust_scaler_attributes(X, with_centering, with_scaling):
# check consistent type of attributes
if with_centering and sparse.issparse(X):
pytest.skip("RobustScaler cannot center sparse matrix")
scaler = RobustScaler(with_centering=with_centering,
with_scaling=with_scaling)
scaler.fit(X)
if with_centering:
assert isinstance(scaler.center_, np.ndarray)
else:
assert scaler.center_ is None
if with_scaling:
assert isinstance(scaler.scale_, np.ndarray)
else:
assert scaler.scale_ is None
def test_robust_scaler_col_zero_sparse():
    # check that the scaler is working when there is no data materialized in a
    # column of a sparse matrix
X = np.random.randn(10, 5)
X[:, 0] = 0
X = sparse.csr_matrix(X)
scaler = RobustScaler(with_centering=False)
scaler.fit(X)
assert scaler.scale_[0] == pytest.approx(1)
X_trans = scaler.transform(X)
assert_allclose(X[:, 0].toarray(), X_trans[:, 0].toarray())
def test_robust_scaler_2d_arrays():
# Test robust scaling of 2d array along first axis
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
scaler = RobustScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.median(X_scaled, axis=0), 5 * [0.0])
assert_array_almost_equal(X_scaled.std(axis=0)[0], 0)
@pytest.mark.parametrize("density", [0, 0.05, 0.1, 0.5, 1])
@pytest.mark.parametrize("strictly_signed",
['positive', 'negative', 'zeros', None])
def test_robust_scaler_equivalence_dense_sparse(density, strictly_signed):
# Check the equivalence of the fitting with dense and sparse matrices
X_sparse = sparse.rand(1000, 5, density=density).tocsc()
if strictly_signed == 'positive':
X_sparse.data = np.abs(X_sparse.data)
elif strictly_signed == 'negative':
X_sparse.data = - np.abs(X_sparse.data)
elif strictly_signed == 'zeros':
X_sparse.data = np.zeros(X_sparse.data.shape, dtype=np.float64)
X_dense = X_sparse.toarray()
scaler_sparse = RobustScaler(with_centering=False)
scaler_dense = RobustScaler(with_centering=False)
scaler_sparse.fit(X_sparse)
scaler_dense.fit(X_dense)
assert_allclose(scaler_sparse.scale_, scaler_dense.scale_)
def test_robust_scaler_transform_one_row_csr():
# Check RobustScaler on transforming csr matrix with one row
rng = np.random.RandomState(0)
X = rng.randn(4, 5)
single_row = np.array([[0.1, 1., 2., 0., -1.]])
scaler = RobustScaler(with_centering=False)
scaler = scaler.fit(X)
row_trans = scaler.transform(sparse.csr_matrix(single_row))
row_expected = single_row / scaler.scale_
assert_array_almost_equal(row_trans.toarray(), row_expected)
row_scaled_back = scaler.inverse_transform(row_trans)
assert_array_almost_equal(single_row, row_scaled_back.toarray())
def test_robust_scaler_iris():
X = iris.data
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(25, 75), axis=0)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_iris_quantiles():
X = iris.data
scaler = RobustScaler(quantile_range=(10, 90))
X_trans = scaler.fit_transform(X)
assert_array_almost_equal(np.median(X_trans, axis=0), 0)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
q = np.percentile(X_trans, q=(10, 90), axis=0)
q_range = q[1] - q[0]
assert_array_almost_equal(q_range, 1)
def test_quantile_transform_iris():
X = iris.data
# uniform output distribution
transformer = QuantileTransformer(n_quantiles=30)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# normal output distribution
transformer = QuantileTransformer(n_quantiles=30,
output_distribution='normal')
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
    # make sure it is possible to take the inverse of a sparse matrix
    # which contains negative values; this is the case in the iris dataset
X_sparse = sparse.csc_matrix(X)
X_sparse_tran = transformer.fit_transform(X_sparse)
X_sparse_tran_inv = transformer.inverse_transform(X_sparse_tran)
assert_array_almost_equal(X_sparse.A, X_sparse_tran_inv.A)
def test_quantile_transform_check_error():
X = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X = sparse.csc_matrix(X)
X_neg = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[-2, 4, 0, 0, 6, 8, 0, 10, 0, 0],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
X_neg = sparse.csc_matrix(X_neg)
err_msg = "Invalid value for 'n_quantiles': 0."
with pytest.raises(ValueError, match=err_msg):
QuantileTransformer(n_quantiles=0).fit(X)
err_msg = "Invalid value for 'subsample': 0."
with pytest.raises(ValueError, match=err_msg):
QuantileTransformer(subsample=0).fit(X)
err_msg = ("The number of quantiles cannot be greater than "
"the number of samples used. Got 1000 quantiles "
"and 10 samples.")
with pytest.raises(ValueError, match=err_msg):
QuantileTransformer(subsample=10).fit(X)
transformer = QuantileTransformer(n_quantiles=10)
err_msg = "QuantileTransformer only accepts non-negative sparse matrices."
with pytest.raises(ValueError, match=err_msg):
transformer.fit(X_neg)
transformer.fit(X)
err_msg = "QuantileTransformer only accepts non-negative sparse matrices."
with pytest.raises(ValueError, match=err_msg):
transformer.transform(X_neg)
X_bad_feat = np.transpose([[0, 25, 50, 0, 0, 0, 75, 0, 0, 100],
[0, 0, 2.6, 4.1, 0, 0, 2.3, 0, 9.5, 0.1]])
err_msg = ("X has 2 features, but QuantileTransformer is expecting "
"3 features as input.")
with pytest.raises(ValueError, match=err_msg):
transformer.inverse_transform(X_bad_feat)
transformer = QuantileTransformer(n_quantiles=10,
output_distribution='rnd')
# check that an error is raised at fit time
err_msg = ("'output_distribution' has to be either 'normal' or "
"'uniform'. Got 'rnd' instead.")
with pytest.raises(ValueError, match=err_msg):
transformer.fit(X)
# check that an error is raised at transform time
transformer.output_distribution = 'uniform'
transformer.fit(X)
X_tran = transformer.transform(X)
transformer.output_distribution = 'rnd'
err_msg = ("'output_distribution' has to be either 'normal' or 'uniform'."
" Got 'rnd' instead.")
with pytest.raises(ValueError, match=err_msg):
transformer.transform(X)
# check that an error is raised at inverse_transform time
err_msg = ("'output_distribution' has to be either 'normal' or 'uniform'."
" Got 'rnd' instead.")
with pytest.raises(ValueError, match=err_msg):
transformer.inverse_transform(X_tran)
# check that an error is raised if input is scalar
with pytest.raises(ValueError,
match='Expected 2D array, got scalar array instead'):
transformer.transform(10)
    # check that a warning is raised if n_quantiles > n_samples
transformer = QuantileTransformer(n_quantiles=100)
warn_msg = "n_quantiles is set to n_samples"
with pytest.warns(UserWarning, match=warn_msg) as record:
transformer.fit(X)
assert len(record) == 1
assert transformer.n_quantiles_ == X.shape[0]
def test_quantile_transform_sparse_ignore_zeros():
X = np.array([[0, 1],
[0, 0],
[0, 2],
[0, 2],
[0, 1]])
X_sparse = sparse.csc_matrix(X)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
    # dense case -> warning raised
warning_message = ("'ignore_implicit_zeros' takes effect"
" only with sparse matrix. This parameter has no"
" effect.")
with pytest.warns(UserWarning, match=warning_message):
transformer.fit(X)
X_expected = np.array([[0, 0],
[0, 0],
[0, 1],
[0, 1],
[0, 0]])
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
    # consider the case where the implicit (sparse) zeros are missing values
    # while the explicitly stored zeros are treated as real data
X_data = np.array([0, 0, 1, 0, 2, 2, 1, 0, 1, 2, 0])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6, 7, 8])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0., 0.5],
[0., 0.],
[0., 1.],
[0., 1.],
[0., 0.5],
[0., 0.],
[0., 0.5],
[0., 1.],
[0., 0.]])
assert_almost_equal(X_expected, X_trans.A)
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5)
X_data = np.array([-1, -1, 1, 0, 0, 0, 1, -1, 1])
X_col = np.array([0, 0, 1, 1, 1, 1, 1, 1, 1])
X_row = np.array([0, 4, 0, 1, 2, 3, 4, 5, 6])
X_sparse = sparse.csc_matrix((X_data, (X_row, X_col)))
X_trans = transformer.fit_transform(X_sparse)
X_expected = np.array([[0, 1],
[0, 0.375],
[0, 0.375],
[0, 0.375],
[0, 1],
[0, 0],
[0, 1]])
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
# check in conjunction with subsampling
transformer = QuantileTransformer(ignore_implicit_zeros=True,
n_quantiles=5,
subsample=8,
random_state=0)
X_trans = transformer.fit_transform(X_sparse)
assert_almost_equal(X_expected, X_trans.A)
assert_almost_equal(X_sparse.A, transformer.inverse_transform(X_trans).A)
def test_quantile_transform_dense_toy():
X = np.array([[0, 2, 2.6],
[25, 4, 4.1],
[50, 6, 2.3],
[75, 8, 9.5],
[100, 10, 0.1]])
transformer = QuantileTransformer(n_quantiles=5)
transformer.fit(X)
    # with a uniform output distribution, each entry of X should be mapped
    # between 0 and 1 and equally spaced
X_trans = transformer.fit_transform(X)
X_expected = np.tile(np.linspace(0, 1, num=5), (3, 1)).T
assert_almost_equal(np.sort(X_trans, axis=0), X_expected)
X_test = np.array([
[-1, 1, 0],
[101, 11, 10],
])
X_expected = np.array([
[0, 0, 0],
[1, 1, 1],
])
assert_array_almost_equal(transformer.transform(X_test), X_expected)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
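
# Illustrative sketch (not the estimator's code path): with a uniform output
# distribution the transform is essentially the empirical CDF -- each value is
# mapped to its interpolated rank in [0, 1], which is why the sorted transform of
# each 5-sample column above is exactly np.linspace(0, 1, 5).
def _empirical_cdf_sketch():
    col = np.array([0., 25., 50., 75., 100.])
    quantiles = np.sort(col)
    references = np.linspace(0, 1, quantiles.size)
    mapped = np.interp(col, quantiles, references)
    assert_array_almost_equal(np.sort(mapped), np.linspace(0, 1, 5))
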
def test_quantile_transform_subsampling():
    # Test that subsampling the input yields consistent results. We check
    # that the computed quantiles are almost mapped to a [0, 1] vector where
    # values are equally spaced. The infinity norm is checked to be smaller
    # than a given threshold. This is repeated 5 times.
# dense support
n_samples = 1000000
n_quantiles = 1000
X = np.sort(np.random.sample((n_samples, 1)), axis=0)
ROUND = 5
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-2
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation of the expected
    # linspace CDF
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
# sparse support
X = sparse.rand(n_samples, 1, density=.99, format='csc', random_state=0)
inf_norm_arr = []
for random_state in range(ROUND):
transformer = QuantileTransformer(random_state=random_state,
n_quantiles=n_quantiles,
subsample=n_samples // 10)
transformer.fit(X)
diff = (np.linspace(0, 1, n_quantiles) -
np.ravel(transformer.quantiles_))
inf_norm = np.max(np.abs(diff))
assert inf_norm < 1e-1
inf_norm_arr.append(inf_norm)
    # each random subsampling yields a unique approximation of the expected
    # linspace CDF
assert len(np.unique(inf_norm_arr)) == len(inf_norm_arr)
def test_quantile_transform_sparse_toy():
X = np.array([[0., 2., 0.],
[25., 4., 0.],
[50., 0., 2.6],
[0., 0., 4.1],
[0., 6., 0.],
[0., 8., 0.],
[75., 0., 2.3],
[0., 10., 0.],
[0., 0., 9.5],
[100., 0., 0.1]])
X = sparse.csc_matrix(X)
transformer = QuantileTransformer(n_quantiles=10)
transformer.fit(X)
X_trans = transformer.fit_transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
transformer_dense = QuantileTransformer(n_quantiles=10).fit(
X.toarray())
X_trans = transformer_dense.transform(X)
assert_array_almost_equal(np.min(X_trans.toarray(), axis=0), 0.)
assert_array_almost_equal(np.max(X_trans.toarray(), axis=0), 1.)
X_trans_inv = transformer_dense.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_trans_inv.toarray())
def test_quantile_transform_axis1():
X = np.array([[0, 25, 50, 75, 100],
[2, 4, 6, 8, 10],
[2.6, 4.1, 2.3, 9.5, 0.1]])
X_trans_a0 = quantile_transform(X.T, axis=0, n_quantiles=5)
X_trans_a1 = quantile_transform(X, axis=1, n_quantiles=5)
assert_array_almost_equal(X_trans_a0, X_trans_a1.T)
def test_quantile_transform_bounds():
    # Lower and upper bounds are manually mapped. We check that in the case
    # of a constant feature and a binary feature, the bounds are properly mapped.
X_dense = np.array([[0, 0],
[0, 0],
[1, 0]])
X_sparse = sparse.csc_matrix(X_dense)
# check sparse and dense are consistent
X_trans = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_dense)
assert_array_almost_equal(X_trans, X_dense)
X_trans_sp = QuantileTransformer(n_quantiles=3,
random_state=0).fit_transform(X_sparse)
assert_array_almost_equal(X_trans_sp.A, X_dense)
assert_array_almost_equal(X_trans, X_trans_sp.A)
# check the consistency of the bounds by learning on 1 matrix
# and transforming another
X = np.array([[0, 1],
[0, 0.5],
[1, 0]])
X1 = np.array([[0, 0.1],
[0, 0.5],
[1, 0.1]])
transformer = QuantileTransformer(n_quantiles=3).fit(X)
X_trans = transformer.transform(X1)
assert_array_almost_equal(X_trans, X1)
# check that values outside of the range learned will be mapped properly.
X = np.random.random((1000, 1))
transformer = QuantileTransformer()
transformer.fit(X)
assert (transformer.transform([[-10]]) ==
transformer.transform([[np.min(X)]]))
assert (transformer.transform([[10]]) ==
transformer.transform([[np.max(X)]]))
assert (transformer.inverse_transform([[-10]]) ==
transformer.inverse_transform([[np.min(transformer.references_)]]))
assert (transformer.inverse_transform([[10]]) ==
transformer.inverse_transform([[np.max(transformer.references_)]]))
def test_quantile_transform_and_inverse():
X_1 = iris.data
X_2 = np.array([[0.], [BOUNDS_THRESHOLD / 10], [1.5], [2], [3], [3], [4]])
for X in [X_1, X_2]:
transformer = QuantileTransformer(n_quantiles=1000, random_state=0)
X_trans = transformer.fit_transform(X)
X_trans_inv = transformer.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv, decimal=9)
def test_quantile_transform_nan():
X = np.array([[np.nan, 0, 0, 1],
[np.nan, np.nan, 0, 0.5],
[np.nan, 1, 1, 0]])
transformer = QuantileTransformer(n_quantiles=10, random_state=42)
transformer.fit_transform(X)
    # check that the quantiles of the first column are all NaN
assert np.isnan(transformer.quantiles_[:, 0]).all()
    # all other columns should not contain NaN
assert not np.isnan(transformer.quantiles_[:, 1:]).any()
@pytest.mark.parametrize("array_type", ['array', 'sparse'])
def test_quantile_transformer_sorted_quantiles(array_type):
# Non-regression test for:
# https://github.com/scikit-learn/scikit-learn/issues/15733
# Taken from upstream bug report:
# https://github.com/numpy/numpy/issues/14685
X = np.array([0, 1, 1, 2, 2, 3, 3, 4, 5, 5, 1, 1, 9, 9, 9, 8, 8, 7] * 10)
X = 0.1 * X.reshape(-1, 1)
X = _convert_container(X, array_type)
n_quantiles = 100
qt = QuantileTransformer(n_quantiles=n_quantiles).fit(X)
    # Check that the estimated quantile thresholds are monotonically
    # increasing:
quantiles = qt.quantiles_[:, 0]
assert len(quantiles) == 100
assert all(np.diff(quantiles) >= 0)
def test_robust_scaler_invalid_range():
for range_ in [
(-1, 90),
(-2, -3),
(10, 101),
(100.5, 101),
(90, 50),
]:
scaler = RobustScaler(quantile_range=range_)
with pytest.raises(ValueError, match=r'Invalid quantile range: \('):
scaler.fit(iris.data)
def test_scale_function_without_centering():
rng = np.random.RandomState(42)
X = rng.randn(4, 5)
    X[:, 0] = 0.0 # first feature is always zero
X_csr = sparse.csr_matrix(X)
X_scaled = scale(X, with_mean=False)
assert not np.any(np.isnan(X_scaled))
X_csr_scaled = scale(X_csr, with_mean=False)
assert not np.any(np.isnan(X_csr_scaled.data))
# test csc has same outcome
X_csc_scaled = scale(X_csr.tocsc(), with_mean=False)
assert_array_almost_equal(X_scaled, X_csc_scaled.toarray())
# raises value error on axis != 0
with pytest.raises(ValueError):
scale(X_csr, with_mean=False, axis=1)
assert_array_almost_equal(X_scaled.mean(axis=0),
[0., -0.01, 2.24, -0.35, -0.78], 2)
assert_array_almost_equal(X_scaled.std(axis=0), [0., 1., 1., 1., 1.])
# Check that X has not been copied
assert X_scaled is not X
X_csr_scaled_mean, X_csr_scaled_std = mean_variance_axis(X_csr_scaled, 0)
assert_array_almost_equal(X_csr_scaled_mean, X_scaled.mean(axis=0))
assert_array_almost_equal(X_csr_scaled_std, X_scaled.std(axis=0))
# null scale
X_csr_scaled = scale(X_csr, with_mean=False, with_std=False, copy=True)
assert_array_almost_equal(X_csr.toarray(), X_csr_scaled.toarray())
def test_robust_scale_axis1():
X = iris.data
X_trans = robust_scale(X, axis=1)
assert_array_almost_equal(np.median(X_trans, axis=1), 0)
q = np.percentile(X_trans, q=(25, 75), axis=1)
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scale_1d_array():
X = iris.data[:, 1]
X_trans = robust_scale(X)
assert_array_almost_equal(np.median(X_trans), 0)
q = np.percentile(X_trans, q=(25, 75))
iqr = q[1] - q[0]
assert_array_almost_equal(iqr, 1)
def test_robust_scaler_zero_variance_features():
# Check RobustScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.1],
[0., 1., +1.1]]
scaler = RobustScaler()
X_trans = scaler.fit_transform(X)
# NOTE: for such a small sample size, what we expect in the third column
# depends HEAVILY on the method used to calculate quantiles. The values
    # here were calculated to fit the quantiles produced by np.percentile
    # using numpy 1.9. Calculating quantiles with
# scipy.stats.mstats.scoreatquantile or scipy.stats.mstats.mquantiles
# would yield very different results!
X_expected = [[0., 0., +0.0],
[0., 0., -1.0],
[0., 0., +1.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 1., +0.],
[-1., 0., -0.83333],
[+0., 0., +1.66667]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=3)
def test_robust_scaler_unit_variance():
# Check RobustScaler with unit_variance=True on standard normal data with
# outliers
rng = np.random.RandomState(42)
X = rng.randn(1000000, 1)
X_with_outliers = np.vstack(
[X, np.ones((100, 1)) * 100, np.ones((100, 1)) * -100]
)
quantile_range = (1, 99)
robust_scaler = RobustScaler(
quantile_range=quantile_range, unit_variance=True
).fit(X_with_outliers)
X_trans = robust_scaler.transform(X)
assert robust_scaler.center_ == pytest.approx(0, abs=1e-3)
assert robust_scaler.scale_ == pytest.approx(1, abs=1e-2)
assert X_trans.std() == pytest.approx(1, abs=1e-2)
def test_maxabs_scaler_zero_variance_features():
# Check MaxAbsScaler on toy data with zero variance features
X = [[0., 1., +0.5],
[0., 1., -0.3],
[0., 1., +1.5],
[0., 0., +0.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans, X_expected)
X_trans_inv = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X, X_trans_inv)
# make sure new data gets transformed correctly
X_new = [[+0., 2., 0.5],
[-1., 1., 0.0],
[+0., 1., 1.5]]
X_trans_new = scaler.transform(X_new)
X_expected_new = [[+0., 2.0, 1.0 / 3.0],
[-1., 1.0, 0.0],
[+0., 1.0, 1.0]]
assert_array_almost_equal(X_trans_new, X_expected_new, decimal=2)
# function interface
X_trans = maxabs_scale(X)
assert_array_almost_equal(X_trans, X_expected)
# sparse data
X_csr = sparse.csr_matrix(X)
X_csc = sparse.csc_matrix(X)
X_trans_csr = scaler.fit_transform(X_csr)
X_trans_csc = scaler.fit_transform(X_csc)
X_expected = [[0., 1., 1.0 / 3.0],
[0., 1., -0.2],
[0., 1., 1.0],
[0., 0., 0.0]]
assert_array_almost_equal(X_trans_csr.A, X_expected)
assert_array_almost_equal(X_trans_csc.A, X_expected)
X_trans_csr_inv = scaler.inverse_transform(X_trans_csr)
X_trans_csc_inv = scaler.inverse_transform(X_trans_csc)
assert_array_almost_equal(X, X_trans_csr_inv.A)
assert_array_almost_equal(X, X_trans_csc_inv.A)
def test_maxabs_scaler_large_negative_value():
# Check MaxAbsScaler on toy data with a large negative value
X = [[0., 1., +0.5, -1.0],
[0., 1., -0.3, -0.5],
[0., 1., -100.0, 0.0],
[0., 0., +0.0, -2.0]]
scaler = MaxAbsScaler()
X_trans = scaler.fit_transform(X)
X_expected = [[0., 1., 0.005, -0.5],
[0., 1., -0.003, -0.25],
[0., 1., -1.0, 0.0],
[0., 0., 0.0, -1.0]]
assert_array_almost_equal(X_trans, X_expected)
def test_maxabs_scaler_transform_one_row_csr():
# Check MaxAbsScaler on transforming csr matrix with one row
X = sparse.csr_matrix([[0.5, 1., 1.]])
scaler = MaxAbsScaler()
scaler = scaler.fit(X)
X_trans = scaler.transform(X)
X_expected = sparse.csr_matrix([[1., 1., 1.]])
assert_array_almost_equal(X_trans.toarray(), X_expected.toarray())
X_scaled_back = scaler.inverse_transform(X_trans)
assert_array_almost_equal(X.toarray(), X_scaled_back.toarray())
def test_maxabs_scaler_1d():
# Test scaling of dataset along single axis
    for X in [X_1row, X_1col, X_list_1row, X_list_1col]:
scaler = MaxAbsScaler(copy=True)
X_scaled = scaler.fit(X).transform(X)
if isinstance(X, list):
X = np.array(X) # cast only after scaling done
if _check_dim_1axis(X) == 1:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)),
np.ones(n_features))
else:
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# check inverse transform
X_scaled_back = scaler.inverse_transform(X_scaled)
assert_array_almost_equal(X_scaled_back, X)
# Constant feature
X = np.ones((5, 1))
scaler = MaxAbsScaler()
X_scaled = scaler.fit(X).transform(X)
assert_array_almost_equal(np.abs(X_scaled.max(axis=0)), 1.)
assert scaler.n_samples_seen_ == X.shape[0]
# function interface
X_1d = X_1row.ravel()
max_abs = np.abs(X_1d).max()
assert_array_almost_equal(X_1d / max_abs, maxabs_scale(X_1d, copy=True))
def test_maxabs_scaler_partial_fit():
# Test if partial_fit run over many batches of size 1 and 50
# gives the same results as fit
X = X_2d[:100, :]
n = X.shape[0]
for chunk_size in [1, 2, 50, n, n + 42]:
# Test mean at the end of the process
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler()
scaler_incr_csr = MaxAbsScaler()
scaler_incr_csc = MaxAbsScaler()
for batch in gen_batches(n, chunk_size):
scaler_incr = scaler_incr.partial_fit(X[batch])
X_csr = sparse.csr_matrix(X[batch])
scaler_incr_csr = scaler_incr_csr.partial_fit(X_csr)
X_csc = sparse.csc_matrix(X[batch])
scaler_incr_csc = scaler_incr_csc.partial_fit(X_csc)
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csr.max_abs_)
assert_array_almost_equal(scaler_batch.max_abs_,
scaler_incr_csc.max_abs_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert (scaler_batch.n_samples_seen_ ==
scaler_incr_csr.n_samples_seen_)
assert (scaler_batch.n_samples_seen_ ==
scaler_incr_csc.n_samples_seen_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csr.scale_)
assert_array_almost_equal(scaler_batch.scale_, scaler_incr_csc.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs after 1 step
batch0 = slice(0, chunk_size)
scaler_batch = MaxAbsScaler().fit(X[batch0])
scaler_incr = MaxAbsScaler().partial_fit(X[batch0])
assert_array_almost_equal(scaler_batch.max_abs_, scaler_incr.max_abs_)
assert scaler_batch.n_samples_seen_ == scaler_incr.n_samples_seen_
assert_array_almost_equal(scaler_batch.scale_, scaler_incr.scale_)
assert_array_almost_equal(scaler_batch.transform(X),
scaler_incr.transform(X))
        # Test max_abs until the end of the partial fits
scaler_batch = MaxAbsScaler().fit(X)
scaler_incr = MaxAbsScaler() # Clean estimator
for i, batch in enumerate(gen_batches(n, chunk_size)):
scaler_incr = scaler_incr.partial_fit(X[batch])
assert_correct_incr(i, batch_start=batch.start,
batch_stop=batch.stop, n=n,
chunk_size=chunk_size,
n_samples_seen=scaler_incr.n_samples_seen_)
def test_normalizer_l1():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l1', copy=True)
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm1 = toarray(X_norm)
normalizer = Normalizer(norm='l1', copy=False)
X_norm = normalizer.transform(X)
assert X_norm is X
X_norm2 = toarray(X_norm)
for X_norm in (X_norm1, X_norm2):
row_sums = np.abs(X_norm).sum(axis=1)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(row_sums[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_sums[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_l2():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='l2', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='l2', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(la.norm(X_norm[i]), 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max():
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
X_sparse_unpruned = sparse.csr_matrix(X_dense)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# set the row number 3 to zero without pruning (can happen in real life)
indptr_3 = X_sparse_unpruned.indptr[3]
indptr_4 = X_sparse_unpruned.indptr[4]
X_sparse_unpruned.data[indptr_3:indptr_4] = 0.0
# build the pruned variant using the regular constructor
X_sparse_pruned = sparse.csr_matrix(X_dense)
# check inputs that support the no-copy optim
for X in (X_dense, X_sparse_pruned, X_sparse_unpruned):
normalizer = Normalizer(norm='max', copy=True)
X_norm1 = normalizer.transform(X)
assert X_norm1 is not X
X_norm1 = toarray(X_norm1)
normalizer = Normalizer(norm='max', copy=False)
X_norm2 = normalizer.transform(X)
assert X_norm2 is X
X_norm2 = toarray(X_norm2)
for X_norm in (X_norm1, X_norm2):
row_maxs = abs(X_norm).max(axis=1)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(row_maxs[3], 0.0)
# check input for which copy=False won't prevent a copy
for init in (sparse.coo_matrix, sparse.csc_matrix, sparse.lil_matrix):
X = init(X_dense)
X_norm = normalizer = Normalizer(norm='l2', copy=False).transform(X)
assert X_norm is not X
assert isinstance(X_norm, sparse.csr_matrix)
X_norm = toarray(X_norm)
for i in range(3):
assert_almost_equal(row_maxs[i], 1.0)
assert_almost_equal(la.norm(X_norm[3]), 0.0)
def test_normalizer_max_sign():
# check that we normalize by a positive number even for negative data
rng = np.random.RandomState(0)
X_dense = rng.randn(4, 5)
# set the row number 3 to zero
X_dense[3, :] = 0.0
# check for mixed data where the value with
# largest magnitude is negative
X_dense[2, abs(X_dense[2, :]).argmax()] *= -1
X_all_neg = -np.abs(X_dense)
X_all_neg_sparse = sparse.csr_matrix(X_all_neg)
for X in (X_dense, X_all_neg, X_all_neg_sparse):
normalizer = Normalizer(norm='max')
X_norm = normalizer.transform(X)
assert X_norm is not X
X_norm = toarray(X_norm)
assert_array_equal(
np.sign(X_norm), np.sign(toarray(X)))
def test_normalize():
# Test normalize function
# Only tests functionality not used by the tests for Normalizer.
X = np.random.RandomState(37).randn(3, 2)
assert_array_equal(normalize(X, copy=False),
normalize(X.T, axis=0, copy=False).T)
with pytest.raises(ValueError):
normalize([[0]], axis=2)
with pytest.raises(ValueError):
normalize([[0]], norm='l3')
rs = np.random.RandomState(0)
X_dense = rs.randn(10, 5)
X_sparse = sparse.csr_matrix(X_dense)
ones = np.ones((10))
for X in (X_dense, X_sparse):
for dtype in (np.float32, np.float64):
for norm in ('l1', 'l2'):
X = X.astype(dtype)
X_norm = normalize(X, norm=norm)
assert X_norm.dtype == dtype
X_norm = toarray(X_norm)
if norm == 'l1':
row_sums = np.abs(X_norm).sum(axis=1)
else:
X_norm_squared = X_norm**2
row_sums = X_norm_squared.sum(axis=1)
assert_array_almost_equal(row_sums, ones)
# Test return_norm
X_dense = np.array([[3.0, 0, 4.0], [1.0, 0.0, 0.0], [2.0, 3.0, 0.0]])
for norm in ('l1', 'l2', 'max'):
_, norms = normalize(X_dense, norm=norm, return_norm=True)
if norm == 'l1':
assert_array_almost_equal(norms, np.array([7.0, 1.0, 5.0]))
elif norm == 'l2':
assert_array_almost_equal(norms, np.array([5.0, 1.0, 3.60555127]))
else:
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
X_sparse = sparse.csr_matrix(X_dense)
for norm in ('l1', 'l2'):
with pytest.raises(NotImplementedError):
normalize(X_sparse, norm=norm, return_norm=True)
_, norms = normalize(X_sparse, norm='max', return_norm=True)
assert_array_almost_equal(norms, np.array([4.0, 1.0, 3.0]))
def test_binarizer():
X_ = np.array([[1, 0, 5], [2, 3, -1]])
for init in (np.array, list, sparse.csr_matrix, sparse.csc_matrix):
X = init(X_.copy())
binarizer = Binarizer(threshold=2.0, copy=True)
X_bin = toarray(binarizer.transform(X))
assert np.sum(X_bin == 0) == 4
assert np.sum(X_bin == 1) == 2
X_bin = binarizer.transform(X)
assert sparse.issparse(X) == sparse.issparse(X_bin)
binarizer = Binarizer(copy=True).fit(X)
X_bin = toarray(binarizer.transform(X))
assert X_bin is not X
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(copy=True)
X_bin = binarizer.transform(X)
assert X_bin is not X
X_bin = toarray(X_bin)
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(copy=False)
X_bin = binarizer.transform(X)
if init is not list:
assert X_bin is X
binarizer = Binarizer(copy=False)
X_float = np.array([[1, 0, 5], [2, 3, -1]], dtype=np.float64)
X_bin = binarizer.transform(X_float)
if init is not list:
assert X_bin is X_float
X_bin = toarray(X_bin)
assert np.sum(X_bin == 0) == 2
assert np.sum(X_bin == 1) == 4
binarizer = Binarizer(threshold=-0.5, copy=True)
for init in (np.array, list):
X = init(X_.copy())
X_bin = toarray(binarizer.transform(X))
assert np.sum(X_bin == 0) == 1
assert np.sum(X_bin == 1) == 5
X_bin = binarizer.transform(X)
# Cannot use threshold < 0 for sparse
with pytest.raises(ValueError):
binarizer.transform(sparse.csc_matrix(X))
def test_center_kernel():
# Test that KernelCenterer is equivalent to StandardScaler
# in feature space
rng = np.random.RandomState(0)
X_fit = rng.random_sample((5, 4))
scaler = StandardScaler(with_std=False)
scaler.fit(X_fit)
X_fit_centered = scaler.transform(X_fit)
K_fit = np.dot(X_fit, X_fit.T)
# center fit time matrix
centerer = KernelCenterer()
    K_fit_centered = np.dot(X_fit_centered, X_fit_centered.T)
#!/usr/bin/env python
# -*- coding: utf-8 -*-
# This file is part of the SPORCO package. Details of the copyright
# and user license can be found in the 'LICENSE.txt' file distributed
# with the package.
"""
CSC with a Spatial Mask
=======================
This example demonstrates the use of :class:`.cbpdn.AddMaskSim` for convolutional sparse coding with a spatial mask :cite:`wohlberg-2016-boundary`. If the ``sporco-cuda`` extension is installed and a GPU is available, a GPU accelerated version is used. The example problem is inpainting of randomly distributed corruption of a greyscale image.
"""
from __future__ import print_function
from builtins import input
import pyfftw # See https://github.com/pyFFTW/pyFFTW/issues/40
import numpy as np
from sporco import util
from sporco import signal
from sporco import metric
from sporco import plot
from sporco.admm import tvl2
from sporco.admm import cbpdn
from sporco.fft import fftconv
from sporco import cuda
# If running in a notebook, try to use wurlitzer so that output from the CUDA
# code will be properly captured in the notebook.
sys_pipes = util.notebook_system_output()
"""
Load a reference image.
"""
img = util.ExampleImages().image('monarch.png', zoom=0.5, scaled=True,
gray=True, idxexp=np.s_[:, 160:672])
"""
Create random mask and apply to reference image to obtain test image. (The call to ``numpy.random.seed`` ensures that the pseudo-random noise is reproducible.)
"""
np.random.seed(12345)
frc = 0.5
msk = signal.rndmask(img.shape, frc, dtype=np.float32)
imgw = msk * img
"""
Define pad and crop functions.
"""
pn = 8
spad = lambda x: np.pad(x, pn, mode='symmetric')
zpad = lambda x: np.pad(x, pn, mode='constant')
crop = lambda x: x[pn:-pn, pn:-pn]
"""
Construct padded mask and test image.
"""
mskp = zpad(msk)
imgwp = spad(imgw)
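# Note that the image is padded symmetrically while the mask is zero-padded, so
# the padded border is treated as unknown (masked-out) pixels.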
"""
$\ell_2$-TV denoising with a spatial mask as a non-linear lowpass filter. The highpass component is the difference between the test image and the lowpass component, multiplied by the mask for faster convergence of the convolutional sparse coding (see :cite:`wohlberg-2017-convolutional3`).
"""
lmbda = 0.05
opt = tvl2.TVL2Denoise.Options({'Verbose': False, 'MaxMainIter': 200,
'DFidWeight': mskp, 'gEvalY': False,
'AutoRho': {'Enabled': True}})
b = tvl2.TVL2Denoise(imgwp, lmbda, opt)
sl = b.solve()
sh = mskp * (imgwp - sl)
"""
Load dictionary.
"""
D = util.convdicts()['G:8x8x128']
"""
Set up :class:`.admm.cbpdn.ConvBPDN` options.
"""
lmbda = 2e-2
opt = cbpdn.ConvBPDN.Options({'Verbose': True, 'MaxMainIter': 200,
'HighMemSolve': True, 'RelStopTol': 5e-3,
'AuxVarObj': False, 'RelaxParam': 1.8,
'rho': 5e1*lmbda + 1e-1, 'AutoRho': {'Enabled': False,
'StdResiduals': False}})
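# 'rho' (the ADMM penalty parameter) is fixed from lmbda rather than adapted,
# which is consistent with the 'AutoRho' option being disabled above.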
"""
Construct :class:`.admm.cbpdn.AddMaskSim` wrapper for :class:`.admm.cbpdn.ConvBPDN` and solve via wrapper. This example could also have made use of :class:`.admm.cbpdn.ConvBPDNMaskDcpl` (see example `cbpdn_md_gry`), which has similar performance in this application, but :class:`.admm.cbpdn.AddMaskSim` has the advantage of greater flexibility in that the wrapper can be applied to a variety of CSC solver objects. If the ``sporco-cuda`` extension is installed and a GPU is available, use the CUDA implementation of this combination.
"""
if cuda.device_count() > 0:
ams = None
print('%s GPU found: running CUDA solver' % cuda.device_name())
tm = util.Timer()
with sys_pipes(), util.ContextTimer(tm):
X = cuda.cbpdnmsk(D, sh, mskp, lmbda, opt)
t = tm.elapsed()
imgr = crop(sl + np.sum(fftconv(D, X, axes=(0, 1)), axis=-1))
else:
ams = cbpdn.AddMaskSim(cbpdn.ConvBPDN, D, sh, mskp, lmbda, opt=opt)
X = ams.solve()
t = ams.timer.elapsed('solve')
imgr = crop(sl + ams.reconstruct().squeeze())
"""
Display solve time and reconstruction performance.
"""
print("AddMaskSim wrapped ConvBPDN solve time: %.2fs" % t)
print("Corrupted image PSNR: %5.2f dB" % metric.psnr(img, imgw))
print("Recovered image PSNR: %5.2f dB" % metric.psnr(img, imgr))
"""
Display reference, test, and reconstructed image
"""
fig = plot.figure(figsize=(21, 7))
plot.subplot(1, 3, 1)
plot.imview(img, title='Reference image', fig=fig)
plot.subplot(1, 3, 2)
plot.imview(imgw, title='Corrupted image', fig=fig)
plot.subplot(1, 3, 3)
plot.imview(imgr, title='Reconstructed image', fig=fig)
fig.show()
"""
Display lowpass component and sparse representation
"""
fig = plot.figure(figsize=(14, 7))
plot.subplot(1, 2, 1)
plot.imview(sl, cmap=plot.cm.Blues, title='Lowpass component', fig=fig)
plot.subplot(1, 2, 2)
plot.imview(np.sum(abs(X).squeeze(), axis=-1), cmap=plot.cm.Blues,
title='Sparse representation', fig=fig)
fig.show()
"""
Plot functional value, residuals, and rho (not available if GPU implementation used).
"""
if ams is not None:
its = ams.getitstat()
fig = plot.figure(figsize=(21, 7))
plot.subplot(1, 3, 1)
plot.plot(its.ObjFun, xlbl='Iterations', ylbl='Functional', fig=fig)
plot.subplot(1, 3, 2)
    plot.plot(np.vstack((its.PrimalRsdl, its.DualRsdl)).T, ptyp='semilogy',
              xlbl='Iterations', ylbl='Residual', lgnd=['Primal', 'Dual'],
              fig=fig)
#!/usr/bin/env python
import numpy as np
import rospy
from geometry_msgs.msg import PoseStamped
from styx_msgs.msg import Lane, Waypoint
from scipy.spatial import KDTree
from std_msgs.msg import Int32
import sensor_msgs.point_cloud2 as pc2
from sensor_msgs.msg import PointCloud2
import math
import copy
import yaml
'''
This node will publish waypoints from the car's current position to some `x` distance ahead.
As mentioned in the doc, you should ideally first implement a version which does not care
about traffic lights or obstacles.
Once you have created dbw_node, you will update this node to use the status of traffic lights too.
Please note that our simulator also provides the exact location of traffic lights and their
current status in `/vehicle/traffic_lights` message. You can use this message to build this node
as well as to verify your TL classifier.
TODO (for Yousuf and Aaron): Stopline location for each traffic light.
'''
LOOKAHEAD_WPS = 50 # Number of waypoints we will publish. You can change this number
MAX_DECEL = .5
class WaypointUpdater(object):
def __init__(self):
rospy.init_node('waypoint_updater')
self.base_lane = [None, None, None]
self.pose = None
self.stopline_wp_idx = -1
self.waypoints_2d = None
self.waypoint_tree = None
self.lasttraffic = 0
self.lane = 1 # 0,1,2
self.lanetime = 0
self.prev_pose = None
self.stopobstacle_idx = -1
self.obstacles = np.array([])
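        # Obstacles are stored as (s, d) pairs: the waypoint index along the
        # track and the lateral Frenet offset (filled in by obstacle_cb).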
config_string = rospy.get_param("/traffic_light_config")
self.config = yaml.load(config_string)
self.stop_line_positions = self.config['stop_line_positions']
rospy.Subscriber('/current_pose', PoseStamped, self.pose_cb)
rospy.Subscriber('/base_waypoints', Lane, self.waypoints_cb)
rospy.Subscriber('/traffic_waypoint', Int32, self.traffic_cb)
rospy.Subscriber('/vehicle/obstacle_points', PointCloud2, self.obstacle_cb)
# TODO: Add a subscriber for /traffic_waypoint and /obstacle_waypoint below
self.final_waypoints_pub = rospy.Publisher('final_waypoints', Lane, queue_size=1)
# TODO: Add other member variables you need below
self.loop()
def loop(self):
rate = rospy.Rate(30)
while not rospy.is_shutdown():
if self.pose and self.base_lane[1]:
self.publish_waypoints()
rate.sleep()
def get_closest_waypoint_idx(self):
x = self.pose.pose.position.x
y = self.pose.pose.position.y
if self.waypoint_tree is not None:
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# check if closest is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
# equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
pre_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - pre_vect, pos_vect - cl_vect)
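            # val > 0 means the closest waypoint is behind the vehicle along the
            # track direction, so advance to the next waypoint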
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
return 0
def get_closest_waypoint_idx1(self, p):
x = p[0]
y = p[1]
if self.waypoint_tree is not None:
closest_idx = self.waypoint_tree.query([x, y], 1)[1]
# check if closest is ahead or behind vehicle
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
# equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
pre_vect = np.array(prev_coord)
pos_vect = np.array([x, y])
val = np.dot(cl_vect - pre_vect, pos_vect - cl_vect)
if val > 0:
closest_idx = (closest_idx + 1) % len(self.waypoints_2d)
return closest_idx
return 0
    # Select the best lane: if the current lane has obstacles, change to another lane.
@staticmethod
def choose_best_lane(obstacles, lane, s, snum):
close_s = []
for j in range(3):
item = [100000, 0]
item1 = [item, item]
close_s.append(item1)
        # for each lane, find the nearest car ahead of and behind the ego car and record its distance and velocity
for i in range(len(obstacles)):
d = obstacles[i][1]
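            # d is the lateral Frenet offset; three 4 m wide lanes are assumed,
            # so (0, 4) -> lane 0, (4, 8) -> lane 1 and (8, 12) -> lane 2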
if (d > 0) and (d < 12):
if d > 8:
check_lane = 2
elif d < 4:
check_lane = 0
else:
check_lane = 1
check_car_s = obstacles[i][0]
pre_dis = (check_car_s - s + snum) % snum
after_dis = (s - check_car_s + snum) % snum
if close_s[check_lane][0][0] > pre_dis:
close_s[check_lane][0][0] = pre_dis
close_s[check_lane][0][1] = 0
if close_s[check_lane][1][0] > after_dis:
close_s[check_lane][1][0] = after_dis
close_s[check_lane][1][1] = 0
# calculate the cost of each lane
# print(close_s)
costs = [0, 0, 0]
for j in range(3):
if close_s[j][0][0] <= 50:
                # if the car ahead of the ego car in that lane is closer than 50 meters, rule this lane out
costs[j] = 10000
else:
                # if the car behind the ego car in that lane is closer than 15 meters, don't change into it
if j != lane and close_s[j][1][0] < 15:
costs[j] = 10000
elif costs[j] == 0:
tems = close_s[j][0][0]
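                    # the cost decays with the forward gap: 1 - exp(-1/gap) tends
                    # to 1 as the gap shrinks and behaves like 1/gap for large gaps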
cost1 = 1 - math.exp(-1 * (1 / tems))
costs[j] = 1000 * cost1
        # decrease the cost of the current lane by 1 so the car keeps its lane when costs are otherwise equal
costs[lane] -= 1
min_cost = costs[0]
min_lane = 0
# select the minimum cost lane
for j in range(3):
if min_cost > costs[j]:
min_cost = costs[j]
min_lane = j
        # when changing from lane 0 to 2 (or 2 to 0), check the cost of lane 1:
        # if lane 1 costs 10000, stay in the current lane; otherwise change to lane 1 first
if abs(min_lane - lane) == 2:
if costs[1] < 10000:
return 1
else:
return lane
return min_lane
    # check whether the car is too close to an obstacle in its current lane
def istooclose(self, closest_idx):
size = len(self.obstacles)
too_close = False
self.stopobstacle_idx = -1
closedis = 10000
for i in range(size):
d = self.obstacles[i][1]
s = self.obstacles[i][0]
            # a vehicle still changing into this lane (within 1 m beyond the lane edge) is also counted as being in it
if (2 + 4 * self.lane + 2) + 1 > d > (2 + 4 * self.lane - 2) - 1 \
and s > closest_idx and s - closest_idx < 30:
too_close = True
if closedis > s - closest_idx:
closedis = s - closest_idx
if closedis == 10000:
self.stopobstacle_idx = -1
else:
self.stopobstacle_idx = closedis
return too_close
def publish_waypoints(self):
if self.waypoint_tree:
final_lane = self.generate_lane()
self.final_waypoints_pub.publish(final_lane)
# convert the s,d coordinates to x,y coordinates
@staticmethod
def get_xy(s, d, waypoints):
d = d - 6
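        # shift d so it is measured relative to the reference waypoints, which
        # are assumed to run along the centre lane at d = 6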
wlen = len(waypoints)
ret = copy.deepcopy(waypoints[s])
ps = (s - 1 + wlen) % wlen
x2 = waypoints[s].pose.pose.position.x
y2 = waypoints[s].pose.pose.position.y
x1 = waypoints[ps].pose.pose.position.x
y1 = waypoints[ps].pose.pose.position.y
alf = math.atan2(y2 - y1, x2 - x1)
# print(alf)
# print(str(ps)+','+str(x1)+','+str(y1)+'=='+str(s)+','+str(x2)+','+str(y2))
dy21 = y2 - y1
dx21 = x2 - x1
if (dy21 >= 0 and dx21 >= 0) or (dy21 < 0 and dx21 < 0):
alf = math.pi / 2 - alf
y0 = y2 - d * math.sin(alf)
x0 = x2 + d * math.cos(alf)
ret.pose.pose.position.x = x0
ret.pose.pose.position.y = y0
elif (dy21 >= 0 > dx21) or (dy21 < 0 <= dx21):
alf = math.pi / 2 + alf
y0 = y2 - d * math.sin(alf)
x0 = x2 - d * math.cos(alf)
ret.pose.pose.position.x = x0
ret.pose.pose.position.y = y0
return ret
    # calculate the distance from the current car position to the nearest stop line ahead (stop-line positions come from the config file)
def distostopline(self, closest_idx):
ret = 10000
for i in range(len(self.stop_line_positions)):
stopline_idx = self.get_closest_waypoint_idx1(self.stop_line_positions[i])
dis = stopline_idx - closest_idx
if (dis > 0) and (ret > dis):
ret = dis
return ret
def generate_lane(self):
lane = Lane()
closest_idx = self.get_closest_waypoint_idx()
stopdis = self.distostopline(closest_idx)
# select new lane
if stopdis > 50 and len(self.obstacles) > 0:
self.lane = self.choose_best_lane(self.obstacles, self.lane, closest_idx, len(self.base_lane[1].waypoints))
# print('newlane:'+str(self.lane))
farthest_idx = closest_idx + LOOKAHEAD_WPS
base_waypoints = copy.deepcopy(self.base_lane[self.lane].waypoints[closest_idx:farthest_idx])
if len(base_waypoints) > 10:
x1 = self.pose.pose.position.x
y1 = self.pose.pose.position.y
x2 = base_waypoints[0].pose.pose.position.x
y2 = base_waypoints[0].pose.pose.position.y
x3 = base_waypoints[1].pose.pose.position.x
y3 = base_waypoints[1].pose.pose.position.y
            # if a lane change is in progress, blend the first 10 points of the path from the current position toward the new lane
if self.distance2line([x1, y1], [x2, y2], [x3, y3]) > 1:
x2 = base_waypoints[10].pose.pose.position.x
y2 = base_waypoints[10].pose.pose.position.y
for i in range(10):
base_waypoints[i].pose.pose.position.y = (y2 - y1) * (i + 2) / 10 + y1
base_waypoints[i].pose.pose.position.x = (x2 - x1) * (i + 2) / 10 + x1
        # check whether an obstacle ahead in the current lane is too close
self.stopobstacle_idx = -1
if len(self.obstacles) > 0:
self.istooclose(closest_idx)
if self.stopobstacle_idx != -1:
print('obs_point:' + str(self.stopobstacle_idx))
if self.stopline_wp_idx != -1 and self.stopline_wp_idx < farthest_idx:
stop_point = min(self.stopobstacle_idx, self.stopline_wp_idx)
else:
stop_point = self.stopobstacle_idx
else:
if self.stopline_wp_idx != -1 and self.stopline_wp_idx < farthest_idx:
stop_point = self.stopline_wp_idx
else:
stop_point = -1
# print('stop:'+str(self.stopline_wp_idx))
if stop_point != -1:
lane.waypoints = self.decelerate_waypoints(base_waypoints, closest_idx, stop_point)
else:
lane.waypoints = base_waypoints
return lane
def decelerate_waypoints(self, waypoints, closest_idx, stop_point):
temp = []
# rospy.loginfo('%s %s %s',closest_idx,self.stopline_wp_idx,len(waypoints))
for i, wp in enumerate(waypoints):
p = Waypoint()
p.pose = wp.pose
stop_idx = max(stop_point - closest_idx - 2, 0)
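            # back off two waypoints from the stop point (presumably so the front
            # of the car, not its centre, ends up at the line)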
dist = self.distance(waypoints, i, stop_idx)
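            # constant-deceleration kinematics: v^2 = 2 * a * d, so this is the
            # highest speed from which the car can still stop within dist at MAX_DECEL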
vel = math.sqrt(2 * MAX_DECEL * dist)
if vel < 1.:
vel = 0.
p.twist.twist.linear.x = min(vel, wp.twist.twist.linear.x)
temp.append(p)
return temp
def pose_cb(self, msg):
# TODO: Implement
self.pose = msg
# rospy.loginfo('self_pos:%s',self.pose.pose.position.x)
def waypoints_cb(self, waypoints):
# TODO: Implement
print('waypoints_cb')
self.base_lane[1] = waypoints
self.base_lane[0] = copy.deepcopy(waypoints)
self.base_lane[2] = copy.deepcopy(waypoints)
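        # build parallel waypoint lists for the two neighbouring lanes (d = 2 and
        # d = 10) by laterally offsetting the received centre-lane waypoints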
for i in range(len(self.base_lane[1].waypoints)):
self.base_lane[0].waypoints[i] = self.get_xy(i, 2, self.base_lane[1].waypoints)
self.base_lane[2].waypoints[i] = self.get_xy(i, 10, self.base_lane[1].waypoints)
if not self.waypoints_2d:
self.waypoints_2d = [[waypoint.pose.pose.position.x, waypoint.pose.pose.position.y] for waypoint in
waypoints.waypoints]
self.waypoint_tree = KDTree(self.waypoints_2d)
def traffic_cb(self, msg):
# TODO: Callback for /traffic_waypoint message. Implement
if self.lasttraffic != msg.data:
# rospy.loginfo('traffic:%s',msg.data)
self.lasttraffic = msg.data
self.stopline_wp_idx = msg.data
@staticmethod
def distance2line(p0, p1, p2):
x0 = p0[0]
y0 = p0[1]
x1 = p1[0]
y1 = p1[1]
x2 = p2[0]
y2 = p2[1]
d = math.sqrt((y2 - y1) * (y2 - y1) + (x1 - x2) * (x1 - x2))
return (x0 * (y2 - y1) + y0 * (x1 - x2) + y1 * x2 - x1 * y2) / d
def obstacle_cb(self, msg):
# TODO: Callback for /obstacle_waypoint message. We will implement it later
obstacle_list = []
if self.waypoint_tree:
for p in pc2.read_points(msg, skip_nans=True, field_names=('x', 'y', 'z')):
closest_idx = self.waypoint_tree.query([p[0], p[1]], 1)[1]
prev_idx = (closest_idx - 1) % len(self.waypoints_2d)
closest_coord = self.waypoints_2d[closest_idx]
prev_coord = self.waypoints_2d[closest_idx - 1]
# equation for hyperplane through closest_coords
cl_vect = np.array(closest_coord)
                pre_vect = np.array(prev_coord)